// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VP9 codec driver
*
* Copyright (C) 2021 Collabora Ltd.
*/
#include "media/videobuf2-core.h"
#include "media/videobuf2-dma-contig.h"
#include "media/videobuf2-v4l2.h"
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-vp9.h>
#include "hantro.h"
#include "hantro_vp9.h"
#include "hantro_g2_regs.h"
#define G2_ALIGN 16
enum hantro_ref_frames {
INTRA_FRAME = 0,
LAST_FRAME = 1,
GOLDEN_FRAME = 2,
ALTREF_FRAME = 3,
MAX_REF_FRAMES = 4
};
static int start_prepare_run(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame **dec_params)
{
const struct v4l2_ctrl_vp9_compressed_hdr *prob_updates;
struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
struct v4l2_ctrl *ctrl;
unsigned int fctx_idx;
/* v4l2-specific stuff */
hantro_start_prepare_run(ctx);
ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, V4L2_CID_STATELESS_VP9_FRAME);
if (WARN_ON(!ctrl))
return -EINVAL;
*dec_params = ctrl->p_cur.p;
ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, V4L2_CID_STATELESS_VP9_COMPRESSED_HDR);
if (WARN_ON(!ctrl))
return -EINVAL;
prob_updates = ctrl->p_cur.p;
vp9_ctx->cur.tx_mode = prob_updates->tx_mode;
/*
* vp9 stuff
*
* by this point the userspace has done all parts of 6.2 uncompressed_header()
* except this fragment:
* if ( FrameIsIntra || error_resilient_mode ) {
* setup_past_independence ( )
* if ( frame_type == KEY_FRAME || error_resilient_mode == 1 ||
* reset_frame_context == 3 ) {
* for ( i = 0; i < 4; i ++ ) {
* save_probs( i )
* }
* } else if ( reset_frame_context == 2 ) {
* save_probs( frame_context_idx )
* }
* frame_context_idx = 0
* }
*/
fctx_idx = v4l2_vp9_reset_frame_ctx(*dec_params, vp9_ctx->frame_context);
vp9_ctx->cur.frame_context_idx = fctx_idx;
/* 6.1 frame(sz): load_probs() and load_probs2() */
vp9_ctx->probability_tables = vp9_ctx->frame_context[fctx_idx];
/*
* The userspace has also performed 6.3 compressed_header(), but handling the
* probs in a special way. All probs which need updating, except MV-related,
* have been read from the bitstream and translated through inv_map_table[],
* but no 6.3.6 inv_recenter_nonneg(v, m) has been performed. The values passed
* by userspace are either translated values (there are no 0 values in
* inv_map_table[]), or zero to indicate no update. All MV-related probs which need
* updating have been read from the bitstream and (mv_prob << 1) | 1 has been
* performed. The values passed by userspace are either new values
* to replace old ones (the above mentioned shift and bitwise or never result in
* a zero) or zero to indicate no update.
* fw_update_probs() performs actual probs updates or leaves probs as-is
* for values for which a zero was passed from userspace.
*/
v4l2_vp9_fw_update_probs(&vp9_ctx->probability_tables, prob_updates, *dec_params);
return 0;
}
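/*
 * Illustrative sketch (not part of the driver): the MV-prob encoding
 * described above means the kernel-side update reduces to "non-zero
 * replaces, zero keeps". Userspace passes either 0 ("no update") or the
 * new probability already encoded as (mv_prob << 1) | 1, which is always
 * odd and therefore never collides with the 0 sentinel. The helper name
 * below is hypothetical.
 */
static inline u8 example_apply_mv_prob_update(u8 old_prob, u8 passed)
{
return passed ? passed : old_prob;
}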
static size_t chroma_offset(const struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp9_frame *dec_params)
{
int bytes_per_pixel = dec_params->bit_depth == 8 ? 1 : 2;
return ctx->src_fmt.width * ctx->src_fmt.height * bytes_per_pixel;
}
static size_t mv_offset(const struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp9_frame *dec_params)
{
size_t cr_offset = chroma_offset(ctx, dec_params);
return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
}
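/*
 * Illustrative sketch (not part of the driver): chroma_offset() and
 * mv_offset() imply a [ luma | chroma | motion vectors ] layout, with the
 * MV area starting at the 16-byte-aligned end of the 4:2:0 pixel data.
 * example_mv_offset() is a hypothetical stand-alone rewrite of the same
 * arithmetic: a 64x64 8-bit frame gives luma = 4096 bytes, chroma at
 * offset 4096, and MVs at ALIGN(6144, 16) = 6144.
 */
static inline size_t example_mv_offset(size_t width, size_t height, unsigned int bit_depth)
{
size_t luma = width * height * (bit_depth == 8 ? 1 : 2);
/* chroma adds half the luma size for 4:2:0, hence luma * 3 / 2 */
return ALIGN(luma * 3 / 2, G2_ALIGN);
}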
static struct hantro_decoded_buffer *
get_ref_buf(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp)
{
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
struct vb2_buffer *buf;
/*
* If a ref is unused or invalid, the address of the current
* destination buffer is returned.
*/
buf = vb2_find_buffer(cap_q, timestamp);
if (!buf)
buf = &dst->vb2_buf;
return vb2_to_hantro_decoded_buf(buf);
}
static void update_dec_buf_info(struct hantro_decoded_buffer *buf,
const struct v4l2_ctrl_vp9_frame *dec_params)
{
buf->vp9.width = dec_params->frame_width_minus_1 + 1;
buf->vp9.height = dec_params->frame_height_minus_1 + 1;
buf->vp9.bit_depth = dec_params->bit_depth;
}
static void update_ctx_cur_info(struct hantro_vp9_dec_hw_ctx *vp9_ctx,
struct hantro_decoded_buffer *buf,
const struct v4l2_ctrl_vp9_frame *dec_params)
{
vp9_ctx->cur.valid = true;
vp9_ctx->cur.reference_mode = dec_params->reference_mode;
vp9_ctx->cur.interpolation_filter = dec_params->interpolation_filter;
vp9_ctx->cur.flags = dec_params->flags;
vp9_ctx->cur.timestamp = buf->base.vb.vb2_buf.timestamp;
}
static void config_output(struct hantro_ctx *ctx,
struct hantro_decoded_buffer *dst,
const struct v4l2_ctrl_vp9_frame *dec_params)
{
dma_addr_t luma_addr, chroma_addr, mv_addr;
hantro_reg_write(ctx->dev, &g2_out_dis, 0);
if (!ctx->dev->variant->legacy_regs)
hantro_reg_write(ctx->dev, &g2_output_format, 0);
luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
hantro_write_addr(ctx->dev, G2_OUT_LUMA_ADDR, luma_addr);
chroma_addr = luma_addr + chroma_offset(ctx, dec_params);
hantro_write_addr(ctx->dev, G2_OUT_CHROMA_ADDR, chroma_addr);
mv_addr = luma_addr + mv_offset(ctx, dec_params);
hantro_write_addr(ctx->dev, G2_OUT_MV_ADDR, mv_addr);
}
struct hantro_vp9_ref_reg {
const struct hantro_reg width;
const struct hantro_reg height;
const struct hantro_reg hor_scale;
const struct hantro_reg ver_scale;
u32 y_base;
u32 c_base;
};
static void config_ref(struct hantro_ctx *ctx,
struct hantro_decoded_buffer *dst,
const struct hantro_vp9_ref_reg *ref_reg,
const struct v4l2_ctrl_vp9_frame *dec_params,
u64 ref_ts)
{
struct hantro_decoded_buffer *buf;
dma_addr_t luma_addr, chroma_addr;
u32 refw, refh;
buf = get_ref_buf(ctx, &dst->base.vb, ref_ts);
refw = buf->vp9.width;
refh = buf->vp9.height;
hantro_reg_write(ctx->dev, &ref_reg->width, refw);
hantro_reg_write(ctx->dev, &ref_reg->height, refh);
hantro_reg_write(ctx->dev, &ref_reg->hor_scale, (refw << 14) / dst->vp9.width);
hantro_reg_write(ctx->dev, &ref_reg->ver_scale, (refh << 14) / dst->vp9.height);
luma_addr = hantro_get_dec_buf_addr(ctx, &buf->base.vb.vb2_buf);
hantro_write_addr(ctx->dev, ref_reg->y_base, luma_addr);
chroma_addr = luma_addr + chroma_offset(ctx, dec_params);
hantro_write_addr(ctx->dev, ref_reg->c_base, chroma_addr);
}
static void config_ref_registers(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp9_frame *dec_params,
struct hantro_decoded_buffer *dst,
struct hantro_decoded_buffer *mv_ref)
{
static const struct hantro_vp9_ref_reg ref_regs[] = {
{
/* Last */
.width = vp9_lref_width,
.height = vp9_lref_height,
.hor_scale = vp9_lref_hor_scale,
.ver_scale = vp9_lref_ver_scale,
.y_base = G2_REF_LUMA_ADDR(0),
.c_base = G2_REF_CHROMA_ADDR(0),
}, {
/* Golden */
.width = vp9_gref_width,
.height = vp9_gref_height,
.hor_scale = vp9_gref_hor_scale,
.ver_scale = vp9_gref_ver_scale,
.y_base = G2_REF_LUMA_ADDR(4),
.c_base = G2_REF_CHROMA_ADDR(4),
}, {
/* Altref */
.width = vp9_aref_width,
.height = vp9_aref_height,
.hor_scale = vp9_aref_hor_scale,
.ver_scale = vp9_aref_ver_scale,
.y_base = G2_REF_LUMA_ADDR(5),
.c_base = G2_REF_CHROMA_ADDR(5),
},
};
dma_addr_t mv_addr;
config_ref(ctx, dst, &ref_regs[0], dec_params, dec_params->last_frame_ts);
config_ref(ctx, dst, &ref_regs[1], dec_params, dec_params->golden_frame_ts);
config_ref(ctx, dst, &ref_regs[2], dec_params, dec_params->alt_frame_ts);
mv_addr = hantro_get_dec_buf_addr(ctx, &mv_ref->base.vb.vb2_buf) +
mv_offset(ctx, dec_params);
hantro_write_addr(ctx->dev, G2_REF_MV_ADDR(0), mv_addr);
hantro_reg_write(ctx->dev, &vp9_last_sign_bias,
dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_LAST ? 1 : 0);
hantro_reg_write(ctx->dev, &vp9_gref_sign_bias,
dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_GOLDEN ? 1 : 0);
hantro_reg_write(ctx->dev, &vp9_aref_sign_bias,
dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_ALT ? 1 : 0);
}
static void recompute_tile_info(unsigned short *tile_info, unsigned int tiles, unsigned int sbs)
{
int i;
unsigned int accumulated = 0;
unsigned int next_accumulated;
for (i = 1; i <= tiles; ++i) {
next_accumulated = i * sbs / tiles;
*tile_info++ = next_accumulated - accumulated;
accumulated = next_accumulated;
}
}
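/*
 * Worked example (illustration only): recompute_tile_info() distributes
 * superblocks across tiles by cumulative integer division. With tiles = 3
 * and sbs = 10 the running totals are 10/3 = 3, 20/3 = 6 and 30/3 = 10,
 * so the per-tile sizes stored are 3, 3 and 4.
 */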
static void
recompute_tile_rc_info(struct hantro_ctx *ctx,
unsigned int tile_r, unsigned int tile_c,
unsigned int sbs_r, unsigned int sbs_c)
{
struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
recompute_tile_info(vp9_ctx->tile_r_info, tile_r, sbs_r);
recompute_tile_info(vp9_ctx->tile_c_info, tile_c, sbs_c);
vp9_ctx->last_tile_r = tile_r;
vp9_ctx->last_tile_c = tile_c;
vp9_ctx->last_sbs_r = sbs_r;
vp9_ctx->last_sbs_c = sbs_c;
}
static inline unsigned int first_tile_row(unsigned int tile_r, unsigned int sbs_r)
{
if (tile_r == sbs_r + 1)
return 1;
if (tile_r == sbs_r + 2)
return 2;
return 0;
}
static void
fill_tile_info(struct hantro_ctx *ctx,
unsigned int tile_r, unsigned int tile_c,
unsigned int sbs_r, unsigned int sbs_c,
unsigned short *tile_mem)
{
struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
unsigned int i, j;
bool first = true;
for (i = first_tile_row(tile_r, sbs_r); i < tile_r; ++i) {
unsigned short r_info = vp9_ctx->tile_r_info[i];
if (first) {
if (i > 0)
r_info += vp9_ctx->tile_r_info[0];
if (i == 2)
r_info += vp9_ctx->tile_r_info[1];
first = false;
}
for (j = 0; j < tile_c; ++j) {
*tile_mem++ = vp9_ctx->tile_c_info[j];
*tile_mem++ = r_info;
}
}
}
static void
config_tiles(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp9_frame *dec_params,
struct hantro_decoded_buffer *dst)
{
struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
struct hantro_aux_buf *misc = &vp9_ctx->misc;
struct hantro_aux_buf *tile_edge = &vp9_ctx->tile_edge;
dma_addr_t addr;
unsigned short *tile_mem;
unsigned int rows, cols;
addr = misc->dma + vp9_ctx->tile_info_offset;
hantro_write_addr(ctx->dev, G2_TILE_SIZES_ADDR, addr);
tile_mem = misc->cpu + vp9_ctx->tile_info_offset;
if (dec_params->tile_cols_log2 || dec_params->tile_rows_log2) {
unsigned int tile_r = (1 << dec_params->tile_rows_log2);
unsigned int tile_c = (1 << dec_params->tile_cols_log2);
unsigned int sbs_r = hantro_vp9_num_sbs(dst->vp9.height);
unsigned int sbs_c = hantro_vp9_num_sbs(dst->vp9.width);
if (tile_r != vp9_ctx->last_tile_r || tile_c != vp9_ctx->last_tile_c ||
sbs_r != vp9_ctx->last_sbs_r || sbs_c != vp9_ctx->last_sbs_c)
recompute_tile_rc_info(ctx, tile_r, tile_c, sbs_r, sbs_c);
fill_tile_info(ctx, tile_r, tile_c, sbs_r, sbs_c, tile_mem);
cols = tile_c;
rows = tile_r;
hantro_reg_write(ctx->dev, &g2_tile_e, 1);
} else {
tile_mem[0] = hantro_vp9_num_sbs(dst->vp9.width);
tile_mem[1] = hantro_vp9_num_sbs(dst->vp9.height);
cols = 1;
rows = 1;
hantro_reg_write(ctx->dev, &g2_tile_e, 0);
}
if (ctx->dev->variant->legacy_regs) {
hantro_reg_write(ctx->dev, &g2_num_tile_cols_old, cols);
hantro_reg_write(ctx->dev, &g2_num_tile_rows_old, rows);
} else {
hantro_reg_write(ctx->dev, &g2_num_tile_cols, cols);
hantro_reg_write(ctx->dev, &g2_num_tile_rows, rows);
}
/* provide aux buffers even if no tiles are used */
addr = tile_edge->dma;
hantro_write_addr(ctx->dev, G2_TILE_FILTER_ADDR, addr);
addr = tile_edge->dma + vp9_ctx->bsd_ctrl_offset;
hantro_write_addr(ctx->dev, G2_TILE_BSD_ADDR, addr);
}
static void
update_feat_and_flag(struct hantro_vp9_dec_hw_ctx *vp9_ctx,
const struct v4l2_vp9_segmentation *seg,
unsigned int feature,
unsigned int segid)
{
u8 mask = V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature);
vp9_ctx->feature_data[segid][feature] = seg->feature_data[segid][feature];
vp9_ctx->feature_enabled[segid] &= ~mask;
vp9_ctx->feature_enabled[segid] |= (seg->feature_enabled[segid] & mask);
}
static inline s16 clip3(s16 x, s16 y, s16 z)
{
return (z < x) ? x : (z > y) ? y : z;
}
static s16 feat_val_clip3(s16 feat_val, s16 feature_data, bool absolute, u8 clip)
{
if (absolute)
return feature_data;
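/*
 * Note: the "clip" argument is not applied below; callers pass 63 for the
 * loop-filter feature, but the delta path always clamps to 0..255.
 */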
return clip3(0, 255, feat_val + feature_data);
}
static void config_segment(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
{
struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
const struct v4l2_vp9_segmentation *seg;
s16 feat_val;
unsigned char feat_id;
unsigned int segid;
bool segment_enabled, absolute, update_data;
static const struct hantro_reg seg_regs[8][V4L2_VP9_SEG_LVL_MAX] = {
{ vp9_quant_seg0, vp9_filt_level_seg0, vp9_refpic_seg0, vp9_skip_seg0 },
{ vp9_quant_seg1, vp9_filt_level_seg1, vp9_refpic_seg1, vp9_skip_seg1 },
{ vp9_quant_seg2, vp9_filt_level_seg2, vp9_refpic_seg2, vp9_skip_seg2 },
{ vp9_quant_seg3, vp9_filt_level_seg3, vp9_refpic_seg3, vp9_skip_seg3 },
{ vp9_quant_seg4, vp9_filt_level_seg4, vp9_refpic_seg4, vp9_skip_seg4 },
{ vp9_quant_seg5, vp9_filt_level_seg5, vp9_refpic_seg5, vp9_skip_seg5 },
{ vp9_quant_seg6, vp9_filt_level_seg6, vp9_refpic_seg6, vp9_skip_seg6 },
{ vp9_quant_seg7, vp9_filt_level_seg7, vp9_refpic_seg7, vp9_skip_seg7 },
};
segment_enabled = !!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_ENABLED);
hantro_reg_write(ctx->dev, &vp9_segment_e, segment_enabled);
hantro_reg_write(ctx->dev, &vp9_segment_upd_e,
!!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP));
hantro_reg_write(ctx->dev, &vp9_segment_temp_upd_e,
!!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE));
seg = &dec_params->seg;
absolute = !!(seg->flags & V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE);
update_data = !!(seg->flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA);
for (segid = 0; segid < 8; ++segid) {
/* Quantizer segment feature */
feat_id = V4L2_VP9_SEG_LVL_ALT_Q;
feat_val = dec_params->quant.base_q_idx;
if (segment_enabled) {
if (update_data)
update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
if (v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
feat_val = feat_val_clip3(feat_val,
vp9_ctx->feature_data[segid][feat_id],
absolute, 255);
}
hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
/* Loop filter segment feature */
feat_id = V4L2_VP9_SEG_LVL_ALT_L;
feat_val = dec_params->lf.level;
if (segment_enabled) {
if (update_data)
update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
if (v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
feat_val = feat_val_clip3(feat_val,
vp9_ctx->feature_data[segid][feat_id],
absolute, 63);
}
hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
/* Reference frame segment feature */
feat_id = V4L2_VP9_SEG_LVL_REF_FRAME;
feat_val = 0;
if (segment_enabled) {
if (update_data)
update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
if (!(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
feat_val = vp9_ctx->feature_data[segid][feat_id] + 1;
}
hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
/* Skip segment feature */
feat_id = V4L2_VP9_SEG_LVL_SKIP;
feat_val = 0;
if (segment_enabled) {
if (update_data)
update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
feat_val = v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled,
feat_id, segid) ? 1 : 0;
}
hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
}
}
static void config_loop_filter(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
{
bool d = dec_params->lf.flags & V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED;
hantro_reg_write(ctx->dev, &vp9_filt_level, dec_params->lf.level);
hantro_reg_write(ctx->dev, &g2_out_filtering_dis, dec_params->lf.level == 0);
hantro_reg_write(ctx->dev, &vp9_filt_sharpness, dec_params->lf.sharpness);
hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_0, d ? dec_params->lf.ref_deltas[0] : 0);
hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_1, d ? dec_params->lf.ref_deltas[1] : 0);
hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_2, d ? dec_params->lf.ref_deltas[2] : 0);
hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_3, d ? dec_params->lf.ref_deltas[3] : 0);
hantro_reg_write(ctx->dev, &vp9_filt_mb_adj_0, d ? dec_params->lf.mode_deltas[0] : 0);
hantro_reg_write(ctx->dev, &vp9_filt_mb_adj_1, d ? dec_params->lf.mode_deltas[1] : 0);
}
static void config_picture_dimensions(struct hantro_ctx *ctx, struct hantro_decoded_buffer *dst)
{
u32 pic_w_4x4, pic_h_4x4;
hantro_reg_write(ctx->dev, &g2_pic_width_in_cbs, (dst->vp9.width + 7) / 8);
hantro_reg_write(ctx->dev, &g2_pic_height_in_cbs, (dst->vp9.height + 7) / 8);
pic_w_4x4 = roundup(dst->vp9.width, 8) >> 2;
pic_h_4x4 = roundup(dst->vp9.height, 8) >> 2;
hantro_reg_write(ctx->dev, &g2_pic_width_4x4, pic_w_4x4);
hantro_reg_write(ctx->dev, &g2_pic_height_4x4, pic_h_4x4);
}
static void
config_bit_depth(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
{
if (ctx->dev->variant->legacy_regs) {
hantro_reg_write(ctx->dev, &g2_bit_depth_y, dec_params->bit_depth);
hantro_reg_write(ctx->dev, &g2_bit_depth_c, dec_params->bit_depth);
hantro_reg_write(ctx->dev, &g2_pix_shift, 0);
} else {
hantro_reg_write(ctx->dev, &g2_bit_depth_y_minus8, dec_params->bit_depth - 8);
hantro_reg_write(ctx->dev, &g2_bit_depth_c_minus8, dec_params->bit_depth - 8);
}
}
static inline bool is_lossless(const struct v4l2_vp9_quantization *quant)
{
return quant->base_q_idx == 0 && quant->delta_q_uv_ac == 0 &&
quant->delta_q_uv_dc == 0 && quant->delta_q_y_dc == 0;
}
static void
config_quant(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
{
hantro_reg_write(ctx->dev, &vp9_qp_delta_y_dc, dec_params->quant.delta_q_y_dc);
hantro_reg_write(ctx->dev, &vp9_qp_delta_ch_dc, dec_params->quant.delta_q_uv_dc);
hantro_reg_write(ctx->dev, &vp9_qp_delta_ch_ac, dec_params->quant.delta_q_uv_ac);
hantro_reg_write(ctx->dev, &vp9_lossless_e, is_lossless(&dec_params->quant));
}
static u32
hantro_interp_filter_from_v4l2(unsigned int interpolation_filter)
{
switch (interpolation_filter) {
case V4L2_VP9_INTERP_FILTER_EIGHTTAP:
return 0x1;
case V4L2_VP9_INTERP_FILTER_EIGHTTAP_SMOOTH:
return 0;
case V4L2_VP9_INTERP_FILTER_EIGHTTAP_SHARP:
return 0x2;
case V4L2_VP9_INTERP_FILTER_BILINEAR:
return 0x3;
case V4L2_VP9_INTERP_FILTER_SWITCHABLE:
return 0x4;
}
return 0;
}
static void
config_others(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
bool intra_only, bool resolution_change)
{
struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
hantro_reg_write(ctx->dev, &g2_idr_pic_e, intra_only);
hantro_reg_write(ctx->dev, &vp9_transform_mode, vp9_ctx->cur.tx_mode);
hantro_reg_write(ctx->dev, &vp9_mcomp_filt_type, intra_only ?
0 : hantro_interp_filter_from_v4l2(dec_params->interpolation_filter));
hantro_reg_write(ctx->dev, &vp9_high_prec_mv_e,
!!(dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV));
hantro_reg_write(ctx->dev, &vp9_comp_pred_mode, dec_params->reference_mode);
hantro_reg_write(ctx->dev, &g2_tempor_mvp_e,
!(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
!(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
!(vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
!(dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY) &&
!resolution_change &&
vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_SHOW_FRAME
);
hantro_reg_write(ctx->dev, &g2_write_mvs_e,
!(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME));
}
static void
config_compound_reference(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp9_frame *dec_params)
{
u32 comp_fixed_ref, comp_var_ref[2];
bool last_ref_frame_sign_bias;
bool golden_ref_frame_sign_bias;
bool alt_ref_frame_sign_bias;
bool comp_ref_allowed = 0;
comp_fixed_ref = 0;
comp_var_ref[0] = 0;
comp_var_ref[1] = 0;
last_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_LAST;
golden_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_GOLDEN;
alt_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_ALT;
/* 6.3.12 Frame reference mode syntax */
comp_ref_allowed |= golden_ref_frame_sign_bias != last_ref_frame_sign_bias;
comp_ref_allowed |= alt_ref_frame_sign_bias != last_ref_frame_sign_bias;
if (comp_ref_allowed) {
if (last_ref_frame_sign_bias ==
golden_ref_frame_sign_bias) {
comp_fixed_ref = ALTREF_FRAME;
comp_var_ref[0] = LAST_FRAME;
comp_var_ref[1] = GOLDEN_FRAME;
} else if (last_ref_frame_sign_bias ==
alt_ref_frame_sign_bias) {
comp_fixed_ref = GOLDEN_FRAME;
comp_var_ref[0] = LAST_FRAME;
comp_var_ref[1] = ALTREF_FRAME;
} else {
comp_fixed_ref = LAST_FRAME;
comp_var_ref[0] = GOLDEN_FRAME;
comp_var_ref[1] = ALTREF_FRAME;
}
}
hantro_reg_write(ctx->dev, &vp9_comp_pred_fixed_ref, comp_fixed_ref);
hantro_reg_write(ctx->dev, &vp9_comp_pred_var_ref0, comp_var_ref[0]);
hantro_reg_write(ctx->dev, &vp9_comp_pred_var_ref1, comp_var_ref[1]);
}
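/*
 * Worked example (illustration only): with sign biases LAST = 0,
 * GOLDEN = 0 and ALTREF = 1, compound prediction is allowed; LAST and
 * GOLDEN share a bias, so ALTREF_FRAME becomes the fixed reference and
 * LAST/GOLDEN the variable pair - the first branch above.
 */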
#define INNER_LOOP \
do { \
for (m = 0; m < ARRAY_SIZE(adaptive->coef[0][0][0][0]); ++m) { \
memcpy(adaptive->coef[i][j][k][l][m], \
probs->coef[i][j][k][l][m], \
sizeof(probs->coef[i][j][k][l][m])); \
\
adaptive->coef[i][j][k][l][m][3] = 0; \
} \
} while (0)
static void config_probs(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
{
struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
struct hantro_aux_buf *misc = &vp9_ctx->misc;
struct hantro_g2_all_probs *all_probs = misc->cpu;
struct hantro_g2_probs *adaptive;
struct hantro_g2_mv_probs *mv;
const struct v4l2_vp9_segmentation *seg = &dec_params->seg;
const struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;
int i, j, k, l, m;
for (i = 0; i < ARRAY_SIZE(all_probs->kf_y_mode_prob); ++i)
for (j = 0; j < ARRAY_SIZE(all_probs->kf_y_mode_prob[0]); ++j) {
memcpy(all_probs->kf_y_mode_prob[i][j],
v4l2_vp9_kf_y_mode_prob[i][j],
ARRAY_SIZE(all_probs->kf_y_mode_prob[i][j]));
all_probs->kf_y_mode_prob_tail[i][j][0] =
v4l2_vp9_kf_y_mode_prob[i][j][8];
}
memcpy(all_probs->mb_segment_tree_probs, seg->tree_probs,
sizeof(all_probs->mb_segment_tree_probs));
memcpy(all_probs->segment_pred_probs, seg->pred_probs,
sizeof(all_probs->segment_pred_probs));
for (i = 0; i < ARRAY_SIZE(all_probs->kf_uv_mode_prob); ++i) {
memcpy(all_probs->kf_uv_mode_prob[i], v4l2_vp9_kf_uv_mode_prob[i],
ARRAY_SIZE(all_probs->kf_uv_mode_prob[i]));
all_probs->kf_uv_mode_prob_tail[i][0] = v4l2_vp9_kf_uv_mode_prob[i][8];
}
adaptive = &all_probs->probs;
for (i = 0; i < ARRAY_SIZE(adaptive->inter_mode); ++i) {
memcpy(adaptive->inter_mode[i], probs->inter_mode[i],
ARRAY_SIZE(probs->inter_mode[i]));
adaptive->inter_mode[i][3] = 0;
}
memcpy(adaptive->is_inter, probs->is_inter, sizeof(adaptive->is_inter));
for (i = 0; i < ARRAY_SIZE(adaptive->uv_mode); ++i) {
memcpy(adaptive->uv_mode[i], probs->uv_mode[i],
sizeof(adaptive->uv_mode[i]));
adaptive->uv_mode_tail[i][0] = probs->uv_mode[i][8];
}
memcpy(adaptive->tx8, probs->tx8, sizeof(adaptive->tx8));
memcpy(adaptive->tx16, probs->tx16, sizeof(adaptive->tx16));
memcpy(adaptive->tx32, probs->tx32, sizeof(adaptive->tx32));
for (i = 0; i < ARRAY_SIZE(adaptive->y_mode); ++i) {
memcpy(adaptive->y_mode[i], probs->y_mode[i],
ARRAY_SIZE(adaptive->y_mode[i]));
adaptive->y_mode_tail[i][0] = probs->y_mode[i][8];
}
for (i = 0; i < ARRAY_SIZE(adaptive->partition[0]); ++i) {
memcpy(adaptive->partition[0][i], v4l2_vp9_kf_partition_probs[i],
sizeof(v4l2_vp9_kf_partition_probs[i]));
adaptive->partition[0][i][3] = 0;
}
for (i = 0; i < ARRAY_SIZE(adaptive->partition[1]); ++i) {
memcpy(adaptive->partition[1][i], probs->partition[i],
sizeof(probs->partition[i]));
adaptive->partition[1][i][3] = 0;
}
memcpy(adaptive->interp_filter, probs->interp_filter,
sizeof(adaptive->interp_filter));
memcpy(adaptive->comp_mode, probs->comp_mode, sizeof(adaptive->comp_mode));
memcpy(adaptive->skip, probs->skip, sizeof(adaptive->skip));
mv = &adaptive->mv;
memcpy(mv->joint, probs->mv.joint, sizeof(mv->joint));
memcpy(mv->sign, probs->mv.sign, sizeof(mv->sign));
memcpy(mv->class0_bit, probs->mv.class0_bit, sizeof(mv->class0_bit));
memcpy(mv->fr, probs->mv.fr, sizeof(mv->fr));
memcpy(mv->class0_hp, probs->mv.class0_hp, sizeof(mv->class0_hp));
memcpy(mv->hp, probs->mv.hp, sizeof(mv->hp));
memcpy(mv->classes, probs->mv.classes, sizeof(mv->classes));
memcpy(mv->class0_fr, probs->mv.class0_fr, sizeof(mv->class0_fr));
memcpy(mv->bits, probs->mv.bits, sizeof(mv->bits));
memcpy(adaptive->single_ref, probs->single_ref, sizeof(adaptive->single_ref));
memcpy(adaptive->comp_ref, probs->comp_ref, sizeof(adaptive->comp_ref));
for (i = 0; i < ARRAY_SIZE(adaptive->coef); ++i)
for (j = 0; j < ARRAY_SIZE(adaptive->coef[0]); ++j)
for (k = 0; k < ARRAY_SIZE(adaptive->coef[0][0]); ++k)
for (l = 0; l < ARRAY_SIZE(adaptive->coef[0][0][0]); ++l)
INNER_LOOP;
hantro_write_addr(ctx->dev, G2_VP9_PROBS_ADDR, misc->dma);
}
static void config_counts(struct hantro_ctx *ctx)
{
struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
struct hantro_aux_buf *misc = &vp9_dec->misc;
dma_addr_t addr = misc->dma + vp9_dec->ctx_counters_offset;
hantro_write_addr(ctx->dev, G2_VP9_CTX_COUNT_ADDR, addr);
}
static void config_seg_map(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp9_frame *dec_params,
bool intra_only, bool update_map)
{
struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
struct hantro_aux_buf *segment_map = &vp9_ctx->segment_map;
dma_addr_t addr;
if (intra_only ||
(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT)) {
memset(segment_map->cpu, 0, segment_map->size);
memset(vp9_ctx->feature_data, 0, sizeof(vp9_ctx->feature_data));
memset(vp9_ctx->feature_enabled, 0, sizeof(vp9_ctx->feature_enabled));
}
addr = segment_map->dma + vp9_ctx->active_segment * vp9_ctx->segment_map_size;
hantro_write_addr(ctx->dev, G2_VP9_SEGMENT_READ_ADDR, addr);
addr = segment_map->dma + (1 - vp9_ctx->active_segment) * vp9_ctx->segment_map_size;
hantro_write_addr(ctx->dev, G2_VP9_SEGMENT_WRITE_ADDR, addr);
if (update_map)
vp9_ctx->active_segment = 1 - vp9_ctx->active_segment;
}
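/*
 * Illustrative note (not part of the driver): the two halves of the
 * segment map buffer are used in a ping-pong fashion - the hardware reads
 * the active map and writes the other - and the roles swap only for
 * frames that update the map, so a frame without the UPDATE_MAP flag
 * keeps reading the same map as its predecessor.
 */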
static void
config_source(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
struct vb2_v4l2_buffer *vb2_src)
{
dma_addr_t stream_base, tmp_addr;
unsigned int headres_size;
u32 src_len, start_bit, src_buf_len;
headres_size = dec_params->uncompressed_header_size
+ dec_params->compressed_header_size;
stream_base = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
tmp_addr = stream_base + headres_size;
if (ctx->dev->variant->legacy_regs)
hantro_write_addr(ctx->dev, G2_STREAM_ADDR, (tmp_addr & ~0xf));
else
hantro_write_addr(ctx->dev, G2_STREAM_ADDR, stream_base);
start_bit = (tmp_addr & 0xf) * 8;
hantro_reg_write(ctx->dev, &g2_start_bit, start_bit);
src_len = vb2_get_plane_payload(&vb2_src->vb2_buf, 0);
src_len += start_bit / 8 - headres_size;
hantro_reg_write(ctx->dev, &g2_stream_len, src_len);
if (!ctx->dev->variant->legacy_regs) {
tmp_addr &= ~0xf;
hantro_reg_write(ctx->dev, &g2_strm_start_offset, tmp_addr - stream_base);
src_buf_len = vb2_plane_size(&vb2_src->vb2_buf, 0);
hantro_reg_write(ctx->dev, &g2_strm_buffer_len, src_buf_len);
}
}
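/*
 * Worked example (illustration only): with stream base 0x1000 and a
 * 25-byte header, tmp_addr = 0x1019. Legacy register layouts get the
 * address rounded down to 0x1010 and start_bit = (0x1019 & 0xf) * 8 = 72,
 * so the 9 bytes between the aligned address and the first compressed
 * byte are counted in the programmed length and skipped by the hardware.
 * A hypothetical stand-alone rewrite of the start-bit arithmetic:
 */
static inline u32 example_start_bit(dma_addr_t stream_base, unsigned int header_size)
{
return ((stream_base + header_size) & 0xf) * 8;
}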
static void
config_registers(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
struct vb2_v4l2_buffer *vb2_src, struct vb2_v4l2_buffer *vb2_dst)
{
struct hantro_decoded_buffer *dst, *last, *mv_ref;
struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
const struct v4l2_vp9_segmentation *seg;
bool intra_only, resolution_change;
/* vp9 stuff */
dst = vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
if (vp9_ctx->last.valid)
last = get_ref_buf(ctx, &dst->base.vb, vp9_ctx->last.timestamp);
else
last = dst;
update_dec_buf_info(dst, dec_params);
update_ctx_cur_info(vp9_ctx, dst, dec_params);
seg = &dec_params->seg;
intra_only = !!(dec_params->flags &
(V4L2_VP9_FRAME_FLAG_KEY_FRAME |
V4L2_VP9_FRAME_FLAG_INTRA_ONLY));
if (!intra_only &&
!(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
vp9_ctx->last.valid)
mv_ref = last;
else
mv_ref = dst;
resolution_change = dst->vp9.width != last->vp9.width ||
dst->vp9.height != last->vp9.height;
/* configure basic registers */
hantro_reg_write(ctx->dev, &g2_mode, VP9_DEC_MODE);
if (!ctx->dev->variant->legacy_regs) {
hantro_reg_write(ctx->dev, &g2_strm_swap, 0xf);
hantro_reg_write(ctx->dev, &g2_dirmv_swap, 0xf);
hantro_reg_write(ctx->dev, &g2_compress_swap, 0xf);
hantro_reg_write(ctx->dev, &g2_ref_compress_bypass, 1);
} else {
hantro_reg_write(ctx->dev, &g2_strm_swap_old, 0x1f);
hantro_reg_write(ctx->dev, &g2_pic_swap, 0x10);
hantro_reg_write(ctx->dev, &g2_dirmv_swap_old, 0x10);
hantro_reg_write(ctx->dev, &g2_tab0_swap_old, 0x10);
hantro_reg_write(ctx->dev, &g2_tab1_swap_old, 0x10);
hantro_reg_write(ctx->dev, &g2_tab2_swap_old, 0x10);
hantro_reg_write(ctx->dev, &g2_tab3_swap_old, 0x10);
hantro_reg_write(ctx->dev, &g2_rscan_swap, 0x10);
}
hantro_reg_write(ctx->dev, &g2_buswidth, BUS_WIDTH_128);
hantro_reg_write(ctx->dev, &g2_max_burst, 16);
hantro_reg_write(ctx->dev, &g2_apf_threshold, 8);
hantro_reg_write(ctx->dev, &g2_clk_gate_e, 1);
hantro_reg_write(ctx->dev, &g2_max_cb_size, 6);
hantro_reg_write(ctx->dev, &g2_min_cb_size, 3);
if (ctx->dev->variant->double_buffer)
hantro_reg_write(ctx->dev, &g2_double_buffer_e, 1);
config_output(ctx, dst, dec_params);
if (!intra_only)
config_ref_registers(ctx, dec_params, dst, mv_ref);
config_tiles(ctx, dec_params, dst);
config_segment(ctx, dec_params);
config_loop_filter(ctx, dec_params);
config_picture_dimensions(ctx, dst);
config_bit_depth(ctx, dec_params);
config_quant(ctx, dec_params);
config_others(ctx, dec_params, intra_only, resolution_change);
config_compound_reference(ctx, dec_params);
config_probs(ctx, dec_params);
config_counts(ctx);
config_seg_map(ctx, dec_params, intra_only,
seg->flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP);
config_source(ctx, dec_params, vb2_src);
}
int hantro_g2_vp9_dec_run(struct hantro_ctx *ctx)
{
const struct v4l2_ctrl_vp9_frame *decode_params;
struct vb2_v4l2_buffer *src;
struct vb2_v4l2_buffer *dst;
int ret;
hantro_g2_check_idle(ctx->dev);
ret = start_prepare_run(ctx, &decode_params);
if (ret) {
hantro_end_prepare_run(ctx);
return ret;
}
src = hantro_get_src_buf(ctx);
dst = hantro_get_dst_buf(ctx);
config_registers(ctx, decode_params, src, dst);
hantro_end_prepare_run(ctx);
vdpu_write(ctx->dev, G2_REG_INTERRUPT_DEC_E, G2_REG_INTERRUPT);
return 0;
}
#define copy_tx_and_skip(p1, p2) \
do { \
memcpy((p1)->tx8, (p2)->tx8, sizeof((p1)->tx8)); \
memcpy((p1)->tx16, (p2)->tx16, sizeof((p1)->tx16)); \
memcpy((p1)->tx32, (p2)->tx32, sizeof((p1)->tx32)); \
memcpy((p1)->skip, (p2)->skip, sizeof((p1)->skip)); \
} while (0)
void hantro_g2_vp9_dec_done(struct hantro_ctx *ctx)
{
struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
unsigned int fctx_idx;
if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX))
goto out_update_last;
fctx_idx = vp9_ctx->cur.frame_context_idx;
if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE)) {
/* error_resilient_mode == 0 && frame_parallel_decoding_mode == 0 */
struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;
bool frame_is_intra = vp9_ctx->cur.flags &
(V4L2_VP9_FRAME_FLAG_KEY_FRAME | V4L2_VP9_FRAME_FLAG_INTRA_ONLY);
struct tx_and_skip {
u8 tx8[2][1];
u8 tx16[2][2];
u8 tx32[2][3];
u8 skip[3];
} _tx_skip, *tx_skip = &_tx_skip;
struct v4l2_vp9_frame_symbol_counts *counts;
struct symbol_counts *hantro_cnts;
u32 tx16p[2][4];
int i;
/* buffer the forward-updated TX and skip probs */
if (frame_is_intra)
copy_tx_and_skip(tx_skip, probs);
/* 6.1.2 refresh_probs(): load_probs() and load_probs2() */
*probs = vp9_ctx->frame_context[fctx_idx];
/* if FrameIsIntra then undo the effect of load_probs2() */
if (frame_is_intra)
copy_tx_and_skip(probs, tx_skip);
counts = &vp9_ctx->cnts;
hantro_cnts = vp9_ctx->misc.cpu + vp9_ctx->ctx_counters_offset;
for (i = 0; i < ARRAY_SIZE(tx16p); ++i) {
memcpy(tx16p[i],
hantro_cnts->tx16x16_count[i],
sizeof(hantro_cnts->tx16x16_count[0]));
tx16p[i][3] = 0;
}
counts->tx16p = &tx16p;
v4l2_vp9_adapt_coef_probs(probs, counts,
!vp9_ctx->last.valid ||
vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME,
frame_is_intra);
if (!frame_is_intra) {
/* load_probs2() already done */
u32 mv_mode[7][4];
for (i = 0; i < ARRAY_SIZE(mv_mode); ++i) {
mv_mode[i][0] = hantro_cnts->inter_mode_counts[i][1][0];
mv_mode[i][1] = hantro_cnts->inter_mode_counts[i][2][0];
mv_mode[i][2] = hantro_cnts->inter_mode_counts[i][0][0];
mv_mode[i][3] = hantro_cnts->inter_mode_counts[i][2][1];
}
counts->mv_mode = &mv_mode;
v4l2_vp9_adapt_noncoef_probs(&vp9_ctx->probability_tables, counts,
vp9_ctx->cur.reference_mode,
vp9_ctx->cur.interpolation_filter,
vp9_ctx->cur.tx_mode, vp9_ctx->cur.flags);
}
}
vp9_ctx->frame_context[fctx_idx] = vp9_ctx->probability_tables;
out_update_last:
vp9_ctx->last = vp9_ctx->cur;
}
/* linux-master: drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
*/
#include "hantro.h"
static const u8 zigzag[64] = {
0, 1, 8, 16, 9, 2, 3, 10,
17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34,
27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36,
29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46,
53, 60, 61, 54, 47, 55, 62, 63
};
void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
const struct v4l2_ctrl_mpeg2_quantisation *ctrl)
{
int i, n;
if (!qtable || !ctrl)
return;
for (i = 0; i < ARRAY_SIZE(zigzag); i++) {
n = zigzag[i];
qtable[n + 0] = ctrl->intra_quantiser_matrix[i];
qtable[n + 64] = ctrl->non_intra_quantiser_matrix[i];
qtable[n + 128] = ctrl->chroma_intra_quantiser_matrix[i];
qtable[n + 192] = ctrl->chroma_non_intra_quantiser_matrix[i];
}
}
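/*
 * Illustrative note (not part of the driver): the destination table holds
 * four consecutive 64-byte matrices in raster order - intra Y, non-intra
 * Y, chroma intra, chroma non-intra (offsets 0, 64, 128, 192 above) -
 * while the V4L2 control supplies each matrix in zigzag scan order, hence
 * the zigzag[] indirection.
 */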
int hantro_mpeg2_dec_init(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
ctx->mpeg2_dec.qtable.size = ARRAY_SIZE(zigzag) * 4;
ctx->mpeg2_dec.qtable.cpu =
dma_alloc_coherent(vpu->dev,
ctx->mpeg2_dec.qtable.size,
&ctx->mpeg2_dec.qtable.dma,
GFP_KERNEL);
if (!ctx->mpeg2_dec.qtable.cpu)
return -ENOMEM;
return 0;
}
void hantro_mpeg2_dec_exit(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
dma_free_coherent(vpu->dev,
ctx->mpeg2_dec.qtable.size,
ctx->mpeg2_dec.qtable.cpu,
ctx->mpeg2_dec.qtable.dma);
}
/* linux-master: drivers/media/platform/verisilicon/hantro_mpeg2.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Allwinner Hantro G2 VPU codec driver
*
* Copyright (C) 2021 Jernej Skrabec <[email protected]>
*/
#include <linux/clk.h>
#include "hantro.h"
static const struct hantro_fmt sunxi_vpu_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
.postprocessed = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = 32,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = 32,
},
},
{
.fourcc = V4L2_PIX_FMT_P010,
.codec_mode = HANTRO_MODE_NONE,
.postprocessed = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = 32,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = 32,
},
},
};
static const struct hantro_fmt sunxi_vpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12_4L4,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = 32,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = 32,
},
},
{
.fourcc = V4L2_PIX_FMT_P010_4L4,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = 32,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = 32,
},
},
{
.fourcc = V4L2_PIX_FMT_VP9_FRAME,
.codec_mode = HANTRO_MODE_VP9_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = 32,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = 32,
},
},
};
static int sunxi_vpu_hw_init(struct hantro_dev *vpu)
{
clk_set_rate(vpu->clocks[0].clk, 300000000);
return 0;
}
static void sunxi_vpu_reset(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
reset_control_reset(vpu->resets);
}
static const struct hantro_codec_ops sunxi_vpu_codec_ops[] = {
[HANTRO_MODE_VP9_DEC] = {
.run = hantro_g2_vp9_dec_run,
.done = hantro_g2_vp9_dec_done,
.reset = sunxi_vpu_reset,
.init = hantro_vp9_dec_init,
.exit = hantro_vp9_dec_exit,
},
};
static const struct hantro_irq sunxi_irqs[] = {
{ NULL, hantro_g2_irq },
};
static const char * const sunxi_clk_names[] = { "mod", "bus" };
const struct hantro_variant sunxi_vpu_variant = {
.dec_fmts = sunxi_vpu_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(sunxi_vpu_dec_fmts),
.postproc_fmts = sunxi_vpu_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(sunxi_vpu_postproc_fmts),
.postproc_ops = &hantro_g2_postproc_ops,
.codec = HANTRO_VP9_DECODER,
.codec_ops = sunxi_vpu_codec_ops,
.init = sunxi_vpu_hw_init,
.irqs = sunxi_irqs,
.num_irqs = ARRAY_SIZE(sunxi_irqs),
.clk_names = sunxi_clk_names,
.num_clocks = ARRAY_SIZE(sunxi_clk_names),
.double_buffer = 1,
.legacy_regs = 1,
.late_postproc = 1,
};
/* linux-master: drivers/media/platform/verisilicon/sunxi_vpu_hw.c */
// SPDX-License-Identifier: GPL-2.0
/*
* Rockchip RK3288 VPU codec driver
*
* Copyright (c) 2014 Rockchip Electronics Co., Ltd.
* Hertz Wong <[email protected]>
* Herman Chen <[email protected]>
*
* Copyright (C) 2014 Google, Inc.
* Tomasz Figa <[email protected]>
*/
#include <linux/types.h>
#include <media/v4l2-h264.h>
#include <media/v4l2-mem2mem.h>
#include "hantro.h"
#include "hantro_hw.h"
/* Size in u32 units. */
#define CABAC_INIT_BUFFER_SIZE (460 * 2)
#define POC_BUFFER_SIZE 34
#define SCALING_LIST_SIZE (6 * 16 + 2 * 64)
/*
 * For valid and long-term reference marking, the indexing is reversed:
 * bit 31 indicates the status of picture 0.
 */
#define REF_BIT(i) BIT(32 - 1 - (i))
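/*
 * Worked example (illustration only): REF_BIT(0) == BIT(31), so picture 0
 * occupies the most significant bit; in field mode REF_BIT(2 * i) and
 * REF_BIT(2 * i + 1) are the top and bottom fields of DPB slot i.
 */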
/* Data structure describing auxiliary buffer format. */
struct hantro_h264_dec_priv_tbl {
u32 cabac_table[CABAC_INIT_BUFFER_SIZE];
u32 poc[POC_BUFFER_SIZE];
u8 scaling_list[SCALING_LIST_SIZE];
};
/*
* Constant CABAC table.
* From drivers/media/platform/rk3288-vpu/rk3288_vpu_hw_h264d.c
* in https://chromium.googlesource.com/chromiumos/third_party/kernel,
* chromeos-3.14 branch.
*/
static const u32 h264_cabac_table[] = {
0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07330000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948, 0x0d29033e, 0x000b0137,
0x0045ef7f, 0xf3660052, 0xf94aeb6b, 0xe57fe17f, 0xe87fee5f, 0xe57feb72,
0xe27fef7b, 0xf473f07a, 0xf573f43f, 0xfe44f154, 0xf368fd46, 0xf85df65a,
0xe27fff4a, 0xfa61f95b, 0xec7ffc38, 0xfb52f94c, 0xea7df95d, 0xf557fd4d,
0xfb47fc3f, 0xfc44f454, 0xf93ef941, 0x083d0538, 0xfe420140, 0x003dfe4e,
0x01320734, 0x0a23002c, 0x0b26012d, 0x002e052c, 0x1f110133, 0x07321c13,
0x10210e3e, 0xf36cf164, 0xf365f35b, 0xf45ef658, 0xf054f656, 0xf953f357,
0xed5e0146, 0x0048fb4a, 0x123bf866, 0xf164005f, 0xfc4b0248, 0xf54bfd47,
0x0f2ef345, 0x003e0041, 0x1525f148, 0x09391036, 0x003e0c48, 0x18000f09,
0x08190d12, 0x0f090d13, 0x0a250c12, 0x061d1421, 0x0f1e042d, 0x013a003e,
0x073d0c26, 0x0b2d0f27, 0x0b2a0d2c, 0x102d0c29, 0x0a311e22, 0x122a0a37,
0x1133112e, 0x00591aed, 0x16ef1aef, 0x1ee71cec, 0x21e925e5, 0x21e928e4,
0x26ef21f5, 0x28f129fa, 0x26012911, 0x1efa1b03, 0x1a1625f0, 0x23fc26f8,
0x26fd2503, 0x26052a00, 0x23102716, 0x0e301b25, 0x153c0c44, 0x0261fd47,
0xfa2afb32, 0xfd36fe3e, 0x003a013f, 0xfe48ff4a, 0xf75bfb43, 0xfb1bfd27,
0xfe2c002e, 0xf040f844, 0xf64efa4d, 0xf656f45c, 0xf137f63c, 0xfa3efc41,
0xf449f84c, 0xf950f758, 0xef6ef561, 0xec54f54f, 0xfa49fc4a, 0xf356f360,
0xf561ed75, 0xf84efb21, 0xfc30fe35, 0xfd3ef347, 0xf64ff456, 0xf35af261,
0x0000fa5d, 0xfa54f84f, 0x0042ff47, 0x003efe3c, 0xfe3bfb4b, 0xfd3efc3a,
0xf742ff4f, 0x00470344, 0x0a2cf93e, 0x0f240e28, 0x101b0c1d, 0x012c1424,
0x1220052a, 0x01300a3e, 0x112e0940, 0xf468f561, 0xf060f958, 0xf855f955,
0xf755f358, 0x0442fd4d, 0xfd4cfa4c, 0x0a3aff4c, 0xff53f963, 0xf25f025f,
0x004cfb4a, 0x0046f54b, 0x01440041, 0xf249033e, 0x043eff44, 0xf34b0b37,
0x05400c46, 0x0f060613, 0x07100c0e, 0x120d0d0b, 0x0d0f0f10, 0x0c170d17,
0x0f140e1a, 0x0e2c1128, 0x112f1811, 0x15151916, 0x1f1b161d, 0x13230e32,
0x0a39073f, 0xfe4dfc52, 0xfd5e0945, 0xf46d24dd, 0x24de20e6, 0x25e22ce0,
0x22ee22f1, 0x28f121f9, 0x23fb2100, 0x2602210d, 0x17230d3a, 0x1dfd1a00,
0x161e1ff9, 0x23f122fd, 0x220324ff, 0x2205200b, 0x2305220c, 0x270b1e1d,
0x221a1d27, 0x13421f15, 0x1f1f1932, 0xef78ec70, 0xee72f555, 0xf15cf259,
0xe647f151, 0xf2500044, 0xf246e838, 0xe944e832, 0xf54a17f3, 0x1af328f1,
0x31f22c03, 0x2d062c22, 0x21361352, 0xfd4bff17, 0x0122012b, 0x0036fe37,
0x003d0140, 0x0044f75c, 0xf26af361, 0xf15af45a, 0xee58f649, 0xf74ff256,
0xf649f646, 0xf645fb42, 0xf740fb3a, 0x023b15f6, 0x18f51cf8, 0x1cff1d03,
0x1d092314, 0x1d240e43, 0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968,
0xfa35ff36, 0x07331721, 0x17021500, 0x01090031, 0xdb760539, 0xf34ef541,
0x013e0c31, 0xfc491132, 0x1240092b, 0x1d001a43, 0x105a0968, 0xd27fec68,
0x0143f34e, 0xf541013e, 0xfa56ef5f, 0xfa3d092d, 0xfd45fa51, 0xf5600637,
0x0743fb56, 0x0258003a, 0xfd4cf65e, 0x05360445, 0xfd510058, 0xf943fb4a,
0xfc4afb50, 0xf948013a, 0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948,
0x0d29033e, 0x002dfc4e, 0xfd60e57e, 0xe462e765, 0xe943e452, 0xec5ef053,
0xea6eeb5b, 0xee66f35d, 0xe37ff95c, 0xfb59f960, 0xf36cfd2e, 0xff41ff39,
0xf75dfd4a, 0xf75cf857, 0xe97e0536, 0x063c063b, 0x0645ff30, 0x0044fc45,
0xf858fe55, 0xfa4eff4b, 0xf94d0236, 0x0532fd44, 0x0132062a, 0xfc51013f,
0xfc460043, 0x0239fe4c, 0x0b230440, 0x013d0b23, 0x12190c18, 0x0d1d0d24,
0xf65df949, 0xfe490d2e, 0x0931f964, 0x09350235, 0x0535fe3d, 0x00380038,
0xf33ffb3c, 0xff3e0439, 0xfa450439, 0x0e270433, 0x0d440340, 0x013d093f,
0x07321027, 0x052c0434, 0x0b30fb3c, 0xff3b003b, 0x1621052c, 0x0e2bff4e,
0x003c0945, 0x0b1c0228, 0x032c0031, 0x002e022c, 0x0233002f, 0x0427023e,
0x062e0036, 0x0336023a, 0x043f0633, 0x06390735, 0x06340637, 0x0b2d0e24,
0x0835ff52, 0x0737fd4e, 0x0f2e161f, 0xff541907, 0x1ef91c03, 0x1c042000,
0x22ff1e06, 0x1e062009, 0x1f131a1b, 0x1a1e2514, 0x1c221146, 0x0143053b,
0x0943101e, 0x12201223, 0x161d181f, 0x1726122b, 0x14290b3f, 0x093b0940,
0xff5efe59, 0xf76cfa4c, 0xfe2c002d, 0x0034fd40, 0xfe3bfc46, 0xfc4bf852,
0xef66f74d, 0x0318002a, 0x00300037, 0xfa3bf947, 0xf453f557, 0xe277013a,
0xfd1dff24, 0x0126022b, 0xfa37003a, 0x0040fd4a, 0xf65a0046, 0xfc1d051f,
0x072a013b, 0xfe3afd48, 0xfd51f561, 0x003a0805, 0x0a0e0e12, 0x0d1b0228,
0x003afd46, 0xfa4ff855, 0x0000f36a, 0xf06af657, 0xeb72ee6e, 0xf262ea6e,
0xeb6aee67, 0xeb6be96c, 0xe670f660, 0xf45ffb5b, 0xf75dea5e, 0xfb560943,
0xfc50f655, 0xff46073c, 0x093a053d, 0x0c320f32, 0x12311136, 0x0a29072e,
0xff330731, 0x08340929, 0x062f0237, 0x0d290a2c, 0x06320535, 0x0d31043f,
0x0640fe45, 0xfe3b0646, 0x0a2c091f, 0x0c2b0335, 0x0e220a26, 0xfd340d28,
0x1120072c, 0x07260d32, 0x0a391a2b, 0x0e0b0b0e, 0x090b120b, 0x150917fe,
0x20f120f1, 0x22eb27e9, 0x2adf29e1, 0x2ee426f4, 0x151d2de8, 0x35d330e6,
0x41d52bed, 0x27f61e09, 0x121a141b, 0x0039f252, 0xfb4bed61, 0xdd7d1b00,
0x1c001ffc, 0x1b062208, 0x1e0a1816, 0x21131620, 0x1a1f1529, 0x1a2c172f,
0x10410e47, 0x083c063f, 0x11411518, 0x17141a17, 0x1b201c17, 0x1c181728,
0x18201c1d, 0x172a1339, 0x1635163d, 0x0b560c28, 0x0b330e3b, 0xfc4ff947,
0xfb45f746, 0xf842f644, 0xed49f445, 0xf046f143, 0xec3eed46, 0xf042ea41,
0xec3f09fe, 0x1af721f7, 0x27f929fe, 0x2d033109, 0x2d1b243b, 0xfa42f923,
0xf92af82d, 0xfb30f438, 0xfa3cfb3e, 0xf842f84c, 0xfb55fa51, 0xf64df951,
0xef50ee49, 0xfc4af653, 0xf747f743, 0xff3df842, 0xf242003b, 0x023b15f3,
0x21f227f9, 0x2efe3302, 0x3c063d11, 0x37222a3e, 0x14f10236, 0x034a14f1,
0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07331619, 0x22001000, 0xfe090429,
0xe3760241, 0xfa47f34f, 0x05340932, 0xfd460a36, 0x1a221316, 0x28003902,
0x29241a45, 0xd37ff165, 0xfc4cfa47, 0xf34f0534, 0x0645f35a, 0x0034082b,
0xfe45fb52, 0xf660023b, 0x024bfd57, 0xfd640138, 0xfd4afa55, 0x003bfd51,
0xf956fb5f, 0xff42ff4d, 0x0146fe56, 0xfb48003d, 0x0029003f, 0x003f003f,
0xf7530456, 0x0061f948, 0x0d29033e, 0x0d0f0733, 0x0250d97f, 0xee5bef60,
0xe651dd62, 0xe866e961, 0xe577e863, 0xeb6eee66, 0xdc7f0050, 0xfb59f95e,
0xfc5c0027, 0x0041f154, 0xdd7ffe49, 0xf468f75b, 0xe17f0337, 0x07380737,
0x083dfd35, 0x0044f94a, 0xf758f367, 0xf35bf759, 0xf25cf84c, 0xf457e96e,
0xe869f64e, 0xec70ef63, 0xb27fba7f, 0xce7fd27f, 0xfc42fb4e, 0xfc47f848,
0x023bff37, 0xf946fa4b, 0xf859de77, 0xfd4b2014, 0x1e16d47f, 0x0036fb3d,
0x003aff3c, 0xfd3df843, 0xe754f24a, 0xfb410534, 0x0239003d, 0xf745f546,
0x1237fc47, 0x003a073d, 0x09291219, 0x0920052b, 0x092f002c, 0x0033022e,
0x1326fc42, 0x0f260c2a, 0x09220059, 0x042d0a1c, 0x0a1f21f5, 0x34d5120f,
0x1c0023ea, 0x26e72200, 0x27ee20f4, 0x66a20000, 0x38f121fc, 0x1d0a25fb,
0x33e327f7, 0x34de45c6, 0x43c12cfb, 0x200737e3, 0x20010000, 0x1b2421e7,
0x22e224e4, 0x26e426e5, 0x22ee23f0, 0x22f220f8, 0x25fa2300, 0x1e0a1c12,
0x1a191d29, 0x004b0248, 0x084d0e23, 0x121f1123, 0x151e112d, 0x142a122d,
0x1b1a1036, 0x07421038, 0x0b490a43, 0xf674e970, 0xf147f93d, 0x0035fb42,
0xf54df750, 0xf754f657, 0xde7feb65, 0xfd27fb35, 0xf93df54b, 0xf14def5b,
0xe76be76f, 0xe47af54c, 0xf62cf634, 0xf639f73a, 0xf048f945, 0xfc45fb4a,
0xf7560242, 0xf7220120, 0x0b1f0534, 0xfe37fe43, 0x0049f859, 0x03340704,
0x0a081108, 0x10130325, 0xff3dfb49, 0xff46fc4e, 0x0000eb7e, 0xe97cec6e,
0xe67ee77c, 0xef69e579, 0xe575ef66, 0xe675e574, 0xdf7af65f, 0xf264f85f,
0xef6fe472, 0xfa59fe50, 0xfc52f755, 0xf851ff48, 0x05400143, 0x09380045,
0x01450745, 0xf945fa43, 0xf04dfe40, 0x023dfa43, 0xfd400239, 0xfd41fd42,
0x003e0933, 0xff42fe47, 0xfe4bff46, 0xf7480e3c, 0x1025002f, 0x12230b25,
0x0c290a29, 0x02300c29, 0x0d29003b, 0x03321328, 0x03421232, 0x13fa12fa,
0x0e001af4, 0x1ff021e7, 0x21ea25e4, 0x27e22ae2, 0x2fd62ddc, 0x31de29ef,
0x200945b9, 0x3fc142c0, 0x4db636d9, 0x34dd29f6, 0x240028ff, 0x1e0e1c1a,
0x17250c37, 0x0b4125df, 0x27dc28db, 0x26e22edf, 0x2ae228e8, 0x31e326f4,
0x28f626fd, 0x2efb1f14, 0x1d1e192c, 0x0c300b31, 0x1a2d1616, 0x17161b15,
0x21141a1c, 0x1e181b22, 0x122a1927, 0x12320c46, 0x15360e47, 0x0b531920,
0x15311536, 0xfb55fa51, 0xf64df951, 0xef50ee49, 0xfc4af653, 0xf747f743,
0xff3df842, 0xf242003b, 0x023b11f6, 0x20f32af7, 0x31fb3500, 0x4003440a,
0x421b2f39, 0xfb470018, 0xff24fe2a, 0xfe34f739, 0xfa3ffc41, 0xfc43f952,
0xfd51fd4c, 0xf948fa4e, 0xf448f244, 0xfd46fa4c, 0xfb42fb3e, 0x0039fc3d,
0xf73c0136, 0x023a11f6, 0x20f32af7, 0x31fb3500, 0x4003440a, 0x421b2f39,
0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07331d10,
0x19000e00, 0xf633fd3e, 0xe5631a10, 0xfc55e866, 0x05390639, 0xef490e39,
0x1428140a, 0x1d003600, 0x252a0c61, 0xe07fea75, 0xfe4afc55, 0xe8660539,
0xfa5df258, 0xfa2c0437, 0xf559f167, 0xeb741339, 0x143a0454, 0x0660013f,
0xfb55f36a, 0x053f064b, 0xfd5aff65, 0x0337fc4f, 0xfe4bf461, 0xf932013c,
0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948, 0x0d29033e, 0x0722f758,
0xec7fdc7f, 0xef5bf25f, 0xe754e756, 0xf459ef5b, 0xe17ff24c, 0xee67f35a,
0xdb7f0b50, 0x054c0254, 0x054efa37, 0x043df253, 0xdb7ffb4f, 0xf568f55b,
0xe27f0041, 0xfe4f0048, 0xfc5cfa38, 0x0344f847, 0xf362fc56, 0xf458fb52,
0xfd48fc43, 0xf848f059, 0xf745ff3b, 0x05420439, 0xfc47fe47, 0x023aff4a,
0xfc2cff45, 0x003ef933, 0xfc2ffa2a, 0xfd29fa35, 0x084cf74e, 0xf5530934,
0x0043fb5a, 0x0143f148, 0xfb4bf850, 0xeb53eb40, 0xf31fe740, 0xe35e094b,
0x113ff84a, 0xfb23fe1b, 0x0d5b0341, 0xf945084d, 0xf642033e, 0xfd44ec51,
0x001e0107, 0xfd17eb4a, 0x1042e97c, 0x11252cee, 0x32deea7f, 0x0427002a,
0x07220b1d, 0x081f0625, 0x072a0328, 0x08210d2b, 0x0d24042f, 0x0337023a,
0x063c082c, 0x0b2c0e2a, 0x07300438, 0x04340d25, 0x0931133a, 0x0a300c2d,
0x00451421, 0x083f23ee, 0x21e71cfd, 0x180a1b00, 0x22f234d4, 0x27e81311,
0x1f19241d, 0x1821220f, 0x1e141649, 0x1422131f, 0x1b2c1310, 0x0f240f24,
0x151c1915, 0x1e141f0c, 0x1b10182a, 0x005d0e38, 0x0f391a26, 0xe87fe873,
0xea52f73e, 0x0035003b, 0xf255f359, 0xf35ef55c, 0xe37feb64, 0xf239f443,
0xf547f64d, 0xeb55f058, 0xe968f162, 0xdb7ff652, 0xf830f83d, 0xf842f946,
0xf24bf64f, 0xf753f45c, 0xee6cfc4f, 0xea45f04b, 0xfe3a013a, 0xf34ef753,
0xfc51f363, 0xf351fa26, 0xf33efa3a, 0xfe3bf049, 0xf64cf356, 0xf753f657,
0x0000ea7f, 0xe77fe778, 0xe57fed72, 0xe975e776, 0xe675e871, 0xe476e178,
0xdb7cf65e, 0xf166f663, 0xf36ace7f, 0xfb5c1139, 0xfb56f35e, 0xf45bfe4d,
0x0047ff49, 0x0440f951, 0x05400f39, 0x01430044, 0xf6430144, 0x004d0240,
0x0044fb4e, 0x0737053b, 0x02410e36, 0x0f2c053c, 0x0246fe4c, 0xee560c46,
0x0540f446, 0x0b370538, 0x00450241, 0xfa4a0536, 0x0736fa4c, 0xf552fe4d,
0xfe4d192a, 0x11f310f7, 0x11f41beb, 0x25e229d8, 0x2ad730d1, 0x27e02ed8,
0x34cd2ed7, 0x34d92bed, 0x200b3dc9, 0x38d23ece, 0x51bd2dec, 0x23fe1c0f,
0x22012701, 0x1e111426, 0x122d0f36, 0x004f24f0, 0x25f225ef, 0x2001220f,
0x1d0f1819, 0x22161f10, 0x23121f1c, 0x2129241c, 0x1b2f153e, 0x121f131a,
0x24181817, 0x1b10181e, 0x1f1d1629, 0x162a103c, 0x0f340e3c, 0x034ef07b,
0x15351638, 0x193d1521, 0x1332113d, 0xfd4ef84a, 0xf748f648, 0xee4bf447,
0xf53ffb46, 0xef4bf248, 0xf043f835, 0xf23bf734, 0xf54409fe, 0x1ef61ffc,
0x21ff2107, 0x1f0c2517, 0x1f261440, 0xf747f925, 0xf82cf531, 0xf638f43b,
0xf83ff743, 0xfa44f64f, 0xfd4ef84a, 0xf748f648, 0xee4bf447, 0xf53ffb46,
0xef4bf248, 0xf043f835, 0xf23bf734, 0xf54409fe, 0x1ef61ffc, 0x21ff2107,
0x1f0c2517, 0x1f261440
};
static void
assemble_scaling_list(struct hantro_ctx *ctx)
{
const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling;
const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4);
const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]);
const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]);
struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
u32 *dst = (u32 *)tbl->scaling_list;
const u32 *src;
int i, j;
if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
return;
for (i = 0; i < num_list_4x4; i++) {
src = (u32 *)&scaling->scaling_list_4x4[i];
for (j = 0; j < list_len_4x4 / 4; j++)
*dst++ = swab32(src[j]);
}
/* Only Intra/Inter Y lists */
for (i = 0; i < 2; i++) {
src = (u32 *)&scaling->scaling_list_8x8[i];
for (j = 0; j < list_len_8x8 / 4; j++)
*dst++ = swab32(src[j]);
}
}
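/*
 * Illustrative note (not part of the driver): the control's matrices are
 * byte arrays in scan order; the swab32() pass above reverses the byte
 * order within each 32-bit word, which matches the word-oriented layout
 * the hardware expects for its scaling-list table (an inference from the
 * code, not a documented register requirement).
 */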
static void prepare_table(struct hantro_ctx *ctx)
{
const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
const struct v4l2_ctrl_h264_sps *sps = ctrls->sps;
struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
u32 dpb_longterm = 0;
u32 dpb_valid = 0;
int i;
for (i = 0; i < HANTRO_H264_DPB_SIZE; ++i) {
tbl->poc[i * 2] = dpb[i].top_field_order_cnt;
tbl->poc[i * 2 + 1] = dpb[i].bottom_field_order_cnt;
if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_VALID))
continue;
/*
* Set up bit maps of valid and long term DPBs.
* NOTE: The bits are reversed, i.e. MSb is DPB 0. For frame
* decoding, bits 31 to 16 are used, while for field decoding,
* all bits are used, with bit 31 being a top field, 30 a bottom
* field and so on.
*/
if (dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) {
if (dpb[i].fields & V4L2_H264_TOP_FIELD_REF)
dpb_valid |= REF_BIT(i * 2);
if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
dpb_valid |= REF_BIT(i * 2 + 1);
if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) {
dpb_longterm |= REF_BIT(i * 2);
dpb_longterm |= REF_BIT(i * 2 + 1);
}
} else {
dpb_valid |= REF_BIT(i);
if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
dpb_longterm |= REF_BIT(i);
}
}
ctx->h264_dec.dpb_valid = dpb_valid;
ctx->h264_dec.dpb_longterm = dpb_longterm;
if ((dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) ||
!(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)) {
tbl->poc[32] = ctx->h264_dec.cur_poc;
tbl->poc[33] = 0;
} else {
tbl->poc[32] = dec_param->top_field_order_cnt;
tbl->poc[33] = dec_param->bottom_field_order_cnt;
}
assemble_scaling_list(ctx);
}
static bool dpb_entry_match(const struct v4l2_h264_dpb_entry *a,
const struct v4l2_h264_dpb_entry *b)
{
return a->reference_ts == b->reference_ts;
}
static void update_dpb(struct hantro_ctx *ctx)
{
const struct v4l2_ctrl_h264_decode_params *dec_param;
DECLARE_BITMAP(new, ARRAY_SIZE(dec_param->dpb)) = { 0, };
DECLARE_BITMAP(used, ARRAY_SIZE(dec_param->dpb)) = { 0, };
unsigned int i, j;
dec_param = ctx->h264_dec.ctrls.decode;
/* Disable all entries by default. */
for (i = 0; i < ARRAY_SIZE(ctx->h264_dec.dpb); i++)
ctx->h264_dec.dpb[i].flags = 0;
/* Try to match new DPB entries with existing ones by their POCs. */
for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
if (!(ndpb->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID))
continue;
/*
* To avoid needless comparisons, iterate only over target DPB
* entries which are not used yet.
*/
for_each_clear_bit(j, used, ARRAY_SIZE(ctx->h264_dec.dpb)) {
struct v4l2_h264_dpb_entry *cdpb;
cdpb = &ctx->h264_dec.dpb[j];
if (!dpb_entry_match(cdpb, ndpb))
continue;
*cdpb = *ndpb;
set_bit(j, used);
break;
}
if (j == ARRAY_SIZE(ctx->h264_dec.dpb))
set_bit(i, new);
}
/* For entries that could not be matched, use remaining free slots. */
for_each_set_bit(i, new, ARRAY_SIZE(dec_param->dpb)) {
const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
struct v4l2_h264_dpb_entry *cdpb;
/*
* Both arrays are the same size, so there is no way
* we can end up without space in the target array, unless
* something is buggy.
*/
j = find_first_zero_bit(used, ARRAY_SIZE(ctx->h264_dec.dpb));
if (WARN_ON(j >= ARRAY_SIZE(ctx->h264_dec.dpb)))
return;
cdpb = &ctx->h264_dec.dpb[j];
*cdpb = *ndpb;
set_bit(j, used);
}
}
dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
unsigned int dpb_idx)
{
struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
dma_addr_t dma_addr = 0;
s32 cur_poc = ctx->h264_dec.cur_poc;
u32 flags;
if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
dma_addr = hantro_get_ref(ctx, dpb[dpb_idx].reference_ts);
if (!dma_addr) {
struct vb2_v4l2_buffer *dst_buf;
struct vb2_buffer *buf;
/*
* If a DPB entry is unused or invalid, the address of the
* current destination buffer is returned.
*/
dst_buf = hantro_get_dst_buf(ctx);
buf = &dst_buf->vb2_buf;
dma_addr = hantro_get_dec_buf_addr(ctx, buf);
}
flags = dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_FIELD ? 0x2 : 0;
flags |= abs(dpb[dpb_idx].top_field_order_cnt - cur_poc) <
abs(dpb[dpb_idx].bottom_field_order_cnt - cur_poc) ?
0x1 : 0;
return dma_addr | flags;
}
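/*
 * Illustrative note (not part of the driver): the flags are packed into
 * the two lowest bits of the returned DMA address (bit 1: field picture,
 * bit 0: the top field is closer in POC to the current picture than the
 * bottom field), which presumes reference buffers are at least 4-byte
 * aligned.
 */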
u16 hantro_h264_get_ref_nbr(struct hantro_ctx *ctx, unsigned int dpb_idx)
{
const struct v4l2_h264_dpb_entry *dpb = &ctx->h264_dec.dpb[dpb_idx];
if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
return 0;
return dpb->frame_num;
}
/*
* Removes all references with the same parity as the current picture from the
* reference list. The remaining list will have references with the opposite
* parity. This is effectively a deduplication of references since each buffer
* stores two fields. For this reason, each buffer is found twice in the
* reference list.
*
* This technique has been chosen through trial and error. This simple approach
* resulted in the highest conformance score. Note that this method may suffer
* worse quality in the case an opposite reference frame has been lost. If this
* becomes a problem in the future, it should be possible to add a preprocessing
* to identify un-paired fields and avoid removing them.
*/
static void deduplicate_reflist(struct v4l2_h264_reflist_builder *b,
struct v4l2_h264_reference *reflist)
{
int write_idx = 0;
int i;
if (b->cur_pic_fields == V4L2_H264_FRAME_REF) {
write_idx = b->num_valid;
goto done;
}
for (i = 0; i < b->num_valid; i++) {
if (b->cur_pic_fields != reflist[i].fields) {
reflist[write_idx++] = reflist[i];
continue;
}
}
done:
/* Should not happen unless we have a bug in the reflist builder. */
if (WARN_ON(write_idx > 16))
write_idx = 16;
/* Clear the remaining entries; some streams fail otherwise */
for (; write_idx < 16; write_idx++)
reflist[write_idx].index = 15;
}
int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
{
struct hantro_h264_dec_hw_ctx *h264_ctx = &ctx->h264_dec;
struct hantro_h264_dec_ctrls *ctrls = &h264_ctx->ctrls;
struct v4l2_h264_reflist_builder reflist_builder;
hantro_start_prepare_run(ctx);
ctrls->scaling =
hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_SCALING_MATRIX);
if (WARN_ON(!ctrls->scaling))
return -EINVAL;
ctrls->decode =
hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
if (WARN_ON(!ctrls->decode))
return -EINVAL;
ctrls->sps =
hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_SPS);
if (WARN_ON(!ctrls->sps))
return -EINVAL;
ctrls->pps =
hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_PPS);
if (WARN_ON(!ctrls->pps))
return -EINVAL;
/* Update the DPB with new refs. */
update_dpb(ctx);
/* Build the P/B{0,1} ref lists. */
v4l2_h264_init_reflist_builder(&reflist_builder, ctrls->decode,
ctrls->sps, ctx->h264_dec.dpb);
h264_ctx->cur_poc = reflist_builder.cur_pic_order_count;
/* Prepare data in memory. */
prepare_table(ctx);
v4l2_h264_build_p_ref_list(&reflist_builder, h264_ctx->reflists.p);
v4l2_h264_build_b_ref_lists(&reflist_builder, h264_ctx->reflists.b0,
h264_ctx->reflists.b1);
/*
* Reduce ref lists to at most 16 entries, Hantro hardware will deduce
* the actual picture lists in field through the dpb_valid,
* dpb_longterm bitmap along with the current frame parity.
*/
if (reflist_builder.cur_pic_fields != V4L2_H264_FRAME_REF) {
deduplicate_reflist(&reflist_builder, h264_ctx->reflists.p);
deduplicate_reflist(&reflist_builder, h264_ctx->reflists.b0);
deduplicate_reflist(&reflist_builder, h264_ctx->reflists.b1);
}
return 0;
}
void hantro_h264_dec_exit(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
struct hantro_aux_buf *priv = &h264_dec->priv;
dma_free_coherent(vpu->dev, priv->size, priv->cpu, priv->dma);
}
int hantro_h264_dec_init(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
struct hantro_aux_buf *priv = &h264_dec->priv;
struct hantro_h264_dec_priv_tbl *tbl;
priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,
GFP_KERNEL);
if (!priv->cpu)
return -ENOMEM;
priv->size = sizeof(*tbl);
tbl = priv->cpu;
memcpy(tbl->cabac_table, h264_cabac_table, sizeof(tbl->cabac_table));
return 0;
}
| linux-master | drivers/media/platform/verisilicon/hantro_h264.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU HEVC codec driver
*
* Copyright (C) 2020 Safran Passenger Innovations LLC
*/
#include <linux/types.h>
#include <media/v4l2-mem2mem.h>
#include "hantro.h"
#include "hantro_hw.h"
#define VERT_FILTER_RAM_SIZE 8 /* bytes per pixel row */
/*
* BSD control data of current picture at tile border
* 128 bits per 4x4 tile = 128/(8*4) bytes per row
*/
#define BSD_CTRL_RAM_SIZE 4 /* bytes per pixel row */
/* tile border coefficients of filter */
#define VERT_SAO_RAM_SIZE 48 /* bytes per pixel */
#define SCALING_LIST_SIZE (16 * 64)
#define MAX_TILE_COLS 20
#define MAX_TILE_ROWS 22
void hantro_hevc_ref_init(struct hantro_ctx *ctx)
{
struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
hevc_dec->ref_bufs_used = 0;
}
dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx,
s32 poc)
{
struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
int i;
	/* Find the reference buffer among the already known ones */
for (i = 0; i < NUM_REF_PICTURES; i++) {
if (hevc_dec->ref_bufs_poc[i] == poc) {
hevc_dec->ref_bufs_used |= 1 << i;
return hevc_dec->ref_bufs[i].dma;
}
}
return 0;
}
int hantro_hevc_add_ref_buf(struct hantro_ctx *ctx, int poc, dma_addr_t addr)
{
struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
int i;
/* Add a new reference buffer */
for (i = 0; i < NUM_REF_PICTURES; i++) {
if (!(hevc_dec->ref_bufs_used & 1 << i)) {
hevc_dec->ref_bufs_used |= 1 << i;
hevc_dec->ref_bufs_poc[i] = poc;
hevc_dec->ref_bufs[i].dma = addr;
return 0;
}
}
return -EINVAL;
}
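/*
 * Illustrative usage sketch (an assumption about the expected calling
 * pattern, not driver code): look a POC up first and register a new
 * buffer only on a miss:
 *
 *	dma_addr_t addr = hantro_hevc_get_ref_buf(ctx, poc);
 *
 *	if (!addr) {
 *		addr = free_buf_addr;	// hypothetical free decoded buffer
 *		if (hantro_hevc_add_ref_buf(ctx, poc, addr))
 *			return -EINVAL;	// all NUM_REF_PICTURES slots in use
 *	}
 *
 * hantro_hevc_ref_init() clears ref_bufs_used, so slots that are not
 * re-claimed through hantro_hevc_get_ref_buf() become reusable.
 */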
static int tile_buffer_reallocate(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
unsigned int num_tile_cols = pps->num_tile_columns_minus1 + 1;
unsigned int height64 = (sps->pic_height_in_luma_samples + 63) & ~63;
unsigned int size;
if (num_tile_cols <= 1 ||
num_tile_cols <= hevc_dec->num_tile_cols_allocated)
return 0;
/* Need to reallocate due to tiles passed via PPS */
if (hevc_dec->tile_filter.cpu) {
dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
hevc_dec->tile_filter.cpu,
hevc_dec->tile_filter.dma);
hevc_dec->tile_filter.cpu = NULL;
}
if (hevc_dec->tile_sao.cpu) {
dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
hevc_dec->tile_sao.cpu,
hevc_dec->tile_sao.dma);
hevc_dec->tile_sao.cpu = NULL;
}
if (hevc_dec->tile_bsd.cpu) {
dma_free_coherent(vpu->dev, hevc_dec->tile_bsd.size,
hevc_dec->tile_bsd.cpu,
hevc_dec->tile_bsd.dma);
hevc_dec->tile_bsd.cpu = NULL;
}
size = (VERT_FILTER_RAM_SIZE * height64 * (num_tile_cols - 1) * ctx->bit_depth) / 8;
hevc_dec->tile_filter.cpu = dma_alloc_coherent(vpu->dev, size,
&hevc_dec->tile_filter.dma,
GFP_KERNEL);
if (!hevc_dec->tile_filter.cpu)
return -ENOMEM;
hevc_dec->tile_filter.size = size;
size = (VERT_SAO_RAM_SIZE * height64 * (num_tile_cols - 1) * ctx->bit_depth) / 8;
hevc_dec->tile_sao.cpu = dma_alloc_coherent(vpu->dev, size,
&hevc_dec->tile_sao.dma,
GFP_KERNEL);
if (!hevc_dec->tile_sao.cpu)
goto err_free_tile_buffers;
hevc_dec->tile_sao.size = size;
size = BSD_CTRL_RAM_SIZE * height64 * (num_tile_cols - 1);
hevc_dec->tile_bsd.cpu = dma_alloc_coherent(vpu->dev, size,
&hevc_dec->tile_bsd.dma,
GFP_KERNEL);
if (!hevc_dec->tile_bsd.cpu)
goto err_free_sao_buffers;
hevc_dec->tile_bsd.size = size;
hevc_dec->num_tile_cols_allocated = num_tile_cols;
return 0;
err_free_sao_buffers:
if (hevc_dec->tile_sao.cpu)
dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
hevc_dec->tile_sao.cpu,
hevc_dec->tile_sao.dma);
hevc_dec->tile_sao.cpu = NULL;
err_free_tile_buffers:
if (hevc_dec->tile_filter.cpu)
dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
hevc_dec->tile_filter.cpu,
hevc_dec->tile_filter.dma);
hevc_dec->tile_filter.cpu = NULL;
return -ENOMEM;
}
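/*
 * Worked size example (illustrative, assuming an 8-bit 1080p stream with
 * 4 tile columns): height64 = ALIGN(1080, 64) = 1088, so the buffers
 * above come out to (8 * 1088 * 3 * 8) / 8 = 26112 bytes (vertical
 * filter), (48 * 1088 * 3 * 8) / 8 = 156672 bytes (SAO) and
 * 4 * 1088 * 3 = 13056 bytes (BSD control).
 */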
static int hantro_hevc_validate_sps(struct hantro_ctx *ctx, const struct v4l2_ctrl_hevc_sps *sps)
{
	/*
	 * For the tiled pixel format, check whether the width and height
	 * match the hardware constraints.
	 */
if (ctx->vpu_dst_fmt->fourcc == V4L2_PIX_FMT_NV12_4L4) {
if (ctx->dst_fmt.width !=
ALIGN(sps->pic_width_in_luma_samples, ctx->vpu_dst_fmt->frmsize.step_width))
return -EINVAL;
if (ctx->dst_fmt.height !=
ALIGN(sps->pic_height_in_luma_samples, ctx->vpu_dst_fmt->frmsize.step_height))
return -EINVAL;
}
return 0;
}
int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx)
{
struct hantro_hevc_dec_hw_ctx *hevc_ctx = &ctx->hevc_dec;
struct hantro_hevc_dec_ctrls *ctrls = &hevc_ctx->ctrls;
int ret;
hantro_start_prepare_run(ctx);
ctrls->decode_params =
hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_DECODE_PARAMS);
if (WARN_ON(!ctrls->decode_params))
return -EINVAL;
ctrls->scaling =
hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_SCALING_MATRIX);
if (WARN_ON(!ctrls->scaling))
return -EINVAL;
ctrls->sps =
hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_SPS);
if (WARN_ON(!ctrls->sps))
return -EINVAL;
ret = hantro_hevc_validate_sps(ctx, ctrls->sps);
if (ret)
return ret;
ctrls->pps =
hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_PPS);
if (WARN_ON(!ctrls->pps))
return -EINVAL;
ret = tile_buffer_reallocate(ctx);
if (ret)
return ret;
return 0;
}
void hantro_hevc_dec_exit(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
if (hevc_dec->tile_sizes.cpu)
dma_free_coherent(vpu->dev, hevc_dec->tile_sizes.size,
hevc_dec->tile_sizes.cpu,
hevc_dec->tile_sizes.dma);
hevc_dec->tile_sizes.cpu = NULL;
if (hevc_dec->scaling_lists.cpu)
dma_free_coherent(vpu->dev, hevc_dec->scaling_lists.size,
hevc_dec->scaling_lists.cpu,
hevc_dec->scaling_lists.dma);
hevc_dec->scaling_lists.cpu = NULL;
if (hevc_dec->tile_filter.cpu)
dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
hevc_dec->tile_filter.cpu,
hevc_dec->tile_filter.dma);
hevc_dec->tile_filter.cpu = NULL;
if (hevc_dec->tile_sao.cpu)
dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
hevc_dec->tile_sao.cpu,
hevc_dec->tile_sao.dma);
hevc_dec->tile_sao.cpu = NULL;
if (hevc_dec->tile_bsd.cpu)
dma_free_coherent(vpu->dev, hevc_dec->tile_bsd.size,
hevc_dec->tile_bsd.cpu,
hevc_dec->tile_bsd.dma);
hevc_dec->tile_bsd.cpu = NULL;
}
int hantro_hevc_dec_init(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
unsigned int size;
memset(hevc_dec, 0, sizeof(*hevc_dec));
	/*
	 * Maximum number of tiles times width and height (2 bytes each),
	 * rounded up to the next 16-byte boundary, plus one extra 16-byte
	 * chunk (the HW guys wanted to have this).
	 */
size = round_up(MAX_TILE_COLS * MAX_TILE_ROWS * 4 * sizeof(u16) + 16, 16);
hevc_dec->tile_sizes.cpu = dma_alloc_coherent(vpu->dev, size,
&hevc_dec->tile_sizes.dma,
GFP_KERNEL);
if (!hevc_dec->tile_sizes.cpu)
return -ENOMEM;
hevc_dec->tile_sizes.size = size;
hevc_dec->scaling_lists.cpu = dma_alloc_coherent(vpu->dev, SCALING_LIST_SIZE,
&hevc_dec->scaling_lists.dma,
GFP_KERNEL);
if (!hevc_dec->scaling_lists.cpu)
return -ENOMEM;
hevc_dec->scaling_lists.size = SCALING_LIST_SIZE;
hantro_hevc_ref_init(ctx);
return 0;
}
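/*
 * Worked size example (illustrative): the tile size table above is
 * round_up(20 * 22 * 4 * sizeof(u16) + 16, 16) = round_up(3536, 16) =
 * 3536 bytes, which happens to be 16-byte aligned already.
 */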
| linux-master | drivers/media/platform/verisilicon/hantro_hevc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
*
* JPEG encoder
* ------------
* The VPU JPEG encoder produces JPEG baseline sequential format.
* The quantization coefficients are 8-bit values, complying with
 * the baseline specification. Therefore, the driver must supply
 * luma and chroma quantization tables. The hardware does entropy
* encoding using internal Huffman tables, as specified in the JPEG
* specification.
*
* In other words, only the luma and chroma quantization tables are
* required for the encoding operation.
*
* Quantization luma table values are written to registers
* VEPU_swreg_0-VEPU_swreg_15, and chroma table values to
 * VEPU_swreg_16-VEPU_swreg_31. A special order is needed: neither
 * zigzag nor linear.
*/
#include <asm/unaligned.h>
#include <media/v4l2-mem2mem.h>
#include "hantro_jpeg.h"
#include "hantro.h"
#include "hantro_v4l2.h"
#include "hantro_hw.h"
#include "rockchip_vpu2_regs.h"
#define VEPU_JPEG_QUANT_TABLE_COUNT 16
static void rockchip_vpu2_set_src_img_ctrl(struct hantro_dev *vpu,
struct hantro_ctx *ctx)
{
u32 overfill_r, overfill_b;
u32 reg;
/*
* The format width and height are already macroblock aligned
* by .vidioc_s_fmt_vid_cap_mplane() callback. Destination
* format width and height can be further modified by
* .vidioc_s_selection(), and the width is 4-aligned.
*/
overfill_r = ctx->src_fmt.width - ctx->dst_fmt.width;
overfill_b = ctx->src_fmt.height - ctx->dst_fmt.height;
reg = VEPU_REG_IN_IMG_CTRL_ROW_LEN(ctx->src_fmt.width);
vepu_write_relaxed(vpu, reg, VEPU_REG_INPUT_LUMA_INFO);
reg = VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(overfill_r / 4) |
VEPU_REG_IN_IMG_CTRL_OVRFLB(overfill_b);
/*
* This register controls the input crop, as the offset
* from the right/bottom within the last macroblock. The offset from the
* right must be divided by 4 and so the crop must be aligned to 4 pixels
* horizontally.
*/
vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_OVER_FILL_STRM_OFFSET);
reg = VEPU_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_CTRL1);
}
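/*
 * Worked example (illustrative): encoding a 1920x1080 JPEG from the
 * macroblock-aligned 1920x1088 source format yields overfill_r = 0 and
 * overfill_b = 8, i.e. the hardware crops the bottom 8 rows of the last
 * macroblock row.
 */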
static void rockchip_vpu2_jpeg_enc_set_buffers(struct hantro_dev *vpu,
struct hantro_ctx *ctx,
struct vb2_buffer *src_buf,
struct vb2_buffer *dst_buf)
{
struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
dma_addr_t src[3];
u32 size_left;
size_left = vb2_plane_size(dst_buf, 0) - ctx->vpu_dst_fmt->header_size;
if (WARN_ON(vb2_plane_size(dst_buf, 0) < ctx->vpu_dst_fmt->header_size))
size_left = 0;
WARN_ON(pix_fmt->num_planes > 3);
vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
ctx->vpu_dst_fmt->header_size,
VEPU_REG_ADDR_OUTPUT_STREAM);
vepu_write_relaxed(vpu, size_left, VEPU_REG_STR_BUF_LIMIT);
if (pix_fmt->num_planes == 1) {
src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
} else if (pix_fmt->num_planes == 2) {
src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
} else {
src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
vepu_write_relaxed(vpu, src[2], VEPU_REG_ADDR_IN_PLANE_2);
}
}
static void
rockchip_vpu2_jpeg_enc_set_qtable(struct hantro_dev *vpu,
unsigned char *luma_qtable,
unsigned char *chroma_qtable)
{
u32 reg, i;
__be32 *luma_qtable_p;
__be32 *chroma_qtable_p;
luma_qtable_p = (__be32 *)luma_qtable;
chroma_qtable_p = (__be32 *)chroma_qtable;
/*
* Quantization table registers must be written in contiguous blocks.
* DO NOT collapse the below two "for" loops into one.
*/
for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
reg = get_unaligned_be32(&luma_qtable_p[i]);
vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i));
}
for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
reg = get_unaligned_be32(&chroma_qtable_p[i]);
vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i));
}
}
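/*
 * Illustrative note (a sketch, assuming the 64-byte hw_*_qtable layout
 * produced by hantro_jpeg_header_assemble()): with q pointing at the
 * table bytes, each of the 16 registers packs four consecutive bytes,
 * most significant first:
 *
 *	reg = (q[4 * i] << 24) | (q[4 * i + 1] << 16) |
 *	      (q[4 * i + 2] << 8) | q[4 * i + 3];
 *
 * which is what get_unaligned_be32() computes above regardless of CPU
 * endianness.
 */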
int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct hantro_jpeg_ctx jpeg_ctx;
u32 reg;
src_buf = hantro_get_src_buf(ctx);
dst_buf = hantro_get_dst_buf(ctx);
hantro_start_prepare_run(ctx);
memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
if (!jpeg_ctx.buffer)
return -ENOMEM;
jpeg_ctx.width = ctx->dst_fmt.width;
jpeg_ctx.height = ctx->dst_fmt.height;
jpeg_ctx.quality = ctx->jpeg_quality;
hantro_jpeg_header_assemble(&jpeg_ctx);
/* Switch to JPEG encoder mode before writing registers */
vepu_write_relaxed(vpu, VEPU_REG_ENCODE_FORMAT_JPEG,
VEPU_REG_ENCODE_START);
rockchip_vpu2_set_src_img_ctrl(vpu, ctx);
rockchip_vpu2_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf,
&dst_buf->vb2_buf);
rockchip_vpu2_jpeg_enc_set_qtable(vpu, jpeg_ctx.hw_luma_qtable,
jpeg_ctx.hw_chroma_qtable);
reg = VEPU_REG_OUTPUT_SWAP32
| VEPU_REG_OUTPUT_SWAP16
| VEPU_REG_OUTPUT_SWAP8
| VEPU_REG_INPUT_SWAP8
| VEPU_REG_INPUT_SWAP16
| VEPU_REG_INPUT_SWAP32;
/* Make sure that all registers are written at this point. */
vepu_write(vpu, reg, VEPU_REG_DATA_ENDIAN);
reg = VEPU_REG_AXI_CTRL_BURST_LEN(16);
vepu_write_relaxed(vpu, reg, VEPU_REG_AXI_CTRL);
reg = VEPU_REG_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width))
| VEPU_REG_MB_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
| VEPU_REG_FRAME_TYPE_INTRA
| VEPU_REG_ENCODE_FORMAT_JPEG
| VEPU_REG_ENCODE_ENABLE;
/* Kick the watchdog and start encoding */
hantro_end_prepare_run(ctx);
vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
return 0;
}
void rockchip_vpu2_jpeg_enc_done(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
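	/* The stream length is reported in bits, hence the division by 8. */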
u32 bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
struct vb2_v4l2_buffer *dst_buf = hantro_get_dst_buf(ctx);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
ctx->vpu_dst_fmt->header_size + bytesused);
}
| linux-master | drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
*/
#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include <media/v4l2-mem2mem.h>
#include "hantro.h"
#include "hantro_hw.h"
#define VDPU_SWREG(nr) ((nr) * 4)
#define VDPU_REG_DEC_OUT_BASE VDPU_SWREG(63)
#define VDPU_REG_RLC_VLC_BASE VDPU_SWREG(64)
#define VDPU_REG_QTABLE_BASE VDPU_SWREG(61)
#define VDPU_REG_REFER0_BASE VDPU_SWREG(131)
#define VDPU_REG_REFER2_BASE VDPU_SWREG(134)
#define VDPU_REG_REFER3_BASE VDPU_SWREG(135)
#define VDPU_REG_REFER1_BASE VDPU_SWREG(148)
#define VDPU_REG_DEC_E(v) ((v) ? BIT(0) : 0)
#define VDPU_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(11) : 0)
#define VDPU_REG_DEC_SCMD_DIS(v) ((v) ? BIT(10) : 0)
#define VDPU_REG_FILTERING_DIS(v) ((v) ? BIT(8) : 0)
#define VDPU_REG_DEC_LATENCY(v) (((v) << 1) & GENMASK(6, 1))
#define VDPU_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
#define VDPU_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
#define VDPU_REG_APF_THRESHOLD(v) (((v) << 17) & GENMASK(30, 17))
#define VDPU_REG_STARTMB_X(v) (((v) << 8) & GENMASK(16, 8))
#define VDPU_REG_STARTMB_Y(v) (((v) << 0) & GENMASK(7, 0))
#define VDPU_REG_DEC_MODE(v) (((v) << 0) & GENMASK(3, 0))
#define VDPU_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(5) : 0)
#define VDPU_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(4) : 0)
#define VDPU_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(3) : 0)
#define VDPU_REG_DEC_INSWAP32_E(v) ((v) ? BIT(2) : 0)
#define VDPU_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(1) : 0)
#define VDPU_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(0) : 0)
#define VDPU_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(22) : 0)
#define VDPU_REG_DEC_MAX_BURST(v) (((v) << 16) & GENMASK(20, 16))
#define VDPU_REG_DEC_AXI_WR_ID(v) (((v) << 8) & GENMASK(15, 8))
#define VDPU_REG_DEC_AXI_RD_ID(v) (((v) << 0) & GENMASK(7, 0))
#define VDPU_REG_RLC_MODE_E(v) ((v) ? BIT(20) : 0)
#define VDPU_REG_PIC_INTERLACE_E(v) ((v) ? BIT(17) : 0)
#define VDPU_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(16) : 0)
#define VDPU_REG_PIC_B_E(v) ((v) ? BIT(15) : 0)
#define VDPU_REG_PIC_INTER_E(v) ((v) ? BIT(14) : 0)
#define VDPU_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(13) : 0)
#define VDPU_REG_FWD_INTERLACE_E(v) ((v) ? BIT(12) : 0)
#define VDPU_REG_WRITE_MVS_E(v) ((v) ? BIT(10) : 0)
#define VDPU_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(5) : 0)
#define VDPU_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(4) : 0)
#define VDPU_REG_PIC_MB_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
#define VDPU_REG_PIC_MB_HEIGHT_P(v) (((v) << 11) & GENMASK(18, 11))
#define VDPU_REG_ALT_SCAN_E(v) ((v) ? BIT(6) : 0)
#define VDPU_REG_TOPFIELDFIRST_E(v) ((v) ? BIT(5) : 0)
#define VDPU_REG_STRM_START_BIT(v) (((v) << 26) & GENMASK(31, 26))
#define VDPU_REG_QSCALE_TYPE(v) ((v) ? BIT(24) : 0)
#define VDPU_REG_CON_MV_E(v) ((v) ? BIT(4) : 0)
#define VDPU_REG_INTRA_DC_PREC(v) (((v) << 2) & GENMASK(3, 2))
#define VDPU_REG_INTRA_VLC_TAB(v) ((v) ? BIT(1) : 0)
#define VDPU_REG_FRAME_PRED_DCT(v) ((v) ? BIT(0) : 0)
#define VDPU_REG_ALT_SCAN_FLAG_E(v) ((v) ? BIT(19) : 0)
#define VDPU_REG_FCODE_FWD_HOR(v) (((v) << 15) & GENMASK(18, 15))
#define VDPU_REG_FCODE_FWD_VER(v) (((v) << 11) & GENMASK(14, 11))
#define VDPU_REG_FCODE_BWD_HOR(v) (((v) << 7) & GENMASK(10, 7))
#define VDPU_REG_FCODE_BWD_VER(v) (((v) << 3) & GENMASK(6, 3))
#define VDPU_REG_MV_ACCURACY_FWD(v) ((v) ? BIT(2) : 0)
#define VDPU_REG_MV_ACCURACY_BWD(v) ((v) ? BIT(1) : 0)
static void
rockchip_vpu2_mpeg2_dec_set_quantisation(struct hantro_dev *vpu,
struct hantro_ctx *ctx)
{
struct v4l2_ctrl_mpeg2_quantisation *q;
q = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_MPEG2_QUANTISATION);
hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma, VDPU_REG_QTABLE_BASE);
}
static void
rockchip_vpu2_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
struct hantro_ctx *ctx,
struct vb2_buffer *src_buf,
struct vb2_buffer *dst_buf,
const struct v4l2_ctrl_mpeg2_sequence *seq,
const struct v4l2_ctrl_mpeg2_picture *pic)
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
switch (pic->picture_coding_type) {
case V4L2_MPEG2_PIC_CODING_TYPE_B:
backward_addr = hantro_get_ref(ctx, pic->backward_ref_ts);
fallthrough;
case V4L2_MPEG2_PIC_CODING_TYPE_P:
forward_addr = hantro_get_ref(ctx, pic->forward_ref_ts);
}
/* Source bitstream buffer */
addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
vdpu_write_relaxed(vpu, addr, VDPU_REG_RLC_VLC_BASE);
/* Destination frame buffer */
addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
current_addr = addr;
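	/*
	 * Fields are interleaved line by line in the destination frame
	 * buffer, so the bottom field starts one (16-aligned) luma line
	 * into the buffer.
	 */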
if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD)
addr += ALIGN(ctx->dst_fmt.width, 16);
vdpu_write_relaxed(vpu, addr, VDPU_REG_DEC_OUT_BASE);
if (!forward_addr)
forward_addr = current_addr;
if (!backward_addr)
backward_addr = current_addr;
/* Set forward ref frame (top/bottom field) */
if (pic->picture_structure == V4L2_MPEG2_PIC_FRAME ||
pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B ||
(pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD &&
pic->flags & V4L2_MPEG2_PIC_TOP_FIELD) ||
(pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD &&
!(pic->flags & V4L2_MPEG2_PIC_TOP_FIELD))) {
vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
} else if (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) {
vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER1_BASE);
} else if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD) {
vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
}
/* Set backward ref frame (top/bottom field) */
vdpu_write_relaxed(vpu, backward_addr, VDPU_REG_REFER2_BASE);
vdpu_write_relaxed(vpu, backward_addr, VDPU_REG_REFER3_BASE);
}
int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
const struct v4l2_ctrl_mpeg2_sequence *seq;
const struct v4l2_ctrl_mpeg2_picture *pic;
u32 reg;
src_buf = hantro_get_src_buf(ctx);
dst_buf = hantro_get_dst_buf(ctx);
hantro_start_prepare_run(ctx);
seq = hantro_get_ctrl(ctx,
V4L2_CID_STATELESS_MPEG2_SEQUENCE);
pic = hantro_get_ctrl(ctx,
V4L2_CID_STATELESS_MPEG2_PICTURE);
reg = VDPU_REG_DEC_ADV_PRE_DIS(0) |
VDPU_REG_DEC_SCMD_DIS(0) |
VDPU_REG_FILTERING_DIS(1) |
VDPU_REG_DEC_LATENCY(0);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(50));
reg = VDPU_REG_INIT_QP(1) |
VDPU_REG_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(51));
reg = VDPU_REG_APF_THRESHOLD(8) |
VDPU_REG_STARTMB_X(0) |
VDPU_REG_STARTMB_Y(0);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(52));
reg = VDPU_REG_DEC_MODE(5);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(53));
reg = VDPU_REG_DEC_STRENDIAN_E(1) |
VDPU_REG_DEC_STRSWAP32_E(1) |
VDPU_REG_DEC_OUTSWAP32_E(1) |
VDPU_REG_DEC_INSWAP32_E(1) |
VDPU_REG_DEC_OUT_ENDIAN(1) |
VDPU_REG_DEC_IN_ENDIAN(1);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(54));
reg = VDPU_REG_DEC_DATA_DISC_E(0) |
VDPU_REG_DEC_MAX_BURST(16) |
VDPU_REG_DEC_AXI_WR_ID(0) |
VDPU_REG_DEC_AXI_RD_ID(0);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(56));
reg = VDPU_REG_RLC_MODE_E(0) |
VDPU_REG_PIC_INTERLACE_E(!(seq->flags & V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE)) |
VDPU_REG_PIC_FIELDMODE_E(pic->picture_structure != V4L2_MPEG2_PIC_FRAME) |
VDPU_REG_PIC_B_E(pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B) |
VDPU_REG_PIC_INTER_E(pic->picture_coding_type != V4L2_MPEG2_PIC_CODING_TYPE_I) |
VDPU_REG_PIC_TOPFIELD_E(pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) |
VDPU_REG_FWD_INTERLACE_E(0) |
VDPU_REG_WRITE_MVS_E(0) |
VDPU_REG_DEC_TIMEOUT_E(1) |
VDPU_REG_DEC_CLK_GATE_E(1);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
reg = VDPU_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
VDPU_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
VDPU_REG_ALT_SCAN_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
VDPU_REG_TOPFIELDFIRST_E(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(120));
reg = VDPU_REG_STRM_START_BIT(0) |
VDPU_REG_QSCALE_TYPE(pic->flags & V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE) |
VDPU_REG_CON_MV_E(pic->flags & V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV) |
VDPU_REG_INTRA_DC_PREC(pic->intra_dc_precision) |
VDPU_REG_INTRA_VLC_TAB(pic->flags & V4L2_MPEG2_PIC_FLAG_INTRA_VLC) |
VDPU_REG_FRAME_PRED_DCT(pic->flags & V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(122));
reg = VDPU_REG_ALT_SCAN_FLAG_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
VDPU_REG_FCODE_FWD_HOR(pic->f_code[0][0]) |
VDPU_REG_FCODE_FWD_VER(pic->f_code[0][1]) |
VDPU_REG_FCODE_BWD_HOR(pic->f_code[1][0]) |
VDPU_REG_FCODE_BWD_VER(pic->f_code[1][1]) |
VDPU_REG_MV_ACCURACY_FWD(1) |
VDPU_REG_MV_ACCURACY_BWD(1);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(136));
rockchip_vpu2_mpeg2_dec_set_quantisation(vpu, ctx);
rockchip_vpu2_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
&dst_buf->vb2_buf, seq, pic);
/* Kick the watchdog and start decoding */
hantro_end_prepare_run(ctx);
reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
vdpu_write(vpu, reg, VDPU_SWREG(57));
return 0;
}
| linux-master | drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (C) 2018 Collabora, Ltd.
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
* Alpha Lin <[email protected]>
* Jeffy Chen <[email protected]>
*
* Copyright 2018 Google LLC.
* Tomasz Figa <[email protected]>
*
* Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
* Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include "hantro.h"
#include "hantro_hw.h"
#include "hantro_v4l2.h"
#define HANTRO_DEFAULT_BIT_DEPTH 8
static int hantro_set_fmt_out(struct hantro_ctx *ctx,
struct v4l2_pix_format_mplane *pix_mp,
bool need_postproc);
static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
struct v4l2_pix_format_mplane *pix_mp);
static const struct hantro_fmt *
hantro_get_formats(const struct hantro_ctx *ctx, unsigned int *num_fmts, bool need_postproc)
{
const struct hantro_fmt *formats;
if (need_postproc) {
*num_fmts = 0;
return NULL;
}
if (ctx->is_encoder) {
formats = ctx->dev->variant->enc_fmts;
*num_fmts = ctx->dev->variant->num_enc_fmts;
} else {
formats = ctx->dev->variant->dec_fmts;
*num_fmts = ctx->dev->variant->num_dec_fmts;
}
return formats;
}
static const struct hantro_fmt *
hantro_get_postproc_formats(const struct hantro_ctx *ctx,
unsigned int *num_fmts)
{
struct hantro_dev *vpu = ctx->dev;
if (ctx->is_encoder || !vpu->variant->postproc_fmts) {
*num_fmts = 0;
return NULL;
}
*num_fmts = ctx->dev->variant->num_postproc_fmts;
return ctx->dev->variant->postproc_fmts;
}
int hantro_get_format_depth(u32 fourcc)
{
switch (fourcc) {
case V4L2_PIX_FMT_P010:
case V4L2_PIX_FMT_P010_4L4:
case V4L2_PIX_FMT_NV15_4L4:
return 10;
default:
return 8;
}
}
static bool
hantro_check_depth_match(const struct hantro_fmt *fmt, int bit_depth)
{
int fmt_depth;
if (!fmt->match_depth && !fmt->postprocessed)
return true;
/* 0 means default depth, which is 8 */
if (!bit_depth)
bit_depth = HANTRO_DEFAULT_BIT_DEPTH;
fmt_depth = hantro_get_format_depth(fmt->fourcc);
/*
* Allow only downconversion for postproc formats for now.
* It may be possible to relax that on some HW.
*/
if (!fmt->match_depth)
return fmt_depth <= bit_depth;
return fmt_depth == bit_depth;
}
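/*
 * Example (illustrative): a postprocessed 8-bit format (no match_depth)
 * is accepted for a 10-bit stream since 8 <= 10 is a downconversion,
 * while a 10-bit format is rejected for an 8-bit stream.
 */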
static const struct hantro_fmt *
hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc)
{
const struct hantro_fmt *formats;
unsigned int i, num_fmts;
formats = hantro_get_formats(ctx, &num_fmts, HANTRO_AUTO_POSTPROC);
for (i = 0; i < num_fmts; i++)
if (formats[i].fourcc == fourcc)
return &formats[i];
formats = hantro_get_postproc_formats(ctx, &num_fmts);
for (i = 0; i < num_fmts; i++)
if (formats[i].fourcc == fourcc)
return &formats[i];
return NULL;
}
const struct hantro_fmt *
hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream,
int bit_depth, bool need_postproc)
{
const struct hantro_fmt *formats;
unsigned int i, num_fmts;
formats = hantro_get_formats(ctx, &num_fmts, need_postproc);
for (i = 0; i < num_fmts; i++) {
if (bitstream == (formats[i].codec_mode !=
HANTRO_MODE_NONE) &&
hantro_check_depth_match(&formats[i], bit_depth))
return &formats[i];
}
formats = hantro_get_postproc_formats(ctx, &num_fmts);
for (i = 0; i < num_fmts; i++) {
if (bitstream == (formats[i].codec_mode !=
HANTRO_MODE_NONE) &&
hantro_check_depth_match(&formats[i], bit_depth))
return &formats[i];
}
return NULL;
}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct hantro_dev *vpu = video_drvdata(file);
struct video_device *vdev = video_devdata(file);
strscpy(cap->driver, vpu->dev->driver->name, sizeof(cap->driver));
strscpy(cap->card, vdev->name, sizeof(cap->card));
return 0;
}
static int vidioc_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
struct hantro_ctx *ctx = fh_to_ctx(priv);
const struct hantro_fmt *fmt;
fmt = hantro_find_format(ctx, fsize->pixel_format);
if (!fmt) {
vpu_debug(0, "unsupported bitstream format (%08x)\n",
fsize->pixel_format);
return -EINVAL;
}
/* For non-coded formats check if postprocessing scaling is possible */
if (fmt->codec_mode == HANTRO_MODE_NONE) {
if (hantro_needs_postproc(ctx, fmt))
return hanto_postproc_enum_framesizes(ctx, fsize);
else
return -ENOTTY;
} else if (fsize->index != 0) {
vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
fsize->index);
return -EINVAL;
}
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
fsize->stepwise = fmt->frmsize;
return 0;
}
static int vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f, bool capture)
{
struct hantro_ctx *ctx = fh_to_ctx(priv);
const struct hantro_fmt *fmt, *formats;
unsigned int num_fmts, i, j = 0;
bool skip_mode_none;
/*
* When dealing with an encoder:
* - on the capture side we want to filter out all MODE_NONE formats.
* - on the output side we want to filter out all formats that are
* not MODE_NONE.
* When dealing with a decoder:
* - on the capture side we want to filter out all formats that are
* not MODE_NONE.
* - on the output side we want to filter out all MODE_NONE formats.
*/
skip_mode_none = capture == ctx->is_encoder;
formats = hantro_get_formats(ctx, &num_fmts, HANTRO_AUTO_POSTPROC);
for (i = 0; i < num_fmts; i++) {
bool mode_none = formats[i].codec_mode == HANTRO_MODE_NONE;
fmt = &formats[i];
if (skip_mode_none == mode_none)
continue;
if (!hantro_check_depth_match(fmt, ctx->bit_depth))
continue;
if (j == f->index) {
f->pixelformat = fmt->fourcc;
return 0;
}
++j;
}
	/*
	 * Enumerate post-processed formats. As per the specification,
	 * we enumerate these formats after natively decoded formats
	 * as a hint for applications on what's the preferred format.
	 */
if (!capture)
return -EINVAL;
formats = hantro_get_postproc_formats(ctx, &num_fmts);
for (i = 0; i < num_fmts; i++) {
fmt = &formats[i];
if (!hantro_check_depth_match(fmt, ctx->bit_depth))
continue;
if (j == f->index) {
f->pixelformat = fmt->fourcc;
return 0;
}
++j;
}
return -EINVAL;
}
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(file, priv, f, true);
}
static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
return vidioc_enum_fmt(file, priv, f, false);
}
static int vidioc_g_fmt_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct hantro_ctx *ctx = fh_to_ctx(priv);
vpu_debug(4, "f->type = %d\n", f->type);
*pix_mp = ctx->src_fmt;
return 0;
}
static int vidioc_g_fmt_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct hantro_ctx *ctx = fh_to_ctx(priv);
vpu_debug(4, "f->type = %d\n", f->type);
*pix_mp = ctx->dst_fmt;
return 0;
}
static int hantro_try_fmt(const struct hantro_ctx *ctx,
struct v4l2_pix_format_mplane *pix_mp,
enum v4l2_buf_type type)
{
const struct hantro_fmt *fmt;
const struct hantro_fmt *vpu_fmt;
bool capture = V4L2_TYPE_IS_CAPTURE(type);
bool coded;
coded = capture == ctx->is_encoder;
vpu_debug(4, "trying format %c%c%c%c\n",
(pix_mp->pixelformat & 0x7f),
(pix_mp->pixelformat >> 8) & 0x7f,
(pix_mp->pixelformat >> 16) & 0x7f,
(pix_mp->pixelformat >> 24) & 0x7f);
fmt = hantro_find_format(ctx, pix_mp->pixelformat);
if (!fmt) {
fmt = hantro_get_default_fmt(ctx, coded, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
pix_mp->pixelformat = fmt->fourcc;
}
if (coded) {
pix_mp->num_planes = 1;
vpu_fmt = fmt;
} else if (ctx->is_encoder) {
vpu_fmt = hantro_find_format(ctx, ctx->dst_fmt.pixelformat);
} else {
/*
* Width/height on the CAPTURE end of a decoder are ignored and
* replaced by the OUTPUT ones.
*/
pix_mp->width = ctx->src_fmt.width;
pix_mp->height = ctx->src_fmt.height;
vpu_fmt = fmt;
}
pix_mp->field = V4L2_FIELD_NONE;
v4l2_apply_frmsize_constraints(&pix_mp->width, &pix_mp->height,
&vpu_fmt->frmsize);
if (!coded) {
/* Fill remaining fields */
v4l2_fill_pixfmt_mp(pix_mp, fmt->fourcc, pix_mp->width,
pix_mp->height);
if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE &&
!hantro_needs_postproc(ctx, fmt))
pix_mp->plane_fmt[0].sizeimage +=
hantro_h264_mv_size(pix_mp->width,
pix_mp->height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME &&
!hantro_needs_postproc(ctx, fmt))
pix_mp->plane_fmt[0].sizeimage +=
hantro_vp9_mv_size(pix_mp->width,
pix_mp->height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE &&
!hantro_needs_postproc(ctx, fmt))
pix_mp->plane_fmt[0].sizeimage +=
hantro_hevc_mv_size(pix_mp->width,
pix_mp->height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_AV1_FRAME &&
!hantro_needs_postproc(ctx, fmt))
pix_mp->plane_fmt[0].sizeimage +=
hantro_av1_mv_size(pix_mp->width,
pix_mp->height);
} else if (!pix_mp->plane_fmt[0].sizeimage) {
/*
* For coded formats the application can specify
* sizeimage. If the application passes a zero sizeimage,
* let's default to the maximum frame size.
*/
pix_mp->plane_fmt[0].sizeimage = fmt->header_size +
pix_mp->width * pix_mp->height * fmt->max_depth;
}
return 0;
}
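/*
 * Worked example (illustrative): for a 1920x1088 coded stream with a zero
 * sizeimage from userspace and an assumed max_depth of 2, the fallback
 * buffer size is header_size + 1920 * 1088 * 2 bytes, a worst-case
 * allocation that any compressed frame should fit into.
 */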
static int vidioc_try_fmt_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
}
static int vidioc_try_fmt_out_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
}
static void
hantro_reset_fmt(struct v4l2_pix_format_mplane *fmt,
const struct hantro_fmt *vpu_fmt)
{
memset(fmt, 0, sizeof(*fmt));
fmt->pixelformat = vpu_fmt->fourcc;
fmt->field = V4L2_FIELD_NONE;
fmt->colorspace = V4L2_COLORSPACE_JPEG;
fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
static void
hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
{
const struct hantro_fmt *vpu_fmt;
struct v4l2_pix_format_mplane fmt;
vpu_fmt = hantro_get_default_fmt(ctx, true, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
if (!vpu_fmt)
return;
hantro_reset_fmt(&fmt, vpu_fmt);
fmt.width = vpu_fmt->frmsize.min_width;
fmt.height = vpu_fmt->frmsize.min_height;
if (ctx->is_encoder)
hantro_set_fmt_cap(ctx, &fmt);
else
hantro_set_fmt_out(ctx, &fmt, HANTRO_AUTO_POSTPROC);
}
int
hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth, bool need_postproc)
{
const struct hantro_fmt *raw_vpu_fmt;
struct v4l2_pix_format_mplane raw_fmt, *encoded_fmt;
int ret;
raw_vpu_fmt = hantro_get_default_fmt(ctx, false, bit_depth, need_postproc);
if (!raw_vpu_fmt)
return -EINVAL;
if (ctx->is_encoder) {
encoded_fmt = &ctx->dst_fmt;
ctx->vpu_src_fmt = raw_vpu_fmt;
} else {
encoded_fmt = &ctx->src_fmt;
}
hantro_reset_fmt(&raw_fmt, raw_vpu_fmt);
raw_fmt.width = encoded_fmt->width;
raw_fmt.height = encoded_fmt->height;
if (ctx->is_encoder)
ret = hantro_set_fmt_out(ctx, &raw_fmt, need_postproc);
else
ret = hantro_set_fmt_cap(ctx, &raw_fmt);
if (!ret) {
ctx->bit_depth = bit_depth;
ctx->need_postproc = need_postproc;
}
return ret;
}
void hantro_reset_fmts(struct hantro_ctx *ctx)
{
hantro_reset_encoded_fmt(ctx);
hantro_reset_raw_fmt(ctx, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
}
static void
hantro_update_requires_request(struct hantro_ctx *ctx, u32 fourcc)
{
switch (fourcc) {
case V4L2_PIX_FMT_JPEG:
ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = false;
break;
case V4L2_PIX_FMT_MPEG2_SLICE:
case V4L2_PIX_FMT_VP8_FRAME:
case V4L2_PIX_FMT_H264_SLICE:
case V4L2_PIX_FMT_HEVC_SLICE:
case V4L2_PIX_FMT_VP9_FRAME:
ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = true;
break;
default:
break;
}
}
static void
hantro_update_requires_hold_capture_buf(struct hantro_ctx *ctx, u32 fourcc)
{
struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
switch (fourcc) {
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_MPEG2_SLICE:
case V4L2_PIX_FMT_VP8_FRAME:
case V4L2_PIX_FMT_HEVC_SLICE:
case V4L2_PIX_FMT_VP9_FRAME:
vq->subsystem_flags &= ~(VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
break;
case V4L2_PIX_FMT_H264_SLICE:
vq->subsystem_flags |= VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
break;
default:
break;
}
}
static int hantro_set_fmt_out(struct hantro_ctx *ctx,
struct v4l2_pix_format_mplane *pix_mp,
bool need_postproc)
{
struct vb2_queue *vq;
int ret;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
ret = hantro_try_fmt(ctx, pix_mp, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
if (ret)
return ret;
if (!ctx->is_encoder) {
struct vb2_queue *peer_vq;
		/*
		 * In order to support dynamic resolution change, the decoder
		 * admits a resolution change, as long as the pixelformat
		 * remains the same. This can't be done while streaming.
		 */
if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&
pix_mp->pixelformat != ctx->src_fmt.pixelformat))
return -EBUSY;
/*
* Since format change on the OUTPUT queue will reset
* the CAPTURE queue, we can't allow doing so
* when the CAPTURE queue has buffers allocated.
*/
peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (vb2_is_busy(peer_vq))
return -EBUSY;
} else {
/*
* The encoder doesn't admit a format change if
* there are OUTPUT buffers allocated.
*/
if (vb2_is_busy(vq))
return -EBUSY;
}
ctx->vpu_src_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
ctx->src_fmt = *pix_mp;
/*
	 * The current raw format might have become invalid with the newly
	 * selected codec, so reset it to default just to be safe and
	 * keep internal driver state sane. Userspace is required to set
	 * the raw format again after we return, so we don't need
	 * anything smarter.
* Note that hantro_reset_raw_fmt() also propagates size
* changes to the raw format.
*/
if (!ctx->is_encoder)
hantro_reset_raw_fmt(ctx,
hantro_get_format_depth(pix_mp->pixelformat),
need_postproc);
	/* Colorimetry information is always propagated. */
ctx->dst_fmt.colorspace = pix_mp->colorspace;
ctx->dst_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
ctx->dst_fmt.xfer_func = pix_mp->xfer_func;
ctx->dst_fmt.quantization = pix_mp->quantization;
hantro_update_requires_request(ctx, pix_mp->pixelformat);
hantro_update_requires_hold_capture_buf(ctx, pix_mp->pixelformat);
vpu_debug(0, "OUTPUT codec mode: %d\n", ctx->vpu_src_fmt->codec_mode);
vpu_debug(0, "fmt - w: %d, h: %d\n",
pix_mp->width, pix_mp->height);
return 0;
}
static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
struct v4l2_pix_format_mplane *pix_mp)
{
struct vb2_queue *vq;
int ret;
/* Change not allowed if queue is busy. */
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (vb2_is_busy(vq))
return -EBUSY;
if (ctx->is_encoder) {
struct vb2_queue *peer_vq;
/*
* Since format change on the CAPTURE queue will reset
* the OUTPUT queue, we can't allow doing so
* when the OUTPUT queue has buffers allocated.
*/
peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
if (vb2_is_busy(peer_vq) &&
(pix_mp->pixelformat != ctx->dst_fmt.pixelformat ||
pix_mp->height != ctx->dst_fmt.height ||
pix_mp->width != ctx->dst_fmt.width))
return -EBUSY;
}
ret = hantro_try_fmt(ctx, pix_mp, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (ret)
return ret;
ctx->vpu_dst_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
ctx->dst_fmt = *pix_mp;
/*
* Current raw format might have become invalid with newly
* selected codec, so reset it to default just to be safe and
* keep internal driver state sane. User is mandated to set
* the raw format again after we return, so we don't need
* anything smarter.
* Note that hantro_reset_raw_fmt() also propagates size
* changes to the raw format.
*/
if (ctx->is_encoder)
hantro_reset_raw_fmt(ctx, HANTRO_DEFAULT_BIT_DEPTH, HANTRO_AUTO_POSTPROC);
	/* Colorimetry information is always propagated. */
ctx->src_fmt.colorspace = pix_mp->colorspace;
ctx->src_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
ctx->src_fmt.xfer_func = pix_mp->xfer_func;
ctx->src_fmt.quantization = pix_mp->quantization;
vpu_debug(0, "CAPTURE codec mode: %d\n", ctx->vpu_dst_fmt->codec_mode);
vpu_debug(0, "fmt - w: %d, h: %d\n",
pix_mp->width, pix_mp->height);
hantro_update_requires_request(ctx, pix_mp->pixelformat);
return 0;
}
static int
vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
{
return hantro_set_fmt_out(fh_to_ctx(priv), &f->fmt.pix_mp, HANTRO_AUTO_POSTPROC);
}
static int
vidioc_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
{
return hantro_set_fmt_cap(fh_to_ctx(priv), &f->fmt.pix_mp);
}
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct hantro_ctx *ctx = fh_to_ctx(priv);
/* Crop only supported on source. */
if (!ctx->is_encoder ||
sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
switch (sel->target) {
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
sel->r.top = 0;
sel->r.left = 0;
sel->r.width = ctx->src_fmt.width;
sel->r.height = ctx->src_fmt.height;
break;
case V4L2_SEL_TGT_CROP:
sel->r.top = 0;
sel->r.left = 0;
sel->r.width = ctx->dst_fmt.width;
sel->r.height = ctx->dst_fmt.height;
break;
default:
return -EINVAL;
}
return 0;
}
static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct hantro_ctx *ctx = fh_to_ctx(priv);
struct v4l2_rect *rect = &sel->r;
struct vb2_queue *vq;
/* Crop only supported on source. */
if (!ctx->is_encoder ||
sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
/* Change not allowed if the queue is streaming. */
vq = v4l2_m2m_get_src_vq(ctx->fh.m2m_ctx);
if (vb2_is_streaming(vq))
return -EBUSY;
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
/*
* We do not support offsets, and we can crop only inside
* right-most or bottom-most macroblocks.
*/
if (rect->left != 0 || rect->top != 0 ||
round_up(rect->width, MB_DIM) != ctx->src_fmt.width ||
round_up(rect->height, MB_DIM) != ctx->src_fmt.height) {
/* Default to full frame for incorrect settings. */
rect->left = 0;
rect->top = 0;
rect->width = ctx->src_fmt.width;
rect->height = ctx->src_fmt.height;
} else {
/* We support widths aligned to 4 pixels and arbitrary heights. */
rect->width = round_up(rect->width, 4);
}
ctx->dst_fmt.width = rect->width;
ctx->dst_fmt.height = rect->height;
return 0;
}
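/*
 * Example (illustrative): with a 1920x1088 macroblock-aligned source
 * format, a requested 1918x1080 crop is accepted and rounded to
 * 1920x1080 (widths are 4-aligned), while any crop with a non-zero
 * offset falls back to the full 1920x1088 frame.
 */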
static const struct v4l2_event hantro_eos_event = {
.type = V4L2_EVENT_EOS
};
static int vidioc_encoder_cmd(struct file *file, void *priv,
struct v4l2_encoder_cmd *ec)
{
struct hantro_ctx *ctx = fh_to_ctx(priv);
int ret;
ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, ec);
if (ret < 0)
return ret;
if (!vb2_is_streaming(v4l2_m2m_get_src_vq(ctx->fh.m2m_ctx)) ||
!vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx)))
return 0;
ret = v4l2_m2m_ioctl_encoder_cmd(file, priv, ec);
if (ret < 0)
return ret;
if (ec->cmd == V4L2_ENC_CMD_STOP &&
v4l2_m2m_has_stopped(ctx->fh.m2m_ctx))
v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
if (ec->cmd == V4L2_ENC_CMD_START)
vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
return 0;
}
const struct v4l2_ioctl_ops hantro_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_framesizes = vidioc_enum_framesizes,
.vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_cap_mplane,
.vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_out_mplane,
.vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_out_mplane,
.vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_cap_mplane,
.vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_out_mplane,
.vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_cap_mplane,
.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_selection = vidioc_g_selection,
.vidioc_s_selection = vidioc_s_selection,
.vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
.vidioc_encoder_cmd = vidioc_encoder_cmd,
};
static int
hantro_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
unsigned int *num_planes, unsigned int sizes[],
struct device *alloc_devs[])
{
struct hantro_ctx *ctx = vb2_get_drv_priv(vq);
struct v4l2_pix_format_mplane *pixfmt;
int i;
switch (vq->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
pixfmt = &ctx->dst_fmt;
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
pixfmt = &ctx->src_fmt;
break;
default:
vpu_err("invalid queue type: %d\n", vq->type);
return -EINVAL;
}
if (*num_planes) {
if (*num_planes != pixfmt->num_planes)
return -EINVAL;
for (i = 0; i < pixfmt->num_planes; ++i)
if (sizes[i] < pixfmt->plane_fmt[i].sizeimage)
return -EINVAL;
return 0;
}
*num_planes = pixfmt->num_planes;
for (i = 0; i < pixfmt->num_planes; ++i)
sizes[i] = pixfmt->plane_fmt[i].sizeimage;
return 0;
}
static int
hantro_buf_plane_check(struct vb2_buffer *vb,
struct v4l2_pix_format_mplane *pixfmt)
{
unsigned int sz;
int i;
for (i = 0; i < pixfmt->num_planes; ++i) {
sz = pixfmt->plane_fmt[i].sizeimage;
vpu_debug(4, "plane %d size: %ld, sizeimage: %u\n",
i, vb2_plane_size(vb, i), sz);
if (vb2_plane_size(vb, i) < sz) {
vpu_err("plane %d is too small for output\n", i);
return -EINVAL;
}
}
return 0;
}
static int hantro_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
struct hantro_ctx *ctx = vb2_get_drv_priv(vq);
struct v4l2_pix_format_mplane *pix_fmt;
int ret;
if (V4L2_TYPE_IS_OUTPUT(vq->type))
pix_fmt = &ctx->src_fmt;
else
pix_fmt = &ctx->dst_fmt;
ret = hantro_buf_plane_check(vb, pix_fmt);
if (ret)
return ret;
	/*
	 * The buffer's bytesused must be written by the driver for CAPTURE
	 * buffers (for OUTPUT buffers, if userspace passes a zero bytesused,
	 * the v4l2 core sets it to the buffer length).
	 */
if (V4L2_TYPE_IS_CAPTURE(vq->type)) {
if (ctx->is_encoder)
vb2_set_plane_payload(vb, 0, 0);
else
vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
}
return 0;
}
static void hantro_buf_queue(struct vb2_buffer *vb)
{
struct hantro_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
vb2_is_streaming(vb->vb2_queue) &&
v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
unsigned int i;
for (i = 0; i < vb->num_planes; i++)
vb2_set_plane_payload(vb, i, 0);
vbuf->field = V4L2_FIELD_NONE;
vbuf->sequence = ctx->sequence_cap++;
v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
return;
}
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static bool hantro_vq_is_coded(struct vb2_queue *q)
{
struct hantro_ctx *ctx = vb2_get_drv_priv(q);
return ctx->is_encoder != V4L2_TYPE_IS_OUTPUT(q->type);
}
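/*
 * Illustrative truth table: an encoder's coded data is on the CAPTURE
 * queue and a decoder's on the OUTPUT queue, hence the comparison above
 * acts as is_encoder XOR V4L2_TYPE_IS_OUTPUT(q->type).
 */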
static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct hantro_ctx *ctx = vb2_get_drv_priv(q);
int ret = 0;
v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
if (V4L2_TYPE_IS_OUTPUT(q->type))
ctx->sequence_out = 0;
else
ctx->sequence_cap = 0;
if (hantro_vq_is_coded(q)) {
enum hantro_codec_mode codec_mode;
if (V4L2_TYPE_IS_OUTPUT(q->type))
codec_mode = ctx->vpu_src_fmt->codec_mode;
else
codec_mode = ctx->vpu_dst_fmt->codec_mode;
vpu_debug(4, "Codec mode = %d\n", codec_mode);
ctx->codec_ops = &ctx->dev->variant->codec_ops[codec_mode];
if (ctx->codec_ops->init) {
ret = ctx->codec_ops->init(ctx);
if (ret)
return ret;
}
if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt)) {
ret = hantro_postproc_alloc(ctx);
if (ret)
goto err_codec_exit;
}
}
return ret;
err_codec_exit:
if (ctx->codec_ops->exit)
ctx->codec_ops->exit(ctx);
return ret;
}
static void
hantro_return_bufs(struct vb2_queue *q,
struct vb2_v4l2_buffer *(*buf_remove)(struct v4l2_m2m_ctx *))
{
struct hantro_ctx *ctx = vb2_get_drv_priv(q);
for (;;) {
struct vb2_v4l2_buffer *vbuf;
vbuf = buf_remove(ctx->fh.m2m_ctx);
if (!vbuf)
break;
v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
&ctx->ctrl_handler);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}
}
static void hantro_stop_streaming(struct vb2_queue *q)
{
struct hantro_ctx *ctx = vb2_get_drv_priv(q);
if (hantro_vq_is_coded(q)) {
hantro_postproc_free(ctx);
if (ctx->codec_ops && ctx->codec_ops->exit)
ctx->codec_ops->exit(ctx);
}
/*
* The mem2mem framework calls v4l2_m2m_cancel_job before
* .stop_streaming, so there isn't any job running and
* it is safe to return all the buffers.
*/
if (V4L2_TYPE_IS_OUTPUT(q->type))
hantro_return_bufs(q, v4l2_m2m_src_buf_remove);
else
hantro_return_bufs(q, v4l2_m2m_dst_buf_remove);
v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
if (V4L2_TYPE_IS_OUTPUT(q->type) &&
v4l2_m2m_has_stopped(ctx->fh.m2m_ctx))
v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
}
static void hantro_buf_request_complete(struct vb2_buffer *vb)
{
struct hantro_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_handler);
}
static int hantro_buf_out_validate(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
vbuf->field = V4L2_FIELD_NONE;
return 0;
}
const struct vb2_ops hantro_queue_ops = {
.queue_setup = hantro_queue_setup,
.buf_prepare = hantro_buf_prepare,
.buf_queue = hantro_buf_queue,
.buf_out_validate = hantro_buf_out_validate,
.buf_request_complete = hantro_buf_request_complete,
.start_streaming = hantro_start_streaming,
.stop_streaming = hantro_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
| linux-master | drivers/media/platform/verisilicon/hantro_v4l2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (c) 2014 Rockchip Electronics Co., Ltd.
* Hertz Wong <[email protected]>
* Herman Chen <[email protected]>
*
* Copyright (C) 2014 Google, Inc.
* Tomasz Figa <[email protected]>
*/
#include <linux/types.h>
#include <linux/sort.h>
#include <media/v4l2-mem2mem.h>
#include "hantro_hw.h"
#include "hantro_v4l2.h"
#define VDPU_SWREG(nr) ((nr) * 4)
#define VDPU_REG_DEC_OUT_BASE VDPU_SWREG(63)
#define VDPU_REG_RLC_VLC_BASE VDPU_SWREG(64)
#define VDPU_REG_QTABLE_BASE VDPU_SWREG(61)
#define VDPU_REG_DIR_MV_BASE VDPU_SWREG(62)
#define VDPU_REG_REFER_BASE(i) (VDPU_SWREG(84 + (i)))
#define VDPU_REG_DEC_E(v) ((v) ? BIT(0) : 0)
#define VDPU_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(11) : 0)
#define VDPU_REG_DEC_SCMD_DIS(v) ((v) ? BIT(10) : 0)
#define VDPU_REG_FILTERING_DIS(v) ((v) ? BIT(8) : 0)
#define VDPU_REG_PIC_FIXED_QUANT(v) ((v) ? BIT(7) : 0)
#define VDPU_REG_DEC_LATENCY(v) (((v) << 1) & GENMASK(6, 1))
#define VDPU_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
#define VDPU_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
#define VDPU_REG_APF_THRESHOLD(v) (((v) << 17) & GENMASK(30, 17))
#define VDPU_REG_STARTMB_X(v) (((v) << 8) & GENMASK(16, 8))
#define VDPU_REG_STARTMB_Y(v) (((v) << 0) & GENMASK(7, 0))
#define VDPU_REG_DEC_MODE(v) (((v) << 0) & GENMASK(3, 0))
#define VDPU_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(5) : 0)
#define VDPU_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(4) : 0)
#define VDPU_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(3) : 0)
#define VDPU_REG_DEC_INSWAP32_E(v) ((v) ? BIT(2) : 0)
#define VDPU_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(1) : 0)
#define VDPU_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(0) : 0)
#define VDPU_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(22) : 0)
#define VDPU_REG_DEC_MAX_BURST(v) (((v) << 16) & GENMASK(20, 16))
#define VDPU_REG_DEC_AXI_WR_ID(v) (((v) << 8) & GENMASK(15, 8))
#define VDPU_REG_DEC_AXI_RD_ID(v) (((v) << 0) & GENMASK(7, 0))
#define VDPU_REG_START_CODE_E(v) ((v) ? BIT(22) : 0)
#define VDPU_REG_CH_8PIX_ILEAV_E(v) ((v) ? BIT(21) : 0)
#define VDPU_REG_RLC_MODE_E(v) ((v) ? BIT(20) : 0)
#define VDPU_REG_PIC_INTERLACE_E(v) ((v) ? BIT(17) : 0)
#define VDPU_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(16) : 0)
#define VDPU_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(13) : 0)
#define VDPU_REG_WRITE_MVS_E(v) ((v) ? BIT(10) : 0)
#define VDPU_REG_SEQ_MBAFF_E(v) ((v) ? BIT(7) : 0)
#define VDPU_REG_PICORD_COUNT_E(v) ((v) ? BIT(6) : 0)
#define VDPU_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(5) : 0)
#define VDPU_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(4) : 0)
#define VDPU_REG_PRED_BC_TAP_0_0(v) (((v) << 22) & GENMASK(31, 22))
#define VDPU_REG_PRED_BC_TAP_0_1(v) (((v) << 12) & GENMASK(21, 12))
#define VDPU_REG_PRED_BC_TAP_0_2(v) (((v) << 2) & GENMASK(11, 2))
#define VDPU_REG_REFBU_E(v) ((v) ? BIT(31) : 0)
#define VDPU_REG_PINIT_RLIST_F9(v) (((v) << 25) & GENMASK(29, 25))
#define VDPU_REG_PINIT_RLIST_F8(v) (((v) << 20) & GENMASK(24, 20))
#define VDPU_REG_PINIT_RLIST_F7(v) (((v) << 15) & GENMASK(19, 15))
#define VDPU_REG_PINIT_RLIST_F6(v) (((v) << 10) & GENMASK(14, 10))
#define VDPU_REG_PINIT_RLIST_F5(v) (((v) << 5) & GENMASK(9, 5))
#define VDPU_REG_PINIT_RLIST_F4(v) (((v) << 0) & GENMASK(4, 0))
#define VDPU_REG_PINIT_RLIST_F15(v) (((v) << 25) & GENMASK(29, 25))
#define VDPU_REG_PINIT_RLIST_F14(v) (((v) << 20) & GENMASK(24, 20))
#define VDPU_REG_PINIT_RLIST_F13(v) (((v) << 15) & GENMASK(19, 15))
#define VDPU_REG_PINIT_RLIST_F12(v) (((v) << 10) & GENMASK(14, 10))
#define VDPU_REG_PINIT_RLIST_F11(v) (((v) << 5) & GENMASK(9, 5))
#define VDPU_REG_PINIT_RLIST_F10(v) (((v) << 0) & GENMASK(4, 0))
#define VDPU_REG_REFER1_NBR(v) (((v) << 16) & GENMASK(31, 16))
#define VDPU_REG_REFER0_NBR(v) (((v) << 0) & GENMASK(15, 0))
#define VDPU_REG_REFER3_NBR(v) (((v) << 16) & GENMASK(31, 16))
#define VDPU_REG_REFER2_NBR(v) (((v) << 0) & GENMASK(15, 0))
#define VDPU_REG_REFER5_NBR(v) (((v) << 16) & GENMASK(31, 16))
#define VDPU_REG_REFER4_NBR(v) (((v) << 0) & GENMASK(15, 0))
#define VDPU_REG_REFER7_NBR(v) (((v) << 16) & GENMASK(31, 16))
#define VDPU_REG_REFER6_NBR(v) (((v) << 0) & GENMASK(15, 0))
#define VDPU_REG_REFER9_NBR(v) (((v) << 16) & GENMASK(31, 16))
#define VDPU_REG_REFER8_NBR(v) (((v) << 0) & GENMASK(15, 0))
#define VDPU_REG_REFER11_NBR(v) (((v) << 16) & GENMASK(31, 16))
#define VDPU_REG_REFER10_NBR(v) (((v) << 0) & GENMASK(15, 0))
#define VDPU_REG_REFER13_NBR(v) (((v) << 16) & GENMASK(31, 16))
#define VDPU_REG_REFER12_NBR(v) (((v) << 0) & GENMASK(15, 0))
#define VDPU_REG_REFER15_NBR(v) (((v) << 16) & GENMASK(31, 16))
#define VDPU_REG_REFER14_NBR(v) (((v) << 0) & GENMASK(15, 0))
#define VDPU_REG_BINIT_RLIST_F5(v) (((v) << 25) & GENMASK(29, 25))
#define VDPU_REG_BINIT_RLIST_F4(v) (((v) << 20) & GENMASK(24, 20))
#define VDPU_REG_BINIT_RLIST_F3(v) (((v) << 15) & GENMASK(19, 15))
#define VDPU_REG_BINIT_RLIST_F2(v) (((v) << 10) & GENMASK(14, 10))
#define VDPU_REG_BINIT_RLIST_F1(v) (((v) << 5) & GENMASK(9, 5))
#define VDPU_REG_BINIT_RLIST_F0(v) (((v) << 0) & GENMASK(4, 0))
#define VDPU_REG_BINIT_RLIST_F11(v) (((v) << 25) & GENMASK(29, 25))
#define VDPU_REG_BINIT_RLIST_F10(v) (((v) << 20) & GENMASK(24, 20))
#define VDPU_REG_BINIT_RLIST_F9(v) (((v) << 15) & GENMASK(19, 15))
#define VDPU_REG_BINIT_RLIST_F8(v) (((v) << 10) & GENMASK(14, 10))
#define VDPU_REG_BINIT_RLIST_F7(v) (((v) << 5) & GENMASK(9, 5))
#define VDPU_REG_BINIT_RLIST_F6(v) (((v) << 0) & GENMASK(4, 0))
#define VDPU_REG_BINIT_RLIST_F15(v) (((v) << 15) & GENMASK(19, 15))
#define VDPU_REG_BINIT_RLIST_F14(v) (((v) << 10) & GENMASK(14, 10))
#define VDPU_REG_BINIT_RLIST_F13(v) (((v) << 5) & GENMASK(9, 5))
#define VDPU_REG_BINIT_RLIST_F12(v) (((v) << 0) & GENMASK(4, 0))
#define VDPU_REG_BINIT_RLIST_B5(v) (((v) << 25) & GENMASK(29, 25))
#define VDPU_REG_BINIT_RLIST_B4(v) (((v) << 20) & GENMASK(24, 20))
#define VDPU_REG_BINIT_RLIST_B3(v) (((v) << 15) & GENMASK(19, 15))
#define VDPU_REG_BINIT_RLIST_B2(v) (((v) << 10) & GENMASK(14, 10))
#define VDPU_REG_BINIT_RLIST_B1(v) (((v) << 5) & GENMASK(9, 5))
#define VDPU_REG_BINIT_RLIST_B0(v) (((v) << 0) & GENMASK(4, 0))
#define VDPU_REG_BINIT_RLIST_B11(v) (((v) << 25) & GENMASK(29, 25))
#define VDPU_REG_BINIT_RLIST_B10(v) (((v) << 20) & GENMASK(24, 20))
#define VDPU_REG_BINIT_RLIST_B9(v) (((v) << 15) & GENMASK(19, 15))
#define VDPU_REG_BINIT_RLIST_B8(v) (((v) << 10) & GENMASK(14, 10))
#define VDPU_REG_BINIT_RLIST_B7(v) (((v) << 5) & GENMASK(9, 5))
#define VDPU_REG_BINIT_RLIST_B6(v) (((v) << 0) & GENMASK(4, 0))
#define VDPU_REG_BINIT_RLIST_B15(v) (((v) << 15) & GENMASK(19, 15))
#define VDPU_REG_BINIT_RLIST_B14(v) (((v) << 10) & GENMASK(14, 10))
#define VDPU_REG_BINIT_RLIST_B13(v) (((v) << 5) & GENMASK(9, 5))
#define VDPU_REG_BINIT_RLIST_B12(v) (((v) << 0) & GENMASK(4, 0))
#define VDPU_REG_PINIT_RLIST_F3(v) (((v) << 15) & GENMASK(19, 15))
#define VDPU_REG_PINIT_RLIST_F2(v) (((v) << 10) & GENMASK(14, 10))
#define VDPU_REG_PINIT_RLIST_F1(v) (((v) << 5) & GENMASK(9, 5))
#define VDPU_REG_PINIT_RLIST_F0(v) (((v) << 0) & GENMASK(4, 0))
#define VDPU_REG_REFER_LTERM_E(v) (((v) << 0) & GENMASK(31, 0))
#define VDPU_REG_REFER_VALID_E(v) (((v) << 0) & GENMASK(31, 0))
#define VDPU_REG_STRM_START_BIT(v) (((v) << 0) & GENMASK(5, 0))
#define VDPU_REG_CH_QP_OFFSET2(v) (((v) << 22) & GENMASK(26, 22))
#define VDPU_REG_CH_QP_OFFSET(v) (((v) << 17) & GENMASK(21, 17))
#define VDPU_REG_PIC_MB_HEIGHT_P(v) (((v) << 9) & GENMASK(16, 9))
#define VDPU_REG_PIC_MB_WIDTH(v) (((v) << 0) & GENMASK(8, 0))
#define VDPU_REG_WEIGHT_BIPR_IDC(v) (((v) << 16) & GENMASK(17, 16))
#define VDPU_REG_REF_FRAMES(v) (((v) << 0) & GENMASK(4, 0))
#define VDPU_REG_FILT_CTRL_PRES(v) ((v) ? BIT(31) : 0)
#define VDPU_REG_RDPIC_CNT_PRES(v) ((v) ? BIT(30) : 0)
#define VDPU_REG_FRAMENUM_LEN(v) (((v) << 16) & GENMASK(20, 16))
#define VDPU_REG_FRAMENUM(v) (((v) << 0) & GENMASK(15, 0))
#define VDPU_REG_REFPIC_MK_LEN(v) (((v) << 16) & GENMASK(26, 16))
#define VDPU_REG_IDR_PIC_ID(v) (((v) << 0) & GENMASK(15, 0))
#define VDPU_REG_PPS_ID(v) (((v) << 24) & GENMASK(31, 24))
#define VDPU_REG_REFIDX1_ACTIVE(v) (((v) << 19) & GENMASK(23, 19))
#define VDPU_REG_REFIDX0_ACTIVE(v) (((v) << 14) & GENMASK(18, 14))
#define VDPU_REG_POC_LENGTH(v) (((v) << 0) & GENMASK(7, 0))
#define VDPU_REG_IDR_PIC_E(v) ((v) ? BIT(8) : 0)
#define VDPU_REG_DIR_8X8_INFER_E(v) ((v) ? BIT(7) : 0)
#define VDPU_REG_BLACKWHITE_E(v) ((v) ? BIT(6) : 0)
#define VDPU_REG_CABAC_E(v) ((v) ? BIT(5) : 0)
#define VDPU_REG_WEIGHT_PRED_E(v) ((v) ? BIT(4) : 0)
#define VDPU_REG_CONST_INTRA_E(v) ((v) ? BIT(3) : 0)
#define VDPU_REG_8X8TRANS_FLAG_E(v) ((v) ? BIT(2) : 0)
#define VDPU_REG_TYPE1_QUANT_E(v) ((v) ? BIT(1) : 0)
#define VDPU_REG_FIELDPIC_FLAG_E(v) ((v) ? BIT(0) : 0)
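/* Program the per-frame H264 decode parameters into the VDPU registers. */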
static void set_params(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
{
const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
const struct v4l2_ctrl_h264_sps *sps = ctrls->sps;
const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
struct hantro_dev *vpu = ctx->dev;
u32 reg;
reg = VDPU_REG_DEC_ADV_PRE_DIS(0) |
VDPU_REG_DEC_SCMD_DIS(0) |
VDPU_REG_FILTERING_DIS(0) |
VDPU_REG_PIC_FIXED_QUANT(0) |
VDPU_REG_DEC_LATENCY(0);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(50));
reg = VDPU_REG_INIT_QP(pps->pic_init_qp_minus26 + 26) |
VDPU_REG_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(51));
reg = VDPU_REG_APF_THRESHOLD(8) |
VDPU_REG_STARTMB_X(0) |
VDPU_REG_STARTMB_Y(0);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(52));
reg = VDPU_REG_DEC_MODE(0);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(53));
reg = VDPU_REG_DEC_STRENDIAN_E(1) |
VDPU_REG_DEC_STRSWAP32_E(1) |
VDPU_REG_DEC_OUTSWAP32_E(1) |
VDPU_REG_DEC_INSWAP32_E(1) |
VDPU_REG_DEC_OUT_ENDIAN(1) |
VDPU_REG_DEC_IN_ENDIAN(0);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(54));
reg = VDPU_REG_DEC_DATA_DISC_E(0) |
VDPU_REG_DEC_MAX_BURST(16) |
VDPU_REG_DEC_AXI_WR_ID(0) |
VDPU_REG_DEC_AXI_RD_ID(0xff);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(56));
reg = VDPU_REG_START_CODE_E(1) |
VDPU_REG_CH_8PIX_ILEAV_E(0) |
VDPU_REG_RLC_MODE_E(0) |
VDPU_REG_PIC_INTERLACE_E(!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)) |
VDPU_REG_PIC_FIELDMODE_E(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) |
VDPU_REG_PIC_TOPFIELD_E(!(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)) |
VDPU_REG_WRITE_MVS_E((sps->profile_idc > 66) && dec_param->nal_ref_idc) |
VDPU_REG_SEQ_MBAFF_E(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD) |
VDPU_REG_PICORD_COUNT_E(sps->profile_idc > 66) |
VDPU_REG_DEC_TIMEOUT_E(1) |
VDPU_REG_DEC_CLK_GATE_E(1);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
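/*
 * H.264 half-pel luma interpolation uses the symmetric 6-tap filter
 * (1, -5, 20, 20, -5, 1), so only the first three taps need to be
 * programmed.
 */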
reg = VDPU_REG_PRED_BC_TAP_0_0(1) |
VDPU_REG_PRED_BC_TAP_0_1((u32)-5) |
VDPU_REG_PRED_BC_TAP_0_2(20);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(59));
reg = VDPU_REG_REFBU_E(0);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(65));
reg = VDPU_REG_STRM_START_BIT(0);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(109));
reg = VDPU_REG_CH_QP_OFFSET2(pps->second_chroma_qp_index_offset) |
VDPU_REG_CH_QP_OFFSET(pps->chroma_qp_index_offset) |
VDPU_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->src_fmt.height)) |
VDPU_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(110));
reg = VDPU_REG_WEIGHT_BIPR_IDC(pps->weighted_bipred_idc) |
VDPU_REG_REF_FRAMES(sps->max_num_ref_frames);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(111));
reg = VDPU_REG_FILT_CTRL_PRES(pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT) |
VDPU_REG_RDPIC_CNT_PRES(pps->flags & V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT) |
VDPU_REG_FRAMENUM_LEN(sps->log2_max_frame_num_minus4 + 4) |
VDPU_REG_FRAMENUM(dec_param->frame_num);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(112));
reg = VDPU_REG_REFPIC_MK_LEN(dec_param->dec_ref_pic_marking_bit_size) |
VDPU_REG_IDR_PIC_ID(dec_param->idr_pic_id);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(113));
reg = VDPU_REG_PPS_ID(pps->pic_parameter_set_id) |
VDPU_REG_REFIDX1_ACTIVE(pps->num_ref_idx_l1_default_active_minus1 + 1) |
VDPU_REG_REFIDX0_ACTIVE(pps->num_ref_idx_l0_default_active_minus1 + 1) |
VDPU_REG_POC_LENGTH(dec_param->pic_order_cnt_bit_size);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(114));
reg = VDPU_REG_IDR_PIC_E(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC) |
VDPU_REG_DIR_8X8_INFER_E(sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE) |
VDPU_REG_BLACKWHITE_E(sps->profile_idc >= 100 && sps->chroma_format_idc == 0) |
VDPU_REG_CABAC_E(pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE) |
VDPU_REG_WEIGHT_PRED_E(pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) |
VDPU_REG_CONST_INTRA_E(pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED) |
VDPU_REG_8X8TRANS_FLAG_E(pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE) |
VDPU_REG_TYPE1_QUANT_E(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT) |
VDPU_REG_FIELDPIC_FLAG_E(!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(115));
}
static void set_ref(struct hantro_ctx *ctx)
{
const struct v4l2_h264_reference *b0_reflist, *b1_reflist, *p_reflist;
struct hantro_dev *vpu = ctx->dev;
u32 reg;
int i;
b0_reflist = ctx->h264_dec.reflists.b0;
b1_reflist = ctx->h264_dec.reflists.b1;
p_reflist = ctx->h264_dec.reflists.p;
reg = VDPU_REG_PINIT_RLIST_F9(p_reflist[9].index) |
VDPU_REG_PINIT_RLIST_F8(p_reflist[8].index) |
VDPU_REG_PINIT_RLIST_F7(p_reflist[7].index) |
VDPU_REG_PINIT_RLIST_F6(p_reflist[6].index) |
VDPU_REG_PINIT_RLIST_F5(p_reflist[5].index) |
VDPU_REG_PINIT_RLIST_F4(p_reflist[4].index);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(74));
reg = VDPU_REG_PINIT_RLIST_F15(p_reflist[15].index) |
VDPU_REG_PINIT_RLIST_F14(p_reflist[14].index) |
VDPU_REG_PINIT_RLIST_F13(p_reflist[13].index) |
VDPU_REG_PINIT_RLIST_F12(p_reflist[12].index) |
VDPU_REG_PINIT_RLIST_F11(p_reflist[11].index) |
VDPU_REG_PINIT_RLIST_F10(p_reflist[10].index);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(75));
reg = VDPU_REG_REFER1_NBR(hantro_h264_get_ref_nbr(ctx, 1)) |
VDPU_REG_REFER0_NBR(hantro_h264_get_ref_nbr(ctx, 0));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(76));
reg = VDPU_REG_REFER3_NBR(hantro_h264_get_ref_nbr(ctx, 3)) |
VDPU_REG_REFER2_NBR(hantro_h264_get_ref_nbr(ctx, 2));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(77));
reg = VDPU_REG_REFER5_NBR(hantro_h264_get_ref_nbr(ctx, 5)) |
VDPU_REG_REFER4_NBR(hantro_h264_get_ref_nbr(ctx, 4));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(78));
reg = VDPU_REG_REFER7_NBR(hantro_h264_get_ref_nbr(ctx, 7)) |
VDPU_REG_REFER6_NBR(hantro_h264_get_ref_nbr(ctx, 6));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(79));
reg = VDPU_REG_REFER9_NBR(hantro_h264_get_ref_nbr(ctx, 9)) |
VDPU_REG_REFER8_NBR(hantro_h264_get_ref_nbr(ctx, 8));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(80));
reg = VDPU_REG_REFER11_NBR(hantro_h264_get_ref_nbr(ctx, 11)) |
VDPU_REG_REFER10_NBR(hantro_h264_get_ref_nbr(ctx, 10));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(81));
reg = VDPU_REG_REFER13_NBR(hantro_h264_get_ref_nbr(ctx, 13)) |
VDPU_REG_REFER12_NBR(hantro_h264_get_ref_nbr(ctx, 12));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(82));
reg = VDPU_REG_REFER15_NBR(hantro_h264_get_ref_nbr(ctx, 15)) |
VDPU_REG_REFER14_NBR(hantro_h264_get_ref_nbr(ctx, 14));
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(83));
reg = VDPU_REG_BINIT_RLIST_F5(b0_reflist[5].index) |
VDPU_REG_BINIT_RLIST_F4(b0_reflist[4].index) |
VDPU_REG_BINIT_RLIST_F3(b0_reflist[3].index) |
VDPU_REG_BINIT_RLIST_F2(b0_reflist[2].index) |
VDPU_REG_BINIT_RLIST_F1(b0_reflist[1].index) |
VDPU_REG_BINIT_RLIST_F0(b0_reflist[0].index);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(100));
reg = VDPU_REG_BINIT_RLIST_F11(b0_reflist[11].index) |
VDPU_REG_BINIT_RLIST_F10(b0_reflist[10].index) |
VDPU_REG_BINIT_RLIST_F9(b0_reflist[9].index) |
VDPU_REG_BINIT_RLIST_F8(b0_reflist[8].index) |
VDPU_REG_BINIT_RLIST_F7(b0_reflist[7].index) |
VDPU_REG_BINIT_RLIST_F6(b0_reflist[6].index);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(101));
reg = VDPU_REG_BINIT_RLIST_F15(b0_reflist[15].index) |
VDPU_REG_BINIT_RLIST_F14(b0_reflist[14].index) |
VDPU_REG_BINIT_RLIST_F13(b0_reflist[13].index) |
VDPU_REG_BINIT_RLIST_F12(b0_reflist[12].index);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(102));
reg = VDPU_REG_BINIT_RLIST_B5(b1_reflist[5].index) |
VDPU_REG_BINIT_RLIST_B4(b1_reflist[4].index) |
VDPU_REG_BINIT_RLIST_B3(b1_reflist[3].index) |
VDPU_REG_BINIT_RLIST_B2(b1_reflist[2].index) |
VDPU_REG_BINIT_RLIST_B1(b1_reflist[1].index) |
VDPU_REG_BINIT_RLIST_B0(b1_reflist[0].index);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(103));
reg = VDPU_REG_BINIT_RLIST_B11(b1_reflist[11].index) |
VDPU_REG_BINIT_RLIST_B10(b1_reflist[10].index) |
VDPU_REG_BINIT_RLIST_B9(b1_reflist[9].index) |
VDPU_REG_BINIT_RLIST_B8(b1_reflist[8].index) |
VDPU_REG_BINIT_RLIST_B7(b1_reflist[7].index) |
VDPU_REG_BINIT_RLIST_B6(b1_reflist[6].index);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(104));
reg = VDPU_REG_BINIT_RLIST_B15(b1_reflist[15].index) |
VDPU_REG_BINIT_RLIST_B14(b1_reflist[14].index) |
VDPU_REG_BINIT_RLIST_B13(b1_reflist[13].index) |
VDPU_REG_BINIT_RLIST_B12(b1_reflist[12].index);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(105));
reg = VDPU_REG_PINIT_RLIST_F3(p_reflist[3].index) |
VDPU_REG_PINIT_RLIST_F2(p_reflist[2].index) |
VDPU_REG_PINIT_RLIST_F1(p_reflist[1].index) |
VDPU_REG_PINIT_RLIST_F0(p_reflist[0].index);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(106));
reg = VDPU_REG_REFER_LTERM_E(ctx->h264_dec.dpb_longterm);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(107));
reg = VDPU_REG_REFER_VALID_E(ctx->h264_dec.dpb_valid);
vdpu_write_relaxed(vpu, reg, VDPU_SWREG(108));
/* Set up addresses of DPB buffers. */
for (i = 0; i < HANTRO_H264_DPB_SIZE; i++) {
dma_addr_t dma_addr = hantro_h264_get_ref_buf(ctx, i);
vdpu_write_relaxed(vpu, dma_addr, VDPU_REG_REFER_BASE(i));
}
}
static void set_buffers(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
{
const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
struct vb2_v4l2_buffer *dst_buf;
struct hantro_dev *vpu = ctx->dev;
dma_addr_t src_dma, dst_dma;
size_t offset = 0;
/* Source (stream) buffer. */
src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
vdpu_write_relaxed(vpu, src_dma, VDPU_REG_RLC_VLC_BASE);
/* Destination (decoded frame) buffer. */
dst_buf = hantro_get_dst_buf(ctx);
dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf);
/* Adjust dma addr to start at second line for bottom field */
if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
offset = ALIGN(ctx->src_fmt.width, MB_DIM);
vdpu_write_relaxed(vpu, dst_dma + offset, VDPU_REG_DEC_OUT_BASE);
/* Higher profiles require a DMV buffer appended to the reference frames. */
if (ctrls->sps->profile_idc > 66 && ctrls->decode->nal_ref_idc) {
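/* 384 bytes = 256 luma + 128 chroma bytes per 16x16 macroblock (4:2:0). */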
unsigned int bytes_per_mb = 384;
/* The DMV buffer for monochrome starts directly after the Y plane */
if (ctrls->sps->profile_idc >= 100 &&
ctrls->sps->chroma_format_idc == 0)
bytes_per_mb = 256;
offset = bytes_per_mb * MB_WIDTH(ctx->src_fmt.width) *
MB_HEIGHT(ctx->src_fmt.height);
/*
* DMV buffer is split in two for field encoded frames,
* adjust offset for bottom field
*/
if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
offset += 32 * MB_WIDTH(ctx->src_fmt.width) *
MB_HEIGHT(ctx->src_fmt.height);
vdpu_write_relaxed(vpu, dst_dma + offset, VDPU_REG_DIR_MV_BASE);
}
/* Auxiliary buffer prepared in hantro_h264_dec_prepare_run(). */
vdpu_write_relaxed(vpu, ctx->h264_dec.priv.dma, VDPU_REG_QTABLE_BASE);
}
int rockchip_vpu2_h264_dec_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf;
u32 reg;
int ret;
/* Prepare the H264 decoder context. */
ret = hantro_h264_dec_prepare_run(ctx);
if (ret)
return ret;
src_buf = hantro_get_src_buf(ctx);
set_params(ctx, src_buf);
set_ref(ctx);
set_buffers(ctx, src_buf);
hantro_end_prepare_run(ctx);
/* Start decoding! */
reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
vdpu_write(vpu, reg, VDPU_SWREG(57));
return 0;
}
| linux-master | drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
*/
#include "hantro.h"
/*
* Probability table, packed in the layout expected by the hardware.
*/
struct vp8_prob_tbl_packed {
u8 prob_mb_skip_false;
u8 prob_intra;
u8 prob_ref_last;
u8 prob_ref_golden;
u8 prob_segment[3];
u8 padding0;
u8 prob_luma_16x16_pred_mode[4];
u8 prob_chroma_pred_mode[3];
u8 padding1;
/* mv prob */
u8 prob_mv_context[2][V4L2_VP8_MV_PROB_CNT];
u8 padding2[2];
/* coeff probs */
u8 prob_coeffs[4][8][3][V4L2_VP8_COEFF_PROB_CNT];
u8 padding3[96];
};
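/*
 * The packed layout above works out to 1208 bytes: 16 header/mode
 * bytes, 40 mv bytes (2 * 19 + 2 padding) and 1152 coefficient bytes
 * (4 * 8 * 3 * 11 + 96 padding).
 */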
/*
* filter taps taken to 7-bit precision,
* reference RFC6386#Page-16, filters[8][6]
*/
const u32 hantro_vp8_dec_mc_filter[8][6] = {
{ 0, 0, 128, 0, 0, 0 },
{ 0, -6, 123, 12, -1, 0 },
{ 2, -11, 108, 36, -8, 1 },
{ 0, -9, 93, 50, -6, 0 },
{ 3, -16, 77, 77, -16, 3 },
{ 0, -6, 50, 93, -9, 0 },
{ 1, -8, 36, 108, -11, 2 },
{ 0, -1, 12, 123, -6, 0 }
};
void hantro_vp8_prob_update(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr)
{
const struct v4l2_vp8_entropy *entropy = &hdr->entropy;
u32 i, j, k;
u8 *dst;
/* first probs */
dst = ctx->vp8_dec.prob_tbl.cpu;
dst[0] = hdr->prob_skip_false;
dst[1] = hdr->prob_intra;
dst[2] = hdr->prob_last;
dst[3] = hdr->prob_gf;
dst[4] = hdr->segment.segment_probs[0];
dst[5] = hdr->segment.segment_probs[1];
dst[6] = hdr->segment.segment_probs[2];
dst[7] = 0;
dst += 8;
dst[0] = entropy->y_mode_probs[0];
dst[1] = entropy->y_mode_probs[1];
dst[2] = entropy->y_mode_probs[2];
dst[3] = entropy->y_mode_probs[3];
dst[4] = entropy->uv_mode_probs[0];
dst[5] = entropy->uv_mode_probs[1];
dst[6] = entropy->uv_mode_probs[2];
dst[7] = 0; /* unused */
/* mv probs */
dst += 8;
dst[0] = entropy->mv_probs[0][0]; /* is short */
dst[1] = entropy->mv_probs[1][0];
dst[2] = entropy->mv_probs[0][1]; /* sign */
dst[3] = entropy->mv_probs[1][1];
dst[4] = entropy->mv_probs[0][8 + 9];
dst[5] = entropy->mv_probs[0][9 + 9];
dst[6] = entropy->mv_probs[1][8 + 9];
dst[7] = entropy->mv_probs[1][9 + 9];
dst += 8;
for (i = 0; i < 2; ++i) {
for (j = 0; j < 8; j += 4) {
dst[0] = entropy->mv_probs[i][j + 9 + 0];
dst[1] = entropy->mv_probs[i][j + 9 + 1];
dst[2] = entropy->mv_probs[i][j + 9 + 2];
dst[3] = entropy->mv_probs[i][j + 9 + 3];
dst += 4;
}
}
for (i = 0; i < 2; ++i) {
dst[0] = entropy->mv_probs[i][0 + 2];
dst[1] = entropy->mv_probs[i][1 + 2];
dst[2] = entropy->mv_probs[i][2 + 2];
dst[3] = entropy->mv_probs[i][3 + 2];
dst[4] = entropy->mv_probs[i][4 + 2];
dst[5] = entropy->mv_probs[i][5 + 2];
dst[6] = entropy->mv_probs[i][6 + 2];
dst[7] = 0; /* unused */
dst += 8;
}
/* coeff probs (header part) */
dst = ctx->vp8_dec.prob_tbl.cpu;
dst += (8 * 7);
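/* the 56 bytes (7 rows of 8) above hold the frame, mode and mv probs */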
for (i = 0; i < 4; ++i) {
for (j = 0; j < 8; ++j) {
for (k = 0; k < 3; ++k) {
dst[0] = entropy->coeff_probs[i][j][k][0];
dst[1] = entropy->coeff_probs[i][j][k][1];
dst[2] = entropy->coeff_probs[i][j][k][2];
dst[3] = entropy->coeff_probs[i][j][k][3];
dst += 4;
}
}
}
/* coeff probs (footer part) */
dst = ctx->vp8_dec.prob_tbl.cpu;
dst += (8 * 55);
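/* rows 7..54 hold the first four bytes of each coeff context (4 * 8 * 3 * 4 = 384 bytes); the remaining bytes start at row 55 */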
for (i = 0; i < 4; ++i) {
for (j = 0; j < 8; ++j) {
for (k = 0; k < 3; ++k) {
dst[0] = entropy->coeff_probs[i][j][k][4];
dst[1] = entropy->coeff_probs[i][j][k][5];
dst[2] = entropy->coeff_probs[i][j][k][6];
dst[3] = entropy->coeff_probs[i][j][k][7];
dst[4] = entropy->coeff_probs[i][j][k][8];
dst[5] = entropy->coeff_probs[i][j][k][9];
dst[6] = entropy->coeff_probs[i][j][k][10];
dst[7] = 0; /* unused */
dst += 8;
}
}
}
}
int hantro_vp8_dec_init(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_aux_buf *aux_buf;
unsigned int mb_width, mb_height;
size_t segment_map_size;
int ret;
/*
 * Segment map size: each macroblock carries a 2-bit segment id
 * (4 MBs per byte), rounded up to a 64-byte multiple.
 */
mb_width = DIV_ROUND_UP(ctx->dst_fmt.width, 16);
mb_height = DIV_ROUND_UP(ctx->dst_fmt.height, 16);
segment_map_size = round_up(DIV_ROUND_UP(mb_width * mb_height, 4), 64);
/*
* The DMA buffer for the segment map must be allocated at context
* init time, and its contents must start out as all zeros.
*/
aux_buf = &ctx->vp8_dec.segment_map;
aux_buf->size = segment_map_size;
aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
&aux_buf->dma, GFP_KERNEL);
if (!aux_buf->cpu)
return -ENOMEM;
/*
* Allocate the probability table buffer: 1208 bytes in total, so a
* single 4K page is more than enough.
*/
aux_buf = &ctx->vp8_dec.prob_tbl;
aux_buf->size = sizeof(struct vp8_prob_tbl_packed);
aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
&aux_buf->dma, GFP_KERNEL);
if (!aux_buf->cpu) {
ret = -ENOMEM;
goto err_free_seg_map;
}
return 0;
err_free_seg_map:
dma_free_coherent(vpu->dev, ctx->vp8_dec.segment_map.size,
ctx->vp8_dec.segment_map.cpu,
ctx->vp8_dec.segment_map.dma);
return ret;
}
void hantro_vp8_dec_exit(struct hantro_ctx *ctx)
{
struct hantro_vp8_dec_hw_ctx *vp8_dec = &ctx->vp8_dec;
struct hantro_dev *vpu = ctx->dev;
dma_free_coherent(vpu->dev, vp8_dec->segment_map.size,
vp8_dec->segment_map.cpu, vp8_dec->segment_map.dma);
dma_free_coherent(vpu->dev, vp8_dec->prob_tbl.size,
vp8_dec->prob_tbl.cpu, vp8_dec->prob_tbl.dma);
}
| linux-master | drivers/media/platform/verisilicon/hantro_vp8.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (C) 2019 Pengutronix, Philipp Zabel <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include "hantro.h"
#include "hantro_jpeg.h"
#include "hantro_g1_regs.h"
#include "hantro_g2_regs.h"
#define CTRL_SOFT_RESET 0x00
#define RESET_G1 BIT(1)
#define RESET_G2 BIT(0)
#define CTRL_CLOCK_ENABLE 0x04
#define CLOCK_G1 BIT(1)
#define CLOCK_G2 BIT(0)
#define CTRL_G1_DEC_FUSE 0x08
#define CTRL_G1_PP_FUSE 0x0c
#define CTRL_G2_DEC_FUSE 0x10
static void imx8m_soft_reset(struct hantro_dev *vpu, u32 reset_bits)
{
u32 val;
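/* The reset bits are active low: clearing a bit asserts reset, setting it releases it. */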
/* Assert */
val = readl(vpu->ctrl_base + CTRL_SOFT_RESET);
val &= ~reset_bits;
writel(val, vpu->ctrl_base + CTRL_SOFT_RESET);
udelay(2);
/* Release */
val = readl(vpu->ctrl_base + CTRL_SOFT_RESET);
val |= reset_bits;
writel(val, vpu->ctrl_base + CTRL_SOFT_RESET);
}
static void imx8m_clk_enable(struct hantro_dev *vpu, u32 clock_bits)
{
u32 val;
val = readl(vpu->ctrl_base + CTRL_CLOCK_ENABLE);
val |= clock_bits;
writel(val, vpu->ctrl_base + CTRL_CLOCK_ENABLE);
}
static int imx8mq_runtime_resume(struct hantro_dev *vpu)
{
int ret;
ret = clk_bulk_prepare_enable(vpu->variant->num_clocks, vpu->clocks);
if (ret) {
dev_err(vpu->dev, "Failed to enable clocks\n");
return ret;
}
imx8m_soft_reset(vpu, RESET_G1 | RESET_G2);
imx8m_clk_enable(vpu, CLOCK_G1 | CLOCK_G2);
/* Set all fuse register bits to enable every decoder/PP feature */
writel(0xffffffff, vpu->ctrl_base + CTRL_G1_DEC_FUSE);
writel(0xffffffff, vpu->ctrl_base + CTRL_G1_PP_FUSE);
writel(0xffffffff, vpu->ctrl_base + CTRL_G2_DEC_FUSE);
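/*
 * The clocks were presumably only needed to program the reset and
 * fuse registers above; runtime PM enables them again per job.
 */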
clk_bulk_disable_unprepare(vpu->variant->num_clocks, vpu->clocks);
return 0;
}
/*
* Supported formats.
*/
static const struct hantro_fmt imx8m_vpu_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.codec_mode = HANTRO_MODE_NONE,
.postprocessed = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
};
static const struct hantro_fmt imx8m_vpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_VP8_FRAME,
.codec_mode = HANTRO_MODE_VP8_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_H264_SLICE,
.codec_mode = HANTRO_MODE_H264_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
};
static const struct hantro_fmt imx8m_vpu_g2_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.postprocessed = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_P010,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.postprocessed = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
};
static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12_4L4,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = TILE_MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = TILE_MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_P010_4L4,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = TILE_MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = TILE_MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_HEVC_SLICE,
.codec_mode = HANTRO_MODE_HEVC_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = TILE_MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = TILE_MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_VP9_FRAME,
.codec_mode = HANTRO_MODE_VP9_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = TILE_MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = TILE_MB_DIM,
},
},
};
static irqreturn_t imx8m_vpu_g1_irq(int irq, void *dev_id)
{
struct hantro_dev *vpu = dev_id;
enum vb2_buffer_state state;
u32 status;
status = vdpu_read(vpu, G1_REG_INTERRUPT);
state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
vdpu_write(vpu, 0, G1_REG_INTERRUPT);
vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
hantro_irq_done(vpu, state);
return IRQ_HANDLED;
}
static int imx8mq_vpu_hw_init(struct hantro_dev *vpu)
{
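/* The "ctrl" register block is the last entry in the variant's reg_names. */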
vpu->ctrl_base = vpu->reg_bases[vpu->variant->num_regs - 1];
return 0;
}
static void imx8m_vpu_g1_reset(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
imx8m_soft_reset(vpu, RESET_G1);
}
/*
* Supported codec ops.
*/
static const struct hantro_codec_ops imx8mq_vpu_codec_ops[] = {
[HANTRO_MODE_MPEG2_DEC] = {
.run = hantro_g1_mpeg2_dec_run,
.reset = imx8m_vpu_g1_reset,
.init = hantro_mpeg2_dec_init,
.exit = hantro_mpeg2_dec_exit,
},
[HANTRO_MODE_VP8_DEC] = {
.run = hantro_g1_vp8_dec_run,
.reset = imx8m_vpu_g1_reset,
.init = hantro_vp8_dec_init,
.exit = hantro_vp8_dec_exit,
},
[HANTRO_MODE_H264_DEC] = {
.run = hantro_g1_h264_dec_run,
.reset = imx8m_vpu_g1_reset,
.init = hantro_h264_dec_init,
.exit = hantro_h264_dec_exit,
},
};
static const struct hantro_codec_ops imx8mq_vpu_g1_codec_ops[] = {
[HANTRO_MODE_MPEG2_DEC] = {
.run = hantro_g1_mpeg2_dec_run,
.init = hantro_mpeg2_dec_init,
.exit = hantro_mpeg2_dec_exit,
},
[HANTRO_MODE_VP8_DEC] = {
.run = hantro_g1_vp8_dec_run,
.init = hantro_vp8_dec_init,
.exit = hantro_vp8_dec_exit,
},
[HANTRO_MODE_H264_DEC] = {
.run = hantro_g1_h264_dec_run,
.init = hantro_h264_dec_init,
.exit = hantro_h264_dec_exit,
},
};
static const struct hantro_codec_ops imx8mq_vpu_g2_codec_ops[] = {
[HANTRO_MODE_HEVC_DEC] = {
.run = hantro_g2_hevc_dec_run,
.init = hantro_hevc_dec_init,
.exit = hantro_hevc_dec_exit,
},
[HANTRO_MODE_VP9_DEC] = {
.run = hantro_g2_vp9_dec_run,
.done = hantro_g2_vp9_dec_done,
.init = hantro_vp9_dec_init,
.exit = hantro_vp9_dec_exit,
},
};
/*
* VPU variants.
*/
static const struct hantro_irq imx8mq_irqs[] = {
{ "g1", imx8m_vpu_g1_irq },
};
static const struct hantro_irq imx8mq_g2_irqs[] = {
{ "g2", hantro_g2_irq },
};
static const char * const imx8mq_clk_names[] = { "g1", "g2", "bus" };
static const char * const imx8mq_reg_names[] = { "g1", "g2", "ctrl" };
static const char * const imx8mq_g1_clk_names[] = { "g1" };
static const char * const imx8mq_g2_clk_names[] = { "g2" };
const struct hantro_variant imx8mq_vpu_variant = {
.dec_fmts = imx8m_vpu_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
.postproc_fmts = imx8m_vpu_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_postproc_fmts),
.postproc_ops = &hantro_g1_postproc_ops,
.codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
HANTRO_H264_DECODER,
.codec_ops = imx8mq_vpu_codec_ops,
.init = imx8mq_vpu_hw_init,
.runtime_resume = imx8mq_runtime_resume,
.irqs = imx8mq_irqs,
.num_irqs = ARRAY_SIZE(imx8mq_irqs),
.clk_names = imx8mq_clk_names,
.num_clocks = ARRAY_SIZE(imx8mq_clk_names),
.reg_names = imx8mq_reg_names,
.num_regs = ARRAY_SIZE(imx8mq_reg_names)
};
const struct hantro_variant imx8mq_vpu_g1_variant = {
.dec_fmts = imx8m_vpu_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
.postproc_fmts = imx8m_vpu_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_postproc_fmts),
.postproc_ops = &hantro_g1_postproc_ops,
.codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
HANTRO_H264_DECODER,
.codec_ops = imx8mq_vpu_g1_codec_ops,
.irqs = imx8mq_irqs,
.num_irqs = ARRAY_SIZE(imx8mq_irqs),
.clk_names = imx8mq_g1_clk_names,
.num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
};
const struct hantro_variant imx8mq_vpu_g2_variant = {
.dec_offset = 0x0,
.dec_fmts = imx8m_vpu_g2_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(imx8m_vpu_g2_dec_fmts),
.postproc_fmts = imx8m_vpu_g2_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_g2_postproc_fmts),
.postproc_ops = &hantro_g2_postproc_ops,
.codec = HANTRO_HEVC_DECODER | HANTRO_VP9_DECODER,
.codec_ops = imx8mq_vpu_g2_codec_ops,
.irqs = imx8mq_g2_irqs,
.num_irqs = ARRAY_SIZE(imx8mq_g2_irqs),
.clk_names = imx8mq_g2_clk_names,
.num_clocks = ARRAY_SIZE(imx8mq_g2_clk_names),
};
const struct hantro_variant imx8mm_vpu_g1_variant = {
.dec_fmts = imx8m_vpu_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
.codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
HANTRO_H264_DECODER,
.codec_ops = imx8mq_vpu_g1_codec_ops,
.irqs = imx8mq_irqs,
.num_irqs = ARRAY_SIZE(imx8mq_irqs),
.clk_names = imx8mq_g1_clk_names,
.num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
};
| linux-master | drivers/media/platform/verisilicon/imx8m_vpu_hw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
*/
#include <asm/unaligned.h>
#include <media/v4l2-mem2mem.h>
#include "hantro_jpeg.h"
#include "hantro.h"
#include "hantro_v4l2.h"
#include "hantro_hw.h"
#include "hantro_h1_regs.h"
#define H1_JPEG_QUANT_TABLE_COUNT 16
static void hantro_h1_set_src_img_ctrl(struct hantro_dev *vpu,
struct hantro_ctx *ctx)
{
u32 overfill_r, overfill_b;
u32 reg;
/*
* The format width and height are already macroblock aligned
* by the .vidioc_s_fmt_vid_cap_mplane() callback. The destination
* format width and height can be further modified by
* .vidioc_s_selection(), and the width remains 4-aligned.
*/
overfill_r = ctx->src_fmt.width - ctx->dst_fmt.width;
overfill_b = ctx->src_fmt.height - ctx->dst_fmt.height;
reg = H1_REG_IN_IMG_CTRL_ROW_LEN(ctx->src_fmt.width)
| H1_REG_IN_IMG_CTRL_OVRFLR_D4(overfill_r / 4)
| H1_REG_IN_IMG_CTRL_OVRFLB(overfill_b)
| H1_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
vepu_write_relaxed(vpu, reg, H1_REG_IN_IMG_CTRL);
}
static void hantro_h1_jpeg_enc_set_buffers(struct hantro_dev *vpu,
struct hantro_ctx *ctx,
struct vb2_buffer *src_buf,
struct vb2_buffer *dst_buf)
{
struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
dma_addr_t src[3];
u32 size_left;
size_left = vb2_plane_size(dst_buf, 0) - ctx->vpu_dst_fmt->header_size;
if (WARN_ON(vb2_plane_size(dst_buf, 0) < ctx->vpu_dst_fmt->header_size))
size_left = 0;
WARN_ON(pix_fmt->num_planes > 3);
vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
ctx->vpu_dst_fmt->header_size,
H1_REG_ADDR_OUTPUT_STREAM);
vepu_write_relaxed(vpu, size_left, H1_REG_STR_BUF_LIMIT);
if (pix_fmt->num_planes == 1) {
src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
/* the single-plane formats we support are all packed (interleaved) */
vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
} else if (pix_fmt->num_planes == 2) {
src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
vepu_write_relaxed(vpu, src[1], H1_REG_ADDR_IN_PLANE_1);
} else {
src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
vepu_write_relaxed(vpu, src[1], H1_REG_ADDR_IN_PLANE_1);
vepu_write_relaxed(vpu, src[2], H1_REG_ADDR_IN_PLANE_2);
}
}
static void
hantro_h1_jpeg_enc_set_qtable(struct hantro_dev *vpu,
unsigned char *luma_qtable,
unsigned char *chroma_qtable)
{
u32 reg, i;
__be32 *luma_qtable_p;
__be32 *chroma_qtable_p;
luma_qtable_p = (__be32 *)luma_qtable;
chroma_qtable_p = (__be32 *)chroma_qtable;
/*
* Quantization table registers must be written in contiguous blocks.
* DO NOT collapse the below two "for" loops into one.
*/
for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) {
reg = get_unaligned_be32(&luma_qtable_p[i]);
vepu_write_relaxed(vpu, reg, H1_REG_JPEG_LUMA_QUAT(i));
}
for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) {
reg = get_unaligned_be32(&chroma_qtable_p[i]);
vepu_write_relaxed(vpu, reg, H1_REG_JPEG_CHROMA_QUAT(i));
}
}
int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct hantro_jpeg_ctx jpeg_ctx;
u32 reg;
src_buf = hantro_get_src_buf(ctx);
dst_buf = hantro_get_dst_buf(ctx);
hantro_start_prepare_run(ctx);
memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
jpeg_ctx.width = ctx->dst_fmt.width;
jpeg_ctx.height = ctx->dst_fmt.height;
jpeg_ctx.quality = ctx->jpeg_quality;
hantro_jpeg_header_assemble(&jpeg_ctx);
/* Switch to JPEG encoder mode before writing registers */
vepu_write_relaxed(vpu, H1_REG_ENC_CTRL_ENC_MODE_JPEG,
H1_REG_ENC_CTRL);
hantro_h1_set_src_img_ctrl(vpu, ctx);
hantro_h1_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf,
&dst_buf->vb2_buf);
hantro_h1_jpeg_enc_set_qtable(vpu, jpeg_ctx.hw_luma_qtable,
jpeg_ctx.hw_chroma_qtable);
reg = H1_REG_AXI_CTRL_OUTPUT_SWAP16
| H1_REG_AXI_CTRL_INPUT_SWAP16
| H1_REG_AXI_CTRL_BURST_LEN(16)
| H1_REG_AXI_CTRL_OUTPUT_SWAP32
| H1_REG_AXI_CTRL_INPUT_SWAP32
| H1_REG_AXI_CTRL_OUTPUT_SWAP8
| H1_REG_AXI_CTRL_INPUT_SWAP8;
/* Make sure that all registers are written at this point. */
vepu_write(vpu, reg, H1_REG_AXI_CTRL);
reg = H1_REG_ENC_CTRL_WIDTH(MB_WIDTH(ctx->src_fmt.width))
| H1_REG_ENC_CTRL_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
| H1_REG_ENC_CTRL_ENC_MODE_JPEG
| H1_REG_ENC_PIC_INTRA
| H1_REG_ENC_CTRL_EN_BIT;
hantro_end_prepare_run(ctx);
vepu_write(vpu, reg, H1_REG_ENC_CTRL);
return 0;
}
void hantro_h1_jpeg_enc_done(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
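/* The buffer-limit register appears to read back the produced stream length in bits after encoding, hence the divide by 8. */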
u32 bytesused = vepu_read(vpu, H1_REG_STR_BUF_LIMIT) / 8;
struct vb2_v4l2_buffer *dst_buf = hantro_get_dst_buf(ctx);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
ctx->vpu_dst_fmt->header_size + bytesused);
}
| linux-master | drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Rockchip VPU codec vp8 decode driver
*
* Copyright (C) 2014 Rockchip Electronics Co., Ltd.
* ZhiChao Yu <[email protected]>
*
* Copyright (C) 2014 Google LLC.
* Tomasz Figa <[email protected]>
*
* Copyright (C) 2015 Rockchip Electronics Co., Ltd.
* Alpha Lin <[email protected]>
*/
#include <media/v4l2-mem2mem.h>
#include "hantro_hw.h"
#include "hantro.h"
#include "hantro_g1_regs.h"
#define VDPU_REG_DEC_CTRL0 0x0c8
#define VDPU_REG_STREAM_LEN 0x0cc
#define VDPU_REG_DEC_FORMAT 0x0d4
#define VDPU_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 0)
#define VDPU_REG_DATA_ENDIAN 0x0d8
#define VDPU_REG_CONFIG_DEC_STRENDIAN_E BIT(5)
#define VDPU_REG_CONFIG_DEC_STRSWAP32_E BIT(4)
#define VDPU_REG_CONFIG_DEC_OUTSWAP32_E BIT(3)
#define VDPU_REG_CONFIG_DEC_INSWAP32_E BIT(2)
#define VDPU_REG_CONFIG_DEC_OUT_ENDIAN BIT(1)
#define VDPU_REG_CONFIG_DEC_IN_ENDIAN BIT(0)
#define VDPU_REG_AXI_CTRL 0x0e0
#define VDPU_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 16)
#define VDPU_REG_EN_FLAGS 0x0e4
#define VDPU_REG_DEC_CTRL0_PIC_INTER_E BIT(14)
#define VDPU_REG_CONFIG_DEC_TIMEOUT_E BIT(5)
#define VDPU_REG_CONFIG_DEC_CLK_GATE_E BIT(4)
#define VDPU_REG_PRED_FLT 0x0ec
#define VDPU_REG_ADDR_QTABLE 0x0f4
#define VDPU_REG_ADDR_DST 0x0fc
#define VDPU_REG_ADDR_STR 0x100
#define VDPU_REG_VP8_PIC_MB_SIZE 0x1e0
#define VDPU_REG_VP8_DCT_START_BIT 0x1e4
#define VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
#define VDPU_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
#define VDPU_REG_VP8_CTRL0 0x1e8
#define VDPU_REG_VP8_DATA_VAL 0x1f0
#define VDPU_REG_PRED_FLT7 0x1f4
#define VDPU_REG_PRED_FLT8 0x1f8
#define VDPU_REG_PRED_FLT9 0x1fc
#define VDPU_REG_PRED_FLT10 0x200
#define VDPU_REG_FILTER_LEVEL 0x204
#define VDPU_REG_VP8_QUANTER0 0x208
#define VDPU_REG_VP8_ADDR_REF0 0x20c
#define VDPU_REG_FILTER_MB_ADJ 0x210
#define VDPU_REG_REF_PIC_FILT_TYPE_E BIT(31)
#define VDPU_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
#define VDPU_REG_FILTER_REF_ADJ 0x214
#define VDPU_REG_VP8_ADDR_REF2_5(i) (0x218 + ((i) * 0x4))
#define VDPU_REG_VP8_GREF_SIGN_BIAS BIT(0)
#define VDPU_REG_VP8_AREF_SIGN_BIAS BIT(0)
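/* DCT base registers; the macro skips offset 0x244, which is VDPU_REG_VP8_ADDR_CTRL_PART */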
#define VDPU_REG_VP8_DCT_BASE(i) \
(0x230 + ((((i) < 5) ? (i) : ((i) + 1)) * 0x4))
#define VDPU_REG_VP8_ADDR_CTRL_PART 0x244
#define VDPU_REG_VP8_SEGMENT_VAL 0x254
#define VDPU_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
#define VDPU_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
#define VDPU_REG_FWD_PIC1_SEGMENT_E BIT(0)
#define VDPU_REG_VP8_DCT_START_BIT2 0x258
#define VDPU_REG_VP8_QUANTER1 0x25c
#define VDPU_REG_VP8_QUANTER2 0x260
#define VDPU_REG_PRED_FLT1 0x264
#define VDPU_REG_PRED_FLT2 0x268
#define VDPU_REG_PRED_FLT3 0x26c
#define VDPU_REG_PRED_FLT4 0x270
#define VDPU_REG_PRED_FLT5 0x274
#define VDPU_REG_PRED_FLT6 0x278
static const struct hantro_reg vp8_dec_dct_base[8] = {
{ VDPU_REG_ADDR_STR, 0, 0xffffffff },
{ VDPU_REG_VP8_DCT_BASE(0), 0, 0xffffffff },
{ VDPU_REG_VP8_DCT_BASE(1), 0, 0xffffffff },
{ VDPU_REG_VP8_DCT_BASE(2), 0, 0xffffffff },
{ VDPU_REG_VP8_DCT_BASE(3), 0, 0xffffffff },
{ VDPU_REG_VP8_DCT_BASE(4), 0, 0xffffffff },
{ VDPU_REG_VP8_DCT_BASE(5), 0, 0xffffffff },
{ VDPU_REG_VP8_DCT_BASE(6), 0, 0xffffffff },
};
static const struct hantro_reg vp8_dec_lf_level[4] = {
{ VDPU_REG_FILTER_LEVEL, 18, 0x3f },
{ VDPU_REG_FILTER_LEVEL, 12, 0x3f },
{ VDPU_REG_FILTER_LEVEL, 6, 0x3f },
{ VDPU_REG_FILTER_LEVEL, 0, 0x3f },
};
static const struct hantro_reg vp8_dec_mb_adj[4] = {
{ VDPU_REG_FILTER_MB_ADJ, 21, 0x7f },
{ VDPU_REG_FILTER_MB_ADJ, 14, 0x7f },
{ VDPU_REG_FILTER_MB_ADJ, 7, 0x7f },
{ VDPU_REG_FILTER_MB_ADJ, 0, 0x7f },
};
static const struct hantro_reg vp8_dec_ref_adj[4] = {
{ VDPU_REG_FILTER_REF_ADJ, 21, 0x7f },
{ VDPU_REG_FILTER_REF_ADJ, 14, 0x7f },
{ VDPU_REG_FILTER_REF_ADJ, 7, 0x7f },
{ VDPU_REG_FILTER_REF_ADJ, 0, 0x7f },
};
static const struct hantro_reg vp8_dec_quant[4] = {
{ VDPU_REG_VP8_QUANTER0, 11, 0x7ff },
{ VDPU_REG_VP8_QUANTER0, 0, 0x7ff },
{ VDPU_REG_VP8_QUANTER1, 11, 0x7ff },
{ VDPU_REG_VP8_QUANTER1, 0, 0x7ff },
};
static const struct hantro_reg vp8_dec_quant_delta[5] = {
{ VDPU_REG_VP8_QUANTER0, 27, 0x1f },
{ VDPU_REG_VP8_QUANTER0, 22, 0x1f },
{ VDPU_REG_VP8_QUANTER1, 27, 0x1f },
{ VDPU_REG_VP8_QUANTER1, 22, 0x1f },
{ VDPU_REG_VP8_QUANTER2, 27, 0x1f },
};
static const struct hantro_reg vp8_dec_dct_start_bits[8] = {
{ VDPU_REG_VP8_CTRL0, 26, 0x3f },
{ VDPU_REG_VP8_DCT_START_BIT, 26, 0x3f },
{ VDPU_REG_VP8_DCT_START_BIT, 20, 0x3f },
{ VDPU_REG_VP8_DCT_START_BIT2, 24, 0x3f },
{ VDPU_REG_VP8_DCT_START_BIT2, 18, 0x3f },
{ VDPU_REG_VP8_DCT_START_BIT2, 12, 0x3f },
{ VDPU_REG_VP8_DCT_START_BIT2, 6, 0x3f },
{ VDPU_REG_VP8_DCT_START_BIT2, 0, 0x3f },
};
static const struct hantro_reg vp8_dec_pred_bc_tap[8][6] = {
{
{ 0, 0, 0},
{ VDPU_REG_PRED_FLT, 22, 0x3ff },
{ VDPU_REG_PRED_FLT, 12, 0x3ff },
{ VDPU_REG_PRED_FLT, 2, 0x3ff },
{ VDPU_REG_PRED_FLT1, 22, 0x3ff },
{ 0, 0, 0},
}, {
{ 0, 0, 0},
{ VDPU_REG_PRED_FLT1, 12, 0x3ff },
{ VDPU_REG_PRED_FLT1, 2, 0x3ff },
{ VDPU_REG_PRED_FLT2, 22, 0x3ff },
{ VDPU_REG_PRED_FLT2, 12, 0x3ff },
{ 0, 0, 0},
}, {
{ VDPU_REG_PRED_FLT10, 10, 0x3 },
{ VDPU_REG_PRED_FLT2, 2, 0x3ff },
{ VDPU_REG_PRED_FLT3, 22, 0x3ff },
{ VDPU_REG_PRED_FLT3, 12, 0x3ff },
{ VDPU_REG_PRED_FLT3, 2, 0x3ff },
{ VDPU_REG_PRED_FLT10, 8, 0x3},
}, {
{ 0, 0, 0},
{ VDPU_REG_PRED_FLT4, 22, 0x3ff },
{ VDPU_REG_PRED_FLT4, 12, 0x3ff },
{ VDPU_REG_PRED_FLT4, 2, 0x3ff },
{ VDPU_REG_PRED_FLT5, 22, 0x3ff },
{ 0, 0, 0},
}, {
{ VDPU_REG_PRED_FLT10, 6, 0x3 },
{ VDPU_REG_PRED_FLT5, 12, 0x3ff },
{ VDPU_REG_PRED_FLT5, 2, 0x3ff },
{ VDPU_REG_PRED_FLT6, 22, 0x3ff },
{ VDPU_REG_PRED_FLT6, 12, 0x3ff },
{ VDPU_REG_PRED_FLT10, 4, 0x3 },
}, {
{ 0, 0, 0},
{ VDPU_REG_PRED_FLT6, 2, 0x3ff },
{ VDPU_REG_PRED_FLT7, 22, 0x3ff },
{ VDPU_REG_PRED_FLT7, 12, 0x3ff },
{ VDPU_REG_PRED_FLT7, 2, 0x3ff },
{ 0, 0, 0},
}, {
{ VDPU_REG_PRED_FLT10, 2, 0x3 },
{ VDPU_REG_PRED_FLT8, 22, 0x3ff },
{ VDPU_REG_PRED_FLT8, 12, 0x3ff },
{ VDPU_REG_PRED_FLT8, 2, 0x3ff },
{ VDPU_REG_PRED_FLT9, 22, 0x3ff },
{ VDPU_REG_PRED_FLT10, 0, 0x3 },
}, {
{ 0, 0, 0},
{ VDPU_REG_PRED_FLT9, 12, 0x3ff },
{ VDPU_REG_PRED_FLT9, 2, 0x3ff },
{ VDPU_REG_PRED_FLT10, 22, 0x3ff },
{ VDPU_REG_PRED_FLT10, 12, 0x3ff },
{ 0, 0, 0},
},
};
static const struct hantro_reg vp8_dec_mb_start_bit = {
.base = VDPU_REG_VP8_CTRL0,
.shift = 18,
.mask = 0x3f
};
static const struct hantro_reg vp8_dec_mb_aligned_data_len = {
.base = VDPU_REG_VP8_DATA_VAL,
.shift = 0,
.mask = 0x3fffff
};
static const struct hantro_reg vp8_dec_num_dct_partitions = {
.base = VDPU_REG_VP8_DATA_VAL,
.shift = 24,
.mask = 0xf
};
static const struct hantro_reg vp8_dec_stream_len = {
.base = VDPU_REG_STREAM_LEN,
.shift = 0,
.mask = 0xffffff
};
static const struct hantro_reg vp8_dec_mb_width = {
.base = VDPU_REG_VP8_PIC_MB_SIZE,
.shift = 23,
.mask = 0x1ff
};
static const struct hantro_reg vp8_dec_mb_height = {
.base = VDPU_REG_VP8_PIC_MB_SIZE,
.shift = 11,
.mask = 0xff
};
static const struct hantro_reg vp8_dec_mb_width_ext = {
.base = VDPU_REG_VP8_PIC_MB_SIZE,
.shift = 3,
.mask = 0x7
};
static const struct hantro_reg vp8_dec_mb_height_ext = {
.base = VDPU_REG_VP8_PIC_MB_SIZE,
.shift = 0,
.mask = 0x7
};
static const struct hantro_reg vp8_dec_bool_range = {
.base = VDPU_REG_VP8_CTRL0,
.shift = 0,
.mask = 0xff
};
static const struct hantro_reg vp8_dec_bool_value = {
.base = VDPU_REG_VP8_CTRL0,
.shift = 8,
.mask = 0xff
};
static const struct hantro_reg vp8_dec_filter_disable = {
.base = VDPU_REG_DEC_CTRL0,
.shift = 8,
.mask = 1
};
static const struct hantro_reg vp8_dec_skip_mode = {
.base = VDPU_REG_DEC_CTRL0,
.shift = 9,
.mask = 1
};
static const struct hantro_reg vp8_dec_start_dec = {
.base = VDPU_REG_EN_FLAGS,
.shift = 0,
.mask = 1
};
static void cfg_lf(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr)
{
const struct v4l2_vp8_segment *seg = &hdr->segment;
const struct v4l2_vp8_loop_filter *lf = &hdr->lf;
struct hantro_dev *vpu = ctx->dev;
unsigned int i;
u32 reg;
if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
hantro_reg_write(vpu, &vp8_dec_lf_level[0], lf->level);
} else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
for (i = 0; i < 4; i++) {
u32 lf_level = clamp(lf->level + seg->lf_update[i],
0, 63);
hantro_reg_write(vpu, &vp8_dec_lf_level[i], lf_level);
}
} else {
for (i = 0; i < 4; i++)
hantro_reg_write(vpu, &vp8_dec_lf_level[i],
seg->lf_update[i]);
}
reg = VDPU_REG_REF_PIC_FILT_SHARPNESS(lf->sharpness_level);
if (lf->flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE)
reg |= VDPU_REG_REF_PIC_FILT_TYPE_E;
vdpu_write_relaxed(vpu, reg, VDPU_REG_FILTER_MB_ADJ);
if (lf->flags & V4L2_VP8_LF_ADJ_ENABLE) {
for (i = 0; i < 4; i++) {
hantro_reg_write(vpu, &vp8_dec_mb_adj[i],
lf->mb_mode_delta[i]);
hantro_reg_write(vpu, &vp8_dec_ref_adj[i],
lf->ref_frm_delta[i]);
}
}
}
static void cfg_qp(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr)
{
const struct v4l2_vp8_quantization *q = &hdr->quant;
const struct v4l2_vp8_segment *seg = &hdr->segment;
struct hantro_dev *vpu = ctx->dev;
unsigned int i;
if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi);
} else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
for (i = 0; i < 4; i++) {
u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],
0, 127);
hantro_reg_write(vpu, &vp8_dec_quant[i], quant);
}
} else {
for (i = 0; i < 4; i++)
hantro_reg_write(vpu, &vp8_dec_quant[i],
seg->quant_update[i]);
}
hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta);
hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta);
hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta);
hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta);
hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta);
}
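/*
 * Set the control-partition and DCT-partition registers; the VP8
 * stream layout is diagrammed in hantro_g1_vp8_dec.c:cfg_parts().
 */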
static void cfg_parts(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_src;
u32 first_part_offset = V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) ? 10 : 3;
u32 mb_size, mb_offset_bytes, mb_offset_bits, mb_start_bits;
u32 dct_size_part_size, dct_part_offset;
dma_addr_t src_dma;
u32 dct_part_total_len = 0;
u32 count = 0;
unsigned int i;
vb2_src = hantro_get_src_buf(ctx);
src_dma = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
/*
* Calculate the control-partition macroblock data info:
* @first_part_header_bits: bit offset of the mb data from the
* start of the first partition
* @mb_offset_bits: bit offset of the mb data from the src_dma
* base address
* @mb_offset_bytes: byte offset of the mb data from the src_dma
* base address
* @mb_start_bits: bit offset of the mb data from its 64-bit
* aligned address
*/
mb_offset_bits = first_part_offset * 8 +
hdr->first_part_header_bits + 8;
mb_offset_bytes = mb_offset_bits / 8;
mb_start_bits = mb_offset_bits -
(mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) * 8;
mb_size = hdr->first_part_size -
(mb_offset_bytes - first_part_offset) +
(mb_offset_bytes & DEC_8190_ALIGN_MASK);
/* Macroblock data aligned base addr */
vdpu_write_relaxed(vpu, (mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) +
src_dma, VDPU_REG_VP8_ADDR_CTRL_PART);
hantro_reg_write(vpu, &vp8_dec_mb_start_bit, mb_start_bits);
hantro_reg_write(vpu, &vp8_dec_mb_aligned_data_len, mb_size);
/*
* Calculate the DCT partition info:
* @dct_size_part_size: size of the area holding the DCT partition
* sizes; every DCT partition except the last has a 3-byte size
* field
* @dct_part_offset: byte offset of the DCT partitions from the
* src_dma base address
* @dct_part_total_len: total size of all DCT parts
*/
dct_size_part_size = (hdr->num_dct_parts - 1) * 3;
dct_part_offset = first_part_offset + hdr->first_part_size;
for (i = 0; i < hdr->num_dct_parts; i++)
dct_part_total_len += hdr->dct_part_sizes[i];
dct_part_total_len += dct_size_part_size;
dct_part_total_len += (dct_part_offset & DEC_8190_ALIGN_MASK);
/* Number of DCT partitions */
hantro_reg_write(vpu, &vp8_dec_num_dct_partitions,
hdr->num_dct_parts - 1);
/* DCT partition length */
hantro_reg_write(vpu, &vp8_dec_stream_len, dct_part_total_len);
/* DCT partitions base address */
for (i = 0; i < hdr->num_dct_parts; i++) {
u32 byte_offset = dct_part_offset + dct_size_part_size + count;
u32 base_addr = byte_offset + src_dma;
hantro_reg_write(vpu, &vp8_dec_dct_base[i],
base_addr & (~DEC_8190_ALIGN_MASK));
hantro_reg_write(vpu, &vp8_dec_dct_start_bits[i],
(byte_offset & DEC_8190_ALIGN_MASK) * 8);
count += hdr->dct_part_sizes[i];
}
}
/*
* Program the prediction filter taps for the normal (non-bilinear)
* 6-tap filters.
*/
static void cfg_tap(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr)
{
struct hantro_dev *vpu = ctx->dev;
int i, j;
if ((hdr->version & 0x03) != 0)
return; /* Tap filter not used. */
for (i = 0; i < 8; i++) {
for (j = 0; j < 6; j++) {
if (vp8_dec_pred_bc_tap[i][j].base != 0)
hantro_reg_write(vpu,
&vp8_dec_pred_bc_tap[i][j],
hantro_vp8_dec_mc_filter[i][j]);
}
}
}
static void cfg_ref(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr,
struct vb2_v4l2_buffer *vb2_dst)
{
struct hantro_dev *vpu = ctx->dev;
dma_addr_t ref;
ref = hantro_get_ref(ctx, hdr->last_frame_ts);
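/* A missing reference falls back to the destination buffer so the hardware always reads valid memory. */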
if (!ref) {
vpu_debug(0, "failed to find last frame ts=%llu\n",
hdr->last_frame_ts);
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
}
vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF0);
ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
if (!ref && hdr->golden_frame_ts)
vpu_debug(0, "failed to find golden frame ts=%llu\n",
hdr->golden_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
ref |= VDPU_REG_VP8_GREF_SIGN_BIAS;
vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(2));
ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
if (!ref && hdr->alt_frame_ts)
vpu_debug(0, "failed to find alt frame ts=%llu\n",
hdr->alt_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
ref |= VDPU_REG_VP8_AREF_SIGN_BIAS;
vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(3));
}
static void cfg_buffers(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr,
struct vb2_v4l2_buffer *vb2_dst)
{
const struct v4l2_vp8_segment *seg = &hdr->segment;
struct hantro_dev *vpu = ctx->dev;
dma_addr_t dst_dma;
u32 reg;
/* Set probability table buffer address */
vdpu_write_relaxed(vpu, ctx->vp8_dec.prob_tbl.dma,
VDPU_REG_ADDR_QTABLE);
/* Set segment map address */
reg = VDPU_REG_FWD_PIC1_SEGMENT_BASE(ctx->vp8_dec.segment_map.dma);
if (seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED) {
reg |= VDPU_REG_FWD_PIC1_SEGMENT_E;
if (seg->flags & V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP)
reg |= VDPU_REG_FWD_PIC1_SEGMENT_UPD_E;
}
vdpu_write_relaxed(vpu, reg, VDPU_REG_VP8_SEGMENT_VAL);
/* set output frame buffer address */
dst_dma = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
vdpu_write_relaxed(vpu, dst_dma, VDPU_REG_ADDR_DST);
}
int rockchip_vpu2_vp8_dec_run(struct hantro_ctx *ctx)
{
const struct v4l2_ctrl_vp8_frame *hdr;
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
size_t height = ctx->dst_fmt.height;
size_t width = ctx->dst_fmt.width;
u32 mb_width, mb_height;
u32 reg;
hantro_start_prepare_run(ctx);
hdr = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_VP8_FRAME);
if (WARN_ON(!hdr))
return -EINVAL;
/* Reset the segment map buffer on keyframes */
if (V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
memset(ctx->vp8_dec.segment_map.cpu, 0,
ctx->vp8_dec.segment_map.size);
hantro_vp8_prob_update(ctx, hdr);
/*
* Extensive testing shows that the hardware does not properly
* clear the internal state from a previous decoding run. This
* causes corruption in decoded frames for multi-instance use cases.
* A soft reset before programming the registers has been found
* to resolve those problems.
*/
ctx->codec_ops->reset(ctx);
reg = VDPU_REG_CONFIG_DEC_TIMEOUT_E
| VDPU_REG_CONFIG_DEC_CLK_GATE_E;
if (!V4L2_VP8_FRAME_IS_KEY_FRAME(hdr))
reg |= VDPU_REG_DEC_CTRL0_PIC_INTER_E;
vdpu_write_relaxed(vpu, reg, VDPU_REG_EN_FLAGS);
reg = VDPU_REG_CONFIG_DEC_STRENDIAN_E
| VDPU_REG_CONFIG_DEC_INSWAP32_E
| VDPU_REG_CONFIG_DEC_STRSWAP32_E
| VDPU_REG_CONFIG_DEC_OUTSWAP32_E
| VDPU_REG_CONFIG_DEC_IN_ENDIAN
| VDPU_REG_CONFIG_DEC_OUT_ENDIAN;
vdpu_write_relaxed(vpu, reg, VDPU_REG_DATA_ENDIAN);
reg = VDPU_REG_CONFIG_DEC_MAX_BURST(16);
vdpu_write_relaxed(vpu, reg, VDPU_REG_AXI_CTRL);
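/* Decode mode 10 selects VP8 on this core. */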
reg = VDPU_REG_DEC_CTRL0_DEC_MODE(10);
vdpu_write_relaxed(vpu, reg, VDPU_REG_DEC_FORMAT);
if (!(hdr->flags & V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF))
hantro_reg_write(vpu, &vp8_dec_skip_mode, 1);
if (hdr->lf.level == 0)
hantro_reg_write(vpu, &vp8_dec_filter_disable, 1);
/* Frame dimensions */
mb_width = MB_WIDTH(width);
mb_height = MB_HEIGHT(height);
hantro_reg_write(vpu, &vp8_dec_mb_width, mb_width);
hantro_reg_write(vpu, &vp8_dec_mb_height, mb_height);
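/* the base fields are 9 and 8 bits wide; the _ext fields carry the overflow bits for large frames */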
hantro_reg_write(vpu, &vp8_dec_mb_width_ext, mb_width >> 9);
hantro_reg_write(vpu, &vp8_dec_mb_height_ext, mb_height >> 8);
/* Boolean decoder */
hantro_reg_write(vpu, &vp8_dec_bool_range, hdr->coder_state.range);
hantro_reg_write(vpu, &vp8_dec_bool_value, hdr->coder_state.value);
reg = vdpu_read(vpu, VDPU_REG_VP8_DCT_START_BIT);
if (hdr->version != 3)
reg |= VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT;
if (hdr->version & 0x3)
reg |= VDPU_REG_DEC_CTRL4_BILIN_MC_E;
vdpu_write_relaxed(vpu, reg, VDPU_REG_VP8_DCT_START_BIT);
cfg_lf(ctx, hdr);
cfg_qp(ctx, hdr);
cfg_parts(ctx, hdr);
cfg_tap(ctx, hdr);
vb2_dst = hantro_get_dst_buf(ctx);
cfg_ref(ctx, hdr, vb2_dst);
cfg_buffers(ctx, hdr, vb2_dst);
hantro_end_prepare_run(ctx);
hantro_reg_write(vpu, &vp8_dec_start_dec, 1);
return 0;
}
| linux-master | drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VP8 codec driver
*
* Copyright (C) 2019 Rockchip Electronics Co., Ltd.
* ZhiChao Yu <[email protected]>
*
* Copyright (C) 2019 Google, Inc.
* Tomasz Figa <[email protected]>
*/
#include <media/v4l2-mem2mem.h>
#include "hantro_hw.h"
#include "hantro.h"
#include "hantro_g1_regs.h"
/* DCT partition base address regs */
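/* G1_REG_ADDR_REF(13) is skipped; cfg_parts() uses it for the macroblock data base address */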
static const struct hantro_reg vp8_dec_dct_base[8] = {
{ G1_REG_ADDR_STR, 0, 0xffffffff },
{ G1_REG_ADDR_REF(8), 0, 0xffffffff },
{ G1_REG_ADDR_REF(9), 0, 0xffffffff },
{ G1_REG_ADDR_REF(10), 0, 0xffffffff },
{ G1_REG_ADDR_REF(11), 0, 0xffffffff },
{ G1_REG_ADDR_REF(12), 0, 0xffffffff },
{ G1_REG_ADDR_REF(14), 0, 0xffffffff },
{ G1_REG_ADDR_REF(15), 0, 0xffffffff },
};
/* Loop filter level regs */
static const struct hantro_reg vp8_dec_lf_level[4] = {
{ G1_REG_REF_PIC(2), 18, 0x3f },
{ G1_REG_REF_PIC(2), 12, 0x3f },
{ G1_REG_REF_PIC(2), 6, 0x3f },
{ G1_REG_REF_PIC(2), 0, 0x3f },
};
/* Macroblock loop filter level adjustment regs */
static const struct hantro_reg vp8_dec_mb_adj[4] = {
{ G1_REG_REF_PIC(0), 21, 0x7f },
{ G1_REG_REF_PIC(0), 14, 0x7f },
{ G1_REG_REF_PIC(0), 7, 0x7f },
{ G1_REG_REF_PIC(0), 0, 0x7f },
};
/* Reference frame adjustment regs */
static const struct hantro_reg vp8_dec_ref_adj[4] = {
{ G1_REG_REF_PIC(1), 21, 0x7f },
{ G1_REG_REF_PIC(1), 14, 0x7f },
{ G1_REG_REF_PIC(1), 7, 0x7f },
{ G1_REG_REF_PIC(1), 0, 0x7f },
};
/* Quantizer */
static const struct hantro_reg vp8_dec_quant[4] = {
{ G1_REG_REF_PIC(3), 11, 0x7ff },
{ G1_REG_REF_PIC(3), 0, 0x7ff },
{ G1_REG_BD_REF_PIC(4), 11, 0x7ff },
{ G1_REG_BD_REF_PIC(4), 0, 0x7ff },
};
/* Quantizer delta regs */
static const struct hantro_reg vp8_dec_quant_delta[5] = {
{ G1_REG_REF_PIC(3), 27, 0x1f },
{ G1_REG_REF_PIC(3), 22, 0x1f },
{ G1_REG_BD_REF_PIC(4), 27, 0x1f },
{ G1_REG_BD_REF_PIC(4), 22, 0x1f },
{ G1_REG_BD_P_REF_PIC, 27, 0x1f },
};
/* DCT partition start bits regs */
static const struct hantro_reg vp8_dec_dct_start_bits[8] = {
{ G1_REG_DEC_CTRL2, 26, 0x3f }, { G1_REG_DEC_CTRL4, 26, 0x3f },
{ G1_REG_DEC_CTRL4, 20, 0x3f }, { G1_REG_DEC_CTRL7, 24, 0x3f },
{ G1_REG_DEC_CTRL7, 18, 0x3f }, { G1_REG_DEC_CTRL7, 12, 0x3f },
{ G1_REG_DEC_CTRL7, 6, 0x3f }, { G1_REG_DEC_CTRL7, 0, 0x3f },
};
/* Precision filter tap regs */
static const struct hantro_reg vp8_dec_pred_bc_tap[8][4] = {
{
{ G1_REG_PRED_FLT, 22, 0x3ff },
{ G1_REG_PRED_FLT, 12, 0x3ff },
{ G1_REG_PRED_FLT, 2, 0x3ff },
{ G1_REG_REF_PIC(4), 22, 0x3ff },
},
{
{ G1_REG_REF_PIC(4), 12, 0x3ff },
{ G1_REG_REF_PIC(4), 2, 0x3ff },
{ G1_REG_REF_PIC(5), 22, 0x3ff },
{ G1_REG_REF_PIC(5), 12, 0x3ff },
},
{
{ G1_REG_REF_PIC(5), 2, 0x3ff },
{ G1_REG_REF_PIC(6), 22, 0x3ff },
{ G1_REG_REF_PIC(6), 12, 0x3ff },
{ G1_REG_REF_PIC(6), 2, 0x3ff },
},
{
{ G1_REG_REF_PIC(7), 22, 0x3ff },
{ G1_REG_REF_PIC(7), 12, 0x3ff },
{ G1_REG_REF_PIC(7), 2, 0x3ff },
{ G1_REG_LT_REF, 22, 0x3ff },
},
{
{ G1_REG_LT_REF, 12, 0x3ff },
{ G1_REG_LT_REF, 2, 0x3ff },
{ G1_REG_VALID_REF, 22, 0x3ff },
{ G1_REG_VALID_REF, 12, 0x3ff },
},
{
{ G1_REG_VALID_REF, 2, 0x3ff },
{ G1_REG_BD_REF_PIC(0), 22, 0x3ff },
{ G1_REG_BD_REF_PIC(0), 12, 0x3ff },
{ G1_REG_BD_REF_PIC(0), 2, 0x3ff },
},
{
{ G1_REG_BD_REF_PIC(1), 22, 0x3ff },
{ G1_REG_BD_REF_PIC(1), 12, 0x3ff },
{ G1_REG_BD_REF_PIC(1), 2, 0x3ff },
{ G1_REG_BD_REF_PIC(2), 22, 0x3ff },
},
{
{ G1_REG_BD_REF_PIC(2), 12, 0x3ff },
{ G1_REG_BD_REF_PIC(2), 2, 0x3ff },
{ G1_REG_BD_REF_PIC(3), 22, 0x3ff },
{ G1_REG_BD_REF_PIC(3), 12, 0x3ff },
},
};
/*
* Set loop filters
*/
static void cfg_lf(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr)
{
const struct v4l2_vp8_segment *seg = &hdr->segment;
const struct v4l2_vp8_loop_filter *lf = &hdr->lf;
struct hantro_dev *vpu = ctx->dev;
unsigned int i;
u32 reg;
if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
hantro_reg_write(vpu, &vp8_dec_lf_level[0], lf->level);
} else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
for (i = 0; i < 4; i++) {
u32 lf_level = clamp(lf->level + seg->lf_update[i],
0, 63);
hantro_reg_write(vpu, &vp8_dec_lf_level[i], lf_level);
}
} else {
for (i = 0; i < 4; i++)
hantro_reg_write(vpu, &vp8_dec_lf_level[i],
seg->lf_update[i]);
}
reg = G1_REG_REF_PIC_FILT_SHARPNESS(lf->sharpness_level);
if (lf->flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE)
reg |= G1_REG_REF_PIC_FILT_TYPE_E;
vdpu_write_relaxed(vpu, reg, G1_REG_REF_PIC(0));
if (lf->flags & V4L2_VP8_LF_ADJ_ENABLE) {
for (i = 0; i < 4; i++) {
hantro_reg_write(vpu, &vp8_dec_mb_adj[i],
lf->mb_mode_delta[i]);
hantro_reg_write(vpu, &vp8_dec_ref_adj[i],
lf->ref_frm_delta[i]);
}
}
}
/*
* Set quantization parameters
*/
static void cfg_qp(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr)
{
const struct v4l2_vp8_quantization *q = &hdr->quant;
const struct v4l2_vp8_segment *seg = &hdr->segment;
struct hantro_dev *vpu = ctx->dev;
unsigned int i;
if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi);
} else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
for (i = 0; i < 4; i++) {
u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],
0, 127);
hantro_reg_write(vpu, &vp8_dec_quant[i], quant);
}
} else {
for (i = 0; i < 4; i++)
hantro_reg_write(vpu, &vp8_dec_quant[i],
seg->quant_update[i]);
}
hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta);
hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta);
hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta);
hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta);
hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta);
}
/*
* set control partition and DCT partition regs
*
* VP8 frame stream data layout:
*
* first_part_size dct_part_sizes[0]
* ^ ^
* src_dma | |
* ^ +--------+------+ +-----+-----+
* | | control part | | |
* +--------+----------------+------------------+-----------+-----+-----------+
* | tag 3B | extra 7B | hdr | mb_data | DCT sz | DCT part0 | ... | DCT partn |
* +--------+-----------------------------------+-----------+-----+-----------+
* | | | |
* v +----+---+ v
* mb_start | src_dma_end
* v
* DCT size part
* (num_dct-1)*3B
* Note:
* 1. only key-frames have the extra 7 bytes
* 2. all offsets are relative to src_dma
* 3. the number of DCT parts is 1, 2, 4 or 8
* 4. addresses programmed into the VPU must be 64-bit aligned
*/
static void cfg_parts(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_src;
u32 first_part_offset = V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) ? 10 : 3;
u32 mb_size, mb_offset_bytes, mb_offset_bits, mb_start_bits;
u32 dct_size_part_size, dct_part_offset;
struct hantro_reg reg;
dma_addr_t src_dma;
u32 dct_part_total_len = 0;
u32 count = 0;
unsigned int i;
vb2_src = hantro_get_src_buf(ctx);
src_dma = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
/*
* Calculate the control-partition macroblock data info:
* @first_part_header_bits: bit offset of the mb data from the
* start of the first partition
* @mb_offset_bits: bit offset of the mb data from the src_dma
* base address
* @mb_offset_bytes: byte offset of the mb data from the src_dma
* base address
* @mb_start_bits: bit offset of the mb data from its 64-bit
* aligned address
*/
mb_offset_bits = first_part_offset * 8 +
hdr->first_part_header_bits + 8;
mb_offset_bytes = mb_offset_bits / 8;
mb_start_bits = mb_offset_bits -
(mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) * 8;
mb_size = hdr->first_part_size -
(mb_offset_bytes - first_part_offset) +
(mb_offset_bytes & DEC_8190_ALIGN_MASK);
/* Macroblock data aligned base addr */
vdpu_write_relaxed(vpu, (mb_offset_bytes & (~DEC_8190_ALIGN_MASK))
+ src_dma, G1_REG_ADDR_REF(13));
/* Macroblock data start bits */
reg.base = G1_REG_DEC_CTRL2;
reg.mask = 0x3f;
reg.shift = 18;
	hantro_reg_write(vpu, &reg, mb_start_bits);
/* Macroblock aligned data length */
reg.base = G1_REG_DEC_CTRL6;
reg.mask = 0x3fffff;
reg.shift = 0;
	hantro_reg_write(vpu, &reg, mb_size + 1);
/*
* Calculate DCT partition info
* @dct_size_part_size: Containing sizes of DCT part, every DCT part
* has 3 bytes to store its size, except the last
* DCT part
* @dct_part_offset: bytes offset of DCT parts from src_dma base addr
* @dct_part_total_len: total size of all DCT parts
*/
dct_size_part_size = (hdr->num_dct_parts - 1) * 3;
dct_part_offset = first_part_offset + hdr->first_part_size;
for (i = 0; i < hdr->num_dct_parts; i++)
dct_part_total_len += hdr->dct_part_sizes[i];
dct_part_total_len += dct_size_part_size;
dct_part_total_len += (dct_part_offset & DEC_8190_ALIGN_MASK);
/* Number of DCT partitions */
reg.base = G1_REG_DEC_CTRL6;
reg.mask = 0xf;
reg.shift = 24;
	hantro_reg_write(vpu, &reg, hdr->num_dct_parts - 1);
/* DCT partition length */
vdpu_write_relaxed(vpu,
G1_REG_DEC_CTRL3_STREAM_LEN(dct_part_total_len),
G1_REG_DEC_CTRL3);
/* DCT partitions base address */
for (i = 0; i < hdr->num_dct_parts; i++) {
u32 byte_offset = dct_part_offset + dct_size_part_size + count;
u32 base_addr = byte_offset + src_dma;
hantro_reg_write(vpu, &vp8_dec_dct_base[i],
base_addr & (~DEC_8190_ALIGN_MASK));
hantro_reg_write(vpu, &vp8_dec_dct_start_bits[i],
(byte_offset & DEC_8190_ALIGN_MASK) * 8);
count += hdr->dct_part_sizes[i];
}
}
/*
 * Prediction filter taps
 *
 * Normal 6-tap filters: taps 1..4 of each filter go to dedicated
 * registers, while taps 0 and 5 of filters 2, 4 and 6 are packed
 * together into 4-bit fields of G1_REG_BD_REF_PIC(3).
 */
static void cfg_tap(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_reg reg;
u32 val = 0;
int i, j;
reg.base = G1_REG_BD_REF_PIC(3);
reg.mask = 0xf;
if ((hdr->version & 0x03) != 0)
return; /* Tap filter not used. */
for (i = 0; i < 8; i++) {
val = (hantro_vp8_dec_mc_filter[i][0] << 2) |
hantro_vp8_dec_mc_filter[i][5];
for (j = 0; j < 4; j++)
hantro_reg_write(vpu, &vp8_dec_pred_bc_tap[i][j],
hantro_vp8_dec_mc_filter[i][j + 1]);
switch (i) {
case 2:
reg.shift = 8;
break;
case 4:
reg.shift = 4;
break;
case 6:
reg.shift = 0;
break;
default:
continue;
}
		hantro_reg_write(vpu, &reg, val);
}
}
static void cfg_ref(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr,
struct vb2_v4l2_buffer *vb2_dst)
{
struct hantro_dev *vpu = ctx->dev;
dma_addr_t ref;
ref = hantro_get_ref(ctx, hdr->last_frame_ts);
if (!ref) {
vpu_debug(0, "failed to find last frame ts=%llu\n",
hdr->last_frame_ts);
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
}
vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(0));
ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
if (!ref && hdr->golden_frame_ts)
vpu_debug(0, "failed to find golden frame ts=%llu\n",
hdr->golden_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
ref |= G1_REG_ADDR_REF_TOPC_E;
vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(4));
ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
if (!ref && hdr->alt_frame_ts)
vpu_debug(0, "failed to find alt frame ts=%llu\n",
hdr->alt_frame_ts);
if (!ref)
ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
ref |= G1_REG_ADDR_REF_TOPC_E;
vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(5));
}
static void cfg_buffers(struct hantro_ctx *ctx,
const struct v4l2_ctrl_vp8_frame *hdr,
struct vb2_v4l2_buffer *vb2_dst)
{
const struct v4l2_vp8_segment *seg = &hdr->segment;
struct hantro_dev *vpu = ctx->dev;
dma_addr_t dst_dma;
u32 reg;
/* Set probability table buffer address */
vdpu_write_relaxed(vpu, ctx->vp8_dec.prob_tbl.dma,
G1_REG_ADDR_QTABLE);
/* Set segment map address */
reg = G1_REG_FWD_PIC1_SEGMENT_BASE(ctx->vp8_dec.segment_map.dma);
if (seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED) {
reg |= G1_REG_FWD_PIC1_SEGMENT_E;
if (seg->flags & V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP)
reg |= G1_REG_FWD_PIC1_SEGMENT_UPD_E;
}
vdpu_write_relaxed(vpu, reg, G1_REG_FWD_PIC(0));
dst_dma = hantro_get_dec_buf_addr(ctx, &vb2_dst->vb2_buf);
vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
}
int hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
{
const struct v4l2_ctrl_vp8_frame *hdr;
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_dst;
size_t height = ctx->dst_fmt.height;
size_t width = ctx->dst_fmt.width;
u32 mb_width, mb_height;
u32 reg;
hantro_start_prepare_run(ctx);
hdr = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_VP8_FRAME);
if (WARN_ON(!hdr))
return -EINVAL;
/* Reset segment_map buffer in keyframe */
if (V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
memset(ctx->vp8_dec.segment_map.cpu, 0,
ctx->vp8_dec.segment_map.size);
hantro_vp8_prob_update(ctx, hdr);
reg = G1_REG_CONFIG_DEC_TIMEOUT_E |
G1_REG_CONFIG_DEC_STRENDIAN_E |
G1_REG_CONFIG_DEC_INSWAP32_E |
G1_REG_CONFIG_DEC_STRSWAP32_E |
G1_REG_CONFIG_DEC_OUTSWAP32_E |
G1_REG_CONFIG_DEC_CLK_GATE_E |
G1_REG_CONFIG_DEC_IN_ENDIAN |
G1_REG_CONFIG_DEC_OUT_ENDIAN |
G1_REG_CONFIG_DEC_MAX_BURST(16);
vdpu_write_relaxed(vpu, reg, G1_REG_CONFIG);
reg = G1_REG_DEC_CTRL0_DEC_MODE(10) |
G1_REG_DEC_CTRL0_DEC_AXI_AUTO;
if (!V4L2_VP8_FRAME_IS_KEY_FRAME(hdr))
reg |= G1_REG_DEC_CTRL0_PIC_INTER_E;
if (!(hdr->flags & V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF))
reg |= G1_REG_DEC_CTRL0_SKIP_MODE;
if (hdr->lf.level == 0)
reg |= G1_REG_DEC_CTRL0_FILTERING_DIS;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
/* Frame dimensions */
mb_width = MB_WIDTH(width);
mb_height = MB_HEIGHT(height);
reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(mb_width) |
G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(mb_height) |
G1_REG_DEC_CTRL1_PIC_MB_W_EXT(mb_width >> 9) |
G1_REG_DEC_CTRL1_PIC_MB_H_EXT(mb_height >> 8);
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
/* Boolean decoder */
reg = G1_REG_DEC_CTRL2_BOOLEAN_RANGE(hdr->coder_state.range)
| G1_REG_DEC_CTRL2_BOOLEAN_VALUE(hdr->coder_state.value);
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
reg = 0;
if (hdr->version != 3)
reg |= G1_REG_DEC_CTRL4_VC1_HEIGHT_EXT;
if (hdr->version & 0x3)
reg |= G1_REG_DEC_CTRL4_BILIN_MC_E;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL4);
cfg_lf(ctx, hdr);
cfg_qp(ctx, hdr);
cfg_parts(ctx, hdr);
cfg_tap(ctx, hdr);
vb2_dst = hantro_get_dst_buf(ctx);
cfg_ref(ctx, hdr, vb2_dst);
cfg_buffers(ctx, hdr, vb2_dst);
hantro_end_prepare_run(ctx);
vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
return 0;
}
| linux-master | drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (C) 2021 Collabora Ltd, Andrzej Pietrasiewicz <[email protected]>
*/
#include "hantro_hw.h"
#include "hantro_g2_regs.h"
void hantro_g2_check_idle(struct hantro_dev *vpu)
{
int i;
for (i = 0; i < 3; i++) {
u32 status;
/* Make sure the VPU is idle */
status = vdpu_read(vpu, G2_REG_INTERRUPT);
if (status & G2_REG_INTERRUPT_DEC_E) {
dev_warn(vpu->dev, "device still running, aborting");
status |= G2_REG_INTERRUPT_DEC_ABORT_E | G2_REG_INTERRUPT_DEC_IRQ_DIS;
vdpu_write(vpu, status, G2_REG_INTERRUPT);
}
}
}
irqreturn_t hantro_g2_irq(int irq, void *dev_id)
{
struct hantro_dev *vpu = dev_id;
enum vb2_buffer_state state;
u32 status;
status = vdpu_read(vpu, G2_REG_INTERRUPT);
state = (status & G2_REG_INTERRUPT_DEC_RDY_INT) ?
VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
vdpu_write(vpu, 0, G2_REG_INTERRUPT);
vdpu_write(vpu, G2_REG_CONFIG_DEC_CLK_GATE_E, G2_REG_CONFIG);
hantro_irq_done(vpu, state);
return IRQ_HANDLED;
}
| linux-master | drivers/media/platform/verisilicon/hantro_g2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
*/
#include <asm/unaligned.h>
#include <linux/bitfield.h>
#include <media/v4l2-mem2mem.h>
#include "hantro.h"
#include "hantro_hw.h"
#include "hantro_g1_regs.h"
#define G1_SWREG(nr) ((nr) * 4)
#define G1_REG_RLC_VLC_BASE G1_SWREG(12)
#define G1_REG_DEC_OUT_BASE G1_SWREG(13)
#define G1_REG_REFER0_BASE G1_SWREG(14)
#define G1_REG_REFER1_BASE G1_SWREG(15)
#define G1_REG_REFER2_BASE G1_SWREG(16)
#define G1_REG_REFER3_BASE G1_SWREG(17)
#define G1_REG_QTABLE_BASE G1_SWREG(40)
#define G1_REG_DEC_AXI_RD_ID(v) (((v) << 24) & GENMASK(31, 24))
#define G1_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(23) : 0)
#define G1_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(22) : 0)
#define G1_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(21) : 0)
#define G1_REG_DEC_INSWAP32_E(v) ((v) ? BIT(20) : 0)
#define G1_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(19) : 0)
#define G1_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(18) : 0)
#define G1_REG_DEC_LATENCY(v) (((v) << 11) & GENMASK(16, 11))
#define G1_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(10) : 0)
#define G1_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(9) : 0)
#define G1_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(8) : 0)
#define G1_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(6) : 0)
#define G1_REG_DEC_SCMD_DIS(v) ((v) ? BIT(5) : 0)
#define G1_REG_DEC_MAX_BURST(v) (((v) << 0) & GENMASK(4, 0))
#define G1_REG_DEC_MODE(v) (((v) << 28) & GENMASK(31, 28))
#define G1_REG_RLC_MODE_E(v) ((v) ? BIT(27) : 0)
#define G1_REG_PIC_INTERLACE_E(v) ((v) ? BIT(23) : 0)
#define G1_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(22) : 0)
#define G1_REG_PIC_B_E(v) ((v) ? BIT(21) : 0)
#define G1_REG_PIC_INTER_E(v) ((v) ? BIT(20) : 0)
#define G1_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(19) : 0)
#define G1_REG_FWD_INTERLACE_E(v) ((v) ? BIT(18) : 0)
#define G1_REG_FILTERING_DIS(v) ((v) ? BIT(14) : 0)
#define G1_REG_WRITE_MVS_E(v) ((v) ? BIT(12) : 0)
#define G1_REG_DEC_AXI_WR_ID(v) (((v) << 0) & GENMASK(7, 0))
#define G1_REG_PIC_MB_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
#define G1_REG_PIC_MB_HEIGHT_P(v) (((v) << 11) & GENMASK(18, 11))
#define G1_REG_ALT_SCAN_E(v) ((v) ? BIT(6) : 0)
#define G1_REG_TOPFIELDFIRST_E(v) ((v) ? BIT(5) : 0)
#define G1_REG_STRM_START_BIT(v) (((v) << 26) & GENMASK(31, 26))
#define G1_REG_QSCALE_TYPE(v) ((v) ? BIT(24) : 0)
#define G1_REG_CON_MV_E(v) ((v) ? BIT(4) : 0)
#define G1_REG_INTRA_DC_PREC(v) (((v) << 2) & GENMASK(3, 2))
#define G1_REG_INTRA_VLC_TAB(v) ((v) ? BIT(1) : 0)
#define G1_REG_FRAME_PRED_DCT(v) ((v) ? BIT(0) : 0)
#define G1_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
#define G1_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
#define G1_REG_ALT_SCAN_FLAG_E(v) ((v) ? BIT(19) : 0)
#define G1_REG_FCODE_FWD_HOR(v) (((v) << 15) & GENMASK(18, 15))
#define G1_REG_FCODE_FWD_VER(v) (((v) << 11) & GENMASK(14, 11))
#define G1_REG_FCODE_BWD_HOR(v) (((v) << 7) & GENMASK(10, 7))
#define G1_REG_FCODE_BWD_VER(v) (((v) << 3) & GENMASK(6, 3))
#define G1_REG_MV_ACCURACY_FWD(v) ((v) ? BIT(2) : 0)
#define G1_REG_MV_ACCURACY_BWD(v) ((v) ? BIT(1) : 0)
#define G1_REG_STARTMB_X(v) (((v) << 23) & GENMASK(31, 23))
#define G1_REG_STARTMB_Y(v) (((v) << 15) & GENMASK(22, 15))
#define G1_REG_APF_THRESHOLD(v) (((v) << 0) & GENMASK(13, 0))
static void
hantro_g1_mpeg2_dec_set_quantisation(struct hantro_dev *vpu,
struct hantro_ctx *ctx)
{
struct v4l2_ctrl_mpeg2_quantisation *q;
q = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_MPEG2_QUANTISATION);
hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma, G1_REG_QTABLE_BASE);
}
static void
hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
struct vb2_buffer *src_buf,
struct vb2_buffer *dst_buf,
const struct v4l2_ctrl_mpeg2_sequence *seq,
const struct v4l2_ctrl_mpeg2_picture *pic)
{
dma_addr_t forward_addr = 0, backward_addr = 0;
dma_addr_t current_addr, addr;
switch (pic->picture_coding_type) {
case V4L2_MPEG2_PIC_CODING_TYPE_B:
backward_addr = hantro_get_ref(ctx, pic->backward_ref_ts);
fallthrough;
case V4L2_MPEG2_PIC_CODING_TYPE_P:
forward_addr = hantro_get_ref(ctx, pic->forward_ref_ts);
}
/* Source bitstream buffer */
addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
vdpu_write_relaxed(vpu, addr, G1_REG_RLC_VLC_BASE);
/* Destination frame buffer */
addr = hantro_get_dec_buf_addr(ctx, dst_buf);
current_addr = addr;
if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD)
addr += ALIGN(ctx->dst_fmt.width, 16);
vdpu_write_relaxed(vpu, addr, G1_REG_DEC_OUT_BASE);
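	/*
	 * For a bottom-field picture the output address was advanced by one
	 * luma line above, so the two fields of a frame interleave line by
	 * line in the same destination buffer.
	 */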
if (!forward_addr)
forward_addr = current_addr;
if (!backward_addr)
backward_addr = current_addr;
/* Set forward ref frame (top/bottom field) */
if (pic->picture_structure == V4L2_MPEG2_PIC_FRAME ||
pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B ||
(pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD &&
pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST) ||
(pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD &&
!(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST))) {
vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
} else if (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) {
vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER1_BASE);
} else if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD) {
vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER0_BASE);
vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
}
/* Set backward ref frame (top/bottom field) */
vdpu_write_relaxed(vpu, backward_addr, G1_REG_REFER2_BASE);
vdpu_write_relaxed(vpu, backward_addr, G1_REG_REFER3_BASE);
}
int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
const struct v4l2_ctrl_mpeg2_sequence *seq;
const struct v4l2_ctrl_mpeg2_picture *pic;
u32 reg;
src_buf = hantro_get_src_buf(ctx);
dst_buf = hantro_get_dst_buf(ctx);
/* Apply request controls if any */
hantro_start_prepare_run(ctx);
seq = hantro_get_ctrl(ctx,
V4L2_CID_STATELESS_MPEG2_SEQUENCE);
pic = hantro_get_ctrl(ctx,
V4L2_CID_STATELESS_MPEG2_PICTURE);
reg = G1_REG_DEC_AXI_RD_ID(0) |
G1_REG_DEC_TIMEOUT_E(1) |
G1_REG_DEC_STRSWAP32_E(1) |
G1_REG_DEC_STRENDIAN_E(1) |
G1_REG_DEC_INSWAP32_E(1) |
G1_REG_DEC_OUTSWAP32_E(1) |
G1_REG_DEC_DATA_DISC_E(0) |
G1_REG_DEC_LATENCY(0) |
G1_REG_DEC_CLK_GATE_E(1) |
G1_REG_DEC_IN_ENDIAN(1) |
G1_REG_DEC_OUT_ENDIAN(1) |
G1_REG_DEC_ADV_PRE_DIS(0) |
G1_REG_DEC_SCMD_DIS(0) |
G1_REG_DEC_MAX_BURST(16);
vdpu_write_relaxed(vpu, reg, G1_SWREG(2));
reg = G1_REG_DEC_MODE(5) |
G1_REG_RLC_MODE_E(0) |
G1_REG_PIC_INTERLACE_E(!(seq->flags & V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE)) |
G1_REG_PIC_FIELDMODE_E(pic->picture_structure != V4L2_MPEG2_PIC_FRAME) |
G1_REG_PIC_B_E(pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B) |
G1_REG_PIC_INTER_E(pic->picture_coding_type != V4L2_MPEG2_PIC_CODING_TYPE_I) |
G1_REG_PIC_TOPFIELD_E(pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) |
G1_REG_FWD_INTERLACE_E(0) |
G1_REG_FILTERING_DIS(1) |
G1_REG_WRITE_MVS_E(0) |
G1_REG_DEC_AXI_WR_ID(0);
vdpu_write_relaxed(vpu, reg, G1_SWREG(3));
reg = G1_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
G1_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
G1_REG_ALT_SCAN_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
G1_REG_TOPFIELDFIRST_E(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST);
vdpu_write_relaxed(vpu, reg, G1_SWREG(4));
reg = G1_REG_STRM_START_BIT(0) |
G1_REG_QSCALE_TYPE(pic->flags & V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE) |
G1_REG_CON_MV_E(pic->flags & V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV) |
G1_REG_INTRA_DC_PREC(pic->intra_dc_precision) |
G1_REG_INTRA_VLC_TAB(pic->flags & V4L2_MPEG2_PIC_FLAG_INTRA_VLC) |
G1_REG_FRAME_PRED_DCT(pic->flags & V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT);
vdpu_write_relaxed(vpu, reg, G1_SWREG(5));
reg = G1_REG_INIT_QP(1) |
G1_REG_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
vdpu_write_relaxed(vpu, reg, G1_SWREG(6));
reg = G1_REG_ALT_SCAN_FLAG_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
G1_REG_FCODE_FWD_HOR(pic->f_code[0][0]) |
G1_REG_FCODE_FWD_VER(pic->f_code[0][1]) |
G1_REG_FCODE_BWD_HOR(pic->f_code[1][0]) |
G1_REG_FCODE_BWD_VER(pic->f_code[1][1]) |
G1_REG_MV_ACCURACY_FWD(1) |
G1_REG_MV_ACCURACY_BWD(1);
vdpu_write_relaxed(vpu, reg, G1_SWREG(18));
reg = G1_REG_STARTMB_X(0) |
G1_REG_STARTMB_Y(0);
vdpu_write_relaxed(vpu, reg, G1_SWREG(48));
reg = G1_REG_APF_THRESHOLD(8);
vdpu_write_relaxed(vpu, reg, G1_SWREG(55));
hantro_g1_mpeg2_dec_set_quantisation(vpu, ctx);
hantro_g1_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
&dst_buf->vb2_buf,
seq, pic);
hantro_end_prepare_run(ctx);
vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
return 0;
}
| linux-master | drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) Collabora, Ltd.
*
* Based on GSPCA and CODA drivers:
* Copyright (C) Jean-Francois Moine (http://moinejf.free.fr)
* Copyright (C) 2014 Philipp Zabel, Pengutronix
*/
#include <linux/align.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include "hantro_jpeg.h"
#include "hantro.h"
#define LUMA_QUANT_OFF 25
#define CHROMA_QUANT_OFF 90
#define HEIGHT_OFF 159
#define WIDTH_OFF 161
#define HUFF_LUMA_DC_OFF 178
#define HUFF_LUMA_AC_OFF 211
#define HUFF_CHROMA_DC_OFF 394
#define HUFF_CHROMA_AC_OFF 427
/* Default tables from JPEG ITU-T T.81
 * (ISO/IEC 10918-1) Annex K, tables K.1 and K.2
 */
static const unsigned char luma_q_table[] = {
0x10, 0x0b, 0x0a, 0x10, 0x18, 0x28, 0x33, 0x3d,
0x0c, 0x0c, 0x0e, 0x13, 0x1a, 0x3a, 0x3c, 0x37,
0x0e, 0x0d, 0x10, 0x18, 0x28, 0x39, 0x45, 0x38,
0x0e, 0x11, 0x16, 0x1d, 0x33, 0x57, 0x50, 0x3e,
0x12, 0x16, 0x25, 0x38, 0x44, 0x6d, 0x67, 0x4d,
0x18, 0x23, 0x37, 0x40, 0x51, 0x68, 0x71, 0x5c,
0x31, 0x40, 0x4e, 0x57, 0x67, 0x79, 0x78, 0x65,
0x48, 0x5c, 0x5f, 0x62, 0x70, 0x64, 0x67, 0x63
};
static const unsigned char chroma_q_table[] = {
0x11, 0x12, 0x18, 0x2f, 0x63, 0x63, 0x63, 0x63,
0x12, 0x15, 0x1a, 0x42, 0x63, 0x63, 0x63, 0x63,
0x18, 0x1a, 0x38, 0x63, 0x63, 0x63, 0x63, 0x63,
0x2f, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
};
static const unsigned char zigzag[] = {
0, 1, 8, 16, 9, 2, 3, 10,
17, 24, 32, 25, 18, 11, 4, 5,
12, 19, 26, 33, 40, 48, 41, 34,
27, 20, 13, 6, 7, 14, 21, 28,
35, 42, 49, 56, 57, 50, 43, 36,
29, 22, 15, 23, 30, 37, 44, 51,
58, 59, 52, 45, 38, 31, 39, 46,
53, 60, 61, 54, 47, 55, 62, 63
};
static const u32 hw_reorder[] = {
0, 8, 16, 24, 1, 9, 17, 25,
32, 40, 48, 56, 33, 41, 49, 57,
2, 10, 18, 26, 3, 11, 19, 27,
34, 42, 50, 58, 35, 43, 51, 59,
4, 12, 20, 28, 5, 13, 21, 29,
36, 44, 52, 60, 37, 45, 53, 61,
6, 14, 22, 30, 7, 15, 23, 31,
38, 46, 54, 62, 39, 47, 55, 63
};
/* Huffman tables are shared with CODA */
static const unsigned char luma_dc_table[] = {
0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b,
};
static const unsigned char chroma_dc_table[] = {
0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b,
};
static const unsigned char luma_ac_table[] = {
0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
0xf9, 0xfa,
};
static const unsigned char chroma_ac_table[] = {
0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
0xf9, 0xfa,
};
/* For simplicity, we keep a pre-formatted JPEG header,
 * and we'll use fixed offsets to change the width, height,
 * quantization tables, etc.
 */
static const unsigned char hantro_jpeg_header[] = {
/* SOI */
0xff, 0xd8,
/* JFIF-APP0 */
0xff, 0xe0, 0x00, 0x10, 0x4a, 0x46, 0x49, 0x46,
0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01,
0x00, 0x00,
/* DQT */
0xff, 0xdb, 0x00, 0x84,
0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* SOF */
0xff, 0xc0, 0x00, 0x11, 0x08, 0x00, 0xf0, 0x01,
0x40, 0x03, 0x01, 0x22, 0x00, 0x02, 0x11, 0x01,
0x03, 0x11, 0x01,
/* DHT */
0xff, 0xc4, 0x00, 0x1f, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
/* DHT */
0xff, 0xc4, 0x00, 0xb5, 0x10,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* DHT */
0xff, 0xc4, 0x00, 0x1f, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
/* DHT */
0xff, 0xc4, 0x00, 0xb5, 0x11,
0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* COM */
0xff, 0xfe, 0x00, 0x03, 0x00,
/* SOS */
0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02,
0x11, 0x03, 0x11, 0x00, 0x3f, 0x00,
};
/*
* JPEG_HEADER_SIZE is used in other parts of the driver in lieu of
* "sizeof(hantro_jpeg_header)". The two must be equal.
*/
static_assert(sizeof(hantro_jpeg_header) == JPEG_HEADER_SIZE);
/*
* hantro_jpeg_header is padded with a COM segment, so that the payload
* of the SOS segment (the entropy-encoded image scan), which should
* trail the whole header, is 8-byte aligned for the hardware to write
* to directly.
*/
static_assert(IS_ALIGNED(sizeof(hantro_jpeg_header), 8),
"Hantro JPEG header size needs to be 8-byte aligned.");
static unsigned char jpeg_scale_qp(const unsigned char qp, int scale)
{
unsigned int temp;
temp = DIV_ROUND_CLOSEST((unsigned int)qp * scale, 100);
if (temp <= 0)
temp = 1;
if (temp > 255)
temp = 255;
return (unsigned char)temp;
}
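/*
 * Example (illustrative): with qp = 16 and scale = 50 this returns
 * DIV_ROUND_CLOSEST(16 * 50, 100) = 8; with scale = 500 it returns 80.
 * Results are clamped to the valid JPEG quantizer range [1, 255].
 */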
static void
jpeg_scale_quant_table(unsigned char *file_q_tab,
unsigned char *reordered_q_tab,
const unsigned char *tab, int scale)
{
int i;
BUILD_BUG_ON(ARRAY_SIZE(zigzag) != JPEG_QUANT_SIZE);
BUILD_BUG_ON(ARRAY_SIZE(hw_reorder) != JPEG_QUANT_SIZE);
for (i = 0; i < JPEG_QUANT_SIZE; i++) {
file_q_tab[i] = jpeg_scale_qp(tab[zigzag[i]], scale);
reordered_q_tab[i] = jpeg_scale_qp(tab[hw_reorder[i]], scale);
}
}
static void jpeg_set_quality(struct hantro_jpeg_ctx *ctx)
{
int scale;
/*
* Non-linear scaling factor:
* [5,50] -> [1000..100], [51,100] -> [98..0]
*/
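	/*
	 * For example (illustrative): quality = 10 gives scale = 5000 / 10 =
	 * 500 (5x the Annex K step sizes), quality = 50 gives scale = 100
	 * (tables used as-is), and quality = 75 gives scale = 200 - 150 = 50.
	 */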
if (ctx->quality < 50)
scale = 5000 / ctx->quality;
else
scale = 200 - 2 * ctx->quality;
BUILD_BUG_ON(ARRAY_SIZE(luma_q_table) != JPEG_QUANT_SIZE);
BUILD_BUG_ON(ARRAY_SIZE(chroma_q_table) != JPEG_QUANT_SIZE);
BUILD_BUG_ON(ARRAY_SIZE(ctx->hw_luma_qtable) != JPEG_QUANT_SIZE);
BUILD_BUG_ON(ARRAY_SIZE(ctx->hw_chroma_qtable) != JPEG_QUANT_SIZE);
jpeg_scale_quant_table(ctx->buffer + LUMA_QUANT_OFF,
ctx->hw_luma_qtable, luma_q_table, scale);
jpeg_scale_quant_table(ctx->buffer + CHROMA_QUANT_OFF,
ctx->hw_chroma_qtable, chroma_q_table, scale);
}
void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx)
{
char *buf = ctx->buffer;
memcpy(buf, hantro_jpeg_header,
sizeof(hantro_jpeg_header));
buf[HEIGHT_OFF + 0] = ctx->height >> 8;
buf[HEIGHT_OFF + 1] = ctx->height;
buf[WIDTH_OFF + 0] = ctx->width >> 8;
buf[WIDTH_OFF + 1] = ctx->width;
memcpy(buf + HUFF_LUMA_DC_OFF, luma_dc_table, sizeof(luma_dc_table));
memcpy(buf + HUFF_LUMA_AC_OFF, luma_ac_table, sizeof(luma_ac_table));
memcpy(buf + HUFF_CHROMA_DC_OFF, chroma_dc_table,
sizeof(chroma_dc_table));
memcpy(buf + HUFF_CHROMA_AC_OFF, chroma_ac_table,
sizeof(chroma_ac_table));
jpeg_set_quality(ctx);
}
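/*
 * Example (illustrative): for a 1280x720 frame the SOF dimensions are
 * patched in place big-endian: buf[HEIGHT_OFF] = 0x02, buf[HEIGHT_OFF + 1]
 * = 0xd0 (720), and buf[WIDTH_OFF] = 0x05, buf[WIDTH_OFF + 1] = 0x00 (1280).
 */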
| linux-master | drivers/media/platform/verisilicon/hantro_jpeg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
* Jeffy Chen <[email protected]>
*/
#include <linux/clk.h>
#include "hantro.h"
#include "hantro_jpeg.h"
#include "hantro_g1_regs.h"
#include "hantro_h1_regs.h"
#include "rockchip_vpu2_regs.h"
#include "rockchip_vpu981_regs.h"
#define RK3066_ACLK_MAX_FREQ (300 * 1000 * 1000)
#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
#define RK3588_ACLK_MAX_FREQ (300 * 1000 * 1000)
#define ROCKCHIP_VPU981_MIN_SIZE 64
/*
* Supported formats.
*/
static const struct hantro_fmt rockchip_vpu_enc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_YUV420M,
.codec_mode = HANTRO_MODE_NONE,
.enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUV420P,
},
{
.fourcc = V4L2_PIX_FMT_NV12M,
.codec_mode = HANTRO_MODE_NONE,
.enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUV420SP,
},
{
.fourcc = V4L2_PIX_FMT_YUYV,
.codec_mode = HANTRO_MODE_NONE,
.enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUYV422,
},
{
.fourcc = V4L2_PIX_FMT_UYVY,
.codec_mode = HANTRO_MODE_NONE,
.enc_fmt = ROCKCHIP_VPU_ENC_FMT_UYVY422,
},
{
.fourcc = V4L2_PIX_FMT_JPEG,
.codec_mode = HANTRO_MODE_JPEG_ENC,
.max_depth = 2,
.header_size = JPEG_HEADER_SIZE,
.frmsize = {
.min_width = 96,
.max_width = 8192,
.step_width = MB_DIM,
.min_height = 32,
.max_height = 8192,
.step_height = MB_DIM,
},
},
};
static const struct hantro_fmt rockchip_vpu1_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.codec_mode = HANTRO_MODE_NONE,
.postprocessed = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
};
static const struct hantro_fmt rockchip_vpu981_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.postprocessed = true,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = ROCKCHIP_VPU981_MIN_SIZE,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_P010,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.postprocessed = true,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = ROCKCHIP_VPU981_MIN_SIZE,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
};
static const struct hantro_fmt rk3066_vpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_H264_SLICE,
.codec_mode = HANTRO_MODE_H264_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_VP8_FRAME,
.codec_mode = HANTRO_MODE_VP8_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
};
static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_4K_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_4K_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_H264_SLICE,
.codec_mode = HANTRO_MODE_H264_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_4K_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_4K_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_VP8_FRAME,
.codec_mode = HANTRO_MODE_VP8_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
};
static const struct hantro_fmt rockchip_vdpu2_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_H264_SLICE,
.codec_mode = HANTRO_MODE_H264_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_VP8_FRAME,
.codec_mode = HANTRO_MODE_VP8_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
};
static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_FHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_FHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_VP8_FRAME,
.codec_mode = HANTRO_MODE_VP8_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
};
static const struct hantro_fmt rockchip_vpu981_dec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12_4L4,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = ROCKCHIP_VPU981_MIN_SIZE,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_NV15_4L4,
.codec_mode = HANTRO_MODE_NONE,
.match_depth = true,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = ROCKCHIP_VPU981_MIN_SIZE,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_AV1_FRAME,
.codec_mode = HANTRO_MODE_AV1_DEC,
.max_depth = 2,
.frmsize = {
.min_width = ROCKCHIP_VPU981_MIN_SIZE,
.max_width = FMT_UHD_WIDTH,
.step_width = MB_DIM,
.min_height = ROCKCHIP_VPU981_MIN_SIZE,
.max_height = FMT_UHD_HEIGHT,
.step_height = MB_DIM,
},
},
};
static irqreturn_t rockchip_vpu1_vepu_irq(int irq, void *dev_id)
{
struct hantro_dev *vpu = dev_id;
enum vb2_buffer_state state;
u32 status;
status = vepu_read(vpu, H1_REG_INTERRUPT);
state = (status & H1_REG_INTERRUPT_FRAME_RDY) ?
VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
vepu_write(vpu, 0, H1_REG_INTERRUPT);
vepu_write(vpu, 0, H1_REG_AXI_CTRL);
hantro_irq_done(vpu, state);
return IRQ_HANDLED;
}
static irqreturn_t rockchip_vpu2_vdpu_irq(int irq, void *dev_id)
{
struct hantro_dev *vpu = dev_id;
enum vb2_buffer_state state;
u32 status;
status = vdpu_read(vpu, VDPU_REG_INTERRUPT);
state = (status & VDPU_REG_INTERRUPT_DEC_IRQ) ?
VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
vdpu_write(vpu, 0, VDPU_REG_INTERRUPT);
vdpu_write(vpu, 0, VDPU_REG_AXI_CTRL);
hantro_irq_done(vpu, state);
return IRQ_HANDLED;
}
static irqreturn_t rockchip_vpu2_vepu_irq(int irq, void *dev_id)
{
struct hantro_dev *vpu = dev_id;
enum vb2_buffer_state state;
u32 status;
status = vepu_read(vpu, VEPU_REG_INTERRUPT);
state = (status & VEPU_REG_INTERRUPT_FRAME_READY) ?
VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
hantro_irq_done(vpu, state);
return IRQ_HANDLED;
}
static irqreturn_t rk3588_vpu981_irq(int irq, void *dev_id)
{
struct hantro_dev *vpu = dev_id;
enum vb2_buffer_state state;
u32 status;
status = vdpu_read(vpu, AV1_REG_INTERRUPT);
state = (status & AV1_REG_INTERRUPT_DEC_RDY_INT) ?
VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
vdpu_write(vpu, 0, AV1_REG_INTERRUPT);
vdpu_write(vpu, AV1_REG_CONFIG_DEC_CLK_GATE_E, AV1_REG_CONFIG);
hantro_irq_done(vpu, state);
return IRQ_HANDLED;
}
static int rk3036_vpu_hw_init(struct hantro_dev *vpu)
{
/* Bump ACLK to max. possible freq. to improve performance. */
clk_set_rate(vpu->clocks[0].clk, RK3066_ACLK_MAX_FREQ);
return 0;
}
static int rk3066_vpu_hw_init(struct hantro_dev *vpu)
{
/* Bump ACLKs to max. possible freq. to improve performance. */
clk_set_rate(vpu->clocks[0].clk, RK3066_ACLK_MAX_FREQ);
clk_set_rate(vpu->clocks[2].clk, RK3066_ACLK_MAX_FREQ);
return 0;
}
static int rk3588_vpu981_hw_init(struct hantro_dev *vpu)
{
/* Bump ACLKs to max. possible freq. to improve performance. */
clk_set_rate(vpu->clocks[0].clk, RK3588_ACLK_MAX_FREQ);
return 0;
}
static int rockchip_vpu_hw_init(struct hantro_dev *vpu)
{
/* Bump ACLK to max. possible freq. to improve performance. */
clk_set_rate(vpu->clocks[0].clk, RK3288_ACLK_MAX_FREQ);
return 0;
}
static void rk3066_vpu_dec_reset(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
vdpu_write(vpu, G1_REG_INTERRUPT_DEC_IRQ_DIS, G1_REG_INTERRUPT);
vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
}
static void rockchip_vpu1_enc_reset(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
vepu_write(vpu, H1_REG_INTERRUPT_DIS_BIT, H1_REG_INTERRUPT);
vepu_write(vpu, 0, H1_REG_ENC_CTRL);
vepu_write(vpu, 0, H1_REG_AXI_CTRL);
}
static void rockchip_vpu2_dec_reset(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
vdpu_write(vpu, VDPU_REG_INTERRUPT_DEC_IRQ_DIS, VDPU_REG_INTERRUPT);
vdpu_write(vpu, 0, VDPU_REG_EN_FLAGS);
vdpu_write(vpu, 1, VDPU_REG_SOFT_RESET);
}
static void rockchip_vpu2_enc_reset(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
vepu_write(vpu, 0, VEPU_REG_ENCODE_START);
vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
}
/*
* Supported codec ops.
*/
static const struct hantro_codec_ops rk3036_vpu_codec_ops[] = {
[HANTRO_MODE_H264_DEC] = {
.run = hantro_g1_h264_dec_run,
.reset = hantro_g1_reset,
.init = hantro_h264_dec_init,
.exit = hantro_h264_dec_exit,
},
[HANTRO_MODE_MPEG2_DEC] = {
.run = hantro_g1_mpeg2_dec_run,
.reset = hantro_g1_reset,
.init = hantro_mpeg2_dec_init,
.exit = hantro_mpeg2_dec_exit,
},
[HANTRO_MODE_VP8_DEC] = {
.run = hantro_g1_vp8_dec_run,
.reset = hantro_g1_reset,
.init = hantro_vp8_dec_init,
.exit = hantro_vp8_dec_exit,
},
};
static const struct hantro_codec_ops rk3066_vpu_codec_ops[] = {
[HANTRO_MODE_JPEG_ENC] = {
.run = hantro_h1_jpeg_enc_run,
.reset = rockchip_vpu1_enc_reset,
.done = hantro_h1_jpeg_enc_done,
},
[HANTRO_MODE_H264_DEC] = {
.run = hantro_g1_h264_dec_run,
.reset = rk3066_vpu_dec_reset,
.init = hantro_h264_dec_init,
.exit = hantro_h264_dec_exit,
},
[HANTRO_MODE_MPEG2_DEC] = {
.run = hantro_g1_mpeg2_dec_run,
.reset = rk3066_vpu_dec_reset,
.init = hantro_mpeg2_dec_init,
.exit = hantro_mpeg2_dec_exit,
},
[HANTRO_MODE_VP8_DEC] = {
.run = hantro_g1_vp8_dec_run,
.reset = rk3066_vpu_dec_reset,
.init = hantro_vp8_dec_init,
.exit = hantro_vp8_dec_exit,
},
};
static const struct hantro_codec_ops rk3288_vpu_codec_ops[] = {
[HANTRO_MODE_JPEG_ENC] = {
.run = hantro_h1_jpeg_enc_run,
.reset = rockchip_vpu1_enc_reset,
.done = hantro_h1_jpeg_enc_done,
},
[HANTRO_MODE_H264_DEC] = {
.run = hantro_g1_h264_dec_run,
.reset = hantro_g1_reset,
.init = hantro_h264_dec_init,
.exit = hantro_h264_dec_exit,
},
[HANTRO_MODE_MPEG2_DEC] = {
.run = hantro_g1_mpeg2_dec_run,
.reset = hantro_g1_reset,
.init = hantro_mpeg2_dec_init,
.exit = hantro_mpeg2_dec_exit,
},
[HANTRO_MODE_VP8_DEC] = {
.run = hantro_g1_vp8_dec_run,
.reset = hantro_g1_reset,
.init = hantro_vp8_dec_init,
.exit = hantro_vp8_dec_exit,
},
};
static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = {
[HANTRO_MODE_JPEG_ENC] = {
.run = rockchip_vpu2_jpeg_enc_run,
.reset = rockchip_vpu2_enc_reset,
.done = rockchip_vpu2_jpeg_enc_done,
},
[HANTRO_MODE_H264_DEC] = {
.run = rockchip_vpu2_h264_dec_run,
.reset = rockchip_vpu2_dec_reset,
.init = hantro_h264_dec_init,
.exit = hantro_h264_dec_exit,
},
[HANTRO_MODE_MPEG2_DEC] = {
.run = rockchip_vpu2_mpeg2_dec_run,
.reset = rockchip_vpu2_dec_reset,
.init = hantro_mpeg2_dec_init,
.exit = hantro_mpeg2_dec_exit,
},
[HANTRO_MODE_VP8_DEC] = {
.run = rockchip_vpu2_vp8_dec_run,
.reset = rockchip_vpu2_dec_reset,
.init = hantro_vp8_dec_init,
.exit = hantro_vp8_dec_exit,
},
};
static const struct hantro_codec_ops rk3568_vepu_codec_ops[] = {
[HANTRO_MODE_JPEG_ENC] = {
.run = rockchip_vpu2_jpeg_enc_run,
.reset = rockchip_vpu2_enc_reset,
.done = rockchip_vpu2_jpeg_enc_done,
},
};
static const struct hantro_codec_ops rk3588_vpu981_codec_ops[] = {
[HANTRO_MODE_AV1_DEC] = {
.run = rockchip_vpu981_av1_dec_run,
.init = rockchip_vpu981_av1_dec_init,
.exit = rockchip_vpu981_av1_dec_exit,
.done = rockchip_vpu981_av1_dec_done,
},
};
/*
* VPU variant.
*/
static const struct hantro_irq rockchip_vdpu1_irqs[] = {
{ "vdpu", hantro_g1_irq },
};
static const struct hantro_irq rockchip_vpu1_irqs[] = {
{ "vepu", rockchip_vpu1_vepu_irq },
{ "vdpu", hantro_g1_irq },
};
static const struct hantro_irq rockchip_vdpu2_irqs[] = {
{ "vdpu", rockchip_vpu2_vdpu_irq },
};
static const struct hantro_irq rockchip_vpu2_irqs[] = {
{ "vepu", rockchip_vpu2_vepu_irq },
{ "vdpu", rockchip_vpu2_vdpu_irq },
};
static const struct hantro_irq rk3568_vepu_irqs[] = {
{ "vepu", rockchip_vpu2_vepu_irq },
};
static const char * const rk3066_vpu_clk_names[] = {
"aclk_vdpu", "hclk_vdpu",
"aclk_vepu", "hclk_vepu"
};
static const struct hantro_irq rk3588_vpu981_irqs[] = {
{ "vdpu", rk3588_vpu981_irq },
};
static const char * const rockchip_vpu_clk_names[] = {
"aclk", "hclk"
};
static const char * const rk3588_vpu981_vpu_clk_names[] = {
"aclk", "hclk", "aclk_vdpu_root", "hclk_vdpu_root"
};
/* VDPU1/VEPU1 */
const struct hantro_variant rk3036_vpu_variant = {
.dec_offset = 0x400,
.dec_fmts = rk3066_vpu_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(rk3066_vpu_dec_fmts),
.postproc_fmts = rockchip_vpu1_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
.postproc_ops = &hantro_g1_postproc_ops,
.codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
HANTRO_H264_DECODER,
.codec_ops = rk3036_vpu_codec_ops,
.irqs = rockchip_vdpu1_irqs,
.num_irqs = ARRAY_SIZE(rockchip_vdpu1_irqs),
.init = rk3036_vpu_hw_init,
.clk_names = rockchip_vpu_clk_names,
.num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
};
/*
 * Although this variant has separate clocks for the decoder and encoder,
 * all four of them still need to be enabled for either decoding or
 * encoding, so we can't split it into separate g1/h1 variants.
 */
const struct hantro_variant rk3066_vpu_variant = {
.enc_offset = 0x0,
.enc_fmts = rockchip_vpu_enc_fmts,
.num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
.dec_offset = 0x400,
.dec_fmts = rk3066_vpu_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(rk3066_vpu_dec_fmts),
.postproc_fmts = rockchip_vpu1_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
.postproc_ops = &hantro_g1_postproc_ops,
.codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
.codec_ops = rk3066_vpu_codec_ops,
.irqs = rockchip_vpu1_irqs,
.num_irqs = ARRAY_SIZE(rockchip_vpu1_irqs),
.init = rk3066_vpu_hw_init,
.clk_names = rk3066_vpu_clk_names,
.num_clocks = ARRAY_SIZE(rk3066_vpu_clk_names)
};
const struct hantro_variant rk3288_vpu_variant = {
.enc_offset = 0x0,
.enc_fmts = rockchip_vpu_enc_fmts,
.num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
.dec_offset = 0x400,
.dec_fmts = rk3288_vpu_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(rk3288_vpu_dec_fmts),
.postproc_fmts = rockchip_vpu1_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
.postproc_ops = &hantro_g1_postproc_ops,
.codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
.codec_ops = rk3288_vpu_codec_ops,
.irqs = rockchip_vpu1_irqs,
.num_irqs = ARRAY_SIZE(rockchip_vpu1_irqs),
.init = rockchip_vpu_hw_init,
.clk_names = rockchip_vpu_clk_names,
.num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
};
/* VDPU2/VEPU2 */
const struct hantro_variant rk3328_vpu_variant = {
.dec_offset = 0x400,
.dec_fmts = rockchip_vdpu2_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
.codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
HANTRO_H264_DECODER,
.codec_ops = rk3399_vpu_codec_ops,
.irqs = rockchip_vdpu2_irqs,
.num_irqs = ARRAY_SIZE(rockchip_vdpu2_irqs),
.init = rockchip_vpu_hw_init,
.clk_names = rockchip_vpu_clk_names,
.num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names),
};
/*
 * H.264 decoding is explicitly disabled on RK3399.
 * This ensures userspace applications use the Rockchip VDEC core,
 * which has better performance.
 */
const struct hantro_variant rk3399_vpu_variant = {
.enc_offset = 0x0,
.enc_fmts = rockchip_vpu_enc_fmts,
.num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
.dec_offset = 0x400,
.dec_fmts = rk3399_vpu_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
.codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
HANTRO_VP8_DECODER,
.codec_ops = rk3399_vpu_codec_ops,
.irqs = rockchip_vpu2_irqs,
.num_irqs = ARRAY_SIZE(rockchip_vpu2_irqs),
.init = rockchip_vpu_hw_init,
.clk_names = rockchip_vpu_clk_names,
.num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
};
const struct hantro_variant rk3568_vepu_variant = {
.enc_offset = 0x0,
.enc_fmts = rockchip_vpu_enc_fmts,
.num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
.codec = HANTRO_JPEG_ENCODER,
.codec_ops = rk3568_vepu_codec_ops,
.irqs = rk3568_vepu_irqs,
.num_irqs = ARRAY_SIZE(rk3568_vepu_irqs),
.init = rockchip_vpu_hw_init,
.clk_names = rockchip_vpu_clk_names,
.num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
};
const struct hantro_variant rk3568_vpu_variant = {
.dec_offset = 0x400,
.dec_fmts = rockchip_vdpu2_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
.codec = HANTRO_MPEG2_DECODER |
HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
.codec_ops = rk3399_vpu_codec_ops,
.irqs = rockchip_vdpu2_irqs,
.num_irqs = ARRAY_SIZE(rockchip_vdpu2_irqs),
.init = rockchip_vpu_hw_init,
.clk_names = rockchip_vpu_clk_names,
.num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
};
const struct hantro_variant px30_vpu_variant = {
.enc_offset = 0x0,
.enc_fmts = rockchip_vpu_enc_fmts,
.num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
.dec_offset = 0x400,
.dec_fmts = rockchip_vdpu2_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
.codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
.codec_ops = rk3399_vpu_codec_ops,
.irqs = rockchip_vpu2_irqs,
.num_irqs = ARRAY_SIZE(rockchip_vpu2_irqs),
.init = rk3036_vpu_hw_init,
.clk_names = rockchip_vpu_clk_names,
.num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
};
const struct hantro_variant rk3588_vpu981_variant = {
.dec_offset = 0x0,
.dec_fmts = rockchip_vpu981_dec_fmts,
.num_dec_fmts = ARRAY_SIZE(rockchip_vpu981_dec_fmts),
.postproc_fmts = rockchip_vpu981_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(rockchip_vpu981_postproc_fmts),
.postproc_ops = &rockchip_vpu981_postproc_ops,
.codec = HANTRO_AV1_DECODER,
.codec_ops = rk3588_vpu981_codec_ops,
.irqs = rk3588_vpu981_irqs,
.num_irqs = ARRAY_SIZE(rk3588_vpu981_irqs),
.init = rk3588_vpu981_hw_init,
.clk_names = rk3588_vpu981_vpu_clk_names,
.num_clocks = ARRAY_SIZE(rk3588_vpu981_vpu_clk_names)
};
| linux-master | drivers/media/platform/verisilicon/rockchip_vpu_hw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VP9 codec driver
*
* Copyright (C) 2021 Collabora Ltd.
*/
#include <linux/types.h>
#include <media/v4l2-mem2mem.h>
#include "hantro.h"
#include "hantro_hw.h"
#include "hantro_vp9.h"
#define POW2(x) (1 << (x))
#define MAX_LOG2_TILE_COLUMNS 6
#define MAX_NUM_TILE_COLS POW2(MAX_LOG2_TILE_COLUMNS)
#define MAX_TILE_COLS 20
#define MAX_TILE_ROWS 22
static size_t hantro_vp9_tile_filter_size(unsigned int height)
{
u32 h, height32, size;
h = roundup(height, 8);
height32 = roundup(h, 64);
size = 24 * height32 * (MAX_NUM_TILE_COLS - 1); /* luma: 8, chroma: 8 + 8 */
return size;
}
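/*
 * Example (illustrative): for height = 2160, height32 = 2176, so the
 * buffer is 24 * 2176 * 63 = 3290112 bytes: 24 bytes of saved edge
 * pixels per picture row for each of the MAX_NUM_TILE_COLS - 1
 * potential tile column boundaries.
 */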
static size_t hantro_vp9_bsd_control_size(unsigned int height)
{
u32 h, height32;
h = roundup(height, 8);
height32 = roundup(h, 64);
return 16 * (height32 / 4) * (MAX_NUM_TILE_COLS - 1);
}
static size_t hantro_vp9_segment_map_size(unsigned int width, unsigned int height)
{
u32 w, h;
int num_ctbs;
w = roundup(width, 8);
h = roundup(height, 8);
num_ctbs = ((w + 63) / 64) * ((h + 63) / 64);
return num_ctbs * 32;
}
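/*
 * Example (illustrative): for 1920x1080 the map covers
 * ((1920 + 63) / 64) * ((1080 + 63) / 64) = 30 * 17 = 510 superblocks,
 * i.e. 510 * 32 = 16320 bytes per segment map area.
 */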
static inline size_t hantro_vp9_prob_tab_size(void)
{
return roundup(sizeof(struct hantro_g2_all_probs), 16);
}
static inline size_t hantro_vp9_count_tab_size(void)
{
return roundup(sizeof(struct symbol_counts), 16);
}
static inline size_t hantro_vp9_tile_info_size(void)
{
return roundup((MAX_TILE_COLS * MAX_TILE_ROWS * 4 * sizeof(u16) + 15 + 16) & ~0xf, 16);
}
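/*
 * Illustrative evaluation: MAX_TILE_COLS * MAX_TILE_ROWS * 4 * sizeof(u16)
 * = 20 * 22 * 4 * 2 = 3520 bytes of per-tile entries; adding 15 + 16 and
 * masking with ~0xf yields 3536, already a multiple of 16.
 */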
static void *get_coeffs_arr(struct symbol_counts *cnts, int i, int j, int k, int l, int m)
{
if (i == 0)
return &cnts->count_coeffs[j][k][l][m];
if (i == 1)
return &cnts->count_coeffs8x8[j][k][l][m];
if (i == 2)
return &cnts->count_coeffs16x16[j][k][l][m];
if (i == 3)
return &cnts->count_coeffs32x32[j][k][l][m];
return NULL;
}
static void *get_eobs1(struct symbol_counts *cnts, int i, int j, int k, int l, int m)
{
if (i == 0)
return &cnts->count_coeffs[j][k][l][m][3];
if (i == 1)
return &cnts->count_coeffs8x8[j][k][l][m][3];
if (i == 2)
return &cnts->count_coeffs16x16[j][k][l][m][3];
if (i == 3)
return &cnts->count_coeffs32x32[j][k][l][m][3];
return NULL;
}
#define INNER_LOOP \
do { \
for (m = 0; m < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0][0][0]); ++m) { \
vp9_ctx->cnts.coeff[i][j][k][l][m] = \
get_coeffs_arr(cnts, i, j, k, l, m); \
vp9_ctx->cnts.eob[i][j][k][l][m][0] = \
&cnts->count_eobs[i][j][k][l][m]; \
vp9_ctx->cnts.eob[i][j][k][l][m][1] = \
get_eobs1(cnts, i, j, k, l, m); \
} \
} while (0)
static void init_v4l2_vp9_count_tbl(struct hantro_ctx *ctx)
{
struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
struct symbol_counts *cnts = vp9_ctx->misc.cpu + vp9_ctx->ctx_counters_offset;
int i, j, k, l, m;
vp9_ctx->cnts.partition = &cnts->partition_counts;
vp9_ctx->cnts.skip = &cnts->mbskip_count;
vp9_ctx->cnts.intra_inter = &cnts->intra_inter_count;
vp9_ctx->cnts.tx32p = &cnts->tx32x32_count;
/*
* g2 hardware uses tx16x16_count[2][3], while the api
* expects tx16p[2][4], so this must be explicitly copied
* into vp9_ctx->cnts.tx16p when passing the data to the
* vp9 library function
*/
vp9_ctx->cnts.tx8p = &cnts->tx8x8_count;
vp9_ctx->cnts.y_mode = &cnts->sb_ymode_counts;
vp9_ctx->cnts.uv_mode = &cnts->uv_mode_counts;
vp9_ctx->cnts.comp = &cnts->comp_inter_count;
vp9_ctx->cnts.comp_ref = &cnts->comp_ref_count;
vp9_ctx->cnts.single_ref = &cnts->single_ref_count;
vp9_ctx->cnts.filter = &cnts->switchable_interp_counts;
vp9_ctx->cnts.mv_joint = &cnts->mv_counts.joints;
vp9_ctx->cnts.sign = &cnts->mv_counts.sign;
vp9_ctx->cnts.classes = &cnts->mv_counts.classes;
vp9_ctx->cnts.class0 = &cnts->mv_counts.class0;
vp9_ctx->cnts.bits = &cnts->mv_counts.bits;
vp9_ctx->cnts.class0_fp = &cnts->mv_counts.class0_fp;
vp9_ctx->cnts.fp = &cnts->mv_counts.fp;
vp9_ctx->cnts.class0_hp = &cnts->mv_counts.class0_hp;
vp9_ctx->cnts.hp = &cnts->mv_counts.hp;
for (i = 0; i < ARRAY_SIZE(vp9_ctx->cnts.coeff); ++i)
for (j = 0; j < ARRAY_SIZE(vp9_ctx->cnts.coeff[i]); ++j)
for (k = 0; k < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0]); ++k)
for (l = 0; l < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0][0]); ++l)
INNER_LOOP;
}
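/*
 * After this runs, every pointer in vp9_ctx->cnts aliases a counter in
 * the hardware-written symbol_counts area of the misc buffer, so the
 * v4l2-vp9 helpers can consume the counts in place after each decode
 * (except tx16p, which needs the explicit copy noted above).
 */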
int hantro_vp9_dec_init(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
const struct hantro_variant *variant = vpu->variant;
struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
struct hantro_aux_buf *tile_edge = &vp9_dec->tile_edge;
struct hantro_aux_buf *segment_map = &vp9_dec->segment_map;
struct hantro_aux_buf *misc = &vp9_dec->misc;
u32 i, max_width, max_height, size;
if (variant->num_dec_fmts < 1)
return -EINVAL;
for (i = 0; i < variant->num_dec_fmts; ++i)
if (variant->dec_fmts[i].fourcc == V4L2_PIX_FMT_VP9_FRAME)
break;
if (i == variant->num_dec_fmts)
return -EINVAL;
max_width = vpu->variant->dec_fmts[i].frmsize.max_width;
max_height = vpu->variant->dec_fmts[i].frmsize.max_height;
size = hantro_vp9_tile_filter_size(max_height);
vp9_dec->bsd_ctrl_offset = size;
size += hantro_vp9_bsd_control_size(max_height);
tile_edge->cpu = dma_alloc_coherent(vpu->dev, size, &tile_edge->dma, GFP_KERNEL);
if (!tile_edge->cpu)
return -ENOMEM;
tile_edge->size = size;
memset(tile_edge->cpu, 0, size);
size = hantro_vp9_segment_map_size(max_width, max_height);
vp9_dec->segment_map_size = size;
size *= 2; /* we need two areas of this size, used alternately */
segment_map->cpu = dma_alloc_coherent(vpu->dev, size, &segment_map->dma, GFP_KERNEL);
if (!segment_map->cpu)
goto err_segment_map;
segment_map->size = size;
memset(segment_map->cpu, 0, size);
size = hantro_vp9_prob_tab_size();
vp9_dec->ctx_counters_offset = size;
size += hantro_vp9_count_tab_size();
vp9_dec->tile_info_offset = size;
size += hantro_vp9_tile_info_size();
misc->cpu = dma_alloc_coherent(vpu->dev, size, &misc->dma, GFP_KERNEL);
if (!misc->cpu)
goto err_misc;
misc->size = size;
memset(misc->cpu, 0, size);
init_v4l2_vp9_count_tbl(ctx);
return 0;
err_misc:
dma_free_coherent(vpu->dev, segment_map->size, segment_map->cpu, segment_map->dma);
err_segment_map:
dma_free_coherent(vpu->dev, tile_edge->size, tile_edge->cpu, tile_edge->dma);
return -ENOMEM;
}
void hantro_vp9_dec_exit(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
struct hantro_aux_buf *tile_edge = &vp9_dec->tile_edge;
struct hantro_aux_buf *segment_map = &vp9_dec->segment_map;
struct hantro_aux_buf *misc = &vp9_dec->misc;
dma_free_coherent(vpu->dev, misc->size, misc->cpu, misc->dma);
dma_free_coherent(vpu->dev, segment_map->size, segment_map->cpu, segment_map->dma);
dma_free_coherent(vpu->dev, tile_edge->size, tile_edge->cpu, tile_edge->dma);
}
| linux-master | drivers/media/platform/verisilicon/hantro_vp9.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VPU codec driver
*
* Copyright (C) 2018 Rockchip Electronics Co., Ltd.
* Jeffy Chen <[email protected]>
* Copyright (C) 2019 Pengutronix, Philipp Zabel <[email protected]>
* Copyright (C) 2021 Collabora Ltd, Emil Velikov <[email protected]>
*/
#include "hantro.h"
#include "hantro_g1_regs.h"
irqreturn_t hantro_g1_irq(int irq, void *dev_id)
{
struct hantro_dev *vpu = dev_id;
enum vb2_buffer_state state;
u32 status;
status = vdpu_read(vpu, G1_REG_INTERRUPT);
state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
vdpu_write(vpu, 0, G1_REG_INTERRUPT);
vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
hantro_irq_done(vpu, state);
return IRQ_HANDLED;
}
void hantro_g1_reset(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
vdpu_write(vpu, G1_REG_INTERRUPT_DEC_IRQ_DIS, G1_REG_INTERRUPT);
vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
vdpu_write(vpu, 1, G1_REG_SOFT_RESET);
}
| linux-master | drivers/media/platform/verisilicon/hantro_g1.c |
// SPDX-License-Identifier: BSD-2-Clause
/*
* Copyright (c) 2016, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include "hantro.h"
#include "rockchip_av1_entropymode.h"
#define AOM_ICDF ICDF
#define AOM_CDF2(a0) AOM_ICDF(a0)
#define AOM_CDF3(a0, a1) \
AOM_ICDF(a0), AOM_ICDF(a1)
#define AOM_CDF4(a0, a1, a2) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2)
#define AOM_CDF5(a0, a1, a2, a3) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3)
#define AOM_CDF6(a0, a1, a2, a3, a4) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4)
#define AOM_CDF7(a0, a1, a2, a3, a4, a5) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), \
AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5)
#define AOM_CDF8(a0, a1, a2, a3, a4, a5, a6) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), \
AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5), AOM_ICDF(a6)
#define AOM_CDF9(a0, a1, a2, a3, a4, a5, a6, a7) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), \
AOM_ICDF(a4), AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7)
#define AOM_CDF10(a0, a1, a2, a3, a4, a5, a6, a7, a8) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), \
AOM_ICDF(a4), AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8)
#define AOM_CDF11(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9)
#define AOM_CDF12(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5), \
AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), AOM_ICDF(a10)
#define AOM_CDF13(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), AOM_ICDF(a5), \
AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), AOM_ICDF(a10), AOM_ICDF(a11)
#define AOM_CDF14(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), \
AOM_ICDF(a10), AOM_ICDF(a11), AOM_ICDF(a12)
#define AOM_CDF15(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), \
AOM_ICDF(a10), AOM_ICDF(a11), AOM_ICDF(a12), AOM_ICDF(a13)
#define AOM_CDF16(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14) \
AOM_ICDF(a0), AOM_ICDF(a1), AOM_ICDF(a2), AOM_ICDF(a3), AOM_ICDF(a4), \
AOM_ICDF(a5), AOM_ICDF(a6), AOM_ICDF(a7), AOM_ICDF(a8), AOM_ICDF(a9), \
AOM_ICDF(a10), AOM_ICDF(a11), AOM_ICDF(a12), AOM_ICDF(a13), AOM_ICDF(a14)
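/*
 * Editorial note on the representation (assuming ICDF() follows the
 * usual libaom convention ICDF(x) == 32768 - x): an AOM_CDFn()
 * initializer stores the n - 1 cumulative 15-bit probabilities of an
 * n-ary symbol in inverted form, so a row decreases strictly toward the
 * implicit final entry of 0 (cumulative probability 32768).  A minimal
 * sanity check under that assumption:
 */
static bool __maybe_unused aom_icdf_row_is_valid(const u16 *row, int nsyms)
{
	int i;

	for (i = 1; i < nsyms - 1; i++)
		if (row[i] >= row[i - 1])
			return false;	/* inverted CDFs must strictly decrease */

	return true;
}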
static const u16 default_kf_y_mode_cdf
[KF_MODE_CONTEXTS][KF_MODE_CONTEXTS][CDF_SIZE(AV1_INTRA_MODES)] = {
{
{
AOM_CDF13(15588, 17027, 19338, 20218, 20682, 21110,
21825, 23244, 24189, 28165, 29093, 30466)
},
{
AOM_CDF13(12016, 18066, 19516, 20303, 20719, 21444,
21888, 23032, 24434, 28658, 30172, 31409)
},
{
AOM_CDF13(10052, 10771, 22296, 22788, 23055, 23239,
24133, 25620, 26160, 29336, 29929, 31567)
},
{
AOM_CDF13(14091, 15406, 16442, 18808, 19136, 19546,
19998, 22096, 24746, 29585, 30958, 32462)
},
{
AOM_CDF13(12122, 13265, 15603, 16501, 18609, 20033,
22391, 25583, 26437, 30261, 31073, 32475)
}
},
{
{
AOM_CDF13(10023, 19585, 20848, 21440, 21832, 22760,
23089, 24023, 25381, 29014, 30482, 31436)
},
{
AOM_CDF13(5983, 24099, 24560, 24886, 25066, 25795,
25913, 26423, 27610, 29905, 31276, 31794)
},
{
AOM_CDF13(7444, 12781, 20177, 20728, 21077, 21607,
22170, 23405, 24469, 27915, 29090, 30492)
},
{
AOM_CDF13(8537, 14689, 15432, 17087, 17408, 18172,
18408, 19825, 24649, 29153, 31096, 32210)
},
{
AOM_CDF13(7543, 14231, 15496, 16195, 17905, 20717,
21984, 24516, 26001, 29675, 30981, 31994)
}
},
{
{
AOM_CDF13(12613, 13591, 21383, 22004, 22312, 22577,
23401, 25055, 25729, 29538, 30305, 32077)
},
{
AOM_CDF13(9687, 13470, 18506, 19230, 19604, 20147,
20695, 22062, 23219, 27743, 29211, 30907)
},
{
AOM_CDF13(6183, 6505, 26024, 26252, 26366, 26434,
27082, 28354, 28555, 30467, 30794, 32086)
},
{
AOM_CDF13(10718, 11734, 14954, 17224, 17565, 17924,
18561, 21523, 23878, 28975, 30287, 32252)
},
{
AOM_CDF13(9194, 9858, 16501, 17263, 18424, 19171,
21563, 25961, 26561, 30072, 30737, 32463)
}
},
{
{
AOM_CDF13(12602, 14399, 15488, 18381, 18778, 19315,
19724, 21419, 25060, 29696, 30917, 32409)
},
{
AOM_CDF13(8203, 13821, 14524, 17105, 17439, 18131,
18404, 19468, 25225, 29485, 31158, 32342)
},
{
AOM_CDF13(8451, 9731, 15004, 17643, 18012, 18425,
19070, 21538, 24605, 29118, 30078, 32018)
},
{
AOM_CDF13(7714, 9048, 9516, 16667, 16817, 16994,
17153, 18767, 26743, 30389, 31536, 32528)
},
{
AOM_CDF13(8843, 10280, 11496, 15317, 16652, 17943,
19108, 22718, 25769, 29953, 30983, 32485)
}
},
{
{
AOM_CDF13(12578, 13671, 15979, 16834, 19075, 20913,
22989, 25449, 26219, 30214, 31150, 32477)
},
{
AOM_CDF13(9563, 13626, 15080, 15892, 17756, 20863,
22207, 24236, 25380, 29653, 31143, 32277)
},
{
AOM_CDF13(8356, 8901, 17616, 18256, 19350, 20106,
22598, 25947, 26466, 29900, 30523, 32261)
},
{
AOM_CDF13(10835, 11815, 13124, 16042, 17018, 18039,
18947, 22753, 24615, 29489, 30883, 32482)
},
{
AOM_CDF13(7618, 8288, 9859, 10509, 15386, 18657,
22903, 28776, 29180, 31355, 31802, 32593)
}
}
};
static const u16 default_angle_delta_cdf[DIRECTIONAL_MODES]
[CDF_SIZE(2 * MAX_ANGLE_DELTA + 1)] = {
{ AOM_CDF7(2180, 5032, 7567, 22776, 26989, 30217) },
{ AOM_CDF7(2301, 5608, 8801, 23487, 26974, 30330) },
{ AOM_CDF7(3780, 11018, 13699, 19354, 23083, 31286) },
{ AOM_CDF7(4581, 11226, 15147, 17138, 21834, 28397) },
{ AOM_CDF7(1737, 10927, 14509, 19588, 22745, 28823) },
{ AOM_CDF7(2664, 10176, 12485, 17650, 21600, 30495) },
{ AOM_CDF7(2240, 11096, 15453, 20341, 22561, 28917) },
{ AOM_CDF7(3605, 10428, 12459, 17676, 21244, 30655) }
};
static const u16 default_if_y_mode_cdf[BLOCK_SIZE_GROUPS][CDF_SIZE(AV1_INTRA_MODES)] = {
{
AOM_CDF13(22801, 23489, 24293, 24756, 25601, 26123,
26606, 27418, 27945, 29228, 29685, 30349)
},
{
AOM_CDF13(18673, 19845, 22631, 23318, 23950, 24649,
25527, 27364, 28152, 29701, 29984, 30852)
},
{
AOM_CDF13(19770, 20979, 23396, 23939, 24241, 24654,
25136, 27073, 27830, 29360, 29730, 30659)
},
{
AOM_CDF13(20155, 21301, 22838, 23178, 23261, 23533,
23703, 24804, 25352, 26575, 27016, 28049)
}
};
static const u16 default_uv_mode_cdf[CFL_ALLOWED_TYPES]
[AV1_INTRA_MODES][CDF_SIZE(UV_INTRA_MODES)] = {
{
{
AOM_CDF13(22631, 24152, 25378, 25661, 25986, 26520,
27055, 27923, 28244, 30059, 30941, 31961)
},
{
AOM_CDF13(9513, 26881, 26973, 27046, 27118, 27664,
27739, 27824, 28359, 29505, 29800, 31796)
},
{
AOM_CDF13(9845, 9915, 28663, 28704, 28757, 28780,
29198, 29822, 29854, 30764, 31777, 32029)
},
{
AOM_CDF13(13639, 13897, 14171, 25331, 25606, 25727,
25953, 27148, 28577, 30612, 31355, 32493)
},
{
AOM_CDF13(9764, 9835, 9930, 9954, 25386, 27053,
27958, 28148, 28243, 31101, 31744, 32363)
},
{
AOM_CDF13(11825, 13589, 13677, 13720, 15048, 29213,
29301, 29458, 29711, 31161, 31441, 32550)
},
{
AOM_CDF13(14175, 14399, 16608, 16821, 17718, 17775,
28551, 30200, 30245, 31837, 32342, 32667)
},
{
AOM_CDF13(12885, 13038, 14978, 15590, 15673, 15748,
16176, 29128, 29267, 30643, 31961, 32461)
},
{
AOM_CDF13(12026, 13661, 13874, 15305, 15490, 15726,
15995, 16273, 28443, 30388, 30767, 32416)
},
{
AOM_CDF13(19052, 19840, 20579, 20916, 21150, 21467,
21885, 22719, 23174, 28861, 30379, 32175)
},
{
AOM_CDF13(18627, 19649, 20974, 21219, 21492, 21816,
22199, 23119, 23527, 27053, 31397, 32148)
},
{
AOM_CDF13(17026, 19004, 19997, 20339, 20586, 21103,
21349, 21907, 22482, 25896, 26541, 31819)
},
{
AOM_CDF13(12124, 13759, 14959, 14992, 15007, 15051,
15078, 15166, 15255, 15753, 16039, 16606)
}
},
{
{
AOM_CDF14(10407, 11208, 12900, 13181, 13823, 14175,
14899, 15656, 15986, 20086, 20995, 22455,
24212)
},
{
AOM_CDF14(4532, 19780, 20057, 20215, 20428, 21071,
21199, 21451, 22099, 24228, 24693, 27032,
29472)
},
{
AOM_CDF14(5273, 5379, 20177, 20270, 20385, 20439,
20949, 21695, 21774, 23138, 24256, 24703,
26679)
},
{
AOM_CDF14(6740, 7167, 7662, 14152, 14536, 14785,
15034, 16741, 18371, 21520, 22206, 23389,
24182)
},
{
AOM_CDF14(4987, 5368, 5928, 6068, 19114, 20315, 21857,
22253, 22411, 24911, 25380, 26027, 26376)
},
{
AOM_CDF14(5370, 6889, 7247, 7393, 9498, 21114, 21402,
21753, 21981, 24780, 25386, 26517, 27176)
},
{
AOM_CDF14(4816, 4961, 7204, 7326, 8765, 8930, 20169,
20682, 20803, 23188, 23763, 24455, 24940)
},
{
AOM_CDF14(6608, 6740, 8529, 9049, 9257, 9356, 9735,
18827, 19059, 22336, 23204, 23964, 24793)
},
{
AOM_CDF14(5998, 7419, 7781, 8933, 9255, 9549, 9753,
10417, 18898, 22494, 23139, 24764, 25989)
},
{
AOM_CDF14(10660, 11298, 12550, 12957, 13322, 13624,
14040, 15004, 15534, 20714, 21789, 23443,
24861)
},
{
AOM_CDF14(10522, 11530, 12552, 12963, 13378, 13779,
14245, 15235, 15902, 20102, 22696, 23774,
25838)
},
{
AOM_CDF14(10099, 10691, 12639, 13049, 13386, 13665,
14125, 15163, 15636, 19676, 20474, 23519,
25208)
},
{
AOM_CDF14(3144, 5087, 7382, 7504, 7593, 7690, 7801,
8064, 8232, 9248, 9875, 10521, 29048)
}
}
};
static const u16 default_partition_cdf[13][16] = {
{
AOM_CDF4(19132, 25510, 30392), AOM_CDF4(13928, 19855, 28540),
AOM_CDF4(12522, 23679, 28629), AOM_CDF4(9896, 18783, 25853),
AOM_CDF2(11570), AOM_CDF2(16855), AOM_CDF3(9413, 22581)
},
{
AOM_CDF10(15597, 20929, 24571, 26706, 27664, 28821, 29601, 30571, 31902)
},
{
AOM_CDF10(7925, 11043, 16785, 22470, 23971, 25043, 26651, 28701, 29834)
},
{
AOM_CDF10(5414, 13269, 15111, 20488, 22360, 24500, 25537, 26336, 32117)
},
{
AOM_CDF10(2662, 6362, 8614, 20860, 23053, 24778, 26436, 27829, 31171)
},
{
AOM_CDF10(18462, 20920, 23124, 27647, 28227, 29049, 29519, 30178, 31544)
},
{
AOM_CDF10(7689, 9060, 12056, 24992, 25660, 26182, 26951, 28041, 29052)
},
{
AOM_CDF10(6015, 9009, 10062, 24544, 25409, 26545, 27071, 27526, 32047)
},
{
AOM_CDF10(1394, 2208, 2796, 28614, 29061, 29466, 29840, 30185, 31899)
},
{
AOM_CDF10(20137, 21547, 23078, 29566, 29837, 30261, 30524, 30892, 31724),
AOM_CDF8(27899, 28219, 28529, 32484, 32539, 32619, 32639)
},
{
AOM_CDF10(6732, 7490, 9497, 27944, 28250, 28515, 28969, 29630, 30104),
AOM_CDF8(6607, 6990, 8268, 32060, 32219, 32338, 32371)
},
{
AOM_CDF10(5945, 7663, 8348, 28683, 29117, 29749, 30064, 30298, 32238),
AOM_CDF8(5429, 6676, 7122, 32027, 32227, 32531, 32582)
},
{
AOM_CDF10(870, 1212, 1487, 31198, 31394, 31574, 31743, 31881, 32332),
AOM_CDF8(711, 966, 1172, 32448, 32538, 32617, 32664)
},
};
static const u16 default_intra_ext_tx0_cdf[EXTTX_SIZES][AV1_INTRA_MODES][8] = {
{
{ AOM_CDF7(1535, 8035, 9461, 12751, 23467, 27825)},
{ AOM_CDF7(564, 3335, 9709, 10870, 18143, 28094)},
{ AOM_CDF7(672, 3247, 3676, 11982, 19415, 23127)},
{ AOM_CDF7(5279, 13885, 15487, 18044, 23527, 30252)},
{ AOM_CDF7(4423, 6074, 7985, 10416, 25693, 29298)},
{ AOM_CDF7(1486, 4241, 9460, 10662, 16456, 27694)},
{ AOM_CDF7(439, 2838, 3522, 6737, 18058, 23754)},
{ AOM_CDF7(1190, 4233, 4855, 11670, 20281, 24377)},
{ AOM_CDF7(1045, 4312, 8647, 10159, 18644, 29335)},
{ AOM_CDF7(202, 3734, 4747, 7298, 17127, 24016)},
{ AOM_CDF7(447, 4312, 6819, 8884, 16010, 23858)},
{ AOM_CDF7(277, 4369, 5255, 8905, 16465, 22271)},
{ AOM_CDF7(3409, 5436, 10599, 15599, 19687, 24040)},
},
{
{ AOM_CDF7(1870, 13742, 14530, 16498, 23770, 27698)},
{ AOM_CDF7(326, 8796, 14632, 15079, 19272, 27486)},
{ AOM_CDF7(484, 7576, 7712, 14443, 19159, 22591)},
{ AOM_CDF7(1126, 15340, 15895, 17023, 20896, 30279)},
{ AOM_CDF7(655, 4854, 5249, 5913, 22099, 27138)},
{ AOM_CDF7(1299, 6458, 8885, 9290, 14851, 25497)},
{ AOM_CDF7(311, 5295, 5552, 6885, 16107, 22672)},
{ AOM_CDF7(883, 8059, 8270, 11258, 17289, 21549)},
{ AOM_CDF7(741, 7580, 9318, 10345, 16688, 29046)},
{ AOM_CDF7(110, 7406, 7915, 9195, 16041, 23329)},
{ AOM_CDF7(363, 7974, 9357, 10673, 15629, 24474)},
{ AOM_CDF7(153, 7647, 8112, 9936, 15307, 19996)},
{ AOM_CDF7(3511, 6332, 11165, 15335, 19323, 23594)},
},
{
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
},
{
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
{ AOM_CDF7(4681, 9362, 14043, 18725, 23406, 28087)},
},
};
static const u16 default_intra_ext_tx1_cdf[EXTTX_SIZES][AV1_INTRA_MODES][4] = {
{
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
},
{
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
},
{
{ AOM_CDF5(1127, 12814, 22772, 27483)},
{ AOM_CDF5(145, 6761, 11980, 26667)},
{ AOM_CDF5(362, 5887, 11678, 16725)},
{ AOM_CDF5(385, 15213, 18587, 30693)},
{ AOM_CDF5(25, 2914, 23134, 27903)},
{ AOM_CDF5(60, 4470, 11749, 23991)},
{ AOM_CDF5(37, 3332, 14511, 21448)},
{ AOM_CDF5(157, 6320, 13036, 17439)},
{ AOM_CDF5(119, 6719, 12906, 29396)},
{ AOM_CDF5(47, 5537, 12576, 21499)},
{ AOM_CDF5(269, 6076, 11258, 23115)},
{ AOM_CDF5(83, 5615, 12001, 17228)},
{ AOM_CDF5(1968, 5556, 12023, 18547)},
},
{
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
{ AOM_CDF5(6554, 13107, 19661, 26214)},
},
};
static const u16 default_inter_ext_tx_cdf[2][EXTTX_SIZES][EXT_TX_TYPES] = {
{
{
AOM_CDF16(4458, 5560, 7695, 9709, 13330, 14789, 17537, 20266,
21504, 22848, 23934, 25474, 27727, 28915, 30631)
},
{
AOM_CDF16(1645, 2573, 4778, 5711, 7807, 8622, 10522, 15357, 17674,
20408, 22517, 25010, 27116, 28856, 30749)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
18432, 20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
18432, 20480, 22528, 24576, 26624, 28672, 30720)
},
},
{
{
AOM_CDF12(2731, 5461, 8192, 10923, 13653, 16384, 19115, 21845,
24576, 27307, 30037),
AOM_CDF2(16384)
},
{
AOM_CDF12(2731, 5461, 8192, 10923, 13653, 16384, 19115, 21845,
24576, 27307, 30037),
AOM_CDF2(4167)
},
{
AOM_CDF12(770, 2421, 5225, 12907, 15819, 18927, 21561, 24089,
26595, 28526, 30529),
AOM_CDF2(1998)
},
{
AOM_CDF12(2731, 5461, 8192, 10923, 13653, 16384, 19115, 21845,
24576, 27307, 30037),
AOM_CDF2(748)
},
}
};
static const u16 default_cfl_sign_cdf[CDF_SIZE(CFL_JOINT_SIGNS)] = {
AOM_CDF8(1418, 2123, 13340, 18405, 26972, 28343, 32294)
};
static const u16 default_cfl_alpha_cdf[CFL_ALPHA_CONTEXTS][CDF_SIZE(CFL_ALPHABET_SIZE)] = {
{
AOM_CDF16(7637, 20719, 31401, 32481, 32657, 32688, 32692, 32696, 32700,
32704, 32708, 32712, 32716, 32720, 32724)
},
{
AOM_CDF16(14365, 23603, 28135, 31168, 32167, 32395, 32487, 32573,
32620, 32647, 32668, 32672, 32676, 32680, 32684)
},
{
AOM_CDF16(11532, 22380, 28445, 31360, 32349, 32523, 32584, 32649,
32673, 32677, 32681, 32685, 32689, 32693, 32697)
},
{
AOM_CDF16(26990, 31402, 32282, 32571, 32692, 32696, 32700, 32704,
32708, 32712, 32716, 32720, 32724, 32728, 32732)
},
{
AOM_CDF16(17248, 26058, 28904, 30608, 31305, 31877, 32126, 32321,
32394, 32464, 32516, 32560, 32576, 32593, 32622)
},
{
AOM_CDF16(14738, 21678, 25779, 27901, 29024, 30302, 30980, 31843,
32144, 32413, 32520, 32594, 32622, 32656, 32660)
}
};
static const u16 default_switchable_interp_cdf[SWITCHABLE_FILTER_CONTEXTS]
[CDF_SIZE(AV1_SWITCHABLE_FILTERS)] = {
{ AOM_CDF3(31935, 32720) }, { AOM_CDF3(5568, 32719) },
{ AOM_CDF3(422, 2938) }, { AOM_CDF3(28244, 32608) },
{ AOM_CDF3(31206, 31953) }, { AOM_CDF3(4862, 32121) },
{ AOM_CDF3(770, 1152) }, { AOM_CDF3(20889, 25637) },
{ AOM_CDF3(31910, 32724) }, { AOM_CDF3(4120, 32712) },
{ AOM_CDF3(305, 2247) }, { AOM_CDF3(27403, 32636) },
{ AOM_CDF3(31022, 32009) }, { AOM_CDF3(2963, 32093) },
{ AOM_CDF3(601, 943) }, { AOM_CDF3(14969, 21398) }
};
static const u16 default_newmv_cdf[NEWMV_MODE_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(24035) }, { AOM_CDF2(16630) }, { AOM_CDF2(15339) },
{ AOM_CDF2(8386) }, { AOM_CDF2(12222) }, { AOM_CDF2(4676) }
};
static const u16 default_zeromv_cdf[GLOBALMV_MODE_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(2175) }, { AOM_CDF2(1054) }
};
static const u16 default_refmv_cdf[REFMV_MODE_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(23974) }, { AOM_CDF2(24188) }, { AOM_CDF2(17848) },
{ AOM_CDF2(28622) }, { AOM_CDF2(24312) }, { AOM_CDF2(19923) }
};
static const u16 default_drl_cdf[DRL_MODE_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(13104) }, { AOM_CDF2(24560) }, { AOM_CDF2(18945) }
};
static const u16 default_inter_compound_mode_cdf[AV1_INTER_MODE_CONTEXTS]
[CDF_SIZE(INTER_COMPOUND_MODES)] = {
{ AOM_CDF8(7760, 13823, 15808, 17641, 19156, 20666, 26891) },
{ AOM_CDF8(10730, 19452, 21145, 22749, 24039, 25131, 28724) },
{ AOM_CDF8(10664, 20221, 21588, 22906, 24295, 25387, 28436) },
{ AOM_CDF8(13298, 16984, 20471, 24182, 25067, 25736, 26422) },
{ AOM_CDF8(18904, 23325, 25242, 27432, 27898, 28258, 30758) },
{ AOM_CDF8(10725, 17454, 20124, 22820, 24195, 25168, 26046) },
{ AOM_CDF8(17125, 24273, 25814, 27492, 28214, 28704, 30592) },
{ AOM_CDF8(13046, 23214, 24505, 25942, 27435, 28442, 29330) }
};
static const u16 default_interintra_cdf[BLOCK_SIZE_GROUPS][CDF_SIZE(2)] = {
{ AOM_CDF2(16384) }, { AOM_CDF2(26887) }, { AOM_CDF2(27597) },
{ AOM_CDF2(30237) }
};
static const u16 default_interintra_mode_cdf[BLOCK_SIZE_GROUPS][CDF_SIZE(INTERINTRA_MODES)] = {
{ AOM_CDF4(8192, 16384, 24576) },
{ AOM_CDF4(1875, 11082, 27332) },
{ AOM_CDF4(2473, 9996, 26388) },
{ AOM_CDF4(4238, 11537, 25926) }
};
static const u16 default_wedge_interintra_cdf[BLOCK_SIZES_ALL][CDF_SIZE(2)] = {
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(20036) }, { AOM_CDF2(24957) }, { AOM_CDF2(26704) },
{ AOM_CDF2(27530) }, { AOM_CDF2(29564) }, { AOM_CDF2(29444) },
{ AOM_CDF2(26872) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }
};
static const u16 default_compound_type_cdf[BLOCK_SIZES_ALL][CDF_SIZE(COMPOUND_TYPES - 1)] = {
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(23431) },
{ AOM_CDF2(13171) }, { AOM_CDF2(11470) }, { AOM_CDF2(9770) },
{ AOM_CDF2(9100) },
{ AOM_CDF2(8233) }, { AOM_CDF2(6172) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(11820) },
{ AOM_CDF2(7701) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }
};
static const u16 default_wedge_idx_cdf[BLOCK_SIZES_ALL][CDF_SIZE(16)] = {
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
18432, 20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
18432, 20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384,
18432, 20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2438, 4440, 6599, 8663, 11005, 12874, 15751, 18094,
20359, 22362, 24127, 25702, 27752, 29450, 31171)
},
{
AOM_CDF16(806, 3266, 6005, 6738, 7218, 7367, 7771, 14588, 16323,
17367, 18452, 19422, 22839, 26127, 29629)
},
{
AOM_CDF16(2779, 3738, 4683, 7213, 7775, 8017, 8655, 14357, 17939,
21332, 24520, 27470, 29456, 30529, 31656)
},
{
AOM_CDF16(1684, 3625, 5675, 7108, 9302, 11274, 14429, 17144, 19163,
20961, 22884, 24471, 26719, 28714, 30877)
},
{
AOM_CDF16(1142, 3491, 6277, 7314, 8089, 8355, 9023, 13624, 15369,
16730, 18114, 19313, 22521, 26012, 29550)
},
{
AOM_CDF16(2742, 4195, 5727, 8035, 8980, 9336, 10146, 14124, 17270,
20533, 23434, 25972, 27944, 29570, 31416)
},
{
AOM_CDF16(1727, 3948, 6101, 7796, 9841, 12344, 15766, 18944, 20638,
22038, 23963, 25311, 26988, 28766, 31012)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(154, 987, 1925, 2051, 2088, 2111, 2151, 23033, 23703, 24284,
24985, 25684, 27259, 28883, 30911)
},
{
AOM_CDF16(1135, 1322, 1493, 2635, 2696, 2737, 2770, 21016, 22935,
25057, 27251, 29173, 30089, 30960, 31933)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
20480, 22528, 24576, 26624, 28672, 30720)
},
{
AOM_CDF16(2048, 4096, 6144, 8192, 10240, 12288, 14336, 16384, 18432,
20480, 22528, 24576, 26624, 28672, 30720)
}
};
static const u16 default_motion_mode_cdf[BLOCK_SIZES_ALL][CDF_SIZE(MOTION_MODES)] = {
{ AOM_CDF3(10923, 21845) }, { AOM_CDF3(10923, 21845) },
{ AOM_CDF3(10923, 21845) }, { AOM_CDF3(7651, 24760) },
{ AOM_CDF3(4738, 24765) }, { AOM_CDF3(5391, 25528) },
{ AOM_CDF3(19419, 26810) }, { AOM_CDF3(5123, 23606) },
{ AOM_CDF3(11606, 24308) }, { AOM_CDF3(26260, 29116) },
{ AOM_CDF3(20360, 28062) }, { AOM_CDF3(21679, 26830) },
{ AOM_CDF3(29516, 30701) }, { AOM_CDF3(28898, 30397) },
{ AOM_CDF3(30878, 31335) }, { AOM_CDF3(32507, 32558) },
{ AOM_CDF3(10923, 21845) }, { AOM_CDF3(10923, 21845) },
{ AOM_CDF3(28799, 31390) }, { AOM_CDF3(26431, 30774) },
{ AOM_CDF3(28973, 31594) }, { AOM_CDF3(29742, 31203) }
};
static const u16 default_obmc_cdf[BLOCK_SIZES_ALL][CDF_SIZE(2)] = {
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(10437) },
{ AOM_CDF2(9371) }, { AOM_CDF2(9301) }, { AOM_CDF2(17432) },
{ AOM_CDF2(14423) },
{ AOM_CDF2(15142) }, { AOM_CDF2(25817) }, { AOM_CDF2(22823) },
{ AOM_CDF2(22083) },
{ AOM_CDF2(30128) }, { AOM_CDF2(31014) }, { AOM_CDF2(31560) },
{ AOM_CDF2(32638) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(23664) },
{ AOM_CDF2(20901) },
{ AOM_CDF2(24008) }, { AOM_CDF2(26879) }
};
static const u16 default_intra_inter_cdf[INTRA_INTER_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(806) },
{ AOM_CDF2(16662) },
{ AOM_CDF2(20186) },
{ AOM_CDF2(26538) }
};
static const u16 default_comp_inter_cdf[COMP_INTER_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(26828) },
{ AOM_CDF2(24035) },
{ AOM_CDF2(12031) },
{ AOM_CDF2(10640) },
{ AOM_CDF2(2901) }
};
static const u16 default_comp_ref_type_cdf[COMP_REF_TYPE_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(1198) },
{ AOM_CDF2(2070) },
{ AOM_CDF2(9166) },
{ AOM_CDF2(7499) },
{ AOM_CDF2(22475) }
};
static const u16 default_uni_comp_ref_cdf[UNI_COMP_REF_CONTEXTS]
[UNIDIR_COMP_REFS - 1][CDF_SIZE(2)] = {
{ { AOM_CDF2(5284)}, { AOM_CDF2(3865)}, { AOM_CDF2(3128)} },
{ { AOM_CDF2(23152)}, { AOM_CDF2(14173)}, { AOM_CDF2(15270)} },
{ { AOM_CDF2(31774)}, { AOM_CDF2(25120)}, { AOM_CDF2(26710)} }
};
static const u16 default_single_ref_cdf[REF_CONTEXTS][SINGLE_REFS - 1][CDF_SIZE(2)] = {
{
{ AOM_CDF2(4897)},
{ AOM_CDF2(1555)},
{ AOM_CDF2(4236)},
{ AOM_CDF2(8650)},
{ AOM_CDF2(904)},
{ AOM_CDF2(1444)}
},
{
{ AOM_CDF2(16973)},
{ AOM_CDF2(16751)},
{ AOM_CDF2(19647)},
{ AOM_CDF2(24773)},
{ AOM_CDF2(11014)},
{ AOM_CDF2(15087)}
},
{
{ AOM_CDF2(29744)},
{ AOM_CDF2(30279)},
{ AOM_CDF2(31194)},
{ AOM_CDF2(31895)},
{ AOM_CDF2(26875)},
{ AOM_CDF2(30304)}
}
};
static const u16 default_comp_ref_cdf[REF_CONTEXTS][FWD_REFS - 1][CDF_SIZE(2)] = {
{ { AOM_CDF2(4946)}, { AOM_CDF2(9468)}, { AOM_CDF2(1503)} },
{ { AOM_CDF2(19891)}, { AOM_CDF2(22441)}, { AOM_CDF2(15160)} },
{ { AOM_CDF2(30731)}, { AOM_CDF2(31059)}, { AOM_CDF2(27544)} }
};
static const u16 default_comp_bwdref_cdf[REF_CONTEXTS][BWD_REFS - 1][CDF_SIZE(2)] = {
{ { AOM_CDF2(2235)}, { AOM_CDF2(1423)} },
{ { AOM_CDF2(17182)}, { AOM_CDF2(15175)} },
{ { AOM_CDF2(30606)}, { AOM_CDF2(30489)} }
};
static const u16 default_palette_y_size_cdf[PALETTE_BLOCK_SIZES][CDF_SIZE(PALETTE_SIZES)] = {
{ AOM_CDF7(7952, 13000, 18149, 21478, 25527, 29241) },
{ AOM_CDF7(7139, 11421, 16195, 19544, 23666, 28073) },
{ AOM_CDF7(7788, 12741, 17325, 20500, 24315, 28530) },
{ AOM_CDF7(8271, 14064, 18246, 21564, 25071, 28533) },
{ AOM_CDF7(12725, 19180, 21863, 24839, 27535, 30120) },
{ AOM_CDF7(9711, 14888, 16923, 21052, 25661, 27875) },
{ AOM_CDF7(14940, 20797, 21678, 24186, 27033, 28999) }
};
static const u16 default_palette_uv_size_cdf[PALETTE_BLOCK_SIZES][CDF_SIZE(PALETTE_SIZES)] = {
{ AOM_CDF7(8713, 19979, 27128, 29609, 31331, 32272) },
{ AOM_CDF7(5839, 15573, 23581, 26947, 29848, 31700) },
{ AOM_CDF7(4426, 11260, 17999, 21483, 25863, 29430) },
{ AOM_CDF7(3228, 9464, 14993, 18089, 22523, 27420) },
{ AOM_CDF7(3768, 8886, 13091, 17852, 22495, 27207) },
{ AOM_CDF7(2464, 8451, 12861, 21632, 25525, 28555) },
{ AOM_CDF7(1269, 5435, 10433, 18963, 21700, 25865) }
};
static const u16 default_palette_y_mode_cdf[PALETTE_BLOCK_SIZES]
[PALETTE_Y_MODE_CONTEXTS][CDF_SIZE(2)] = {
{ { AOM_CDF2(31676)}, { AOM_CDF2(3419)}, { AOM_CDF2(1261)} },
{ { AOM_CDF2(31912)}, { AOM_CDF2(2859)}, { AOM_CDF2(980)} },
{ { AOM_CDF2(31823)}, { AOM_CDF2(3400)}, { AOM_CDF2(781)} },
{ { AOM_CDF2(32030)}, { AOM_CDF2(3561)}, { AOM_CDF2(904)} },
{ { AOM_CDF2(32309)}, { AOM_CDF2(7337)}, { AOM_CDF2(1462)} },
{ { AOM_CDF2(32265)}, { AOM_CDF2(4015)}, { AOM_CDF2(1521)} },
{ { AOM_CDF2(32450)}, { AOM_CDF2(7946)}, { AOM_CDF2(129)} }
};
static const u16 default_palette_uv_mode_cdf[PALETTE_UV_MODE_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(32461) }, { AOM_CDF2(21488) }
};
static const u16 default_palette_y_color_index_cdf[PALETTE_IDX_CONTEXTS][8] = {
// Palette sizes 2 & 8
{
AOM_CDF2(28710),
AOM_CDF8(21689, 23883, 25163, 26352, 27506, 28827, 30195)
},
{
AOM_CDF2(16384),
AOM_CDF8(6892, 15385, 17840, 21606, 24287, 26753, 29204)
},
{
AOM_CDF2(10553),
AOM_CDF8(5651, 23182, 25042, 26518, 27982, 29392, 30900)
},
{
AOM_CDF2(27036),
AOM_CDF8(19349, 22578, 24418, 25994, 27524, 29031, 30448)
},
{
AOM_CDF2(31603),
AOM_CDF8(31028, 31270, 31504, 31705, 31927, 32153, 32392)
},
// Palette sizes 3 & 7
{
AOM_CDF3(27877, 30490),
AOM_CDF7(23105, 25199, 26464, 27684, 28931, 30318)
},
{
AOM_CDF3(11532, 25697),
AOM_CDF7(6950, 15447, 18952, 22681, 25567, 28563)
},
{
AOM_CDF3(6544, 30234),
AOM_CDF7(7560, 23474, 25490, 27203, 28921, 30708)
},
{
AOM_CDF3(23018, 28072),
AOM_CDF7(18544, 22373, 24457, 26195, 28119, 30045)
},
{
AOM_CDF3(31915, 32385),
AOM_CDF7(31198, 31451, 31670, 31882, 32123, 32391)
},
// Palette sizes 4 & 6
{
AOM_CDF4(25572, 28046, 30045),
AOM_CDF6(23132, 25407, 26970, 28435, 30073)
},
{
AOM_CDF4(9478, 21590, 27256),
AOM_CDF6(7443, 17242, 20717, 24762, 27982)
},
{
AOM_CDF4(7248, 26837, 29824),
AOM_CDF6(6300, 24862, 26944, 28784, 30671)
},
{
AOM_CDF4(19167, 24486, 28349),
AOM_CDF6(18916, 22895, 25267, 27435, 29652)
},
{
AOM_CDF4(31400, 31825, 32250),
AOM_CDF6(31270, 31550, 31808, 32059, 32353)
},
// Palette size 5
{
AOM_CDF5(24779, 26955, 28576, 30282),
AOM_CDF5(8669, 20364, 24073, 28093)
},
{
AOM_CDF5(4255, 27565, 29377, 31067),
AOM_CDF5(19864, 23674, 26716, 29530)
},
{
AOM_CDF5(31646, 31893, 32147, 32426),
0, 0, 0, 0
}
};
static const u16 default_palette_uv_color_index_cdf[PALETTE_IDX_CONTEXTS][8] = {
// Palette sizes 2 & 8
{
AOM_CDF2(29089),
AOM_CDF8(21442, 23288, 24758, 26246, 27649, 28980, 30563)
},
{
AOM_CDF2(16384),
AOM_CDF8(5863, 14933, 17552, 20668, 23683, 26411, 29273)
},
{
AOM_CDF2(8713),
AOM_CDF8(3415, 25810, 26877, 27990, 29223, 30394, 31618)
},
{
AOM_CDF2(29257),
AOM_CDF8(17965, 20084, 22232, 23974, 26274, 28402, 30390)
},
{
AOM_CDF2(31610),
AOM_CDF8(31190, 31329, 31516, 31679, 31825, 32026, 32322)
},
// Palette sizes 3 & 7
{
AOM_CDF3(25257, 29145),
AOM_CDF7(21239, 23168, 25044, 26962, 28705, 30506)
},
{
AOM_CDF3(12287, 27293),
AOM_CDF7(6545, 15012, 18004, 21817, 25503, 28701)
},
{
AOM_CDF3(7033, 27960),
AOM_CDF7(3448, 26295, 27437, 28704, 30126, 31442)
},
{
AOM_CDF3(20145, 25405),
AOM_CDF7(15889, 18323, 21704, 24698, 26976, 29690)
},
{
AOM_CDF3(30608, 31639),
AOM_CDF7(30988, 31204, 31479, 31734, 31983, 32325)
},
// Palette sizes 4 & 6
{
AOM_CDF4(24210, 27175, 29903),
AOM_CDF6(22217, 24567, 26637, 28683, 30548)
},
{
AOM_CDF4(9888, 22386, 27214),
AOM_CDF6(7307, 16406, 19636, 24632, 28424)
},
{
AOM_CDF4(5901, 26053, 29293),
AOM_CDF6(4441, 25064, 26879, 28942, 30919)
},
{
AOM_CDF4(18318, 22152, 28333),
AOM_CDF6(17210, 20528, 23319, 26750, 29582)
},
{
AOM_CDF4(30459, 31136, 31926),
AOM_CDF6(30674, 30953, 31396, 31735, 32207)
},
// Palette size 5
{
AOM_CDF5(22980, 25479, 27781, 29986),
AOM_CDF5(8413, 21408, 24859, 28874)
},
{
AOM_CDF5(2257, 29449, 30594, 31598),
AOM_CDF5(19189, 21202, 25915, 28620)
},
{
AOM_CDF5(31844, 32044, 32281, 32518),
0, 0, 0, 0
}
};
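/*
 * Layout note (editorial, inferred from the initializers above): each
 * 8 x u16 row of the two palette color-index tables packs the CDFs for a
 * pair of palette sizes summing to 10 (2 & 8, 3 & 7, 4 & 6, 5 & 5), so
 * every row holds exactly (n - 1) + (10 - n - 1) == 8 entries; the last
 * row of each table pads its unused half with explicit zeros.
 */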
static const u16 default_txfm_partition_cdf[TXFM_PARTITION_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(28581) }, { AOM_CDF2(23846) }, { AOM_CDF2(20847) },
{ AOM_CDF2(24315) }, { AOM_CDF2(18196) }, { AOM_CDF2(12133) },
{ AOM_CDF2(18791) }, { AOM_CDF2(10887) }, { AOM_CDF2(11005) },
{ AOM_CDF2(27179) }, { AOM_CDF2(20004) }, { AOM_CDF2(11281) },
{ AOM_CDF2(26549) }, { AOM_CDF2(19308) }, { AOM_CDF2(14224) },
{ AOM_CDF2(28015) }, { AOM_CDF2(21546) }, { AOM_CDF2(14400) },
{ AOM_CDF2(28165) }, { AOM_CDF2(22401) }, { AOM_CDF2(16088) }
};
static const u16 default_skip_cdfs[SKIP_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(31671) }, { AOM_CDF2(16515) }, { AOM_CDF2(4576) }
};
static const u16 default_skip_mode_cdfs[SKIP_MODE_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(32621) }, { AOM_CDF2(20708) }, { AOM_CDF2(8127) }
};
static const u16 default_compound_idx_cdfs[COMP_INDEX_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(18244) }, { AOM_CDF2(12865) }, { AOM_CDF2(7053) },
{ AOM_CDF2(13259) }, { AOM_CDF2(9334) }, { AOM_CDF2(4644) }
};
static const u16 default_comp_group_idx_cdfs[COMP_GROUP_IDX_CONTEXTS][CDF_SIZE(2)] = {
{ AOM_CDF2(26607) }, { AOM_CDF2(22891) }, { AOM_CDF2(18840) },
{ AOM_CDF2(24594) }, { AOM_CDF2(19934) }, { AOM_CDF2(22674) }
};
static const u16 default_intrabc_cdf[CDF_SIZE(2)] = { AOM_CDF2(30531) };
static const u16 default_filter_intra_mode_cdf[CDF_SIZE(FILTER_INTRA_MODES)] = {
AOM_CDF5(8949, 12776, 17211, 29558)
};
static const u16 default_filter_intra_cdfs[BLOCK_SIZES_ALL][CDF_SIZE(2)] = {
{ AOM_CDF2(4621) }, { AOM_CDF2(6743) }, { AOM_CDF2(5893) }, { AOM_CDF2(7866) },
{ AOM_CDF2(12551) }, { AOM_CDF2(9394) }, { AOM_CDF2(12408) }, { AOM_CDF2(14301) },
{ AOM_CDF2(12756) }, { AOM_CDF2(22343) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) }, { AOM_CDF2(16384) },
{ AOM_CDF2(12770) }, { AOM_CDF2(10368) }, { AOM_CDF2(20229) }, { AOM_CDF2(18101) },
{ AOM_CDF2(16384) }, { AOM_CDF2(16384) }
};
static const u16 default_delta_q_cdf[CDF_SIZE(DELTA_Q_PROBS + 1)] = {
AOM_CDF4(28160, 32120, 32677)
};
static const u16 default_delta_lf_multi_cdf[FRAME_LF_COUNT][CDF_SIZE(DELTA_LF_PROBS + 1)] = {
{ AOM_CDF4(28160, 32120, 32677) },
{ AOM_CDF4(28160, 32120, 32677) },
{ AOM_CDF4(28160, 32120, 32677) },
{ AOM_CDF4(28160, 32120, 32677) }
};
static const u16 default_delta_lf_cdf[CDF_SIZE(DELTA_LF_PROBS + 1)] = {
AOM_CDF4(28160, 32120, 32677)
};
static const u16 default_segment_pred_cdf[SEG_TEMPORAL_PRED_CTXS][CDF_SIZE(2)] = {
{ AOM_CDF2(128 * 128) },
{ AOM_CDF2(128 * 128) },
{ AOM_CDF2(128 * 128) }
};
static const u16 default_spatial_pred_seg_tree_cdf[SPATIAL_PREDICTION_PROBS]
[CDF_SIZE(MAX_SEGMENTS)] = {
{
AOM_CDF8(5622, 7893, 16093, 18233, 27809, 28373, 32533),
},
{
AOM_CDF8(14274, 18230, 22557, 24935, 29980, 30851, 32344),
},
{
AOM_CDF8(27527, 28487, 28723, 28890, 32397, 32647, 32679),
},
};
static const u16 default_tx_size_cdf[MAX_TX_CATS]
[AV1_TX_SIZE_CONTEXTS][CDF_SIZE(MAX_TX_DEPTH + 1)] = {
{
{ AOM_CDF2(19968)},
{ AOM_CDF2(19968)},
{ AOM_CDF2(24320)}
},
{
{ AOM_CDF3(12272, 30172)},
{ AOM_CDF3(12272, 30172)},
{ AOM_CDF3(18677, 30848)}
},
{
{ AOM_CDF3(12986, 15180)},
{ AOM_CDF3(12986, 15180)},
{ AOM_CDF3(24302, 25602)}
},
{
{ AOM_CDF3(5782, 11475)},
{ AOM_CDF3(5782, 11475)},
{ AOM_CDF3(16803, 22759)}
},
};
static const u16 av1_default_dc_sign_cdfs[TOKEN_CDF_Q_CTXS]
[PLANE_TYPES][DC_SIGN_CONTEXTS][CDF_SIZE(2)] = {
{
{
{ AOM_CDF2(128 * 125)},
{ AOM_CDF2(128 * 102)},
{ AOM_CDF2(128 * 147)},
},
{
{ AOM_CDF2(128 * 119)},
{ AOM_CDF2(128 * 101)},
{ AOM_CDF2(128 * 135)},
}
},
{
{
{ AOM_CDF2(128 * 125)},
{ AOM_CDF2(128 * 102)},
{ AOM_CDF2(128 * 147)},
},
{
{ AOM_CDF2(128 * 119)},
{ AOM_CDF2(128 * 101)},
{ AOM_CDF2(128 * 135)},
}
},
{
{
{ AOM_CDF2(128 * 125)},
{ AOM_CDF2(128 * 102)},
{ AOM_CDF2(128 * 147)},
},
{
{ AOM_CDF2(128 * 119)},
{ AOM_CDF2(128 * 101)},
{ AOM_CDF2(128 * 135)},
}
},
{
{
{ AOM_CDF2(128 * 125)},
{ AOM_CDF2(128 * 102)},
{ AOM_CDF2(128 * 147)},
},
{
{ AOM_CDF2(128 * 119)},
{ AOM_CDF2(128 * 101)},
{ AOM_CDF2(128 * 135)},
}
},
};
static const u16 av1_default_txb_skip_cdfs[TOKEN_CDF_Q_CTXS]
[TX_SIZES][TXB_SKIP_CONTEXTS][CDF_SIZE(2)] = {
{
{
{ AOM_CDF2(31849)},
{ AOM_CDF2(5892)},
{ AOM_CDF2(12112)},
{ AOM_CDF2(21935)},
{ AOM_CDF2(20289)},
{ AOM_CDF2(27473)},
{ AOM_CDF2(32487)},
{ AOM_CDF2(7654)},
{ AOM_CDF2(19473)},
{ AOM_CDF2(29984)},
{ AOM_CDF2(9961)},
{ AOM_CDF2(30242)},
{ AOM_CDF2(32117)}
},
{
{ AOM_CDF2(31548)},
{ AOM_CDF2(1549)},
{ AOM_CDF2(10130)},
{ AOM_CDF2(16656)},
{ AOM_CDF2(18591)},
{ AOM_CDF2(26308)},
{ AOM_CDF2(32537)},
{ AOM_CDF2(5403)},
{ AOM_CDF2(18096)},
{ AOM_CDF2(30003)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
},
{
{ AOM_CDF2(29957)},
{ AOM_CDF2(5391)},
{ AOM_CDF2(18039)},
{ AOM_CDF2(23566)},
{ AOM_CDF2(22431)},
{ AOM_CDF2(25822)},
{ AOM_CDF2(32197)},
{ AOM_CDF2(3778)},
{ AOM_CDF2(15336)},
{ AOM_CDF2(28981)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
},
{
{ AOM_CDF2(17920)},
{ AOM_CDF2(1818)},
{ AOM_CDF2(7282)},
{ AOM_CDF2(25273)},
{ AOM_CDF2(10923)},
{ AOM_CDF2(31554)},
{ AOM_CDF2(32624)},
{ AOM_CDF2(1366)},
{ AOM_CDF2(15628)},
{ AOM_CDF2(30462)},
{ AOM_CDF2(146)},
{ AOM_CDF2(5132)},
{ AOM_CDF2(31657)}
},
{
{ AOM_CDF2(6308)},
{ AOM_CDF2(117)},
{ AOM_CDF2(1638)},
{ AOM_CDF2(2161)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(10923)},
{ AOM_CDF2(30247)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
}
},
{
{
{ AOM_CDF2(30371)},
{ AOM_CDF2(7570)},
{ AOM_CDF2(13155)},
{ AOM_CDF2(20751)},
{ AOM_CDF2(20969)},
{ AOM_CDF2(27067)},
{ AOM_CDF2(32013)},
{ AOM_CDF2(5495)},
{ AOM_CDF2(17942)},
{ AOM_CDF2(28280)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
},
{
{ AOM_CDF2(31782)},
{ AOM_CDF2(1836)},
{ AOM_CDF2(10689)},
{ AOM_CDF2(17604)},
{ AOM_CDF2(21622)},
{ AOM_CDF2(27518)},
{ AOM_CDF2(32399)},
{ AOM_CDF2(4419)},
{ AOM_CDF2(16294)},
{ AOM_CDF2(28345)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
},
{
{ AOM_CDF2(31901)},
{ AOM_CDF2(10311)},
{ AOM_CDF2(18047)},
{ AOM_CDF2(24806)},
{ AOM_CDF2(23288)},
{ AOM_CDF2(27914)},
{ AOM_CDF2(32296)},
{ AOM_CDF2(4215)},
{ AOM_CDF2(15756)},
{ AOM_CDF2(28341)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
},
{
{ AOM_CDF2(26726)},
{ AOM_CDF2(1045)},
{ AOM_CDF2(11703)},
{ AOM_CDF2(20590)},
{ AOM_CDF2(18554)},
{ AOM_CDF2(25970)},
{ AOM_CDF2(31938)},
{ AOM_CDF2(5583)},
{ AOM_CDF2(21313)},
{ AOM_CDF2(29390)},
{ AOM_CDF2(641)},
{ AOM_CDF2(22265)},
{ AOM_CDF2(31452)}
},
{
{ AOM_CDF2(26584)},
{ AOM_CDF2(188)},
{ AOM_CDF2(8847)},
{ AOM_CDF2(24519)},
{ AOM_CDF2(22938)},
{ AOM_CDF2(30583)},
{ AOM_CDF2(32608)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
}
},
{
{
{ AOM_CDF2(29614)},
{ AOM_CDF2(9068)},
{ AOM_CDF2(12924)},
{ AOM_CDF2(19538)},
{ AOM_CDF2(17737)},
{ AOM_CDF2(24619)},
{ AOM_CDF2(30642)},
{ AOM_CDF2(4119)},
{ AOM_CDF2(16026)},
{ AOM_CDF2(25657)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
},
{
{ AOM_CDF2(31957)},
{ AOM_CDF2(3230)},
{ AOM_CDF2(11153)},
{ AOM_CDF2(18123)},
{ AOM_CDF2(20143)},
{ AOM_CDF2(26536)},
{ AOM_CDF2(31986)},
{ AOM_CDF2(3050)},
{ AOM_CDF2(14603)},
{ AOM_CDF2(25155)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
},
{
{ AOM_CDF2(32363)},
{ AOM_CDF2(10692)},
{ AOM_CDF2(19090)},
{ AOM_CDF2(24357)},
{ AOM_CDF2(24442)},
{ AOM_CDF2(28312)},
{ AOM_CDF2(32169)},
{ AOM_CDF2(3648)},
{ AOM_CDF2(15690)},
{ AOM_CDF2(26815)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
},
{
{ AOM_CDF2(30669)},
{ AOM_CDF2(3832)},
{ AOM_CDF2(11663)},
{ AOM_CDF2(18889)},
{ AOM_CDF2(19782)},
{ AOM_CDF2(23313)},
{ AOM_CDF2(31330)},
{ AOM_CDF2(5124)},
{ AOM_CDF2(18719)},
{ AOM_CDF2(28468)},
{ AOM_CDF2(3082)},
{ AOM_CDF2(20982)},
{ AOM_CDF2(29443)}
},
{
{ AOM_CDF2(28573)},
{ AOM_CDF2(3183)},
{ AOM_CDF2(17802)},
{ AOM_CDF2(25977)},
{ AOM_CDF2(26677)},
{ AOM_CDF2(27832)},
{ AOM_CDF2(32387)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
}
},
{
{
{ AOM_CDF2(26887)},
{ AOM_CDF2(6729)},
{ AOM_CDF2(10361)},
{ AOM_CDF2(17442)},
{ AOM_CDF2(15045)},
{ AOM_CDF2(22478)},
{ AOM_CDF2(29072)},
{ AOM_CDF2(2713)},
{ AOM_CDF2(11861)},
{ AOM_CDF2(20773)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
},
{
{ AOM_CDF2(31903)},
{ AOM_CDF2(2044)},
{ AOM_CDF2(7528)},
{ AOM_CDF2(14618)},
{ AOM_CDF2(16182)},
{ AOM_CDF2(24168)},
{ AOM_CDF2(31037)},
{ AOM_CDF2(2786)},
{ AOM_CDF2(11194)},
{ AOM_CDF2(20155)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
},
{
{ AOM_CDF2(32510)},
{ AOM_CDF2(8430)},
{ AOM_CDF2(17318)},
{ AOM_CDF2(24154)},
{ AOM_CDF2(23674)},
{ AOM_CDF2(28789)},
{ AOM_CDF2(32139)},
{ AOM_CDF2(3440)},
{ AOM_CDF2(13117)},
{ AOM_CDF2(22702)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
},
{
{ AOM_CDF2(31671)},
{ AOM_CDF2(2056)},
{ AOM_CDF2(11746)},
{ AOM_CDF2(16852)},
{ AOM_CDF2(18635)},
{ AOM_CDF2(24715)},
{ AOM_CDF2(31484)},
{ AOM_CDF2(4656)},
{ AOM_CDF2(16074)},
{ AOM_CDF2(24704)},
{ AOM_CDF2(1806)},
{ AOM_CDF2(14645)},
{ AOM_CDF2(25336)}
},
{
{ AOM_CDF2(31539)},
{ AOM_CDF2(8433)},
{ AOM_CDF2(20576)},
{ AOM_CDF2(27904)},
{ AOM_CDF2(27852)},
{ AOM_CDF2(30026)},
{ AOM_CDF2(32441)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)}
}
}
};
static const u16 av1_default_eob_extra_cdfs[TOKEN_CDF_Q_CTXS][TX_SIZES][PLANE_TYPES]
[EOB_COEF_CONTEXTS][CDF_SIZE(2)] = {
{
{
{
{ AOM_CDF2(16961)},
{ AOM_CDF2(17223)},
{ AOM_CDF2(7621)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(19069)},
{ AOM_CDF2(22525)},
{ AOM_CDF2(13377)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(20401)},
{ AOM_CDF2(17025)},
{ AOM_CDF2(12845)},
{ AOM_CDF2(12873)},
{ AOM_CDF2(14094)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(20681)},
{ AOM_CDF2(20701)},
{ AOM_CDF2(15250)},
{ AOM_CDF2(15017)},
{ AOM_CDF2(14928)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(23905)},
{ AOM_CDF2(17194)},
{ AOM_CDF2(16170)},
{ AOM_CDF2(17695)},
{ AOM_CDF2(13826)},
{ AOM_CDF2(15810)},
{ AOM_CDF2(12036)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(23959)},
{ AOM_CDF2(20799)},
{ AOM_CDF2(19021)},
{ AOM_CDF2(16203)},
{ AOM_CDF2(17886)},
{ AOM_CDF2(14144)},
{ AOM_CDF2(12010)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(27399)},
{ AOM_CDF2(16327)},
{ AOM_CDF2(18071)},
{ AOM_CDF2(19584)},
{ AOM_CDF2(20721)},
{ AOM_CDF2(18432)},
{ AOM_CDF2(19560)},
{ AOM_CDF2(10150)},
{ AOM_CDF2(8805)},
},
{
{ AOM_CDF2(24932)},
{ AOM_CDF2(20833)},
{ AOM_CDF2(12027)},
{ AOM_CDF2(16670)},
{ AOM_CDF2(19914)},
{ AOM_CDF2(15106)},
{ AOM_CDF2(17662)},
{ AOM_CDF2(13783)},
{ AOM_CDF2(28756)},
}
},
{
{
{ AOM_CDF2(23406)},
{ AOM_CDF2(21845)},
{ AOM_CDF2(18432)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(17096)},
{ AOM_CDF2(12561)},
{ AOM_CDF2(17320)},
{ AOM_CDF2(22395)},
{ AOM_CDF2(21370)},
},
{
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
}
},
{
{
{
{ AOM_CDF2(17471)},
{ AOM_CDF2(20223)},
{ AOM_CDF2(11357)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(20335)},
{ AOM_CDF2(21667)},
{ AOM_CDF2(14818)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(20430)},
{ AOM_CDF2(20662)},
{ AOM_CDF2(15367)},
{ AOM_CDF2(16970)},
{ AOM_CDF2(14657)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(22117)},
{ AOM_CDF2(22028)},
{ AOM_CDF2(18650)},
{ AOM_CDF2(16042)},
{ AOM_CDF2(15885)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(22409)},
{ AOM_CDF2(21012)},
{ AOM_CDF2(15650)},
{ AOM_CDF2(17395)},
{ AOM_CDF2(15469)},
{ AOM_CDF2(20205)},
{ AOM_CDF2(19511)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(24220)},
{ AOM_CDF2(22480)},
{ AOM_CDF2(17737)},
{ AOM_CDF2(18916)},
{ AOM_CDF2(19268)},
{ AOM_CDF2(18412)},
{ AOM_CDF2(18844)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(25991)},
{ AOM_CDF2(20314)},
{ AOM_CDF2(17731)},
{ AOM_CDF2(19678)},
{ AOM_CDF2(18649)},
{ AOM_CDF2(17307)},
{ AOM_CDF2(21798)},
{ AOM_CDF2(17549)},
{ AOM_CDF2(15630)},
},
{
{ AOM_CDF2(26585)},
{ AOM_CDF2(21469)},
{ AOM_CDF2(20432)},
{ AOM_CDF2(17735)},
{ AOM_CDF2(19280)},
{ AOM_CDF2(15235)},
{ AOM_CDF2(20297)},
{ AOM_CDF2(22471)},
{ AOM_CDF2(28997)},
}
},
{
{
{ AOM_CDF2(26605)},
{ AOM_CDF2(11304)},
{ AOM_CDF2(16726)},
{ AOM_CDF2(16560)},
{ AOM_CDF2(20866)},
{ AOM_CDF2(23524)},
{ AOM_CDF2(19878)},
{ AOM_CDF2(13469)},
{ AOM_CDF2(23084)},
},
{
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
}
},
{
{
{
{ AOM_CDF2(18983)},
{ AOM_CDF2(20512)},
{ AOM_CDF2(14885)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(20090)},
{ AOM_CDF2(19444)},
{ AOM_CDF2(17286)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(19139)},
{ AOM_CDF2(21487)},
{ AOM_CDF2(18959)},
{ AOM_CDF2(20910)},
{ AOM_CDF2(19089)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(20536)},
{ AOM_CDF2(20664)},
{ AOM_CDF2(20625)},
{ AOM_CDF2(19123)},
{ AOM_CDF2(14862)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(19833)},
{ AOM_CDF2(21502)},
{ AOM_CDF2(17485)},
{ AOM_CDF2(20267)},
{ AOM_CDF2(18353)},
{ AOM_CDF2(23329)},
{ AOM_CDF2(21478)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(22041)},
{ AOM_CDF2(23434)},
{ AOM_CDF2(20001)},
{ AOM_CDF2(20554)},
{ AOM_CDF2(20951)},
{ AOM_CDF2(20145)},
{ AOM_CDF2(15562)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(23312)},
{ AOM_CDF2(21607)},
{ AOM_CDF2(16526)},
{ AOM_CDF2(18957)},
{ AOM_CDF2(18034)},
{ AOM_CDF2(18934)},
{ AOM_CDF2(24247)},
{ AOM_CDF2(16921)},
{ AOM_CDF2(17080)},
},
{
{ AOM_CDF2(26579)},
{ AOM_CDF2(24910)},
{ AOM_CDF2(18637)},
{ AOM_CDF2(19800)},
{ AOM_CDF2(20388)},
{ AOM_CDF2(9887)},
{ AOM_CDF2(15642)},
{ AOM_CDF2(30198)},
{ AOM_CDF2(24721)},
}
},
{
{
{ AOM_CDF2(26998)},
{ AOM_CDF2(16737)},
{ AOM_CDF2(17838)},
{ AOM_CDF2(18922)},
{ AOM_CDF2(19515)},
{ AOM_CDF2(18636)},
{ AOM_CDF2(17333)},
{ AOM_CDF2(15776)},
{ AOM_CDF2(22658)},
},
{
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
}
},
{
{
{
{ AOM_CDF2(20177)},
{ AOM_CDF2(20789)},
{ AOM_CDF2(20262)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(21416)},
{ AOM_CDF2(20855)},
{ AOM_CDF2(23410)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(20238)},
{ AOM_CDF2(21057)},
{ AOM_CDF2(19159)},
{ AOM_CDF2(22337)},
{ AOM_CDF2(20159)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(20125)},
{ AOM_CDF2(20559)},
{ AOM_CDF2(21707)},
{ AOM_CDF2(22296)},
{ AOM_CDF2(17333)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(19941)},
{ AOM_CDF2(20527)},
{ AOM_CDF2(21470)},
{ AOM_CDF2(22487)},
{ AOM_CDF2(19558)},
{ AOM_CDF2(22354)},
{ AOM_CDF2(20331)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
},
{
{ AOM_CDF2(22752)},
{ AOM_CDF2(25006)},
{ AOM_CDF2(22075)},
{ AOM_CDF2(21576)},
{ AOM_CDF2(17740)},
{ AOM_CDF2(21690)},
{ AOM_CDF2(19211)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
},
{
{
{ AOM_CDF2(21442)},
{ AOM_CDF2(22358)},
{ AOM_CDF2(18503)},
{ AOM_CDF2(20291)},
{ AOM_CDF2(19945)},
{ AOM_CDF2(21294)},
{ AOM_CDF2(21178)},
{ AOM_CDF2(19400)},
{ AOM_CDF2(10556)},
},
{
{ AOM_CDF2(24648)},
{ AOM_CDF2(24949)},
{ AOM_CDF2(20708)},
{ AOM_CDF2(23905)},
{ AOM_CDF2(20501)},
{ AOM_CDF2(9558)},
{ AOM_CDF2(9423)},
{ AOM_CDF2(30365)},
{ AOM_CDF2(19253)},
}
},
{
{
{ AOM_CDF2(26064)},
{ AOM_CDF2(22098)},
{ AOM_CDF2(19613)},
{ AOM_CDF2(20525)},
{ AOM_CDF2(17595)},
{ AOM_CDF2(16618)},
{ AOM_CDF2(20497)},
{ AOM_CDF2(18989)},
{ AOM_CDF2(15513)},
},
{
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
{ AOM_CDF2(16384)},
}
}
}
};
static const u16 av1_default_eob_multi16_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][4] = {
{
{
{ AOM_CDF5(840, 1039, 1980, 4895)},
{ AOM_CDF5(370, 671, 1883, 4471)}
},
{
{ AOM_CDF5(3247, 4950, 9688, 14563)},
{ AOM_CDF5(1904, 3354, 7763, 14647)}
}
},
{
{
{ AOM_CDF5(2125, 2551, 5165, 8946)},
{ AOM_CDF5(513, 765, 1859, 6339)}
},
{
{ AOM_CDF5(7637, 9498, 14259, 19108)},
{ AOM_CDF5(2497, 4096, 8866, 16993)}
}
},
{
{
{ AOM_CDF5(4016, 4897, 8881, 14968)},
{ AOM_CDF5(716, 1105, 2646, 10056)}
},
{
{ AOM_CDF5(11139, 13270, 18241, 23566)},
{ AOM_CDF5(3192, 5032, 10297, 19755)}
}
},
{
{
{ AOM_CDF5(6708, 8958, 14746, 22133)},
{ AOM_CDF5(1222, 2074, 4783, 15410)}
},
{
{ AOM_CDF5(19575, 21766, 26044, 29709)},
{ AOM_CDF5(7297, 10767, 19273, 28194)}
}
}
};
static const u16 av1_default_eob_multi32_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
{
{
{ AOM_CDF6(400, 520, 977, 2102, 6542)},
{ AOM_CDF6(210, 405, 1315, 3326, 7537)}
},
{
{ AOM_CDF6(2636, 4273, 7588, 11794, 20401)},
{ AOM_CDF6(1786, 3179, 6902, 11357, 19054)}
}
},
{
{
{ AOM_CDF6(989, 1249, 2019, 4151, 10785)},
{ AOM_CDF6(313, 441, 1099, 2917, 8562)}
},
{
{ AOM_CDF6(8394, 10352, 13932, 18855, 26014)},
{ AOM_CDF6(2578, 4124, 8181, 13670, 24234)}
}
},
{
{
{ AOM_CDF6(2515, 3003, 4452, 8162, 16041)},
{ AOM_CDF6(574, 821, 1836, 5089, 13128)}
},
{
{ AOM_CDF6(13468, 16303, 20361, 25105, 29281)},
{ AOM_CDF6(3542, 5502, 10415, 16760, 25644)}
}
},
{
{
{ AOM_CDF6(4617, 5709, 8446, 13584, 23135)},
{ AOM_CDF6(1156, 1702, 3675, 9274, 20539)}
},
{
{ AOM_CDF6(22086, 24282, 27010, 29770, 31743)},
{ AOM_CDF6(7699, 10897, 20891, 26926, 31628)}
}
}
};
static const u16 av1_default_eob_multi64_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
{
{
{ AOM_CDF7(329, 498, 1101, 1784, 3265, 7758)},
{ AOM_CDF7(335, 730, 1459, 5494, 8755, 12997)}
},
{
{ AOM_CDF7(3505, 5304, 10086, 13814, 17684, 23370)},
{ AOM_CDF7(1563, 2700, 4876, 10911, 14706, 22480)}
}
},
{
{
{ AOM_CDF7(1260, 1446, 2253, 3712, 6652, 13369)},
{ AOM_CDF7(401, 605, 1029, 2563, 5845, 12626)}
},
{
{ AOM_CDF7(8609, 10612, 14624, 18714, 22614, 29024)},
{ AOM_CDF7(1923, 3127, 5867, 9703, 14277, 27100)}
}
},
{
{
{ AOM_CDF7(2374, 2772, 4583, 7276, 12288, 19706)},
{ AOM_CDF7(497, 810, 1315, 3000, 7004, 15641)}
},
{
{ AOM_CDF7(15050, 17126, 21410, 24886, 28156, 30726)},
{ AOM_CDF7(4034, 6290, 10235, 14982, 21214, 28491)}
}
},
{
{
{ AOM_CDF7(6307, 7541, 12060, 16358, 22553, 27865)},
{ AOM_CDF7(1289, 2320, 3971, 7926, 14153, 24291)}
},
{
{ AOM_CDF7(24212, 25708, 28268, 30035, 31307, 32049)},
{ AOM_CDF7(8726, 12378, 19409, 26450, 30038, 32462)}
}
}
};
static const u16 av1_default_eob_multi128_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
{
{
{ AOM_CDF8(219, 482, 1140, 2091, 3680, 6028, 12586)},
{ AOM_CDF8(371, 699, 1254, 4830, 9479, 12562, 17497)}
},
{
{ AOM_CDF8(5245, 7456, 12880, 15852, 20033, 23932, 27608)},
{ AOM_CDF8(2054, 3472, 5869, 14232, 18242, 20590, 26752)}
}
},
{
{
{ AOM_CDF8(685, 933, 1488, 2714, 4766, 8562, 19254)},
{ AOM_CDF8(217, 352, 618, 2303, 5261, 9969, 17472)}
},
{
{ AOM_CDF8(8045, 11200, 15497, 19595, 23948, 27408, 30938)},
{ AOM_CDF8(2310, 4160, 7471, 14997, 17931, 20768, 30240)}
}
},
{
{
{ AOM_CDF8(1366, 1738, 2527, 5016, 9355, 15797, 24643)},
{ AOM_CDF8(354, 558, 944, 2760, 7287, 14037, 21779)}
},
{
{ AOM_CDF8(13627, 16246, 20173, 24429, 27948, 30415, 31863)},
{ AOM_CDF8(6275, 9889, 14769, 23164, 27988, 30493, 32272)}
}
},
{
{
{ AOM_CDF8(3472, 4885, 7489, 12481, 18517, 24536, 29635)},
{ AOM_CDF8(886, 1731, 3271, 8469, 15569, 22126, 28383)}
},
{
{ AOM_CDF8(24313, 26062, 28385, 30107, 31217, 31898, 32345)},
{ AOM_CDF8(9165, 13282, 21150, 30286, 31894, 32571, 32712)}
}
}
};
static const u16 av1_default_eob_multi256_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][8] = {
{
{
{ AOM_CDF9(310, 584, 1887, 3589, 6168, 8611, 11352, 15652)},
{ AOM_CDF9(998, 1850, 2998, 5604, 17341, 19888, 22899, 25583)}
},
{
{ AOM_CDF9(2520, 3240, 5952, 8870, 12577, 17558, 19954, 24168)},
{ AOM_CDF9(2203, 4130, 7435, 10739, 20652, 23681, 25609, 27261)}
}
},
{
{
{ AOM_CDF9(1448, 2109, 4151, 6263, 9329, 13260, 17944, 23300)},
{ AOM_CDF9(399, 1019, 1749, 3038, 10444, 15546, 22739, 27294)}
},
{
{ AOM_CDF9(6402, 8148, 12623, 15072, 18728, 22847, 26447, 29377)},
{ AOM_CDF9(1674, 3252, 5734, 10159, 22397, 23802, 24821, 30940)}
}
},
{
{
{ AOM_CDF9(3089, 3920, 6038, 9460, 14266, 19881, 25766, 29176)},
{ AOM_CDF9(1084, 2358, 3488, 5122, 11483, 18103, 26023, 29799)}
},
{
{ AOM_CDF9(11514, 13794, 17480, 20754, 24361, 27378, 29492, 31277)},
{ AOM_CDF9(6571, 9610, 15516, 21826, 29092, 30829, 31842, 32708)}
}
},
{
{
{ AOM_CDF9(5348, 7113, 11820, 15924, 22106, 26777, 30334, 31757)},
{ AOM_CDF9(2453, 4474, 6307, 8777, 16474, 22975, 29000, 31547)}
},
{
{ AOM_CDF9(23110, 24597, 27140, 28894, 30167, 30927, 31392, 32094)},
{ AOM_CDF9(9998, 17661, 25178, 28097, 31308, 32038, 32403, 32695)}
}
}
};
static const u16 av1_default_eob_multi512_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][16] = {
{
{
{ AOM_CDF10(641, 983, 3707, 5430, 10234, 14958, 18788, 23412, 26061)},
{ AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
},
{
{ AOM_CDF10(5095, 6446, 9996, 13354, 16017, 17986, 20919, 26129, 29140)},
{ AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
}
},
{
{
{ AOM_CDF10(1230, 2278, 5035, 7776, 11871, 15346, 19590, 24584, 28749)},
{ AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
},
{
{ AOM_CDF10(7265, 9979, 15819, 19250, 21780, 23846, 26478, 28396, 31811)},
{ AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
}
},
{
{
{ AOM_CDF10(2624, 3936, 6480, 9686, 13979, 17726, 23267, 28410, 31078)},
{ AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
},
{
{ AOM_CDF10(12015, 14769, 19588, 22052, 24222, 25812, 27300, 29219, 32114)},
{ AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
}
},
{
{
{ AOM_CDF10(5927, 7809, 10923, 14597, 19439, 24135, 28456, 31142, 32060)},
{ AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
},
{
{ AOM_CDF10(21093, 23043, 25742, 27658, 29097, 29716, 30073, 30820, 31956)},
{ AOM_CDF10(3277, 6554, 9830, 13107, 16384, 19661, 22938, 26214, 29491)}
}
}
};
static const u16 av1_default_eob_multi1024_cdfs[TOKEN_CDF_Q_CTXS][PLANE_TYPES][2][16] = {
{
{
{ AOM_CDF11(393, 421, 751, 1623, 3160,
6352, 13345, 18047, 22571, 25830)},
{ AOM_CDF11(2979, 5958, 8937, 11916, 14895,
17873, 20852, 23831, 26810, 29789)}
},
{
{ AOM_CDF11(1865, 1988, 2930, 4242, 10533,
16538, 21354, 27255, 28546, 31784)},
{ AOM_CDF11(2979, 5958, 8937, 11916, 14895,
17873, 20852, 23831, 26810, 29789)}
}
},
{
{
{ AOM_CDF11(696, 948, 3145, 5702, 9706,
13217, 17851, 21856, 25692, 28034)},
{ AOM_CDF11(2979, 5958, 8937, 11916, 14895,
17873, 20852, 23831, 26810, 29789)}
},
{
{ AOM_CDF11(2672, 3591, 9330, 17084, 22725,
24284, 26527, 28027, 28377, 30876)},
{ AOM_CDF11(2979, 5958, 8937, 11916, 14895,
17873, 20852, 23831, 26810, 29789)}
}
},
{
{
{ AOM_CDF11(2784, 3831, 7041, 10521, 14847,
18844, 23155, 26682, 29229, 31045)},
{ AOM_CDF11(2979, 5958, 8937, 11916, 14895,
17873, 20852, 23831, 26810, 29789)}
},
{
{ AOM_CDF11(9577, 12466, 17739, 20750, 22061,
23215, 24601, 25483, 25843, 32056)},
{ AOM_CDF11(2979, 5958, 8937, 11916, 14895,
17873, 20852, 23831, 26810, 29789)}
}
},
{
{
{ AOM_CDF11(6698, 8334, 11961, 15762, 20186,
23862, 27434, 29326, 31082, 32050)},
{ AOM_CDF11(2979, 5958, 8937, 11916, 14895,
17873, 20852, 23831, 26810, 29789)}
},
{
{ AOM_CDF11(20569, 22426, 25569, 26859, 28053,
28913, 29486, 29724, 29807, 32570)},
{ AOM_CDF11(2979, 5958, 8937, 11916, 14895,
17873, 20852, 23831, 26810, 29789)}
}
}
};
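/*
 * Editorial note: the repeated AOM_CDF10(3277, 6554, ...) and
 * AOM_CDF11(2979, 5958, ...) rows above are near-uniform distributions,
 * stepping by roughly 32768 / n per symbol, presumably acting as neutral
 * defaults for contexts without a trained initial CDF.
 */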
static const u16 av1_default_coeff_lps_multi_cdfs[TOKEN_CDF_Q_CTXS]
[TX_SIZES][PLANE_TYPES][LEVEL_CONTEXTS][CDF_SIZE(BR_CDF_SIZE) + 1] = {
{
{
{
{ AOM_CDF4(14298, 20718, 24174)}, { AOM_CDF4(12536, 19601, 23789)},
{ AOM_CDF4(8712, 15051, 19503)}, { AOM_CDF4(6170, 11327, 15434)},
{ AOM_CDF4(4742, 8926, 12538)}, { AOM_CDF4(3803, 7317, 10546)},
{ AOM_CDF4(1696, 3317, 4871)}, { AOM_CDF4(14392, 19951, 22756)},
{ AOM_CDF4(15978, 23218, 26818)}, { AOM_CDF4(12187, 19474, 23889)},
{ AOM_CDF4(9176, 15640, 20259)}, { AOM_CDF4(7068, 12655, 17028)},
{ AOM_CDF4(5656, 10442, 14472)}, { AOM_CDF4(2580, 4992, 7244)},
{ AOM_CDF4(12136, 18049, 21426)}, { AOM_CDF4(13784, 20721, 24481)},
{ AOM_CDF4(10836, 17621, 21900)}, { AOM_CDF4(8372, 14444, 18847)},
{ AOM_CDF4(6523, 11779, 16000)}, { AOM_CDF4(5337, 9898, 13760)},
{ AOM_CDF4(3034, 5860, 8462)}
},
{
{ AOM_CDF4(15967, 22905, 26286)}, { AOM_CDF4(13534, 20654, 24579)},
{ AOM_CDF4(9504, 16092, 20535)}, { AOM_CDF4(6975, 12568, 16903)},
{ AOM_CDF4(5364, 10091, 14020)}, { AOM_CDF4(4357, 8370, 11857)},
{ AOM_CDF4(2506, 4934, 7218)}, { AOM_CDF4(23032, 28815, 30936)},
{ AOM_CDF4(19540, 26704, 29719)}, { AOM_CDF4(15158, 22969, 27097)},
{ AOM_CDF4(11408, 18865, 23650)}, { AOM_CDF4(8885, 15448, 20250)},
{ AOM_CDF4(7108, 12853, 17416)}, { AOM_CDF4(4231, 8041, 11480)},
{ AOM_CDF4(19823, 26490, 29156)}, { AOM_CDF4(18890, 25929, 28932)},
{ AOM_CDF4(15660, 23491, 27433)}, { AOM_CDF4(12147, 19776, 24488)},
{ AOM_CDF4(9728, 16774, 21649)}, { AOM_CDF4(7919, 14277, 19066)},
{ AOM_CDF4(5440, 10170, 14185)}
}
},
{
{
{ AOM_CDF4(14406, 20862, 24414)}, { AOM_CDF4(11824, 18907, 23109)},
{ AOM_CDF4(8257, 14393, 18803)}, { AOM_CDF4(5860, 10747, 14778)},
{ AOM_CDF4(4475, 8486, 11984)}, { AOM_CDF4(3606, 6954, 10043)},
{ AOM_CDF4(1736, 3410, 5048)}, { AOM_CDF4(14430, 20046, 22882)},
{ AOM_CDF4(15593, 22899, 26709)}, { AOM_CDF4(12102, 19368, 23811)},
{ AOM_CDF4(9059, 15584, 20262)}, { AOM_CDF4(6999, 12603, 17048)},
{ AOM_CDF4(5684, 10497, 14553)}, { AOM_CDF4(2822, 5438, 7862)},
{ AOM_CDF4(15785, 21585, 24359)}, { AOM_CDF4(18347, 25229, 28266)},
{ AOM_CDF4(14974, 22487, 26389)}, { AOM_CDF4(11423, 18681, 23271)},
{ AOM_CDF4(8863, 15350, 20008)}, { AOM_CDF4(7153, 12852, 17278)},
{ AOM_CDF4(3707, 7036, 9982)}
},
{
{ AOM_CDF4(15460, 21696, 25469)}, { AOM_CDF4(12170, 19249, 23191)},
{ AOM_CDF4(8723, 15027, 19332)}, { AOM_CDF4(6428, 11704, 15874)},
{ AOM_CDF4(4922, 9292, 13052)}, { AOM_CDF4(4139, 7695, 11010)},
{ AOM_CDF4(2291, 4508, 6598)}, { AOM_CDF4(19856, 26920, 29828)},
{ AOM_CDF4(17923, 25289, 28792)}, { AOM_CDF4(14278, 21968, 26297)},
{ AOM_CDF4(10910, 18136, 22950)}, { AOM_CDF4(8423, 14815, 19627)},
{ AOM_CDF4(6771, 12283, 16774)}, { AOM_CDF4(4074, 7750, 11081)},
{ AOM_CDF4(19852, 26074, 28672)}, { AOM_CDF4(19371, 26110, 28989)},
{ AOM_CDF4(16265, 23873, 27663)}, { AOM_CDF4(12758, 20378, 24952)},
{ AOM_CDF4(10095, 17098, 21961)}, { AOM_CDF4(8250, 14628, 19451)},
{ AOM_CDF4(5205, 9745, 13622)}
}
},
{
{
{ AOM_CDF4(10563, 16233, 19763)}, { AOM_CDF4(9794, 16022, 19804)},
{ AOM_CDF4(6750, 11945, 15759)}, { AOM_CDF4(4963, 9186, 12752)},
{ AOM_CDF4(3845, 7435, 10627)}, { AOM_CDF4(3051, 6085, 8834)},
{ AOM_CDF4(1311, 2596, 3830)}, { AOM_CDF4(11246, 16404, 19689)},
{ AOM_CDF4(12315, 18911, 22731)}, { AOM_CDF4(10557, 17095, 21289)},
{ AOM_CDF4(8136, 14006, 18249)}, { AOM_CDF4(6348, 11474, 15565)},
{ AOM_CDF4(5196, 9655, 13400)}, { AOM_CDF4(2349, 4526, 6587)},
{ AOM_CDF4(13337, 18730, 21569)}, { AOM_CDF4(19306, 26071, 28882)},
{ AOM_CDF4(15952, 23540, 27254)}, { AOM_CDF4(12409, 19934, 24430)},
{ AOM_CDF4(9760, 16706, 21389)}, { AOM_CDF4(8004, 14220, 18818)},
{ AOM_CDF4(4138, 7794, 10961)}
},
{
{ AOM_CDF4(10870, 16684, 20949)}, { AOM_CDF4(9664, 15230, 18680)},
{ AOM_CDF4(6886, 12109, 15408)}, { AOM_CDF4(4825, 8900, 12305)},
{ AOM_CDF4(3630, 7162, 10314)}, { AOM_CDF4(3036, 6429, 9387)},
{ AOM_CDF4(1671, 3296, 4940)}, { AOM_CDF4(13819, 19159, 23026)},
{ AOM_CDF4(11984, 19108, 23120)}, { AOM_CDF4(10690, 17210, 21663)},
{ AOM_CDF4(7984, 14154, 18333)}, { AOM_CDF4(6868, 12294, 16124)},
{ AOM_CDF4(5274, 8994, 12868)}, { AOM_CDF4(2988, 5771, 8424)},
{ AOM_CDF4(19736, 26647, 29141)}, { AOM_CDF4(18933, 26070, 28984)},
{ AOM_CDF4(15779, 23048, 27200)}, { AOM_CDF4(12638, 20061, 24532)},
{ AOM_CDF4(10692, 17545, 22220)}, { AOM_CDF4(9217, 15251, 20054)},
{ AOM_CDF4(5078, 9284, 12594)}
}
},
{
{
{ AOM_CDF4(2331, 3662, 5244)}, { AOM_CDF4(2891, 4771, 6145)},
{ AOM_CDF4(4598, 7623, 9729)}, { AOM_CDF4(3520, 6845, 9199)},
{ AOM_CDF4(3417, 6119, 9324)}, { AOM_CDF4(2601, 5412, 7385)},
{ AOM_CDF4(600, 1173, 1744)}, { AOM_CDF4(7672, 13286, 17469)},
{ AOM_CDF4(4232, 7792, 10793)}, { AOM_CDF4(2915, 5317, 7397)},
{ AOM_CDF4(2318, 4356, 6152)}, { AOM_CDF4(2127, 4000, 5554)},
{ AOM_CDF4(1850, 3478, 5275)}, { AOM_CDF4(977, 1933, 2843)},
{ AOM_CDF4(18280, 24387, 27989)}, { AOM_CDF4(15852, 22671, 26185)},
{ AOM_CDF4(13845, 20951, 24789)}, { AOM_CDF4(11055, 17966, 22129)},
{ AOM_CDF4(9138, 15422, 19801)}, { AOM_CDF4(7454, 13145, 17456)},
{ AOM_CDF4(3370, 6393, 9013)}
},
{
{ AOM_CDF4(5842, 9229, 10838)}, { AOM_CDF4(2313, 3491, 4276)},
{ AOM_CDF4(2998, 6104, 7496)}, { AOM_CDF4(2420, 7447, 9868)},
{ AOM_CDF4(3034, 8495, 10923)}, { AOM_CDF4(4076, 8937, 10975)},
{ AOM_CDF4(1086, 2370, 3299)}, { AOM_CDF4(9714, 17254, 20444)},
{ AOM_CDF4(8543, 13698, 17123)}, { AOM_CDF4(4918, 9007, 11910)},
{ AOM_CDF4(4129, 7532, 10553)}, { AOM_CDF4(2364, 5533, 8058)},
{ AOM_CDF4(1834, 3546, 5563)}, { AOM_CDF4(1473, 2908, 4133)},
{ AOM_CDF4(15405, 21193, 25619)}, { AOM_CDF4(15691, 21952, 26561)},
{ AOM_CDF4(12962, 19194, 24165)}, { AOM_CDF4(10272, 17855, 22129)},
{ AOM_CDF4(8588, 15270, 20718)}, { AOM_CDF4(8682, 14669, 19500)},
{ AOM_CDF4(4870, 9636, 13205)}
}
},
{
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}
}
}
},
{
{
{
{ AOM_CDF4(14995, 21341, 24749)}, { AOM_CDF4(13158, 20289, 24601)},
{ AOM_CDF4(8941, 15326, 19876)}, { AOM_CDF4(6297, 11541, 15807)},
{ AOM_CDF4(4817, 9029, 12776)}, { AOM_CDF4(3731, 7273, 10627)},
{ AOM_CDF4(1847, 3617, 5354)}, { AOM_CDF4(14472, 19659, 22343)},
{ AOM_CDF4(16806, 24162, 27533)}, { AOM_CDF4(12900, 20404, 24713)},
{ AOM_CDF4(9411, 16112, 20797)}, { AOM_CDF4(7056, 12697, 17148)},
{ AOM_CDF4(5544, 10339, 14460)}, { AOM_CDF4(2954, 5704, 8319)},
{ AOM_CDF4(12464, 18071, 21354)}, { AOM_CDF4(15482, 22528, 26034)},
{ AOM_CDF4(12070, 19269, 23624)}, { AOM_CDF4(8953, 15406, 20106)},
{ AOM_CDF4(7027, 12730, 17220)}, { AOM_CDF4(5887, 10913, 15140)},
{ AOM_CDF4(3793, 7278, 10447)}
},
{
{ AOM_CDF4(15571, 22232, 25749)}, { AOM_CDF4(14506, 21575, 25374)},
{ AOM_CDF4(10189, 17089, 21569)}, { AOM_CDF4(7316, 13301, 17915)},
{ AOM_CDF4(5783, 10912, 15190)}, { AOM_CDF4(4760, 9155, 13088)},
{ AOM_CDF4(2993, 5966, 8774)}, { AOM_CDF4(23424, 28903, 30778)},
{ AOM_CDF4(20775, 27666, 30290)}, { AOM_CDF4(16474, 24410, 28299)},
{ AOM_CDF4(12471, 20180, 24987)}, { AOM_CDF4(9410, 16487, 21439)},
{ AOM_CDF4(7536, 13614, 18529)}, { AOM_CDF4(5048, 9586, 13549)},
{ AOM_CDF4(21090, 27290, 29756)}, { AOM_CDF4(20796, 27402, 30026)},
{ AOM_CDF4(17819, 25485, 28969)}, { AOM_CDF4(13860, 21909, 26462)},
{ AOM_CDF4(11002, 18494, 23529)}, { AOM_CDF4(8953, 15929, 20897)},
{ AOM_CDF4(6448, 11918, 16454)}
}
},
{
{
{ AOM_CDF4(15999, 22208, 25449)}, { AOM_CDF4(13050, 19988, 24122)},
{ AOM_CDF4(8594, 14864, 19378)}, { AOM_CDF4(6033, 11079, 15238)},
{ AOM_CDF4(4554, 8683, 12347)}, { AOM_CDF4(3672, 7139, 10337)},
{ AOM_CDF4(1900, 3771, 5576)}, { AOM_CDF4(15788, 21340, 23949)},
{ AOM_CDF4(16825, 24235, 27758)}, { AOM_CDF4(12873, 20402, 24810)},
{ AOM_CDF4(9590, 16363, 21094)}, { AOM_CDF4(7352, 13209, 17733)},
{ AOM_CDF4(5960, 10989, 15184)}, { AOM_CDF4(3232, 6234, 9007)},
{ AOM_CDF4(15761, 20716, 23224)}, { AOM_CDF4(19318, 25989, 28759)},
{ AOM_CDF4(15529, 23094, 26929)}, { AOM_CDF4(11662, 18989, 23641)},
{ AOM_CDF4(8955, 15568, 20366)}, { AOM_CDF4(7281, 13106, 17708)},
{ AOM_CDF4(4248, 8059, 11440)}
},
{
{ AOM_CDF4(14899, 21217, 24503)}, { AOM_CDF4(13519, 20283, 24047)},
{ AOM_CDF4(9429, 15966, 20365)}, { AOM_CDF4(6700, 12355, 16652)},
{ AOM_CDF4(5088, 9704, 13716)}, { AOM_CDF4(4243, 8154, 11731)},
{ AOM_CDF4(2702, 5364, 7861)}, { AOM_CDF4(22745, 28388, 30454)},
{ AOM_CDF4(20235, 27146, 29922)}, { AOM_CDF4(15896, 23715, 27637)},
{ AOM_CDF4(11840, 19350, 24131)}, { AOM_CDF4(9122, 15932, 20880)},
{ AOM_CDF4(7488, 13581, 18362)}, { AOM_CDF4(5114, 9568, 13370)},
{ AOM_CDF4(20845, 26553, 28932)}, { AOM_CDF4(20981, 27372, 29884)},
{ AOM_CDF4(17781, 25335, 28785)}, { AOM_CDF4(13760, 21708, 26297)},
{ AOM_CDF4(10975, 18415, 23365)}, { AOM_CDF4(9045, 15789, 20686)},
{ AOM_CDF4(6130, 11199, 15423)}
}
},
{
{
{ AOM_CDF4(13549, 19724, 23158)}, { AOM_CDF4(11844, 18382, 22246)},
{ AOM_CDF4(7919, 13619, 17773)}, { AOM_CDF4(5486, 10143, 13946)},
{ AOM_CDF4(4166, 7983, 11324)}, { AOM_CDF4(3364, 6506, 9427)},
{ AOM_CDF4(1598, 3160, 4674)}, { AOM_CDF4(15281, 20979, 23781)},
{ AOM_CDF4(14939, 22119, 25952)}, { AOM_CDF4(11363, 18407, 22812)},
{ AOM_CDF4(8609, 14857, 19370)}, { AOM_CDF4(6737, 12184, 16480)},
{ AOM_CDF4(5506, 10263, 14262)}, { AOM_CDF4(2990, 5786, 8380)},
{ AOM_CDF4(20249, 25253, 27417)}, { AOM_CDF4(21070, 27518, 30001)},
{ AOM_CDF4(16854, 24469, 28074)}, { AOM_CDF4(12864, 20486, 25000)},
{ AOM_CDF4(9962, 16978, 21778)}, { AOM_CDF4(8074, 14338, 19048)},
{ AOM_CDF4(4494, 8479, 11906)}
},
{
{ AOM_CDF4(13960, 19617, 22829)}, { AOM_CDF4(11150, 17341, 21228)},
{ AOM_CDF4(7150, 12964, 17190)}, { AOM_CDF4(5331, 10002, 13867)},
{ AOM_CDF4(4167, 7744, 11057)}, { AOM_CDF4(3480, 6629, 9646)},
{ AOM_CDF4(1883, 3784, 5686)}, { AOM_CDF4(18752, 25660, 28912)},
{ AOM_CDF4(16968, 24586, 28030)}, { AOM_CDF4(13520, 21055, 25313)},
{ AOM_CDF4(10453, 17626, 22280)}, { AOM_CDF4(8386, 14505, 19116)},
{ AOM_CDF4(6742, 12595, 17008)}, { AOM_CDF4(4273, 8140, 11499)},
{ AOM_CDF4(22120, 27827, 30233)}, { AOM_CDF4(20563, 27358, 29895)},
{ AOM_CDF4(17076, 24644, 28153)}, { AOM_CDF4(13362, 20942, 25309)},
{ AOM_CDF4(10794, 17965, 22695)}, { AOM_CDF4(9014, 15652, 20319)},
{ AOM_CDF4(5708, 10512, 14497)}
}
},
{
{
{ AOM_CDF4(5705, 10930, 15725)}, { AOM_CDF4(7946, 12765, 16115)},
{ AOM_CDF4(6801, 12123, 16226)}, { AOM_CDF4(5462, 10135, 14200)},
{ AOM_CDF4(4189, 8011, 11507)}, { AOM_CDF4(3191, 6229, 9408)},
{ AOM_CDF4(1057, 2137, 3212)}, { AOM_CDF4(10018, 17067, 21491)},
{ AOM_CDF4(7380, 12582, 16453)}, { AOM_CDF4(6068, 10845, 14339)},
{ AOM_CDF4(5098, 9198, 12555)}, { AOM_CDF4(4312, 8010, 11119)},
{ AOM_CDF4(3700, 6966, 9781)}, { AOM_CDF4(1693, 3326, 4887)},
{ AOM_CDF4(18757, 24930, 27774)}, { AOM_CDF4(17648, 24596, 27817)},
{ AOM_CDF4(14707, 22052, 26026)}, { AOM_CDF4(11720, 18852, 23292)},
{ AOM_CDF4(9357, 15952, 20525)}, { AOM_CDF4(7810, 13753, 18210)},
{ AOM_CDF4(3879, 7333, 10328)}
},
{
{ AOM_CDF4(8278, 13242, 15922)}, { AOM_CDF4(10547, 15867, 18919)},
{ AOM_CDF4(9106, 15842, 20609)}, { AOM_CDF4(6833, 13007, 17218)},
{ AOM_CDF4(4811, 9712, 13923)}, { AOM_CDF4(3985, 7352, 11128)},
{ AOM_CDF4(1688, 3458, 5262)}, { AOM_CDF4(12951, 21861, 26510)},
{ AOM_CDF4(9788, 16044, 20276)}, { AOM_CDF4(6309, 11244, 14870)},
{ AOM_CDF4(5183, 9349, 12566)}, { AOM_CDF4(4389, 8229, 11492)},
{ AOM_CDF4(3633, 6945, 10620)}, { AOM_CDF4(3600, 6847, 9907)},
{ AOM_CDF4(21748, 28137, 30255)}, { AOM_CDF4(19436, 26581, 29560)},
{ AOM_CDF4(16359, 24201, 27953)}, { AOM_CDF4(13961, 21693, 25871)},
{ AOM_CDF4(11544, 18686, 23322)}, { AOM_CDF4(9372, 16462, 20952)},
{ AOM_CDF4(6138, 11210, 15390)}
}
},
{
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}
}
}
},
{
{
{
{ AOM_CDF4(16138, 22223, 25509)}, { AOM_CDF4(15347, 22430, 26332)},
{ AOM_CDF4(9614, 16736, 21332)}, { AOM_CDF4(6600, 12275, 16907)},
{ AOM_CDF4(4811, 9424, 13547)}, { AOM_CDF4(3748, 7809, 11420)},
{ AOM_CDF4(2254, 4587, 6890)}, { AOM_CDF4(15196, 20284, 23177)},
{ AOM_CDF4(18317, 25469, 28451)}, { AOM_CDF4(13918, 21651, 25842)},
{ AOM_CDF4(10052, 17150, 21995)}, { AOM_CDF4(7499, 13630, 18587)},
{ AOM_CDF4(6158, 11417, 16003)}, { AOM_CDF4(4014, 7785, 11252)},
{ AOM_CDF4(15048, 21067, 24384)}, { AOM_CDF4(18202, 25346, 28553)},
{ AOM_CDF4(14302, 22019, 26356)}, { AOM_CDF4(10839, 18139, 23166)},
{ AOM_CDF4(8715, 15744, 20806)}, { AOM_CDF4(7536, 13576, 18544)},
{ AOM_CDF4(5413, 10335, 14498)}
},
{
{ AOM_CDF4(17394, 24501, 27895)}, { AOM_CDF4(15889, 23420, 27185)},
{ AOM_CDF4(11561, 19133, 23870)}, { AOM_CDF4(8285, 14812, 19844)},
{ AOM_CDF4(6496, 12043, 16550)}, { AOM_CDF4(4771, 9574, 13677)},
{ AOM_CDF4(3603, 6830, 10144)}, { AOM_CDF4(21656, 27704, 30200)},
{ AOM_CDF4(21324, 27915, 30511)}, { AOM_CDF4(17327, 25336, 28997)},
{ AOM_CDF4(13417, 21381, 26033)}, { AOM_CDF4(10132, 17425, 22338)},
{ AOM_CDF4(8580, 15016, 19633)}, { AOM_CDF4(5694, 11477, 16411)},
{ AOM_CDF4(24116, 29780, 31450)}, { AOM_CDF4(23853, 29695, 31591)},
{ AOM_CDF4(20085, 27614, 30428)}, { AOM_CDF4(15326, 24335, 28575)},
{ AOM_CDF4(11814, 19472, 24810)}, { AOM_CDF4(10221, 18611, 24767)},
{ AOM_CDF4(7689, 14558, 20321)}
}
},
{
{
{ AOM_CDF4(16214, 22380, 25770)}, { AOM_CDF4(14213, 21304, 25295)},
{ AOM_CDF4(9213, 15823, 20455)}, { AOM_CDF4(6395, 11758, 16139)},
{ AOM_CDF4(4779, 9187, 13066)}, { AOM_CDF4(3821, 7501, 10953)},
{ AOM_CDF4(2293, 4567, 6795)}, { AOM_CDF4(15859, 21283, 23820)},
{ AOM_CDF4(18404, 25602, 28726)}, { AOM_CDF4(14325, 21980, 26206)},
{ AOM_CDF4(10669, 17937, 22720)}, { AOM_CDF4(8297, 14642, 19447)},
{ AOM_CDF4(6746, 12389, 16893)}, { AOM_CDF4(4324, 8251, 11770)},
{ AOM_CDF4(16532, 21631, 24475)}, { AOM_CDF4(20667, 27150, 29668)},
{ AOM_CDF4(16728, 24510, 28175)}, { AOM_CDF4(12861, 20645, 25332)},
{ AOM_CDF4(10076, 17361, 22417)}, { AOM_CDF4(8395, 14940, 19963)},
{ AOM_CDF4(5731, 10683, 14912)}
},
{
{ AOM_CDF4(14433, 21155, 24938)}, { AOM_CDF4(14658, 21716, 25545)},
{ AOM_CDF4(9923, 16824, 21557)}, { AOM_CDF4(6982, 13052, 17721)},
{ AOM_CDF4(5419, 10503, 15050)}, { AOM_CDF4(4852, 9162, 13014)},
{ AOM_CDF4(3271, 6395, 9630)}, { AOM_CDF4(22210, 27833, 30109)},
{ AOM_CDF4(20750, 27368, 29821)}, { AOM_CDF4(16894, 24828, 28573)},
{ AOM_CDF4(13247, 21276, 25757)}, { AOM_CDF4(10038, 17265, 22563)},
{ AOM_CDF4(8587, 14947, 20327)}, { AOM_CDF4(5645, 11371, 15252)},
{ AOM_CDF4(22027, 27526, 29714)}, { AOM_CDF4(23098, 29146, 31221)},
{ AOM_CDF4(19886, 27341, 30272)}, { AOM_CDF4(15609, 23747, 28046)},
{ AOM_CDF4(11993, 20065, 24939)}, { AOM_CDF4(9637, 18267, 23671)},
{ AOM_CDF4(7625, 13801, 19144)}
}
},
{
{
{ AOM_CDF4(14438, 20798, 24089)}, { AOM_CDF4(12621, 19203, 23097)},
{ AOM_CDF4(8177, 14125, 18402)}, { AOM_CDF4(5674, 10501, 14456)},
{ AOM_CDF4(4236, 8239, 11733)}, { AOM_CDF4(3447, 6750, 9806)},
{ AOM_CDF4(1986, 3950, 5864)}, { AOM_CDF4(16208, 22099, 24930)},
{ AOM_CDF4(16537, 24025, 27585)}, { AOM_CDF4(12780, 20381, 24867)},
{ AOM_CDF4(9767, 16612, 21416)}, { AOM_CDF4(7686, 13738, 18398)},
{ AOM_CDF4(6333, 11614, 15964)}, { AOM_CDF4(3941, 7571, 10836)},
{ AOM_CDF4(22819, 27422, 29202)}, { AOM_CDF4(22224, 28514, 30721)},
{ AOM_CDF4(17660, 25433, 28913)}, { AOM_CDF4(13574, 21482, 26002)},
{ AOM_CDF4(10629, 17977, 22938)}, { AOM_CDF4(8612, 15298, 20265)},
{ AOM_CDF4(5607, 10491, 14596)}
},
{
{ AOM_CDF4(13569, 19800, 23206)}, { AOM_CDF4(13128, 19924, 23869)},
{ AOM_CDF4(8329, 14841, 19403)}, { AOM_CDF4(6130, 10976, 15057)},
{ AOM_CDF4(4682, 8839, 12518)}, { AOM_CDF4(3656, 7409, 10588)},
{ AOM_CDF4(2577, 5099, 7412)}, { AOM_CDF4(22427, 28684, 30585)},
{ AOM_CDF4(20913, 27750, 30139)}, { AOM_CDF4(15840, 24109, 27834)},
{ AOM_CDF4(12308, 20029, 24569)}, { AOM_CDF4(10216, 16785, 21458)},
{ AOM_CDF4(8309, 14203, 19113)}, { AOM_CDF4(6043, 11168, 15307)},
{ AOM_CDF4(23166, 28901, 30998)}, { AOM_CDF4(21899, 28405, 30751)},
{ AOM_CDF4(18413, 26091, 29443)}, { AOM_CDF4(15233, 23114, 27352)},
{ AOM_CDF4(12683, 20472, 25288)}, { AOM_CDF4(10702, 18259, 23409)},
{ AOM_CDF4(8125, 14464, 19226)}
}
},
{
{
{ AOM_CDF4(9040, 14786, 18360)}, { AOM_CDF4(9979, 15718, 19415)},
{ AOM_CDF4(7913, 13918, 18311)}, { AOM_CDF4(5859, 10889, 15184)},
{ AOM_CDF4(4593, 8677, 12510)}, { AOM_CDF4(3820, 7396, 10791)},
{ AOM_CDF4(1730, 3471, 5192)}, { AOM_CDF4(11803, 18365, 22709)},
{ AOM_CDF4(11419, 18058, 22225)}, { AOM_CDF4(9418, 15774, 20243)},
{ AOM_CDF4(7539, 13325, 17657)}, { AOM_CDF4(6233, 11317, 15384)},
{ AOM_CDF4(5137, 9656, 13545)}, { AOM_CDF4(2977, 5774, 8349)},
{ AOM_CDF4(21207, 27246, 29640)}, { AOM_CDF4(19547, 26578, 29497)},
{ AOM_CDF4(16169, 23871, 27690)}, { AOM_CDF4(12820, 20458, 25018)},
{ AOM_CDF4(10224, 17332, 22214)}, { AOM_CDF4(8526, 15048, 19884)},
{ AOM_CDF4(5037, 9410, 13118)}
},
{
{ AOM_CDF4(12339, 17329, 20140)}, { AOM_CDF4(13505, 19895, 23225)},
{ AOM_CDF4(9847, 16944, 21564)}, { AOM_CDF4(7280, 13256, 18348)},
{ AOM_CDF4(4712, 10009, 14454)}, { AOM_CDF4(4361, 7914, 12477)},
{ AOM_CDF4(2870, 5628, 7995)}, { AOM_CDF4(20061, 25504, 28526)},
{ AOM_CDF4(15235, 22878, 26145)}, { AOM_CDF4(12985, 19958, 24155)},
{ AOM_CDF4(9782, 16641, 21403)}, { AOM_CDF4(9456, 16360, 20760)},
{ AOM_CDF4(6855, 12940, 18557)}, { AOM_CDF4(5661, 10564, 15002)},
{ AOM_CDF4(25656, 30602, 31894)}, { AOM_CDF4(22570, 29107, 31092)},
{ AOM_CDF4(18917, 26423, 29541)}, { AOM_CDF4(15940, 23649, 27754)},
{ AOM_CDF4(12803, 20581, 25219)}, { AOM_CDF4(11082, 18695, 23376)},
{ AOM_CDF4(7939, 14373, 19005)}
}
},
{
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}
}
}
},
{
{
{
{ AOM_CDF4(18315, 24289, 27551)}, { AOM_CDF4(16854, 24068, 27835)},
{ AOM_CDF4(10140, 17927, 23173)}, { AOM_CDF4(6722, 12982, 18267)},
{ AOM_CDF4(4661, 9826, 14706)}, { AOM_CDF4(3832, 8165, 12294)},
{ AOM_CDF4(2795, 6098, 9245)}, { AOM_CDF4(17145, 23326, 26672)},
{ AOM_CDF4(20733, 27680, 30308)}, { AOM_CDF4(16032, 24461, 28546)},
{ AOM_CDF4(11653, 20093, 25081)}, { AOM_CDF4(9290, 16429, 22086)},
{ AOM_CDF4(7796, 14598, 19982)}, { AOM_CDF4(6502, 12378, 17441)},
{ AOM_CDF4(21681, 27732, 30320)}, { AOM_CDF4(22389, 29044, 31261)},
{ AOM_CDF4(19027, 26731, 30087)}, { AOM_CDF4(14739, 23755, 28624)},
{ AOM_CDF4(11358, 20778, 25511)}, { AOM_CDF4(10995, 18073, 24190)},
{ AOM_CDF4(9162, 14990, 20617)}
},
{
{ AOM_CDF4(21425, 27952, 30388)}, { AOM_CDF4(18062, 25838, 29034)},
{ AOM_CDF4(11956, 19881, 24808)}, { AOM_CDF4(7718, 15000, 20980)},
{ AOM_CDF4(5702, 11254, 16143)}, { AOM_CDF4(4898, 9088, 16864)},
{ AOM_CDF4(3679, 6776, 11907)}, { AOM_CDF4(23294, 30160, 31663)},
{ AOM_CDF4(24397, 29896, 31836)}, { AOM_CDF4(19245, 27128, 30593)},
{ AOM_CDF4(13202, 19825, 26404)}, { AOM_CDF4(11578, 19297, 23957)},
{ AOM_CDF4(8073, 13297, 21370)}, { AOM_CDF4(5461, 10923, 19745)},
{ AOM_CDF4(27367, 30521, 31934)}, { AOM_CDF4(24904, 30671, 31940)},
{ AOM_CDF4(23075, 28460, 31299)}, { AOM_CDF4(14400, 23658, 30417)},
{ AOM_CDF4(13885, 23882, 28325)}, { AOM_CDF4(14746, 22938, 27853)},
{ AOM_CDF4(5461, 16384, 27307)}
}
},
{
{
{ AOM_CDF4(18274, 24813, 27890)}, { AOM_CDF4(15537, 23149, 27003)},
{ AOM_CDF4(9449, 16740, 21827)}, { AOM_CDF4(6700, 12498, 17261)},
{ AOM_CDF4(4988, 9866, 14198)}, { AOM_CDF4(4236, 8147, 11902)},
{ AOM_CDF4(2867, 5860, 8654)}, { AOM_CDF4(17124, 23171, 26101)},
{ AOM_CDF4(20396, 27477, 30148)}, { AOM_CDF4(16573, 24629, 28492)},
{ AOM_CDF4(12749, 20846, 25674)}, { AOM_CDF4(10233, 17878, 22818)},
{ AOM_CDF4(8525, 15332, 20363)}, { AOM_CDF4(6283, 11632, 16255)},
{ AOM_CDF4(20466, 26511, 29286)}, { AOM_CDF4(23059, 29174, 31191)},
{ AOM_CDF4(19481, 27263, 30241)}, { AOM_CDF4(15458, 23631, 28137)},
{ AOM_CDF4(12416, 20608, 25693)}, { AOM_CDF4(10261, 18011, 23261)},
{ AOM_CDF4(8016, 14655, 19666)}
},
{
{ AOM_CDF4(17616, 24586, 28112)}, { AOM_CDF4(15809, 23299, 27155)},
{ AOM_CDF4(10767, 18890, 23793)}, { AOM_CDF4(7727, 14255, 18865)},
{ AOM_CDF4(6129, 11926, 16882)}, { AOM_CDF4(4482, 9704, 14861)},
{ AOM_CDF4(3277, 7452, 11522)}, { AOM_CDF4(22956, 28551, 30730)},
{ AOM_CDF4(22724, 28937, 30961)}, { AOM_CDF4(18467, 26324, 29580)},
{ AOM_CDF4(13234, 20713, 25649)}, { AOM_CDF4(11181, 17592, 22481)},
{ AOM_CDF4(8291, 18358, 24576)}, { AOM_CDF4(7568, 11881, 14984)},
{ AOM_CDF4(24948, 29001, 31147)}, { AOM_CDF4(25674, 30619, 32151)},
{ AOM_CDF4(20841, 26793, 29603)}, { AOM_CDF4(14669, 24356, 28666)},
{ AOM_CDF4(11334, 23593, 28219)}, { AOM_CDF4(8922, 14762, 22873)},
{ AOM_CDF4(8301, 13544, 20535)}
}
},
{
{
{ AOM_CDF4(17113, 23733, 27081)}, { AOM_CDF4(14139, 21406, 25452)},
{ AOM_CDF4(8552, 15002, 19776)}, { AOM_CDF4(5871, 11120, 15378)},
{ AOM_CDF4(4455, 8616, 12253)}, { AOM_CDF4(3469, 6910, 10386)},
{ AOM_CDF4(2255, 4553, 6782)}, { AOM_CDF4(18224, 24376, 27053)},
{ AOM_CDF4(19290, 26710, 29614)}, { AOM_CDF4(14936, 22991, 27184)},
{ AOM_CDF4(11238, 18951, 23762)}, { AOM_CDF4(8786, 15617, 20588)},
{ AOM_CDF4(7317, 13228, 18003)}, { AOM_CDF4(5101, 9512, 13493)},
{ AOM_CDF4(22639, 28222, 30210)}, { AOM_CDF4(23216, 29331, 31307)},
{ AOM_CDF4(19075, 26762, 29895)}, { AOM_CDF4(15014, 23113, 27457)},
{ AOM_CDF4(11938, 19857, 24752)}, { AOM_CDF4(9942, 17280, 22282)},
{ AOM_CDF4(7167, 13144, 17752)}
},
{
{ AOM_CDF4(15820, 22738, 26488)}, { AOM_CDF4(13530, 20885, 25216)},
{ AOM_CDF4(8395, 15530, 20452)}, { AOM_CDF4(6574, 12321, 16380)},
{ AOM_CDF4(5353, 10419, 14568)}, { AOM_CDF4(4613, 8446, 12381)},
{ AOM_CDF4(3440, 7158, 9903)}, { AOM_CDF4(24247, 29051, 31224)},
{ AOM_CDF4(22118, 28058, 30369)}, { AOM_CDF4(16498, 24768, 28389)},
{ AOM_CDF4(12920, 21175, 26137)}, { AOM_CDF4(10730, 18619, 25352)},
{ AOM_CDF4(10187, 16279, 22791)}, { AOM_CDF4(9310, 14631, 22127)},
{ AOM_CDF4(24970, 30558, 32057)}, { AOM_CDF4(24801, 29942, 31698)},
{ AOM_CDF4(22432, 28453, 30855)}, { AOM_CDF4(19054, 25680, 29580)},
{ AOM_CDF4(14392, 23036, 28109)}, { AOM_CDF4(12495, 20947, 26650)},
{ AOM_CDF4(12442, 20326, 26214)}
}
},
{
{
{ AOM_CDF4(12162, 18785, 22648)}, { AOM_CDF4(12749, 19697, 23806)},
{ AOM_CDF4(8580, 15297, 20346)}, { AOM_CDF4(6169, 11749, 16543)},
{ AOM_CDF4(4836, 9391, 13448)}, { AOM_CDF4(3821, 7711, 11613)},
{ AOM_CDF4(2228, 4601, 7070)}, { AOM_CDF4(16319, 24725, 28280)},
{ AOM_CDF4(15698, 23277, 27168)}, { AOM_CDF4(12726, 20368, 25047)},
{ AOM_CDF4(9912, 17015, 21976)}, { AOM_CDF4(7888, 14220, 19179)},
{ AOM_CDF4(6777, 12284, 17018)}, { AOM_CDF4(4492, 8590, 12252)},
{ AOM_CDF4(23249, 28904, 30947)}, { AOM_CDF4(21050, 27908, 30512)},
{ AOM_CDF4(17440, 25340, 28949)}, { AOM_CDF4(14059, 22018, 26541)},
{ AOM_CDF4(11288, 18903, 23898)}, { AOM_CDF4(9411, 16342, 21428)},
{ AOM_CDF4(6278, 11588, 15944)}
},
{
{ AOM_CDF4(13981, 20067, 23226)}, { AOM_CDF4(16922, 23580, 26783)},
{ AOM_CDF4(11005, 19039, 24487)}, { AOM_CDF4(7389, 14218, 19798)},
{ AOM_CDF4(5598, 11505, 17206)}, { AOM_CDF4(6090, 11213, 15659)},
{ AOM_CDF4(3820, 7371, 10119)}, { AOM_CDF4(21082, 26925, 29675)},
{ AOM_CDF4(21262, 28627, 31128)}, { AOM_CDF4(18392, 26454, 30437)},
{ AOM_CDF4(14870, 22910, 27096)}, { AOM_CDF4(12620, 19484, 24908)},
{ AOM_CDF4(9290, 16553, 22802)}, { AOM_CDF4(6668, 14288, 20004)},
{ AOM_CDF4(27704, 31055, 31949)}, { AOM_CDF4(24709, 29978, 31788)},
{ AOM_CDF4(21668, 29264, 31657)}, { AOM_CDF4(18295, 26968, 30074)},
{ AOM_CDF4(16399, 24422, 29313)}, { AOM_CDF4(14347, 23026, 28104)},
{ AOM_CDF4(12370, 19806, 24477)}
}
},
{
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}
}
}
}
};
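
/*
 * Illustrative sketch only, not used by the driver: the base-range
 * ("lps") CDFs above each carry BR_CDF_SIZE (4) symbols.  In AV1, a
 * coefficient level past the base symbols is accumulated by reading
 * base-range symbols repeatedly, each adding 0..BR_CDF_SIZE - 1, until a
 * symbol below BR_CDF_SIZE - 1 ends the loop (any remainder is Golomb
 * coded).  The vals[] array of pre-drawn 15-bit decoder values is a
 * stand-in for real arithmetic-decoder state, and the loop bound of 4
 * assumes the spec's COEFF_BASE_RANGE of 12.
 */
static inline int av1_br_level_sketch(const u16 *br_cdf, const u16 *vals,
				      int nvals)
{
	int level = NUM_BASE_LEVELS + 1;
	int idx, k;

	for (idx = 0; idx < 4 && idx < nvals; idx++) {
		k = av1_cdf_symbol_sketch(br_cdf, BR_CDF_SIZE, vals[idx]);
		level += k;
		if (k < BR_CDF_SIZE - 1)
			break;
	}

	return level;
}
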
/* Default CDFs for the coefficient base-level symbols (levels 0..NUM_BASE_LEVELS + 1). */
static const u16 av1_default_coeff_base_multi_cdfs
	[TOKEN_CDF_Q_CTXS][TX_SIZES][PLANE_TYPES]
	[SIG_COEF_CONTEXTS][CDF_SIZE(NUM_BASE_LEVELS + 2) + 1] = {
{
{
{
{ AOM_CDF4(4034, 8930, 12727)}, { AOM_CDF4(18082, 29741, 31877)},
{ AOM_CDF4(12596, 26124, 30493)}, { AOM_CDF4(9446, 21118, 27005)},
{ AOM_CDF4(6308, 15141, 21279)}, { AOM_CDF4(2463, 6357, 9783)},
{ AOM_CDF4(20667, 30546, 31929)}, { AOM_CDF4(13043, 26123, 30134)},
{ AOM_CDF4(8151, 18757, 24778)}, { AOM_CDF4(5255, 12839, 18632)},
{ AOM_CDF4(2820, 7206, 11161)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(15736, 27553, 30604)},
{ AOM_CDF4(11210, 23794, 28787)}, { AOM_CDF4(5947, 13874, 19701)},
{ AOM_CDF4(4215, 9323, 13891)}, { AOM_CDF4(2833, 6462, 10059)},
{ AOM_CDF4(19605, 30393, 31582)}, { AOM_CDF4(13523, 26252, 30248)},
{ AOM_CDF4(8446, 18622, 24512)}, { AOM_CDF4(3818, 10343, 15974)},
{ AOM_CDF4(1481, 4117, 6796)}, { AOM_CDF4(22649, 31302, 32190)},
{ AOM_CDF4(14829, 27127, 30449)}, { AOM_CDF4(8313, 17702, 23304)},
{ AOM_CDF4(3022, 8301, 12786)}, { AOM_CDF4(1536, 4412, 7184)},
{ AOM_CDF4(22354, 29774, 31372)}, { AOM_CDF4(14723, 25472, 29214)},
{ AOM_CDF4(6673, 13745, 18662)}, { AOM_CDF4(2068, 5766, 9322)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(6302, 16444, 21761)}, { AOM_CDF4(23040, 31538, 32475)},
{ AOM_CDF4(15196, 28452, 31496)}, { AOM_CDF4(10020, 22946, 28514)},
{ AOM_CDF4(6533, 16862, 23501)}, { AOM_CDF4(3538, 9816, 15076)},
{ AOM_CDF4(24444, 31875, 32525)}, { AOM_CDF4(15881, 28924, 31635)},
{ AOM_CDF4(9922, 22873, 28466)}, { AOM_CDF4(6527, 16966, 23691)},
{ AOM_CDF4(4114, 11303, 17220)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(20201, 30770, 32209)},
{ AOM_CDF4(14754, 28071, 31258)}, { AOM_CDF4(8378, 20186, 26517)},
{ AOM_CDF4(5916, 15299, 21978)}, { AOM_CDF4(4268, 11583, 17901)},
{ AOM_CDF4(24361, 32025, 32581)}, { AOM_CDF4(18673, 30105, 31943)},
{ AOM_CDF4(10196, 22244, 27576)}, { AOM_CDF4(5495, 14349, 20417)},
{ AOM_CDF4(2676, 7415, 11498)}, { AOM_CDF4(24678, 31958, 32585)},
{ AOM_CDF4(18629, 29906, 31831)}, { AOM_CDF4(9364, 20724, 26315)},
{ AOM_CDF4(4641, 12318, 18094)}, { AOM_CDF4(2758, 7387, 11579)},
{ AOM_CDF4(25433, 31842, 32469)}, { AOM_CDF4(18795, 29289, 31411)},
{ AOM_CDF4(7644, 17584, 23592)}, { AOM_CDF4(3408, 9014, 15047)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(4536, 10072, 14001)}, { AOM_CDF4(25459, 31416, 32206)},
{ AOM_CDF4(16605, 28048, 30818)}, { AOM_CDF4(11008, 22857, 27719)},
{ AOM_CDF4(6915, 16268, 22315)}, { AOM_CDF4(2625, 6812, 10537)},
{ AOM_CDF4(24257, 31788, 32499)}, { AOM_CDF4(16880, 29454, 31879)},
{ AOM_CDF4(11958, 25054, 29778)}, { AOM_CDF4(7916, 18718, 25084)},
{ AOM_CDF4(3383, 8777, 13446)}, { AOM_CDF4(22720, 31603, 32393)},
{ AOM_CDF4(14960, 28125, 31335)}, { AOM_CDF4(9731, 22210, 27928)},
{ AOM_CDF4(6304, 15832, 22277)}, { AOM_CDF4(2910, 7818, 12166)},
{ AOM_CDF4(20375, 30627, 32131)}, { AOM_CDF4(13904, 27284, 30887)},
{ AOM_CDF4(9368, 21558, 27144)}, { AOM_CDF4(5937, 14966, 21119)},
{ AOM_CDF4(2667, 7225, 11319)}, { AOM_CDF4(23970, 31470, 32378)},
{ AOM_CDF4(17173, 29734, 32018)}, { AOM_CDF4(12795, 25441, 29965)},
{ AOM_CDF4(8981, 19680, 25893)}, { AOM_CDF4(4728, 11372, 16902)},
{ AOM_CDF4(24287, 31797, 32439)}, { AOM_CDF4(16703, 29145, 31696)},
{ AOM_CDF4(10833, 23554, 28725)}, { AOM_CDF4(6468, 16566, 23057)},
{ AOM_CDF4(2415, 6562, 10278)}, { AOM_CDF4(26610, 32395, 32659)},
{ AOM_CDF4(18590, 30498, 32117)}, { AOM_CDF4(12420, 25756, 29950)},
{ AOM_CDF4(7639, 18746, 24710)}, { AOM_CDF4(3001, 8086, 12347)},
{ AOM_CDF4(25076, 32064, 32580)}, { AOM_CDF4(17946, 30128, 32028)},
{ AOM_CDF4(12024, 24985, 29378)}, { AOM_CDF4(7517, 18390, 24304)},
{ AOM_CDF4(3243, 8781, 13331)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(6037, 16771, 21957)}, { AOM_CDF4(24774, 31704, 32426)},
{ AOM_CDF4(16830, 28589, 31056)}, { AOM_CDF4(10602, 22828, 27760)},
{ AOM_CDF4(6733, 16829, 23071)}, { AOM_CDF4(3250, 8914, 13556)},
{ AOM_CDF4(25582, 32220, 32668)}, { AOM_CDF4(18659, 30342, 32223)},
{ AOM_CDF4(12546, 26149, 30515)}, { AOM_CDF4(8420, 20451, 26801)},
{ AOM_CDF4(4636, 12420, 18344)}, { AOM_CDF4(27581, 32362, 32639)},
{ AOM_CDF4(18987, 30083, 31978)}, { AOM_CDF4(11327, 24248, 29084)},
{ AOM_CDF4(7264, 17719, 24120)}, { AOM_CDF4(3995, 10768, 16169)},
{ AOM_CDF4(25893, 31831, 32487)}, { AOM_CDF4(16577, 28587, 31379)},
{ AOM_CDF4(10189, 22748, 28182)}, { AOM_CDF4(6832, 17094, 23556)},
{ AOM_CDF4(3708, 10110, 15334)}, { AOM_CDF4(25904, 32282, 32656)},
{ AOM_CDF4(19721, 30792, 32276)}, { AOM_CDF4(12819, 26243, 30411)},
{ AOM_CDF4(8572, 20614, 26891)}, { AOM_CDF4(5364, 14059, 20467)},
{ AOM_CDF4(26580, 32438, 32677)}, { AOM_CDF4(20852, 31225, 32340)},
{ AOM_CDF4(12435, 25700, 29967)}, { AOM_CDF4(8691, 20825, 26976)},
{ AOM_CDF4(4446, 12209, 17269)}, { AOM_CDF4(27350, 32429, 32696)},
{ AOM_CDF4(21372, 30977, 32272)}, { AOM_CDF4(12673, 25270, 29853)},
{ AOM_CDF4(9208, 20925, 26640)}, { AOM_CDF4(5018, 13351, 18732)},
{ AOM_CDF4(27351, 32479, 32713)}, { AOM_CDF4(21398, 31209, 32387)},
{ AOM_CDF4(12162, 25047, 29842)}, { AOM_CDF4(7896, 18691, 25319)},
{ AOM_CDF4(4670, 12882, 18881)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(5487, 10460, 13708)}, { AOM_CDF4(21597, 28303, 30674)},
{ AOM_CDF4(11037, 21953, 26476)}, { AOM_CDF4(8147, 17962, 22952)},
{ AOM_CDF4(5242, 13061, 18532)}, { AOM_CDF4(1889, 5208, 8182)},
{ AOM_CDF4(26774, 32133, 32590)}, { AOM_CDF4(17844, 29564, 31767)},
{ AOM_CDF4(11690, 24438, 29171)}, { AOM_CDF4(7542, 18215, 24459)},
{ AOM_CDF4(2993, 8050, 12319)}, { AOM_CDF4(28023, 32328, 32591)},
{ AOM_CDF4(18651, 30126, 31954)}, { AOM_CDF4(12164, 25146, 29589)},
{ AOM_CDF4(7762, 18530, 24771)}, { AOM_CDF4(3492, 9183, 13920)},
{ AOM_CDF4(27591, 32008, 32491)}, { AOM_CDF4(17149, 28853, 31510)},
{ AOM_CDF4(11485, 24003, 28860)}, { AOM_CDF4(7697, 18086, 24210)},
{ AOM_CDF4(3075, 7999, 12218)}, { AOM_CDF4(28268, 32482, 32654)},
{ AOM_CDF4(19631, 31051, 32404)}, { AOM_CDF4(13860, 27260, 31020)},
{ AOM_CDF4(9605, 21613, 27594)}, { AOM_CDF4(4876, 12162, 17908)},
{ AOM_CDF4(27248, 32316, 32576)}, { AOM_CDF4(18955, 30457, 32075)},
{ AOM_CDF4(11824, 23997, 28795)}, { AOM_CDF4(7346, 18196, 24647)},
{ AOM_CDF4(3403, 9247, 14111)}, { AOM_CDF4(29711, 32655, 32735)},
{ AOM_CDF4(21169, 31394, 32417)}, { AOM_CDF4(13487, 27198, 30957)},
{ AOM_CDF4(8828, 21683, 27614)}, { AOM_CDF4(4270, 11451, 17038)},
{ AOM_CDF4(28708, 32578, 32731)}, { AOM_CDF4(20120, 31241, 32482)},
{ AOM_CDF4(13692, 27550, 31321)}, { AOM_CDF4(9418, 22514, 28439)},
{ AOM_CDF4(4999, 13283, 19462)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(5673, 14302, 19711)}, { AOM_CDF4(26251, 30701, 31834)},
{ AOM_CDF4(12782, 23783, 27803)}, { AOM_CDF4(9127, 20657, 25808)},
{ AOM_CDF4(6368, 16208, 21462)}, { AOM_CDF4(2465, 7177, 10822)},
{ AOM_CDF4(29961, 32563, 32719)}, { AOM_CDF4(18318, 29891, 31949)},
{ AOM_CDF4(11361, 24514, 29357)}, { AOM_CDF4(7900, 19603, 25607)},
{ AOM_CDF4(4002, 10590, 15546)}, { AOM_CDF4(29637, 32310, 32595)},
{ AOM_CDF4(18296, 29913, 31809)}, { AOM_CDF4(10144, 21515, 26871)},
{ AOM_CDF4(5358, 14322, 20394)}, { AOM_CDF4(3067, 8362, 13346)},
{ AOM_CDF4(28652, 32470, 32676)}, { AOM_CDF4(17538, 30771, 32209)},
{ AOM_CDF4(13924, 26882, 30494)}, { AOM_CDF4(10496, 22837, 27869)},
{ AOM_CDF4(7236, 16396, 21621)}, { AOM_CDF4(30743, 32687, 32746)},
{ AOM_CDF4(23006, 31676, 32489)}, { AOM_CDF4(14494, 27828, 31120)},
{ AOM_CDF4(10174, 22801, 28352)}, { AOM_CDF4(6242, 15281, 21043)},
{ AOM_CDF4(25817, 32243, 32720)}, { AOM_CDF4(18618, 31367, 32325)},
{ AOM_CDF4(13997, 28318, 31878)}, { AOM_CDF4(12255, 26534, 31383)},
{ AOM_CDF4(9561, 21588, 28450)}, { AOM_CDF4(28188, 32635, 32724)},
{ AOM_CDF4(22060, 32365, 32728)}, { AOM_CDF4(18102, 30690, 32528)},
{ AOM_CDF4(14196, 28864, 31999)}, { AOM_CDF4(12262, 25792, 30865)},
{ AOM_CDF4(24176, 32109, 32628)}, { AOM_CDF4(18280, 29681, 31963)},
{ AOM_CDF4(10205, 23703, 29664)}, { AOM_CDF4(7889, 20025, 27676)},
{ AOM_CDF4(6060, 16743, 23970)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(5141, 7096, 8260)}, { AOM_CDF4(27186, 29022, 29789)},
{ AOM_CDF4(6668, 12568, 15682)}, { AOM_CDF4(2172, 6181, 8638)},
{ AOM_CDF4(1126, 3379, 4531)}, { AOM_CDF4(443, 1361, 2254)},
{ AOM_CDF4(26083, 31153, 32436)}, { AOM_CDF4(13486, 24603, 28483)},
{ AOM_CDF4(6508, 14840, 19910)}, { AOM_CDF4(3386, 8800, 13286)},
{ AOM_CDF4(1530, 4322, 7054)}, { AOM_CDF4(29639, 32080, 32548)},
{ AOM_CDF4(15897, 27552, 30290)}, { AOM_CDF4(8588, 20047, 25383)},
{ AOM_CDF4(4889, 13339, 19269)}, { AOM_CDF4(2240, 6871, 10498)},
{ AOM_CDF4(28165, 32197, 32517)}, { AOM_CDF4(20735, 30427, 31568)},
{ AOM_CDF4(14325, 24671, 27692)}, { AOM_CDF4(5119, 12554, 17805)},
{ AOM_CDF4(1810, 5441, 8261)}, { AOM_CDF4(31212, 32724, 32748)},
{ AOM_CDF4(23352, 31766, 32545)}, { AOM_CDF4(14669, 27570, 31059)},
{ AOM_CDF4(8492, 20894, 27272)}, { AOM_CDF4(3644, 10194, 15204)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(2461, 7013, 9371)}, { AOM_CDF4(24749, 29600, 30986)},
{ AOM_CDF4(9466, 19037, 22417)}, { AOM_CDF4(3584, 9280, 14400)},
{ AOM_CDF4(1505, 3929, 5433)}, { AOM_CDF4(677, 1500, 2736)},
{ AOM_CDF4(23987, 30702, 32117)}, { AOM_CDF4(13554, 24571, 29263)},
{ AOM_CDF4(6211, 14556, 21155)}, { AOM_CDF4(3135, 10972, 15625)},
{ AOM_CDF4(2435, 7127, 11427)}, { AOM_CDF4(31300, 32532, 32550)},
{ AOM_CDF4(14757, 30365, 31954)}, { AOM_CDF4(4405, 11612, 18553)},
{ AOM_CDF4(580, 4132, 7322)}, { AOM_CDF4(1695, 10169, 14124)},
{ AOM_CDF4(30008, 32282, 32591)}, { AOM_CDF4(19244, 30108, 31748)},
{ AOM_CDF4(11180, 24158, 29555)}, { AOM_CDF4(5650, 14972, 19209)},
{ AOM_CDF4(2114, 5109, 8456)}, { AOM_CDF4(31856, 32716, 32748)},
{ AOM_CDF4(23012, 31664, 32572)}, { AOM_CDF4(13694, 26656, 30636)},
{ AOM_CDF4(8142, 19508, 26093)}, { AOM_CDF4(4253, 10955, 16724)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(601, 983, 1311)}, { AOM_CDF4(18725, 23406, 28087)},
{ AOM_CDF4(5461, 8192, 10923)}, { AOM_CDF4(3781, 15124, 21425)},
{ AOM_CDF4(2587, 7761, 12072)}, { AOM_CDF4(106, 458, 810)},
{ AOM_CDF4(22282, 29710, 31894)}, { AOM_CDF4(8508, 20926, 25984)},
{ AOM_CDF4(3726, 12713, 18083)}, { AOM_CDF4(1620, 7112, 10893)},
{ AOM_CDF4(729, 2236, 3495)}, { AOM_CDF4(30163, 32474, 32684)},
{ AOM_CDF4(18304, 30464, 32000)}, { AOM_CDF4(11443, 26526, 29647)},
{ AOM_CDF4(6007, 15292, 21299)}, { AOM_CDF4(2234, 6703, 8937)},
{ AOM_CDF4(30954, 32177, 32571)}, { AOM_CDF4(17363, 29562, 31076)},
{ AOM_CDF4(9686, 22464, 27410)}, { AOM_CDF4(8192, 16384, 21390)},
{ AOM_CDF4(1755, 8046, 11264)}, { AOM_CDF4(31168, 32734, 32748)},
{ AOM_CDF4(22486, 31441, 32471)}, { AOM_CDF4(12833, 25627, 29738)},
{ AOM_CDF4(6980, 17379, 23122)}, { AOM_CDF4(3111, 8887, 13479)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
}
},
{
{
{
{ AOM_CDF4(6041, 11854, 15927)}, { AOM_CDF4(20326, 30905, 32251)},
{ AOM_CDF4(14164, 26831, 30725)}, { AOM_CDF4(9760, 20647, 26585)},
{ AOM_CDF4(6416, 14953, 21219)}, { AOM_CDF4(2966, 7151, 10891)},
{ AOM_CDF4(23567, 31374, 32254)}, { AOM_CDF4(14978, 27416, 30946)},
{ AOM_CDF4(9434, 20225, 26254)}, { AOM_CDF4(6658, 14558, 20535)},
{ AOM_CDF4(3916, 8677, 12989)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(18088, 29545, 31587)},
{ AOM_CDF4(13062, 25843, 30073)}, { AOM_CDF4(8940, 16827, 22251)},
{ AOM_CDF4(7654, 13220, 17973)}, { AOM_CDF4(5733, 10316, 14456)},
{ AOM_CDF4(22879, 31388, 32114)}, { AOM_CDF4(15215, 27993, 30955)},
{ AOM_CDF4(9397, 19445, 24978)}, { AOM_CDF4(3442, 9813, 15344)},
{ AOM_CDF4(1368, 3936, 6532)}, { AOM_CDF4(25494, 32033, 32406)},
{ AOM_CDF4(16772, 27963, 30718)}, { AOM_CDF4(9419, 18165, 23260)},
{ AOM_CDF4(2677, 7501, 11797)}, { AOM_CDF4(1516, 4344, 7170)},
{ AOM_CDF4(26556, 31454, 32101)}, { AOM_CDF4(17128, 27035, 30108)},
{ AOM_CDF4(8324, 15344, 20249)}, { AOM_CDF4(1903, 5696, 9469)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(8455, 19003, 24368)}, { AOM_CDF4(23563, 32021, 32604)},
{ AOM_CDF4(16237, 29446, 31935)}, { AOM_CDF4(10724, 23999, 29358)},
{ AOM_CDF4(6725, 17528, 24416)}, { AOM_CDF4(3927, 10927, 16825)},
{ AOM_CDF4(26313, 32288, 32634)}, { AOM_CDF4(17430, 30095, 32095)},
{ AOM_CDF4(11116, 24606, 29679)}, { AOM_CDF4(7195, 18384, 25269)},
{ AOM_CDF4(4726, 12852, 19315)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(22822, 31648, 32483)},
{ AOM_CDF4(16724, 29633, 31929)}, { AOM_CDF4(10261, 23033, 28725)},
{ AOM_CDF4(7029, 17840, 24528)}, { AOM_CDF4(4867, 13886, 21502)},
{ AOM_CDF4(25298, 31892, 32491)}, { AOM_CDF4(17809, 29330, 31512)},
{ AOM_CDF4(9668, 21329, 26579)}, { AOM_CDF4(4774, 12956, 18976)},
{ AOM_CDF4(2322, 7030, 11540)}, { AOM_CDF4(25472, 31920, 32543)},
{ AOM_CDF4(17957, 29387, 31632)}, { AOM_CDF4(9196, 20593, 26400)},
{ AOM_CDF4(4680, 12705, 19202)}, { AOM_CDF4(2917, 8456, 13436)},
{ AOM_CDF4(26471, 32059, 32574)}, { AOM_CDF4(18458, 29783, 31909)},
{ AOM_CDF4(8400, 19464, 25956)}, { AOM_CDF4(3812, 10973, 17206)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(6779, 13743, 17678)}, { AOM_CDF4(24806, 31797, 32457)},
{ AOM_CDF4(17616, 29047, 31372)}, { AOM_CDF4(11063, 23175, 28003)},
{ AOM_CDF4(6521, 16110, 22324)}, { AOM_CDF4(2764, 7504, 11654)},
{ AOM_CDF4(25266, 32367, 32637)}, { AOM_CDF4(19054, 30553, 32175)},
{ AOM_CDF4(12139, 25212, 29807)}, { AOM_CDF4(7311, 18162, 24704)},
{ AOM_CDF4(3397, 9164, 14074)}, { AOM_CDF4(25988, 32208, 32522)},
{ AOM_CDF4(16253, 28912, 31526)}, { AOM_CDF4(9151, 21387, 27372)},
{ AOM_CDF4(5688, 14915, 21496)}, { AOM_CDF4(2717, 7627, 12004)},
{ AOM_CDF4(23144, 31855, 32443)}, { AOM_CDF4(16070, 28491, 31325)},
{ AOM_CDF4(8702, 20467, 26517)}, { AOM_CDF4(5243, 13956, 20367)},
{ AOM_CDF4(2621, 7335, 11567)}, { AOM_CDF4(26636, 32340, 32630)},
{ AOM_CDF4(19990, 31050, 32341)}, { AOM_CDF4(13243, 26105, 30315)},
{ AOM_CDF4(8588, 19521, 25918)}, { AOM_CDF4(4717, 11585, 17304)},
{ AOM_CDF4(25844, 32292, 32582)}, { AOM_CDF4(19090, 30635, 32097)},
{ AOM_CDF4(11963, 24546, 28939)}, { AOM_CDF4(6218, 16087, 22354)},
{ AOM_CDF4(2340, 6608, 10426)}, { AOM_CDF4(28046, 32576, 32694)},
{ AOM_CDF4(21178, 31313, 32296)}, { AOM_CDF4(13486, 26184, 29870)},
{ AOM_CDF4(7149, 17871, 23723)}, { AOM_CDF4(2833, 7958, 12259)},
{ AOM_CDF4(27710, 32528, 32686)}, { AOM_CDF4(20674, 31076, 32268)},
{ AOM_CDF4(12413, 24955, 29243)}, { AOM_CDF4(6676, 16927, 23097)},
{ AOM_CDF4(2966, 8333, 12919)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(8639, 19339, 24429)}, { AOM_CDF4(24404, 31837, 32525)},
{ AOM_CDF4(16997, 29425, 31784)}, { AOM_CDF4(11253, 24234, 29149)},
{ AOM_CDF4(6751, 17394, 24028)}, { AOM_CDF4(3490, 9830, 15191)},
{ AOM_CDF4(26283, 32471, 32714)}, { AOM_CDF4(19599, 31168, 32442)},
{ AOM_CDF4(13146, 26954, 30893)}, { AOM_CDF4(8214, 20588, 26890)},
{ AOM_CDF4(4699, 13081, 19300)}, { AOM_CDF4(28212, 32458, 32669)},
{ AOM_CDF4(18594, 30316, 32100)}, { AOM_CDF4(11219, 24408, 29234)},
{ AOM_CDF4(6865, 17656, 24149)}, { AOM_CDF4(3678, 10362, 16006)},
{ AOM_CDF4(25825, 32136, 32616)}, { AOM_CDF4(17313, 29853, 32021)},
{ AOM_CDF4(11197, 24471, 29472)}, { AOM_CDF4(6947, 17781, 24405)},
{ AOM_CDF4(3768, 10660, 16261)}, { AOM_CDF4(27352, 32500, 32706)},
{ AOM_CDF4(20850, 31468, 32469)}, { AOM_CDF4(14021, 27707, 31133)},
{ AOM_CDF4(8964, 21748, 27838)}, { AOM_CDF4(5437, 14665, 21187)},
{ AOM_CDF4(26304, 32492, 32698)}, { AOM_CDF4(20409, 31380, 32385)},
{ AOM_CDF4(13682, 27222, 30632)}, { AOM_CDF4(8974, 21236, 26685)},
{ AOM_CDF4(4234, 11665, 16934)}, { AOM_CDF4(26273, 32357, 32711)},
{ AOM_CDF4(20672, 31242, 32441)}, { AOM_CDF4(14172, 27254, 30902)},
{ AOM_CDF4(9870, 21898, 27275)}, { AOM_CDF4(5164, 13506, 19270)},
{ AOM_CDF4(26725, 32459, 32728)}, { AOM_CDF4(20991, 31442, 32527)},
{ AOM_CDF4(13071, 26434, 30811)}, { AOM_CDF4(8184, 20090, 26742)},
{ AOM_CDF4(4803, 13255, 19895)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(7555, 14942, 18501)}, { AOM_CDF4(24410, 31178, 32287)},
{ AOM_CDF4(14394, 26738, 30253)}, { AOM_CDF4(8413, 19554, 25195)},
{ AOM_CDF4(4766, 12924, 18785)}, { AOM_CDF4(2029, 5806, 9207)},
{ AOM_CDF4(26776, 32364, 32663)}, { AOM_CDF4(18732, 29967, 31931)},
{ AOM_CDF4(11005, 23786, 28852)}, { AOM_CDF4(6466, 16909, 23510)},
{ AOM_CDF4(3044, 8638, 13419)}, { AOM_CDF4(29208, 32582, 32704)},
{ AOM_CDF4(20068, 30857, 32208)}, { AOM_CDF4(12003, 25085, 29595)},
{ AOM_CDF4(6947, 17750, 24189)}, { AOM_CDF4(3245, 9103, 14007)},
{ AOM_CDF4(27359, 32465, 32669)}, { AOM_CDF4(19421, 30614, 32174)},
{ AOM_CDF4(11915, 25010, 29579)}, { AOM_CDF4(6950, 17676, 24074)},
{ AOM_CDF4(3007, 8473, 13096)}, { AOM_CDF4(29002, 32676, 32735)},
{ AOM_CDF4(22102, 31849, 32576)}, { AOM_CDF4(14408, 28009, 31405)},
{ AOM_CDF4(9027, 21679, 27931)}, { AOM_CDF4(4694, 12678, 18748)},
{ AOM_CDF4(28216, 32528, 32682)}, { AOM_CDF4(20849, 31264, 32318)},
{ AOM_CDF4(12756, 25815, 29751)}, { AOM_CDF4(7565, 18801, 24923)},
{ AOM_CDF4(3509, 9533, 14477)}, { AOM_CDF4(30133, 32687, 32739)},
{ AOM_CDF4(23063, 31910, 32515)}, { AOM_CDF4(14588, 28051, 31132)},
{ AOM_CDF4(9085, 21649, 27457)}, { AOM_CDF4(4261, 11654, 17264)},
{ AOM_CDF4(29518, 32691, 32748)}, { AOM_CDF4(22451, 31959, 32613)},
{ AOM_CDF4(14864, 28722, 31700)}, { AOM_CDF4(9695, 22964, 28716)},
{ AOM_CDF4(4932, 13358, 19502)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(6465, 16958, 21688)}, { AOM_CDF4(25199, 31514, 32360)},
{ AOM_CDF4(14774, 27149, 30607)}, { AOM_CDF4(9257, 21438, 26972)},
{ AOM_CDF4(5723, 15183, 21882)}, { AOM_CDF4(3150, 8879, 13731)},
{ AOM_CDF4(26989, 32262, 32682)}, { AOM_CDF4(17396, 29937, 32085)},
{ AOM_CDF4(11387, 24901, 29784)}, { AOM_CDF4(7289, 18821, 25548)},
{ AOM_CDF4(3734, 10577, 16086)}, { AOM_CDF4(29728, 32501, 32695)},
{ AOM_CDF4(17431, 29701, 31903)}, { AOM_CDF4(9921, 22826, 28300)},
{ AOM_CDF4(5896, 15434, 22068)}, { AOM_CDF4(3430, 9646, 14757)},
{ AOM_CDF4(28614, 32511, 32705)}, { AOM_CDF4(19364, 30638, 32263)},
{ AOM_CDF4(13129, 26254, 30402)}, { AOM_CDF4(8754, 20484, 26440)},
{ AOM_CDF4(4378, 11607, 17110)}, { AOM_CDF4(30292, 32671, 32744)},
{ AOM_CDF4(21780, 31603, 32501)}, { AOM_CDF4(14314, 27829, 31291)},
{ AOM_CDF4(9611, 22327, 28263)}, { AOM_CDF4(4890, 13087, 19065)},
{ AOM_CDF4(25862, 32567, 32733)}, { AOM_CDF4(20794, 32050, 32567)},
{ AOM_CDF4(17243, 30625, 32254)}, { AOM_CDF4(13283, 27628, 31474)},
{ AOM_CDF4(9669, 22532, 28918)}, { AOM_CDF4(27435, 32697, 32748)},
{ AOM_CDF4(24922, 32390, 32714)}, { AOM_CDF4(21449, 31504, 32536)},
{ AOM_CDF4(16392, 29729, 31832)}, { AOM_CDF4(11692, 24884, 29076)},
{ AOM_CDF4(24193, 32290, 32735)}, { AOM_CDF4(18909, 31104, 32563)},
{ AOM_CDF4(12236, 26841, 31403)}, { AOM_CDF4(8171, 21840, 29082)},
{ AOM_CDF4(7224, 17280, 25275)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(3078, 6839, 9890)}, { AOM_CDF4(13837, 20450, 24479)},
{ AOM_CDF4(5914, 14222, 19328)}, { AOM_CDF4(3866, 10267, 14762)},
{ AOM_CDF4(2612, 7208, 11042)}, { AOM_CDF4(1067, 2991, 4776)},
{ AOM_CDF4(25817, 31646, 32529)}, { AOM_CDF4(13708, 26338, 30385)},
{ AOM_CDF4(7328, 18585, 24870)}, { AOM_CDF4(4691, 13080, 19276)},
{ AOM_CDF4(1825, 5253, 8352)}, { AOM_CDF4(29386, 32315, 32624)},
{ AOM_CDF4(17160, 29001, 31360)}, { AOM_CDF4(9602, 21862, 27396)},
{ AOM_CDF4(5915, 15772, 22148)}, { AOM_CDF4(2786, 7779, 12047)},
{ AOM_CDF4(29246, 32450, 32663)}, { AOM_CDF4(18696, 29929, 31818)},
{ AOM_CDF4(10510, 23369, 28560)}, { AOM_CDF4(6229, 16499, 23125)},
{ AOM_CDF4(2608, 7448, 11705)}, { AOM_CDF4(30753, 32710, 32748)},
{ AOM_CDF4(21638, 31487, 32503)}, { AOM_CDF4(12937, 26854, 30870)},
{ AOM_CDF4(8182, 20596, 26970)}, { AOM_CDF4(3637, 10269, 15497)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(5244, 12150, 16906)}, { AOM_CDF4(20486, 26858, 29701)},
{ AOM_CDF4(7756, 18317, 23735)}, { AOM_CDF4(3452, 9256, 13146)},
{ AOM_CDF4(2020, 5206, 8229)}, { AOM_CDF4(1801, 4993, 7903)},
{ AOM_CDF4(27051, 31858, 32531)}, { AOM_CDF4(15988, 27531, 30619)},
{ AOM_CDF4(9188, 21484, 26719)}, { AOM_CDF4(6273, 17186, 23800)},
{ AOM_CDF4(3108, 9355, 14764)}, { AOM_CDF4(31076, 32520, 32680)},
{ AOM_CDF4(18119, 30037, 31850)}, { AOM_CDF4(10244, 22969, 27472)},
{ AOM_CDF4(4692, 14077, 19273)}, { AOM_CDF4(3694, 11677, 17556)},
{ AOM_CDF4(30060, 32581, 32720)}, { AOM_CDF4(21011, 30775, 32120)},
{ AOM_CDF4(11931, 24820, 29289)}, { AOM_CDF4(7119, 17662, 24356)},
{ AOM_CDF4(3833, 10706, 16304)}, { AOM_CDF4(31954, 32731, 32748)},
{ AOM_CDF4(23913, 31724, 32489)}, { AOM_CDF4(15520, 28060, 31286)},
{ AOM_CDF4(11517, 23008, 28571)}, { AOM_CDF4(6193, 14508, 20629)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(1035, 2807, 4156)}, { AOM_CDF4(13162, 18138, 20939)},
{ AOM_CDF4(2696, 6633, 8755)}, { AOM_CDF4(1373, 4161, 6853)},
{ AOM_CDF4(1099, 2746, 4716)}, { AOM_CDF4(340, 1021, 1599)},
{ AOM_CDF4(22826, 30419, 32135)}, { AOM_CDF4(10395, 21762, 26942)},
{ AOM_CDF4(4726, 12407, 17361)}, { AOM_CDF4(2447, 7080, 10593)},
{ AOM_CDF4(1227, 3717, 6011)}, { AOM_CDF4(28156, 31424, 31934)},
{ AOM_CDF4(16915, 27754, 30373)}, { AOM_CDF4(9148, 20990, 26431)},
{ AOM_CDF4(5950, 15515, 21148)}, { AOM_CDF4(2492, 7327, 11526)},
{ AOM_CDF4(30602, 32477, 32670)}, { AOM_CDF4(20026, 29955, 31568)},
{ AOM_CDF4(11220, 23628, 28105)}, { AOM_CDF4(6652, 17019, 22973)},
{ AOM_CDF4(3064, 8536, 13043)}, { AOM_CDF4(31769, 32724, 32748)},
{ AOM_CDF4(22230, 30887, 32373)}, { AOM_CDF4(12234, 25079, 29731)},
{ AOM_CDF4(7326, 18816, 25353)}, { AOM_CDF4(3933, 10907, 16616)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
}
},
{
{
{
{ AOM_CDF4(8896, 16227, 20630)}, { AOM_CDF4(23629, 31782, 32527)},
{ AOM_CDF4(15173, 27755, 31321)}, { AOM_CDF4(10158, 21233, 27382)},
{ AOM_CDF4(6420, 14857, 21558)}, { AOM_CDF4(3269, 8155, 12646)},
{ AOM_CDF4(24835, 32009, 32496)}, { AOM_CDF4(16509, 28421, 31579)},
{ AOM_CDF4(10957, 21514, 27418)}, { AOM_CDF4(7881, 15930, 22096)},
{ AOM_CDF4(5388, 10960, 15918)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(20745, 30773, 32093)},
{ AOM_CDF4(15200, 27221, 30861)}, { AOM_CDF4(13032, 20873, 25667)},
{ AOM_CDF4(12285, 18663, 23494)}, { AOM_CDF4(11563, 17481, 21489)},
{ AOM_CDF4(26260, 31982, 32320)}, { AOM_CDF4(15397, 28083, 31100)},
{ AOM_CDF4(9742, 19217, 24824)}, { AOM_CDF4(3261, 9629, 15362)},
{ AOM_CDF4(1480, 4322, 7499)}, { AOM_CDF4(27599, 32256, 32460)},
{ AOM_CDF4(16857, 27659, 30774)}, { AOM_CDF4(9551, 18290, 23748)},
{ AOM_CDF4(3052, 8933, 14103)}, { AOM_CDF4(2021, 5910, 9787)},
{ AOM_CDF4(29005, 32015, 32392)}, { AOM_CDF4(17677, 27694, 30863)},
{ AOM_CDF4(9204, 17356, 23219)}, { AOM_CDF4(2403, 7516, 12814)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(10808, 22056, 26896)}, { AOM_CDF4(25739, 32313, 32676)},
{ AOM_CDF4(17288, 30203, 32221)}, { AOM_CDF4(11359, 24878, 29896)},
{ AOM_CDF4(6949, 17767, 24893)}, { AOM_CDF4(4287, 11796, 18071)},
{ AOM_CDF4(27880, 32521, 32705)}, { AOM_CDF4(19038, 31004, 32414)},
{ AOM_CDF4(12564, 26345, 30768)}, { AOM_CDF4(8269, 19947, 26779)},
{ AOM_CDF4(5674, 14657, 21674)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(25742, 32319, 32671)},
{ AOM_CDF4(19557, 31164, 32454)}, { AOM_CDF4(13381, 26381, 30755)},
{ AOM_CDF4(10101, 21466, 26722)}, { AOM_CDF4(9209, 19650, 26825)},
{ AOM_CDF4(27107, 31917, 32432)}, { AOM_CDF4(18056, 28893, 31203)},
{ AOM_CDF4(10200, 21434, 26764)}, { AOM_CDF4(4660, 12913, 19502)},
{ AOM_CDF4(2368, 6930, 12504)}, { AOM_CDF4(26960, 32158, 32613)},
{ AOM_CDF4(18628, 30005, 32031)}, { AOM_CDF4(10233, 22442, 28232)},
{ AOM_CDF4(5471, 14630, 21516)}, { AOM_CDF4(3235, 10767, 17109)},
{ AOM_CDF4(27696, 32440, 32692)}, { AOM_CDF4(20032, 31167, 32438)},
{ AOM_CDF4(8700, 21341, 28442)}, { AOM_CDF4(5662, 14831, 21795)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(9704, 17294, 21132)}, { AOM_CDF4(26762, 32278, 32633)},
{ AOM_CDF4(18382, 29620, 31819)}, { AOM_CDF4(10891, 23475, 28723)},
{ AOM_CDF4(6358, 16583, 23309)}, { AOM_CDF4(3248, 9118, 14141)},
{ AOM_CDF4(27204, 32573, 32699)}, { AOM_CDF4(19818, 30824, 32329)},
{ AOM_CDF4(11772, 25120, 30041)}, { AOM_CDF4(6995, 18033, 25039)},
{ AOM_CDF4(3752, 10442, 16098)}, { AOM_CDF4(27222, 32256, 32559)},
{ AOM_CDF4(15356, 28399, 31475)}, { AOM_CDF4(8821, 20635, 27057)},
{ AOM_CDF4(5511, 14404, 21239)}, { AOM_CDF4(2935, 8222, 13051)},
{ AOM_CDF4(24875, 32120, 32529)}, { AOM_CDF4(15233, 28265, 31445)},
{ AOM_CDF4(8605, 20570, 26932)}, { AOM_CDF4(5431, 14413, 21196)},
{ AOM_CDF4(2994, 8341, 13223)}, { AOM_CDF4(28201, 32604, 32700)},
{ AOM_CDF4(21041, 31446, 32456)}, { AOM_CDF4(13221, 26213, 30475)},
{ AOM_CDF4(8255, 19385, 26037)}, { AOM_CDF4(4930, 12585, 18830)},
{ AOM_CDF4(28768, 32448, 32627)}, { AOM_CDF4(19705, 30561, 32021)},
{ AOM_CDF4(11572, 23589, 28220)}, { AOM_CDF4(5532, 15034, 21446)},
{ AOM_CDF4(2460, 7150, 11456)}, { AOM_CDF4(29874, 32619, 32699)},
{ AOM_CDF4(21621, 31071, 32201)}, { AOM_CDF4(12511, 24747, 28992)},
{ AOM_CDF4(6281, 16395, 22748)}, { AOM_CDF4(3246, 9278, 14497)},
{ AOM_CDF4(29715, 32625, 32712)}, { AOM_CDF4(20958, 31011, 32283)},
{ AOM_CDF4(11233, 23671, 28806)}, { AOM_CDF4(6012, 16128, 22868)},
{ AOM_CDF4(3427, 9851, 15414)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(11016, 22111, 26794)}, { AOM_CDF4(25946, 32357, 32677)},
{ AOM_CDF4(17890, 30452, 32252)}, { AOM_CDF4(11678, 25142, 29816)},
{ AOM_CDF4(6720, 17534, 24584)}, { AOM_CDF4(4230, 11665, 17820)},
{ AOM_CDF4(28400, 32623, 32747)}, { AOM_CDF4(21164, 31668, 32575)},
{ AOM_CDF4(13572, 27388, 31182)}, { AOM_CDF4(8234, 20750, 27358)},
{ AOM_CDF4(5065, 14055, 20897)}, { AOM_CDF4(28981, 32547, 32705)},
{ AOM_CDF4(18681, 30543, 32239)}, { AOM_CDF4(10919, 24075, 29286)},
{ AOM_CDF4(6431, 17199, 24077)}, { AOM_CDF4(3819, 10464, 16618)},
{ AOM_CDF4(26870, 32467, 32693)}, { AOM_CDF4(19041, 30831, 32347)},
{ AOM_CDF4(11794, 25211, 30016)}, { AOM_CDF4(6888, 18019, 24970)},
{ AOM_CDF4(4370, 12363, 18992)}, { AOM_CDF4(29578, 32670, 32744)},
{ AOM_CDF4(23159, 32007, 32613)}, { AOM_CDF4(15315, 28669, 31676)},
{ AOM_CDF4(9298, 22607, 28782)}, { AOM_CDF4(6144, 15913, 22968)},
{ AOM_CDF4(28110, 32499, 32669)}, { AOM_CDF4(21574, 30937, 32015)},
{ AOM_CDF4(12759, 24818, 28727)}, { AOM_CDF4(6545, 16761, 23042)},
{ AOM_CDF4(3649, 10597, 16833)}, { AOM_CDF4(28163, 32552, 32728)},
{ AOM_CDF4(22101, 31469, 32464)}, { AOM_CDF4(13160, 25472, 30143)},
{ AOM_CDF4(7303, 18684, 25468)}, { AOM_CDF4(5241, 13975, 20955)},
{ AOM_CDF4(28400, 32631, 32744)}, { AOM_CDF4(22104, 31793, 32603)},
{ AOM_CDF4(13557, 26571, 30846)}, { AOM_CDF4(7749, 19861, 26675)},
{ AOM_CDF4(4873, 14030, 21234)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(9800, 17635, 21073)}, { AOM_CDF4(26153, 31885, 32527)},
{ AOM_CDF4(15038, 27852, 31006)}, { AOM_CDF4(8718, 20564, 26486)},
{ AOM_CDF4(5128, 14076, 20514)}, { AOM_CDF4(2636, 7566, 11925)},
{ AOM_CDF4(27551, 32504, 32701)}, { AOM_CDF4(18310, 30054, 32100)},
{ AOM_CDF4(10211, 23420, 29082)}, { AOM_CDF4(6222, 16876, 23916)},
{ AOM_CDF4(3462, 9954, 15498)}, { AOM_CDF4(29991, 32633, 32721)},
{ AOM_CDF4(19883, 30751, 32201)}, { AOM_CDF4(11141, 24184, 29285)},
{ AOM_CDF4(6420, 16940, 23774)}, { AOM_CDF4(3392, 9753, 15118)},
{ AOM_CDF4(28465, 32616, 32712)}, { AOM_CDF4(19850, 30702, 32244)},
{ AOM_CDF4(10983, 24024, 29223)}, { AOM_CDF4(6294, 16770, 23582)},
{ AOM_CDF4(3244, 9283, 14509)}, { AOM_CDF4(30023, 32717, 32748)},
{ AOM_CDF4(22940, 32032, 32626)}, { AOM_CDF4(14282, 27928, 31473)},
{ AOM_CDF4(8562, 21327, 27914)}, { AOM_CDF4(4846, 13393, 19919)},
{ AOM_CDF4(29981, 32590, 32695)}, { AOM_CDF4(20465, 30963, 32166)},
{ AOM_CDF4(11479, 23579, 28195)}, { AOM_CDF4(5916, 15648, 22073)},
{ AOM_CDF4(3031, 8605, 13398)}, { AOM_CDF4(31146, 32691, 32739)},
{ AOM_CDF4(23106, 31724, 32444)}, { AOM_CDF4(13783, 26738, 30439)},
{ AOM_CDF4(7852, 19468, 25807)}, { AOM_CDF4(3860, 11124, 16853)},
{ AOM_CDF4(31014, 32724, 32748)}, { AOM_CDF4(23629, 32109, 32628)},
{ AOM_CDF4(14747, 28115, 31403)}, { AOM_CDF4(8545, 21242, 27478)},
{ AOM_CDF4(4574, 12781, 19067)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(9185, 19694, 24688)}, { AOM_CDF4(26081, 31985, 32621)},
{ AOM_CDF4(16015, 29000, 31787)}, { AOM_CDF4(10542, 23690, 29206)},
{ AOM_CDF4(6732, 17945, 24677)}, { AOM_CDF4(3916, 11039, 16722)},
{ AOM_CDF4(28224, 32566, 32744)}, { AOM_CDF4(19100, 31138, 32485)},
{ AOM_CDF4(12528, 26620, 30879)}, { AOM_CDF4(7741, 20277, 26885)},
{ AOM_CDF4(4566, 12845, 18990)}, { AOM_CDF4(29933, 32593, 32718)},
{ AOM_CDF4(17670, 30333, 32155)}, { AOM_CDF4(10385, 23600, 28909)},
{ AOM_CDF4(6243, 16236, 22407)}, { AOM_CDF4(3976, 10389, 16017)},
{ AOM_CDF4(28377, 32561, 32738)}, { AOM_CDF4(19366, 31175, 32482)},
{ AOM_CDF4(13327, 27175, 31094)}, { AOM_CDF4(8258, 20769, 27143)},
{ AOM_CDF4(4703, 13198, 19527)}, { AOM_CDF4(31086, 32706, 32748)},
{ AOM_CDF4(22853, 31902, 32583)}, { AOM_CDF4(14759, 28186, 31419)},
{ AOM_CDF4(9284, 22382, 28348)}, { AOM_CDF4(5585, 15192, 21868)},
{ AOM_CDF4(28291, 32652, 32746)}, { AOM_CDF4(19849, 32107, 32571)},
{ AOM_CDF4(14834, 26818, 29214)}, { AOM_CDF4(10306, 22594, 28672)},
{ AOM_CDF4(6615, 17384, 23384)}, { AOM_CDF4(28947, 32604, 32745)},
{ AOM_CDF4(25625, 32289, 32646)}, { AOM_CDF4(18758, 28672, 31403)},
{ AOM_CDF4(10017, 23430, 28523)}, { AOM_CDF4(6862, 15269, 22131)},
{ AOM_CDF4(23933, 32509, 32739)}, { AOM_CDF4(19927, 31495, 32631)},
{ AOM_CDF4(11903, 26023, 30621)}, { AOM_CDF4(7026, 20094, 27252)},
{ AOM_CDF4(5998, 18106, 24437)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(4456, 11274, 15533)}, { AOM_CDF4(21219, 29079, 31616)},
{ AOM_CDF4(11173, 23774, 28567)}, { AOM_CDF4(7282, 18293, 24263)},
{ AOM_CDF4(4890, 13286, 19115)}, { AOM_CDF4(1890, 5508, 8659)},
{ AOM_CDF4(26651, 32136, 32647)}, { AOM_CDF4(14630, 28254, 31455)},
{ AOM_CDF4(8716, 21287, 27395)}, { AOM_CDF4(5615, 15331, 22008)},
{ AOM_CDF4(2675, 7700, 12150)}, { AOM_CDF4(29954, 32526, 32690)},
{ AOM_CDF4(16126, 28982, 31633)}, { AOM_CDF4(9030, 21361, 27352)},
{ AOM_CDF4(5411, 14793, 21271)}, { AOM_CDF4(2943, 8422, 13163)},
{ AOM_CDF4(29539, 32601, 32730)}, { AOM_CDF4(18125, 30385, 32201)},
{ AOM_CDF4(10422, 24090, 29468)}, { AOM_CDF4(6468, 17487, 24438)},
{ AOM_CDF4(2970, 8653, 13531)}, { AOM_CDF4(30912, 32715, 32748)},
{ AOM_CDF4(20666, 31373, 32497)}, { AOM_CDF4(12509, 26640, 30917)},
{ AOM_CDF4(8058, 20629, 27290)}, { AOM_CDF4(4231, 12006, 18052)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(10202, 20633, 25484)}, { AOM_CDF4(27336, 31445, 32352)},
{ AOM_CDF4(12420, 24384, 28552)}, { AOM_CDF4(7648, 18115, 23856)},
{ AOM_CDF4(5662, 14341, 19902)}, { AOM_CDF4(3611, 10328, 15390)},
{ AOM_CDF4(30945, 32616, 32736)}, { AOM_CDF4(18682, 30505, 32253)},
{ AOM_CDF4(11513, 25336, 30203)}, { AOM_CDF4(7449, 19452, 26148)},
{ AOM_CDF4(4482, 13051, 18886)}, { AOM_CDF4(32022, 32690, 32747)},
{ AOM_CDF4(18578, 30501, 32146)}, { AOM_CDF4(11249, 23368, 28631)},
{ AOM_CDF4(5645, 16958, 22158)}, { AOM_CDF4(5009, 11444, 16637)},
{ AOM_CDF4(31357, 32710, 32748)}, { AOM_CDF4(21552, 31494, 32504)},
{ AOM_CDF4(13891, 27677, 31340)}, { AOM_CDF4(9051, 22098, 28172)},
{ AOM_CDF4(5190, 13377, 19486)}, { AOM_CDF4(32364, 32740, 32748)},
{ AOM_CDF4(24839, 31907, 32551)}, { AOM_CDF4(17160, 28779, 31696)},
{ AOM_CDF4(12452, 24137, 29602)}, { AOM_CDF4(6165, 15389, 22477)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(2575, 7281, 11077)}, { AOM_CDF4(14002, 20866, 25402)},
{ AOM_CDF4(6343, 15056, 19658)}, { AOM_CDF4(4474, 11858, 17041)},
{ AOM_CDF4(2865, 8299, 12534)}, { AOM_CDF4(1344, 3949, 6391)},
{ AOM_CDF4(24720, 31239, 32459)}, { AOM_CDF4(12585, 25356, 29968)},
{ AOM_CDF4(7181, 18246, 24444)}, { AOM_CDF4(5025, 13667, 19885)},
{ AOM_CDF4(2521, 7304, 11605)}, { AOM_CDF4(29908, 32252, 32584)},
{ AOM_CDF4(17421, 29156, 31575)}, { AOM_CDF4(9889, 22188, 27782)},
{ AOM_CDF4(5878, 15647, 22123)}, { AOM_CDF4(2814, 8665, 13323)},
{ AOM_CDF4(30183, 32568, 32713)}, { AOM_CDF4(18528, 30195, 32049)},
{ AOM_CDF4(10982, 24606, 29657)}, { AOM_CDF4(6957, 18165, 25231)},
{ AOM_CDF4(3508, 10118, 15468)}, { AOM_CDF4(31761, 32736, 32748)},
{ AOM_CDF4(21041, 31328, 32546)}, { AOM_CDF4(12568, 26732, 31166)},
{ AOM_CDF4(8052, 20720, 27733)}, { AOM_CDF4(4336, 12192, 18396)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
}
},
{
{
{
{ AOM_CDF4(7062, 16472, 22319)}, { AOM_CDF4(24538, 32261, 32674)},
{ AOM_CDF4(13675, 28041, 31779)}, { AOM_CDF4(8590, 20674, 27631)},
{ AOM_CDF4(5685, 14675, 22013)}, { AOM_CDF4(3655, 9898, 15731)},
{ AOM_CDF4(26493, 32418, 32658)}, { AOM_CDF4(16376, 29342, 32090)},
{ AOM_CDF4(10594, 22649, 28970)}, { AOM_CDF4(8176, 17170, 24303)},
{ AOM_CDF4(5605, 12694, 19139)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(23888, 31902, 32542)},
{ AOM_CDF4(18612, 29687, 31987)}, { AOM_CDF4(16245, 24852, 29249)},
{ AOM_CDF4(15765, 22608, 27559)}, { AOM_CDF4(19895, 24699, 27510)},
{ AOM_CDF4(28401, 32212, 32457)}, { AOM_CDF4(15274, 27825, 30980)},
{ AOM_CDF4(9364, 18128, 24332)}, { AOM_CDF4(2283, 8193, 15082)},
{ AOM_CDF4(1228, 3972, 7881)}, { AOM_CDF4(29455, 32469, 32620)},
{ AOM_CDF4(17981, 28245, 31388)}, { AOM_CDF4(10921, 20098, 26240)},
{ AOM_CDF4(3743, 11829, 18657)}, { AOM_CDF4(2374, 9593, 15715)},
{ AOM_CDF4(31068, 32466, 32635)}, { AOM_CDF4(20321, 29572, 31971)},
{ AOM_CDF4(10771, 20255, 27119)}, { AOM_CDF4(2795, 10410, 17361)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(9320, 22102, 27840)}, { AOM_CDF4(27057, 32464, 32724)},
{ AOM_CDF4(16331, 30268, 32309)}, { AOM_CDF4(10319, 23935, 29720)},
{ AOM_CDF4(6189, 16448, 24106)}, { AOM_CDF4(3589, 10884, 18808)},
{ AOM_CDF4(29026, 32624, 32748)}, { AOM_CDF4(19226, 31507, 32587)},
{ AOM_CDF4(12692, 26921, 31203)}, { AOM_CDF4(7049, 19532, 27635)},
{ AOM_CDF4(7727, 15669, 23252)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(28056, 32625, 32748)},
{ AOM_CDF4(22383, 32075, 32669)}, { AOM_CDF4(15417, 27098, 31749)},
{ AOM_CDF4(18127, 26493, 27190)}, { AOM_CDF4(5461, 16384, 21845)},
{ AOM_CDF4(27982, 32091, 32584)}, { AOM_CDF4(19045, 29868, 31972)},
{ AOM_CDF4(10397, 22266, 27932)}, { AOM_CDF4(5990, 13697, 21500)},
{ AOM_CDF4(1792, 6912, 15104)}, { AOM_CDF4(28198, 32501, 32718)},
{ AOM_CDF4(21534, 31521, 32569)}, { AOM_CDF4(11109, 25217, 30017)},
{ AOM_CDF4(5671, 15124, 26151)}, { AOM_CDF4(4681, 14043, 18725)},
{ AOM_CDF4(28688, 32580, 32741)}, { AOM_CDF4(22576, 32079, 32661)},
{ AOM_CDF4(10627, 22141, 28340)}, { AOM_CDF4(9362, 14043, 28087)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(7754, 16948, 22142)}, { AOM_CDF4(25670, 32330, 32691)},
{ AOM_CDF4(15663, 29225, 31994)}, { AOM_CDF4(9878, 23288, 29158)},
{ AOM_CDF4(6419, 17088, 24336)}, { AOM_CDF4(3859, 11003, 17039)},
{ AOM_CDF4(27562, 32595, 32725)}, { AOM_CDF4(17575, 30588, 32399)},
{ AOM_CDF4(10819, 24838, 30309)}, { AOM_CDF4(7124, 18686, 25916)},
{ AOM_CDF4(4479, 12688, 19340)}, { AOM_CDF4(28385, 32476, 32673)},
{ AOM_CDF4(15306, 29005, 31938)}, { AOM_CDF4(8937, 21615, 28322)},
{ AOM_CDF4(5982, 15603, 22786)}, { AOM_CDF4(3620, 10267, 16136)},
{ AOM_CDF4(27280, 32464, 32667)}, { AOM_CDF4(15607, 29160, 32004)},
{ AOM_CDF4(9091, 22135, 28740)}, { AOM_CDF4(6232, 16632, 24020)},
{ AOM_CDF4(4047, 11377, 17672)}, { AOM_CDF4(29220, 32630, 32718)},
{ AOM_CDF4(19650, 31220, 32462)}, { AOM_CDF4(13050, 26312, 30827)},
{ AOM_CDF4(9228, 20870, 27468)}, { AOM_CDF4(6146, 15149, 21971)},
{ AOM_CDF4(30169, 32481, 32623)}, { AOM_CDF4(17212, 29311, 31554)},
{ AOM_CDF4(9911, 21311, 26882)}, { AOM_CDF4(4487, 13314, 20372)},
{ AOM_CDF4(2570, 7772, 12889)}, { AOM_CDF4(30924, 32613, 32708)},
{ AOM_CDF4(19490, 30206, 32107)}, { AOM_CDF4(11232, 23998, 29276)},
{ AOM_CDF4(6769, 17955, 25035)}, { AOM_CDF4(4398, 12623, 19214)},
{ AOM_CDF4(30609, 32627, 32722)}, { AOM_CDF4(19370, 30582, 32287)},
{ AOM_CDF4(10457, 23619, 29409)}, { AOM_CDF4(6443, 17637, 24834)},
{ AOM_CDF4(4645, 13236, 20106)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(8626, 20271, 26216)}, { AOM_CDF4(26707, 32406, 32711)},
{ AOM_CDF4(16999, 30329, 32286)}, { AOM_CDF4(11445, 25123, 30286)},
{ AOM_CDF4(6411, 18828, 25601)}, { AOM_CDF4(6801, 12458, 20248)},
{ AOM_CDF4(29918, 32682, 32748)}, { AOM_CDF4(20649, 31739, 32618)},
{ AOM_CDF4(12879, 27773, 31581)}, { AOM_CDF4(7896, 21751, 28244)},
{ AOM_CDF4(5260, 14870, 23698)}, { AOM_CDF4(29252, 32593, 32731)},
{ AOM_CDF4(17072, 30460, 32294)}, { AOM_CDF4(10653, 24143, 29365)},
{ AOM_CDF4(6536, 17490, 23983)}, { AOM_CDF4(4929, 13170, 20085)},
{ AOM_CDF4(28137, 32518, 32715)}, { AOM_CDF4(18171, 30784, 32407)},
{ AOM_CDF4(11437, 25436, 30459)}, { AOM_CDF4(7252, 18534, 26176)},
{ AOM_CDF4(4126, 13353, 20978)}, { AOM_CDF4(31162, 32726, 32748)},
{ AOM_CDF4(23017, 32222, 32701)}, { AOM_CDF4(15629, 29233, 32046)},
{ AOM_CDF4(9387, 22621, 29480)}, { AOM_CDF4(6922, 17616, 25010)},
{ AOM_CDF4(28838, 32265, 32614)}, { AOM_CDF4(19701, 30206, 31920)},
{ AOM_CDF4(11214, 22410, 27933)}, { AOM_CDF4(5320, 14177, 23034)},
{ AOM_CDF4(5049, 12881, 17827)}, { AOM_CDF4(27484, 32471, 32734)},
{ AOM_CDF4(21076, 31526, 32561)}, { AOM_CDF4(12707, 26303, 31211)},
{ AOM_CDF4(8169, 21722, 28219)}, { AOM_CDF4(6045, 19406, 27042)},
{ AOM_CDF4(27753, 32572, 32745)}, { AOM_CDF4(20832, 31878, 32653)},
{ AOM_CDF4(13250, 27356, 31674)}, { AOM_CDF4(7718, 21508, 29858)},
{ AOM_CDF4(7209, 18350, 25559)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(7876, 16901, 21741)}, { AOM_CDF4(24001, 31898, 32625)},
{ AOM_CDF4(14529, 27959, 31451)}, { AOM_CDF4(8273, 20818, 27258)},
{ AOM_CDF4(5278, 14673, 21510)}, { AOM_CDF4(2983, 8843, 14039)},
{ AOM_CDF4(28016, 32574, 32732)}, { AOM_CDF4(17471, 30306, 32301)},
{ AOM_CDF4(10224, 24063, 29728)}, { AOM_CDF4(6602, 17954, 25052)},
{ AOM_CDF4(4002, 11585, 17759)}, { AOM_CDF4(30190, 32634, 32739)},
{ AOM_CDF4(17497, 30282, 32270)}, { AOM_CDF4(10229, 23729, 29538)},
{ AOM_CDF4(6344, 17211, 24440)}, { AOM_CDF4(3849, 11189, 17108)},
{ AOM_CDF4(28570, 32583, 32726)}, { AOM_CDF4(17521, 30161, 32238)},
{ AOM_CDF4(10153, 23565, 29378)}, { AOM_CDF4(6455, 17341, 24443)},
{ AOM_CDF4(3907, 11042, 17024)}, { AOM_CDF4(30689, 32715, 32748)},
{ AOM_CDF4(21546, 31840, 32610)}, { AOM_CDF4(13547, 27581, 31459)},
{ AOM_CDF4(8912, 21757, 28309)}, { AOM_CDF4(5548, 15080, 22046)},
{ AOM_CDF4(30783, 32540, 32685)}, { AOM_CDF4(17540, 29528, 31668)},
{ AOM_CDF4(10160, 21468, 26783)}, { AOM_CDF4(4724, 13393, 20054)},
{ AOM_CDF4(2702, 8174, 13102)}, { AOM_CDF4(31648, 32686, 32742)},
{ AOM_CDF4(20954, 31094, 32337)}, { AOM_CDF4(12420, 25698, 30179)},
{ AOM_CDF4(7304, 19320, 26248)}, { AOM_CDF4(4366, 12261, 18864)},
{ AOM_CDF4(31581, 32723, 32748)}, { AOM_CDF4(21373, 31586, 32525)},
{ AOM_CDF4(12744, 26625, 30885)}, { AOM_CDF4(7431, 20322, 26950)},
{ AOM_CDF4(4692, 13323, 20111)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(7833, 18369, 24095)}, { AOM_CDF4(26650, 32273, 32702)},
{ AOM_CDF4(16371, 29961, 32191)}, { AOM_CDF4(11055, 24082, 29629)},
{ AOM_CDF4(6892, 18644, 25400)}, { AOM_CDF4(5006, 13057, 19240)},
{ AOM_CDF4(29834, 32666, 32748)}, { AOM_CDF4(19577, 31335, 32570)},
{ AOM_CDF4(12253, 26509, 31122)}, { AOM_CDF4(7991, 20772, 27711)},
{ AOM_CDF4(5677, 15910, 23059)}, { AOM_CDF4(30109, 32532, 32720)},
{ AOM_CDF4(16747, 30166, 32252)}, { AOM_CDF4(10134, 23542, 29184)},
{ AOM_CDF4(5791, 16176, 23556)}, { AOM_CDF4(4362, 10414, 17284)},
{ AOM_CDF4(29492, 32626, 32748)}, { AOM_CDF4(19894, 31402, 32525)},
{ AOM_CDF4(12942, 27071, 30869)}, { AOM_CDF4(8346, 21216, 27405)},
{ AOM_CDF4(6572, 17087, 23859)}, { AOM_CDF4(32035, 32735, 32748)},
{ AOM_CDF4(22957, 31838, 32618)}, { AOM_CDF4(14724, 28572, 31772)},
{ AOM_CDF4(10364, 23999, 29553)}, { AOM_CDF4(7004, 18433, 25655)},
{ AOM_CDF4(27528, 32277, 32681)}, { AOM_CDF4(16959, 31171, 32096)},
{ AOM_CDF4(10486, 23593, 27962)}, { AOM_CDF4(8192, 16384, 23211)},
{ AOM_CDF4(8937, 17873, 20852)}, { AOM_CDF4(27715, 32002, 32615)},
{ AOM_CDF4(15073, 29491, 31676)}, { AOM_CDF4(11264, 24576, 28672)},
{ AOM_CDF4(2341, 18725, 23406)}, { AOM_CDF4(7282, 18204, 25486)},
{ AOM_CDF4(28547, 32213, 32657)}, { AOM_CDF4(20788, 29773, 32239)},
{ AOM_CDF4(6780, 21469, 30508)}, { AOM_CDF4(5958, 14895, 23831)},
{ AOM_CDF4(16384, 21845, 27307)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(5992, 14304, 19765)}, { AOM_CDF4(22612, 31238, 32456)},
{ AOM_CDF4(13456, 27162, 31087)}, { AOM_CDF4(8001, 20062, 26504)},
{ AOM_CDF4(5168, 14105, 20764)}, { AOM_CDF4(2632, 7771, 12385)},
{ AOM_CDF4(27034, 32344, 32709)}, { AOM_CDF4(15850, 29415, 31997)},
{ AOM_CDF4(9494, 22776, 28841)}, { AOM_CDF4(6151, 16830, 23969)},
{ AOM_CDF4(3461, 10039, 15722)}, { AOM_CDF4(30134, 32569, 32731)},
{ AOM_CDF4(15638, 29422, 31945)}, { AOM_CDF4(9150, 21865, 28218)},
{ AOM_CDF4(5647, 15719, 22676)}, { AOM_CDF4(3402, 9772, 15477)},
{ AOM_CDF4(28530, 32586, 32735)}, { AOM_CDF4(17139, 30298, 32292)},
{ AOM_CDF4(10200, 24039, 29685)}, { AOM_CDF4(6419, 17674, 24786)},
{ AOM_CDF4(3544, 10225, 15824)}, { AOM_CDF4(31333, 32726, 32748)},
{ AOM_CDF4(20618, 31487, 32544)}, { AOM_CDF4(12901, 27217, 31232)},
{ AOM_CDF4(8624, 21734, 28171)}, { AOM_CDF4(5104, 14191, 20748)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(11206, 21090, 26561)}, { AOM_CDF4(28759, 32279, 32671)},
{ AOM_CDF4(14171, 27952, 31569)}, { AOM_CDF4(9743, 22907, 29141)},
{ AOM_CDF4(6871, 17886, 24868)}, { AOM_CDF4(4960, 13152, 19315)},
{ AOM_CDF4(31077, 32661, 32748)}, { AOM_CDF4(19400, 31195, 32515)},
{ AOM_CDF4(12752, 26858, 31040)}, { AOM_CDF4(8370, 22098, 28591)},
{ AOM_CDF4(5457, 15373, 22298)}, { AOM_CDF4(31697, 32706, 32748)},
{ AOM_CDF4(17860, 30657, 32333)}, { AOM_CDF4(12510, 24812, 29261)},
{ AOM_CDF4(6180, 19124, 24722)}, { AOM_CDF4(5041, 13548, 17959)},
{ AOM_CDF4(31552, 32716, 32748)}, { AOM_CDF4(21908, 31769, 32623)},
{ AOM_CDF4(14470, 28201, 31565)}, { AOM_CDF4(9493, 22982, 28608)},
{ AOM_CDF4(6858, 17240, 24137)}, { AOM_CDF4(32543, 32752, 32756)},
{ AOM_CDF4(24286, 32097, 32666)}, { AOM_CDF4(15958, 29217, 32024)},
{ AOM_CDF4(10207, 24234, 29958)}, { AOM_CDF4(6929, 18305, 25652)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
},
{
{
{ AOM_CDF4(4137, 10847, 15682)}, { AOM_CDF4(17824, 27001, 30058)},
{ AOM_CDF4(10204, 22796, 28291)}, { AOM_CDF4(6076, 15935, 22125)},
{ AOM_CDF4(3852, 10937, 16816)}, { AOM_CDF4(2252, 6324, 10131)},
{ AOM_CDF4(25840, 32016, 32662)}, { AOM_CDF4(15109, 28268, 31531)},
{ AOM_CDF4(9385, 22231, 28340)}, { AOM_CDF4(6082, 16672, 23479)},
{ AOM_CDF4(3318, 9427, 14681)}, { AOM_CDF4(30594, 32574, 32718)},
{ AOM_CDF4(16836, 29552, 31859)}, { AOM_CDF4(9556, 22542, 28356)},
{ AOM_CDF4(6305, 16725, 23540)}, { AOM_CDF4(3376, 9895, 15184)},
{ AOM_CDF4(29383, 32617, 32745)}, { AOM_CDF4(18891, 30809, 32401)},
{ AOM_CDF4(11688, 25942, 30687)}, { AOM_CDF4(7468, 19469, 26651)},
{ AOM_CDF4(3909, 11358, 17012)}, { AOM_CDF4(31564, 32736, 32748)},
{ AOM_CDF4(20906, 31611, 32600)}, { AOM_CDF4(13191, 27621, 31537)},
{ AOM_CDF4(8768, 22029, 28676)}, { AOM_CDF4(5079, 14109, 20906)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
},
{
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)},
{ AOM_CDF4(8192, 16384, 24576)}, { AOM_CDF4(8192, 16384, 24576)}
}
}
}
};
static const u16 av1_default_coeff_base_eob_multi_cdfs[TOKEN_CDF_Q_CTXS][TX_SIZES]
[PLANE_TYPES][SIG_COEF_CONTEXTS_EOB][CDF_SIZE(NUM_BASE_LEVELS + 1)] = {
{
{
{
{ AOM_CDF3(17837, 29055)},
{ AOM_CDF3(29600, 31446)},
{ AOM_CDF3(30844, 31878)},
{ AOM_CDF3(24926, 28948)}
},
{
{ AOM_CDF3(21365, 30026)},
{ AOM_CDF3(30512, 32423)},
{ AOM_CDF3(31658, 32621)},
{ AOM_CDF3(29630, 31881)}
}
},
{
{
{ AOM_CDF3(5717, 26477)},
{ AOM_CDF3(30491, 31703)},
{ AOM_CDF3(31550, 32158)},
{ AOM_CDF3(29648, 31491)}
},
{
{ AOM_CDF3(12608, 27820)},
{ AOM_CDF3(30680, 32225)},
{ AOM_CDF3(30809, 32335)},
{ AOM_CDF3(31299, 32423)}
}
},
{
{
{ AOM_CDF3(1786, 12612)},
{ AOM_CDF3(30663, 31625)},
{ AOM_CDF3(32339, 32468)},
{ AOM_CDF3(31148, 31833)}
},
{
{ AOM_CDF3(18857, 23865)},
{ AOM_CDF3(31428, 32428)},
{ AOM_CDF3(31744, 32373)},
{ AOM_CDF3(31775, 32526)}
}
},
{
{
{ AOM_CDF3(1787, 2532)},
{ AOM_CDF3(30832, 31662)},
{ AOM_CDF3(31824, 32682)},
{ AOM_CDF3(32133, 32569)}
},
{
{ AOM_CDF3(13751, 22235)},
{ AOM_CDF3(32089, 32409)},
{ AOM_CDF3(27084, 27920)},
{ AOM_CDF3(29291, 32594)}
}
},
{
{
{ AOM_CDF3(1725, 3449)},
{ AOM_CDF3(31102, 31935)},
{ AOM_CDF3(32457, 32613)},
{ AOM_CDF3(32412, 32649)}
},
{
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)}
}
}
},
{
{
{
{ AOM_CDF3(17560, 29888)},
{ AOM_CDF3(29671, 31549)},
{ AOM_CDF3(31007, 32056)},
{ AOM_CDF3(27286, 30006)}
},
{
{ AOM_CDF3(26594, 31212)},
{ AOM_CDF3(31208, 32582)},
{ AOM_CDF3(31835, 32637)},
{ AOM_CDF3(30595, 32206)}
}
},
{
{
{ AOM_CDF3(15239, 29932)},
{ AOM_CDF3(31315, 32095)},
{ AOM_CDF3(32130, 32434)},
{ AOM_CDF3(30864, 31996)}
},
{
{ AOM_CDF3(26279, 30968)},
{ AOM_CDF3(31142, 32495)},
{ AOM_CDF3(31713, 32540)},
{ AOM_CDF3(31929, 32594)}
}
},
{
{
{ AOM_CDF3(2644, 25198)},
{ AOM_CDF3(32038, 32451)},
{ AOM_CDF3(32639, 32695)},
{ AOM_CDF3(32166, 32518)}
},
{
{ AOM_CDF3(17187, 27668)},
{ AOM_CDF3(31714, 32550)},
{ AOM_CDF3(32283, 32678)},
{ AOM_CDF3(31930, 32563)}
}
},
{
{
{ AOM_CDF3(1044, 2257)},
{ AOM_CDF3(30755, 31923)},
{ AOM_CDF3(32208, 32693)},
{ AOM_CDF3(32244, 32615)}
},
{
{ AOM_CDF3(21317, 26207)},
{ AOM_CDF3(29133, 30868)},
{ AOM_CDF3(29311, 31231)},
{ AOM_CDF3(29657, 31087)}
}
},
{
{
{ AOM_CDF3(478, 1834)},
{ AOM_CDF3(31005, 31987)},
{ AOM_CDF3(32317, 32724)},
{ AOM_CDF3(30865, 32648)}
},
{
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)}
}
}
},
{
{
{
{ AOM_CDF3(20092, 30774)},
{ AOM_CDF3(30695, 32020)},
{ AOM_CDF3(31131, 32103)},
{ AOM_CDF3(28666, 30870)}
},
{
{ AOM_CDF3(27258, 31095)},
{ AOM_CDF3(31804, 32623)},
{ AOM_CDF3(31763, 32528)},
{ AOM_CDF3(31438, 32506)}
}
},
{
{
{ AOM_CDF3(18049, 30489)},
{ AOM_CDF3(31706, 32286)},
{ AOM_CDF3(32163, 32473)},
{ AOM_CDF3(31550, 32184)}
},
{
{ AOM_CDF3(27116, 30842)},
{ AOM_CDF3(31971, 32598)},
{ AOM_CDF3(32088, 32576)},
{ AOM_CDF3(32067, 32664)}
}
},
{
{
{ AOM_CDF3(12854, 29093)},
{ AOM_CDF3(32272, 32558)},
{ AOM_CDF3(32667, 32729)},
{ AOM_CDF3(32306, 32585)}
},
{
{ AOM_CDF3(25476, 30366)},
{ AOM_CDF3(32169, 32687)},
{ AOM_CDF3(32479, 32689)},
{ AOM_CDF3(31673, 32634)}
}
},
{
{
{ AOM_CDF3(2809, 19301)},
{ AOM_CDF3(32205, 32622)},
{ AOM_CDF3(32338, 32730)},
{ AOM_CDF3(31786, 32616)}
},
{
{ AOM_CDF3(22737, 29105)},
{ AOM_CDF3(30810, 32362)},
{ AOM_CDF3(30014, 32627)},
{ AOM_CDF3(30528, 32574)}
}
},
{
{
{ AOM_CDF3(935, 3382)},
{ AOM_CDF3(30789, 31909)},
{ AOM_CDF3(32466, 32756)},
{ AOM_CDF3(30860, 32513)}
},
{
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)}
}
}
},
{
{
{
{ AOM_CDF3(22497, 31198)},
{ AOM_CDF3(31715, 32495)},
{ AOM_CDF3(31606, 32337)},
{ AOM_CDF3(30388, 31990)}
},
{
{ AOM_CDF3(27877, 31584)},
{ AOM_CDF3(32170, 32728)},
{ AOM_CDF3(32155, 32688)},
{ AOM_CDF3(32219, 32702)}
}
},
{
{
{ AOM_CDF3(21457, 31043)},
{ AOM_CDF3(31951, 32483)},
{ AOM_CDF3(32153, 32562)},
{ AOM_CDF3(31473, 32215)}
},
{
{ AOM_CDF3(27558, 31151)},
{ AOM_CDF3(32020, 32640)},
{ AOM_CDF3(32097, 32575)},
{ AOM_CDF3(32242, 32719)}
}
},
{
{
{ AOM_CDF3(19980, 30591)},
{ AOM_CDF3(32219, 32597)},
{ AOM_CDF3(32581, 32706)},
{ AOM_CDF3(31803, 32287)}
},
{
{ AOM_CDF3(26473, 30507)},
{ AOM_CDF3(32431, 32723)},
{ AOM_CDF3(32196, 32611)},
{ AOM_CDF3(31588, 32528)}
}
},
{
{
{ AOM_CDF3(24647, 30463)},
{ AOM_CDF3(32412, 32695)},
{ AOM_CDF3(32468, 32720)},
{ AOM_CDF3(31269, 32523)}
},
{
{ AOM_CDF3(28482, 31505)},
{ AOM_CDF3(32152, 32701)},
{ AOM_CDF3(31732, 32598)},
{ AOM_CDF3(31767, 32712)}
}
},
{
{
{ AOM_CDF3(12358, 24977)},
{ AOM_CDF3(31331, 32385)},
{ AOM_CDF3(32634, 32756)},
{ AOM_CDF3(30411, 32548)}
},
{
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)},
{ AOM_CDF3(10923, 21845)}
}
}
}
};
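/*
 * Default motion vector entropy CDFs from the AV1 specification,
 * stored in inverted form via the ICDF() helper.
 */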
static const u16 default_joint_cdf[] = { ICDF(4096), ICDF(11264), ICDF(19328)};
static const u16 default_clsss_cdf[][10] = {
// Vertical component
{
ICDF(28672), ICDF(30976), ICDF(31858), ICDF(32320), ICDF(32551),
ICDF(32656), ICDF(32740), ICDF(32757), ICDF(32762), ICDF(32767)
},
// Horizontal component
{
ICDF(28672), ICDF(30976), ICDF(31858), ICDF(32320), ICDF(32551),
ICDF(32656), ICDF(32740), ICDF(32757), ICDF(32762), ICDF(32767)
}
};
static const u16 default_clsss0_fp_cdf[][2][3] = {
// Vertical component
{
{ ICDF(16384), ICDF(24576), ICDF(26624)},
{ ICDF(12288), ICDF(21248), ICDF(24128)}
},
// Horizontal component
{
{ ICDF(16384), ICDF(24576), ICDF(26624)},
{ ICDF(12288), ICDF(21248), ICDF(24128)}
}
};
static const u16 default_fp_cdf[][3] = {
// Vertical component
{
ICDF(8192), ICDF(17408), ICDF(21248)
},
// Horizontal component
{
ICDF(8192), ICDF(17408), ICDF(21248)
}
};
static const u16 default_sign_cdf[] = { ICDF(128 * 128), ICDF(128 * 128)};
static const u16 default_class0_hp_cdf[] = { ICDF(160 * 128), ICDF(160 * 128)};
static const u16 default_hp_cdf[] = { ICDF(128 * 128), ICDF(128 * 128)};
static const u16 default_class0_cdf[] = { ICDF(216 * 128), ICDF(216 * 128)};
static const u16 default_bits_cdf[][10] = {
{
ICDF(128 * 136), ICDF(128 * 140), ICDF(128 * 148), ICDF(128 * 160),
ICDF(128 * 176), ICDF(128 * 192), ICDF(128 * 224), ICDF(128 * 234),
ICDF(128 * 234), ICDF(128 * 240)
},
{
ICDF(128 * 136), ICDF(128 * 140), ICDF(128 * 148), ICDF(128 * 160),
ICDF(128 * 176), ICDF(128 * 192), ICDF(128 * 224), ICDF(128 * 234),
ICDF(128 * 234), ICDF(128 * 240)
}
};
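/*
 * Map a base quantizer index (0..255) to one of the TOKEN_CDF_Q_CTXS
 * quantizer contexts used to select the default coefficient CDF tables.
 */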
static int rockchip_av1_get_q_ctx(int q)
{
if (q <= 20)
return 0;
if (q <= 60)
return 1;
if (q <= 120)
return 2;
return 3;
}
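/*
 * Reset all coefficient CDFs in @ptr to the spec defaults matching the
 * quantizer context derived from @base_qindex.
 */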
void rockchip_av1_default_coeff_probs(u32 base_qindex, void *ptr)
{
struct av1cdfs *cdfs = (struct av1cdfs *)ptr;
const int index = rockchip_av1_get_q_ctx(base_qindex);
memcpy(cdfs->txb_skip_cdf, av1_default_txb_skip_cdfs[index],
sizeof(av1_default_txb_skip_cdfs[0]));
memcpy(cdfs->eob_extra_cdf, av1_default_eob_extra_cdfs[index],
sizeof(av1_default_eob_extra_cdfs[0]));
memcpy(cdfs->dc_sign_cdf, av1_default_dc_sign_cdfs[index],
sizeof(av1_default_dc_sign_cdfs[0]));
memcpy(cdfs->coeff_br_cdf, av1_default_coeff_lps_multi_cdfs[index],
sizeof(av1_default_coeff_lps_multi_cdfs[0]));
memcpy(cdfs->coeff_base_cdf, av1_default_coeff_base_multi_cdfs[index],
sizeof(av1_default_coeff_base_multi_cdfs[0]));
memcpy(cdfs->coeff_base_eob_cdf,
av1_default_coeff_base_eob_multi_cdfs[index],
sizeof(av1_default_coeff_base_eob_multi_cdfs[0]));
memcpy(cdfs->eob_flag_cdf16, av1_default_eob_multi16_cdfs[index],
sizeof(av1_default_eob_multi16_cdfs[0]));
memcpy(cdfs->eob_flag_cdf32, av1_default_eob_multi32_cdfs[index],
sizeof(av1_default_eob_multi32_cdfs[0]));
memcpy(cdfs->eob_flag_cdf64, av1_default_eob_multi64_cdfs[index],
sizeof(av1_default_eob_multi64_cdfs[0]));
memcpy(cdfs->eob_flag_cdf128, av1_default_eob_multi128_cdfs[index],
sizeof(av1_default_eob_multi128_cdfs[0]));
memcpy(cdfs->eob_flag_cdf256, av1_default_eob_multi256_cdfs[index],
sizeof(av1_default_eob_multi256_cdfs[0]));
memcpy(cdfs->eob_flag_cdf512, av1_default_eob_multi512_cdfs[index],
sizeof(av1_default_eob_multi512_cdfs[0]));
memcpy(cdfs->eob_flag_cdf1024, av1_default_eob_multi1024_cdfs[index],
sizeof(av1_default_eob_multi1024_cdfs[0]));
}
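/*
 * Load the AV1 default CDFs into @cdfs for regular symbols and into
 * @cdfs_ndvc for the intra block copy (intrabc) motion vector CDFs.
 */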
void rockchip_av1_set_default_cdfs(struct av1cdfs *cdfs,
struct mvcdfs *cdfs_ndvc)
{
memcpy(cdfs->partition_cdf, default_partition_cdf,
sizeof(cdfs->partition_cdf));
memcpy(cdfs->tx_type_intra0_cdf, default_intra_ext_tx0_cdf,
sizeof(cdfs->tx_type_intra0_cdf));
memcpy(cdfs->tx_type_intra1_cdf, default_intra_ext_tx1_cdf,
sizeof(cdfs->tx_type_intra1_cdf));
memcpy(cdfs->tx_type_inter_cdf, default_inter_ext_tx_cdf,
sizeof(cdfs->tx_type_inter_cdf));
memcpy(cdfs->vartx_part_cdf, default_txfm_partition_cdf,
sizeof(cdfs->vartx_part_cdf));
memcpy(cdfs->mbskip_cdf, default_skip_cdfs, sizeof(cdfs->mbskip_cdf));
memcpy(cdfs->delta_q_cdf, default_delta_q_cdf,
sizeof(cdfs->delta_q_cdf));
memcpy(cdfs->delta_lf_multi_cdf, default_delta_lf_multi_cdf,
sizeof(cdfs->delta_lf_multi_cdf));
memcpy(cdfs->delta_lf_cdf, default_delta_lf_cdf,
sizeof(cdfs->delta_lf_cdf));
memcpy(cdfs->segment_pred_cdf, default_segment_pred_cdf,
sizeof(cdfs->segment_pred_cdf));
memcpy(cdfs->spatial_pred_seg_tree_cdf,
default_spatial_pred_seg_tree_cdf,
sizeof(cdfs->spatial_pred_seg_tree_cdf));
memcpy(cdfs->skip_mode_cdf, default_skip_mode_cdfs,
sizeof(cdfs->skip_mode_cdf));
memcpy(cdfs->tx_size_cdf, default_tx_size_cdf,
sizeof(cdfs->tx_size_cdf));
memcpy(cdfs->kf_ymode_cdf, default_kf_y_mode_cdf,
sizeof(cdfs->kf_ymode_cdf));
memcpy(cdfs->uv_mode_cdf, default_uv_mode_cdf,
sizeof(cdfs->uv_mode_cdf));
memcpy(cdfs->if_ymode_cdf, default_if_y_mode_cdf,
sizeof(cdfs->if_ymode_cdf));
memcpy(cdfs->intra_inter_cdf, default_intra_inter_cdf,
sizeof(cdfs->intra_inter_cdf));
memcpy(cdfs->comp_ref_cdf, default_comp_ref_cdf,
sizeof(cdfs->comp_ref_cdf));
memcpy(cdfs->comp_bwdref_cdf, default_comp_bwdref_cdf,
sizeof(cdfs->comp_bwdref_cdf));
memcpy(cdfs->comp_inter_cdf, default_comp_inter_cdf,
sizeof(cdfs->comp_inter_cdf));
memcpy(cdfs->single_ref_cdf, default_single_ref_cdf,
sizeof(cdfs->single_ref_cdf));
memcpy(cdfs->comp_ref_type_cdf, default_comp_ref_type_cdf,
sizeof(cdfs->comp_ref_type_cdf));
memcpy(cdfs->uni_comp_ref_cdf, default_uni_comp_ref_cdf,
sizeof(cdfs->uni_comp_ref_cdf));
memcpy(cdfs->newmv_cdf, default_newmv_cdf, sizeof(cdfs->newmv_cdf));
memcpy(cdfs->zeromv_cdf, default_zeromv_cdf, sizeof(cdfs->zeromv_cdf));
memcpy(cdfs->refmv_cdf, default_refmv_cdf, sizeof(cdfs->refmv_cdf));
memcpy(cdfs->drl_cdf, default_drl_cdf, sizeof(cdfs->drl_cdf));
memcpy(cdfs->interp_filter_cdf, default_switchable_interp_cdf,
sizeof(cdfs->interp_filter_cdf));
// Regular MV cdfs
memcpy(cdfs->mv_cdf.joint_cdf, default_joint_cdf,
sizeof(cdfs->mv_cdf.joint_cdf));
memcpy(cdfs->mv_cdf.sign_cdf, default_sign_cdf,
sizeof(cdfs->mv_cdf.sign_cdf));
memcpy(cdfs->mv_cdf.clsss_cdf, default_clsss_cdf,
sizeof(cdfs->mv_cdf.clsss_cdf));
memcpy(cdfs->mv_cdf.clsss0_fp_cdf, default_clsss0_fp_cdf,
sizeof(cdfs->mv_cdf.clsss0_fp_cdf));
memcpy(cdfs->mv_cdf.fp_cdf, default_fp_cdf,
sizeof(cdfs->mv_cdf.fp_cdf));
memcpy(cdfs->mv_cdf.class0_hp_cdf, default_class0_hp_cdf,
sizeof(cdfs->mv_cdf.class0_hp_cdf));
memcpy(cdfs->mv_cdf.hp_cdf, default_hp_cdf,
sizeof(cdfs->mv_cdf.hp_cdf));
memcpy(cdfs->mv_cdf.class0_cdf, default_class0_cdf,
sizeof(cdfs->mv_cdf.class0_cdf));
memcpy(cdfs->mv_cdf.bits_cdf, default_bits_cdf,
sizeof(cdfs->mv_cdf.bits_cdf));
// Intrabc cdfs
memcpy(cdfs_ndvc->joint_cdf, default_joint_cdf,
sizeof(cdfs_ndvc->joint_cdf));
memcpy(cdfs_ndvc->sign_cdf, default_sign_cdf,
sizeof(cdfs_ndvc->sign_cdf));
memcpy(cdfs_ndvc->clsss_cdf, default_clsss_cdf,
sizeof(cdfs_ndvc->clsss_cdf));
memcpy(cdfs_ndvc->clsss0_fp_cdf, default_clsss0_fp_cdf,
sizeof(cdfs_ndvc->clsss0_fp_cdf));
memcpy(cdfs_ndvc->fp_cdf, default_fp_cdf, sizeof(cdfs_ndvc->fp_cdf));
memcpy(cdfs_ndvc->class0_hp_cdf, default_class0_hp_cdf,
sizeof(cdfs_ndvc->class0_hp_cdf));
memcpy(cdfs_ndvc->hp_cdf, default_hp_cdf, sizeof(cdfs_ndvc->hp_cdf));
memcpy(cdfs_ndvc->class0_cdf, default_class0_cdf,
sizeof(cdfs_ndvc->class0_cdf));
memcpy(cdfs_ndvc->bits_cdf, default_bits_cdf,
sizeof(cdfs_ndvc->bits_cdf));
memcpy(cdfs->obmc_cdf, default_obmc_cdf, sizeof(cdfs->obmc_cdf));
memcpy(cdfs->motion_mode_cdf, default_motion_mode_cdf,
sizeof(cdfs->motion_mode_cdf));
memcpy(cdfs->inter_compound_mode_cdf, default_inter_compound_mode_cdf,
sizeof(cdfs->inter_compound_mode_cdf));
memcpy(cdfs->compound_type_cdf, default_compound_type_cdf,
sizeof(cdfs->compound_type_cdf));
memcpy(cdfs->interintra_cdf, default_interintra_cdf,
sizeof(cdfs->interintra_cdf));
memcpy(cdfs->interintra_mode_cdf, default_interintra_mode_cdf,
sizeof(cdfs->interintra_mode_cdf));
memcpy(cdfs->wedge_interintra_cdf, default_wedge_interintra_cdf,
sizeof(cdfs->wedge_interintra_cdf));
memcpy(cdfs->wedge_idx_cdf, default_wedge_idx_cdf,
sizeof(cdfs->wedge_idx_cdf));
memcpy(cdfs->palette_y_mode_cdf, default_palette_y_mode_cdf,
sizeof(cdfs->palette_y_mode_cdf));
memcpy(cdfs->palette_uv_mode_cdf, default_palette_uv_mode_cdf,
sizeof(cdfs->palette_uv_mode_cdf));
memcpy(cdfs->palette_y_size_cdf, default_palette_y_size_cdf,
sizeof(cdfs->palette_y_size_cdf));
memcpy(cdfs->palette_uv_size_cdf, default_palette_uv_size_cdf,
sizeof(cdfs->palette_uv_size_cdf));
memcpy(cdfs->palette_y_color_index_cdf,
default_palette_y_color_index_cdf,
sizeof(cdfs->palette_y_color_index_cdf));
memcpy(cdfs->palette_uv_color_index_cdf,
default_palette_uv_color_index_cdf,
sizeof(cdfs->palette_uv_color_index_cdf));
memcpy(cdfs->cfl_sign_cdf, default_cfl_sign_cdf,
sizeof(cdfs->cfl_sign_cdf));
memcpy(cdfs->cfl_alpha_cdf, default_cfl_alpha_cdf,
sizeof(cdfs->cfl_alpha_cdf));
memcpy(cdfs->intrabc_cdf, default_intrabc_cdf,
sizeof(cdfs->intrabc_cdf));
memcpy(cdfs->angle_delta_cdf, default_angle_delta_cdf,
sizeof(cdfs->angle_delta_cdf));
memcpy(cdfs->filter_intra_mode_cdf, default_filter_intra_mode_cdf,
sizeof(cdfs->filter_intra_mode_cdf));
memcpy(cdfs->filter_intra_cdf, default_filter_intra_cdfs,
sizeof(cdfs->filter_intra_cdf));
memcpy(cdfs->comp_group_idx_cdf, default_comp_group_idx_cdfs,
sizeof(cdfs->comp_group_idx_cdf));
memcpy(cdfs->compound_idx_cdf, default_compound_idx_cdfs,
sizeof(cdfs->compound_idx_cdf));
}
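/* Select the CDF set saved for reference slot @ref_idx as the active one. */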
void rockchip_av1_get_cdfs(struct hantro_ctx *ctx, u32 ref_idx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
av1_dec->cdfs = &av1_dec->cdfs_last[ref_idx];
av1_dec->cdfs_ndvc = &av1_dec->cdfs_last_ndvc[ref_idx];
}
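/*
 * Save the active CDFs into every reference slot selected by
 * @refresh_frame_flags so that later frames can inherit them.
 */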
void rockchip_av1_store_cdfs(struct hantro_ctx *ctx,
u32 refresh_frame_flags)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
int i;
for (i = 0; i < NUM_REF_FRAMES; i++) {
if (refresh_frame_flags & (1 << i)) {
if (&av1_dec->cdfs_last[i] != av1_dec->cdfs) {
av1_dec->cdfs_last[i] = *av1_dec->cdfs;
av1_dec->cdfs_last_ndvc[i] =
*av1_dec->cdfs_ndvc;
}
}
}
}
| linux-master | drivers/media/platform/verisilicon/rockchip_av1_entropymode.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro G1 post-processor support
*
* Copyright (C) 2019 Collabora, Ltd.
*/
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include "hantro.h"
#include "hantro_hw.h"
#include "hantro_g1_regs.h"
#include "hantro_g2_regs.h"
#include "hantro_v4l2.h"
#define HANTRO_PP_REG_WRITE(vpu, reg_name, val) \
{ \
hantro_reg_write(vpu, \
&hantro_g1_postproc_regs.reg_name, \
val); \
}
#define HANTRO_PP_REG_WRITE_RELAXED(vpu, reg_name, val) \
{ \
hantro_reg_write_relaxed(vpu, \
&hantro_g1_postproc_regs.reg_name, \
val); \
}
#define VPU_PP_IN_YUYV 0x0
#define VPU_PP_IN_NV12 0x1
#define VPU_PP_IN_YUV420 0x2
#define VPU_PP_IN_YUV240_TILED 0x5
#define VPU_PP_OUT_RGB 0x0
#define VPU_PP_OUT_YUYV 0x3
static const struct hantro_postproc_regs hantro_g1_postproc_regs = {
.pipeline_en = {G1_REG_PP_INTERRUPT, 1, 0x1},
.max_burst = {G1_REG_PP_DEV_CONFIG, 0, 0x1f},
.clk_gate = {G1_REG_PP_DEV_CONFIG, 1, 0x1},
.out_swap32 = {G1_REG_PP_DEV_CONFIG, 5, 0x1},
.out_endian = {G1_REG_PP_DEV_CONFIG, 6, 0x1},
.out_luma_base = {G1_REG_PP_OUT_LUMA_BASE, 0, 0xffffffff},
.input_width = {G1_REG_PP_INPUT_SIZE, 0, 0x1ff},
.input_height = {G1_REG_PP_INPUT_SIZE, 9, 0x1ff},
.output_width = {G1_REG_PP_CONTROL, 4, 0x7ff},
.output_height = {G1_REG_PP_CONTROL, 15, 0x7ff},
.input_fmt = {G1_REG_PP_CONTROL, 29, 0x7},
.output_fmt = {G1_REG_PP_CONTROL, 26, 0x7},
.orig_width = {G1_REG_PP_MASK1_ORIG_WIDTH, 23, 0x1ff},
.display_width = {G1_REG_PP_DISPLAY_WIDTH, 0, 0xfff},
};
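/*
 * The post-processor is a decode-only block: it is enabled when the
 * context explicitly requests it or when the selected capture format
 * can only be produced by post-processing the decoder output.
 */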
bool hantro_needs_postproc(const struct hantro_ctx *ctx,
const struct hantro_fmt *fmt)
{
if (ctx->is_encoder)
return false;
if (ctx->need_postproc)
return true;
return fmt->postprocessed;
}
static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *dst_buf;
u32 src_pp_fmt, dst_pp_fmt;
dma_addr_t dst_dma;
/* Turn on pipeline mode. Must be done first. */
HANTRO_PP_REG_WRITE(vpu, pipeline_en, 0x1);
src_pp_fmt = VPU_PP_IN_NV12;
switch (ctx->vpu_dst_fmt->fourcc) {
case V4L2_PIX_FMT_YUYV:
dst_pp_fmt = VPU_PP_OUT_YUYV;
break;
default:
WARN(1, "output format %d not supported by the post-processor, this wasn't expected.",
ctx->vpu_dst_fmt->fourcc);
dst_pp_fmt = 0;
break;
}
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
HANTRO_PP_REG_WRITE(vpu, clk_gate, 0x1);
HANTRO_PP_REG_WRITE(vpu, out_endian, 0x1);
HANTRO_PP_REG_WRITE(vpu, out_swap32, 0x1);
HANTRO_PP_REG_WRITE(vpu, max_burst, 16);
HANTRO_PP_REG_WRITE(vpu, out_luma_base, dst_dma);
HANTRO_PP_REG_WRITE(vpu, input_width, MB_WIDTH(ctx->dst_fmt.width));
HANTRO_PP_REG_WRITE(vpu, input_height, MB_HEIGHT(ctx->dst_fmt.height));
HANTRO_PP_REG_WRITE(vpu, input_fmt, src_pp_fmt);
HANTRO_PP_REG_WRITE(vpu, output_fmt, dst_pp_fmt);
HANTRO_PP_REG_WRITE(vpu, output_width, ctx->dst_fmt.width);
HANTRO_PP_REG_WRITE(vpu, output_height, ctx->dst_fmt.height);
HANTRO_PP_REG_WRITE(vpu, orig_width, MB_WIDTH(ctx->dst_fmt.width));
HANTRO_PP_REG_WRITE(vpu, display_width, ctx->dst_fmt.width);
}
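/*
 * Compute the horizontal downscaling factor from the source/destination
 * width ratio; a return value of 0 means no scaling is applied.
 */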
static int down_scale_factor(struct hantro_ctx *ctx)
{
if (ctx->src_fmt.width == ctx->dst_fmt.width)
return 0;
return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
}
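/*
 * Program the G2 post-processor: route the output either through the
 * down-scaler or the raster-scan output block, and configure the output
 * bit depth accordingly.
 */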
static void hantro_postproc_g2_enable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *dst_buf;
int down_scale = down_scale_factor(ctx);
int out_depth;
size_t chroma_offset;
dma_addr_t dst_dma;
dst_buf = hantro_get_dst_buf(ctx);
dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
chroma_offset = ctx->dst_fmt.plane_fmt[0].bytesperline *
ctx->dst_fmt.height;
if (down_scale) {
hantro_reg_write(vpu, &g2_down_scale_e, 1);
hantro_reg_write(vpu, &g2_down_scale_y, down_scale >> 2);
hantro_reg_write(vpu, &g2_down_scale_x, down_scale >> 2);
hantro_write_addr(vpu, G2_DS_DST, dst_dma);
hantro_write_addr(vpu, G2_DS_DST_CHR, dst_dma + (chroma_offset >> down_scale));
} else {
hantro_write_addr(vpu, G2_RS_OUT_LUMA_ADDR, dst_dma);
hantro_write_addr(vpu, G2_RS_OUT_CHROMA_ADDR, dst_dma + chroma_offset);
}
out_depth = hantro_get_format_depth(ctx->dst_fmt.pixelformat);
if (ctx->dev->variant->legacy_regs) {
u8 pp_shift = 0;
if (out_depth > 8)
pp_shift = 16 - out_depth;
hantro_reg_write(ctx->dev, &g2_rs_out_bit_depth, out_depth);
hantro_reg_write(ctx->dev, &g2_pp_pix_shift, pp_shift);
} else {
hantro_reg_write(vpu, &g2_output_8_bits, out_depth > 8 ? 0 : 1);
hantro_reg_write(vpu, &g2_output_format, out_depth > 8 ? 1 : 0);
}
hantro_reg_write(vpu, &g2_out_rs_e, 1);
}
static int hantro_postproc_g2_enum_framesizes(struct hantro_ctx *ctx,
struct v4l2_frmsizeenum *fsize)
{
/*
 * The G2 scaler can downscale by a factor of 1, 2, 4 or 8;
 * fsize->index is used as the power-of-two divisor (shift).
 */
if (fsize->index > 3)
return -EINVAL;
if (!ctx->src_fmt.width || !ctx->src_fmt.height)
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = ctx->src_fmt.width >> fsize->index;
fsize->discrete.height = ctx->src_fmt.height >> fsize->index;
return 0;
}
void hantro_postproc_free(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
unsigned int i;
for (i = 0; i < VB2_MAX_FRAME; ++i) {
struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
if (priv->cpu) {
dma_free_attrs(vpu->dev, priv->size, priv->cpu,
priv->dma, priv->attrs);
priv->cpu = NULL;
}
}
}
int hantro_postproc_alloc(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q;
unsigned int num_buffers = cap_queue->num_buffers;
struct v4l2_pix_format_mplane pix_mp;
const struct hantro_fmt *fmt;
unsigned int i, buf_size;
/* This should always pick the native format. */
fmt = hantro_get_default_fmt(ctx, false, ctx->bit_depth, HANTRO_AUTO_POSTPROC);
if (!fmt)
return -EINVAL;
v4l2_fill_pixfmt_mp(&pix_mp, fmt->fourcc, ctx->src_fmt.width,
ctx->src_fmt.height);
buf_size = pix_mp.plane_fmt[0].sizeimage;
if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
buf_size += hantro_h264_mv_size(pix_mp.width,
pix_mp.height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME)
buf_size += hantro_vp9_mv_size(pix_mp.width,
pix_mp.height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE)
buf_size += hantro_hevc_mv_size(pix_mp.width,
pix_mp.height);
else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_AV1_FRAME)
buf_size += hantro_av1_mv_size(pix_mp.width,
pix_mp.height);
for (i = 0; i < num_buffers; ++i) {
struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
/*
* The buffers on this queue are meant as intermediate
* buffers for the decoder, so no mapping is needed.
*/
priv->attrs = DMA_ATTR_NO_KERNEL_MAPPING;
priv->cpu = dma_alloc_attrs(vpu->dev, buf_size, &priv->dma,
GFP_KERNEL, priv->attrs);
if (!priv->cpu)
return -ENOMEM;
priv->size = buf_size;
}
return 0;
}
static void hantro_postproc_g1_disable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
HANTRO_PP_REG_WRITE(vpu, pipeline_en, 0x0);
}
static void hantro_postproc_g2_disable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
hantro_reg_write(vpu, &g2_out_rs_e, 0);
}
void hantro_postproc_disable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->disable)
vpu->variant->postproc_ops->disable(ctx);
}
void hantro_postproc_enable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->enable)
vpu->variant->postproc_ops->enable(ctx);
}
int hanto_postproc_enum_framesizes(struct hantro_ctx *ctx,
struct v4l2_frmsizeenum *fsize)
{
struct hantro_dev *vpu = ctx->dev;
if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->enum_framesizes)
return vpu->variant->postproc_ops->enum_framesizes(ctx, fsize);
return -EINVAL;
}
const struct hantro_postproc_ops hantro_g1_postproc_ops = {
.enable = hantro_postproc_g1_enable,
.disable = hantro_postproc_g1_disable,
};
const struct hantro_postproc_ops hantro_g2_postproc_ops = {
.enable = hantro_postproc_g2_enable,
.disable = hantro_postproc_g2_disable,
.enum_framesizes = hantro_postproc_g2_enum_framesizes,
};
| linux-master | drivers/media/platform/verisilicon/hantro_postproc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Hantro VDEC driver
*
* Copyright (C) 2021 Collabora Ltd, Emil Velikov <[email protected]>
*/
#include "hantro.h"
/*
* Supported formats.
*/
static const struct hantro_fmt sama5d4_vdec_postproc_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_YUYV,
.codec_mode = HANTRO_MODE_NONE,
.postprocessed = true,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_HD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_HD_HEIGHT,
.step_height = MB_DIM,
},
},
};
static const struct hantro_fmt sama5d4_vdec_fmts[] = {
{
.fourcc = V4L2_PIX_FMT_NV12,
.codec_mode = HANTRO_MODE_NONE,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_HD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_HD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
.codec_mode = HANTRO_MODE_MPEG2_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_HD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_HD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_VP8_FRAME,
.codec_mode = HANTRO_MODE_VP8_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_HD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_HD_HEIGHT,
.step_height = MB_DIM,
},
},
{
.fourcc = V4L2_PIX_FMT_H264_SLICE,
.codec_mode = HANTRO_MODE_H264_DEC,
.max_depth = 2,
.frmsize = {
.min_width = FMT_MIN_WIDTH,
.max_width = FMT_HD_WIDTH,
.step_width = MB_DIM,
.min_height = FMT_MIN_HEIGHT,
.max_height = FMT_HD_HEIGHT,
.step_height = MB_DIM,
},
},
};
/*
* Supported codec ops.
*/
static const struct hantro_codec_ops sama5d4_vdec_codec_ops[] = {
[HANTRO_MODE_MPEG2_DEC] = {
.run = hantro_g1_mpeg2_dec_run,
.reset = hantro_g1_reset,
.init = hantro_mpeg2_dec_init,
.exit = hantro_mpeg2_dec_exit,
},
[HANTRO_MODE_VP8_DEC] = {
.run = hantro_g1_vp8_dec_run,
.reset = hantro_g1_reset,
.init = hantro_vp8_dec_init,
.exit = hantro_vp8_dec_exit,
},
[HANTRO_MODE_H264_DEC] = {
.run = hantro_g1_h264_dec_run,
.reset = hantro_g1_reset,
.init = hantro_h264_dec_init,
.exit = hantro_h264_dec_exit,
},
};
static const struct hantro_irq sama5d4_irqs[] = {
{ "vdec", hantro_g1_irq },
};
static const char * const sama5d4_clk_names[] = { "vdec_clk" };
const struct hantro_variant sama5d4_vdec_variant = {
.dec_fmts = sama5d4_vdec_fmts,
.num_dec_fmts = ARRAY_SIZE(sama5d4_vdec_fmts),
.postproc_fmts = sama5d4_vdec_postproc_fmts,
.num_postproc_fmts = ARRAY_SIZE(sama5d4_vdec_postproc_fmts),
.postproc_ops = &hantro_g1_postproc_ops,
.codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
HANTRO_H264_DECODER,
.codec_ops = sama5d4_vdec_codec_ops,
.irqs = sama5d4_irqs,
.num_irqs = ARRAY_SIZE(sama5d4_irqs),
.clk_names = sama5d4_clk_names,
.num_clocks = ARRAY_SIZE(sama5d4_clk_names),
};
| linux-master | drivers/media/platform/verisilicon/sama5d4_vdec_hw.c |
// SPDX-License-Identifier: GPL-2.0-only or Apache-2.0
#include "rockchip_av1_filmgrain.h"
static const s32 gaussian_sequence[2048] = {
56, 568, -180, 172, 124, -84, 172, -64, -900, 24, 820,
224, 1248, 996, 272, -8, -916, -388, -732, -104, -188, 800,
112, -652, -320, -376, 140, -252, 492, -168, 44, -788, 588,
-584, 500, -228, 12, 680, 272, -476, 972, -100, 652, 368,
432, -196, -720, -192, 1000, -332, 652, -136, -552, -604, -4,
192, -220, -136, 1000, -52, 372, -96, -624, 124, -24, 396,
540, -12, -104, 640, 464, 244, -208, -84, 368, -528, -740,
248, -968, -848, 608, 376, -60, -292, -40, -156, 252, -292,
248, 224, -280, 400, -244, 244, -60, 76, -80, 212, 532,
340, 128, -36, 824, -352, -60, -264, -96, -612, 416, -704,
220, -204, 640, -160, 1220, -408, 900, 336, 20, -336, -96,
-792, 304, 48, -28, -1232, -1172, -448, 104, -292, -520, 244,
60, -948, 0, -708, 268, 108, 356, -548, 488, -344, -136,
488, -196, -224, 656, -236, -1128, 60, 4, 140, 276, -676,
-376, 168, -108, 464, 8, 564, 64, 240, 308, -300, -400,
-456, -136, 56, 120, -408, -116, 436, 504, -232, 328, 844,
-164, -84, 784, -168, 232, -224, 348, -376, 128, 568, 96,
-1244, -288, 276, 848, 832, -360, 656, 464, -384, -332, -356,
728, -388, 160, -192, 468, 296, 224, 140, -776, -100, 280,
4, 196, 44, -36, -648, 932, 16, 1428, 28, 528, 808,
772, 20, 268, 88, -332, -284, 124, -384, -448, 208, -228,
-1044, -328, 660, 380, -148, -300, 588, 240, 540, 28, 136,
-88, -436, 256, 296, -1000, 1400, 0, -48, 1056, -136, 264,
-528, -1108, 632, -484, -592, -344, 796, 124, -668, -768, 388,
1296, -232, -188, -200, -288, -4, 308, 100, -168, 256, -500,
204, -508, 648, -136, 372, -272, -120, -1004, -552, -548, -384,
548, -296, 428, -108, -8, -912, -324, -224, -88, -112, -220,
-100, 996, -796, 548, 360, -216, 180, 428, -200, -212, 148,
96, 148, 284, 216, -412, -320, 120, -300, -384, -604, -572,
-332, -8, -180, -176, 696, 116, -88, 628, 76, 44, -516,
240, -208, -40, 100, -592, 344, -308, -452, -228, 20, 916,
-1752, -136, -340, -804, 140, 40, 512, 340, 248, 184, -492,
896, -156, 932, -628, 328, -688, -448, -616, -752, -100, 560,
-1020, 180, -800, -64, 76, 576, 1068, 396, 660, 552, -108,
-28, 320, -628, 312, -92, -92, -472, 268, 16, 560, 516,
-672, -52, 492, -100, 260, 384, 284, 292, 304, -148, 88,
-152, 1012, 1064, -228, 164, -376, -684, 592, -392, 156, 196,
-524, -64, -884, 160, -176, 636, 648, 404, -396, -436, 864,
424, -728, 988, -604, 904, -592, 296, -224, 536, -176, -920,
436, -48, 1176, -884, 416, -776, -824, -884, 524, -548, -564,
-68, -164, -96, 692, 364, -692, -1012, -68, 260, -480, 876,
-1116, 452, -332, -352, 892, -1088, 1220, -676, 12, -292, 244,
496, 372, -32, 280, 200, 112, -440, -96, 24, -644, -184,
56, -432, 224, -980, 272, -260, 144, -436, 420, 356, 364,
-528, 76, 172, -744, -368, 404, -752, -416, 684, -688, 72,
540, 416, 92, 444, 480, -72, -1416, 164, -1172, -68, 24,
424, 264, 1040, 128, -912, -524, -356, 64, 876, -12, 4,
-88, 532, 272, -524, 320, 276, -508, 940, 24, -400, -120,
756, 60, 236, -412, 100, 376, -484, 400, -100, -740, -108,
-260, 328, -268, 224, -200, -416, 184, -604, -564, -20, 296,
60, 892, -888, 60, 164, 68, -760, 216, -296, 904, -336,
-28, 404, -356, -568, -208, -1480, -512, 296, 328, -360, -164,
-1560, -776, 1156, -428, 164, -504, -112, 120, -216, -148, -264,
308, 32, 64, -72, 72, 116, 176, -64, -272, 460, -536,
-784, -280, 348, 108, -752, -132, 524, -540, -776, 116, -296,
-1196, -288, -560, 1040, -472, 116, -848, -1116, 116, 636, 696,
284, -176, 1016, 204, -864, -648, -248, 356, 972, -584, -204,
264, 880, 528, -24, -184, 116, 448, -144, 828, 524, 212,
-212, 52, 12, 200, 268, -488, -404, -880, 824, -672, -40,
908, -248, 500, 716, -576, 492, -576, 16, 720, -108, 384,
124, 344, 280, 576, -500, 252, 104, -308, 196, -188, -8,
1268, 296, 1032, -1196, 436, 316, 372, -432, -200, -660, 704,
-224, 596, -132, 268, 32, -452, 884, 104, -1008, 424, -1348,
-280, 4, -1168, 368, 476, 696, 300, -8, 24, 180, -592,
-196, 388, 304, 500, 724, -160, 244, -84, 272, -256, -420,
320, 208, -144, -156, 156, 364, 452, 28, 540, 316, 220,
-644, -248, 464, 72, 360, 32, -388, 496, -680, -48, 208,
-116, -408, 60, -604, -392, 548, -840, 784, -460, 656, -544,
-388, -264, 908, -800, -628, -612, -568, 572, -220, 164, 288,
-16, -308, 308, -112, -636, -760, 280, -668, 432, 364, 240,
-196, 604, 340, 384, 196, 592, -44, -500, 432, -580, -132,
636, -76, 392, 4, -412, 540, 508, 328, -356, -36, 16,
-220, -64, -248, -60, 24, -192, 368, 1040, 92, -24, -1044,
-32, 40, 104, 148, 192, -136, -520, 56, -816, -224, 732,
392, 356, 212, -80, -424, -1008, -324, 588, -1496, 576, 460,
-816, -848, 56, -580, -92, -1372, -112, -496, 200, 364, 52,
-140, 48, -48, -60, 84, 72, 40, 132, -356, -268, -104,
-284, -404, 732, -520, 164, -304, -540, 120, 328, -76, -460,
756, 388, 588, 236, -436, -72, -176, -404, -316, -148, 716,
-604, 404, -72, -88, -888, -68, 944, 88, -220, -344, 960,
472, 460, -232, 704, 120, 832, -228, 692, -508, 132, -476,
844, -748, -364, -44, 1116, -1104, -1056, 76, 428, 552, -692,
60, 356, 96, -384, -188, -612, -576, 736, 508, 892, 352,
-1132, 504, -24, -352, 324, 332, -600, -312, 292, 508, -144,
-8, 484, 48, 284, -260, -240, 256, -100, -292, -204, -44,
472, -204, 908, -188, -1000, -256, 92, 1164, -392, 564, 356,
652, -28, -884, 256, 484, -192, 760, -176, 376, -524, -452,
-436, 860, -736, 212, 124, 504, -476, 468, 76, -472, 552,
-692, -944, -620, 740, -240, 400, 132, 20, 192, -196, 264,
-668, -1012, -60, 296, -316, -828, 76, -156, 284, -768, -448,
-832, 148, 248, 652, 616, 1236, 288, -328, -400, -124, 588,
220, 520, -696, 1032, 768, -740, -92, -272, 296, 448, -464,
412, -200, 392, 440, -200, 264, -152, -260, 320, 1032, 216,
320, -8, -64, 156, -1016, 1084, 1172, 536, 484, -432, 132,
372, -52, -256, 84, 116, -352, 48, 116, 304, -384, 412,
924, -300, 528, 628, 180, 648, 44, -980, -220, 1320, 48,
332, 748, 524, -268, -720, 540, -276, 564, -344, -208, -196,
436, 896, 88, -392, 132, 80, -964, -288, 568, 56, -48,
-456, 888, 8, 552, -156, -292, 948, 288, 128, -716, -292,
1192, -152, 876, 352, -600, -260, -812, -468, -28, -120, -32,
-44, 1284, 496, 192, 464, 312, -76, -516, -380, -456, -1012,
-48, 308, -156, 36, 492, -156, -808, 188, 1652, 68, -120,
-116, 316, 160, -140, 352, 808, -416, 592, 316, -480, 56,
528, -204, -568, 372, -232, 752, -344, 744, -4, 324, -416,
-600, 768, 268, -248, -88, -132, -420, -432, 80, -288, 404,
-316, -1216, -588, 520, -108, 92, -320, 368, -480, -216, -92,
1688, -300, 180, 1020, -176, 820, -68, -228, -260, 436, -904,
20, 40, -508, 440, -736, 312, 332, 204, 760, -372, 728,
96, -20, -632, -520, -560, 336, 1076, -64, -532, 776, 584,
192, 396, -728, -520, 276, -188, 80, -52, -612, -252, -48,
648, 212, -688, 228, -52, -260, 428, -412, -272, -404, 180,
816, -796, 48, 152, 484, -88, -216, 988, 696, 188, -528,
648, -116, -180, 316, 476, 12, -564, 96, 476, -252, -364,
-376, -392, 556, -256, -576, 260, -352, 120, -16, -136, -260,
-492, 72, 556, 660, 580, 616, 772, 436, 424, -32, -324,
-1268, 416, -324, -80, 920, 160, 228, 724, 32, -516, 64,
384, 68, -128, 136, 240, 248, -204, -68, 252, -932, -120,
-480, -628, -84, 192, 852, -404, -288, -132, 204, 100, 168,
-68, -196, -868, 460, 1080, 380, -80, 244, 0, 484, -888,
64, 184, 352, 600, 460, 164, 604, -196, 320, -64, 588,
-184, 228, 12, 372, 48, -848, -344, 224, 208, -200, 484,
128, -20, 272, -468, -840, 384, 256, -720, -520, -464, -580,
112, -120, 644, -356, -208, -608, -528, 704, 560, -424, 392,
828, 40, 84, 200, -152, 0, -144, 584, 280, -120, 80,
-556, -972, -196, -472, 724, 80, 168, -32, 88, 160, -688,
0, 160, 356, 372, -776, 740, -128, 676, -248, -480, 4,
-364, 96, 544, 232, -1032, 956, 236, 356, 20, -40, 300,
24, -676, -596, 132, 1120, -104, 532, -1096, 568, 648, 444,
508, 380, 188, -376, -604, 1488, 424, 24, 756, -220, -192,
716, 120, 920, 688, 168, 44, -460, 568, 284, 1144, 1160,
600, 424, 888, 656, -356, -320, 220, 316, -176, -724, -188,
-816, -628, -348, -228, -380, 1012, -452, -660, 736, 928, 404,
-696, -72, -268, -892, 128, 184, -344, -780, 360, 336, 400,
344, 428, 548, -112, 136, -228, -216, -820, -516, 340, 92,
-136, 116, -300, 376, -244, 100, -316, -520, -284, -12, 824,
164, -548, -180, -128, 116, -924, -828, 268, -368, -580, 620,
192, 160, 0, -1676, 1068, 424, -56, -360, 468, -156, 720,
288, -528, 556, -364, 548, -148, 504, 316, 152, -648, -620,
-684, -24, -376, -384, -108, -920, -1032, 768, 180, -264, -508,
-1268, -260, -60, 300, -240, 988, 724, -376, -576, -212, -736,
556, 192, 1092, -620, -880, 376, -56, -4, -216, -32, 836,
268, 396, 1332, 864, -600, 100, 56, -412, -92, 356, 180,
884, -468, -436, 292, -388, -804, -704, -840, 368, -348, 140,
-724, 1536, 940, 372, 112, -372, 436, -480, 1136, 296, -32,
-228, 132, -48, -220, 868, -1016, -60, -1044, -464, 328, 916,
244, 12, -736, -296, 360, 468, -376, -108, -92, 788, 368,
-56, 544, 400, -672, -420, 728, 16, 320, 44, -284, -380,
-796, 488, 132, 204, -596, -372, 88, -152, -908, -636, -572,
-624, -116, -692, -200, -56, 276, -88, 484, -324, 948, 864,
1000, -456, -184, -276, 292, -296, 156, 676, 320, 160, 908,
-84, -1236, -288, -116, 260, -372, -644, 732, -756, -96, 84,
344, -520, 348, -688, 240, -84, 216, -1044, -136, -676, -396,
-1500, 960, -40, 176, 168, 1516, 420, -504, -344, -364, -360,
1216, -940, -380, -212, 252, -660, -708, 484, -444, -152, 928,
-120, 1112, 476, -260, 560, -148, -344, 108, -196, 228, -288,
504, 560, -328, -88, 288, -1008, 460, -228, 468, -836, -196,
76, 388, 232, 412, -1168, -716, -644, 756, -172, -356, -504,
116, 432, 528, 48, 476, -168, -608, 448, 160, -532, -272,
28, -676, -12, 828, 980, 456, 520, 104, -104, 256, -344,
-4, -28, -368, -52, -524, -572, -556, -200, 768, 1124, -208,
-512, 176, 232, 248, -148, -888, 604, -600, -304, 804, -156,
-212, 488, -192, -804, -256, 368, -360, -916, -328, 228, -240,
-448, -472, 856, -556, -364, 572, -12, -156, -368, -340, 432,
252, -752, -152, 288, 268, -580, -848, -592, 108, -76, 244,
312, -716, 592, -80, 436, 360, 4, -248, 160, 516, 584,
732, 44, -468, -280, -292, -156, -588, 28, 308, 912, 24,
124, 156, 180, -252, 944, -924, -772, -520, -428, -624, 300,
-212, -1144, 32, -724, 800, -1128, -212, -1288, -848, 180, -416,
440, 192, -576, -792, -76, -1080, 80, -532, -352, -132, 380,
-820, 148, 1112, 128, 164, 456, 700, -924, 144, -668, -384,
648, -832, 508, 552, -52, -100, -656, 208, -568, 748, -88,
680, 232, 300, 192, -408, -1012, -152, -252, -268, 272, -876,
-664, -648, -332, -136, 16, 12, 1152, -28, 332, -536, 320,
-672, -460, -316, 532, -260, 228, -40, 1052, -816, 180, 88,
-496, -556, -672, -368, 428, 92, 356, 404, -408, 252, 196,
-176, -556, 792, 268, 32, 372, 40, 96, -332, 328, 120,
372, -900, -40, 472, -264, -592, 952, 128, 656, 112, 664,
-232, 420, 4, -344, -464, 556, 244, -416, -32, 252, 0,
-412, 188, -696, 508, -476, 324, -1096, 656, -312, 560, 264,
-136, 304, 160, -64, -580, 248, 336, -720, 560, -348, -288,
-276, -196, -500, 852, -544, -236, -1128, -992, -776, 116, 56,
52, 860, 884, 212, -12, 168, 1020, 512, -552, 924, -148,
716, 188, 164, -340, -520, -184, 880, -152, -680, -208, -1156,
-300, -528, -472, 364, 100, -744, -1056, -32, 540, 280, 144,
-676, -32, -232, -280, -224, 96, 568, -76, 172, 148, 148,
104, 32, -296, -32, 788, -80, 32, -16, 280, 288, 944,
428, -484
};
static inline s32 clamp(s32 value, s32 low, s32 high)
{
return value < low ? low : (value > high ? high : value);
}
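/*
 * Rounded right shift by n: adding half of the divisor first makes
 * this the Round2(x, n) helper from the AV1 specification, e.g.
 * round_power_of_two(5, 1) == 3 while a plain 5 >> 1 == 2.
 */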
static inline s32 round_power_of_two(const s32 val, s32 n)
{
const s32 a = (s32)1 << (n - 1);
return (val + a) >> n;
}
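/*
 * Seed the film grain PRNG for one plane: the AV1 grain synthesis
 * process derives a distinct 16-bit seed per plane by XORing two
 * plane-dependent byte constants into the frame grain seed. luma_num
 * selects the plane; 7 and 11 are used below for Cb and Cr.
 */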
static void rockchip_av1_init_random_generator(u8 luma_num, u16 seed,
u16 *random_register)
{
u16 random_reg = seed;
random_reg ^= ((luma_num * 37 + 178) & 255) << 8;
random_reg ^= ((luma_num * 173 + 105) & 255);
*random_register = random_reg;
}
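/*
 * Advance the 16-bit LFSR used by AV1 grain synthesis: the feedback
 * bit is the XOR of bits 0, 1, 3 and 12 and is shifted back in at
 * bit 15.
 */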
static inline void rockchip_av1_update_random_register(u16 *random_register)
{
u16 bit;
u16 random_reg = *random_register;
bit = ((random_reg >> 0) ^ (random_reg >> 1) ^ (random_reg >> 3) ^
(random_reg >> 12)) & 1;
*random_register = (random_reg >> 1) | (bit << 15);
}
static inline s32 rockchip_av1_get_random_number(u16 random_register)
{
return (random_register >> 5) & ((1 << 11) - 1);
}
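/*
 * Luma grain is built in two passes over the 73x82 work block: first
 * every position is filled with white noise taken from
 * gaussian_sequence (indexed by the LFSR) and scaled down by
 * gauss_sec_shift, then an autoregressive filter correlates each
 * sample with its already-generated neighbours.
 */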
void rockchip_av1_generate_luma_grain_block(s32 (*luma_grain_block)[73][82],
s32 bitdepth,
u8 num_y_points,
s32 grain_scale_shift,
s32 ar_coeff_lag,
s32 (*ar_coeffs_y)[24],
s32 ar_coeff_shift,
s32 grain_min,
s32 grain_max,
u16 random_seed)
{
s32 gauss_sec_shift = 12 - bitdepth + grain_scale_shift;
u16 grain_random_register = random_seed;
s32 i, j;
for (i = 0; i < 73; i++) {
for (j = 0; j < 82; j++) {
if (num_y_points > 0) {
rockchip_av1_update_random_register
(&grain_random_register);
(*luma_grain_block)[i][j] =
round_power_of_two(gaussian_sequence
[rockchip_av1_get_random_number
(grain_random_register)],
gauss_sec_shift);
} else {
(*luma_grain_block)[i][j] = 0;
}
}
}
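/*
 * Second pass: autoregressive filtering. Starting past the 3-sample
 * seeding border, accumulate the ar_coeffs_y-weighted neighbours
 * within ar_coeff_lag; the scan stops at the (0, 0) tap because only
 * previously generated positions may contribute.
 */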
for (i = 3; i < 73; i++)
for (j = 3; j < 82 - 3; j++) {
s32 pos = 0;
s32 wsum = 0;
s32 deltarow, deltacol;
for (deltarow = -ar_coeff_lag; deltarow <= 0;
deltarow++) {
for (deltacol = -ar_coeff_lag;
deltacol <= ar_coeff_lag; deltacol++) {
if (deltarow == 0 && deltacol == 0)
break;
wsum = wsum + (*ar_coeffs_y)[pos] *
(*luma_grain_block)[i + deltarow][j + deltacol];
++pos;
}
}
(*luma_grain_block)[i][j] =
clamp((*luma_grain_block)[i][j] +
round_power_of_two(wsum, ar_coeff_shift),
grain_min, grain_max);
}
}
/* Calculate chroma grain noise once per frame */
void rockchip_av1_generate_chroma_grain_block(s32 (*luma_grain_block)[73][82],
s32 (*cb_grain_block)[38][44],
s32 (*cr_grain_block)[38][44],
s32 bitdepth,
u8 num_y_points,
u8 num_cb_points,
u8 num_cr_points,
s32 grain_scale_shift,
s32 ar_coeff_lag,
s32 (*ar_coeffs_cb)[25],
s32 (*ar_coeffs_cr)[25],
s32 ar_coeff_shift,
s32 grain_min,
s32 grain_max,
u8 chroma_scaling_from_luma,
u16 random_seed)
{
s32 gauss_sec_shift = 12 - bitdepth + grain_scale_shift;
u16 grain_random_register = 0;
s32 i, j;
rockchip_av1_init_random_generator(7, random_seed,
&grain_random_register);
for (i = 0; i < 38; i++) {
for (j = 0; j < 44; j++) {
if (num_cb_points || chroma_scaling_from_luma) {
rockchip_av1_update_random_register
(&grain_random_register);
(*cb_grain_block)[i][j] =
round_power_of_two(gaussian_sequence
[rockchip_av1_get_random_number
(grain_random_register)],
gauss_sec_shift);
} else {
(*cb_grain_block)[i][j] = 0;
}
}
}
rockchip_av1_init_random_generator(11, random_seed,
&grain_random_register);
for (i = 0; i < 38; i++) {
for (j = 0; j < 44; j++) {
if (num_cr_points || chroma_scaling_from_luma) {
rockchip_av1_update_random_register
(&grain_random_register);
(*cr_grain_block)[i][j] =
round_power_of_two(gaussian_sequence
[rockchip_av1_get_random_number
(grain_random_register)],
gauss_sec_shift);
} else {
(*cr_grain_block)[i][j] = 0;
}
}
}
for (i = 3; i < 38; i++) {
for (j = 3; j < 44 - 3; j++) {
s32 wsum_cb = 0;
s32 wsum_cr = 0;
s32 pos = 0;
s32 deltarow, deltacol;
for (deltarow = -ar_coeff_lag; deltarow <= 0;
deltarow++) {
for (deltacol = -ar_coeff_lag;
deltacol <= ar_coeff_lag; deltacol++) {
if (deltarow == 0 && deltacol == 0)
break;
wsum_cb = wsum_cb + (*ar_coeffs_cb)[pos] *
(*cb_grain_block)[i + deltarow][j + deltacol];
wsum_cr =
wsum_cr +
(*ar_coeffs_cr)[pos] *
(*cr_grain_block)[i + deltarow][j + deltacol];
++pos;
}
}
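/*
 * With 4:2:0 subsampling each chroma sample covers a 2x2 luma area;
 * when luma grain exists, its rounded average feeds the final AR
 * coefficient so chroma grain stays correlated with luma grain.
 */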
if (num_y_points > 0) {
s32 av_luma = 0;
s32 luma_coord_y = (i << 1) - 3;
s32 luma_coord_x = (j << 1) - 3;
av_luma +=
(*luma_grain_block)[luma_coord_y][luma_coord_x];
av_luma +=
(*luma_grain_block)[luma_coord_y][luma_coord_x + 1];
av_luma +=
(*luma_grain_block)[luma_coord_y + 1][luma_coord_x];
av_luma +=
(*luma_grain_block)[(luma_coord_y + 1)][luma_coord_x + 1];
av_luma = round_power_of_two(av_luma, 2);
wsum_cb = wsum_cb + (*ar_coeffs_cb)[pos] * av_luma;
wsum_cr = wsum_cr + (*ar_coeffs_cr)[pos] * av_luma;
}
if (num_cb_points || chroma_scaling_from_luma) {
(*cb_grain_block)[i][j] =
clamp((*cb_grain_block)[i][j] +
round_power_of_two(wsum_cb, ar_coeff_shift),
grain_min, grain_max);
}
if (num_cr_points || chroma_scaling_from_luma) {
(*cr_grain_block)[i][j] =
clamp((*cr_grain_block)[i][j] +
round_power_of_two(wsum_cr, ar_coeff_shift),
grain_min, grain_max);
}
}
}
}
| linux-master | drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023, Collabora
*
* Author: Benjamin Gaignard <[email protected]>
*/
#include <media/v4l2-mem2mem.h>
#include "hantro.h"
#include "hantro_v4l2.h"
#include "rockchip_vpu981_regs.h"
#define AV1_DEC_MODE 17
#define GM_GLOBAL_MODELS_PER_FRAME 7
#define GLOBAL_MODEL_TOTAL_SIZE (6 * 4 + 4 * 2)
#define GLOBAL_MODEL_SIZE ALIGN(GM_GLOBAL_MODELS_PER_FRAME * GLOBAL_MODEL_TOTAL_SIZE, 2048)
#define AV1_MAX_TILES 128
#define AV1_TILE_INFO_SIZE (AV1_MAX_TILES * 16)
#define AV1DEC_MAX_PIC_BUFFERS 24
#define AV1_REF_SCALE_SHIFT 14
#define AV1_INVALID_IDX -1
#define MAX_FRAME_DISTANCE 31
#define AV1_PRIMARY_REF_NONE 7
#define AV1_TILE_SIZE ALIGN(32 * 128, 4096)
/*
* These 3 values aren't defined in enum v4l2_av1_segment_feature because
* they are not part of the specification
*/
#define V4L2_AV1_SEG_LVL_ALT_LF_Y_H 2
#define V4L2_AV1_SEG_LVL_ALT_LF_U 3
#define V4L2_AV1_SEG_LVL_ALT_LF_V 4
#define SUPERRES_SCALE_BITS 3
#define SCALE_NUMERATOR 8
#define SUPERRES_SCALE_DENOMINATOR_MIN (SCALE_NUMERATOR + 1)
#define RS_SUBPEL_BITS 6
#define RS_SUBPEL_MASK ((1 << RS_SUBPEL_BITS) - 1)
#define RS_SCALE_SUBPEL_BITS 14
#define RS_SCALE_SUBPEL_MASK ((1 << RS_SCALE_SUBPEL_BITS) - 1)
#define RS_SCALE_EXTRA_BITS (RS_SCALE_SUBPEL_BITS - RS_SUBPEL_BITS)
#define RS_SCALE_EXTRA_OFF (1 << (RS_SCALE_EXTRA_BITS - 1))
#define IS_INTRA(type) ((type == V4L2_AV1_KEY_FRAME) || (type == V4L2_AV1_INTRA_ONLY_FRAME))
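/* Map the V4L2 reference frame enum onto 0-based buffer indices */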
#define LST_BUF_IDX (V4L2_AV1_REF_LAST_FRAME - V4L2_AV1_REF_LAST_FRAME)
#define LST2_BUF_IDX (V4L2_AV1_REF_LAST2_FRAME - V4L2_AV1_REF_LAST_FRAME)
#define LST3_BUF_IDX (V4L2_AV1_REF_LAST3_FRAME - V4L2_AV1_REF_LAST_FRAME)
#define GLD_BUF_IDX (V4L2_AV1_REF_GOLDEN_FRAME - V4L2_AV1_REF_LAST_FRAME)
#define BWD_BUF_IDX (V4L2_AV1_REF_BWDREF_FRAME - V4L2_AV1_REF_LAST_FRAME)
#define ALT2_BUF_IDX (V4L2_AV1_REF_ALTREF2_FRAME - V4L2_AV1_REF_LAST_FRAME)
#define ALT_BUF_IDX (V4L2_AV1_REF_ALTREF_FRAME - V4L2_AV1_REF_LAST_FRAME)
#define DIV_LUT_PREC_BITS 14
#define DIV_LUT_BITS 8
#define DIV_LUT_NUM BIT(DIV_LUT_BITS)
#define WARP_PARAM_REDUCE_BITS 6
#define WARPEDMODEL_PREC_BITS 16
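/*
 * Rounded right shifts, matching the Round2() and Round2Signed()
 * helpers of the AV1 specification.
 */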
#define AV1_DIV_ROUND_UP_POW2(value, n) \
({ \
typeof(n) _n = n; \
typeof(value) _value = value; \
(_value + (BIT(_n) >> 1)) >> _n; \
})
#define AV1_DIV_ROUND_UP_POW2_SIGNED(value, n) \
({ \
typeof(n) _n_ = n; \
typeof(value) _value_ = value; \
(((_value_) < 0) ? -AV1_DIV_ROUND_UP_POW2(-(_value_), (_n_)) \
: AV1_DIV_ROUND_UP_POW2((_value_), (_n_))); \
})
struct rockchip_av1_film_grain {
u8 scaling_lut_y[256];
u8 scaling_lut_cb[256];
u8 scaling_lut_cr[256];
s16 cropped_luma_grain_block[4096];
s16 cropped_chroma_grain_block[1024 * 2];
};
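/*
 * Reciprocal table from the AV1 specification (Div_Lut): entry f
 * approximates (1 << DIV_LUT_PREC_BITS) * 256 / (256 + f), so a
 * division can be replaced by a multiply and a shift in
 * rockchip_vpu981_av1_dec_resolve_divisor_32() below.
 */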
static const short div_lut[DIV_LUT_NUM + 1] = {
16384, 16320, 16257, 16194, 16132, 16070, 16009, 15948, 15888, 15828, 15768,
15709, 15650, 15592, 15534, 15477, 15420, 15364, 15308, 15252, 15197, 15142,
15087, 15033, 14980, 14926, 14873, 14821, 14769, 14717, 14665, 14614, 14564,
14513, 14463, 14413, 14364, 14315, 14266, 14218, 14170, 14122, 14075, 14028,
13981, 13935, 13888, 13843, 13797, 13752, 13707, 13662, 13618, 13574, 13530,
13487, 13443, 13400, 13358, 13315, 13273, 13231, 13190, 13148, 13107, 13066,
13026, 12985, 12945, 12906, 12866, 12827, 12788, 12749, 12710, 12672, 12633,
12596, 12558, 12520, 12483, 12446, 12409, 12373, 12336, 12300, 12264, 12228,
12193, 12157, 12122, 12087, 12053, 12018, 11984, 11950, 11916, 11882, 11848,
11815, 11782, 11749, 11716, 11683, 11651, 11619, 11586, 11555, 11523, 11491,
11460, 11429, 11398, 11367, 11336, 11305, 11275, 11245, 11215, 11185, 11155,
11125, 11096, 11067, 11038, 11009, 10980, 10951, 10923, 10894, 10866, 10838,
10810, 10782, 10755, 10727, 10700, 10673, 10645, 10618, 10592, 10565, 10538,
10512, 10486, 10460, 10434, 10408, 10382, 10356, 10331, 10305, 10280, 10255,
10230, 10205, 10180, 10156, 10131, 10107, 10082, 10058, 10034, 10010, 9986,
9963, 9939, 9916, 9892, 9869, 9846, 9823, 9800, 9777, 9754, 9732,
9709, 9687, 9664, 9642, 9620, 9598, 9576, 9554, 9533, 9511, 9489,
9468, 9447, 9425, 9404, 9383, 9362, 9341, 9321, 9300, 9279, 9259,
9239, 9218, 9198, 9178, 9158, 9138, 9118, 9098, 9079, 9059, 9039,
9020, 9001, 8981, 8962, 8943, 8924, 8905, 8886, 8867, 8849, 8830,
8812, 8793, 8775, 8756, 8738, 8720, 8702, 8684, 8666, 8648, 8630,
8613, 8595, 8577, 8560, 8542, 8525, 8508, 8490, 8473, 8456, 8439,
8422, 8405, 8389, 8372, 8355, 8339, 8322, 8306, 8289, 8273, 8257,
8240, 8224, 8208, 8192,
};
static int rockchip_vpu981_get_frame_index(struct hantro_ctx *ctx, int ref)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
u64 timestamp;
int i, idx = frame->ref_frame_idx[ref];
if (idx >= V4L2_AV1_TOTAL_REFS_PER_FRAME || idx < 0)
return AV1_INVALID_IDX;
timestamp = frame->reference_frame_ts[idx];
for (i = 0; i < AV1_MAX_FRAME_BUF_COUNT; i++) {
if (!av1_dec->frame_refs[i].used)
continue;
if (av1_dec->frame_refs[i].timestamp == timestamp)
return i;
}
return AV1_INVALID_IDX;
}
static int rockchip_vpu981_get_order_hint(struct hantro_ctx *ctx, int ref)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
int idx = rockchip_vpu981_get_frame_index(ctx, ref);
if (idx != AV1_INVALID_IDX)
return av1_dec->frame_refs[idx].order_hint;
return 0;
}
static int rockchip_vpu981_av1_dec_frame_ref(struct hantro_ctx *ctx,
u64 timestamp)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
int i;
for (i = 0; i < AV1_MAX_FRAME_BUF_COUNT; i++) {
int j;
if (av1_dec->frame_refs[i].used)
continue;
av1_dec->frame_refs[i].width = frame->frame_width_minus_1 + 1;
av1_dec->frame_refs[i].height = frame->frame_height_minus_1 + 1;
av1_dec->frame_refs[i].mi_cols = DIV_ROUND_UP(frame->frame_width_minus_1 + 1, 8);
av1_dec->frame_refs[i].mi_rows = DIV_ROUND_UP(frame->frame_height_minus_1 + 1, 8);
av1_dec->frame_refs[i].timestamp = timestamp;
av1_dec->frame_refs[i].frame_type = frame->frame_type;
av1_dec->frame_refs[i].order_hint = frame->order_hint;
if (!av1_dec->frame_refs[i].vb2_ref)
av1_dec->frame_refs[i].vb2_ref = hantro_get_dst_buf(ctx);
for (j = 0; j < V4L2_AV1_TOTAL_REFS_PER_FRAME; j++)
av1_dec->frame_refs[i].order_hints[j] = frame->order_hints[j];
av1_dec->frame_refs[i].used = true;
av1_dec->current_frame_index = i;
return i;
}
return AV1_INVALID_IDX;
}
static void rockchip_vpu981_av1_dec_frame_unref(struct hantro_ctx *ctx, int idx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
if (idx >= 0)
av1_dec->frame_refs[idx].used = false;
}
static void rockchip_vpu981_av1_dec_clean_refs(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
int ref, idx;
for (idx = 0; idx < AV1_MAX_FRAME_BUF_COUNT; idx++) {
u64 timestamp = av1_dec->frame_refs[idx].timestamp;
bool used = false;
if (!av1_dec->frame_refs[idx].used)
continue;
for (ref = 0; ref < V4L2_AV1_TOTAL_REFS_PER_FRAME; ref++) {
if (ctrls->frame->reference_frame_ts[ref] == timestamp)
used = true;
}
if (!used)
rockchip_vpu981_av1_dec_frame_unref(ctx, idx);
}
}
static size_t rockchip_vpu981_av1_dec_luma_size(struct hantro_ctx *ctx)
{
return ctx->dst_fmt.width * ctx->dst_fmt.height * ctx->bit_depth / 8;
}
static size_t rockchip_vpu981_av1_dec_chroma_size(struct hantro_ctx *ctx)
{
size_t cr_offset = rockchip_vpu981_av1_dec_luma_size(ctx);
return ALIGN((cr_offset * 3) / 2, 64);
}
static void rockchip_vpu981_av1_dec_tiles_free(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
if (av1_dec->db_data_col.cpu)
dma_free_coherent(vpu->dev, av1_dec->db_data_col.size,
av1_dec->db_data_col.cpu,
av1_dec->db_data_col.dma);
av1_dec->db_data_col.cpu = NULL;
if (av1_dec->db_ctrl_col.cpu)
dma_free_coherent(vpu->dev, av1_dec->db_ctrl_col.size,
av1_dec->db_ctrl_col.cpu,
av1_dec->db_ctrl_col.dma);
av1_dec->db_ctrl_col.cpu = NULL;
if (av1_dec->cdef_col.cpu)
dma_free_coherent(vpu->dev, av1_dec->cdef_col.size,
av1_dec->cdef_col.cpu, av1_dec->cdef_col.dma);
av1_dec->cdef_col.cpu = NULL;
if (av1_dec->sr_col.cpu)
dma_free_coherent(vpu->dev, av1_dec->sr_col.size,
av1_dec->sr_col.cpu, av1_dec->sr_col.dma);
av1_dec->sr_col.cpu = NULL;
if (av1_dec->lr_col.cpu)
dma_free_coherent(vpu->dev, av1_dec->lr_col.size,
av1_dec->lr_col.cpu, av1_dec->lr_col.dma);
av1_dec->lr_col.cpu = NULL;
}
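/*
 * (Re)allocate the per-tile-column working buffers (deblocking data
 * and control, CDEF, super-resolution and loop-restoration columns)
 * whenever the current allocation is too small for the tile layout
 * of the frame being decoded.
 */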
static int rockchip_vpu981_av1_dec_tiles_reallocate(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
unsigned int num_tile_cols = 1 << ctrls->tile_group_entry->tile_col;
unsigned int height = ALIGN(ctrls->frame->frame_height_minus_1 + 1, 64);
unsigned int height_in_sb = height / 64;
unsigned int stripe_num = ((height + 8) + 63) / 64;
size_t size;
if (av1_dec->db_data_col.size >=
ALIGN(height * 12 * ctx->bit_depth / 8, 128) * num_tile_cols)
return 0;
rockchip_vpu981_av1_dec_tiles_free(ctx);
size = ALIGN(height * 12 * ctx->bit_depth / 8, 128) * num_tile_cols;
av1_dec->db_data_col.cpu = dma_alloc_coherent(vpu->dev, size,
&av1_dec->db_data_col.dma,
GFP_KERNEL);
if (!av1_dec->db_data_col.cpu)
goto buffer_allocation_error;
av1_dec->db_data_col.size = size;
size = ALIGN(height * 2 * 16 / 4, 128) * num_tile_cols;
av1_dec->db_ctrl_col.cpu = dma_alloc_coherent(vpu->dev, size,
&av1_dec->db_ctrl_col.dma,
GFP_KERNEL);
if (!av1_dec->db_ctrl_col.cpu)
goto buffer_allocation_error;
av1_dec->db_ctrl_col.size = size;
size = ALIGN(height_in_sb * 44 * ctx->bit_depth * 16 / 8, 128) * num_tile_cols;
av1_dec->cdef_col.cpu = dma_alloc_coherent(vpu->dev, size,
&av1_dec->cdef_col.dma,
GFP_KERNEL);
if (!av1_dec->cdef_col.cpu)
goto buffer_allocation_error;
av1_dec->cdef_col.size = size;
size = ALIGN(height_in_sb * (3040 + 1280), 128) * num_tile_cols;
av1_dec->sr_col.cpu = dma_alloc_coherent(vpu->dev, size,
&av1_dec->sr_col.dma,
GFP_KERNEL);
if (!av1_dec->sr_col.cpu)
goto buffer_allocation_error;
av1_dec->sr_col.size = size;
size = ALIGN(stripe_num * 1536 * ctx->bit_depth / 8, 128) * num_tile_cols;
av1_dec->lr_col.cpu = dma_alloc_coherent(vpu->dev, size,
&av1_dec->lr_col.dma,
GFP_KERNEL);
if (!av1_dec->lr_col.cpu)
goto buffer_allocation_error;
av1_dec->lr_col.size = size;
av1_dec->num_tile_cols_allocated = num_tile_cols;
return 0;
buffer_allocation_error:
rockchip_vpu981_av1_dec_tiles_free(ctx);
return -ENOMEM;
}
void rockchip_vpu981_av1_dec_exit(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
if (av1_dec->global_model.cpu)
dma_free_coherent(vpu->dev, av1_dec->global_model.size,
av1_dec->global_model.cpu,
av1_dec->global_model.dma);
av1_dec->global_model.cpu = NULL;
if (av1_dec->tile_info.cpu)
dma_free_coherent(vpu->dev, av1_dec->tile_info.size,
av1_dec->tile_info.cpu,
av1_dec->tile_info.dma);
av1_dec->tile_info.cpu = NULL;
if (av1_dec->film_grain.cpu)
dma_free_coherent(vpu->dev, av1_dec->film_grain.size,
av1_dec->film_grain.cpu,
av1_dec->film_grain.dma);
av1_dec->film_grain.cpu = NULL;
if (av1_dec->prob_tbl.cpu)
dma_free_coherent(vpu->dev, av1_dec->prob_tbl.size,
av1_dec->prob_tbl.cpu, av1_dec->prob_tbl.dma);
av1_dec->prob_tbl.cpu = NULL;
if (av1_dec->prob_tbl_out.cpu)
dma_free_coherent(vpu->dev, av1_dec->prob_tbl_out.size,
av1_dec->prob_tbl_out.cpu,
av1_dec->prob_tbl_out.dma);
av1_dec->prob_tbl_out.cpu = NULL;
if (av1_dec->tile_buf.cpu)
dma_free_coherent(vpu->dev, av1_dec->tile_buf.size,
av1_dec->tile_buf.cpu, av1_dec->tile_buf.dma);
av1_dec->tile_buf.cpu = NULL;
rockchip_vpu981_av1_dec_tiles_free(ctx);
}
int rockchip_vpu981_av1_dec_init(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
memset(av1_dec, 0, sizeof(*av1_dec));
av1_dec->global_model.cpu = dma_alloc_coherent(vpu->dev, GLOBAL_MODEL_SIZE,
&av1_dec->global_model.dma,
GFP_KERNEL);
if (!av1_dec->global_model.cpu)
return -ENOMEM;
av1_dec->global_model.size = GLOBAL_MODEL_SIZE;
av1_dec->tile_info.cpu = dma_alloc_coherent(vpu->dev, AV1_MAX_TILES,
&av1_dec->tile_info.dma,
GFP_KERNEL);
if (!av1_dec->tile_info.cpu)
return -ENOMEM;
av1_dec->tile_info.size = AV1_MAX_TILES;
av1_dec->film_grain.cpu = dma_alloc_coherent(vpu->dev,
ALIGN(sizeof(struct rockchip_av1_film_grain), 2048),
&av1_dec->film_grain.dma,
GFP_KERNEL);
if (!av1_dec->film_grain.cpu)
return -ENOMEM;
av1_dec->film_grain.size = ALIGN(sizeof(struct rockchip_av1_film_grain), 2048);
av1_dec->prob_tbl.cpu = dma_alloc_coherent(vpu->dev,
ALIGN(sizeof(struct av1cdfs), 2048),
&av1_dec->prob_tbl.dma,
GFP_KERNEL);
if (!av1_dec->prob_tbl.cpu)
return -ENOMEM;
av1_dec->prob_tbl.size = ALIGN(sizeof(struct av1cdfs), 2048);
av1_dec->prob_tbl_out.cpu = dma_alloc_coherent(vpu->dev,
ALIGN(sizeof(struct av1cdfs), 2048),
&av1_dec->prob_tbl_out.dma,
GFP_KERNEL);
if (!av1_dec->prob_tbl_out.cpu)
return -ENOMEM;
av1_dec->prob_tbl_out.size = ALIGN(sizeof(struct av1cdfs), 2048);
av1_dec->cdfs = &av1_dec->default_cdfs;
av1_dec->cdfs_ndvc = &av1_dec->default_cdfs_ndvc;
rockchip_av1_set_default_cdfs(av1_dec->cdfs, av1_dec->cdfs_ndvc);
av1_dec->tile_buf.cpu = dma_alloc_coherent(vpu->dev,
AV1_TILE_SIZE,
&av1_dec->tile_buf.dma,
GFP_KERNEL);
if (!av1_dec->tile_buf.cpu)
return -ENOMEM;
av1_dec->tile_buf.size = AV1_TILE_SIZE;
return 0;
}
static int rockchip_vpu981_av1_dec_prepare_run(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
ctrls->sequence = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_SEQUENCE);
if (WARN_ON(!ctrls->sequence))
return -EINVAL;
ctrls->tile_group_entry =
hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_TILE_GROUP_ENTRY);
if (WARN_ON(!ctrls->tile_group_entry))
return -EINVAL;
ctrls->frame = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_FRAME);
if (WARN_ON(!ctrls->frame))
return -EINVAL;
ctrls->film_grain =
hantro_get_ctrl(ctx, V4L2_CID_STATELESS_AV1_FILM_GRAIN);
return rockchip_vpu981_av1_dec_tiles_reallocate(ctx);
}
static inline int rockchip_vpu981_av1_dec_get_msb(u32 n)
{
if (n == 0)
return 0;
return 31 ^ __builtin_clz(n);
}
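/*
 * Replace a division by d with a multiplication: strip the leading
 * one bit of d, use the top DIV_LUT_BITS bits of the remainder as a
 * div_lut index and return the Q14 reciprocal, with *shift set to the
 * total right shift to apply (spec resolve_divisor()).
 */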
static short rockchip_vpu981_av1_dec_resolve_divisor_32(u32 d, short *shift)
{
int f;
u64 e;
*shift = rockchip_vpu981_av1_dec_get_msb(d);
/* e is obtained from d after resetting the most significant 1 bit. */
e = d - ((u32)1 << *shift);
/* Get the most significant DIV_LUT_BITS (8) bits of e into f */
if (*shift > DIV_LUT_BITS)
f = AV1_DIV_ROUND_UP_POW2(e, *shift - DIV_LUT_BITS);
else
f = e << (DIV_LUT_BITS - *shift);
if (f > DIV_LUT_NUM)
return -1;
*shift += DIV_LUT_PREC_BITS;
/* Use f as lookup into the precomputed table of multipliers */
return div_lut[f];
}
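/*
 * Derive the shear parameters (alpha, beta, gamma, delta) of an
 * affine global motion model, following the shear setup process of
 * the AV1 specification: gamma and delta need a division by mat[2],
 * done via the reciprocal table above, and all four values are
 * reduced to WARP_PARAM_REDUCE_BITS precision.
 */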
static void
rockchip_vpu981_av1_dec_get_shear_params(const u32 *params, s64 *alpha,
s64 *beta, s64 *gamma, s64 *delta)
{
const s32 *mat = (const s32 *)params;
short shift;
short y;
long long gv, dv;
if (mat[2] <= 0)
return;
*alpha = clamp_val(mat[2] - (1 << WARPEDMODEL_PREC_BITS), S16_MIN, S16_MAX);
*beta = clamp_val(mat[3], S16_MIN, S16_MAX);
y = rockchip_vpu981_av1_dec_resolve_divisor_32(abs(mat[2]), &shift) * (mat[2] < 0 ? -1 : 1);
gv = ((long long)mat[4] * (1 << WARPEDMODEL_PREC_BITS)) * y;
*gamma = clamp_val((int)AV1_DIV_ROUND_UP_POW2_SIGNED(gv, shift), S16_MIN, S16_MAX);
dv = ((long long)mat[3] * mat[4]) * y;
*delta = clamp_val(mat[5] -
(int)AV1_DIV_ROUND_UP_POW2_SIGNED(dv, shift) - (1 << WARPEDMODEL_PREC_BITS),
S16_MIN, S16_MAX);
*alpha = AV1_DIV_ROUND_UP_POW2_SIGNED(*alpha, WARP_PARAM_REDUCE_BITS)
* (1 << WARP_PARAM_REDUCE_BITS);
*beta = AV1_DIV_ROUND_UP_POW2_SIGNED(*beta, WARP_PARAM_REDUCE_BITS)
* (1 << WARP_PARAM_REDUCE_BITS);
*gamma = AV1_DIV_ROUND_UP_POW2_SIGNED(*gamma, WARP_PARAM_REDUCE_BITS)
* (1 << WARP_PARAM_REDUCE_BITS);
*delta = AV1_DIV_ROUND_UP_POW2_SIGNED(*delta, WARP_PARAM_REDUCE_BITS)
* (1 << WARP_PARAM_REDUCE_BITS);
}
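/*
 * Serialize one global motion model per reference frame into the
 * layout consumed by the hardware: six 32-bit matrix parameters
 * (with params[2] and params[3] swapped) followed by the four 16-bit
 * shear values.
 */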
static void rockchip_vpu981_av1_dec_set_global_model(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
const struct v4l2_av1_global_motion *gm = &frame->global_motion;
u8 *dst = av1_dec->global_model.cpu;
struct hantro_dev *vpu = ctx->dev;
int ref_frame, i;
memset(dst, 0, GLOBAL_MODEL_SIZE);
for (ref_frame = 0; ref_frame < V4L2_AV1_REFS_PER_FRAME; ++ref_frame) {
s64 alpha = 0, beta = 0, gamma = 0, delta = 0;
for (i = 0; i < 6; ++i) {
if (i == 2)
*(s32 *)dst =
gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][3];
else if (i == 3)
*(s32 *)dst =
gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][2];
else
*(s32 *)dst =
gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][i];
dst += 4;
}
if (gm->type[V4L2_AV1_REF_LAST_FRAME + ref_frame] <= V4L2_AV1_WARP_MODEL_AFFINE)
rockchip_vpu981_av1_dec_get_shear_params(&gm->params[V4L2_AV1_REF_LAST_FRAME + ref_frame][0],
&alpha, &beta, &gamma, &delta);
*(s16 *)dst = alpha;
dst += 2;
*(s16 *)dst = beta;
dst += 2;
*(s16 *)dst = gamma;
dst += 2;
*(s16 *)dst = delta;
dst += 2;
}
hantro_write_addr(vpu, AV1_GLOBAL_MODEL, av1_dec->global_model.dma);
}
static int rockchip_vpu981_av1_tile_log2(int target)
{
int k;
/*
* returns the smallest value for k such that 1 << k is greater
* than or equal to target
*/
for (k = 0; (1 << k) < target; k++)
	;
return k;
}
static void rockchip_vpu981_av1_dec_set_tile_info(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_av1_tile_info *tile_info = &ctrls->frame->tile_info;
const struct v4l2_ctrl_av1_tile_group_entry *group_entry =
ctrls->tile_group_entry;
int context_update_y =
tile_info->context_update_tile_id / tile_info->tile_cols;
int context_update_x =
tile_info->context_update_tile_id % tile_info->tile_cols;
int context_update_tile_id =
context_update_x * tile_info->tile_rows + context_update_y;
u8 *dst = av1_dec->tile_info.cpu;
struct hantro_dev *vpu = ctx->dev;
int tile0, tile1;
memset(dst, 0, av1_dec->tile_info.size);
for (tile0 = 0; tile0 < tile_info->tile_cols; tile0++) {
for (tile1 = 0; tile1 < tile_info->tile_rows; tile1++) {
int tile_id = tile1 * tile_info->tile_cols + tile0;
u32 start, end;
u32 y0 =
tile_info->height_in_sbs_minus_1[tile1] + 1;
u32 x0 = tile_info->width_in_sbs_minus_1[tile0] + 1;
/* tile size in SB units (width,height) */
*dst++ = x0;
*dst++ = 0;
*dst++ = 0;
*dst++ = 0;
*dst++ = y0;
*dst++ = 0;
*dst++ = 0;
*dst++ = 0;
/* tile start position */
start = group_entry[tile_id].tile_offset - group_entry[0].tile_offset;
*dst++ = start & 255;
*dst++ = (start >> 8) & 255;
*dst++ = (start >> 16) & 255;
*dst++ = (start >> 24) & 255;
/* tile end position: start offset plus tile data size in bytes */
end = start + group_entry[tile_id].tile_size;
*dst++ = end & 255;
*dst++ = (end >> 8) & 255;
*dst++ = (end >> 16) & 255;
*dst++ = (end >> 24) & 255;
}
}
hantro_reg_write(vpu, &av1_multicore_expect_context_update, !!(context_update_x == 0));
hantro_reg_write(vpu, &av1_tile_enable,
!!((tile_info->tile_cols > 1) || (tile_info->tile_rows > 1)));
hantro_reg_write(vpu, &av1_num_tile_cols_8k, tile_info->tile_cols);
hantro_reg_write(vpu, &av1_num_tile_rows_8k, tile_info->tile_rows);
hantro_reg_write(vpu, &av1_context_update_tile_id, context_update_tile_id);
hantro_reg_write(vpu, &av1_tile_transpose, 1);
if (rockchip_vpu981_av1_tile_log2(tile_info->tile_cols) ||
rockchip_vpu981_av1_tile_log2(tile_info->tile_rows))
hantro_reg_write(vpu, &av1_dec_tile_size_mag, tile_info->tile_size_bytes - 1);
else
hantro_reg_write(vpu, &av1_dec_tile_size_mag, 3);
hantro_write_addr(vpu, AV1_TILE_BASE, av1_dec->tile_info.dma);
}
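/*
 * Signed distance between two order hints, wrapped to the
 * order_hint_bits range (spec get_relative_dist()): with 8 order
 * hint bits, hints 0 and 255 are one frame apart, not 255.
 */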
static int rockchip_vpu981_av1_dec_get_dist(struct hantro_ctx *ctx,
int a, int b)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
int bits = ctrls->sequence->order_hint_bits - 1;
int diff, m;
if (!ctrls->sequence->order_hint_bits)
return 0;
diff = a - b;
m = 1 << bits;
diff = (diff & (m - 1)) - (diff & m);
return diff;
}
static void rockchip_vpu981_av1_dec_set_frame_sign_bias(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
const struct v4l2_ctrl_av1_sequence *sequence = ctrls->sequence;
int i;
if (!sequence->order_hint_bits || IS_INTRA(frame->frame_type)) {
for (i = 0; i < V4L2_AV1_TOTAL_REFS_PER_FRAME; i++)
av1_dec->ref_frame_sign_bias[i] = 0;
return;
}
/*
 * Set the sign bias for each reference: 1 if it follows the
 * current frame in display order, 0 otherwise.
 */
for (i = 0; i < V4L2_AV1_TOTAL_REFS_PER_FRAME - 1; i++) {
if (rockchip_vpu981_get_frame_index(ctx, i) >= 0) {
int rel_off =
rockchip_vpu981_av1_dec_get_dist(ctx,
rockchip_vpu981_get_order_hint(ctx, i),
frame->order_hint);
av1_dec->ref_frame_sign_bias[i + 1] = (rel_off <= 0) ? 0 : 1;
}
}
}
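/*
 * Program one reference slot: dimensions plus Q14 scale factors
 * (reference size over current size, rounded). Returns true when the
 * reference must be scaled, i.e. either factor differs from 1 << 14.
 */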
static bool
rockchip_vpu981_av1_dec_set_ref(struct hantro_ctx *ctx, int ref, int idx,
int width, int height)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
struct hantro_dev *vpu = ctx->dev;
struct hantro_decoded_buffer *dst;
dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
size_t cr_offset = rockchip_vpu981_av1_dec_luma_size(ctx);
size_t mv_offset = rockchip_vpu981_av1_dec_chroma_size(ctx);
int cur_width = frame->frame_width_minus_1 + 1;
int cur_height = frame->frame_height_minus_1 + 1;
int scale_width =
((width << AV1_REF_SCALE_SHIFT) + cur_width / 2) / cur_width;
int scale_height =
((height << AV1_REF_SCALE_SHIFT) + cur_height / 2) / cur_height;
switch (ref) {
case 0:
hantro_reg_write(vpu, &av1_ref0_height, height);
hantro_reg_write(vpu, &av1_ref0_width, width);
hantro_reg_write(vpu, &av1_ref0_ver_scale, scale_width);
hantro_reg_write(vpu, &av1_ref0_hor_scale, scale_height);
break;
case 1:
hantro_reg_write(vpu, &av1_ref1_height, height);
hantro_reg_write(vpu, &av1_ref1_width, width);
hantro_reg_write(vpu, &av1_ref1_ver_scale, scale_width);
hantro_reg_write(vpu, &av1_ref1_hor_scale, scale_height);
break;
case 2:
hantro_reg_write(vpu, &av1_ref2_height, height);
hantro_reg_write(vpu, &av1_ref2_width, width);
hantro_reg_write(vpu, &av1_ref2_ver_scale, scale_width);
hantro_reg_write(vpu, &av1_ref2_hor_scale, scale_height);
break;
case 3:
hantro_reg_write(vpu, &av1_ref3_height, height);
hantro_reg_write(vpu, &av1_ref3_width, width);
hantro_reg_write(vpu, &av1_ref3_ver_scale, scale_width);
hantro_reg_write(vpu, &av1_ref3_hor_scale, scale_height);
break;
case 4:
hantro_reg_write(vpu, &av1_ref4_height, height);
hantro_reg_write(vpu, &av1_ref4_width, width);
hantro_reg_write(vpu, &av1_ref4_ver_scale, scale_width);
hantro_reg_write(vpu, &av1_ref4_hor_scale, scale_height);
break;
case 5:
hantro_reg_write(vpu, &av1_ref5_height, height);
hantro_reg_write(vpu, &av1_ref5_width, width);
hantro_reg_write(vpu, &av1_ref5_ver_scale, scale_width);
hantro_reg_write(vpu, &av1_ref5_hor_scale, scale_height);
break;
case 6:
hantro_reg_write(vpu, &av1_ref6_height, height);
hantro_reg_write(vpu, &av1_ref6_width, width);
hantro_reg_write(vpu, &av1_ref6_ver_scale, scale_width);
hantro_reg_write(vpu, &av1_ref6_hor_scale, scale_height);
break;
default:
pr_warn("AV1 invalid reference frame index\n");
}
dst = vb2_to_hantro_decoded_buf(&av1_dec->frame_refs[idx].vb2_ref->vb2_buf);
luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
chroma_addr = luma_addr + cr_offset;
mv_addr = luma_addr + mv_offset;
hantro_write_addr(vpu, AV1_REFERENCE_Y(ref), luma_addr);
hantro_write_addr(vpu, AV1_REFERENCE_CB(ref), chroma_addr);
hantro_write_addr(vpu, AV1_REFERENCE_MV(ref), mv_addr);
return (scale_width != (1 << AV1_REF_SCALE_SHIFT)) ||
(scale_height != (1 << AV1_REF_SCALE_SHIFT));
}
static void rockchip_vpu981_av1_dec_set_sign_bias(struct hantro_ctx *ctx,
int ref, int val)
{
struct hantro_dev *vpu = ctx->dev;
switch (ref) {
case 0:
hantro_reg_write(vpu, &av1_ref0_sign_bias, val);
break;
case 1:
hantro_reg_write(vpu, &av1_ref1_sign_bias, val);
break;
case 2:
hantro_reg_write(vpu, &av1_ref2_sign_bias, val);
break;
case 3:
hantro_reg_write(vpu, &av1_ref3_sign_bias, val);
break;
case 4:
hantro_reg_write(vpu, &av1_ref4_sign_bias, val);
break;
case 5:
hantro_reg_write(vpu, &av1_ref5_sign_bias, val);
break;
case 6:
hantro_reg_write(vpu, &av1_ref6_sign_bias, val);
break;
default:
pr_warn("AV1 invalid sign bias index\n");
break;
}
}
static void rockchip_vpu981_av1_dec_set_segmentation(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
const struct v4l2_av1_segmentation *seg = &frame->segmentation;
u32 segval[V4L2_AV1_MAX_SEGMENTS][V4L2_AV1_SEG_LVL_MAX] = { 0 };
struct hantro_dev *vpu = ctx->dev;
u8 segsign = 0, preskip_segid = 0, last_active_seg = 0, i, j;
if (!!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_ENABLED) &&
frame->primary_ref_frame < V4L2_AV1_REFS_PER_FRAME) {
int idx = rockchip_vpu981_get_frame_index(ctx, frame->primary_ref_frame);
if (idx >= 0) {
dma_addr_t luma_addr, mv_addr = 0;
struct hantro_decoded_buffer *seg;
size_t mv_offset = rockchip_vpu981_av1_dec_chroma_size(ctx);
seg = vb2_to_hantro_decoded_buf(&av1_dec->frame_refs[idx].vb2_ref->vb2_buf);
luma_addr = hantro_get_dec_buf_addr(ctx, &seg->base.vb.vb2_buf);
mv_addr = luma_addr + mv_offset;
hantro_write_addr(vpu, AV1_SEGMENTATION, mv_addr);
hantro_reg_write(vpu, &av1_use_temporal3_mvs, 1);
}
}
hantro_reg_write(vpu, &av1_segment_temp_upd_e,
!!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_TEMPORAL_UPDATE));
hantro_reg_write(vpu, &av1_segment_upd_e,
!!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_UPDATE_MAP));
hantro_reg_write(vpu, &av1_segment_e,
!!(seg->flags & V4L2_AV1_SEGMENTATION_FLAG_ENABLED));
hantro_reg_write(vpu, &av1_error_resilient,
!!(frame->flags & V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE));
if (IS_INTRA(frame->frame_type) ||
!!(frame->flags & V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE)) {
hantro_reg_write(vpu, &av1_use_temporal3_mvs, 0);
}
if (seg->flags & V4L2_AV1_SEGMENTATION_FLAG_ENABLED) {
int s;
for (s = 0; s < V4L2_AV1_MAX_SEGMENTS; s++) {
if (seg->feature_enabled[s] &
V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_Q)) {
segval[s][V4L2_AV1_SEG_LVL_ALT_Q] =
clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_Q]),
0, 255);
segsign |=
(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_Q] < 0) << s;
}
if (seg->feature_enabled[s] &
V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_Y_V))
segval[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_V] =
clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]),
-63, 63);
if (seg->feature_enabled[s] &
V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_Y_H))
segval[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_H] =
clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]),
-63, 63);
if (seg->feature_enabled[s] &
V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_U))
segval[s][V4L2_AV1_SEG_LVL_ALT_LF_U] =
clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_U]),
-63, 63);
if (seg->feature_enabled[s] &
V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_LF_V))
segval[s][V4L2_AV1_SEG_LVL_ALT_LF_V] =
clamp(abs(seg->feature_data[s][V4L2_AV1_SEG_LVL_ALT_LF_V]),
-63, 63);
if (frame->frame_type && seg->feature_enabled[s] &
V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_REF_FRAME))
segval[s][V4L2_AV1_SEG_LVL_REF_FRAME]++;
if (seg->feature_enabled[s] &
V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_REF_SKIP))
segval[s][V4L2_AV1_SEG_LVL_REF_SKIP] = 1;
if (seg->feature_enabled[s] &
V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_REF_GLOBALMV))
segval[s][V4L2_AV1_SEG_LVL_REF_GLOBALMV] = 1;
}
}
for (i = 0; i < V4L2_AV1_MAX_SEGMENTS; i++) {
for (j = 0; j < V4L2_AV1_SEG_LVL_MAX; j++) {
if (seg->feature_enabled[i]
& V4L2_AV1_SEGMENT_FEATURE_ENABLED(j)) {
preskip_segid |= (j >= V4L2_AV1_SEG_LVL_REF_FRAME);
last_active_seg = max(i, last_active_seg);
}
}
}
hantro_reg_write(vpu, &av1_last_active_seg, last_active_seg);
hantro_reg_write(vpu, &av1_preskip_segid, preskip_segid);
hantro_reg_write(vpu, &av1_seg_quant_sign, segsign);
/* Write QP, filter level, ref frame and skip for every segment */
hantro_reg_write(vpu, &av1_quant_seg0,
segval[0][V4L2_AV1_SEG_LVL_ALT_Q]);
hantro_reg_write(vpu, &av1_filt_level_delta0_seg0,
segval[0][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
hantro_reg_write(vpu, &av1_filt_level_delta1_seg0,
segval[0][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
hantro_reg_write(vpu, &av1_filt_level_delta2_seg0,
segval[0][V4L2_AV1_SEG_LVL_ALT_LF_U]);
hantro_reg_write(vpu, &av1_filt_level_delta3_seg0,
segval[0][V4L2_AV1_SEG_LVL_ALT_LF_V]);
hantro_reg_write(vpu, &av1_refpic_seg0,
segval[0][V4L2_AV1_SEG_LVL_REF_FRAME]);
hantro_reg_write(vpu, &av1_skip_seg0,
segval[0][V4L2_AV1_SEG_LVL_REF_SKIP]);
hantro_reg_write(vpu, &av1_global_mv_seg0,
segval[0][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
hantro_reg_write(vpu, &av1_quant_seg1,
segval[1][V4L2_AV1_SEG_LVL_ALT_Q]);
hantro_reg_write(vpu, &av1_filt_level_delta0_seg1,
segval[1][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
hantro_reg_write(vpu, &av1_filt_level_delta1_seg1,
segval[1][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
hantro_reg_write(vpu, &av1_filt_level_delta2_seg1,
segval[1][V4L2_AV1_SEG_LVL_ALT_LF_U]);
hantro_reg_write(vpu, &av1_filt_level_delta3_seg1,
segval[1][V4L2_AV1_SEG_LVL_ALT_LF_V]);
hantro_reg_write(vpu, &av1_refpic_seg1,
segval[1][V4L2_AV1_SEG_LVL_REF_FRAME]);
hantro_reg_write(vpu, &av1_skip_seg1,
segval[1][V4L2_AV1_SEG_LVL_REF_SKIP]);
hantro_reg_write(vpu, &av1_global_mv_seg1,
segval[1][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
hantro_reg_write(vpu, &av1_quant_seg2,
segval[2][V4L2_AV1_SEG_LVL_ALT_Q]);
hantro_reg_write(vpu, &av1_filt_level_delta0_seg2,
segval[2][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
hantro_reg_write(vpu, &av1_filt_level_delta1_seg2,
segval[2][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
hantro_reg_write(vpu, &av1_filt_level_delta2_seg2,
segval[2][V4L2_AV1_SEG_LVL_ALT_LF_U]);
hantro_reg_write(vpu, &av1_filt_level_delta3_seg2,
segval[2][V4L2_AV1_SEG_LVL_ALT_LF_V]);
hantro_reg_write(vpu, &av1_refpic_seg2,
segval[2][V4L2_AV1_SEG_LVL_REF_FRAME]);
hantro_reg_write(vpu, &av1_skip_seg2,
segval[2][V4L2_AV1_SEG_LVL_REF_SKIP]);
hantro_reg_write(vpu, &av1_global_mv_seg2,
segval[2][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
hantro_reg_write(vpu, &av1_quant_seg3,
segval[3][V4L2_AV1_SEG_LVL_ALT_Q]);
hantro_reg_write(vpu, &av1_filt_level_delta0_seg3,
segval[3][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
hantro_reg_write(vpu, &av1_filt_level_delta1_seg3,
segval[3][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
hantro_reg_write(vpu, &av1_filt_level_delta2_seg3,
segval[3][V4L2_AV1_SEG_LVL_ALT_LF_U]);
hantro_reg_write(vpu, &av1_filt_level_delta3_seg3,
segval[3][V4L2_AV1_SEG_LVL_ALT_LF_V]);
hantro_reg_write(vpu, &av1_refpic_seg3,
segval[3][V4L2_AV1_SEG_LVL_REF_FRAME]);
hantro_reg_write(vpu, &av1_skip_seg3,
segval[3][V4L2_AV1_SEG_LVL_REF_SKIP]);
hantro_reg_write(vpu, &av1_global_mv_seg3,
segval[3][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
hantro_reg_write(vpu, &av1_quant_seg4,
segval[4][V4L2_AV1_SEG_LVL_ALT_Q]);
hantro_reg_write(vpu, &av1_filt_level_delta0_seg4,
segval[4][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
hantro_reg_write(vpu, &av1_filt_level_delta1_seg4,
segval[4][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
hantro_reg_write(vpu, &av1_filt_level_delta2_seg4,
segval[4][V4L2_AV1_SEG_LVL_ALT_LF_U]);
hantro_reg_write(vpu, &av1_filt_level_delta3_seg4,
segval[4][V4L2_AV1_SEG_LVL_ALT_LF_V]);
hantro_reg_write(vpu, &av1_refpic_seg4,
segval[4][V4L2_AV1_SEG_LVL_REF_FRAME]);
hantro_reg_write(vpu, &av1_skip_seg4,
segval[4][V4L2_AV1_SEG_LVL_REF_SKIP]);
hantro_reg_write(vpu, &av1_global_mv_seg4,
segval[4][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
hantro_reg_write(vpu, &av1_quant_seg5,
segval[5][V4L2_AV1_SEG_LVL_ALT_Q]);
hantro_reg_write(vpu, &av1_filt_level_delta0_seg5,
segval[5][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
hantro_reg_write(vpu, &av1_filt_level_delta1_seg5,
segval[5][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
hantro_reg_write(vpu, &av1_filt_level_delta2_seg5,
segval[5][V4L2_AV1_SEG_LVL_ALT_LF_U]);
hantro_reg_write(vpu, &av1_filt_level_delta3_seg5,
segval[5][V4L2_AV1_SEG_LVL_ALT_LF_V]);
hantro_reg_write(vpu, &av1_refpic_seg5,
segval[5][V4L2_AV1_SEG_LVL_REF_FRAME]);
hantro_reg_write(vpu, &av1_skip_seg5,
segval[5][V4L2_AV1_SEG_LVL_REF_SKIP]);
hantro_reg_write(vpu, &av1_global_mv_seg5,
segval[5][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
hantro_reg_write(vpu, &av1_quant_seg6,
segval[6][V4L2_AV1_SEG_LVL_ALT_Q]);
hantro_reg_write(vpu, &av1_filt_level_delta0_seg6,
segval[6][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
hantro_reg_write(vpu, &av1_filt_level_delta1_seg6,
segval[6][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
hantro_reg_write(vpu, &av1_filt_level_delta2_seg6,
segval[6][V4L2_AV1_SEG_LVL_ALT_LF_U]);
hantro_reg_write(vpu, &av1_filt_level_delta3_seg6,
segval[6][V4L2_AV1_SEG_LVL_ALT_LF_V]);
hantro_reg_write(vpu, &av1_refpic_seg6,
segval[6][V4L2_AV1_SEG_LVL_REF_FRAME]);
hantro_reg_write(vpu, &av1_skip_seg6,
segval[6][V4L2_AV1_SEG_LVL_REF_SKIP]);
hantro_reg_write(vpu, &av1_global_mv_seg6,
segval[6][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
hantro_reg_write(vpu, &av1_quant_seg7,
segval[7][V4L2_AV1_SEG_LVL_ALT_Q]);
hantro_reg_write(vpu, &av1_filt_level_delta0_seg7,
segval[7][V4L2_AV1_SEG_LVL_ALT_LF_Y_V]);
hantro_reg_write(vpu, &av1_filt_level_delta1_seg7,
segval[7][V4L2_AV1_SEG_LVL_ALT_LF_Y_H]);
hantro_reg_write(vpu, &av1_filt_level_delta2_seg7,
segval[7][V4L2_AV1_SEG_LVL_ALT_LF_U]);
hantro_reg_write(vpu, &av1_filt_level_delta3_seg7,
segval[7][V4L2_AV1_SEG_LVL_ALT_LF_V]);
hantro_reg_write(vpu, &av1_refpic_seg7,
segval[7][V4L2_AV1_SEG_LVL_REF_FRAME]);
hantro_reg_write(vpu, &av1_skip_seg7,
segval[7][V4L2_AV1_SEG_LVL_REF_SKIP]);
hantro_reg_write(vpu, &av1_global_mv_seg7,
segval[7][V4L2_AV1_SEG_LVL_REF_GLOBALMV]);
}
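/*
 * A frame is lossless only if every segment ends up with an
 * effective qindex of 0 and none of the delta-Q values is set.
 */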
static bool rockchip_vpu981_av1_dec_is_lossless(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
const struct v4l2_av1_segmentation *segmentation = &frame->segmentation;
const struct v4l2_av1_quantization *quantization = &frame->quantization;
int i;
for (i = 0; i < V4L2_AV1_MAX_SEGMENTS; i++) {
int qindex = quantization->base_q_idx;
if (segmentation->feature_enabled[i] &
V4L2_AV1_SEGMENT_FEATURE_ENABLED(V4L2_AV1_SEG_LVL_ALT_Q)) {
qindex += segmentation->feature_data[i][V4L2_AV1_SEG_LVL_ALT_Q];
}
qindex = clamp(qindex, 0, 255);
if (qindex ||
quantization->delta_q_y_dc ||
quantization->delta_q_u_dc ||
quantization->delta_q_u_ac ||
quantization->delta_q_v_dc ||
quantization->delta_q_v_ac)
return false;
}
return true;
}
static void rockchip_vpu981_av1_dec_set_loopfilter(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
const struct v4l2_av1_loop_filter *loop_filter = &frame->loop_filter;
bool filtering_dis = (loop_filter->level[0] == 0) && (loop_filter->level[1] == 0);
struct hantro_dev *vpu = ctx->dev;
hantro_reg_write(vpu, &av1_filtering_dis, filtering_dis);
hantro_reg_write(vpu, &av1_filt_level_base_gt32, loop_filter->level[0] > 32);
hantro_reg_write(vpu, &av1_filt_sharpness, loop_filter->sharpness);
hantro_reg_write(vpu, &av1_filt_level0, loop_filter->level[0]);
hantro_reg_write(vpu, &av1_filt_level1, loop_filter->level[1]);
hantro_reg_write(vpu, &av1_filt_level2, loop_filter->level[2]);
hantro_reg_write(vpu, &av1_filt_level3, loop_filter->level[3]);
if (loop_filter->flags & V4L2_AV1_LOOP_FILTER_FLAG_DELTA_ENABLED &&
!rockchip_vpu981_av1_dec_is_lossless(ctx) &&
!(frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC)) {
hantro_reg_write(vpu, &av1_filt_ref_adj_0,
loop_filter->ref_deltas[0]);
hantro_reg_write(vpu, &av1_filt_ref_adj_1,
loop_filter->ref_deltas[1]);
hantro_reg_write(vpu, &av1_filt_ref_adj_2,
loop_filter->ref_deltas[2]);
hantro_reg_write(vpu, &av1_filt_ref_adj_3,
loop_filter->ref_deltas[3]);
hantro_reg_write(vpu, &av1_filt_ref_adj_4,
loop_filter->ref_deltas[4]);
hantro_reg_write(vpu, &av1_filt_ref_adj_5,
loop_filter->ref_deltas[5]);
hantro_reg_write(vpu, &av1_filt_ref_adj_6,
loop_filter->ref_deltas[6]);
hantro_reg_write(vpu, &av1_filt_ref_adj_7,
loop_filter->ref_deltas[7]);
hantro_reg_write(vpu, &av1_filt_mb_adj_0,
loop_filter->mode_deltas[0]);
hantro_reg_write(vpu, &av1_filt_mb_adj_1,
loop_filter->mode_deltas[1]);
} else {
hantro_reg_write(vpu, &av1_filt_ref_adj_0, 0);
hantro_reg_write(vpu, &av1_filt_ref_adj_1, 0);
hantro_reg_write(vpu, &av1_filt_ref_adj_2, 0);
hantro_reg_write(vpu, &av1_filt_ref_adj_3, 0);
hantro_reg_write(vpu, &av1_filt_ref_adj_4, 0);
hantro_reg_write(vpu, &av1_filt_ref_adj_5, 0);
hantro_reg_write(vpu, &av1_filt_ref_adj_6, 0);
hantro_reg_write(vpu, &av1_filt_ref_adj_7, 0);
hantro_reg_write(vpu, &av1_filt_mb_adj_0, 0);
hantro_reg_write(vpu, &av1_filt_mb_adj_1, 0);
}
hantro_write_addr(vpu, AV1_DB_DATA_COL, av1_dec->db_data_col.dma);
hantro_write_addr(vpu, AV1_DB_CTRL_COL, av1_dec->db_ctrl_col.dma);
}
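/*
 * After a frame is decoded, propagate the CDFs written back by the
 * hardware to every frame slot flagged in refresh_frame_flags. For
 * intra frames the regular MV CDFs are preserved and the written-back
 * MV CDFs are kept as the intra block copy (NDVC) variant.
 */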
static void rockchip_vpu981_av1_dec_update_prob(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
bool frame_is_intra = IS_INTRA(frame->frame_type);
struct av1cdfs *out_cdfs = (struct av1cdfs *)av1_dec->prob_tbl_out.cpu;
int i;
if (frame->flags & V4L2_AV1_FRAME_FLAG_DISABLE_FRAME_END_UPDATE_CDF)
return;
for (i = 0; i < NUM_REF_FRAMES; i++) {
if (frame->refresh_frame_flags & BIT(i)) {
struct mvcdfs stored_mv_cdf;
rockchip_av1_get_cdfs(ctx, i);
stored_mv_cdf = av1_dec->cdfs->mv_cdf;
*av1_dec->cdfs = *out_cdfs;
if (frame_is_intra) {
av1_dec->cdfs->mv_cdf = stored_mv_cdf;
*av1_dec->cdfs_ndvc = out_cdfs->mv_cdf;
}
rockchip_av1_store_cdfs(ctx,
frame->refresh_frame_flags);
break;
}
}
}
void rockchip_vpu981_av1_dec_done(struct hantro_ctx *ctx)
{
rockchip_vpu981_av1_dec_update_prob(ctx);
}
static void rockchip_vpu981_av1_dec_set_prob(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
const struct v4l2_av1_quantization *quantization = &frame->quantization;
struct hantro_dev *vpu = ctx->dev;
bool error_resilient_mode =
!!(frame->flags & V4L2_AV1_FRAME_FLAG_ERROR_RESILIENT_MODE);
bool frame_is_intra = IS_INTRA(frame->frame_type);
if (error_resilient_mode || frame_is_intra ||
frame->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
av1_dec->cdfs = &av1_dec->default_cdfs;
av1_dec->cdfs_ndvc = &av1_dec->default_cdfs_ndvc;
rockchip_av1_default_coeff_probs(quantization->base_q_idx,
av1_dec->cdfs);
} else {
rockchip_av1_get_cdfs(ctx, frame->ref_frame_idx[frame->primary_ref_frame]);
}
rockchip_av1_store_cdfs(ctx, frame->refresh_frame_flags);
memcpy(av1_dec->prob_tbl.cpu, av1_dec->cdfs, sizeof(struct av1cdfs));
if (frame_is_intra) {
int mv_offset = offsetof(struct av1cdfs, mv_cdf);
/* Overwrite MV context area with intrabc MV context */
memcpy(av1_dec->prob_tbl.cpu + mv_offset, av1_dec->cdfs_ndvc,
sizeof(struct mvcdfs));
}
hantro_write_addr(vpu, AV1_PROP_TABLE_OUT, av1_dec->prob_tbl_out.dma);
hantro_write_addr(vpu, AV1_PROP_TABLE, av1_dec->prob_tbl.dma);
}
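/*
 * Expand the piecewise-linear scaling points into a 256-entry LUT by
 * fixed-point interpolation between consecutive (value, scaling)
 * pairs; entries past the last point repeat its scaling value.
 */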
static void
rockchip_vpu981_av1_dec_init_scaling_function(const u8 *values, const u8 *scaling,
u8 num_points, u8 *scaling_lut)
{
int i, point;
if (num_points == 0) {
memset(scaling_lut, 0, 256);
return;
}
for (point = 0; point < num_points - 1; point++) {
int x;
s32 delta_y = scaling[point + 1] - scaling[point];
s32 delta_x = values[point + 1] - values[point];
s64 delta =
delta_x ? delta_y * ((65536 + (delta_x >> 1)) /
delta_x) : 0;
for (x = 0; x < delta_x; x++) {
scaling_lut[values[point] + x] =
scaling[point] +
(s32)((x * delta + 32768) >> 16);
}
}
for (i = values[num_points - 1]; i < 256; i++)
scaling_lut[i] = scaling[num_points - 1];
}
static void rockchip_vpu981_av1_dec_set_fgs(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_film_grain *film_grain = ctrls->film_grain;
struct rockchip_av1_film_grain *fgmem = av1_dec->film_grain.cpu;
struct hantro_dev *vpu = ctx->dev;
bool scaling_from_luma =
!!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA);
s32 (*ar_coeffs_y)[24];
s32 (*ar_coeffs_cb)[25];
s32 (*ar_coeffs_cr)[25];
s32 (*luma_grain_block)[73][82];
s32 (*cb_grain_block)[38][44];
s32 (*cr_grain_block)[38][44];
s32 ar_coeff_lag, ar_coeff_shift;
s32 grain_scale_shift, bitdepth;
s32 grain_center, grain_min, grain_max;
int i, j;
hantro_reg_write(vpu, &av1_apply_grain, 0);
if (!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_APPLY_GRAIN)) {
hantro_reg_write(vpu, &av1_num_y_points_b, 0);
hantro_reg_write(vpu, &av1_num_cb_points_b, 0);
hantro_reg_write(vpu, &av1_num_cr_points_b, 0);
hantro_reg_write(vpu, &av1_scaling_shift, 0);
hantro_reg_write(vpu, &av1_cb_mult, 0);
hantro_reg_write(vpu, &av1_cb_luma_mult, 0);
hantro_reg_write(vpu, &av1_cb_offset, 0);
hantro_reg_write(vpu, &av1_cr_mult, 0);
hantro_reg_write(vpu, &av1_cr_luma_mult, 0);
hantro_reg_write(vpu, &av1_cr_offset, 0);
hantro_reg_write(vpu, &av1_overlap_flag, 0);
hantro_reg_write(vpu, &av1_clip_to_restricted_range, 0);
hantro_reg_write(vpu, &av1_chroma_scaling_from_luma, 0);
hantro_reg_write(vpu, &av1_random_seed, 0);
hantro_write_addr(vpu, AV1_FILM_GRAIN, 0);
return;
}
ar_coeffs_y = kzalloc(sizeof(int32_t) * 24, GFP_KERNEL);
ar_coeffs_cb = kzalloc(sizeof(int32_t) * 25, GFP_KERNEL);
ar_coeffs_cr = kzalloc(sizeof(int32_t) * 25, GFP_KERNEL);
luma_grain_block = kzalloc(sizeof(int32_t) * 73 * 82, GFP_KERNEL);
cb_grain_block = kzalloc(sizeof(int32_t) * 38 * 44, GFP_KERNEL);
cr_grain_block = kzalloc(sizeof(int32_t) * 38 * 44, GFP_KERNEL);
if (!ar_coeffs_y || !ar_coeffs_cb || !ar_coeffs_cr ||
!luma_grain_block || !cb_grain_block || !cr_grain_block) {
pr_warn("Fail allocating memory for film grain parameters\n");
goto alloc_fail;
}
hantro_reg_write(vpu, &av1_apply_grain, 1);
hantro_reg_write(vpu, &av1_num_y_points_b,
film_grain->num_y_points > 0);
hantro_reg_write(vpu, &av1_num_cb_points_b,
film_grain->num_cb_points > 0);
hantro_reg_write(vpu, &av1_num_cr_points_b,
film_grain->num_cr_points > 0);
hantro_reg_write(vpu, &av1_scaling_shift,
film_grain->grain_scaling_minus_8 + 8);
if (!scaling_from_luma) {
hantro_reg_write(vpu, &av1_cb_mult, film_grain->cb_mult - 128);
hantro_reg_write(vpu, &av1_cb_luma_mult, film_grain->cb_luma_mult - 128);
hantro_reg_write(vpu, &av1_cb_offset, film_grain->cb_offset - 256);
hantro_reg_write(vpu, &av1_cr_mult, film_grain->cr_mult - 128);
hantro_reg_write(vpu, &av1_cr_luma_mult, film_grain->cr_luma_mult - 128);
hantro_reg_write(vpu, &av1_cr_offset, film_grain->cr_offset - 256);
} else {
hantro_reg_write(vpu, &av1_cb_mult, 0);
hantro_reg_write(vpu, &av1_cb_luma_mult, 0);
hantro_reg_write(vpu, &av1_cb_offset, 0);
hantro_reg_write(vpu, &av1_cr_mult, 0);
hantro_reg_write(vpu, &av1_cr_luma_mult, 0);
hantro_reg_write(vpu, &av1_cr_offset, 0);
}
hantro_reg_write(vpu, &av1_overlap_flag,
!!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_OVERLAP));
hantro_reg_write(vpu, &av1_clip_to_restricted_range,
!!(film_grain->flags & V4L2_AV1_FILM_GRAIN_FLAG_CLIP_TO_RESTRICTED_RANGE));
hantro_reg_write(vpu, &av1_chroma_scaling_from_luma, scaling_from_luma);
hantro_reg_write(vpu, &av1_random_seed, film_grain->grain_seed);
rockchip_vpu981_av1_dec_init_scaling_function(film_grain->point_y_value,
film_grain->point_y_scaling,
film_grain->num_y_points,
fgmem->scaling_lut_y);
if (film_grain->flags &
V4L2_AV1_FILM_GRAIN_FLAG_CHROMA_SCALING_FROM_LUMA) {
memcpy(fgmem->scaling_lut_cb, fgmem->scaling_lut_y,
sizeof(*fgmem->scaling_lut_y) * 256);
memcpy(fgmem->scaling_lut_cr, fgmem->scaling_lut_y,
sizeof(*fgmem->scaling_lut_y) * 256);
} else {
rockchip_vpu981_av1_dec_init_scaling_function
(film_grain->point_cb_value, film_grain->point_cb_scaling,
film_grain->num_cb_points, fgmem->scaling_lut_cb);
rockchip_vpu981_av1_dec_init_scaling_function
(film_grain->point_cr_value, film_grain->point_cr_scaling,
film_grain->num_cr_points, fgmem->scaling_lut_cr);
}
for (i = 0; i < V4L2_AV1_AR_COEFFS_SIZE; i++) {
if (i < 24)
(*ar_coeffs_y)[i] = film_grain->ar_coeffs_y_plus_128[i] - 128;
(*ar_coeffs_cb)[i] = film_grain->ar_coeffs_cb_plus_128[i] - 128;
(*ar_coeffs_cr)[i] = film_grain->ar_coeffs_cr_plus_128[i] - 128;
}
ar_coeff_lag = film_grain->ar_coeff_lag;
ar_coeff_shift = film_grain->ar_coeff_shift_minus_6 + 6;
grain_scale_shift = film_grain->grain_scale_shift;
bitdepth = ctx->bit_depth;
grain_center = 128 << (bitdepth - 8);
grain_min = 0 - grain_center;
grain_max = (256 << (bitdepth - 8)) - 1 - grain_center;
rockchip_av1_generate_luma_grain_block(luma_grain_block, bitdepth,
film_grain->num_y_points, grain_scale_shift,
ar_coeff_lag, ar_coeffs_y, ar_coeff_shift,
grain_min, grain_max, film_grain->grain_seed);
rockchip_av1_generate_chroma_grain_block(luma_grain_block, cb_grain_block,
cr_grain_block, bitdepth,
film_grain->num_y_points,
film_grain->num_cb_points,
film_grain->num_cr_points,
grain_scale_shift, ar_coeff_lag, ar_coeffs_cb,
ar_coeffs_cr, ar_coeff_shift, grain_min,
grain_max,
scaling_from_luma,
film_grain->grain_seed);
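/*
 * Crop the grain work blocks to the layout the hardware consumes: a
 * 64x64 luma window starting at (9, 9) and a 32x32 chroma window
 * starting at (6, 6), with Cb and Cr samples interleaved per row.
 */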
for (i = 0; i < 64; i++) {
for (j = 0; j < 64; j++)
fgmem->cropped_luma_grain_block[i * 64 + j] =
(*luma_grain_block)[i + 9][j + 9];
}
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
fgmem->cropped_chroma_grain_block[i * 64 + 2 * j] =
(*cb_grain_block)[i + 6][j + 6];
fgmem->cropped_chroma_grain_block[i * 64 + 2 * j + 1] =
(*cr_grain_block)[i + 6][j + 6];
}
}
hantro_write_addr(vpu, AV1_FILM_GRAIN, av1_dec->film_grain.dma);
alloc_fail:
kfree(ar_coeffs_y);
kfree(ar_coeffs_cb);
kfree(ar_coeffs_cr);
kfree(luma_grain_block);
kfree(cb_grain_block);
kfree(cr_grain_block);
}
static void rockchip_vpu981_av1_dec_set_cdef(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
const struct v4l2_av1_cdef *cdef = &frame->cdef;
struct hantro_dev *vpu = ctx->dev;
u32 luma_pri_strength = 0;
u16 luma_sec_strength = 0;
u32 chroma_pri_strength = 0;
u16 chroma_sec_strength = 0;
int i;
hantro_reg_write(vpu, &av1_cdef_bits, cdef->bits);
hantro_reg_write(vpu, &av1_cdef_damping, cdef->damping_minus_3);
for (i = 0; i < BIT(cdef->bits); i++) {
luma_pri_strength |= cdef->y_pri_strength[i] << (i * 4);
if (cdef->y_sec_strength[i] == 4)
luma_sec_strength |= 3 << (i * 2);
else
luma_sec_strength |= cdef->y_sec_strength[i] << (i * 2);
chroma_pri_strength |= cdef->uv_pri_strength[i] << (i * 4);
if (cdef->uv_sec_strength[i] == 4)
chroma_sec_strength |= 3 << (i * 2);
else
chroma_sec_strength |= cdef->uv_sec_strength[i] << (i * 2);
}
hantro_reg_write(vpu, &av1_cdef_luma_primary_strength,
luma_pri_strength);
hantro_reg_write(vpu, &av1_cdef_luma_secondary_strength,
luma_sec_strength);
hantro_reg_write(vpu, &av1_cdef_chroma_primary_strength,
chroma_pri_strength);
hantro_reg_write(vpu, &av1_cdef_chroma_secondary_strength,
chroma_sec_strength);
hantro_write_addr(vpu, AV1_CDEF_COL, av1_dec->cdef_col.dma);
}
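/*
 * Program loop restoration: two bits per plane for both the frame
 * restoration type and the restoration unit size. When LR is not
 * used, the unit size presumably defaults to the hardware's largest
 * encoding (3) for all planes.
 */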
static void rockchip_vpu981_av1_dec_set_lr(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
const struct v4l2_av1_loop_restoration *loop_restoration =
&frame->loop_restoration;
struct hantro_dev *vpu = ctx->dev;
u16 lr_type = 0, lr_unit_size = 0;
u8 restoration_unit_size[V4L2_AV1_NUM_PLANES_MAX] = { 3, 3, 3 };
int i;
if (loop_restoration->flags & V4L2_AV1_LOOP_RESTORATION_FLAG_USES_LR) {
restoration_unit_size[0] = 1 + loop_restoration->lr_unit_shift;
restoration_unit_size[1] =
1 + loop_restoration->lr_unit_shift - loop_restoration->lr_uv_shift;
restoration_unit_size[2] =
1 + loop_restoration->lr_unit_shift - loop_restoration->lr_uv_shift;
}
for (i = 0; i < V4L2_AV1_NUM_PLANES_MAX; i++) {
lr_type |=
loop_restoration->frame_restoration_type[i] << (i * 2);
lr_unit_size |= restoration_unit_size[i] << (i * 2);
}
hantro_reg_write(vpu, &av1_lr_type, lr_type);
hantro_reg_write(vpu, &av1_lr_unit_size, lr_unit_size);
hantro_write_addr(vpu, AV1_LR_COL, av1_dec->lr_col.dma);
}
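/*
 * Derive the horizontal super-resolution scaling parameters. This
 * mirrors the fixed-point setup of the AV1 superres upscaling
 * process: the downscaled width is computed from upscaled_width and
 * superres_denom, then per-plane subpel steps, their inverses and
 * the initial subpel offsets are derived at RS_SCALE_SUBPEL_BITS
 * precision.
 */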
static void rockchip_vpu981_av1_dec_set_superres_params(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
struct hantro_dev *vpu = ctx->dev;
u8 superres_scale_denominator = SCALE_NUMERATOR;
int superres_luma_step = RS_SCALE_SUBPEL_BITS;
int superres_chroma_step = RS_SCALE_SUBPEL_BITS;
int superres_luma_step_invra = RS_SCALE_SUBPEL_BITS;
int superres_chroma_step_invra = RS_SCALE_SUBPEL_BITS;
int superres_init_luma_subpel_x = 0;
int superres_init_chroma_subpel_x = 0;
int superres_is_scaled = 0;
int min_w = min_t(uint32_t, 16, frame->upscaled_width);
int upscaled_luma, downscaled_luma;
int downscaled_chroma, upscaled_chroma;
int step_luma, step_chroma;
int err_luma, err_chroma;
int initial_luma, initial_chroma;
int width = 0;
if (frame->flags & V4L2_AV1_FRAME_FLAG_USE_SUPERRES)
superres_scale_denominator = frame->superres_denom;
if (superres_scale_denominator <= SCALE_NUMERATOR)
goto set_regs;
width = (frame->upscaled_width * SCALE_NUMERATOR +
(superres_scale_denominator / 2)) / superres_scale_denominator;
if (width < min_w)
width = min_w;
if (width == frame->upscaled_width)
goto set_regs;
superres_is_scaled = 1;
upscaled_luma = frame->upscaled_width;
downscaled_luma = width;
downscaled_chroma = (downscaled_luma + 1) >> 1;
upscaled_chroma = (upscaled_luma + 1) >> 1;
step_luma =
((downscaled_luma << RS_SCALE_SUBPEL_BITS) +
(upscaled_luma / 2)) / upscaled_luma;
step_chroma =
((downscaled_chroma << RS_SCALE_SUBPEL_BITS) +
(upscaled_chroma / 2)) / upscaled_chroma;
err_luma =
(upscaled_luma * step_luma)
- (downscaled_luma << RS_SCALE_SUBPEL_BITS);
err_chroma =
(upscaled_chroma * step_chroma)
- (downscaled_chroma << RS_SCALE_SUBPEL_BITS);
initial_luma =
((-((upscaled_luma - downscaled_luma) << (RS_SCALE_SUBPEL_BITS - 1))
+ upscaled_luma / 2)
/ upscaled_luma + (1 << (RS_SCALE_EXTRA_BITS - 1)) - err_luma / 2)
& RS_SCALE_SUBPEL_MASK;
initial_chroma =
((-((upscaled_chroma - downscaled_chroma) << (RS_SCALE_SUBPEL_BITS - 1))
+ upscaled_chroma / 2)
/ upscaled_chroma + (1 << (RS_SCALE_EXTRA_BITS - 1)) - err_chroma / 2)
& RS_SCALE_SUBPEL_MASK;
superres_luma_step = step_luma;
superres_chroma_step = step_chroma;
superres_luma_step_invra =
((upscaled_luma << RS_SCALE_SUBPEL_BITS) + (downscaled_luma / 2))
/ downscaled_luma;
superres_chroma_step_invra =
((upscaled_chroma << RS_SCALE_SUBPEL_BITS) + (downscaled_chroma / 2))
/ downscaled_chroma;
superres_init_luma_subpel_x = initial_luma;
superres_init_chroma_subpel_x = initial_chroma;
set_regs:
hantro_reg_write(vpu, &av1_superres_pic_width, frame->upscaled_width);
if (frame->flags & V4L2_AV1_FRAME_FLAG_USE_SUPERRES)
hantro_reg_write(vpu, &av1_scale_denom_minus9,
frame->superres_denom - SUPERRES_SCALE_DENOMINATOR_MIN);
else
hantro_reg_write(vpu, &av1_scale_denom_minus9, frame->superres_denom);
hantro_reg_write(vpu, &av1_superres_luma_step, superres_luma_step);
hantro_reg_write(vpu, &av1_superres_chroma_step, superres_chroma_step);
hantro_reg_write(vpu, &av1_superres_luma_step_invra,
superres_luma_step_invra);
hantro_reg_write(vpu, &av1_superres_chroma_step_invra,
superres_chroma_step_invra);
hantro_reg_write(vpu, &av1_superres_init_luma_subpel_x,
superres_init_luma_subpel_x);
hantro_reg_write(vpu, &av1_superres_init_chroma_subpel_x,
superres_init_chroma_subpel_x);
hantro_reg_write(vpu, &av1_superres_is_scaled, superres_is_scaled);
hantro_write_addr(vpu, AV1_SR_COL, av1_dec->sr_col.dma);
}
static void rockchip_vpu981_av1_dec_set_picture_dimensions(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
struct hantro_dev *vpu = ctx->dev;
int pic_width_in_cbs = DIV_ROUND_UP(frame->frame_width_minus_1 + 1, 8);
int pic_height_in_cbs = DIV_ROUND_UP(frame->frame_height_minus_1 + 1, 8);
int pic_width_pad = ALIGN(frame->frame_width_minus_1 + 1, 8)
- (frame->frame_width_minus_1 + 1);
int pic_height_pad = ALIGN(frame->frame_height_minus_1 + 1, 8)
- (frame->frame_height_minus_1 + 1);
hantro_reg_write(vpu, &av1_pic_width_in_cbs, pic_width_in_cbs);
hantro_reg_write(vpu, &av1_pic_height_in_cbs, pic_height_in_cbs);
hantro_reg_write(vpu, &av1_pic_width_pad, pic_width_pad);
hantro_reg_write(vpu, &av1_pic_height_pad, pic_height_pad);
rockchip_vpu981_av1_dec_set_superres_params(ctx);
}
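/*
 * Select up to three reference frames for temporal MV (motion field)
 * projection, following the same LAST/BWD/ALTREF2/ALTREF/LAST2 probe
 * order as the AV1 motion field estimation process, and program the
 * per-reference order-hint distances.
 */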
static void rockchip_vpu981_av1_dec_set_other_frames(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
struct hantro_dev *vpu = ctx->dev;
bool use_ref_frame_mvs =
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS);
int cur_frame_offset = frame->order_hint;
int alt_frame_offset = 0;
int gld_frame_offset = 0;
int bwd_frame_offset = 0;
int alt2_frame_offset = 0;
int refs_selected[3] = { 0, 0, 0 };
int cur_mi_cols = DIV_ROUND_UP(frame->frame_width_minus_1 + 1, 8);
int cur_mi_rows = DIV_ROUND_UP(frame->frame_height_minus_1 + 1, 8);
int cur_offset[V4L2_AV1_TOTAL_REFS_PER_FRAME - 1];
int cur_roffset[V4L2_AV1_TOTAL_REFS_PER_FRAME - 1];
int mf_types[3] = { 0, 0, 0 };
int ref_stamp = 2;
int ref_ind = 0;
int rf, idx;
alt_frame_offset = rockchip_vpu981_get_order_hint(ctx, ALT_BUF_IDX);
gld_frame_offset = rockchip_vpu981_get_order_hint(ctx, GLD_BUF_IDX);
bwd_frame_offset = rockchip_vpu981_get_order_hint(ctx, BWD_BUF_IDX);
alt2_frame_offset = rockchip_vpu981_get_order_hint(ctx, ALT2_BUF_IDX);
idx = rockchip_vpu981_get_frame_index(ctx, LST_BUF_IDX);
if (idx >= 0) {
int alt_frame_offset_in_lst =
av1_dec->frame_refs[idx].order_hints[V4L2_AV1_REF_ALTREF_FRAME];
bool is_lst_overlay =
(alt_frame_offset_in_lst == gld_frame_offset);
if (!is_lst_overlay) {
int lst_mi_cols = av1_dec->frame_refs[idx].mi_cols;
int lst_mi_rows = av1_dec->frame_refs[idx].mi_rows;
bool lst_intra_only =
IS_INTRA(av1_dec->frame_refs[idx].frame_type);
if (lst_mi_cols == cur_mi_cols &&
lst_mi_rows == cur_mi_rows && !lst_intra_only) {
mf_types[ref_ind] = V4L2_AV1_REF_LAST_FRAME;
refs_selected[ref_ind++] = LST_BUF_IDX;
}
}
ref_stamp--;
}
idx = rockchip_vpu981_get_frame_index(ctx, BWD_BUF_IDX);
if (rockchip_vpu981_av1_dec_get_dist(ctx, bwd_frame_offset, cur_frame_offset) > 0) {
int bwd_mi_cols = av1_dec->frame_refs[idx].mi_cols;
int bwd_mi_rows = av1_dec->frame_refs[idx].mi_rows;
bool bwd_intra_only =
IS_INTRA(av1_dec->frame_refs[idx].frame_type);
if (bwd_mi_cols == cur_mi_cols && bwd_mi_rows == cur_mi_rows &&
!bwd_intra_only) {
mf_types[ref_ind] = V4L2_AV1_REF_BWDREF_FRAME;
refs_selected[ref_ind++] = BWD_BUF_IDX;
ref_stamp--;
}
}
idx = rockchip_vpu981_get_frame_index(ctx, ALT2_BUF_IDX);
if (rockchip_vpu981_av1_dec_get_dist(ctx, alt2_frame_offset, cur_frame_offset) > 0) {
int alt2_mi_cols = av1_dec->frame_refs[idx].mi_cols;
int alt2_mi_rows = av1_dec->frame_refs[idx].mi_rows;
bool alt2_intra_only =
IS_INTRA(av1_dec->frame_refs[idx].frame_type);
if (alt2_mi_cols == cur_mi_cols && alt2_mi_rows == cur_mi_rows &&
!alt2_intra_only) {
mf_types[ref_ind] = V4L2_AV1_REF_ALTREF2_FRAME;
refs_selected[ref_ind++] = ALT2_BUF_IDX;
ref_stamp--;
}
}
idx = rockchip_vpu981_get_frame_index(ctx, ALT_BUF_IDX);
if (rockchip_vpu981_av1_dec_get_dist(ctx, alt_frame_offset, cur_frame_offset) > 0 &&
ref_stamp >= 0) {
int alt_mi_cols = av1_dec->frame_refs[idx].mi_cols;
int alt_mi_rows = av1_dec->frame_refs[idx].mi_rows;
bool alt_intra_only =
IS_INTRA(av1_dec->frame_refs[idx].frame_type);
if (alt_mi_cols == cur_mi_cols && alt_mi_rows == cur_mi_rows &&
!alt_intra_only) {
mf_types[ref_ind] = V4L2_AV1_REF_ALTREF_FRAME;
refs_selected[ref_ind++] = ALT_BUF_IDX;
ref_stamp--;
}
}
idx = rockchip_vpu981_get_frame_index(ctx, LST2_BUF_IDX);
if (idx >= 0 && ref_stamp >= 0) {
int lst2_mi_cols = av1_dec->frame_refs[idx].mi_cols;
int lst2_mi_rows = av1_dec->frame_refs[idx].mi_rows;
bool lst2_intra_only =
IS_INTRA(av1_dec->frame_refs[idx].frame_type);
if (lst2_mi_cols == cur_mi_cols && lst2_mi_rows == cur_mi_rows &&
!lst2_intra_only) {
mf_types[ref_ind] = V4L2_AV1_REF_LAST2_FRAME;
refs_selected[ref_ind++] = LST2_BUF_IDX;
ref_stamp--;
}
}
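/*
 * Compute the signed order-hint distance between the current frame
 * and each reference, in both directions.
 */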
for (rf = 0; rf < V4L2_AV1_TOTAL_REFS_PER_FRAME - 1; ++rf) {
idx = rockchip_vpu981_get_frame_index(ctx, rf);
if (idx >= 0) {
int rf_order_hint = rockchip_vpu981_get_order_hint(ctx, rf);
cur_offset[rf] =
rockchip_vpu981_av1_dec_get_dist(ctx, cur_frame_offset, rf_order_hint);
cur_roffset[rf] =
rockchip_vpu981_av1_dec_get_dist(ctx, rf_order_hint, cur_frame_offset);
} else {
cur_offset[rf] = 0;
cur_roffset[rf] = 0;
}
}
hantro_reg_write(vpu, &av1_use_temporal0_mvs, 0);
hantro_reg_write(vpu, &av1_use_temporal1_mvs, 0);
hantro_reg_write(vpu, &av1_use_temporal2_mvs, 0);
hantro_reg_write(vpu, &av1_use_temporal3_mvs, 0);
hantro_reg_write(vpu, &av1_mf1_last_offset, 0);
hantro_reg_write(vpu, &av1_mf1_last2_offset, 0);
hantro_reg_write(vpu, &av1_mf1_last3_offset, 0);
hantro_reg_write(vpu, &av1_mf1_golden_offset, 0);
hantro_reg_write(vpu, &av1_mf1_bwdref_offset, 0);
hantro_reg_write(vpu, &av1_mf1_altref2_offset, 0);
hantro_reg_write(vpu, &av1_mf1_altref_offset, 0);
if (use_ref_frame_mvs && ref_ind > 0 &&
cur_offset[mf_types[0] - V4L2_AV1_REF_LAST_FRAME] <= MAX_FRAME_DISTANCE &&
cur_offset[mf_types[0] - V4L2_AV1_REF_LAST_FRAME] >= -MAX_FRAME_DISTANCE) {
int rf = rockchip_vpu981_get_order_hint(ctx, refs_selected[0]);
int idx = rockchip_vpu981_get_frame_index(ctx, refs_selected[0]);
u32 *oh = av1_dec->frame_refs[idx].order_hints;
int val;
hantro_reg_write(vpu, &av1_use_temporal0_mvs, 1);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST_FRAME]);
hantro_reg_write(vpu, &av1_mf1_last_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST2_FRAME]);
hantro_reg_write(vpu, &av1_mf1_last2_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST3_FRAME]);
hantro_reg_write(vpu, &av1_mf1_last3_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_GOLDEN_FRAME]);
hantro_reg_write(vpu, &av1_mf1_golden_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_BWDREF_FRAME]);
hantro_reg_write(vpu, &av1_mf1_bwdref_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF2_FRAME]);
hantro_reg_write(vpu, &av1_mf1_altref2_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF_FRAME]);
hantro_reg_write(vpu, &av1_mf1_altref_offset, val);
}
hantro_reg_write(vpu, &av1_mf2_last_offset, 0);
hantro_reg_write(vpu, &av1_mf2_last2_offset, 0);
hantro_reg_write(vpu, &av1_mf2_last3_offset, 0);
hantro_reg_write(vpu, &av1_mf2_golden_offset, 0);
hantro_reg_write(vpu, &av1_mf2_bwdref_offset, 0);
hantro_reg_write(vpu, &av1_mf2_altref2_offset, 0);
hantro_reg_write(vpu, &av1_mf2_altref_offset, 0);
if (use_ref_frame_mvs && ref_ind > 1 &&
cur_offset[mf_types[1] - V4L2_AV1_REF_LAST_FRAME] <= MAX_FRAME_DISTANCE &&
cur_offset[mf_types[1] - V4L2_AV1_REF_LAST_FRAME] >= -MAX_FRAME_DISTANCE) {
int rf = rockchip_vpu981_get_order_hint(ctx, refs_selected[1]);
int idx = rockchip_vpu981_get_frame_index(ctx, refs_selected[1]);
u32 *oh = av1_dec->frame_refs[idx].order_hints;
int val;
hantro_reg_write(vpu, &av1_use_temporal1_mvs, 1);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST_FRAME]);
hantro_reg_write(vpu, &av1_mf2_last_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST2_FRAME]);
hantro_reg_write(vpu, &av1_mf2_last2_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST3_FRAME]);
hantro_reg_write(vpu, &av1_mf2_last3_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_GOLDEN_FRAME]);
hantro_reg_write(vpu, &av1_mf2_golden_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_BWDREF_FRAME]);
hantro_reg_write(vpu, &av1_mf2_bwdref_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF2_FRAME]);
hantro_reg_write(vpu, &av1_mf2_altref2_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF_FRAME]);
hantro_reg_write(vpu, &av1_mf2_altref_offset, val);
}
hantro_reg_write(vpu, &av1_mf3_last_offset, 0);
hantro_reg_write(vpu, &av1_mf3_last2_offset, 0);
hantro_reg_write(vpu, &av1_mf3_last3_offset, 0);
hantro_reg_write(vpu, &av1_mf3_golden_offset, 0);
hantro_reg_write(vpu, &av1_mf3_bwdref_offset, 0);
hantro_reg_write(vpu, &av1_mf3_altref2_offset, 0);
hantro_reg_write(vpu, &av1_mf3_altref_offset, 0);
if (use_ref_frame_mvs && ref_ind > 2 &&
cur_offset[mf_types[2] - V4L2_AV1_REF_LAST_FRAME] <= MAX_FRAME_DISTANCE &&
cur_offset[mf_types[2] - V4L2_AV1_REF_LAST_FRAME] >= -MAX_FRAME_DISTANCE) {
int rf = rockchip_vpu981_get_order_hint(ctx, refs_selected[2]);
int idx = rockchip_vpu981_get_frame_index(ctx, refs_selected[2]);
u32 *oh = av1_dec->frame_refs[idx].order_hints;
int val;
hantro_reg_write(vpu, &av1_use_temporal2_mvs, 1);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST_FRAME]);
hantro_reg_write(vpu, &av1_mf3_last_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST2_FRAME]);
hantro_reg_write(vpu, &av1_mf3_last2_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_LAST3_FRAME]);
hantro_reg_write(vpu, &av1_mf3_last3_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_GOLDEN_FRAME]);
hantro_reg_write(vpu, &av1_mf3_golden_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_BWDREF_FRAME]);
hantro_reg_write(vpu, &av1_mf3_bwdref_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF2_FRAME]);
hantro_reg_write(vpu, &av1_mf3_altref2_offset, val);
val = rockchip_vpu981_av1_dec_get_dist(ctx, rf, oh[V4L2_AV1_REF_ALTREF_FRAME]);
hantro_reg_write(vpu, &av1_mf3_altref_offset, val);
}
hantro_reg_write(vpu, &av1_cur_last_offset, cur_offset[0]);
hantro_reg_write(vpu, &av1_cur_last2_offset, cur_offset[1]);
hantro_reg_write(vpu, &av1_cur_last3_offset, cur_offset[2]);
hantro_reg_write(vpu, &av1_cur_golden_offset, cur_offset[3]);
hantro_reg_write(vpu, &av1_cur_bwdref_offset, cur_offset[4]);
hantro_reg_write(vpu, &av1_cur_altref2_offset, cur_offset[5]);
hantro_reg_write(vpu, &av1_cur_altref_offset, cur_offset[6]);
hantro_reg_write(vpu, &av1_cur_last_roffset, cur_roffset[0]);
hantro_reg_write(vpu, &av1_cur_last2_roffset, cur_roffset[1]);
hantro_reg_write(vpu, &av1_cur_last3_roffset, cur_roffset[2]);
hantro_reg_write(vpu, &av1_cur_golden_roffset, cur_roffset[3]);
hantro_reg_write(vpu, &av1_cur_bwdref_roffset, cur_roffset[4]);
hantro_reg_write(vpu, &av1_cur_altref2_roffset, cur_roffset[5]);
hantro_reg_write(vpu, &av1_cur_altref_roffset, cur_roffset[6]);
hantro_reg_write(vpu, &av1_mf1_type, mf_types[0] - V4L2_AV1_REF_LAST_FRAME);
hantro_reg_write(vpu, &av1_mf2_type, mf_types[1] - V4L2_AV1_REF_LAST_FRAME);
hantro_reg_write(vpu, &av1_mf3_type, mf_types[2] - V4L2_AV1_REF_LAST_FRAME);
}
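/*
 * Program the reference-frame set: the number of distinct reference
 * buffers, per-reference dimensions and sign bias, and the global
 * motion mode for each of the seven AV1 reference slots. With intra
 * block copy the current frame serves as its own reference.
 */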
static void rockchip_vpu981_av1_dec_set_reference_frames(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_frame *frame = ctrls->frame;
int frame_type = frame->frame_type;
bool allow_intrabc = !!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC);
int ref_count[AV1DEC_MAX_PIC_BUFFERS] = { 0 };
struct hantro_dev *vpu = ctx->dev;
int i, ref_frames = 0;
bool scale_enable = false;
if (IS_INTRA(frame_type) && !allow_intrabc)
return;
if (!allow_intrabc) {
for (i = 0; i < V4L2_AV1_REFS_PER_FRAME; i++) {
int idx = rockchip_vpu981_get_frame_index(ctx, i);
if (idx >= 0)
ref_count[idx]++;
}
for (i = 0; i < AV1DEC_MAX_PIC_BUFFERS; i++) {
if (ref_count[i])
ref_frames++;
}
} else {
ref_frames = 1;
}
hantro_reg_write(vpu, &av1_ref_frames, ref_frames);
rockchip_vpu981_av1_dec_set_frame_sign_bias(ctx);
for (i = V4L2_AV1_REF_LAST_FRAME; i < V4L2_AV1_TOTAL_REFS_PER_FRAME; i++) {
u32 ref = i - 1;
int idx = 0;
int width, height;
if (allow_intrabc) {
idx = av1_dec->current_frame_index;
width = frame->frame_width_minus_1 + 1;
height = frame->frame_height_minus_1 + 1;
} else {
if (rockchip_vpu981_get_frame_index(ctx, ref) > 0)
idx = rockchip_vpu981_get_frame_index(ctx, ref);
width = av1_dec->frame_refs[idx].width;
height = av1_dec->frame_refs[idx].height;
}
scale_enable |=
rockchip_vpu981_av1_dec_set_ref(ctx, ref, idx, width,
height);
rockchip_vpu981_av1_dec_set_sign_bias(ctx, ref,
av1_dec->ref_frame_sign_bias[i]);
}
hantro_reg_write(vpu, &av1_ref_scaling_enable, scale_enable);
hantro_reg_write(vpu, &av1_ref0_gm_mode,
frame->global_motion.type[V4L2_AV1_REF_LAST_FRAME]);
hantro_reg_write(vpu, &av1_ref1_gm_mode,
frame->global_motion.type[V4L2_AV1_REF_LAST2_FRAME]);
hantro_reg_write(vpu, &av1_ref2_gm_mode,
frame->global_motion.type[V4L2_AV1_REF_LAST3_FRAME]);
hantro_reg_write(vpu, &av1_ref3_gm_mode,
frame->global_motion.type[V4L2_AV1_REF_GOLDEN_FRAME]);
hantro_reg_write(vpu, &av1_ref4_gm_mode,
frame->global_motion.type[V4L2_AV1_REF_BWDREF_FRAME]);
hantro_reg_write(vpu, &av1_ref5_gm_mode,
frame->global_motion.type[V4L2_AV1_REF_ALTREF2_FRAME]);
hantro_reg_write(vpu, &av1_ref6_gm_mode,
frame->global_motion.type[V4L2_AV1_REF_ALTREF_FRAME]);
rockchip_vpu981_av1_dec_set_other_frames(ctx);
}
static void rockchip_vpu981_av1_dec_set_parameters(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
hantro_reg_write(vpu, &av1_skip_mode,
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_SKIP_MODE_PRESENT));
hantro_reg_write(vpu, &av1_tempor_mvp_e,
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_USE_REF_FRAME_MVS));
hantro_reg_write(vpu, &av1_delta_lf_res_log,
ctrls->frame->loop_filter.delta_lf_res);
hantro_reg_write(vpu, &av1_delta_lf_multi,
!!(ctrls->frame->loop_filter.flags
& V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_MULTI));
hantro_reg_write(vpu, &av1_delta_lf_present,
!!(ctrls->frame->loop_filter.flags
& V4L2_AV1_LOOP_FILTER_FLAG_DELTA_LF_PRESENT));
hantro_reg_write(vpu, &av1_disable_cdf_update,
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_DISABLE_CDF_UPDATE));
hantro_reg_write(vpu, &av1_allow_warp,
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_WARPED_MOTION));
hantro_reg_write(vpu, &av1_show_frame,
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_SHOW_FRAME));
hantro_reg_write(vpu, &av1_switchable_motion_mode,
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_IS_MOTION_MODE_SWITCHABLE));
hantro_reg_write(vpu, &av1_enable_cdef,
!!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_CDEF));
hantro_reg_write(vpu, &av1_allow_masked_compound,
!!(ctrls->sequence->flags
& V4L2_AV1_SEQUENCE_FLAG_ENABLE_MASKED_COMPOUND));
hantro_reg_write(vpu, &av1_allow_interintra,
!!(ctrls->sequence->flags
& V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTERINTRA_COMPOUND));
hantro_reg_write(vpu, &av1_enable_intra_edge_filter,
!!(ctrls->sequence->flags
& V4L2_AV1_SEQUENCE_FLAG_ENABLE_INTRA_EDGE_FILTER));
hantro_reg_write(vpu, &av1_allow_filter_intra,
!!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_FILTER_INTRA));
hantro_reg_write(vpu, &av1_enable_jnt_comp,
!!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_JNT_COMP));
hantro_reg_write(vpu, &av1_enable_dual_filter,
!!(ctrls->sequence->flags & V4L2_AV1_SEQUENCE_FLAG_ENABLE_DUAL_FILTER));
hantro_reg_write(vpu, &av1_reduced_tx_set_used,
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_REDUCED_TX_SET));
hantro_reg_write(vpu, &av1_allow_screen_content_tools,
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS));
hantro_reg_write(vpu, &av1_allow_intrabc,
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_INTRABC));
if (!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_SCREEN_CONTENT_TOOLS))
hantro_reg_write(vpu, &av1_force_interger_mv, 0);
else
hantro_reg_write(vpu, &av1_force_interger_mv,
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_FORCE_INTEGER_MV));
hantro_reg_write(vpu, &av1_blackwhite_e, 0);
hantro_reg_write(vpu, &av1_delta_q_res_log, ctrls->frame->quantization.delta_q_res);
hantro_reg_write(vpu, &av1_delta_q_present,
!!(ctrls->frame->quantization.flags
& V4L2_AV1_QUANTIZATION_FLAG_DELTA_Q_PRESENT));
hantro_reg_write(vpu, &av1_idr_pic_e, !ctrls->frame->frame_type);
hantro_reg_write(vpu, &av1_quant_base_qindex, ctrls->frame->quantization.base_q_idx);
hantro_reg_write(vpu, &av1_bit_depth_y_minus8, ctx->bit_depth - 8);
hantro_reg_write(vpu, &av1_bit_depth_c_minus8, ctx->bit_depth - 8);
hantro_reg_write(vpu, &av1_mcomp_filt_type, ctrls->frame->interpolation_filter);
hantro_reg_write(vpu, &av1_high_prec_mv_e,
!!(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_ALLOW_HIGH_PRECISION_MV));
hantro_reg_write(vpu, &av1_comp_pred_mode,
(ctrls->frame->flags & V4L2_AV1_FRAME_FLAG_REFERENCE_SELECT) ? 2 : 0);
hantro_reg_write(vpu, &av1_transform_mode, (ctrls->frame->tx_mode == 1) ? 3 : 4);
hantro_reg_write(vpu, &av1_max_cb_size,
(ctrls->sequence->flags
& V4L2_AV1_SEQUENCE_FLAG_USE_128X128_SUPERBLOCK) ? 7 : 6);
hantro_reg_write(vpu, &av1_min_cb_size, 3);
hantro_reg_write(vpu, &av1_comp_pred_fixed_ref, 0);
hantro_reg_write(vpu, &av1_comp_pred_var_ref0_av1, 0);
hantro_reg_write(vpu, &av1_comp_pred_var_ref1_av1, 0);
hantro_reg_write(vpu, &av1_filt_level_seg0, 0);
hantro_reg_write(vpu, &av1_filt_level_seg1, 0);
hantro_reg_write(vpu, &av1_filt_level_seg2, 0);
hantro_reg_write(vpu, &av1_filt_level_seg3, 0);
hantro_reg_write(vpu, &av1_filt_level_seg4, 0);
hantro_reg_write(vpu, &av1_filt_level_seg5, 0);
hantro_reg_write(vpu, &av1_filt_level_seg6, 0);
hantro_reg_write(vpu, &av1_filt_level_seg7, 0);
hantro_reg_write(vpu, &av1_qp_delta_y_dc_av1, ctrls->frame->quantization.delta_q_y_dc);
hantro_reg_write(vpu, &av1_qp_delta_ch_dc_av1, ctrls->frame->quantization.delta_q_u_dc);
hantro_reg_write(vpu, &av1_qp_delta_ch_ac_av1, ctrls->frame->quantization.delta_q_u_ac);
if (ctrls->frame->quantization.flags & V4L2_AV1_QUANTIZATION_FLAG_USING_QMATRIX) {
hantro_reg_write(vpu, &av1_qmlevel_y, ctrls->frame->quantization.qm_y);
hantro_reg_write(vpu, &av1_qmlevel_u, ctrls->frame->quantization.qm_u);
hantro_reg_write(vpu, &av1_qmlevel_v, ctrls->frame->quantization.qm_v);
} else {
hantro_reg_write(vpu, &av1_qmlevel_y, 0xff);
hantro_reg_write(vpu, &av1_qmlevel_u, 0xff);
hantro_reg_write(vpu, &av1_qmlevel_v, 0xff);
}
hantro_reg_write(vpu, &av1_lossless_e, rockchip_vpu981_av1_dec_is_lossless(ctx));
hantro_reg_write(vpu, &av1_quant_delta_v_dc, ctrls->frame->quantization.delta_q_v_dc);
hantro_reg_write(vpu, &av1_quant_delta_v_ac, ctrls->frame->quantization.delta_q_v_ac);
hantro_reg_write(vpu, &av1_skip_ref0,
(ctrls->frame->skip_mode_frame[0]) ? ctrls->frame->skip_mode_frame[0] : 1);
hantro_reg_write(vpu, &av1_skip_ref1,
(ctrls->frame->skip_mode_frame[1]) ? ctrls->frame->skip_mode_frame[1] : 1);
hantro_write_addr(vpu, AV1_MC_SYNC_CURR, av1_dec->tile_buf.dma);
hantro_write_addr(vpu, AV1_MC_SYNC_LEFT, av1_dec->tile_buf.dma);
}
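/*
 * Program the bitstream input. The stream base address handed to the
 * hardware is aligned down to 16 bytes; the sub-16-byte remainder of
 * the first tile offset is expressed as a bit offset instead.
 */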
static void
rockchip_vpu981_av1_dec_set_input_buffer(struct hantro_ctx *ctx,
struct vb2_v4l2_buffer *vb2_src)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_av1_dec_ctrls *ctrls = &av1_dec->ctrls;
const struct v4l2_ctrl_av1_tile_group_entry *group_entry =
ctrls->tile_group_entry;
struct hantro_dev *vpu = ctx->dev;
dma_addr_t src_dma;
u32 src_len, src_buf_len;
int start_bit, offset;
src_dma = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
src_len = vb2_get_plane_payload(&vb2_src->vb2_buf, 0);
src_buf_len = vb2_plane_size(&vb2_src->vb2_buf, 0);
start_bit = (group_entry[0].tile_offset & 0xf) * 8;
offset = group_entry[0].tile_offset & ~0xf;
hantro_reg_write(vpu, &av1_strm_buffer_len, src_buf_len);
hantro_reg_write(vpu, &av1_strm_start_bit, start_bit);
hantro_reg_write(vpu, &av1_stream_len, src_len);
hantro_reg_write(vpu, &av1_strm_start_offset, 0);
hantro_write_addr(vpu, AV1_INPUT_STREAM, src_dma + offset);
}
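/*
 * Program the decoded-frame output: luma, chroma and motion vector
 * data share one buffer, with chroma placed at the end of the luma
 * plane and the MV data after the chroma plane.
 */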
static void
rockchip_vpu981_av1_dec_set_output_buffer(struct hantro_ctx *ctx)
{
struct hantro_av1_dec_hw_ctx *av1_dec = &ctx->av1_dec;
struct hantro_dev *vpu = ctx->dev;
struct hantro_decoded_buffer *dst;
struct vb2_v4l2_buffer *vb2_dst;
dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
size_t cr_offset = rockchip_vpu981_av1_dec_luma_size(ctx);
size_t mv_offset = rockchip_vpu981_av1_dec_chroma_size(ctx);
vb2_dst = av1_dec->frame_refs[av1_dec->current_frame_index].vb2_ref;
dst = vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
chroma_addr = luma_addr + cr_offset;
mv_addr = luma_addr + mv_offset;
hantro_write_addr(vpu, AV1_TILE_OUT_LU, luma_addr);
hantro_write_addr(vpu, AV1_TILE_OUT_CH, chroma_addr);
hantro_write_addr(vpu, AV1_TILE_OUT_MV, mv_addr);
}
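/*
 * Top-level per-frame decode entry point: prepare the run, update the
 * reference bookkeeping, program all parameter groups and the stream
 * buffers, then start the core.
 */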
int rockchip_vpu981_av1_dec_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *vb2_src;
int ret;
hantro_start_prepare_run(ctx);
ret = rockchip_vpu981_av1_dec_prepare_run(ctx);
if (ret)
goto prepare_error;
vb2_src = hantro_get_src_buf(ctx);
if (!vb2_src) {
ret = -EINVAL;
goto prepare_error;
}
rockchip_vpu981_av1_dec_clean_refs(ctx);
rockchip_vpu981_av1_dec_frame_ref(ctx, vb2_src->vb2_buf.timestamp);
rockchip_vpu981_av1_dec_set_parameters(ctx);
rockchip_vpu981_av1_dec_set_global_model(ctx);
rockchip_vpu981_av1_dec_set_tile_info(ctx);
rockchip_vpu981_av1_dec_set_reference_frames(ctx);
rockchip_vpu981_av1_dec_set_segmentation(ctx);
rockchip_vpu981_av1_dec_set_loopfilter(ctx);
rockchip_vpu981_av1_dec_set_picture_dimensions(ctx);
rockchip_vpu981_av1_dec_set_cdef(ctx);
rockchip_vpu981_av1_dec_set_lr(ctx);
rockchip_vpu981_av1_dec_set_fgs(ctx);
rockchip_vpu981_av1_dec_set_prob(ctx);
hantro_reg_write(vpu, &av1_dec_mode, AV1_DEC_MODE);
hantro_reg_write(vpu, &av1_dec_out_ec_byte_word, 0);
hantro_reg_write(vpu, &av1_write_mvs_e, 1);
hantro_reg_write(vpu, &av1_dec_out_ec_bypass, 1);
hantro_reg_write(vpu, &av1_dec_clk_gate_e, 1);
hantro_reg_write(vpu, &av1_dec_abort_e, 0);
hantro_reg_write(vpu, &av1_dec_tile_int_e, 0);
hantro_reg_write(vpu, &av1_dec_alignment, 64);
hantro_reg_write(vpu, &av1_apf_disable, 0);
hantro_reg_write(vpu, &av1_apf_threshold, 8);
hantro_reg_write(vpu, &av1_dec_buswidth, 2);
hantro_reg_write(vpu, &av1_dec_max_burst, 16);
hantro_reg_write(vpu, &av1_error_conceal_e, 0);
hantro_reg_write(vpu, &av1_axi_rd_ostd_threshold, 64);
hantro_reg_write(vpu, &av1_axi_wr_ostd_threshold, 64);
hantro_reg_write(vpu, &av1_ext_timeout_cycles, 0xfffffff);
hantro_reg_write(vpu, &av1_ext_timeout_override_e, 1);
hantro_reg_write(vpu, &av1_timeout_cycles, 0xfffffff);
hantro_reg_write(vpu, &av1_timeout_override_e, 1);
rockchip_vpu981_av1_dec_set_output_buffer(ctx);
rockchip_vpu981_av1_dec_set_input_buffer(ctx, vb2_src);
hantro_end_prepare_run(ctx);
hantro_reg_write(vpu, &av1_dec_e, 1);
return 0;
prepare_error:
hantro_end_prepare_run(ctx);
hantro_irq_done(vpu, VB2_BUF_STATE_ERROR);
return ret;
}
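/*
 * Enable the post-processor so that, alongside the decoder's native
 * output, the frame is also written out in the raw capture format
 * (P010 or NV12 here).
 */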
static void rockchip_vpu981_postproc_enable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
int width = ctx->dst_fmt.width;
int height = ctx->dst_fmt.height;
struct vb2_v4l2_buffer *vb2_dst;
size_t chroma_offset;
dma_addr_t dst_dma;
vb2_dst = hantro_get_dst_buf(ctx);
dst_dma = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
chroma_offset = ctx->dst_fmt.plane_fmt[0].bytesperline *
ctx->dst_fmt.height;
/* enable post processor */
hantro_reg_write(vpu, &av1_pp_out_e, 1);
hantro_reg_write(vpu, &av1_pp_in_format, 0);
hantro_reg_write(vpu, &av1_pp0_dup_hor, 1);
hantro_reg_write(vpu, &av1_pp0_dup_ver, 1);
hantro_reg_write(vpu, &av1_pp_in_height, height / 2);
hantro_reg_write(vpu, &av1_pp_in_width, width / 2);
hantro_reg_write(vpu, &av1_pp_out_height, height);
hantro_reg_write(vpu, &av1_pp_out_width, width);
hantro_reg_write(vpu, &av1_pp_out_y_stride,
ctx->dst_fmt.plane_fmt[0].bytesperline);
hantro_reg_write(vpu, &av1_pp_out_c_stride,
ctx->dst_fmt.plane_fmt[0].bytesperline);
switch (ctx->dst_fmt.pixelformat) {
case V4L2_PIX_FMT_P010:
hantro_reg_write(vpu, &av1_pp_out_format, 1);
break;
case V4L2_PIX_FMT_NV12:
hantro_reg_write(vpu, &av1_pp_out_format, 3);
break;
default:
hantro_reg_write(vpu, &av1_pp_out_format, 0);
}
hantro_reg_write(vpu, &av1_ppd_blend_exist, 0);
hantro_reg_write(vpu, &av1_ppd_dith_exist, 0);
hantro_reg_write(vpu, &av1_ablend_crop_e, 0);
hantro_reg_write(vpu, &av1_pp_format_customer1_e, 0);
hantro_reg_write(vpu, &av1_pp_crop_exist, 0);
hantro_reg_write(vpu, &av1_pp_up_level, 0);
hantro_reg_write(vpu, &av1_pp_down_level, 0);
hantro_reg_write(vpu, &av1_pp_exist, 0);
hantro_write_addr(vpu, AV1_PP_OUT_LU, dst_dma);
hantro_write_addr(vpu, AV1_PP_OUT_CH, dst_dma + chroma_offset);
}
static void rockchip_vpu981_postproc_disable(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
/* disable post processor */
hantro_reg_write(vpu, &av1_pp_out_e, 0);
}
const struct hantro_postproc_ops rockchip_vpu981_postproc_ops = {
.enable = rockchip_vpu981_postproc_enable,
.disable = rockchip_vpu981_postproc_disable,
};
| linux-master | drivers/media/platform/verisilicon/rockchip_vpu981_hw_av1_dec.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Rockchip RK3288 VPU codec driver
*
* Copyright (c) 2014 Rockchip Electronics Co., Ltd.
* Hertz Wong <[email protected]>
* Herman Chen <[email protected]>
*
* Copyright (C) 2014 Google, Inc.
* Tomasz Figa <[email protected]>
*/
#include <linux/types.h>
#include <linux/sort.h>
#include <media/v4l2-mem2mem.h>
#include "hantro_g1_regs.h"
#include "hantro_hw.h"
#include "hantro_v4l2.h"
static void set_params(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
{
const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
const struct v4l2_ctrl_h264_sps *sps = ctrls->sps;
const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
struct hantro_dev *vpu = ctx->dev;
u32 reg;
/* Decoder control register 0. */
reg = G1_REG_DEC_CTRL0_DEC_AXI_AUTO;
if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
reg |= G1_REG_DEC_CTRL0_SEQ_MBAFF_E;
if (sps->profile_idc > 66) {
reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
if (dec_param->nal_ref_idc)
reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
}
if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC))
reg |= G1_REG_DEC_CTRL0_PIC_INTERLACE_E;
if (dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)
reg |= G1_REG_DEC_CTRL0_PIC_FIELDMODE_E;
if (!(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD))
reg |= G1_REG_DEC_CTRL0_PIC_TOPFIELD_E;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
/* Decoder control register 1. */
reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width)) |
G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->src_fmt.height)) |
G1_REG_DEC_CTRL1_REF_FRAMES(sps->max_num_ref_frames);
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
/* Decoder control register 2. */
reg = G1_REG_DEC_CTRL2_CH_QP_OFFSET(pps->chroma_qp_index_offset) |
G1_REG_DEC_CTRL2_CH_QP_OFFSET2(pps->second_chroma_qp_index_offset);
if (pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT)
reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E;
if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
/* Decoder control register 3. */
reg = G1_REG_DEC_CTRL3_START_CODE_E |
G1_REG_DEC_CTRL3_INIT_QP(pps->pic_init_qp_minus26 + 26) |
G1_REG_DEC_CTRL3_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL3);
/* Decoder control register 4. */
reg = G1_REG_DEC_CTRL4_FRAMENUM_LEN(sps->log2_max_frame_num_minus4 + 4) |
G1_REG_DEC_CTRL4_FRAMENUM(dec_param->frame_num) |
G1_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(pps->weighted_bipred_idc);
if (pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE)
reg |= G1_REG_DEC_CTRL4_CABAC_E;
if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
reg |= G1_REG_DEC_CTRL4_DIR_8X8_INFER_E;
if (sps->profile_idc >= 100 && sps->chroma_format_idc == 0)
reg |= G1_REG_DEC_CTRL4_BLACKWHITE_E;
if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
reg |= G1_REG_DEC_CTRL4_WEIGHT_PRED_E;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL4);
/* Decoder control register 5. */
reg = G1_REG_DEC_CTRL5_REFPIC_MK_LEN(dec_param->dec_ref_pic_marking_bit_size) |
G1_REG_DEC_CTRL5_IDR_PIC_ID(dec_param->idr_pic_id);
if (pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED)
reg |= G1_REG_DEC_CTRL5_CONST_INTRA_E;
if (pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT)
reg |= G1_REG_DEC_CTRL5_FILT_CTRL_PRES;
if (pps->flags & V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT)
reg |= G1_REG_DEC_CTRL5_RDPIC_CNT_PRES;
if (pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE)
reg |= G1_REG_DEC_CTRL5_8X8TRANS_FLAG_E;
if (dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC)
reg |= G1_REG_DEC_CTRL5_IDR_PIC_E;
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL5);
/* Decoder control register 6. */
reg = G1_REG_DEC_CTRL6_PPS_ID(pps->pic_parameter_set_id) |
G1_REG_DEC_CTRL6_REFIDX0_ACTIVE(pps->num_ref_idx_l0_default_active_minus1 + 1) |
G1_REG_DEC_CTRL6_REFIDX1_ACTIVE(pps->num_ref_idx_l1_default_active_minus1 + 1) |
G1_REG_DEC_CTRL6_POC_LENGTH(dec_param->pic_order_cnt_bit_size);
vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL6);
/* Error concealment register. */
vdpu_write_relaxed(vpu, 0, G1_REG_ERR_CONC);
/* Prediction filter tap register. */
vdpu_write_relaxed(vpu,
G1_REG_PRED_FLT_PRED_BC_TAP_0_0(1) |
G1_REG_PRED_FLT_PRED_BC_TAP_0_1(-5 & 0x3ff) |
G1_REG_PRED_FLT_PRED_BC_TAP_0_2(20),
G1_REG_PRED_FLT);
/* Reference picture buffer control register. */
vdpu_write_relaxed(vpu, 0, G1_REG_REF_BUF_CTRL);
/* Reference picture buffer control register 2. */
vdpu_write_relaxed(vpu, G1_REG_REF_BUF_CTRL2_APF_THRESHOLD(8),
G1_REG_REF_BUF_CTRL2);
}
static void set_ref(struct hantro_ctx *ctx)
{
const struct v4l2_h264_reference *b0_reflist, *b1_reflist, *p_reflist;
struct hantro_dev *vpu = ctx->dev;
int reg_num;
u32 reg;
int i;
vdpu_write_relaxed(vpu, ctx->h264_dec.dpb_valid, G1_REG_VALID_REF);
vdpu_write_relaxed(vpu, ctx->h264_dec.dpb_longterm, G1_REG_LT_REF);
/*
* Set up reference frame picture numbers.
*
* Each G1_REG_REF_PIC(x) register contains the picture numbers
* of two consecutive reference pictures.
*/
for (i = 0; i < HANTRO_H264_DPB_SIZE; i += 2) {
reg = G1_REG_REF_PIC_REFER0_NBR(hantro_h264_get_ref_nbr(ctx, i)) |
G1_REG_REF_PIC_REFER1_NBR(hantro_h264_get_ref_nbr(ctx, i + 1));
vdpu_write_relaxed(vpu, reg, G1_REG_REF_PIC(i / 2));
}
b0_reflist = ctx->h264_dec.reflists.b0;
b1_reflist = ctx->h264_dec.reflists.b1;
p_reflist = ctx->h264_dec.reflists.p;
/*
* Each G1_REG_BD_REF_PIC(x) register contains three entries
* from each of the forward (B0) and backward (B1) reference
* picture lists.
*/
reg_num = 0;
for (i = 0; i < 15; i += 3) {
reg = G1_REG_BD_REF_PIC_BINIT_RLIST_F0(b0_reflist[i].index) |
G1_REG_BD_REF_PIC_BINIT_RLIST_F1(b0_reflist[i + 1].index) |
G1_REG_BD_REF_PIC_BINIT_RLIST_F2(b0_reflist[i + 2].index) |
G1_REG_BD_REF_PIC_BINIT_RLIST_B0(b1_reflist[i].index) |
G1_REG_BD_REF_PIC_BINIT_RLIST_B1(b1_reflist[i + 1].index) |
G1_REG_BD_REF_PIC_BINIT_RLIST_B2(b1_reflist[i + 2].index);
vdpu_write_relaxed(vpu, reg, G1_REG_BD_REF_PIC(reg_num++));
}
/*
* G1_REG_BD_P_REF_PIC register contains last entries (index 15)
* of forward and backward reference picture lists and first 4 entries
* of P forward picture list.
*/
reg = G1_REG_BD_P_REF_PIC_BINIT_RLIST_F15(b0_reflist[15].index) |
G1_REG_BD_P_REF_PIC_BINIT_RLIST_B15(b1_reflist[15].index) |
G1_REG_BD_P_REF_PIC_PINIT_RLIST_F0(p_reflist[0].index) |
G1_REG_BD_P_REF_PIC_PINIT_RLIST_F1(p_reflist[1].index) |
G1_REG_BD_P_REF_PIC_PINIT_RLIST_F2(p_reflist[2].index) |
G1_REG_BD_P_REF_PIC_PINIT_RLIST_F3(p_reflist[3].index);
vdpu_write_relaxed(vpu, reg, G1_REG_BD_P_REF_PIC);
/*
* Each G1_REG_FWD_PIC(x) register contains six consecutive
* entries of P forward picture list, starting from index 4.
*/
reg_num = 0;
for (i = 4; i < HANTRO_H264_DPB_SIZE; i += 6) {
reg = G1_REG_FWD_PIC_PINIT_RLIST_F0(p_reflist[i].index) |
G1_REG_FWD_PIC_PINIT_RLIST_F1(p_reflist[i + 1].index) |
G1_REG_FWD_PIC_PINIT_RLIST_F2(p_reflist[i + 2].index) |
G1_REG_FWD_PIC_PINIT_RLIST_F3(p_reflist[i + 3].index) |
G1_REG_FWD_PIC_PINIT_RLIST_F4(p_reflist[i + 4].index) |
G1_REG_FWD_PIC_PINIT_RLIST_F5(p_reflist[i + 5].index);
vdpu_write_relaxed(vpu, reg, G1_REG_FWD_PIC(reg_num++));
}
/* Set up addresses of DPB buffers. */
for (i = 0; i < HANTRO_H264_DPB_SIZE; i++) {
dma_addr_t dma_addr = hantro_h264_get_ref_buf(ctx, i);
vdpu_write_relaxed(vpu, dma_addr, G1_REG_ADDR_REF(i));
}
}
static void set_buffers(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
{
const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
struct vb2_v4l2_buffer *dst_buf;
struct hantro_dev *vpu = ctx->dev;
dma_addr_t src_dma, dst_dma;
size_t offset = 0;
/* Source (stream) buffer. */
src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
vdpu_write_relaxed(vpu, src_dma, G1_REG_ADDR_STR);
/* Destination (decoded frame) buffer. */
dst_buf = hantro_get_dst_buf(ctx);
dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf);
/* Adjust dma addr to start at second line for bottom field */
if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
offset = ALIGN(ctx->src_fmt.width, MB_DIM);
vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DST);
/* Higher profiles require DMV buffer appended to reference frames. */
if (ctrls->sps->profile_idc > 66 && ctrls->decode->nal_ref_idc) {
unsigned int bytes_per_mb = 384;
/* The DMV buffer for monochrome streams starts directly after the Y plane */
if (ctrls->sps->profile_idc >= 100 &&
ctrls->sps->chroma_format_idc == 0)
bytes_per_mb = 256;
offset = bytes_per_mb * MB_WIDTH(ctx->src_fmt.width) *
MB_HEIGHT(ctx->src_fmt.height);
/*
* DMV buffer is split in two for field encoded frames,
* adjust offset for bottom field
*/
if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
offset += 32 * MB_WIDTH(ctx->src_fmt.width) *
MB_HEIGHT(ctx->src_fmt.height);
vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DIR_MV);
}
/* Auxiliary buffer prepared in hantro_g1_h264_dec_prepare_table(). */
vdpu_write_relaxed(vpu, ctx->h264_dec.priv.dma, G1_REG_ADDR_QTABLE);
}
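/*
 * Run one H.264 decode job: build the per-frame state, program the
 * parameter, reference and buffer registers, then start the hardware.
 */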
int hantro_g1_h264_dec_run(struct hantro_ctx *ctx)
{
struct hantro_dev *vpu = ctx->dev;
struct vb2_v4l2_buffer *src_buf;
int ret;
/* Prepare the H264 decoder context. */
ret = hantro_h264_dec_prepare_run(ctx);
if (ret)
return ret;
/* Configure hardware registers. */
src_buf = hantro_get_src_buf(ctx);
set_params(ctx, src_buf);
set_ref(ctx);
set_buffers(ctx, src_buf);
hantro_end_prepare_run(ctx);
/* Start decoding! */
vdpu_write_relaxed(vpu,
G1_REG_CONFIG_DEC_AXI_RD_ID(0xffu) |
G1_REG_CONFIG_DEC_TIMEOUT_E |
G1_REG_CONFIG_DEC_OUT_ENDIAN |
G1_REG_CONFIG_DEC_STRENDIAN_E |
G1_REG_CONFIG_DEC_MAX_BURST(16) |
G1_REG_CONFIG_DEC_OUTSWAP32_E |
G1_REG_CONFIG_DEC_INSWAP32_E |
G1_REG_CONFIG_DEC_STRSWAP32_E |
G1_REG_CONFIG_DEC_CLK_GATE_E,
G1_REG_CONFIG);
vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
return 0;
}
| linux-master | drivers/media/platform/verisilicon/hantro_g1_h264_dec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Support for the camera device found on Marvell MMP processors; known
* to work with the Armada 610 as used in the OLPC 1.75 system.
*
* Copyright 2011 Jonathan Corbet <[email protected]>
* Copyright 2018 Lubomir Rintel <[email protected]>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <linux/platform_data/media/mmp-camera.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include "mcam-core.h"
MODULE_ALIAS("platform:mmp-camera");
MODULE_AUTHOR("Jonathan Corbet <[email protected]>");
MODULE_LICENSE("GPL");
static char *mcam_clks[] = {"axi", "func", "phy"};
struct mmp_camera {
struct platform_device *pdev;
struct mcam_camera mcam;
struct list_head devlist;
struct clk *mipi_clk;
int irq;
};
static inline struct mmp_camera *mcam_to_cam(struct mcam_camera *mcam)
{
return container_of(mcam, struct mmp_camera, mcam);
}
/*
* Calculate the DPHY register values.
* Three DPHY registers are used:
* dphy[0] - CSI2_DPHY3
* dphy[1] - CSI2_DPHY5
* dphy[2] - CSI2_DPHY6
* CSI2_DPHY3 and CSI2_DPHY6 can either be set to a default value
* or be calculated dynamically.
*/
static void mmpcam_calc_dphy(struct mcam_camera *mcam)
{
struct mmp_camera *cam = mcam_to_cam(mcam);
struct mmp_camera_platform_data *pdata = cam->pdev->dev.platform_data;
struct device *dev = &cam->pdev->dev;
unsigned long tx_clk_esc;
/*
* If CSI2_DPHY3 is calculated dynamically,
* pdata->lane_clk should be already set
* either in the board driver statically
* or in the sensor driver dynamically.
*/
/*
* dphy[0] - CSI2_DPHY3:
* bit 0 ~ bit 7: HS Term Enable.
* Defines the time the DPHY waits before enabling
* the data lane termination after detecting that the
* sensor has driven the data lanes to the LP00 bridge state.
* The value is calculated by:
* (Max T(D_TERM_EN)/Period(DDR)) - 1
* bit 8 ~ bit 15: HS_SETTLE
* Time interval during which the HS
* receiver shall ignore any Data Lane
* HS transitions.
* The value has been calibrated on
* different boards. It seems to work well.
*
* For more detail, refer to the MIPI Alliance Specification
* for D-PHY for an explanation of HS-SETTLE and D-TERM-EN.
*/
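/*
 * Worked example (assuming lane_clk is in the units the board code
 * uses): with lane_clk = 400, the PXA910 formula gives
 * HS_SETTLE = 1 + 400 * 80 / 1000 = 33 and
 * HS Term Enable = 1 + 400 * 35 / 1000 = 15.
 */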
switch (pdata->dphy3_algo) {
case DPHY3_ALGO_PXA910:
/*
* Calculate CSI2_DPHY3 algo for PXA910
*/
pdata->dphy[0] =
(((1 + (pdata->lane_clk * 80) / 1000) & 0xff) << 8)
| (1 + pdata->lane_clk * 35 / 1000);
break;
case DPHY3_ALGO_PXA2128:
/*
* Calculate CSI2_DPHY3 algo for PXA2128
*/
pdata->dphy[0] =
(((2 + (pdata->lane_clk * 110) / 1000) & 0xff) << 8)
| (1 + pdata->lane_clk * 35 / 1000);
break;
default:
/*
* Use default CSI2_DPHY3 value for PXA688/PXA988
*/
dev_dbg(dev, "camera: use the default CSI2_DPHY3 value\n");
}
/*
* mipi_clk will never be changed, it is a fixed value on MMP
*/
if (IS_ERR(cam->mipi_clk))
return;
/* get the escape clk, this is hard coded */
clk_prepare_enable(cam->mipi_clk);
tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12;
clk_disable_unprepare(cam->mipi_clk);
/*
* dphy[2] - CSI2_DPHY6:
* bit 0 ~ bit 7: CK Term Enable
* Time for the Clock Lane receiver to enable the HS line
* termination. The value is calculated similarly with
* HS Term Enable
* bit 8 ~ bit 15: CK Settle
* Time interval during which the HS receiver shall ignore
* any Clock Lane HS transitions.
* The value is calibrated on the boards.
*/
pdata->dphy[2] =
((((534 * tx_clk_esc) / 2000 - 1) & 0xff) << 8)
| (((38 * tx_clk_esc) / 1000 - 1) & 0xff);
dev_dbg(dev, "camera: DPHY sets: dphy3=0x%x, dphy5=0x%x, dphy6=0x%x\n",
pdata->dphy[0], pdata->dphy[1], pdata->dphy[2]);
}
static irqreturn_t mmpcam_irq(int irq, void *data)
{
struct mcam_camera *mcam = data;
unsigned int irqs, handled;
spin_lock(&mcam->dev_lock);
irqs = mcam_reg_read(mcam, REG_IRQSTAT);
handled = mccic_irq(mcam, irqs);
spin_unlock(&mcam->dev_lock);
return IRQ_RETVAL(handled);
}
static void mcam_init_clk(struct mcam_camera *mcam)
{
unsigned int i;
for (i = 0; i < NR_MCAM_CLK; i++) {
if (mcam_clks[i] != NULL) {
/* Some clocks are not necessary on some boards.
 * We still try to run even if getting a clock fails.
 */
mcam->clk[i] = devm_clk_get(mcam->dev, mcam_clks[i]);
if (IS_ERR(mcam->clk[i]))
dev_warn(mcam->dev, "Could not get clk: %s\n",
mcam_clks[i]);
}
}
}
static int mmpcam_probe(struct platform_device *pdev)
{
struct mmp_camera *cam;
struct mcam_camera *mcam;
struct resource *res;
struct fwnode_handle *ep;
struct mmp_camera_platform_data *pdata;
struct v4l2_async_connection *asd;
int ret;
cam = devm_kzalloc(&pdev->dev, sizeof(*cam), GFP_KERNEL);
if (cam == NULL)
return -ENOMEM;
platform_set_drvdata(pdev, cam);
cam->pdev = pdev;
INIT_LIST_HEAD(&cam->devlist);
mcam = &cam->mcam;
mcam->calc_dphy = mmpcam_calc_dphy;
mcam->dev = &pdev->dev;
pdata = pdev->dev.platform_data;
if (pdata) {
mcam->mclk_src = pdata->mclk_src;
mcam->mclk_div = pdata->mclk_div;
mcam->bus_type = pdata->bus_type;
mcam->dphy = pdata->dphy;
mcam->lane = pdata->lane;
} else {
/*
* These are values that used to be hardcoded in mcam-core and
* work well on a OLPC XO 1.75 with a parallel bus sensor.
* If it turns out other setups make sense, the values should
* be obtained from the device tree.
*/
mcam->mclk_src = 3;
mcam->mclk_div = 2;
}
if (mcam->bus_type == V4L2_MBUS_CSI2_DPHY) {
cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
if (IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0)
return PTR_ERR(cam->mipi_clk);
}
mcam->mipi_enabled = false;
mcam->chip_id = MCAM_ARMADA610;
mcam->buffer_mode = B_DMA_sg;
strscpy(mcam->bus_info, "platform:mmp-camera", sizeof(mcam->bus_info));
spin_lock_init(&mcam->dev_lock);
/*
* Get our I/O memory.
*/
mcam->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mcam->regs))
return PTR_ERR(mcam->regs);
mcam->regs_size = resource_size(res);
mcam_init_clk(mcam);
/*
* Create a match of the sensor against its OF node.
*/
ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(pdev->dev.of_node),
NULL);
if (!ep)
return -ENODEV;
v4l2_async_nf_init(&mcam->notifier, &mcam->v4l2_dev);
asd = v4l2_async_nf_add_fwnode_remote(&mcam->notifier, ep,
struct v4l2_async_connection);
fwnode_handle_put(ep);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out;
}
/*
* Register the device with the core.
*/
ret = mccic_register(mcam);
if (ret)
goto out;
/*
* Add OF clock provider.
*/
ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get,
mcam->mclk);
if (ret) {
dev_err(&pdev->dev, "can't add DT clock provider\n");
goto out;
}
/*
* Finally, set up our IRQ now that the core is ready to
* deal with it.
*/
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto out;
cam->irq = ret;
ret = devm_request_irq(&pdev->dev, cam->irq, mmpcam_irq, IRQF_SHARED,
"mmp-camera", mcam);
if (ret)
goto out;
pm_runtime_enable(&pdev->dev);
return 0;
out:
mccic_shutdown(mcam);
return ret;
}
static void mmpcam_remove(struct platform_device *pdev)
{
struct mmp_camera *cam = platform_get_drvdata(pdev);
struct mcam_camera *mcam = &cam->mcam;
mccic_shutdown(mcam);
pm_runtime_force_suspend(mcam->dev);
}
/*
* Suspend/resume support.
*/
static int __maybe_unused mmpcam_runtime_resume(struct device *dev)
{
struct mmp_camera *cam = dev_get_drvdata(dev);
struct mcam_camera *mcam = &cam->mcam;
unsigned int i;
for (i = 0; i < NR_MCAM_CLK; i++) {
if (!IS_ERR(mcam->clk[i]))
clk_prepare_enable(mcam->clk[i]);
}
return 0;
}
static int __maybe_unused mmpcam_runtime_suspend(struct device *dev)
{
struct mmp_camera *cam = dev_get_drvdata(dev);
struct mcam_camera *mcam = &cam->mcam;
int i;
for (i = NR_MCAM_CLK - 1; i >= 0; i--) {
if (!IS_ERR(mcam->clk[i]))
clk_disable_unprepare(mcam->clk[i]);
}
return 0;
}
static int __maybe_unused mmpcam_suspend(struct device *dev)
{
struct mmp_camera *cam = dev_get_drvdata(dev);
if (!pm_runtime_suspended(dev))
mccic_suspend(&cam->mcam);
return 0;
}
static int __maybe_unused mmpcam_resume(struct device *dev)
{
struct mmp_camera *cam = dev_get_drvdata(dev);
if (!pm_runtime_suspended(dev))
return mccic_resume(&cam->mcam);
return 0;
}
static const struct dev_pm_ops mmpcam_pm_ops = {
SET_RUNTIME_PM_OPS(mmpcam_runtime_suspend, mmpcam_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(mmpcam_suspend, mmpcam_resume)
};
static const struct of_device_id mmpcam_of_match[] = {
{ .compatible = "marvell,mmp2-ccic", },
{},
};
MODULE_DEVICE_TABLE(of, mmpcam_of_match);
static struct platform_driver mmpcam_driver = {
.probe = mmpcam_probe,
.remove_new = mmpcam_remove,
.driver = {
.name = "mmp-camera",
.of_match_table = mmpcam_of_match,
.pm = &mmpcam_pm_ops,
}
};
module_platform_driver(mmpcam_driver);
| linux-master | drivers/media/platform/marvell/mmp-driver.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* A driver for the CMOS camera controller in the Marvell 88ALP01 "cafe"
* multifunction chip. Currently works with the Omnivision OV7670
* sensor.
*
* The data sheet for this device can be found at:
* http://wiki.laptop.org/images/5/5c/88ALP01_Datasheet_July_2007.pdf
*
* Copyright 2006-11 One Laptop Per Child Association, Inc.
* Copyright 2006-11 Jonathan Corbet <[email protected]>
* Copyright 2018 Lubomir Rintel <[email protected]>
*
* Written by Jonathan Corbet, [email protected].
*
* v4l2_device/v4l2_subdev conversion by:
* Copyright (C) 2009 Hans Verkuil <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/i2c/ov7670.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include "mcam-core.h"
#define CAFE_VERSION 0x000002
/*
* Parameters.
*/
MODULE_AUTHOR("Jonathan Corbet <[email protected]>");
MODULE_DESCRIPTION("Marvell 88ALP01 CMOS Camera Controller driver");
MODULE_LICENSE("GPL");
struct cafe_camera {
int registered; /* Fully initialized? */
struct mcam_camera mcam;
struct pci_dev *pdev;
struct i2c_adapter *i2c_adapter;
wait_queue_head_t smbus_wait; /* Waiting on i2c events */
};
/*
* Most of the camera controller registers are defined in mcam-core.h,
* but the Cafe platform has some additional registers of its own;
* they are described here.
*/
/*
* "General purpose register" has a couple of GPIOs used for sensor
* power and reset on OLPC XO 1.0 systems.
*/
#define REG_GPR 0xb4
#define GPR_C1EN 0x00000020 /* Pad 1 (power down) enable */
#define GPR_C0EN 0x00000010 /* Pad 0 (reset) enable */
#define GPR_C1 0x00000002 /* Control 1 value */
/*
* Control 0 is wired to reset on OLPC machines. For ov7x sensors,
* it is active low.
*/
#define GPR_C0 0x00000001 /* Control 0 value */
/*
* These registers control the SMBUS module for communicating
* with the sensor.
*/
#define REG_TWSIC0 0xb8 /* TWSI (smbus) control 0 */
#define TWSIC0_EN 0x00000001 /* TWSI enable */
#define TWSIC0_MODE 0x00000002 /* 1 = 16-bit, 0 = 8-bit */
#define TWSIC0_SID 0x000003fc /* Slave ID */
/*
* Subtle trickery: the slave ID field starts with bit 2. But the
* Linux i2c stack wants to treat the bottommost bit as a separate
* read/write bit, which is why slave IDs are usually presented
* >>1. For consistency with that behavior, we shift over three
* bits instead of two.
*/
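/* e.g. a 7-bit address of 0x21 lands in TWSIC0_SID as 0x21 << 3 = 0x108 */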
#define TWSIC0_SID_SHIFT 3
#define TWSIC0_CLKDIV 0x0007fc00 /* Clock divider */
#define TWSIC0_MASKACK 0x00400000 /* Mask ack from sensor */
#define TWSIC0_OVMAGIC 0x00800000 /* Make it work on OV sensors */
#define REG_TWSIC1 0xbc /* TWSI control 1 */
#define TWSIC1_DATA 0x0000ffff /* Data to/from camchip */
#define TWSIC1_ADDR 0x00ff0000 /* Address (register) */
#define TWSIC1_ADDR_SHIFT 16
#define TWSIC1_READ 0x01000000 /* Set for read op */
#define TWSIC1_WSTAT 0x02000000 /* Write status */
#define TWSIC1_RVALID 0x04000000 /* Read data valid */
#define TWSIC1_ERROR 0x08000000 /* Something screwed up */
/*
* Here's the weird global control registers
*/
#define REG_GL_CSR 0x3004 /* Control/status register */
#define GCSR_SRS 0x00000001 /* SW Reset set */
#define GCSR_SRC 0x00000002 /* SW Reset clear */
#define GCSR_MRS 0x00000004 /* Master reset set */
#define GCSR_MRC 0x00000008 /* HW Reset clear */
#define GCSR_CCIC_EN 0x00004000 /* CCIC Clock enable */
#define REG_GL_IMASK 0x300c /* Interrupt mask register */
#define GIMSK_CCIC_EN 0x00000004 /* CCIC Interrupt enable */
#define REG_GL_FCR 0x3038 /* GPIO functional control register */
#define GFCR_GPIO_ON 0x08 /* Camera GPIO enabled */
#define REG_GL_GPIOR 0x315c /* GPIO register */
#define GGPIO_OUT 0x80000 /* GPIO output */
#define GGPIO_VAL 0x00008 /* Output pin value */
#define REG_LEN (REG_GL_IMASK + 4)
/*
* Debugging and related.
*/
#define cam_err(cam, fmt, arg...) \
dev_err(&(cam)->pdev->dev, fmt, ##arg)
#define cam_warn(cam, fmt, arg...) \
dev_warn(&(cam)->pdev->dev, fmt, ##arg)
/* -------------------------------------------------------------------- */
/*
* The I2C/SMBUS interface to the camera itself starts here. The
* controller handles SMBUS itself, presenting a relatively simple register
* interface; all we have to do is to tell it where to route the data.
*/
#define CAFE_SMBUS_TIMEOUT (HZ) /* generous */
static int cafe_smbus_write_done(struct mcam_camera *mcam)
{
unsigned long flags;
int c1;
/*
* We must delay after the interrupt, or the controller gets confused
* and never does give us good status. Fortunately, we don't do this
* often.
*/
udelay(20);
spin_lock_irqsave(&mcam->dev_lock, flags);
c1 = mcam_reg_read(mcam, REG_TWSIC1);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
return (c1 & (TWSIC1_WSTAT|TWSIC1_ERROR)) != TWSIC1_WSTAT;
}
static int cafe_smbus_write_data(struct cafe_camera *cam,
u16 addr, u8 command, u8 value)
{
unsigned int rval;
unsigned long flags;
struct mcam_camera *mcam = &cam->mcam;
spin_lock_irqsave(&mcam->dev_lock, flags);
rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
/*
* Marvell sez set clkdiv to all 1's for now.
*/
rval |= TWSIC0_CLKDIV;
mcam_reg_write(mcam, REG_TWSIC0, rval);
(void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
rval = value | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
mcam_reg_write(mcam, REG_TWSIC1, rval);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
/* Unfortunately, reading TWSIC1 too soon after sending a command
* causes the device to die.
* Use a busy-wait because we often send a large quantity of small
* commands at once; using msleep() would cause a lot of context
* switches which take longer than 2 ms, resulting in noticeable
* boot-time and capture-start delays.
*/
mdelay(2);
/*
* Another sad fact is that sometimes, commands silently complete but
* cafe_smbus_write_done() never becomes aware of this.
* This happens at random and can apparently occur with any
* command.
* We don't understand why this is. We work around this issue
* with the timeout in the wait below, assuming that all commands
* complete within the timeout.
*/
wait_event_timeout(cam->smbus_wait, cafe_smbus_write_done(mcam),
CAFE_SMBUS_TIMEOUT);
spin_lock_irqsave(&mcam->dev_lock, flags);
rval = mcam_reg_read(mcam, REG_TWSIC1);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
if (rval & TWSIC1_WSTAT) {
cam_err(cam, "SMBUS write (%02x/%02x/%02x) timed out\n", addr,
command, value);
return -EIO;
}
if (rval & TWSIC1_ERROR) {
cam_err(cam, "SMBUS write (%02x/%02x/%02x) error\n", addr,
command, value);
return -EIO;
}
return 0;
}
static int cafe_smbus_read_done(struct mcam_camera *mcam)
{
unsigned long flags;
int c1;
/*
* We must delay after the interrupt, or the controller gets confused
* and never does give us good status. Fortunately, we don't do this
* often.
*/
udelay(20);
spin_lock_irqsave(&mcam->dev_lock, flags);
c1 = mcam_reg_read(mcam, REG_TWSIC1);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
return c1 & (TWSIC1_RVALID|TWSIC1_ERROR);
}
static int cafe_smbus_read_data(struct cafe_camera *cam,
u16 addr, u8 command, u8 *value)
{
unsigned int rval;
unsigned long flags;
struct mcam_camera *mcam = &cam->mcam;
spin_lock_irqsave(&mcam->dev_lock, flags);
rval = TWSIC0_EN | ((addr << TWSIC0_SID_SHIFT) & TWSIC0_SID);
rval |= TWSIC0_OVMAGIC; /* Make OV sensors work */
/*
	 * Marvell sez set clkdiv to all 1's for now.
*/
rval |= TWSIC0_CLKDIV;
mcam_reg_write(mcam, REG_TWSIC0, rval);
(void) mcam_reg_read(mcam, REG_TWSIC1); /* force write */
rval = TWSIC1_READ | ((command << TWSIC1_ADDR_SHIFT) & TWSIC1_ADDR);
mcam_reg_write(mcam, REG_TWSIC1, rval);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
wait_event_timeout(cam->smbus_wait,
cafe_smbus_read_done(mcam), CAFE_SMBUS_TIMEOUT);
spin_lock_irqsave(&mcam->dev_lock, flags);
rval = mcam_reg_read(mcam, REG_TWSIC1);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
if (rval & TWSIC1_ERROR) {
cam_err(cam, "SMBUS read (%02x/%02x) error\n", addr, command);
return -EIO;
}
if (!(rval & TWSIC1_RVALID)) {
cam_err(cam, "SMBUS read (%02x/%02x) timed out\n", addr,
command);
return -EIO;
}
*value = rval & 0xff;
return 0;
}
/*
* Perform a transfer over SMBUS. This thing is called under
* the i2c bus lock, so we shouldn't race with ourselves...
*/
static int cafe_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
unsigned short flags, char rw, u8 command,
int size, union i2c_smbus_data *data)
{
struct cafe_camera *cam = i2c_get_adapdata(adapter);
int ret = -EINVAL;
/*
	 * This interface would appear to do only byte data ops. OK,
	 * it can do word too, but the cam chip has no use for that.
*/
if (size != I2C_SMBUS_BYTE_DATA) {
cam_err(cam, "funky xfer size %d\n", size);
return -EINVAL;
}
if (rw == I2C_SMBUS_WRITE)
ret = cafe_smbus_write_data(cam, addr, command, data->byte);
else if (rw == I2C_SMBUS_READ)
ret = cafe_smbus_read_data(cam, addr, command, &data->byte);
return ret;
}
static void cafe_smbus_enable_irq(struct cafe_camera *cam)
{
unsigned long flags;
spin_lock_irqsave(&cam->mcam.dev_lock, flags);
mcam_reg_set_bit(&cam->mcam, REG_IRQMASK, TWSIIRQS);
spin_unlock_irqrestore(&cam->mcam.dev_lock, flags);
}
static u32 cafe_smbus_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_READ_BYTE_DATA |
I2C_FUNC_SMBUS_WRITE_BYTE_DATA;
}
static const struct i2c_algorithm cafe_smbus_algo = {
.smbus_xfer = cafe_smbus_xfer,
.functionality = cafe_smbus_func
};
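/*
 * Illustrative sketch only, not part of the driver: once the adapter
 * above is registered, a sensor client can read a register through the
 * standard SMBus API, which the I2C core routes into cafe_smbus_xfer().
 * The client pointer and the 0x0a register offset are hypothetical
 * placeholders.
 */
static int __maybe_unused cafe_smbus_example_read(struct i2c_client *client)
{
	int ret = i2c_smbus_read_byte_data(client, 0x0a); /* hypothetical register */

	if (ret < 0)
		return ret; /* error propagated from cafe_smbus_read_data() */
	return ret & 0xff; /* the register value */
}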
static int cafe_smbus_setup(struct cafe_camera *cam)
{
struct i2c_adapter *adap;
int ret;
adap = kzalloc(sizeof(*adap), GFP_KERNEL);
if (adap == NULL)
return -ENOMEM;
adap->owner = THIS_MODULE;
adap->algo = &cafe_smbus_algo;
strscpy(adap->name, "cafe_ccic", sizeof(adap->name));
adap->dev.parent = &cam->pdev->dev;
i2c_set_adapdata(adap, cam);
ret = i2c_add_adapter(adap);
if (ret) {
printk(KERN_ERR "Unable to register cafe i2c adapter\n");
kfree(adap);
return ret;
}
cam->i2c_adapter = adap;
cafe_smbus_enable_irq(cam);
return 0;
}
static void cafe_smbus_shutdown(struct cafe_camera *cam)
{
i2c_del_adapter(cam->i2c_adapter);
kfree(cam->i2c_adapter);
}
/*
* Controller-level stuff
*/
static void cafe_ctlr_init(struct mcam_camera *mcam)
{
unsigned long flags;
spin_lock_irqsave(&mcam->dev_lock, flags);
	/*
	 * Magic to bring up the hardware on the B-Test board: enable the
	 * camera GPIO and drive it high (the same writes that
	 * cafe_ctlr_power_up() performs).
	 */
	mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
	mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
/*
* Go through the dance needed to wake the device up.
* Note that these registers are global and shared
* with the NAND and SD devices. Interaction between the
* three still needs to be examined.
*/
mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRS|GCSR_MRS); /* Needed? */
mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRC);
mcam_reg_write(mcam, REG_GL_CSR, GCSR_SRC|GCSR_MRS);
/*
* Here we must wait a bit for the controller to come around.
*/
spin_unlock_irqrestore(&mcam->dev_lock, flags);
msleep(5);
spin_lock_irqsave(&mcam->dev_lock, flags);
mcam_reg_write(mcam, REG_GL_CSR, GCSR_CCIC_EN|GCSR_SRC|GCSR_MRC);
mcam_reg_set_bit(mcam, REG_GL_IMASK, GIMSK_CCIC_EN);
/*
* Mask all interrupts.
*/
mcam_reg_write(mcam, REG_IRQMASK, 0);
spin_unlock_irqrestore(&mcam->dev_lock, flags);
}
static int cafe_ctlr_power_up(struct mcam_camera *mcam)
{
/*
* Part one of the sensor dance: turn the global
* GPIO signal on.
*/
mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT|GGPIO_VAL);
/*
* Put the sensor into operational mode (assumes OLPC-style
* wiring). Control 0 is reset - set to 1 to operate.
* Control 1 is power down, set to 0 to operate.
*/
mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
return 0;
}
static void cafe_ctlr_power_down(struct mcam_camera *mcam)
{
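	/* Reverse the power-up dance: assert sensor powerdown, then drive the GPIO low */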
mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C1);
mcam_reg_write(mcam, REG_GL_FCR, GFCR_GPIO_ON);
mcam_reg_write(mcam, REG_GL_GPIOR, GGPIO_OUT);
}
/*
* The platform interrupt handler.
*/
static irqreturn_t cafe_irq(int irq, void *data)
{
struct cafe_camera *cam = data;
struct mcam_camera *mcam = &cam->mcam;
unsigned int irqs, handled;
spin_lock(&mcam->dev_lock);
irqs = mcam_reg_read(mcam, REG_IRQSTAT);
handled = cam->registered && mccic_irq(mcam, irqs);
if (irqs & TWSIIRQS) {
mcam_reg_write(mcam, REG_IRQSTAT, TWSIIRQS);
wake_up(&cam->smbus_wait);
handled = 1;
}
spin_unlock(&mcam->dev_lock);
return IRQ_RETVAL(handled);
}
/* -------------------------------------------------------------------------- */
static struct ov7670_config sensor_cfg = {
/*
* Exclude QCIF mode, because it only captures a tiny portion
* of the sensor FOV
*/
.min_width = 320,
.min_height = 240,
/*
* Set the clock speed for the XO 1; I don't believe this
* driver has ever run anywhere else.
*/
.clock_speed = 45,
.use_smbus = 1,
};
static struct i2c_board_info ov7670_info = {
.type = "ov7670",
.addr = 0x42 >> 1,
.platform_data = &sensor_cfg,
};
/* -------------------------------------------------------------------------- */
/*
* PCI interface stuff.
*/
static int cafe_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
struct cafe_camera *cam;
struct mcam_camera *mcam;
struct v4l2_async_connection *asd;
struct i2c_client *i2c_dev;
/*
* Start putting together one of our big camera structures.
*/
ret = -ENOMEM;
cam = kzalloc(sizeof(struct cafe_camera), GFP_KERNEL);
if (cam == NULL)
goto out;
pci_set_drvdata(pdev, cam);
cam->pdev = pdev;
mcam = &cam->mcam;
mcam->chip_id = MCAM_CAFE;
spin_lock_init(&mcam->dev_lock);
init_waitqueue_head(&cam->smbus_wait);
mcam->plat_power_up = cafe_ctlr_power_up;
mcam->plat_power_down = cafe_ctlr_power_down;
mcam->dev = &pdev->dev;
/*
* Vmalloc mode for buffers is traditional with this driver.
* We *might* be able to run DMA_contig, especially on a system
* with CMA in it.
*/
mcam->buffer_mode = B_vmalloc;
/*
* Get set up on the PCI bus.
*/
ret = pci_enable_device(pdev);
if (ret)
goto out_free;
pci_set_master(pdev);
ret = -EIO;
mcam->regs = pci_iomap(pdev, 0, 0);
if (!mcam->regs) {
printk(KERN_ERR "Unable to ioremap cafe-ccic regs\n");
goto out_disable;
}
mcam->regs_size = pci_resource_len(pdev, 0);
ret = request_irq(pdev->irq, cafe_irq, IRQF_SHARED, "cafe-ccic", cam);
if (ret)
goto out_iounmap;
/*
* Initialize the controller.
*/
cafe_ctlr_init(mcam);
/*
	 * Set up I2C/SMBUS communications. The sensor can attach within
	 * this call chain, so don't hold any lock that its probe path
	 * might need.
*/
ret = cafe_smbus_setup(cam);
if (ret)
goto out_pdown;
ret = v4l2_device_register(mcam->dev, &mcam->v4l2_dev);
if (ret)
goto out_smbus_shutdown;
v4l2_async_nf_init(&mcam->notifier, &mcam->v4l2_dev);
asd = v4l2_async_nf_add_i2c(&mcam->notifier,
i2c_adapter_id(cam->i2c_adapter),
ov7670_info.addr,
struct v4l2_async_connection);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out_v4l2_device_unregister;
}
ret = mccic_register(mcam);
if (ret)
goto out_v4l2_device_unregister;
clkdev_create(mcam->mclk, "xclk", "%d-%04x",
i2c_adapter_id(cam->i2c_adapter), ov7670_info.addr);
i2c_dev = i2c_new_client_device(cam->i2c_adapter, &ov7670_info);
if (IS_ERR(i2c_dev)) {
ret = PTR_ERR(i2c_dev);
goto out_mccic_shutdown;
}
cam->registered = 1;
return 0;
out_mccic_shutdown:
mccic_shutdown(mcam);
out_v4l2_device_unregister:
v4l2_device_unregister(&mcam->v4l2_dev);
out_smbus_shutdown:
cafe_smbus_shutdown(cam);
out_pdown:
cafe_ctlr_power_down(mcam);
free_irq(pdev->irq, cam);
out_iounmap:
pci_iounmap(pdev, mcam->regs);
out_disable:
pci_disable_device(pdev);
out_free:
kfree(cam);
out:
return ret;
}
/*
* Shut down an initialized device
*/
static void cafe_shutdown(struct cafe_camera *cam)
{
mccic_shutdown(&cam->mcam);
v4l2_device_unregister(&cam->mcam.v4l2_dev);
cafe_smbus_shutdown(cam);
free_irq(cam->pdev->irq, cam);
pci_iounmap(cam->pdev, cam->mcam.regs);
}
static void cafe_pci_remove(struct pci_dev *pdev)
{
struct cafe_camera *cam = pci_get_drvdata(pdev);
if (cam == NULL) {
printk(KERN_WARNING "pci_remove on unknown pdev %p\n", pdev);
return;
}
cafe_shutdown(cam);
kfree(cam);
}
/*
* Basic power management.
*/
static int __maybe_unused cafe_pci_suspend(struct device *dev)
{
struct cafe_camera *cam = dev_get_drvdata(dev);
mccic_suspend(&cam->mcam);
return 0;
}
static int __maybe_unused cafe_pci_resume(struct device *dev)
{
struct cafe_camera *cam = dev_get_drvdata(dev);
cafe_ctlr_init(&cam->mcam);
return mccic_resume(&cam->mcam);
}
static const struct pci_device_id cafe_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL,
PCI_DEVICE_ID_MARVELL_88ALP01_CCIC) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, cafe_ids);
static SIMPLE_DEV_PM_OPS(cafe_pci_pm_ops, cafe_pci_suspend, cafe_pci_resume);
static struct pci_driver cafe_pci_driver = {
.name = "cafe1000-ccic",
.id_table = cafe_ids,
.probe = cafe_pci_probe,
.remove = cafe_pci_remove,
.driver.pm = &cafe_pci_pm_ops,
};
static int __init cafe_init(void)
{
int ret;
printk(KERN_NOTICE "Marvell M88ALP01 'CAFE' Camera Controller version %d\n",
CAFE_VERSION);
ret = pci_register_driver(&cafe_pci_driver);
if (ret) {
printk(KERN_ERR "Unable to register cafe_ccic driver\n");
goto out;
}
ret = 0;
out:
return ret;
}
static void __exit cafe_exit(void)
{
pci_unregister_driver(&cafe_pci_driver);
}
module_init(cafe_init);
module_exit(cafe_exit);
| linux-master | drivers/media/platform/marvell/cafe-driver.c |
// SPDX-License-Identifier: GPL-2.0
/*
* The Marvell camera core. This device appears in a number of settings,
* so it needs platform-specific support outside of the core.
*
* Copyright 2011 Jonathan Corbet [email protected]
* Copyright 2018 Lubomir Rintel <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/videodev2.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-dma-sg.h>
#include "mcam-core.h"
#ifdef MCAM_MODE_VMALLOC
/*
* Internal DMA buffer management. Since the controller cannot do S/G I/O,
* we must have physically contiguous buffers to bring frames into.
* These parameters control how many buffers we use, whether we
* allocate them at load time (better chance of success, but nails down
* memory) or when somebody tries to use the camera (riskier), and,
* for load-time allocation, how big they should be.
*
* The controller can cycle through three buffers. We could use
* more by flipping pointers around, but it probably makes little
* sense.
*/
static bool alloc_bufs_at_read;
module_param(alloc_bufs_at_read, bool, 0444);
MODULE_PARM_DESC(alloc_bufs_at_read,
"Non-zero value causes DMA buffers to be allocated when the video capture device is read, rather than at module load time. This saves memory, but decreases the chances of successfully getting those buffers. This parameter is only used in the vmalloc buffer mode");
static int n_dma_bufs = 3;
module_param(n_dma_bufs, uint, 0644);
MODULE_PARM_DESC(n_dma_bufs,
"The number of DMA buffers to allocate. Can be either two (saves memory, makes timing tighter) or three.");
static int dma_buf_size = VGA_WIDTH * VGA_HEIGHT * 2; /* Worst case */
module_param(dma_buf_size, uint, 0444);
MODULE_PARM_DESC(dma_buf_size,
"The size of the allocated DMA buffers. If actual operating parameters require larger buffers, an attempt to reallocate will be made.");
#else /* MCAM_MODE_VMALLOC */
static const bool alloc_bufs_at_read;
static const int n_dma_bufs = 3; /* Used by S/G_PARM */
#endif /* MCAM_MODE_VMALLOC */
static bool flip;
module_param(flip, bool, 0444);
MODULE_PARM_DESC(flip,
"If set, the sensor will be instructed to flip the image vertically.");
static int buffer_mode = -1;
module_param(buffer_mode, int, 0444);
MODULE_PARM_DESC(buffer_mode,
"Set the buffer mode to be used; default is to go with what the platform driver asks for. Set to 0 for vmalloc, 1 for DMA contiguous.");
/*
* Status flags. Always manipulated with bit operations.
*/
#define CF_BUF0_VALID 0 /* Buffers valid - first three */
#define CF_BUF1_VALID 1
#define CF_BUF2_VALID 2
#define CF_DMA_ACTIVE 3 /* A frame is incoming */
#define CF_CONFIG_NEEDED 4 /* Must configure hardware */
#define CF_SINGLE_BUFFER 5 /* Running with a single buffer */
#define CF_SG_RESTART 6 /* SG restart needed */
#define CF_FRAME_SOF0 7 /* Frame 0 started */
#define CF_FRAME_SOF1 8
#define CF_FRAME_SOF2 9
#define sensor_call(cam, o, f, args...) \
v4l2_subdev_call(cam->sensor, o, f, ##args)
#define notifier_to_mcam(notifier) \
container_of(notifier, struct mcam_camera, notifier)
static struct mcam_format_struct {
__u32 pixelformat;
int bpp; /* Bytes per pixel */
bool planar;
u32 mbus_code;
} mcam_formats[] = {
{
.pixelformat = V4L2_PIX_FMT_YUYV,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
.planar = false,
},
{
.pixelformat = V4L2_PIX_FMT_YVYU,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
.planar = false,
},
{
.pixelformat = V4L2_PIX_FMT_YUV420,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 1,
.planar = true,
},
{
.pixelformat = V4L2_PIX_FMT_YVU420,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 1,
.planar = true,
},
{
.pixelformat = V4L2_PIX_FMT_XRGB444,
.mbus_code = MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE,
.bpp = 2,
.planar = false,
},
{
.pixelformat = V4L2_PIX_FMT_RGB565,
.mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE,
.bpp = 2,
.planar = false,
},
{
.pixelformat = V4L2_PIX_FMT_SBGGR8,
.mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
.bpp = 1,
.planar = false,
},
};
#define N_MCAM_FMTS ARRAY_SIZE(mcam_formats)
static struct mcam_format_struct *mcam_find_format(u32 pixelformat)
{
unsigned i;
for (i = 0; i < N_MCAM_FMTS; i++)
if (mcam_formats[i].pixelformat == pixelformat)
return mcam_formats + i;
/* Not found? Then return the first format. */
return mcam_formats;
}
/*
* The default format we use until somebody says otherwise.
*/
static const struct v4l2_pix_format mcam_def_pix_format = {
.width = VGA_WIDTH,
.height = VGA_HEIGHT,
.pixelformat = V4L2_PIX_FMT_YUYV,
.field = V4L2_FIELD_NONE,
.bytesperline = VGA_WIDTH*2,
.sizeimage = VGA_WIDTH*VGA_HEIGHT*2,
.colorspace = V4L2_COLORSPACE_SRGB,
};
static const u32 mcam_def_mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
/*
 * The two-word DMA descriptor format used by the Armada 610 and the like.
 * There is a three-word format as well (set C1_DESC_3WORD) where the third
 * word is a pointer to the next descriptor, but we don't use it. Two-word
 * descriptors have to be contiguous in memory.
 */
struct mcam_dma_desc {
u32 dma_addr;
u32 segment_len;
};
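/*
 * Back-of-the-envelope sketch, not used by the driver itself: an upper
 * bound on how many two-word descriptors one frame can need, matching
 * the sizing used in mcam_vb_sg_buf_init() below (at most one
 * page-sized segment per descriptor, plus one for slop).
 */
static inline int mcam_max_dma_descs(size_t sizeimage)
{
	return sizeimage / PAGE_SIZE + 1;
}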
/*
* Our buffer type for working with videobuf2. Note that the vb2
* developers have decreed that struct vb2_v4l2_buffer must be at the
* beginning of this structure.
*/
struct mcam_vb_buffer {
struct vb2_v4l2_buffer vb_buf;
struct list_head queue;
struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
dma_addr_t dma_desc_pa; /* Descriptor physical address */
};
static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
{
return container_of(vb, struct mcam_vb_buffer, vb_buf);
}
/*
* Hand a completed buffer back to user space.
*/
static void mcam_buffer_done(struct mcam_camera *cam, int frame,
struct vb2_v4l2_buffer *vbuf)
{
vbuf->vb2_buf.planes[0].bytesused = cam->pix_format.sizeimage;
vbuf->sequence = cam->buf_seq[frame];
vbuf->field = V4L2_FIELD_NONE;
vbuf->vb2_buf.timestamp = ktime_get_ns();
vb2_set_plane_payload(&vbuf->vb2_buf, 0, cam->pix_format.sizeimage);
vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
}
/*
* Debugging and related.
*/
#define cam_err(cam, fmt, arg...) \
	dev_err((cam)->dev, fmt, ##arg)
#define cam_warn(cam, fmt, arg...) \
	dev_warn((cam)->dev, fmt, ##arg)
#define cam_dbg(cam, fmt, arg...) \
	dev_dbg((cam)->dev, fmt, ##arg)
/*
* Flag manipulation helpers
*/
static void mcam_reset_buffers(struct mcam_camera *cam)
{
int i;
cam->next_buf = -1;
for (i = 0; i < cam->nbufs; i++) {
clear_bit(i, &cam->flags);
clear_bit(CF_FRAME_SOF0 + i, &cam->flags);
}
}
static inline int mcam_needs_config(struct mcam_camera *cam)
{
return test_bit(CF_CONFIG_NEEDED, &cam->flags);
}
static void mcam_set_config_needed(struct mcam_camera *cam, int needed)
{
if (needed)
set_bit(CF_CONFIG_NEEDED, &cam->flags);
else
clear_bit(CF_CONFIG_NEEDED, &cam->flags);
}
/* ------------------------------------------------------------------- */
/*
* Make the controller start grabbing images. Everything must
* be set up before doing this.
*/
static void mcam_ctlr_start(struct mcam_camera *cam)
{
	/*
	 * set_bit performs a read, so no other barrier should be
	 * needed here.
	 */
mcam_reg_set_bit(cam, REG_CTRL0, C0_ENABLE);
}
static void mcam_ctlr_stop(struct mcam_camera *cam)
{
mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
}
static void mcam_enable_mipi(struct mcam_camera *mcam)
{
	/* MIPI mode: program the DPHY and enable the interface */
if (mcam->calc_dphy)
mcam->calc_dphy(mcam);
cam_dbg(mcam, "camera: DPHY3=0x%x, DPHY5=0x%x, DPHY6=0x%x\n",
mcam->dphy[0], mcam->dphy[1], mcam->dphy[2]);
mcam_reg_write(mcam, REG_CSI2_DPHY3, mcam->dphy[0]);
mcam_reg_write(mcam, REG_CSI2_DPHY5, mcam->dphy[1]);
mcam_reg_write(mcam, REG_CSI2_DPHY6, mcam->dphy[2]);
if (!mcam->mipi_enabled) {
if (mcam->lane > 4 || mcam->lane <= 0) {
cam_warn(mcam, "lane number error\n");
mcam->lane = 1; /* set the default value */
}
/*
		 * 0x41 activates 1 lane
		 * 0x43 activates 2 lanes
		 * 0x45 activates 3 lanes (never happens)
		 * 0x47 activates 4 lanes
*/
mcam_reg_write(mcam, REG_CSI2_CTRL0,
CSI2_C0_MIPI_EN | CSI2_C0_ACT_LANE(mcam->lane));
mcam->mipi_enabled = true;
}
}
static void mcam_disable_mipi(struct mcam_camera *mcam)
{
	/* Parallel mode: make sure the MIPI interface is disabled */
mcam_reg_write(mcam, REG_CSI2_CTRL0, 0x0);
mcam_reg_write(mcam, REG_CSI2_DPHY3, 0x0);
mcam_reg_write(mcam, REG_CSI2_DPHY5, 0x0);
mcam_reg_write(mcam, REG_CSI2_DPHY6, 0x0);
mcam->mipi_enabled = false;
}
static bool mcam_fmt_is_planar(__u32 pfmt)
{
struct mcam_format_struct *f;
f = mcam_find_format(pfmt);
return f->planar;
}
static void mcam_write_yuv_bases(struct mcam_camera *cam,
unsigned frame, dma_addr_t base)
{
struct v4l2_pix_format *fmt = &cam->pix_format;
u32 pixel_count = fmt->width * fmt->height;
dma_addr_t y, u = 0, v = 0;
y = base;
switch (fmt->pixelformat) {
case V4L2_PIX_FMT_YUV420:
u = y + pixel_count;
v = u + pixel_count / 4;
break;
case V4L2_PIX_FMT_YVU420:
v = y + pixel_count;
u = v + pixel_count / 4;
break;
default:
break;
}
mcam_reg_write(cam, REG_Y0BAR + frame * 4, y);
if (mcam_fmt_is_planar(fmt->pixelformat)) {
mcam_reg_write(cam, REG_U0BAR + frame * 4, u);
mcam_reg_write(cam, REG_V0BAR + frame * 4, v);
}
}
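/*
 * Worked example (illustrative): for VGA YUV420, pixel_count is
 * 640 * 480 = 307200, so the U plane starts 307200 bytes after Y and
 * the V plane a further 307200 / 4 = 76800 bytes after U.
 */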
/* ------------------------------------------------------------------- */
#ifdef MCAM_MODE_VMALLOC
/*
* Code specific to the vmalloc buffer mode.
*/
/*
* Allocate in-kernel DMA buffers for vmalloc mode.
*/
static int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
{
int i;
mcam_set_config_needed(cam, 1);
if (loadtime)
cam->dma_buf_size = dma_buf_size;
else
cam->dma_buf_size = cam->pix_format.sizeimage;
if (n_dma_bufs > 3)
n_dma_bufs = 3;
cam->nbufs = 0;
for (i = 0; i < n_dma_bufs; i++) {
cam->dma_bufs[i] = dma_alloc_coherent(cam->dev,
cam->dma_buf_size, cam->dma_handles + i,
GFP_KERNEL);
if (cam->dma_bufs[i] == NULL) {
cam_warn(cam, "Failed to allocate DMA buffer\n");
break;
}
(cam->nbufs)++;
}
switch (cam->nbufs) {
case 1:
dma_free_coherent(cam->dev, cam->dma_buf_size,
cam->dma_bufs[0], cam->dma_handles[0]);
cam->nbufs = 0;
fallthrough;
case 0:
cam_err(cam, "Insufficient DMA buffers, cannot operate\n");
return -ENOMEM;
case 2:
if (n_dma_bufs > 2)
cam_warn(cam, "Will limp along with only 2 buffers\n");
break;
}
return 0;
}
static void mcam_free_dma_bufs(struct mcam_camera *cam)
{
int i;
for (i = 0; i < cam->nbufs; i++) {
dma_free_coherent(cam->dev, cam->dma_buf_size,
cam->dma_bufs[i], cam->dma_handles[i]);
cam->dma_bufs[i] = NULL;
}
cam->nbufs = 0;
}
/*
* Set up DMA buffers when operating in vmalloc mode
*/
static void mcam_ctlr_dma_vmalloc(struct mcam_camera *cam)
{
/*
* Store the first two YUV buffers. Then either
* set the third if it exists, or tell the controller
* to just use two.
*/
mcam_write_yuv_bases(cam, 0, cam->dma_handles[0]);
mcam_write_yuv_bases(cam, 1, cam->dma_handles[1]);
if (cam->nbufs > 2) {
mcam_write_yuv_bases(cam, 2, cam->dma_handles[2]);
mcam_reg_clear_bit(cam, REG_CTRL1, C1_TWOBUFS);
} else
mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
if (cam->chip_id == MCAM_CAFE)
mcam_reg_write(cam, REG_UBAR, 0); /* 32 bits only */
}
/*
* Copy data out to user space in the vmalloc case
*/
static void mcam_frame_tasklet(struct tasklet_struct *t)
{
struct mcam_camera *cam = from_tasklet(cam, t, s_tasklet);
int i;
unsigned long flags;
struct mcam_vb_buffer *buf;
spin_lock_irqsave(&cam->dev_lock, flags);
for (i = 0; i < cam->nbufs; i++) {
int bufno = cam->next_buf;
if (cam->state != S_STREAMING || bufno < 0)
break; /* I/O got stopped */
if (++(cam->next_buf) >= cam->nbufs)
cam->next_buf = 0;
if (!test_bit(bufno, &cam->flags))
continue;
if (list_empty(&cam->buffers)) {
cam->frame_state.singles++;
break; /* Leave it valid, hope for better later */
}
cam->frame_state.delivered++;
clear_bit(bufno, &cam->flags);
buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
queue);
list_del_init(&buf->queue);
/*
* Drop the lock during the big copy. This *should* be safe...
*/
spin_unlock_irqrestore(&cam->dev_lock, flags);
memcpy(vb2_plane_vaddr(&buf->vb_buf.vb2_buf, 0),
cam->dma_bufs[bufno],
cam->pix_format.sizeimage);
mcam_buffer_done(cam, bufno, &buf->vb_buf);
spin_lock_irqsave(&cam->dev_lock, flags);
}
spin_unlock_irqrestore(&cam->dev_lock, flags);
}
/*
* Make sure our allocated buffers are up to the task.
*/
static int mcam_check_dma_buffers(struct mcam_camera *cam)
{
if (cam->nbufs > 0 && cam->dma_buf_size < cam->pix_format.sizeimage)
mcam_free_dma_bufs(cam);
if (cam->nbufs == 0)
return mcam_alloc_dma_bufs(cam, 0);
return 0;
}
static void mcam_vmalloc_done(struct mcam_camera *cam, int frame)
{
tasklet_schedule(&cam->s_tasklet);
}
#else /* MCAM_MODE_VMALLOC */
static inline int mcam_alloc_dma_bufs(struct mcam_camera *cam, int loadtime)
{
return 0;
}
static inline void mcam_free_dma_bufs(struct mcam_camera *cam)
{
return;
}
static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
{
return 0;
}
#endif /* MCAM_MODE_VMALLOC */
#ifdef MCAM_MODE_DMA_CONTIG
/* ---------------------------------------------------------------------- */
/*
* DMA-contiguous code.
*/
/*
* Set up a contiguous buffer for the given frame. Here also is where
* the underrun strategy is set: if there is no buffer available, reuse
* the buffer from the other BAR and set the CF_SINGLE_BUFFER flag to
* keep the interrupt handler from giving that buffer back to user
* space. In this way, we always have a buffer to DMA to and don't
* have to try to play games stopping and restarting the controller.
*/
static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
{
struct mcam_vb_buffer *buf;
dma_addr_t dma_handle;
struct vb2_v4l2_buffer *vb;
/*
* If there are no available buffers, go into single mode
*/
if (list_empty(&cam->buffers)) {
buf = cam->vb_bufs[frame ^ 0x1];
set_bit(CF_SINGLE_BUFFER, &cam->flags);
cam->frame_state.singles++;
} else {
/*
* OK, we have a buffer we can use.
*/
buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
queue);
list_del_init(&buf->queue);
clear_bit(CF_SINGLE_BUFFER, &cam->flags);
}
cam->vb_bufs[frame] = buf;
vb = &buf->vb_buf;
dma_handle = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
mcam_write_yuv_bases(cam, frame, dma_handle);
}
/*
* Initial B_DMA_contig setup.
*/
static void mcam_ctlr_dma_contig(struct mcam_camera *cam)
{
mcam_reg_set_bit(cam, REG_CTRL1, C1_TWOBUFS);
cam->nbufs = 2;
mcam_set_contig_buffer(cam, 0);
mcam_set_contig_buffer(cam, 1);
}
/*
* Frame completion handling.
*/
static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
{
struct mcam_vb_buffer *buf = cam->vb_bufs[frame];
if (!test_bit(CF_SINGLE_BUFFER, &cam->flags)) {
cam->frame_state.delivered++;
cam->vb_bufs[frame] = NULL;
mcam_buffer_done(cam, frame, &buf->vb_buf);
}
mcam_set_contig_buffer(cam, frame);
}
#endif /* MCAM_MODE_DMA_CONTIG */
#ifdef MCAM_MODE_DMA_SG
/* ---------------------------------------------------------------------- */
/*
* Scatter/gather-specific code.
*/
/*
* Set up the next buffer for S/G I/O; caller should be sure that
* the controller is stopped and a buffer is available.
*/
static void mcam_sg_next_buffer(struct mcam_camera *cam)
{
struct mcam_vb_buffer *buf;
struct sg_table *sg_table;
buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
list_del_init(&buf->queue);
sg_table = vb2_dma_sg_plane_desc(&buf->vb_buf.vb2_buf, 0);
/*
* Very Bad Not Good Things happen if you don't clear
* C1_DESC_ENA before making any descriptor changes.
*/
mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
mcam_reg_write(cam, REG_DESC_LEN_Y,
sg_table->nents * sizeof(struct mcam_dma_desc));
mcam_reg_write(cam, REG_DESC_LEN_U, 0);
mcam_reg_write(cam, REG_DESC_LEN_V, 0);
mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
cam->vb_bufs[0] = buf;
}
/*
* Initial B_DMA_sg setup
*/
static void mcam_ctlr_dma_sg(struct mcam_camera *cam)
{
/*
* The list-empty condition can hit us at resume time
* if the buffer list was empty when the system was suspended.
*/
if (list_empty(&cam->buffers)) {
set_bit(CF_SG_RESTART, &cam->flags);
return;
}
mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_3WORD);
mcam_sg_next_buffer(cam);
cam->nbufs = 3;
}
/*
* Frame completion with S/G is trickier. We can't muck with
* a descriptor chain on the fly, since the controller buffers it
* internally. So we have to actually stop and restart; Marvell
* says this is the way to do it.
*
* Of course, stopping is easier said than done; experience shows
* that the controller can start a frame *after* C0_ENABLE has been
* cleared. So when running in S/G mode, the controller is "stopped"
* on receipt of the start-of-frame interrupt. That means we can
* safely change the DMA descriptor array here and restart things
* (assuming there's another buffer waiting to go).
*/
static void mcam_dma_sg_done(struct mcam_camera *cam, int frame)
{
struct mcam_vb_buffer *buf = cam->vb_bufs[0];
/*
* If we're no longer supposed to be streaming, don't do anything.
*/
if (cam->state != S_STREAMING)
return;
/*
* If we have another buffer available, put it in and
* restart the engine.
*/
if (!list_empty(&cam->buffers)) {
mcam_sg_next_buffer(cam);
mcam_ctlr_start(cam);
/*
* Otherwise set CF_SG_RESTART and the controller will
* be restarted once another buffer shows up.
*/
} else {
set_bit(CF_SG_RESTART, &cam->flags);
cam->frame_state.singles++;
cam->vb_bufs[0] = NULL;
}
/*
* Now we can give the completed frame back to user space.
*/
cam->frame_state.delivered++;
mcam_buffer_done(cam, frame, &buf->vb_buf);
}
/*
* Scatter/gather mode requires stopping the controller between
* frames so we can put in a new DMA descriptor array. If no new
* buffer exists at frame completion, the controller is left stopped;
* this function is charged with getting things going again.
*/
static void mcam_sg_restart(struct mcam_camera *cam)
{
mcam_ctlr_dma_sg(cam);
mcam_ctlr_start(cam);
clear_bit(CF_SG_RESTART, &cam->flags);
}
#else /* MCAM_MODE_DMA_SG */
static inline void mcam_sg_restart(struct mcam_camera *cam)
{
return;
}
#endif /* MCAM_MODE_DMA_SG */
/* ---------------------------------------------------------------------- */
/*
* Buffer-mode-independent controller code.
*/
/*
* Image format setup
*/
static void mcam_ctlr_image(struct mcam_camera *cam)
{
struct v4l2_pix_format *fmt = &cam->pix_format;
u32 widthy = 0, widthuv = 0, imgsz_h, imgsz_w;
cam_dbg(cam, "camera: bytesperline = %d; height = %d\n",
fmt->bytesperline, fmt->sizeimage / fmt->bytesperline);
imgsz_h = (fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK;
imgsz_w = (fmt->width * 2) & IMGSZ_H_MASK;
switch (fmt->pixelformat) {
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
widthy = fmt->width * 2;
widthuv = 0;
break;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
widthy = fmt->width;
widthuv = fmt->width / 2;
break;
default:
widthy = fmt->bytesperline;
widthuv = 0;
break;
}
mcam_reg_write_mask(cam, REG_IMGPITCH, widthuv << 16 | widthy,
IMGP_YP_MASK | IMGP_UVP_MASK);
mcam_reg_write(cam, REG_IMGSIZE, imgsz_h | imgsz_w);
mcam_reg_write(cam, REG_IMGOFFSET, 0x0);
/*
* Tell the controller about the image format we are using.
*/
switch (fmt->pixelformat) {
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
mcam_reg_write_mask(cam, REG_CTRL0,
C0_DF_YUV | C0_YUV_420PL | C0_YUVE_VYUY, C0_DF_MASK);
break;
case V4L2_PIX_FMT_YUYV:
mcam_reg_write_mask(cam, REG_CTRL0,
C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_NOSWAP, C0_DF_MASK);
break;
case V4L2_PIX_FMT_YVYU:
mcam_reg_write_mask(cam, REG_CTRL0,
C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_SWAP24, C0_DF_MASK);
break;
case V4L2_PIX_FMT_XRGB444:
mcam_reg_write_mask(cam, REG_CTRL0,
C0_DF_RGB | C0_RGBF_444 | C0_RGB4_XBGR, C0_DF_MASK);
break;
case V4L2_PIX_FMT_RGB565:
mcam_reg_write_mask(cam, REG_CTRL0,
C0_DF_RGB | C0_RGBF_565 | C0_RGB5_BGGR, C0_DF_MASK);
break;
case V4L2_PIX_FMT_SBGGR8:
mcam_reg_write_mask(cam, REG_CTRL0,
C0_DF_RGB | C0_RGB5_GRBG, C0_DF_MASK);
break;
default:
cam_err(cam, "camera: unknown format: %#x\n", fmt->pixelformat);
break;
}
/*
* Make sure it knows we want to use hsync/vsync.
*/
mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC, C0_SIFM_MASK);
}
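/*
 * Worked example (illustrative): at VGA, packed YUYV programs a luma
 * pitch of widthy = 640 * 2 = 1280 bytes and no chroma pitch, while
 * planar YUV420 programs widthy = 640 and widthuv = 320.
 */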
/*
* Configure the controller for operation; caller holds the
* device mutex.
*/
static int mcam_ctlr_configure(struct mcam_camera *cam)
{
unsigned long flags;
spin_lock_irqsave(&cam->dev_lock, flags);
clear_bit(CF_SG_RESTART, &cam->flags);
cam->dma_setup(cam);
mcam_ctlr_image(cam);
mcam_set_config_needed(cam, 0);
spin_unlock_irqrestore(&cam->dev_lock, flags);
return 0;
}
static void mcam_ctlr_irq_enable(struct mcam_camera *cam)
{
/*
* Clear any pending interrupts, since we do not
* expect to have I/O active prior to enabling.
*/
mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS);
mcam_reg_set_bit(cam, REG_IRQMASK, FRAMEIRQS);
}
static void mcam_ctlr_irq_disable(struct mcam_camera *cam)
{
mcam_reg_clear_bit(cam, REG_IRQMASK, FRAMEIRQS);
}
/*
* Stop the controller, and don't return until we're really sure that no
* further DMA is going on.
*/
static void mcam_ctlr_stop_dma(struct mcam_camera *cam)
{
unsigned long flags;
/*
* Theory: stop the camera controller (whether it is operating
* or not). Delay briefly just in case we race with the SOF
* interrupt, then wait until no DMA is active.
*/
spin_lock_irqsave(&cam->dev_lock, flags);
clear_bit(CF_SG_RESTART, &cam->flags);
mcam_ctlr_stop(cam);
cam->state = S_IDLE;
spin_unlock_irqrestore(&cam->dev_lock, flags);
/*
* This is a brutally long sleep, but experience shows that
* it can take the controller a while to get the message that
* it needs to stop grabbing frames. In particular, we can
* sometimes (on mmp) get a frame at the end WITHOUT the
* start-of-frame indication.
*/
msleep(150);
if (test_bit(CF_DMA_ACTIVE, &cam->flags))
cam_err(cam, "Timeout waiting for DMA to end\n");
/* This would be bad news - what now? */
spin_lock_irqsave(&cam->dev_lock, flags);
mcam_ctlr_irq_disable(cam);
spin_unlock_irqrestore(&cam->dev_lock, flags);
}
/*
* Power up and down.
*/
static int mcam_ctlr_power_up(struct mcam_camera *cam)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cam->dev_lock, flags);
if (cam->plat_power_up) {
ret = cam->plat_power_up(cam);
if (ret) {
spin_unlock_irqrestore(&cam->dev_lock, flags);
return ret;
}
}
mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
spin_unlock_irqrestore(&cam->dev_lock, flags);
return 0;
}
static void mcam_ctlr_power_down(struct mcam_camera *cam)
{
unsigned long flags;
spin_lock_irqsave(&cam->dev_lock, flags);
/*
* School of hard knocks department: be sure we do any register
* twiddling on the controller *before* calling the platform
* power down routine.
*/
mcam_reg_set_bit(cam, REG_CTRL1, C1_PWRDWN);
if (cam->plat_power_down)
cam->plat_power_down(cam);
spin_unlock_irqrestore(&cam->dev_lock, flags);
}
/* ---------------------------------------------------------------------- */
/*
* Master sensor clock.
*/
static int mclk_prepare(struct clk_hw *hw)
{
struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
clk_prepare(cam->clk[0]);
return 0;
}
static void mclk_unprepare(struct clk_hw *hw)
{
struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
clk_unprepare(cam->clk[0]);
}
static int mclk_enable(struct clk_hw *hw)
{
struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
int mclk_src;
int mclk_div;
int ret;
/*
* Clock the sensor appropriately. Controller clock should
* be 48MHz, sensor "typical" value is half that.
*/
if (cam->bus_type == V4L2_MBUS_CSI2_DPHY) {
mclk_src = cam->mclk_src;
mclk_div = cam->mclk_div;
} else {
mclk_src = 3;
mclk_div = 2;
}
ret = pm_runtime_resume_and_get(cam->dev);
if (ret < 0)
return ret;
clk_enable(cam->clk[0]);
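	/* Bits 31:29 select the clock source; the low bits hold the divider */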
mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div);
mcam_ctlr_power_up(cam);
return 0;
}
static void mclk_disable(struct clk_hw *hw)
{
struct mcam_camera *cam = container_of(hw, struct mcam_camera, mclk_hw);
mcam_ctlr_power_down(cam);
clk_disable(cam->clk[0]);
pm_runtime_put(cam->dev);
}
static unsigned long mclk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
return 48000000;
}
static const struct clk_ops mclk_ops = {
.prepare = mclk_prepare,
.unprepare = mclk_unprepare,
.enable = mclk_enable,
.disable = mclk_disable,
.recalc_rate = mclk_recalc_rate,
};
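/*
 * Consumer-side sketch (hypothetical sensor code, not part of this
 * file): a clkdev lookup such as the "xclk" one created by the cafe
 * platform driver lets a sensor pick this clock up through the generic
 * clk API, which in turn lands in the mclk_ops above.
 */
static int __maybe_unused example_sensor_enable_mclk(struct device *dev)
{
	struct clk *xclk = devm_clk_get(dev, "xclk");

	if (IS_ERR(xclk))
		return PTR_ERR(xclk);
	return clk_prepare_enable(xclk); /* ends up in mclk_prepare()/mclk_enable() */
}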
/* -------------------------------------------------------------------- */
/*
* Communications with the sensor.
*/
static int __mcam_cam_reset(struct mcam_camera *cam)
{
return sensor_call(cam, core, reset, 0);
}
/*
* We have found the sensor on the i2c. Let's try to have a
* conversation.
*/
static int mcam_cam_init(struct mcam_camera *cam)
{
int ret;
if (cam->state != S_NOTREADY)
cam_warn(cam, "Cam init with device in funky state %d",
cam->state);
ret = __mcam_cam_reset(cam);
/* Get/set parameters? */
cam->state = S_IDLE;
return ret;
}
/*
* Configure the sensor to match the parameters we have. Caller should
* hold s_mutex
*/
static int mcam_cam_set_flip(struct mcam_camera *cam)
{
struct v4l2_control ctrl;
memset(&ctrl, 0, sizeof(ctrl));
ctrl.id = V4L2_CID_VFLIP;
ctrl.value = flip;
return v4l2_s_ctrl(NULL, cam->sensor->ctrl_handler, &ctrl);
}
static int mcam_cam_configure(struct mcam_camera *cam)
{
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
int ret;
v4l2_fill_mbus_format(&format.format, &cam->pix_format, cam->mbus_code);
ret = sensor_call(cam, core, init, 0);
if (ret == 0)
ret = sensor_call(cam, pad, set_fmt, NULL, &format);
/*
* OV7670 does weird things if flip is set *before* format...
*/
ret += mcam_cam_set_flip(cam);
return ret;
}
/*
* Get everything ready, and start grabbing frames.
*/
static int mcam_read_setup(struct mcam_camera *cam)
{
int ret;
unsigned long flags;
/*
* Configuration. If we still don't have DMA buffers,
* make one last, desperate attempt.
*/
if (cam->buffer_mode == B_vmalloc && cam->nbufs == 0 &&
mcam_alloc_dma_bufs(cam, 0))
return -ENOMEM;
if (mcam_needs_config(cam)) {
mcam_cam_configure(cam);
ret = mcam_ctlr_configure(cam);
if (ret)
return ret;
}
/*
* Turn it loose.
*/
spin_lock_irqsave(&cam->dev_lock, flags);
clear_bit(CF_DMA_ACTIVE, &cam->flags);
mcam_reset_buffers(cam);
if (cam->bus_type == V4L2_MBUS_CSI2_DPHY)
mcam_enable_mipi(cam);
else
mcam_disable_mipi(cam);
mcam_ctlr_irq_enable(cam);
cam->state = S_STREAMING;
if (!test_bit(CF_SG_RESTART, &cam->flags))
mcam_ctlr_start(cam);
spin_unlock_irqrestore(&cam->dev_lock, flags);
return 0;
}
/* ----------------------------------------------------------------------- */
/*
* Videobuf2 interface code.
*/
static int mcam_vb_queue_setup(struct vb2_queue *vq,
unsigned int *nbufs,
unsigned int *num_planes, unsigned int sizes[],
struct device *alloc_devs[])
{
struct mcam_camera *cam = vb2_get_drv_priv(vq);
int minbufs = (cam->buffer_mode == B_DMA_contig) ? 3 : 2;
unsigned size = cam->pix_format.sizeimage;
if (*nbufs < minbufs)
*nbufs = minbufs;
if (*num_planes)
return sizes[0] < size ? -EINVAL : 0;
sizes[0] = size;
*num_planes = 1; /* Someday we have to support planar formats... */
return 0;
}
static void mcam_vb_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
unsigned long flags;
int start;
spin_lock_irqsave(&cam->dev_lock, flags);
start = (cam->state == S_BUFWAIT) && !list_empty(&cam->buffers);
list_add(&mvb->queue, &cam->buffers);
if (cam->state == S_STREAMING && test_bit(CF_SG_RESTART, &cam->flags))
mcam_sg_restart(cam);
spin_unlock_irqrestore(&cam->dev_lock, flags);
if (start)
mcam_read_setup(cam);
}
static void mcam_vb_requeue_bufs(struct vb2_queue *vq,
enum vb2_buffer_state state)
{
struct mcam_camera *cam = vb2_get_drv_priv(vq);
struct mcam_vb_buffer *buf, *node;
unsigned long flags;
unsigned i;
spin_lock_irqsave(&cam->dev_lock, flags);
list_for_each_entry_safe(buf, node, &cam->buffers, queue) {
vb2_buffer_done(&buf->vb_buf.vb2_buf, state);
list_del(&buf->queue);
}
for (i = 0; i < MAX_DMA_BUFS; i++) {
buf = cam->vb_bufs[i];
if (buf) {
vb2_buffer_done(&buf->vb_buf.vb2_buf, state);
cam->vb_bufs[i] = NULL;
}
}
spin_unlock_irqrestore(&cam->dev_lock, flags);
}
/*
* These need to be called with the mutex held from vb2
*/
static int mcam_vb_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct mcam_camera *cam = vb2_get_drv_priv(vq);
unsigned int frame;
int ret;
if (cam->state != S_IDLE) {
mcam_vb_requeue_bufs(vq, VB2_BUF_STATE_QUEUED);
return -EINVAL;
}
cam->frame_state.frames = 0;
cam->frame_state.singles = 0;
cam->frame_state.delivered = 0;
cam->sequence = 0;
/*
* Videobuf2 sneakily hoards all the buffers and won't
* give them to us until *after* streaming starts. But
* we can't actually start streaming until we have a
* destination. So go into a wait state and hope they
* give us buffers soon.
*/
if (cam->buffer_mode != B_vmalloc && list_empty(&cam->buffers)) {
cam->state = S_BUFWAIT;
return 0;
}
/*
	 * Make sure any leftover frame flags are cleared
	 * before we really start streaming.
*/
for (frame = 0; frame < cam->nbufs; frame++)
clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
ret = mcam_read_setup(cam);
if (ret)
mcam_vb_requeue_bufs(vq, VB2_BUF_STATE_QUEUED);
return ret;
}
static void mcam_vb_stop_streaming(struct vb2_queue *vq)
{
struct mcam_camera *cam = vb2_get_drv_priv(vq);
cam_dbg(cam, "stop_streaming: %d frames, %d singles, %d delivered\n",
cam->frame_state.frames, cam->frame_state.singles,
cam->frame_state.delivered);
if (cam->state == S_BUFWAIT) {
/* They never gave us buffers */
cam->state = S_IDLE;
return;
}
if (cam->state != S_STREAMING)
return;
mcam_ctlr_stop_dma(cam);
/*
* VB2 reclaims the buffers, so we need to forget
* about them.
*/
mcam_vb_requeue_bufs(vq, VB2_BUF_STATE_ERROR);
}
static const struct vb2_ops mcam_vb2_ops = {
.queue_setup = mcam_vb_queue_setup,
.buf_queue = mcam_vb_buf_queue,
.start_streaming = mcam_vb_start_streaming,
.stop_streaming = mcam_vb_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
#ifdef MCAM_MODE_DMA_SG
/*
* Scatter/gather mode uses all of the above functions plus a
* few extras to deal with DMA mapping.
*/
static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
mvb->dma_desc = dma_alloc_coherent(cam->dev,
ndesc * sizeof(struct mcam_dma_desc),
&mvb->dma_desc_pa, GFP_KERNEL);
if (mvb->dma_desc == NULL) {
cam_err(cam, "Unable to get DMA descriptor array\n");
return -ENOMEM;
}
return 0;
}
static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
struct mcam_dma_desc *desc = mvb->dma_desc;
struct scatterlist *sg;
int i;
for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
desc->dma_addr = sg_dma_address(sg);
desc->segment_len = sg_dma_len(sg);
desc++;
}
return 0;
}
static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
struct mcam_vb_buffer *mvb = vb_to_mvb(vbuf);
int ndesc = cam->pix_format.sizeimage/PAGE_SIZE + 1;
dma_free_coherent(cam->dev, ndesc * sizeof(struct mcam_dma_desc),
mvb->dma_desc, mvb->dma_desc_pa);
}
static const struct vb2_ops mcam_vb2_sg_ops = {
.queue_setup = mcam_vb_queue_setup,
.buf_init = mcam_vb_sg_buf_init,
.buf_prepare = mcam_vb_sg_buf_prepare,
.buf_queue = mcam_vb_buf_queue,
.buf_cleanup = mcam_vb_sg_buf_cleanup,
.start_streaming = mcam_vb_start_streaming,
.stop_streaming = mcam_vb_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
#endif /* MCAM_MODE_DMA_SG */
static int mcam_setup_vb2(struct mcam_camera *cam)
{
struct vb2_queue *vq = &cam->vb_queue;
memset(vq, 0, sizeof(*vq));
vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vq->drv_priv = cam;
vq->lock = &cam->s_mutex;
vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
vq->dev = cam->dev;
INIT_LIST_HEAD(&cam->buffers);
switch (cam->buffer_mode) {
case B_DMA_contig:
#ifdef MCAM_MODE_DMA_CONTIG
vq->ops = &mcam_vb2_ops;
vq->mem_ops = &vb2_dma_contig_memops;
cam->dma_setup = mcam_ctlr_dma_contig;
cam->frame_complete = mcam_dma_contig_done;
#endif
break;
case B_DMA_sg:
#ifdef MCAM_MODE_DMA_SG
vq->ops = &mcam_vb2_sg_ops;
vq->mem_ops = &vb2_dma_sg_memops;
cam->dma_setup = mcam_ctlr_dma_sg;
cam->frame_complete = mcam_dma_sg_done;
#endif
break;
case B_vmalloc:
#ifdef MCAM_MODE_VMALLOC
tasklet_setup(&cam->s_tasklet, mcam_frame_tasklet);
vq->ops = &mcam_vb2_ops;
vq->mem_ops = &vb2_vmalloc_memops;
cam->dma_setup = mcam_ctlr_dma_vmalloc;
cam->frame_complete = mcam_vmalloc_done;
#endif
break;
}
return vb2_queue_init(vq);
}
/* ---------------------------------------------------------------------- */
/*
* The long list of V4L2 ioctl() operations.
*/
static int mcam_vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct mcam_camera *cam = video_drvdata(file);
strscpy(cap->driver, "marvell_ccic", sizeof(cap->driver));
strscpy(cap->card, "marvell_ccic", sizeof(cap->card));
strscpy(cap->bus_info, cam->bus_info, sizeof(cap->bus_info));
return 0;
}
static int mcam_vidioc_enum_fmt_vid_cap(struct file *filp,
void *priv, struct v4l2_fmtdesc *fmt)
{
if (fmt->index >= N_MCAM_FMTS)
return -EINVAL;
fmt->pixelformat = mcam_formats[fmt->index].pixelformat;
return 0;
}
static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
struct v4l2_format *fmt)
{
struct mcam_camera *cam = video_drvdata(filp);
struct mcam_format_struct *f;
struct v4l2_pix_format *pix = &fmt->fmt.pix;
struct v4l2_subdev_pad_config pad_cfg;
struct v4l2_subdev_state pad_state = {
.pads = &pad_cfg,
};
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_TRY,
};
int ret;
f = mcam_find_format(pix->pixelformat);
pix->pixelformat = f->pixelformat;
v4l2_fill_mbus_format(&format.format, pix, f->mbus_code);
ret = sensor_call(cam, pad, set_fmt, &pad_state, &format);
v4l2_fill_pix_format(pix, &format.format);
pix->bytesperline = pix->width * f->bpp;
switch (f->pixelformat) {
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
pix->sizeimage = pix->height * pix->bytesperline * 3 / 2;
break;
default:
pix->sizeimage = pix->height * pix->bytesperline;
break;
}
pix->colorspace = V4L2_COLORSPACE_SRGB;
return ret;
}
static int mcam_vidioc_s_fmt_vid_cap(struct file *filp, void *priv,
struct v4l2_format *fmt)
{
struct mcam_camera *cam = video_drvdata(filp);
struct mcam_format_struct *f;
int ret;
/*
* Can't do anything if the device is not idle
* Also can't if there are streaming buffers in place.
*/
if (cam->state != S_IDLE || vb2_is_busy(&cam->vb_queue))
return -EBUSY;
f = mcam_find_format(fmt->fmt.pix.pixelformat);
/*
* See if the formatting works in principle.
*/
ret = mcam_vidioc_try_fmt_vid_cap(filp, priv, fmt);
if (ret)
return ret;
/*
* Now we start to change things for real, so let's do it
* under lock.
*/
cam->pix_format = fmt->fmt.pix;
cam->mbus_code = f->mbus_code;
/*
* Make sure we have appropriate DMA buffers.
*/
if (cam->buffer_mode == B_vmalloc) {
ret = mcam_check_dma_buffers(cam);
if (ret)
goto out;
}
mcam_set_config_needed(cam, 1);
out:
return ret;
}
/*
* Return our stored notion of how the camera is/should be configured.
 * The V4L2 spec wants us to be smarter, and actually get this from
* the camera (and not mess with it at open time). Someday.
*/
static int mcam_vidioc_g_fmt_vid_cap(struct file *filp, void *priv,
struct v4l2_format *f)
{
struct mcam_camera *cam = video_drvdata(filp);
f->fmt.pix = cam->pix_format;
return 0;
}
/*
* We only have one input - the sensor - so minimize the nonsense here.
*/
static int mcam_vidioc_enum_input(struct file *filp, void *priv,
struct v4l2_input *input)
{
if (input->index != 0)
return -EINVAL;
input->type = V4L2_INPUT_TYPE_CAMERA;
strscpy(input->name, "Camera", sizeof(input->name));
return 0;
}
static int mcam_vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
{
*i = 0;
return 0;
}
static int mcam_vidioc_s_input(struct file *filp, void *priv, unsigned int i)
{
if (i != 0)
return -EINVAL;
return 0;
}
/*
* G/S_PARM. Most of this is done by the sensor, but we are
* the level which controls the number of read buffers.
*/
static int mcam_vidioc_g_parm(struct file *filp, void *priv,
struct v4l2_streamparm *a)
{
struct mcam_camera *cam = video_drvdata(filp);
int ret;
ret = v4l2_g_parm_cap(video_devdata(filp), cam->sensor, a);
a->parm.capture.readbuffers = n_dma_bufs;
return ret;
}
static int mcam_vidioc_s_parm(struct file *filp, void *priv,
struct v4l2_streamparm *a)
{
struct mcam_camera *cam = video_drvdata(filp);
int ret;
ret = v4l2_s_parm_cap(video_devdata(filp), cam->sensor, a);
a->parm.capture.readbuffers = n_dma_bufs;
return ret;
}
static int mcam_vidioc_enum_framesizes(struct file *filp, void *priv,
struct v4l2_frmsizeenum *sizes)
{
struct mcam_camera *cam = video_drvdata(filp);
struct mcam_format_struct *f;
struct v4l2_subdev_frame_size_enum fse = {
.index = sizes->index,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
int ret;
f = mcam_find_format(sizes->pixel_format);
if (f->pixelformat != sizes->pixel_format)
return -EINVAL;
fse.code = f->mbus_code;
ret = sensor_call(cam, pad, enum_frame_size, NULL, &fse);
if (ret)
return ret;
if (fse.min_width == fse.max_width &&
fse.min_height == fse.max_height) {
sizes->type = V4L2_FRMSIZE_TYPE_DISCRETE;
sizes->discrete.width = fse.min_width;
sizes->discrete.height = fse.min_height;
return 0;
}
sizes->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
sizes->stepwise.min_width = fse.min_width;
sizes->stepwise.max_width = fse.max_width;
sizes->stepwise.min_height = fse.min_height;
sizes->stepwise.max_height = fse.max_height;
sizes->stepwise.step_width = 1;
sizes->stepwise.step_height = 1;
return 0;
}
static int mcam_vidioc_enum_frameintervals(struct file *filp, void *priv,
struct v4l2_frmivalenum *interval)
{
struct mcam_camera *cam = video_drvdata(filp);
struct mcam_format_struct *f;
struct v4l2_subdev_frame_interval_enum fie = {
.index = interval->index,
.width = interval->width,
.height = interval->height,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
int ret;
f = mcam_find_format(interval->pixel_format);
if (f->pixelformat != interval->pixel_format)
return -EINVAL;
fie.code = f->mbus_code;
ret = sensor_call(cam, pad, enum_frame_interval, NULL, &fie);
if (ret)
return ret;
interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
interval->discrete = fie.interval;
return 0;
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
static int mcam_vidioc_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
struct mcam_camera *cam = video_drvdata(file);
if (reg->reg > cam->regs_size - 4)
return -EINVAL;
reg->val = mcam_reg_read(cam, reg->reg);
reg->size = 4;
return 0;
}
static int mcam_vidioc_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
struct mcam_camera *cam = video_drvdata(file);
if (reg->reg > cam->regs_size - 4)
return -EINVAL;
mcam_reg_write(cam, reg->reg, reg->val);
return 0;
}
#endif
static const struct v4l2_ioctl_ops mcam_v4l_ioctl_ops = {
.vidioc_querycap = mcam_vidioc_querycap,
.vidioc_enum_fmt_vid_cap = mcam_vidioc_enum_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = mcam_vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = mcam_vidioc_s_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = mcam_vidioc_g_fmt_vid_cap,
.vidioc_enum_input = mcam_vidioc_enum_input,
.vidioc_g_input = mcam_vidioc_g_input,
.vidioc_s_input = mcam_vidioc_s_input,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_g_parm = mcam_vidioc_g_parm,
.vidioc_s_parm = mcam_vidioc_s_parm,
.vidioc_enum_framesizes = mcam_vidioc_enum_framesizes,
.vidioc_enum_frameintervals = mcam_vidioc_enum_frameintervals,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register = mcam_vidioc_g_register,
.vidioc_s_register = mcam_vidioc_s_register,
#endif
};
/* ---------------------------------------------------------------------- */
/*
* Our various file operations.
*/
static int mcam_v4l_open(struct file *filp)
{
struct mcam_camera *cam = video_drvdata(filp);
int ret;
mutex_lock(&cam->s_mutex);
ret = v4l2_fh_open(filp);
if (ret)
goto out;
if (v4l2_fh_is_singular_file(filp)) {
ret = sensor_call(cam, core, s_power, 1);
if (ret)
goto out;
ret = pm_runtime_resume_and_get(cam->dev);
if (ret < 0)
goto out;
__mcam_cam_reset(cam);
mcam_set_config_needed(cam, 1);
}
out:
mutex_unlock(&cam->s_mutex);
if (ret)
v4l2_fh_release(filp);
return ret;
}
static int mcam_v4l_release(struct file *filp)
{
struct mcam_camera *cam = video_drvdata(filp);
bool last_open;
mutex_lock(&cam->s_mutex);
last_open = v4l2_fh_is_singular_file(filp);
_vb2_fop_release(filp, NULL);
if (last_open) {
mcam_disable_mipi(cam);
sensor_call(cam, core, s_power, 0);
pm_runtime_put(cam->dev);
if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
mcam_free_dma_bufs(cam);
}
mutex_unlock(&cam->s_mutex);
return 0;
}
static const struct v4l2_file_operations mcam_v4l_fops = {
.owner = THIS_MODULE,
.open = mcam_v4l_open,
.release = mcam_v4l_release,
.read = vb2_fop_read,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
};
/*
* This template device holds all of those v4l2 methods; we
* clone it for specific real devices.
*/
static const struct video_device mcam_v4l_template = {
.name = "mcam",
.fops = &mcam_v4l_fops,
.ioctl_ops = &mcam_v4l_ioctl_ops,
.release = video_device_release_empty,
.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING,
};
/* ---------------------------------------------------------------------- */
/*
* Interrupt handler stuff
*/
static void mcam_frame_complete(struct mcam_camera *cam, int frame)
{
/*
* Basic frame housekeeping.
*/
set_bit(frame, &cam->flags);
clear_bit(CF_DMA_ACTIVE, &cam->flags);
cam->next_buf = frame;
cam->buf_seq[frame] = cam->sequence++;
cam->frame_state.frames++;
/*
* "This should never happen"
*/
if (cam->state != S_STREAMING)
return;
/*
* Process the frame and set up the next one.
*/
cam->frame_complete(cam, frame);
}
/*
* The interrupt handler; this needs to be called from the
* platform irq handler with the lock held.
*/
int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
{
unsigned int frame, handled = 0;
mcam_reg_write(cam, REG_IRQSTAT, FRAMEIRQS); /* Clear'em all */
/*
* Handle any frame completions. There really should
* not be more than one of these, or we have fallen
* far behind.
*
* When running in S/G mode, the frame number lacks any
* real meaning - there's only one descriptor array - but
* the controller still picks a different one to signal
* each time.
*/
for (frame = 0; frame < cam->nbufs; frame++)
if (irqs & (IRQ_EOF0 << frame) &&
test_bit(CF_FRAME_SOF0 + frame, &cam->flags)) {
mcam_frame_complete(cam, frame);
handled = 1;
clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
if (cam->buffer_mode == B_DMA_sg)
break;
}
/*
* If a frame starts, note that we have DMA active. This
* code assumes that we won't get multiple frame interrupts
* at once; may want to rethink that.
*/
for (frame = 0; frame < cam->nbufs; frame++) {
if (irqs & (IRQ_SOF0 << frame)) {
set_bit(CF_FRAME_SOF0 + frame, &cam->flags);
handled = IRQ_HANDLED;
}
}
if (handled == IRQ_HANDLED) {
set_bit(CF_DMA_ACTIVE, &cam->flags);
if (cam->buffer_mode == B_DMA_sg)
mcam_ctlr_stop(cam);
}
return handled;
}
EXPORT_SYMBOL_GPL(mccic_irq);
/* ---------------------------------------------------------------------- */
/*
* Registration and such.
*/
static int mccic_notify_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev, struct v4l2_async_connection *asd)
{
struct mcam_camera *cam = notifier_to_mcam(notifier);
int ret;
mutex_lock(&cam->s_mutex);
if (cam->sensor) {
cam_err(cam, "sensor already bound\n");
ret = -EBUSY;
goto out;
}
v4l2_set_subdev_hostdata(subdev, cam);
cam->sensor = subdev;
ret = mcam_cam_init(cam);
if (ret) {
cam->sensor = NULL;
goto out;
}
ret = mcam_setup_vb2(cam);
if (ret) {
cam->sensor = NULL;
goto out;
}
cam->vdev = mcam_v4l_template;
cam->vdev.v4l2_dev = &cam->v4l2_dev;
cam->vdev.lock = &cam->s_mutex;
cam->vdev.queue = &cam->vb_queue;
video_set_drvdata(&cam->vdev, cam);
ret = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
cam->sensor = NULL;
goto out;
}
cam_dbg(cam, "sensor %s bound\n", subdev->name);
out:
mutex_unlock(&cam->s_mutex);
return ret;
}
static void mccic_notify_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev, struct v4l2_async_connection *asd)
{
struct mcam_camera *cam = notifier_to_mcam(notifier);
mutex_lock(&cam->s_mutex);
if (cam->sensor != subdev) {
cam_err(cam, "sensor %s not bound\n", subdev->name);
goto out;
}
video_unregister_device(&cam->vdev);
cam->sensor = NULL;
cam_dbg(cam, "sensor %s unbound\n", subdev->name);
out:
mutex_unlock(&cam->s_mutex);
}
static int mccic_notify_complete(struct v4l2_async_notifier *notifier)
{
struct mcam_camera *cam = notifier_to_mcam(notifier);
int ret;
/*
* Get the v4l2 setup done.
*/
ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10);
if (!ret)
cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler;
return ret;
}
static const struct v4l2_async_notifier_operations mccic_notify_ops = {
.bound = mccic_notify_bound,
.unbind = mccic_notify_unbind,
.complete = mccic_notify_complete,
};
int mccic_register(struct mcam_camera *cam)
{
struct clk_init_data mclk_init = { };
int ret;
/*
* Validate the requested buffer mode.
*/
if (buffer_mode >= 0)
cam->buffer_mode = buffer_mode;
if (cam->buffer_mode == B_DMA_sg &&
cam->chip_id == MCAM_CAFE) {
printk(KERN_ERR "marvell-cam: Cafe can't do S/G I/O, attempting vmalloc mode instead\n");
cam->buffer_mode = B_vmalloc;
}
if (!mcam_buffer_mode_supported(cam->buffer_mode)) {
printk(KERN_ERR "marvell-cam: buffer mode %d unsupported\n",
cam->buffer_mode);
ret = -EINVAL;
goto out;
}
mutex_init(&cam->s_mutex);
cam->state = S_NOTREADY;
mcam_set_config_needed(cam, 1);
cam->pix_format = mcam_def_pix_format;
cam->mbus_code = mcam_def_mbus_code;
cam->notifier.ops = &mccic_notify_ops;
ret = v4l2_async_nf_register(&cam->notifier);
if (ret < 0) {
		cam_warn(cam, "failed to register a sensor notifier\n");
goto out;
}
/*
* Register sensor master clock.
*/
mclk_init.parent_names = NULL;
mclk_init.num_parents = 0;
mclk_init.ops = &mclk_ops;
mclk_init.name = "mclk";
of_property_read_string(cam->dev->of_node, "clock-output-names",
&mclk_init.name);
cam->mclk_hw.init = &mclk_init;
cam->mclk = devm_clk_register(cam->dev, &cam->mclk_hw);
if (IS_ERR(cam->mclk)) {
ret = PTR_ERR(cam->mclk);
dev_err(cam->dev, "can't register clock\n");
goto out;
}
/*
* If so requested, try to get our DMA buffers now.
*/
if (cam->buffer_mode == B_vmalloc && !alloc_bufs_at_read) {
if (mcam_alloc_dma_bufs(cam, 1))
			cam_warn(cam, "Unable to alloc DMA buffers at load; will try again later.\n");
}
return 0;
out:
v4l2_async_nf_unregister(&cam->notifier);
v4l2_async_nf_cleanup(&cam->notifier);
return ret;
}
EXPORT_SYMBOL_GPL(mccic_register);
void mccic_shutdown(struct mcam_camera *cam)
{
/*
* If we have no users (and we really, really should have no
* users) the device will already be powered down. Trying to
* take it down again will wedge the machine, which is frowned
* upon.
*/
if (!list_empty(&cam->vdev.fh_list)) {
cam_warn(cam, "Removing a device with users!\n");
sensor_call(cam, core, s_power, 0);
}
if (cam->buffer_mode == B_vmalloc)
mcam_free_dma_bufs(cam);
v4l2_ctrl_handler_free(&cam->ctrl_handler);
v4l2_async_nf_unregister(&cam->notifier);
v4l2_async_nf_cleanup(&cam->notifier);
}
EXPORT_SYMBOL_GPL(mccic_shutdown);
/*
* Power management
*/
void mccic_suspend(struct mcam_camera *cam)
{
mutex_lock(&cam->s_mutex);
if (!list_empty(&cam->vdev.fh_list)) {
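		/*
		 * mcam_ctlr_stop_dma() resets cam->state; save and restore
		 * the streaming state here so that mccic_resume() can tell
		 * that streaming was in progress and restart the engine.
		 */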
enum mcam_state cstate = cam->state;
mcam_ctlr_stop_dma(cam);
sensor_call(cam, core, s_power, 0);
cam->state = cstate;
}
mutex_unlock(&cam->s_mutex);
}
EXPORT_SYMBOL_GPL(mccic_suspend);
int mccic_resume(struct mcam_camera *cam)
{
int ret = 0;
mutex_lock(&cam->s_mutex);
if (!list_empty(&cam->vdev.fh_list)) {
ret = sensor_call(cam, core, s_power, 1);
if (ret) {
mutex_unlock(&cam->s_mutex);
return ret;
}
__mcam_cam_reset(cam);
} else {
sensor_call(cam, core, s_power, 0);
}
mutex_unlock(&cam->s_mutex);
set_bit(CF_CONFIG_NEEDED, &cam->flags);
if (cam->state == S_STREAMING) {
/*
* If there was a buffer in the DMA engine at suspend
* time, put it back on the queue or we'll forget about it.
*/
if (cam->buffer_mode == B_DMA_sg && cam->vb_bufs[0])
list_add(&cam->vb_bufs[0]->queue, &cam->buffers);
ret = mcam_read_setup(cam);
}
return ret;
}
EXPORT_SYMBOL_GPL(mccic_resume);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jonathan Corbet <[email protected]>");
| linux-master | drivers/media/platform/marvell/mcam-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Coda multi-standard codec IP - BIT processor functions
*
* Copyright (C) 2012 Vista Silicon S.L.
* Javier Martin, <[email protected]>
* Xavier Duret
* Copyright (C) 2012-2014 Philipp Zabel, Pengutronix
*/
#include <linux/clk.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/ratelimit.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
#include "coda.h"
#include "imx-vdoa.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
#define CODA_PARA_BUF_SIZE (10 * 1024)
#define CODA7_PS_BUF_SIZE 0x28000
#define CODA9_PS_SAVE_SIZE (512 * 1024)
#define CODA_DEFAULT_GAMMA 4096
#define CODA9_DEFAULT_GAMMA 24576 /* 0.75 * 32768 */
static void coda_free_bitstream_buffer(struct coda_ctx *ctx);
static inline int coda_is_initialized(struct coda_dev *dev)
{
return coda_read(dev, CODA_REG_BIT_CUR_PC) != 0;
}
static inline unsigned long coda_isbusy(struct coda_dev *dev)
{
return coda_read(dev, CODA_REG_BIT_BUSY);
}
static int coda_wait_timeout(struct coda_dev *dev)
{
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
while (coda_isbusy(dev)) {
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
}
return 0;
}
static void coda_command_async(struct coda_ctx *ctx, int cmd)
{
struct coda_dev *dev = ctx->dev;
if (dev->devtype->product == CODA_HX4 ||
dev->devtype->product == CODA_7541 ||
dev->devtype->product == CODA_960) {
/* Restore context related registers to CODA */
coda_write(dev, ctx->bit_stream_param,
CODA_REG_BIT_BIT_STREAM_PARAM);
coda_write(dev, ctx->frm_dis_flg,
CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
coda_write(dev, ctx->frame_mem_ctrl,
CODA_REG_BIT_FRAME_MEM_CTRL);
coda_write(dev, ctx->workbuf.paddr, CODA_REG_BIT_WORK_BUF_ADDR);
}
if (dev->devtype->product == CODA_960) {
coda_write(dev, 1, CODA9_GDI_WPROT_ERR_CLR);
coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
}
coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
coda_write(dev, ctx->idx, CODA_REG_BIT_RUN_INDEX);
coda_write(dev, ctx->params.codec_mode, CODA_REG_BIT_RUN_COD_STD);
coda_write(dev, ctx->params.codec_mode_aux, CODA7_REG_BIT_RUN_AUX_STD);
trace_coda_bit_run(ctx, cmd);
coda_write(dev, cmd, CODA_REG_BIT_RUN_COMMAND);
}
static int coda_command_sync(struct coda_ctx *ctx, int cmd)
{
struct coda_dev *dev = ctx->dev;
int ret;
lockdep_assert_held(&dev->coda_mutex);
coda_command_async(ctx, cmd);
ret = coda_wait_timeout(dev);
trace_coda_bit_done(ctx);
return ret;
}
int coda_hw_reset(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
unsigned long timeout;
unsigned int idx;
int ret;
lockdep_assert_held(&dev->coda_mutex);
if (!dev->rstc)
return -ENOENT;
idx = coda_read(dev, CODA_REG_BIT_RUN_INDEX);
if (dev->devtype->product == CODA_960) {
timeout = jiffies + msecs_to_jiffies(100);
coda_write(dev, 0x11, CODA9_GDI_BUS_CTRL);
while (coda_read(dev, CODA9_GDI_BUS_STATUS) != 0x77) {
if (time_after(jiffies, timeout))
return -ETIME;
cpu_relax();
}
}
ret = reset_control_reset(dev->rstc);
if (ret < 0)
return ret;
if (dev->devtype->product == CODA_960)
coda_write(dev, 0x00, CODA9_GDI_BUS_CTRL);
coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN);
ret = coda_wait_timeout(dev);
coda_write(dev, idx, CODA_REG_BIT_RUN_INDEX);
return ret;
}
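/*
 * The bitstream ring buffer is shared between the kfifo (CPU side) and the
 * CODA read/write pointer registers (device side). kfifo->in and kfifo->out
 * are free-running counters, and (counter & mask) is the offset into the
 * buffer. To import the device read pointer below, the wrap count is taken
 * from kfifo->in and the device offset is spliced in; if the result lands
 * ahead of 'in', the device is still on the previous wrap and we step back
 * by one buffer size.
 */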
static void coda_kfifo_sync_from_device(struct coda_ctx *ctx)
{
struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
struct coda_dev *dev = ctx->dev;
u32 rd_ptr;
rd_ptr = coda_read(dev, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
kfifo->out = (kfifo->in & ~kfifo->mask) |
(rd_ptr - ctx->bitstream.paddr);
if (kfifo->out > kfifo->in)
kfifo->out -= kfifo->mask + 1;
}
static void coda_kfifo_sync_to_device_full(struct coda_ctx *ctx)
{
struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
struct coda_dev *dev = ctx->dev;
u32 rd_ptr, wr_ptr;
rd_ptr = ctx->bitstream.paddr + (kfifo->out & kfifo->mask);
coda_write(dev, rd_ptr, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
}
static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
{
struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
struct coda_dev *dev = ctx->dev;
u32 wr_ptr;
wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
}
static int coda_h264_bitstream_pad(struct coda_ctx *ctx, u32 size)
{
unsigned char *buf;
u32 n;
if (size < 6)
size = 6;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
coda_h264_filler_nal(size, buf);
n = kfifo_in(&ctx->bitstream_fifo, buf, size);
kfree(buf);
return (n < size) ? -ENOSPC : 0;
}
int coda_bitstream_flush(struct coda_ctx *ctx)
{
int ret;
if (ctx->inst_type != CODA_INST_DECODER || !ctx->use_bit)
return 0;
ret = coda_command_sync(ctx, CODA_COMMAND_DEC_BUF_FLUSH);
if (ret < 0) {
v4l2_err(&ctx->dev->v4l2_dev, "failed to flush bitstream\n");
return ret;
}
kfifo_init(&ctx->bitstream_fifo, ctx->bitstream.vaddr,
ctx->bitstream.size);
coda_kfifo_sync_to_device_full(ctx);
return 0;
}
static int coda_bitstream_queue(struct coda_ctx *ctx, const u8 *buf, u32 size)
{
u32 n = kfifo_in(&ctx->bitstream_fifo, buf, size);
return (n < size) ? -ENOSPC : 0;
}
static u32 coda_buffer_parse_headers(struct coda_ctx *ctx,
struct vb2_v4l2_buffer *src_buf,
u32 payload)
{
u8 *vaddr = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
u32 size = 0;
switch (ctx->codec->src_fourcc) {
case V4L2_PIX_FMT_MPEG2:
size = coda_mpeg2_parse_headers(ctx, vaddr, payload);
break;
case V4L2_PIX_FMT_MPEG4:
size = coda_mpeg4_parse_headers(ctx, vaddr, payload);
break;
default:
break;
}
return size;
}
static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
struct vb2_v4l2_buffer *src_buf)
{
unsigned long payload = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
u8 *vaddr = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
int ret;
int i;
if (coda_get_bitstream_payload(ctx) + payload + 512 >=
ctx->bitstream.size)
return false;
if (!vaddr) {
v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
return true;
}
if (ctx->qsequence == 0 && payload < 512) {
		/*
		 * Add padding after the first buffer, if it is too small to be
		 * fetched by the CODA, by repeating the headers. Without the
		 * repeated headers, decoder sequence initialization fails with
		 * error code 0x2000 on i.MX6 or error code 0x1 on i.MX51 when
		 * only this too-small first frame has been queued.
		 */
u32 header_size = coda_buffer_parse_headers(ctx, src_buf,
payload);
if (header_size) {
coda_dbg(1, ctx, "pad with %u-byte header\n",
header_size);
for (i = payload; i < 512; i += header_size) {
ret = coda_bitstream_queue(ctx, vaddr,
header_size);
if (ret < 0) {
v4l2_err(&ctx->dev->v4l2_dev,
"bitstream buffer overflow\n");
return false;
}
if (ctx->dev->devtype->product == CODA_960)
break;
}
} else {
coda_dbg(1, ctx,
"could not parse header, sequence initialization might fail\n");
}
/* Add padding before the first buffer, if it is too small */
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264)
coda_h264_bitstream_pad(ctx, 512 - payload);
}
ret = coda_bitstream_queue(ctx, vaddr, payload);
if (ret < 0) {
v4l2_err(&ctx->dev->v4l2_dev, "bitstream buffer overflow\n");
return false;
}
src_buf->sequence = ctx->qsequence++;
/* Sync read pointer to device */
if (ctx == v4l2_m2m_get_curr_priv(ctx->dev->m2m_dev))
coda_kfifo_sync_to_device_write(ctx);
/* Set the stream-end flag after the last buffer is queued */
if (src_buf->flags & V4L2_BUF_FLAG_LAST)
coda_bit_stream_end_flag(ctx);
ctx->hold = false;
return true;
}
void coda_fill_bitstream(struct coda_ctx *ctx, struct list_head *buffer_list)
{
struct vb2_v4l2_buffer *src_buf;
struct coda_buffer_meta *meta;
u32 start;
lockdep_assert_held(&ctx->bitstream_mutex);
if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG)
return;
while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) {
/*
* Only queue two JPEGs into the bitstream buffer to keep
* latency low. We need at least one complete buffer and the
* header of another buffer (for prescan) in the bitstream.
*/
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
ctx->num_metas > 1)
break;
if (ctx->num_internal_frames &&
ctx->num_metas >= ctx->num_internal_frames) {
meta = list_first_entry(&ctx->buffer_meta_list,
struct coda_buffer_meta, list);
			/*
			 * If we have managed to fill in at least a full
			 * reorder window of buffers (num_internal_frames is
			 * a conservative estimate for this) and the bitstream
			 * prefetcher has at least two 256-byte periods beyond
			 * the first buffer to fetch, we can safely stop
			 * queuing in order to limit the decoder drain latency.
			 */
if (coda_bitstream_can_fetch_past(ctx, meta->end))
break;
}
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
		/* Drop frames that do not start/end with SOI/EOI markers */
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
!coda_jpeg_check_buffer(ctx, &src_buf->vb2_buf)) {
v4l2_err(&ctx->dev->v4l2_dev,
"dropping invalid JPEG frame %d\n",
ctx->qsequence);
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
if (buffer_list) {
struct v4l2_m2m_buffer *m2m_buf;
m2m_buf = container_of(src_buf,
struct v4l2_m2m_buffer,
vb);
list_add_tail(&m2m_buf->list, buffer_list);
} else {
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
}
continue;
}
/* Dump empty buffers */
if (!vb2_get_plane_payload(&src_buf->vb2_buf, 0)) {
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
continue;
}
/* Buffer start position */
start = ctx->bitstream_fifo.kfifo.in;
if (coda_bitstream_try_queue(ctx, src_buf)) {
/*
* Source buffer is queued in the bitstream ringbuffer;
* queue the timestamp and mark source buffer as done
*/
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
meta = kmalloc(sizeof(*meta), GFP_KERNEL);
if (meta) {
meta->sequence = src_buf->sequence;
meta->timecode = src_buf->timecode;
meta->timestamp = src_buf->vb2_buf.timestamp;
meta->start = start;
meta->end = ctx->bitstream_fifo.kfifo.in;
meta->last = src_buf->flags & V4L2_BUF_FLAG_LAST;
if (meta->last)
coda_dbg(1, ctx, "marking last meta");
spin_lock(&ctx->buffer_meta_lock);
list_add_tail(&meta->list,
&ctx->buffer_meta_list);
ctx->num_metas++;
spin_unlock(&ctx->buffer_meta_lock);
trace_coda_bit_queue(ctx, src_buf, meta);
}
if (buffer_list) {
struct v4l2_m2m_buffer *m2m_buf;
m2m_buf = container_of(src_buf,
struct v4l2_m2m_buffer,
vb);
list_add_tail(&m2m_buf->list, buffer_list);
} else {
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
}
} else {
break;
}
}
}
void coda_bit_stream_end_flag(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
/* If this context is currently running, update the hardware flag */
if ((dev->devtype->product == CODA_960) &&
coda_isbusy(dev) &&
(ctx->idx == coda_read(dev, CODA_REG_BIT_RUN_INDEX))) {
coda_write(dev, ctx->bit_stream_param,
CODA_REG_BIT_BIT_STREAM_PARAM);
}
}
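/*
 * Note: on everything but CodaDx6 the two 32-bit words within each 64-bit
 * unit of the parameter buffer are swapped (index ^ 1), apparently matching
 * the 64-bit endianness of the firmware's parameter buffer accesses.
 */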
static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value)
{
struct coda_dev *dev = ctx->dev;
u32 *p = ctx->parabuf.vaddr;
if (dev->devtype->product == CODA_DX6)
p[index] = value;
else
p[index ^ 1] = value;
}
static inline int coda_alloc_context_buf(struct coda_ctx *ctx,
struct coda_aux_buf *buf, size_t size,
const char *name)
{
return coda_alloc_aux_buf(ctx->dev, buf, size, name, ctx->debugfs_entry);
}
static void coda_free_framebuffers(struct coda_ctx *ctx)
{
int i;
for (i = 0; i < CODA_MAX_FRAMEBUFFERS; i++)
coda_free_aux_buf(ctx->dev, &ctx->internal_frames[i].buf);
}
static int coda_alloc_framebuffers(struct coda_ctx *ctx,
struct coda_q_data *q_data, u32 fourcc)
{
struct coda_dev *dev = ctx->dev;
unsigned int ysize, ycbcr_size;
int ret;
int i;
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 ||
ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4 ||
ctx->codec->dst_fourcc == V4L2_PIX_FMT_MPEG4)
ysize = round_up(q_data->rect.width, 16) *
round_up(q_data->rect.height, 16);
else
ysize = round_up(q_data->rect.width, 8) * q_data->rect.height;
if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
ycbcr_size = round_up(ysize, 4096) + ysize / 2;
else
ycbcr_size = ysize + ysize / 2;
/* Allocate frame buffers */
for (i = 0; i < ctx->num_internal_frames; i++) {
size_t size = ycbcr_size;
char *name;
/* Add space for mvcol buffers */
if (dev->devtype->product != CODA_DX6 &&
(ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 ||
(ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4 && i == 0)))
size += ysize / 4;
name = kasprintf(GFP_KERNEL, "fb%d", i);
if (!name) {
coda_free_framebuffers(ctx);
return -ENOMEM;
}
ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i].buf,
size, name);
kfree(name);
if (ret < 0) {
coda_free_framebuffers(ctx);
return ret;
}
}
/* Register frame buffers in the parameter buffer */
for (i = 0; i < ctx->num_internal_frames; i++) {
u32 y, cb, cr, mvcol;
/* Start addresses of Y, Cb, Cr planes */
y = ctx->internal_frames[i].buf.paddr;
cb = y + ysize;
cr = y + ysize + ysize/4;
mvcol = y + ysize + ysize/4 + ysize/4;
if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP) {
cb = round_up(cb, 4096);
mvcol = cb + ysize/2;
cr = 0;
/* Packed 20-bit MSB of base addresses */
/* YYYYYCCC, CCyyyyyc, cccc.... */
y = (y & 0xfffff000) | cb >> 20;
cb = (cb & 0x000ff000) << 12;
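			/*
			 * That is, the first word carries Y[31:12] in its top
			 * 20 bits and CB[31:20] in its low 12 bits, and the
			 * second word carries CB[19:12] in its top byte.
			 */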
}
coda_parabuf_write(ctx, i * 3 + 0, y);
coda_parabuf_write(ctx, i * 3 + 1, cb);
coda_parabuf_write(ctx, i * 3 + 2, cr);
if (dev->devtype->product == CODA_DX6)
continue;
/* mvcol buffer for h.264 and mpeg4 */
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264)
coda_parabuf_write(ctx, 96 + i, mvcol);
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4 && i == 0)
coda_parabuf_write(ctx, 97, mvcol);
}
return 0;
}
static void coda_free_context_buffers(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
coda_free_aux_buf(dev, &ctx->slicebuf);
coda_free_aux_buf(dev, &ctx->psbuf);
if (dev->devtype->product != CODA_DX6)
coda_free_aux_buf(dev, &ctx->workbuf);
coda_free_aux_buf(dev, &ctx->parabuf);
}
static int coda_alloc_context_buffers(struct coda_ctx *ctx,
struct coda_q_data *q_data)
{
struct coda_dev *dev = ctx->dev;
size_t size;
int ret;
if (!ctx->parabuf.vaddr) {
ret = coda_alloc_context_buf(ctx, &ctx->parabuf,
CODA_PARA_BUF_SIZE, "parabuf");
if (ret < 0)
return ret;
}
if (dev->devtype->product == CODA_DX6)
return 0;
if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) {
/* worst case slice size */
size = (DIV_ROUND_UP(q_data->rect.width, 16) *
DIV_ROUND_UP(q_data->rect.height, 16)) * 3200 / 8 + 512;
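		/* i.e. 3200 bits per macroblock, in bytes, plus 512 bytes of slack */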
ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size,
"slicebuf");
if (ret < 0)
goto err;
}
if (!ctx->psbuf.vaddr && (dev->devtype->product == CODA_HX4 ||
dev->devtype->product == CODA_7541)) {
ret = coda_alloc_context_buf(ctx, &ctx->psbuf,
CODA7_PS_BUF_SIZE, "psbuf");
if (ret < 0)
goto err;
}
if (!ctx->workbuf.vaddr) {
size = dev->devtype->workbuf_size;
if (dev->devtype->product == CODA_960 &&
q_data->fourcc == V4L2_PIX_FMT_H264)
size += CODA9_PS_SAVE_SIZE;
ret = coda_alloc_context_buf(ctx, &ctx->workbuf, size,
"workbuf");
if (ret < 0)
goto err;
}
return 0;
err:
coda_free_context_buffers(ctx);
return ret;
}
static int coda_encode_header(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
int header_code, u8 *header, int *size)
{
struct vb2_buffer *vb = &buf->vb2_buf;
struct coda_dev *dev = ctx->dev;
struct coda_q_data *q_data_src;
struct v4l2_rect *r;
size_t bufsize;
int ret;
int i;
if (dev->devtype->product == CODA_960)
memset(vb2_plane_vaddr(vb, 0), 0, 64);
coda_write(dev, vb2_dma_contig_plane_dma_addr(vb, 0),
CODA_CMD_ENC_HEADER_BB_START);
bufsize = vb2_plane_size(vb, 0);
if (dev->devtype->product == CODA_960)
bufsize /= 1024;
coda_write(dev, bufsize, CODA_CMD_ENC_HEADER_BB_SIZE);
if (dev->devtype->product == CODA_960 &&
ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264 &&
header_code == CODA_HEADER_H264_SPS) {
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
r = &q_data_src->rect;
if (r->width % 16 || r->height % 16) {
u32 crop_right = round_up(r->width, 16) - r->width;
u32 crop_bottom = round_up(r->height, 16) - r->height;
coda_write(dev, crop_right,
CODA9_CMD_ENC_HEADER_FRAME_CROP_H);
coda_write(dev, crop_bottom,
CODA9_CMD_ENC_HEADER_FRAME_CROP_V);
header_code |= CODA9_HEADER_FRAME_CROP;
}
}
coda_write(dev, header_code, CODA_CMD_ENC_HEADER_CODE);
ret = coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_ENCODE_HEADER timeout\n");
return ret;
}
if (dev->devtype->product == CODA_960) {
for (i = 63; i > 0; i--)
if (((char *)vb2_plane_vaddr(vb, 0))[i] != 0)
break;
*size = i + 1;
} else {
*size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) -
coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
}
memcpy(header, vb2_plane_vaddr(vb, 0), *size);
return 0;
}
static u32 coda_slice_mode(struct coda_ctx *ctx)
{
int size, unit;
switch (ctx->params.slice_mode) {
case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE:
default:
return 0;
case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB:
size = ctx->params.slice_max_mb;
unit = 1;
break;
case V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES:
size = ctx->params.slice_max_bits;
unit = 0;
break;
}
return ((size & CODA_SLICING_SIZE_MASK) << CODA_SLICING_SIZE_OFFSET) |
((unit & CODA_SLICING_UNIT_MASK) << CODA_SLICING_UNIT_OFFSET) |
((1 & CODA_SLICING_MODE_MASK) << CODA_SLICING_MODE_OFFSET);
}
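/*
 * Apply runtime encoder parameter changes (GOP size, intra QP, bitrate,
 * frame rate, intra refresh, slice mode) through the firmware's
 * RC_CHANGE_PARAMETER command: each changed value is written to its
 * CODA_CMD_ENC_PARAM_* register and the matching bit is set in the
 * change-enable mask before the command is issued.
 */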
static int coda_enc_param_change(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
u32 change_enable = 0;
u32 success;
int ret;
if (ctx->params.gop_size_changed) {
change_enable |= CODA_PARAM_CHANGE_RC_GOP;
coda_write(dev, ctx->params.gop_size,
CODA_CMD_ENC_PARAM_RC_GOP);
ctx->gopcounter = ctx->params.gop_size - 1;
ctx->params.gop_size_changed = false;
}
if (ctx->params.h264_intra_qp_changed) {
coda_dbg(1, ctx, "parameter change: intra Qp %u\n",
ctx->params.h264_intra_qp);
if (ctx->params.bitrate) {
change_enable |= CODA_PARAM_CHANGE_RC_INTRA_QP;
coda_write(dev, ctx->params.h264_intra_qp,
CODA_CMD_ENC_PARAM_RC_INTRA_QP);
}
ctx->params.h264_intra_qp_changed = false;
}
if (ctx->params.bitrate_changed) {
coda_dbg(1, ctx, "parameter change: bitrate %u kbit/s\n",
ctx->params.bitrate);
change_enable |= CODA_PARAM_CHANGE_RC_BITRATE;
coda_write(dev, ctx->params.bitrate,
CODA_CMD_ENC_PARAM_RC_BITRATE);
ctx->params.bitrate_changed = false;
}
if (ctx->params.framerate_changed) {
coda_dbg(1, ctx, "parameter change: frame rate %u/%u Hz\n",
ctx->params.framerate & 0xffff,
(ctx->params.framerate >> 16) + 1);
change_enable |= CODA_PARAM_CHANGE_RC_FRAME_RATE;
coda_write(dev, ctx->params.framerate,
CODA_CMD_ENC_PARAM_RC_FRAME_RATE);
ctx->params.framerate_changed = false;
}
if (ctx->params.intra_refresh_changed) {
coda_dbg(1, ctx, "parameter change: intra refresh MBs %u\n",
ctx->params.intra_refresh);
change_enable |= CODA_PARAM_CHANGE_INTRA_MB_NUM;
coda_write(dev, ctx->params.intra_refresh,
CODA_CMD_ENC_PARAM_INTRA_MB_NUM);
ctx->params.intra_refresh_changed = false;
}
if (ctx->params.slice_mode_changed) {
change_enable |= CODA_PARAM_CHANGE_SLICE_MODE;
coda_write(dev, coda_slice_mode(ctx),
CODA_CMD_ENC_PARAM_SLICE_MODE);
ctx->params.slice_mode_changed = false;
}
if (!change_enable)
return 0;
coda_write(dev, change_enable, CODA_CMD_ENC_PARAM_CHANGE_ENABLE);
ret = coda_command_sync(ctx, CODA_COMMAND_RC_CHANGE_PARAMETER);
if (ret < 0)
return ret;
success = coda_read(dev, CODA_RET_ENC_PARAM_CHANGE_SUCCESS);
if (success != 1)
coda_dbg(1, ctx, "parameter change failed: %u\n", success);
return 0;
}
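/*
 * Simple bump allocator over the on-chip IRAM: requests are rounded up to
 * 1 KiB and carved off the front of the remaining space. A return value of
 * 0 means the IRAM is exhausted; callers treat 0 as "not allocated" and
 * leave the corresponding secondary AXI use disabled.
 */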
static phys_addr_t coda_iram_alloc(struct coda_iram_info *iram, size_t size)
{
phys_addr_t ret;
size = round_up(size, 1024);
if (size > iram->remaining)
return 0;
iram->remaining -= size;
ret = iram->next_paddr;
iram->next_paddr += size;
return ret;
}
static void coda_setup_iram(struct coda_ctx *ctx)
{
struct coda_iram_info *iram_info = &ctx->iram_info;
struct coda_dev *dev = ctx->dev;
int w64, w128;
int mb_width;
int dbk_bits;
int bit_bits;
int ip_bits;
int me_bits;
memset(iram_info, 0, sizeof(*iram_info));
iram_info->next_paddr = dev->iram.paddr;
iram_info->remaining = dev->iram.size;
if (!dev->iram.vaddr)
return;
switch (dev->devtype->product) {
case CODA_HX4:
dbk_bits = CODA7_USE_HOST_DBK_ENABLE;
bit_bits = CODA7_USE_HOST_BIT_ENABLE;
ip_bits = CODA7_USE_HOST_IP_ENABLE;
me_bits = CODA7_USE_HOST_ME_ENABLE;
break;
case CODA_7541:
dbk_bits = CODA7_USE_HOST_DBK_ENABLE | CODA7_USE_DBK_ENABLE;
bit_bits = CODA7_USE_HOST_BIT_ENABLE | CODA7_USE_BIT_ENABLE;
ip_bits = CODA7_USE_HOST_IP_ENABLE | CODA7_USE_IP_ENABLE;
me_bits = CODA7_USE_HOST_ME_ENABLE | CODA7_USE_ME_ENABLE;
break;
case CODA_960:
dbk_bits = CODA9_USE_HOST_DBK_ENABLE | CODA9_USE_DBK_ENABLE;
bit_bits = CODA9_USE_HOST_BIT_ENABLE | CODA7_USE_BIT_ENABLE;
ip_bits = CODA9_USE_HOST_IP_ENABLE | CODA7_USE_IP_ENABLE;
me_bits = 0;
break;
default: /* CODA_DX6 */
return;
}
if (ctx->inst_type == CODA_INST_ENCODER) {
struct coda_q_data *q_data_src;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
mb_width = DIV_ROUND_UP(q_data_src->rect.width, 16);
w128 = mb_width * 128;
w64 = mb_width * 64;
/* Prioritize in case IRAM is too small for everything */
if (dev->devtype->product == CODA_HX4 ||
dev->devtype->product == CODA_7541) {
iram_info->search_ram_size = round_up(mb_width * 16 *
36 + 2048, 1024);
iram_info->search_ram_paddr = coda_iram_alloc(iram_info,
iram_info->search_ram_size);
if (!iram_info->search_ram_paddr) {
pr_err("IRAM is smaller than the search ram size\n");
goto out;
}
iram_info->axi_sram_use |= me_bits;
}
/* Only H.264BP and H.263P3 are considered */
iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w64);
iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w64);
if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use)
goto out;
iram_info->axi_sram_use |= dbk_bits;
iram_info->buf_bit_use = coda_iram_alloc(iram_info, w128);
if (!iram_info->buf_bit_use)
goto out;
iram_info->axi_sram_use |= bit_bits;
iram_info->buf_ip_ac_dc_use = coda_iram_alloc(iram_info, w128);
if (!iram_info->buf_ip_ac_dc_use)
goto out;
iram_info->axi_sram_use |= ip_bits;
/* OVL and BTP disabled for encoder */
} else if (ctx->inst_type == CODA_INST_DECODER) {
struct coda_q_data *q_data_dst;
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
mb_width = DIV_ROUND_UP(q_data_dst->width, 16);
w128 = mb_width * 128;
iram_info->buf_dbk_y_use = coda_iram_alloc(iram_info, w128);
iram_info->buf_dbk_c_use = coda_iram_alloc(iram_info, w128);
if (!iram_info->buf_dbk_y_use || !iram_info->buf_dbk_c_use)
goto out;
iram_info->axi_sram_use |= dbk_bits;
iram_info->buf_bit_use = coda_iram_alloc(iram_info, w128);
if (!iram_info->buf_bit_use)
goto out;
iram_info->axi_sram_use |= bit_bits;
iram_info->buf_ip_ac_dc_use = coda_iram_alloc(iram_info, w128);
if (!iram_info->buf_ip_ac_dc_use)
goto out;
iram_info->axi_sram_use |= ip_bits;
/* OVL and BTP unused as there is no VC1 support yet */
}
out:
if (!(iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE))
coda_dbg(1, ctx, "IRAM smaller than needed\n");
if (dev->devtype->product == CODA_HX4 ||
dev->devtype->product == CODA_7541) {
/* TODO - Enabling these causes picture errors on CODA7541 */
if (ctx->inst_type == CODA_INST_DECODER) {
/* fw 1.4.50 */
iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
CODA7_USE_IP_ENABLE);
} else {
/* fw 13.4.29 */
iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
CODA7_USE_HOST_DBK_ENABLE |
CODA7_USE_IP_ENABLE |
CODA7_USE_DBK_ENABLE);
}
}
}
static u32 coda_supported_firmwares[] = {
CODA_FIRMWARE_VERNUM(CODA_DX6, 2, 2, 5),
CODA_FIRMWARE_VERNUM(CODA_HX4, 1, 4, 50),
CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50),
CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 5),
CODA_FIRMWARE_VERNUM(CODA_960, 2, 1, 9),
CODA_FIRMWARE_VERNUM(CODA_960, 2, 3, 10),
CODA_FIRMWARE_VERNUM(CODA_960, 3, 1, 1),
};
static bool coda_firmware_supported(u32 vernum)
{
int i;
for (i = 0; i < ARRAY_SIZE(coda_supported_firmwares); i++)
if (vernum == coda_supported_firmwares[i])
return true;
return false;
}
int coda_check_firmware(struct coda_dev *dev)
{
u16 product, major, minor, release;
u32 data;
int ret;
ret = clk_prepare_enable(dev->clk_per);
if (ret)
goto err_clk_per;
ret = clk_prepare_enable(dev->clk_ahb);
if (ret)
goto err_clk_ahb;
coda_write(dev, 0, CODA_CMD_FIRMWARE_VERNUM);
coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
coda_write(dev, 0, CODA_REG_BIT_RUN_INDEX);
coda_write(dev, 0, CODA_REG_BIT_RUN_COD_STD);
coda_write(dev, CODA_COMMAND_FIRMWARE_GET, CODA_REG_BIT_RUN_COMMAND);
if (coda_wait_timeout(dev)) {
v4l2_err(&dev->v4l2_dev, "firmware get command error\n");
ret = -EIO;
goto err_run_cmd;
}
if (dev->devtype->product == CODA_960) {
data = coda_read(dev, CODA9_CMD_FIRMWARE_CODE_REV);
v4l2_info(&dev->v4l2_dev, "Firmware code revision: %d\n",
data);
}
/* Check we are compatible with the loaded firmware */
data = coda_read(dev, CODA_CMD_FIRMWARE_VERNUM);
product = CODA_FIRMWARE_PRODUCT(data);
major = CODA_FIRMWARE_MAJOR(data);
minor = CODA_FIRMWARE_MINOR(data);
release = CODA_FIRMWARE_RELEASE(data);
clk_disable_unprepare(dev->clk_per);
clk_disable_unprepare(dev->clk_ahb);
if (product != dev->devtype->product) {
v4l2_err(&dev->v4l2_dev,
"Wrong firmware. Hw: %s, Fw: %s, Version: %u.%u.%u\n",
coda_product_name(dev->devtype->product),
coda_product_name(product), major, minor, release);
return -EINVAL;
}
v4l2_info(&dev->v4l2_dev, "Initialized %s.\n",
coda_product_name(product));
if (coda_firmware_supported(data)) {
v4l2_info(&dev->v4l2_dev, "Firmware version: %u.%u.%u\n",
major, minor, release);
} else {
v4l2_warn(&dev->v4l2_dev,
"Unsupported firmware version: %u.%u.%u\n",
major, minor, release);
}
return 0;
err_run_cmd:
clk_disable_unprepare(dev->clk_ahb);
err_clk_ahb:
clk_disable_unprepare(dev->clk_per);
err_clk_per:
return ret;
}
static void coda9_set_frame_cache(struct coda_ctx *ctx, u32 fourcc)
{
u32 cache_size, cache_config;
if (ctx->tiled_map_type == GDI_LINEAR_FRAME_MAP) {
/* Luma 2x0 page, 2x6 cache, chroma 2x0 page, 2x4 cache size */
cache_size = 0x20262024;
cache_config = 2 << CODA9_CACHE_PAGEMERGE_OFFSET;
} else {
/* Luma 0x2 page, 4x4 cache, chroma 0x2 page, 4x3 cache size */
cache_size = 0x02440243;
cache_config = 1 << CODA9_CACHE_PAGEMERGE_OFFSET;
}
coda_write(ctx->dev, cache_size, CODA9_CMD_SET_FRAME_CACHE_SIZE);
if (fourcc == V4L2_PIX_FMT_NV12 || fourcc == V4L2_PIX_FMT_YUYV) {
cache_config |= 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
16 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET |
0 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET;
} else {
cache_config |= 32 << CODA9_CACHE_LUMA_BUFFER_SIZE_OFFSET |
8 << CODA9_CACHE_CR_BUFFER_SIZE_OFFSET |
8 << CODA9_CACHE_CB_BUFFER_SIZE_OFFSET;
}
coda_write(ctx->dev, cache_config, CODA9_CMD_SET_FRAME_CACHE_CONFIG);
}
/*
* Encoder context operations
*/
static int coda_encoder_reqbufs(struct coda_ctx *ctx,
struct v4l2_requestbuffers *rb)
{
struct coda_q_data *q_data_src;
int ret;
if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return 0;
if (rb->count) {
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
ret = coda_alloc_context_buffers(ctx, q_data_src);
if (ret < 0)
return ret;
} else {
coda_free_context_buffers(ctx);
}
return 0;
}
static int coda_start_encoding(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
struct coda_q_data *q_data_src, *q_data_dst;
u32 bitstream_buf, bitstream_size;
struct vb2_v4l2_buffer *buf;
int gamma, ret, value;
u32 dst_fourcc;
int num_fb;
u32 stride;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
dst_fourcc = q_data_dst->fourcc;
buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
bitstream_buf = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0);
bitstream_size = q_data_dst->sizeimage;
if (!coda_is_initialized(dev)) {
v4l2_err(v4l2_dev, "coda is not initialized.\n");
return -EFAULT;
}
if (dst_fourcc == V4L2_PIX_FMT_JPEG) {
if (!ctx->params.jpeg_qmat_tab[0]) {
ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
if (!ctx->params.jpeg_qmat_tab[0])
return -ENOMEM;
}
if (!ctx->params.jpeg_qmat_tab[1]) {
ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
if (!ctx->params.jpeg_qmat_tab[1])
return -ENOMEM;
}
coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
}
mutex_lock(&dev->coda_mutex);
coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
switch (dev->devtype->product) {
case CODA_DX6:
coda_write(dev, CODADX6_STREAM_BUF_DYNALLOC_EN |
CODADX6_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
break;
case CODA_960:
coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
fallthrough;
case CODA_HX4:
case CODA_7541:
coda_write(dev, CODA7_STREAM_BUF_DYNALLOC_EN |
CODA7_STREAM_BUF_PIC_RESET, CODA_REG_BIT_STREAM_CTRL);
break;
}
ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
CODA9_FRAME_TILED2LINEAR);
if (q_data_src->fourcc == V4L2_PIX_FMT_NV12)
ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
ctx->frame_mem_ctrl |= (0x3 << 9) | CODA9_FRAME_TILED2LINEAR;
coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
if (dev->devtype->product == CODA_DX6) {
/* Configure the coda */
coda_write(dev, dev->iram.paddr,
CODADX6_REG_BIT_SEARCH_RAM_BASE_ADDR);
}
/* Could set rotation here if needed */
value = 0;
switch (dev->devtype->product) {
case CODA_DX6:
value = (q_data_src->rect.width & CODADX6_PICWIDTH_MASK)
<< CODADX6_PICWIDTH_OFFSET;
value |= (q_data_src->rect.height & CODADX6_PICHEIGHT_MASK)
<< CODA_PICHEIGHT_OFFSET;
break;
case CODA_HX4:
case CODA_7541:
if (dst_fourcc == V4L2_PIX_FMT_H264) {
value = (round_up(q_data_src->rect.width, 16) &
CODA7_PICWIDTH_MASK) << CODA7_PICWIDTH_OFFSET;
value |= (round_up(q_data_src->rect.height, 16) &
CODA7_PICHEIGHT_MASK) << CODA_PICHEIGHT_OFFSET;
break;
}
fallthrough;
case CODA_960:
value = (q_data_src->rect.width & CODA7_PICWIDTH_MASK)
<< CODA7_PICWIDTH_OFFSET;
value |= (q_data_src->rect.height & CODA7_PICHEIGHT_MASK)
<< CODA_PICHEIGHT_OFFSET;
}
coda_write(dev, value, CODA_CMD_ENC_SEQ_SRC_SIZE);
if (dst_fourcc == V4L2_PIX_FMT_JPEG)
ctx->params.framerate = 0;
coda_write(dev, ctx->params.framerate,
CODA_CMD_ENC_SEQ_SRC_F_RATE);
ctx->params.codec_mode = ctx->codec->mode;
switch (dst_fourcc) {
case V4L2_PIX_FMT_MPEG4:
if (dev->devtype->product == CODA_960)
coda_write(dev, CODA9_STD_MPEG4,
CODA_CMD_ENC_SEQ_COD_STD);
else
coda_write(dev, CODA_STD_MPEG4,
CODA_CMD_ENC_SEQ_COD_STD);
coda_write(dev, 0, CODA_CMD_ENC_SEQ_MP4_PARA);
break;
case V4L2_PIX_FMT_H264:
if (dev->devtype->product == CODA_960)
coda_write(dev, CODA9_STD_H264,
CODA_CMD_ENC_SEQ_COD_STD);
else
coda_write(dev, CODA_STD_H264,
CODA_CMD_ENC_SEQ_COD_STD);
value = ((ctx->params.h264_disable_deblocking_filter_idc &
CODA_264PARAM_DISABLEDEBLK_MASK) <<
CODA_264PARAM_DISABLEDEBLK_OFFSET) |
((ctx->params.h264_slice_alpha_c0_offset_div2 &
CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
((ctx->params.h264_slice_beta_offset_div2 &
CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET) |
(ctx->params.h264_constrained_intra_pred_flag <<
CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET) |
(ctx->params.h264_chroma_qp_index_offset &
CODA_264PARAM_CHROMAQPOFFSET_MASK);
coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
break;
case V4L2_PIX_FMT_JPEG:
coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_PARA);
coda_write(dev, ctx->params.jpeg_restart_interval,
CODA_CMD_ENC_SEQ_JPG_RST_INTERVAL);
coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_EN);
coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_SIZE);
coda_write(dev, 0, CODA_CMD_ENC_SEQ_JPG_THUMB_OFFSET);
coda_jpeg_write_tables(ctx);
break;
default:
v4l2_err(v4l2_dev,
"dst format (0x%08x) invalid.\n", dst_fourcc);
ret = -EINVAL;
goto out;
}
/*
* slice mode and GOP size registers are used for thumb size/offset
* in JPEG mode
*/
if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
value = coda_slice_mode(ctx);
coda_write(dev, value, CODA_CMD_ENC_SEQ_SLICE_MODE);
value = ctx->params.gop_size;
coda_write(dev, value, CODA_CMD_ENC_SEQ_GOP_SIZE);
}
if (ctx->params.bitrate && (ctx->params.frame_rc_enable ||
ctx->params.mb_rc_enable)) {
ctx->params.bitrate_changed = false;
ctx->params.h264_intra_qp_changed = false;
/* Rate control enabled */
value = (ctx->params.bitrate & CODA_RATECONTROL_BITRATE_MASK)
<< CODA_RATECONTROL_BITRATE_OFFSET;
value |= 1 & CODA_RATECONTROL_ENABLE_MASK;
value |= (ctx->params.vbv_delay &
CODA_RATECONTROL_INITIALDELAY_MASK)
<< CODA_RATECONTROL_INITIALDELAY_OFFSET;
if (dev->devtype->product == CODA_960)
value |= BIT(31); /* disable autoskip */
} else {
value = 0;
}
coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_PARA);
coda_write(dev, ctx->params.vbv_size, CODA_CMD_ENC_SEQ_RC_BUF_SIZE);
coda_write(dev, ctx->params.intra_refresh,
CODA_CMD_ENC_SEQ_INTRA_REFRESH);
coda_write(dev, bitstream_buf, CODA_CMD_ENC_SEQ_BB_START);
coda_write(dev, bitstream_size / 1024, CODA_CMD_ENC_SEQ_BB_SIZE);
value = 0;
if (dev->devtype->product == CODA_960)
gamma = CODA9_DEFAULT_GAMMA;
else
gamma = CODA_DEFAULT_GAMMA;
if (gamma > 0) {
coda_write(dev, (gamma & CODA_GAMMA_MASK) << CODA_GAMMA_OFFSET,
CODA_CMD_ENC_SEQ_RC_GAMMA);
}
if (ctx->params.h264_min_qp || ctx->params.h264_max_qp) {
coda_write(dev,
ctx->params.h264_min_qp << CODA_QPMIN_OFFSET |
ctx->params.h264_max_qp << CODA_QPMAX_OFFSET,
CODA_CMD_ENC_SEQ_RC_QP_MIN_MAX);
}
if (dev->devtype->product == CODA_960) {
if (ctx->params.h264_max_qp)
value |= 1 << CODA9_OPTION_RCQPMAX_OFFSET;
if (CODA_DEFAULT_GAMMA > 0)
value |= 1 << CODA9_OPTION_GAMMA_OFFSET;
} else {
if (CODA_DEFAULT_GAMMA > 0) {
if (dev->devtype->product == CODA_DX6)
value |= 1 << CODADX6_OPTION_GAMMA_OFFSET;
else
value |= 1 << CODA7_OPTION_GAMMA_OFFSET;
}
if (ctx->params.h264_min_qp)
value |= 1 << CODA7_OPTION_RCQPMIN_OFFSET;
if (ctx->params.h264_max_qp)
value |= 1 << CODA7_OPTION_RCQPMAX_OFFSET;
}
coda_write(dev, value, CODA_CMD_ENC_SEQ_OPTION);
if (ctx->params.frame_rc_enable && !ctx->params.mb_rc_enable)
value = 1;
else
value = 0;
coda_write(dev, value, CODA_CMD_ENC_SEQ_RC_INTERVAL_MODE);
coda_setup_iram(ctx);
if (dst_fourcc == V4L2_PIX_FMT_H264) {
switch (dev->devtype->product) {
case CODA_DX6:
value = FMO_SLICE_SAVE_BUF_SIZE << 7;
coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO);
break;
case CODA_HX4:
case CODA_7541:
coda_write(dev, ctx->iram_info.search_ram_paddr,
CODA7_CMD_ENC_SEQ_SEARCH_BASE);
coda_write(dev, ctx->iram_info.search_ram_size,
CODA7_CMD_ENC_SEQ_SEARCH_SIZE);
break;
case CODA_960:
coda_write(dev, 0, CODA9_CMD_ENC_SEQ_ME_OPTION);
coda_write(dev, 0, CODA9_CMD_ENC_SEQ_INTRA_WEIGHT);
}
}
ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT);
if (ret < 0) {
v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
goto out;
}
if (coda_read(dev, CODA_RET_ENC_SEQ_SUCCESS) == 0) {
v4l2_err(v4l2_dev, "CODA_COMMAND_SEQ_INIT failed\n");
ret = -EFAULT;
goto out;
}
ctx->initialized = 1;
if (dst_fourcc != V4L2_PIX_FMT_JPEG) {
if (dev->devtype->product == CODA_960)
ctx->num_internal_frames = 4;
else
ctx->num_internal_frames = 2;
ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc);
if (ret < 0) {
v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
goto out;
}
num_fb = 2;
stride = q_data_src->bytesperline;
} else {
ctx->num_internal_frames = 0;
num_fb = 0;
stride = 0;
}
coda_write(dev, num_fb, CODA_CMD_SET_FRAME_BUF_NUM);
coda_write(dev, stride, CODA_CMD_SET_FRAME_BUF_STRIDE);
if (dev->devtype->product == CODA_HX4 ||
dev->devtype->product == CODA_7541) {
coda_write(dev, q_data_src->bytesperline,
CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
}
if (dev->devtype->product != CODA_DX6) {
coda_write(dev, ctx->iram_info.buf_bit_use,
CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
coda_write(dev, ctx->iram_info.buf_dbk_y_use,
CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
coda_write(dev, ctx->iram_info.buf_dbk_c_use,
CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
coda_write(dev, ctx->iram_info.buf_ovl_use,
CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
if (dev->devtype->product == CODA_960) {
coda_write(dev, ctx->iram_info.buf_btp_use,
CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
coda9_set_frame_cache(ctx, q_data_src->fourcc);
/* FIXME */
coda_write(dev, ctx->internal_frames[2].buf.paddr,
CODA9_CMD_SET_FRAME_SUBSAMP_A);
coda_write(dev, ctx->internal_frames[3].buf.paddr,
CODA9_CMD_SET_FRAME_SUBSAMP_B);
}
}
ret = coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF);
if (ret < 0) {
v4l2_err(v4l2_dev, "CODA_COMMAND_SET_FRAME_BUF timeout\n");
goto out;
}
coda_dbg(1, ctx, "start encoding %dx%d %4.4s->%4.4s @ %d/%d Hz\n",
q_data_src->rect.width, q_data_src->rect.height,
(char *)&ctx->codec->src_fourcc, (char *)&dst_fourcc,
ctx->params.framerate & 0xffff,
(ctx->params.framerate >> 16) + 1);
/* Save stream headers */
buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
switch (dst_fourcc) {
case V4L2_PIX_FMT_H264:
/*
* Get SPS in the first frame and copy it to an
* intermediate buffer.
*/
ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_SPS,
&ctx->vpu_header[0][0],
&ctx->vpu_header_size[0]);
if (ret < 0)
goto out;
/*
* If visible width or height are not aligned to macroblock
* size, the crop_right and crop_bottom SPS fields must be set
* to the difference between visible and coded size. This is
* only supported by CODA960 firmware. All others do not allow
* writing frame cropping parameters, so we have to manually
* fix up the SPS RBSP (Sequence Parameter Set Raw Byte
* Sequence Payload) ourselves.
*/
if (ctx->dev->devtype->product != CODA_960 &&
((q_data_src->rect.width % 16) ||
(q_data_src->rect.height % 16))) {
ret = coda_h264_sps_fixup(ctx, q_data_src->rect.width,
q_data_src->rect.height,
&ctx->vpu_header[0][0],
&ctx->vpu_header_size[0],
sizeof(ctx->vpu_header[0]));
if (ret < 0)
goto out;
}
/*
* Get PPS in the first frame and copy it to an
* intermediate buffer.
*/
ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_PPS,
&ctx->vpu_header[1][0],
&ctx->vpu_header_size[1]);
if (ret < 0)
goto out;
		/*
		 * The length of the H.264 headers is variable, so the point
		 * where the coda appends the encoded frame might not be
		 * aligned. If that is the case, a filler NAL must be added
		 * to header 2.
		 */
ctx->vpu_header_size[2] = coda_h264_padding(
(ctx->vpu_header_size[0] +
ctx->vpu_header_size[1]),
ctx->vpu_header[2]);
break;
case V4L2_PIX_FMT_MPEG4:
/*
* Get VOS in the first frame and copy it to an
* intermediate buffer
*/
ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOS,
&ctx->vpu_header[0][0],
&ctx->vpu_header_size[0]);
if (ret < 0)
goto out;
ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VIS,
&ctx->vpu_header[1][0],
&ctx->vpu_header_size[1]);
if (ret < 0)
goto out;
ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOL,
&ctx->vpu_header[2][0],
&ctx->vpu_header_size[2]);
if (ret < 0)
goto out;
break;
default:
/* No more formats need to save headers at the moment */
break;
}
out:
mutex_unlock(&dev->coda_mutex);
return ret;
}
static int coda_prepare_encode(struct coda_ctx *ctx)
{
struct coda_q_data *q_data_src, *q_data_dst;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct coda_dev *dev = ctx->dev;
int force_ipicture;
int quant_param = 0;
u32 pic_stream_buffer_addr, pic_stream_buffer_size;
u32 rot_mode = 0;
u32 dst_fourcc;
u32 reg;
int ret;
ret = coda_enc_param_change(ctx);
if (ret < 0) {
v4l2_warn(&ctx->dev->v4l2_dev, "parameter change failed: %d\n",
ret);
}
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
dst_fourcc = q_data_dst->fourcc;
src_buf->sequence = ctx->osequence;
dst_buf->sequence = ctx->osequence;
ctx->osequence++;
force_ipicture = ctx->params.force_ipicture;
if (force_ipicture)
ctx->params.force_ipicture = false;
else if (ctx->params.gop_size != 0 &&
(src_buf->sequence % ctx->params.gop_size) == 0)
force_ipicture = 1;
	/*
	 * Work around a coda firmware bug that marks only the first
	 * frame as IDR. This is a problem for some decoders that can't
	 * recover when a frame is lost.
	 */
if (!force_ipicture) {
src_buf->flags |= V4L2_BUF_FLAG_PFRAME;
src_buf->flags &= ~V4L2_BUF_FLAG_KEYFRAME;
} else {
src_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
src_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
}
if (dev->devtype->product == CODA_960)
coda_set_gdi_regs(ctx);
	/*
	 * Copy the headers in front of the first frame and of forced
	 * I-frames for H.264 only. For MPEG4 they are already copied by
	 * the CODA.
	 */
if (src_buf->sequence == 0 || force_ipicture) {
pic_stream_buffer_addr =
vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0) +
ctx->vpu_header_size[0] +
ctx->vpu_header_size[1] +
ctx->vpu_header_size[2];
pic_stream_buffer_size = q_data_dst->sizeimage -
ctx->vpu_header_size[0] -
ctx->vpu_header_size[1] -
ctx->vpu_header_size[2];
memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0),
&ctx->vpu_header[0][0], ctx->vpu_header_size[0]);
memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
+ ctx->vpu_header_size[0], &ctx->vpu_header[1][0],
ctx->vpu_header_size[1]);
memcpy(vb2_plane_vaddr(&dst_buf->vb2_buf, 0)
+ ctx->vpu_header_size[0] + ctx->vpu_header_size[1],
&ctx->vpu_header[2][0], ctx->vpu_header_size[2]);
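		/*
		 * The capture buffer now starts with the saved headers
		 * (SPS/PPS/filler NAL for H.264, VOS/VIS/VOL for MPEG4);
		 * the encoded frame is appended right after them.
		 */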
} else {
pic_stream_buffer_addr =
vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
pic_stream_buffer_size = q_data_dst->sizeimage;
}
if (force_ipicture) {
switch (dst_fourcc) {
case V4L2_PIX_FMT_H264:
quant_param = ctx->params.h264_intra_qp;
break;
case V4L2_PIX_FMT_MPEG4:
quant_param = ctx->params.mpeg4_intra_qp;
break;
case V4L2_PIX_FMT_JPEG:
quant_param = 30;
break;
default:
v4l2_warn(&ctx->dev->v4l2_dev,
"cannot set intra qp, fmt not supported\n");
break;
}
} else {
switch (dst_fourcc) {
case V4L2_PIX_FMT_H264:
quant_param = ctx->params.h264_inter_qp;
break;
case V4L2_PIX_FMT_MPEG4:
quant_param = ctx->params.mpeg4_inter_qp;
break;
default:
v4l2_warn(&ctx->dev->v4l2_dev,
"cannot set inter qp, fmt not supported\n");
break;
}
}
/* submit */
if (ctx->params.rot_mode)
rot_mode = CODA_ROT_MIR_ENABLE | ctx->params.rot_mode;
coda_write(dev, rot_mode, CODA_CMD_ENC_PIC_ROT_MODE);
coda_write(dev, quant_param, CODA_CMD_ENC_PIC_QS);
if (dev->devtype->product == CODA_960) {
coda_write(dev, 4/*FIXME: 0*/, CODA9_CMD_ENC_PIC_SRC_INDEX);
coda_write(dev, q_data_src->bytesperline,
CODA9_CMD_ENC_PIC_SRC_STRIDE);
coda_write(dev, 0, CODA9_CMD_ENC_PIC_SUB_FRAME_SYNC);
reg = CODA9_CMD_ENC_PIC_SRC_ADDR_Y;
} else {
reg = CODA_CMD_ENC_PIC_SRC_ADDR_Y;
}
coda_write_base(ctx, q_data_src, src_buf, reg);
coda_write(dev, force_ipicture << 1 & 0x2,
CODA_CMD_ENC_PIC_OPTION);
coda_write(dev, pic_stream_buffer_addr, CODA_CMD_ENC_PIC_BB_START);
coda_write(dev, pic_stream_buffer_size / 1024,
CODA_CMD_ENC_PIC_BB_SIZE);
if (!ctx->streamon_out) {
/* After streamoff on the output side, set stream end flag */
ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
coda_write(dev, ctx->bit_stream_param,
CODA_REG_BIT_BIT_STREAM_PARAM);
}
if (dev->devtype->product != CODA_DX6)
coda_write(dev, ctx->iram_info.axi_sram_use,
CODA7_REG_BIT_AXI_SRAM_USE);
trace_coda_enc_pic_run(ctx, src_buf);
coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
return 0;
}
static char coda_frame_type_char(u32 flags)
{
return (flags & V4L2_BUF_FLAG_KEYFRAME) ? 'I' :
(flags & V4L2_BUF_FLAG_PFRAME) ? 'P' :
(flags & V4L2_BUF_FLAG_BFRAME) ? 'B' : '?';
}
static void coda_finish_encode(struct coda_ctx *ctx)
{
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct coda_dev *dev = ctx->dev;
u32 wr_ptr, start_ptr;
if (ctx->aborting)
return;
/*
* Lock to make sure that an encoder stop command running in parallel
* will either already have marked src_buf as last, or it will wake up
* the capture queue after the buffers are returned.
*/
mutex_lock(&ctx->wakeup_mutex);
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
trace_coda_enc_pic_done(ctx, dst_buf);
/* Get results from the coda */
start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
/* Calculate bytesused field */
if (dst_buf->sequence == 0 ||
src_buf->flags & V4L2_BUF_FLAG_KEYFRAME) {
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
ctx->vpu_header_size[0] +
ctx->vpu_header_size[1] +
ctx->vpu_header_size[2]);
} else {
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr);
}
coda_dbg(1, ctx, "frame size = %u\n", wr_ptr - start_ptr);
coda_read(dev, CODA_RET_ENC_PIC_SLICE_NUM);
coda_read(dev, CODA_RET_ENC_PIC_FLAG);
dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
V4L2_BUF_FLAG_PFRAME |
V4L2_BUF_FLAG_LAST);
if (coda_read(dev, CODA_RET_ENC_PIC_TYPE) == 0)
dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
else
dst_buf->flags |= V4L2_BUF_FLAG_PFRAME;
dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_LAST;
v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_DONE);
mutex_unlock(&ctx->wakeup_mutex);
ctx->gopcounter--;
if (ctx->gopcounter < 0)
ctx->gopcounter = ctx->params.gop_size - 1;
coda_dbg(1, ctx, "job finished: encoded %c frame (%d)%s\n",
coda_frame_type_char(dst_buf->flags), dst_buf->sequence,
(dst_buf->flags & V4L2_BUF_FLAG_LAST) ? " (last)" : "");
}
static void coda_seq_end_work(struct work_struct *work)
{
struct coda_ctx *ctx = container_of(work, struct coda_ctx, seq_end_work);
struct coda_dev *dev = ctx->dev;
mutex_lock(&ctx->buffer_mutex);
mutex_lock(&dev->coda_mutex);
if (ctx->initialized == 0)
goto out;
coda_dbg(1, ctx, "%s: sent command 'SEQ_END' to coda\n", __func__);
if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
v4l2_err(&dev->v4l2_dev,
"CODA_COMMAND_SEQ_END failed\n");
}
/*
* FIXME: Sometimes h.264 encoding fails with 8-byte sequences missing
* from the output stream after the h.264 decoder has run. Resetting the
* hardware after the decoder has finished seems to help.
*/
if (dev->devtype->product == CODA_960)
coda_hw_reset(ctx);
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
coda_free_framebuffers(ctx);
ctx->initialized = 0;
out:
mutex_unlock(&dev->coda_mutex);
mutex_unlock(&ctx->buffer_mutex);
}
static void coda_bit_release(struct coda_ctx *ctx)
{
mutex_lock(&ctx->buffer_mutex);
coda_free_framebuffers(ctx);
coda_free_context_buffers(ctx);
coda_free_bitstream_buffer(ctx);
mutex_unlock(&ctx->buffer_mutex);
}
const struct coda_context_ops coda_bit_encode_ops = {
.queue_init = coda_encoder_queue_init,
.reqbufs = coda_encoder_reqbufs,
.start_streaming = coda_start_encoding,
.prepare_run = coda_prepare_encode,
.finish_run = coda_finish_encode,
.seq_end_work = coda_seq_end_work,
.release = coda_bit_release,
};
/*
* Decoder context operations
*/
static int coda_alloc_bitstream_buffer(struct coda_ctx *ctx,
struct coda_q_data *q_data)
{
if (ctx->bitstream.vaddr)
return 0;
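	/*
	 * kfifo requires a power-of-two buffer size; twice the source
	 * sizeimage leaves headroom for queueing further frames plus
	 * padding.
	 */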
ctx->bitstream.size = roundup_pow_of_two(q_data->sizeimage * 2);
ctx->bitstream.vaddr = dma_alloc_wc(ctx->dev->dev, ctx->bitstream.size,
&ctx->bitstream.paddr, GFP_KERNEL);
if (!ctx->bitstream.vaddr) {
v4l2_err(&ctx->dev->v4l2_dev,
"failed to allocate bitstream ringbuffer");
return -ENOMEM;
}
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
return 0;
}
static void coda_free_bitstream_buffer(struct coda_ctx *ctx)
{
if (ctx->bitstream.vaddr == NULL)
return;
dma_free_wc(ctx->dev->dev, ctx->bitstream.size, ctx->bitstream.vaddr,
ctx->bitstream.paddr);
ctx->bitstream.vaddr = NULL;
kfifo_init(&ctx->bitstream_fifo, NULL, 0);
}
static int coda_decoder_reqbufs(struct coda_ctx *ctx,
struct v4l2_requestbuffers *rb)
{
struct coda_q_data *q_data_src;
int ret;
if (rb->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return 0;
if (rb->count) {
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
ret = coda_alloc_context_buffers(ctx, q_data_src);
if (ret < 0)
return ret;
ret = coda_alloc_bitstream_buffer(ctx, q_data_src);
if (ret < 0) {
coda_free_context_buffers(ctx);
return ret;
}
} else {
coda_free_bitstream_buffer(ctx);
coda_free_context_buffers(ctx);
}
return 0;
}
static bool coda_reorder_enable(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
int profile;
if (dev->devtype->product != CODA_HX4 &&
dev->devtype->product != CODA_7541 &&
dev->devtype->product != CODA_960)
return false;
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG)
return false;
if (ctx->codec->src_fourcc != V4L2_PIX_FMT_H264)
return true;
profile = coda_h264_profile(ctx->params.h264_profile_idc);
if (profile < 0)
v4l2_warn(&dev->v4l2_dev, "Unknown H264 Profile: %u\n",
ctx->params.h264_profile_idc);
/* Baseline profile does not support reordering */
return profile > V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
}
static void coda_decoder_drop_used_metas(struct coda_ctx *ctx)
{
struct coda_buffer_meta *meta, *tmp;
	/*
	 * All metas that end at or before the RD pointer (fifo out)
	 * have now been consumed by the VPU and should be released.
	 */
spin_lock(&ctx->buffer_meta_lock);
list_for_each_entry_safe(meta, tmp, &ctx->buffer_meta_list, list) {
if (ctx->bitstream_fifo.kfifo.out >= meta->end) {
coda_dbg(2, ctx, "releasing meta: seq=%d start=%d end=%d\n",
meta->sequence, meta->start, meta->end);
list_del(&meta->list);
ctx->num_metas--;
ctx->first_frame_sequence++;
kfree(meta);
}
}
spin_unlock(&ctx->buffer_meta_lock);
}
static int __coda_decoder_seq_init(struct coda_ctx *ctx)
{
struct coda_q_data *q_data_src, *q_data_dst;
u32 bitstream_buf, bitstream_size;
struct coda_dev *dev = ctx->dev;
int width, height;
u32 src_fourcc, dst_fourcc;
u32 val;
int ret;
lockdep_assert_held(&dev->coda_mutex);
coda_dbg(1, ctx, "Video Data Order Adapter: %s\n",
ctx->use_vdoa ? "Enabled" : "Disabled");
/* Start decoding */
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
bitstream_buf = ctx->bitstream.paddr;
bitstream_size = ctx->bitstream.size;
src_fourcc = q_data_src->fourcc;
dst_fourcc = q_data_dst->fourcc;
/* Update coda bitstream read and write pointers from kfifo */
coda_kfifo_sync_to_device_full(ctx);
ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
CODA9_FRAME_TILED2LINEAR);
if (dst_fourcc == V4L2_PIX_FMT_NV12 || dst_fourcc == V4L2_PIX_FMT_YUYV)
ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
ctx->frame_mem_ctrl |= (0x3 << 9) |
((ctx->use_vdoa) ? 0 : CODA9_FRAME_TILED2LINEAR);
coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL);
ctx->display_idx = -1;
ctx->frm_dis_flg = 0;
coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
coda_write(dev, bitstream_buf, CODA_CMD_DEC_SEQ_BB_START);
coda_write(dev, bitstream_size / 1024, CODA_CMD_DEC_SEQ_BB_SIZE);
val = 0;
if (coda_reorder_enable(ctx))
val |= CODA_REORDER_ENABLE;
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG)
val |= CODA_NO_INT_ENABLE;
coda_write(dev, val, CODA_CMD_DEC_SEQ_OPTION);
ctx->params.codec_mode = ctx->codec->mode;
if (dev->devtype->product == CODA_960 &&
src_fourcc == V4L2_PIX_FMT_MPEG4)
ctx->params.codec_mode_aux = CODA_MP4_AUX_MPEG4;
else
ctx->params.codec_mode_aux = 0;
if (src_fourcc == V4L2_PIX_FMT_MPEG4) {
coda_write(dev, CODA_MP4_CLASS_MPEG4,
CODA_CMD_DEC_SEQ_MP4_ASP_CLASS);
}
if (src_fourcc == V4L2_PIX_FMT_H264) {
if (dev->devtype->product == CODA_HX4 ||
dev->devtype->product == CODA_7541) {
coda_write(dev, ctx->psbuf.paddr,
CODA_CMD_DEC_SEQ_PS_BB_START);
coda_write(dev, (CODA7_PS_BUF_SIZE / 1024),
CODA_CMD_DEC_SEQ_PS_BB_SIZE);
}
if (dev->devtype->product == CODA_960) {
coda_write(dev, 0, CODA_CMD_DEC_SEQ_X264_MV_EN);
coda_write(dev, 512, CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE);
}
}
if (src_fourcc == V4L2_PIX_FMT_JPEG)
coda_write(dev, 0, CODA_CMD_DEC_SEQ_JPG_THUMB_EN);
if (dev->devtype->product != CODA_960)
coda_write(dev, 0, CODA_CMD_DEC_SEQ_SRC_SIZE);
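	/*
	 * With the escape flag set, SEQ_INIT returns when the bitstream
	 * ring buffer runs dry instead of stalling until more data
	 * arrives.
	 */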
ctx->bit_stream_param = CODA_BIT_DEC_SEQ_INIT_ESCAPE;
ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT);
ctx->bit_stream_param = 0;
if (ret) {
v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
return ret;
}
ctx->sequence_offset = ~0U;
ctx->initialized = 1;
ctx->first_frame_sequence = 0;
/* Update kfifo out pointer from coda bitstream read pointer */
coda_kfifo_sync_from_device(ctx);
	/*
	 * After updating the read pointer, check whether any metas have
	 * been consumed and can be released.
	 */
coda_decoder_drop_used_metas(ctx);
if (coda_read(dev, CODA_RET_DEC_SEQ_SUCCESS) == 0) {
v4l2_err(&dev->v4l2_dev,
"CODA_COMMAND_SEQ_INIT failed, error code = 0x%x\n",
coda_read(dev, CODA_RET_DEC_SEQ_ERR_REASON));
return -EAGAIN;
}
val = coda_read(dev, CODA_RET_DEC_SEQ_SRC_SIZE);
if (dev->devtype->product == CODA_DX6) {
width = (val >> CODADX6_PICWIDTH_OFFSET) & CODADX6_PICWIDTH_MASK;
height = val & CODADX6_PICHEIGHT_MASK;
} else {
width = (val >> CODA7_PICWIDTH_OFFSET) & CODA7_PICWIDTH_MASK;
height = val & CODA7_PICHEIGHT_MASK;
}
if (width > q_data_dst->bytesperline || height > q_data_dst->height) {
v4l2_err(&dev->v4l2_dev, "stream is %dx%d, not %dx%d\n",
width, height, q_data_dst->bytesperline,
q_data_dst->height);
return -EINVAL;
}
width = round_up(width, 16);
height = round_up(height, 16);
coda_dbg(1, ctx, "start decoding: %dx%d\n", width, height);
ctx->num_internal_frames = coda_read(dev, CODA_RET_DEC_SEQ_FRAME_NEED);
/*
* If the VDOA is used, the decoder needs one additional frame,
* because the frames are freed when the next frame is decoded.
* Otherwise there are visible errors in the decoded frames (green
* regions in displayed frames) and a broken order of frames (earlier
* frames are sporadically displayed after later frames).
*/
if (ctx->use_vdoa)
ctx->num_internal_frames += 1;
if (ctx->num_internal_frames > CODA_MAX_FRAMEBUFFERS) {
v4l2_err(&dev->v4l2_dev,
"not enough framebuffers to decode (%d < %d)\n",
CODA_MAX_FRAMEBUFFERS, ctx->num_internal_frames);
return -EINVAL;
}
if (src_fourcc == V4L2_PIX_FMT_H264) {
u32 left_right;
u32 top_bottom;
left_right = coda_read(dev, CODA_RET_DEC_SEQ_CROP_LEFT_RIGHT);
top_bottom = coda_read(dev, CODA_RET_DEC_SEQ_CROP_TOP_BOTTOM);
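		/*
		 * Each crop register packs two 10-bit pixel counts: bits
		 * [19:10] hold the left/top crop and bits [9:0] the
		 * right/bottom crop.
		 */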
q_data_dst->rect.left = (left_right >> 10) & 0x3ff;
q_data_dst->rect.top = (top_bottom >> 10) & 0x3ff;
q_data_dst->rect.width = width - q_data_dst->rect.left -
(left_right & 0x3ff);
q_data_dst->rect.height = height - q_data_dst->rect.top -
(top_bottom & 0x3ff);
}
if (dev->devtype->product != CODA_DX6) {
u8 profile, level;
val = coda_read(dev, CODA7_RET_DEC_SEQ_HEADER_REPORT);
profile = val & 0xff;
level = (val >> 8) & 0x7f;
if (profile || level)
coda_update_profile_level_ctrls(ctx, profile, level);
}
return 0;
}
static void coda_dec_seq_init_work(struct work_struct *work)
{
struct coda_ctx *ctx = container_of(work,
struct coda_ctx, seq_init_work);
struct coda_dev *dev = ctx->dev;
mutex_lock(&ctx->buffer_mutex);
mutex_lock(&dev->coda_mutex);
if (!ctx->initialized)
__coda_decoder_seq_init(ctx);
mutex_unlock(&dev->coda_mutex);
mutex_unlock(&ctx->buffer_mutex);
}
static int __coda_start_decoding(struct coda_ctx *ctx)
{
struct coda_q_data *q_data_src, *q_data_dst;
struct coda_dev *dev = ctx->dev;
u32 src_fourcc, dst_fourcc;
int ret;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
src_fourcc = q_data_src->fourcc;
dst_fourcc = q_data_dst->fourcc;
if (!ctx->initialized) {
ret = __coda_decoder_seq_init(ctx);
if (ret < 0)
return ret;
} else {
ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
CODA9_FRAME_TILED2LINEAR);
if (dst_fourcc == V4L2_PIX_FMT_NV12 || dst_fourcc == V4L2_PIX_FMT_YUYV)
ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
ctx->frame_mem_ctrl |= (0x3 << 9) |
((ctx->use_vdoa) ? 0 : CODA9_FRAME_TILED2LINEAR);
}
coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "failed to allocate framebuffers\n");
return ret;
}
/* Tell the decoder how many frame buffers we allocated. */
coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
coda_write(dev, round_up(q_data_dst->rect.width, 16),
CODA_CMD_SET_FRAME_BUF_STRIDE);
if (dev->devtype->product != CODA_DX6) {
/* Set secondary AXI IRAM */
coda_setup_iram(ctx);
coda_write(dev, ctx->iram_info.buf_bit_use,
CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
coda_write(dev, ctx->iram_info.buf_dbk_y_use,
CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
coda_write(dev, ctx->iram_info.buf_dbk_c_use,
CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
coda_write(dev, ctx->iram_info.buf_ovl_use,
CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
if (dev->devtype->product == CODA_960) {
coda_write(dev, ctx->iram_info.buf_btp_use,
CODA9_CMD_SET_FRAME_AXI_BTP_ADDR);
coda_write(dev, -1, CODA9_CMD_SET_FRAME_DELAY);
coda9_set_frame_cache(ctx, dst_fourcc);
}
}
if (src_fourcc == V4L2_PIX_FMT_H264) {
coda_write(dev, ctx->slicebuf.paddr,
CODA_CMD_SET_FRAME_SLICE_BB_START);
coda_write(dev, ctx->slicebuf.size / 1024,
CODA_CMD_SET_FRAME_SLICE_BB_SIZE);
}
if (dev->devtype->product == CODA_HX4 ||
dev->devtype->product == CODA_7541) {
int max_mb_x = 1920 / 16;
int max_mb_y = 1088 / 16;
int max_mb_num = max_mb_x * max_mb_y;
coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
CODA7_CMD_SET_FRAME_MAX_DEC_SIZE);
} else if (dev->devtype->product == CODA_960) {
int max_mb_x = 1920 / 16;
int max_mb_y = 1088 / 16;
int max_mb_num = max_mb_x * max_mb_y;
coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
CODA9_CMD_SET_FRAME_MAX_DEC_SIZE);
}
if (coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF)) {
v4l2_err(&ctx->dev->v4l2_dev,
"CODA_COMMAND_SET_FRAME_BUF timeout\n");
return -ETIMEDOUT;
}
return 0;
}
static int coda_start_decoding(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
int ret;
mutex_lock(&dev->coda_mutex);
ret = __coda_start_decoding(ctx);
mutex_unlock(&dev->coda_mutex);
return ret;
}
static int coda_prepare_decode(struct coda_ctx *ctx)
{
struct vb2_v4l2_buffer *dst_buf;
struct coda_dev *dev = ctx->dev;
struct coda_q_data *q_data_dst;
struct coda_buffer_meta *meta;
u32 rot_mode = 0;
u32 reg_addr, reg_stride;
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
/* Try to copy source buffer contents into the bitstream ringbuffer */
mutex_lock(&ctx->bitstream_mutex);
coda_fill_bitstream(ctx, NULL);
mutex_unlock(&ctx->bitstream_mutex);
if (coda_get_bitstream_payload(ctx) < 512 &&
(!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
coda_dbg(1, ctx, "bitstream payload: %d, skipping\n",
coda_get_bitstream_payload(ctx));
return -EAGAIN;
}
/* Run coda_start_decoding (again) if not yet initialized */
if (!ctx->initialized) {
int ret = __coda_start_decoding(ctx);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "failed to start decoding\n");
return -EAGAIN;
} else {
ctx->initialized = 1;
}
}
if (dev->devtype->product == CODA_960)
coda_set_gdi_regs(ctx);
if (ctx->use_vdoa &&
ctx->display_idx >= 0 &&
ctx->display_idx < ctx->num_internal_frames) {
vdoa_device_run(ctx->vdoa,
vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0),
ctx->internal_frames[ctx->display_idx].buf.paddr);
} else {
if (dev->devtype->product == CODA_960) {
/*
* It was previously assumed that the CODA960 has an
* internal list of 64 buffer entries that contains
* both the registered internal frame buffers as well
* as the rotator buffer output, and that the ROT_INDEX
* register must be set to a value between the last
* internal frame buffers' index and 64.
* At least on firmware version 3.1.1 it turns out that
* setting ROT_INDEX to any value >= 32 causes CODA
* hangups that it cannot recover from with the SRC VPU
* reset.
* It does appear to work however, to just set it to a
* fixed value in the [ctx->num_internal_frames, 31]
* range, for example CODA_MAX_FRAMEBUFFERS.
*/
coda_write(dev, CODA_MAX_FRAMEBUFFERS,
CODA9_CMD_DEC_PIC_ROT_INDEX);
reg_addr = CODA9_CMD_DEC_PIC_ROT_ADDR_Y;
reg_stride = CODA9_CMD_DEC_PIC_ROT_STRIDE;
} else {
reg_addr = CODA_CMD_DEC_PIC_ROT_ADDR_Y;
reg_stride = CODA_CMD_DEC_PIC_ROT_STRIDE;
}
coda_write_base(ctx, q_data_dst, dst_buf, reg_addr);
coda_write(dev, q_data_dst->bytesperline, reg_stride);
rot_mode = CODA_ROT_MIR_ENABLE | ctx->params.rot_mode;
}
coda_write(dev, rot_mode, CODA_CMD_DEC_PIC_ROT_MODE);
switch (dev->devtype->product) {
case CODA_DX6:
/* TBD */
case CODA_HX4:
case CODA_7541:
coda_write(dev, CODA_PRE_SCAN_EN, CODA_CMD_DEC_PIC_OPTION);
break;
case CODA_960:
/* 'hardcode to use interrupt disable mode'? */
coda_write(dev, (1 << 10), CODA_CMD_DEC_PIC_OPTION);
break;
}
coda_write(dev, 0, CODA_CMD_DEC_PIC_SKIP_NUM);
coda_write(dev, 0, CODA_CMD_DEC_PIC_BB_START);
coda_write(dev, 0, CODA_CMD_DEC_PIC_START_BYTE);
if (dev->devtype->product != CODA_DX6)
coda_write(dev, ctx->iram_info.axi_sram_use,
CODA7_REG_BIT_AXI_SRAM_USE);
spin_lock(&ctx->buffer_meta_lock);
meta = list_first_entry_or_null(&ctx->buffer_meta_list,
struct coda_buffer_meta, list);
if (meta && ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) {
/* If this is the last buffer in the bitstream, add padding */
if (meta->end == ctx->bitstream_fifo.kfifo.in) {
static unsigned char buf[512];
unsigned int pad;
/* Pad to multiple of 256 and then add 256 more */
pad = ((0 - meta->end) & 0xff) + 256;
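/*
 * Worked example: with meta->end == 0x1234, (0 - 0x1234) & 0xff is
 * 204 bytes up to the next multiple of 256, so pad = 204 + 256 = 460
 * bytes of 0xff padding.
 */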
memset(buf, 0xff, sizeof(buf));
kfifo_in(&ctx->bitstream_fifo, buf, pad);
}
}
spin_unlock(&ctx->buffer_meta_lock);
coda_kfifo_sync_to_device_full(ctx);
/* Clear decode success flag */
coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
/* Clear error return value */
coda_write(dev, 0, CODA_RET_DEC_PIC_ERR_MB);
trace_coda_dec_pic_run(ctx, meta);
coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
return 0;
}
static void coda_finish_decode(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
struct coda_q_data *q_data_src;
struct coda_q_data *q_data_dst;
struct vb2_v4l2_buffer *dst_buf;
struct coda_buffer_meta *meta;
int width, height;
int decoded_idx;
int display_idx;
struct coda_internal_frame *decoded_frame = NULL;
u32 src_fourcc;
int success;
u32 err_mb;
int err_vdoa = 0;
u32 val;
if (ctx->aborting)
return;
/* Update kfifo out pointer from coda bitstream read pointer */
coda_kfifo_sync_from_device(ctx);
/*
* in stream-end mode, the read pointer can overshoot the write pointer
* by up to 512 bytes
*/
if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) {
if (coda_get_bitstream_payload(ctx) >= ctx->bitstream.size - 512)
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
}
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
src_fourcc = q_data_src->fourcc;
val = coda_read(dev, CODA_RET_DEC_PIC_SUCCESS);
if (val != 1)
pr_err("DEC_PIC_SUCCESS = %d\n", val);
success = val & 0x1;
if (!success)
v4l2_err(&dev->v4l2_dev, "decode failed\n");
if (src_fourcc == V4L2_PIX_FMT_H264) {
if (val & (1 << 3))
v4l2_err(&dev->v4l2_dev,
"insufficient PS buffer space (%d bytes)\n",
ctx->psbuf.size);
if (val & (1 << 2))
v4l2_err(&dev->v4l2_dev,
"insufficient slice buffer space (%d bytes)\n",
ctx->slicebuf.size);
}
val = coda_read(dev, CODA_RET_DEC_PIC_SIZE);
width = (val >> 16) & 0xffff;
height = val & 0xffff;
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
/* frame crop information */
if (src_fourcc == V4L2_PIX_FMT_H264) {
u32 left_right;
u32 top_bottom;
left_right = coda_read(dev, CODA_RET_DEC_PIC_CROP_LEFT_RIGHT);
top_bottom = coda_read(dev, CODA_RET_DEC_PIC_CROP_TOP_BOTTOM);
if (left_right == 0xffffffff && top_bottom == 0xffffffff) {
/* Keep current crop information */
} else {
struct v4l2_rect *rect = &q_data_dst->rect;
rect->left = left_right >> 16 & 0xffff;
rect->top = top_bottom >> 16 & 0xffff;
rect->width = width - rect->left -
(left_right & 0xffff);
rect->height = height - rect->top -
(top_bottom & 0xffff);
}
} else {
/* no cropping */
}
err_mb = coda_read(dev, CODA_RET_DEC_PIC_ERR_MB);
if (err_mb > 0) {
if (__ratelimit(&dev->mb_err_rs))
coda_dbg(1, ctx, "errors in %d macroblocks\n", err_mb);
v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl,
v4l2_ctrl_g_ctrl(ctx->mb_err_cnt_ctrl) + err_mb);
}
if (dev->devtype->product == CODA_HX4 ||
dev->devtype->product == CODA_7541) {
val = coda_read(dev, CODA_RET_DEC_PIC_OPTION);
if (val == 0) {
/* not enough bitstream data */
coda_dbg(1, ctx, "prescan failed: %d\n", val);
ctx->hold = true;
return;
}
}
/* Wait until the VDOA finished writing the previous display frame */
if (ctx->use_vdoa &&
ctx->display_idx >= 0 &&
ctx->display_idx < ctx->num_internal_frames) {
err_vdoa = vdoa_wait_for_completion(ctx->vdoa);
}
ctx->frm_dis_flg = coda_read(dev,
CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
/* The previous display frame was copied out and can be overwritten */
if (ctx->display_idx >= 0 &&
ctx->display_idx < ctx->num_internal_frames) {
ctx->frm_dis_flg &= ~(1 << ctx->display_idx);
coda_write(dev, ctx->frm_dis_flg,
CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
}
/*
* The index of the last decoded frame, not necessarily in
* display order, and the index of the next display frame.
* The latter could have been decoded in a previous run.
*/
decoded_idx = coda_read(dev, CODA_RET_DEC_PIC_CUR_IDX);
display_idx = coda_read(dev, CODA_RET_DEC_PIC_FRAME_IDX);
if (decoded_idx == -1) {
/* no frame was decoded, but we might have a display frame */
if (display_idx >= 0 && display_idx < ctx->num_internal_frames)
ctx->sequence_offset++;
else if (ctx->display_idx < 0)
ctx->hold = true;
} else if (decoded_idx == -2) {
if (ctx->display_idx >= 0 &&
ctx->display_idx < ctx->num_internal_frames)
ctx->sequence_offset++;
/* no frame was decoded, we still return remaining buffers */
} else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
v4l2_err(&dev->v4l2_dev,
"decoded frame index out of range: %d\n", decoded_idx);
} else {
int sequence;
decoded_frame = &ctx->internal_frames[decoded_idx];
val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM);
if (ctx->sequence_offset == -1)
ctx->sequence_offset = val;
sequence = val + ctx->first_frame_sequence
- ctx->sequence_offset;
spin_lock(&ctx->buffer_meta_lock);
if (!list_empty(&ctx->buffer_meta_list)) {
meta = list_first_entry(&ctx->buffer_meta_list,
struct coda_buffer_meta, list);
list_del(&meta->list);
ctx->num_metas--;
spin_unlock(&ctx->buffer_meta_lock);
/*
* Clamp counters to 16 bits for comparison, as the HW
* counter rolls over at this point for h.264. This
* may be different for other formats, but using 16 bits
* should be enough to detect most errors and saves us
* from doing different things based on the format.
*/
if ((sequence & 0xffff) != (meta->sequence & 0xffff)) {
v4l2_err(&dev->v4l2_dev,
"sequence number mismatch (%d(%d) != %d)\n",
sequence, ctx->sequence_offset,
meta->sequence);
}
decoded_frame->meta = *meta;
kfree(meta);
} else {
spin_unlock(&ctx->buffer_meta_lock);
v4l2_err(&dev->v4l2_dev, "empty timestamp list!\n");
memset(&decoded_frame->meta, 0,
sizeof(struct coda_buffer_meta));
decoded_frame->meta.sequence = sequence;
decoded_frame->meta.last = false;
ctx->sequence_offset++;
}
trace_coda_dec_pic_done(ctx, &decoded_frame->meta);
val = coda_read(dev, CODA_RET_DEC_PIC_TYPE) & 0x7;
decoded_frame->type = (val == 0) ? V4L2_BUF_FLAG_KEYFRAME :
(val == 1) ? V4L2_BUF_FLAG_PFRAME :
V4L2_BUF_FLAG_BFRAME;
decoded_frame->error = err_mb;
}
if (display_idx == -1) {
/*
* no more frames to be decoded, but there could still
* be rotator output to dequeue
*/
ctx->hold = true;
} else if (display_idx == -3) {
/* possibly prescan failure */
} else if (display_idx < 0 || display_idx >= ctx->num_internal_frames) {
v4l2_err(&dev->v4l2_dev,
"presentation frame index out of range: %d\n",
display_idx);
}
/* If a frame was copied out, return it */
if (ctx->display_idx >= 0 &&
ctx->display_idx < ctx->num_internal_frames) {
struct coda_internal_frame *ready_frame;
ready_frame = &ctx->internal_frames[ctx->display_idx];
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
dst_buf->sequence = ctx->osequence++;
dst_buf->field = V4L2_FIELD_NONE;
dst_buf->flags &= ~(V4L2_BUF_FLAG_KEYFRAME |
V4L2_BUF_FLAG_PFRAME |
V4L2_BUF_FLAG_BFRAME);
dst_buf->flags |= ready_frame->type;
meta = &ready_frame->meta;
if (meta->last && !coda_reorder_enable(ctx)) {
/*
* If this was the last decoded frame, and reordering
* is disabled, this will be the last display frame.
*/
coda_dbg(1, ctx, "last meta, marking as last frame\n");
dst_buf->flags |= V4L2_BUF_FLAG_LAST;
} else if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG &&
display_idx == -1) {
/*
* If there is no designated presentation frame anymore,
* this frame has to be the last one.
*/
coda_dbg(1, ctx,
"no more frames to return, marking as last frame\n");
dst_buf->flags |= V4L2_BUF_FLAG_LAST;
}
dst_buf->timecode = meta->timecode;
dst_buf->vb2_buf.timestamp = meta->timestamp;
trace_coda_dec_rot_done(ctx, dst_buf, meta);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
q_data_dst->sizeimage);
if (ready_frame->error || err_vdoa)
coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR);
else
coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_DONE);
if (decoded_frame) {
coda_dbg(1, ctx, "job finished: decoded %c frame %u, returned %c frame %u (%u/%u)%s\n",
coda_frame_type_char(decoded_frame->type),
decoded_frame->meta.sequence,
coda_frame_type_char(dst_buf->flags),
ready_frame->meta.sequence,
dst_buf->sequence, ctx->qsequence,
(dst_buf->flags & V4L2_BUF_FLAG_LAST) ?
" (last)" : "");
} else {
coda_dbg(1, ctx, "job finished: no frame decoded (%d), returned %c frame %u (%u/%u)%s\n",
decoded_idx,
coda_frame_type_char(dst_buf->flags),
ready_frame->meta.sequence,
dst_buf->sequence, ctx->qsequence,
(dst_buf->flags & V4L2_BUF_FLAG_LAST) ?
" (last)" : "");
}
} else {
if (decoded_frame) {
coda_dbg(1, ctx, "job finished: decoded %c frame %u, no frame returned (%d)\n",
coda_frame_type_char(decoded_frame->type),
decoded_frame->meta.sequence,
ctx->display_idx);
} else {
coda_dbg(1, ctx, "job finished: no frame decoded (%d) or returned (%d)\n",
decoded_idx, ctx->display_idx);
}
}
/* The rotator will copy the current display frame next time */
ctx->display_idx = display_idx;
/*
* The current decode run might have brought the bitstream fill level
* below the size where we can start the next decode run. As userspace
* might have filled the output queue completely and might thus be
* blocked, we can't rely on the next qbuf to trigger the bitstream
* refill. Check if we have data to refill the bitstream now.
*/
mutex_lock(&ctx->bitstream_mutex);
coda_fill_bitstream(ctx, NULL);
mutex_unlock(&ctx->bitstream_mutex);
}
static void coda_decode_timeout(struct coda_ctx *ctx)
{
struct vb2_v4l2_buffer *dst_buf;
/*
* For now this only handles the case where we would deadlock with
* userspace, i.e. userspace issued DEC_CMD_STOP and waits for EOS,
* but after a failed decode run we would hold the context and wait for
* userspace to queue more buffers.
*/
if (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))
return;
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
dst_buf->sequence = ctx->qsequence - 1;
coda_m2m_buf_done(ctx, dst_buf, VB2_BUF_STATE_ERROR);
}
const struct coda_context_ops coda_bit_decode_ops = {
.queue_init = coda_decoder_queue_init,
.reqbufs = coda_decoder_reqbufs,
.start_streaming = coda_start_decoding,
.prepare_run = coda_prepare_decode,
.finish_run = coda_finish_decode,
.run_timeout = coda_decode_timeout,
.seq_init_work = coda_dec_seq_init_work,
.seq_end_work = coda_seq_end_work,
.release = coda_bit_release,
};
irqreturn_t coda_irq_handler(int irq, void *data)
{
struct coda_dev *dev = data;
struct coda_ctx *ctx;
/* read status register to acknowledge the IRQ */
coda_read(dev, CODA_REG_BIT_INT_STATUS);
coda_write(dev, 0, CODA_REG_BIT_INT_REASON);
coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
CODA_REG_BIT_INT_CLEAR);
ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
if (ctx == NULL) {
v4l2_err(&dev->v4l2_dev,
"Instance released before the end of transaction\n");
return IRQ_HANDLED;
}
trace_coda_bit_done(ctx);
if (ctx->aborting) {
coda_dbg(1, ctx, "task has been aborted\n");
}
if (coda_isbusy(ctx->dev)) {
coda_dbg(1, ctx, "coda is still busy!!!!\n");
return IRQ_NONE;
}
complete(&ctx->completion);
return IRQ_HANDLED;
}
| linux-master | drivers/media/platform/chips-media/coda-bit.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Coda multi-standard codec IP - MPEG-4 helper functions
*
* Copyright (C) 2019 Pengutronix, Philipp Zabel
*/
#include <linux/kernel.h>
#include <linux/videodev2.h>
#include "coda.h"
int coda_mpeg4_profile(int profile_idc)
{
switch (profile_idc) {
case 0:
return V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE;
case 15:
return V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE;
case 2:
return V4L2_MPEG_VIDEO_MPEG4_PROFILE_CORE;
case 1:
return V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE_SCALABLE;
case 11:
return V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY;
default:
return -EINVAL;
}
}
int coda_mpeg4_level(int level_idc)
{
switch (level_idc) {
case 0:
return V4L2_MPEG_VIDEO_MPEG4_LEVEL_0;
case 1:
return V4L2_MPEG_VIDEO_MPEG4_LEVEL_1;
case 2:
return V4L2_MPEG_VIDEO_MPEG4_LEVEL_2;
case 3:
return V4L2_MPEG_VIDEO_MPEG4_LEVEL_3;
case 4:
return V4L2_MPEG_VIDEO_MPEG4_LEVEL_4;
case 5:
return V4L2_MPEG_VIDEO_MPEG4_LEVEL_5;
default:
return -EINVAL;
}
}
/*
* Check if the buffer starts with the MPEG-4 visual object sequence and visual
* object headers, for example:
*
* 00 00 01 b0 f1
* 00 00 01 b5 a9 13 00 00 01 00 00 00 01 20 08
* d4 8d 88 00 f5 04 04 08 14 30 3f
*
* Returns the detected header size in bytes or 0.
*/
u32 coda_mpeg4_parse_headers(struct coda_ctx *ctx, u8 *buf, u32 size)
{
static const u8 vos_start[4] = { 0x00, 0x00, 0x01, 0xb0 };
static const union {
u8 vo_start[4];
u8 start_code_prefix[3];
} u = { { 0x00, 0x00, 0x01, 0xb5 } };
if (size < 30 ||
memcmp(buf, vos_start, 4) != 0 ||
memcmp(buf + 5, u.vo_start, 4) != 0)
return 0;
if (size == 30 ||
(size >= 33 && memcmp(buf + 30, u.start_code_prefix, 3) == 0))
return 30;
if (size == 31 ||
(size >= 34 && memcmp(buf + 31, u.start_code_prefix, 3) == 0))
return 31;
if (size == 32 ||
(size >= 35 && memcmp(buf + 32, u.start_code_prefix, 3) == 0))
return 32;
return 0;
}
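/*
 * With the example bytes in the comment above coda_mpeg4_parse_headers()
 * (5 + 15 + 11 = 31 bytes), the function returns 31, either because the
 * buffer ends right after the headers or because another start code
 * prefix (00 00 01) immediately follows them.
 */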
| linux-master | drivers/media/platform/chips-media/coda-mpeg4.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Coda multi-standard codec IP - H.264 helper functions
*
* Copyright (C) 2012 Vista Silicon S.L.
* Javier Martin, <[email protected]>
* Xavier Duret
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/videodev2.h>
#include "coda.h"
static const u8 coda_filler_size[8] = { 0, 7, 14, 13, 12, 11, 10, 9 };
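/*
 * Scan for a four-byte Annex B start code (00 00 00 01) by sliding a 32-bit
 * accumulator over the buffer. Returns a pointer to the byte following the
 * start code, i.e. the NAL header, or NULL if no start code is found.
 */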
static const u8 *coda_find_nal_header(const u8 *buf, const u8 *end)
{
u32 val = 0xffffffff;
do {
val = val << 8 | *buf++;
if (buf >= end)
return NULL;
} while (val != 0x00000001);
return buf;
}
int coda_sps_parse_profile(struct coda_ctx *ctx, struct vb2_buffer *vb)
{
const u8 *buf = vb2_plane_vaddr(vb, 0);
const u8 *end = buf + vb2_get_plane_payload(vb, 0);
/* Find SPS header */
do {
buf = coda_find_nal_header(buf, end);
if (!buf)
return -EINVAL;
} while ((*buf++ & 0x1f) != 0x7);
ctx->params.h264_profile_idc = buf[0];
ctx->params.h264_level_idc = buf[2];
return 0;
}
int coda_h264_filler_nal(int size, char *p)
{
if (size < 6)
return -EINVAL;
p[0] = 0x00;
p[1] = 0x00;
p[2] = 0x00;
p[3] = 0x01;
p[4] = 0x0c;
memset(p + 5, 0xff, size - 6);
/* Add rbsp stop bit and trailing at the end */
p[size - 1] = 0x80;
return 0;
}
int coda_h264_padding(int size, char *p)
{
int nal_size;
int diff;
diff = size - (size & ~0x7);
if (diff == 0)
return 0;
nal_size = coda_filler_size[diff];
coda_h264_filler_nal(nal_size, p);
return nal_size;
}
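/*
 * Worked example: for size = 11, diff = 11 % 8 = 3 and coda_filler_size[3]
 * = 13, so 11 + 13 = 24 pads the stream to the next 8-byte boundary. The
 * table apparently avoids filler NALs shorter than 7 bytes, jumping a full
 * 8 bytes further where the minimal filler would be too short, e.g.
 * diff = 2 maps to 14 rather than 6.
 */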
int coda_h264_profile(int profile_idc)
{
switch (profile_idc) {
case 66: return V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE;
case 77: return V4L2_MPEG_VIDEO_H264_PROFILE_MAIN;
case 88: return V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED;
case 100: return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH;
default: return -EINVAL;
}
}
int coda_h264_level(int level_idc)
{
switch (level_idc) {
case 10: return V4L2_MPEG_VIDEO_H264_LEVEL_1_0;
case 9: return V4L2_MPEG_VIDEO_H264_LEVEL_1B;
case 11: return V4L2_MPEG_VIDEO_H264_LEVEL_1_1;
case 12: return V4L2_MPEG_VIDEO_H264_LEVEL_1_2;
case 13: return V4L2_MPEG_VIDEO_H264_LEVEL_1_3;
case 20: return V4L2_MPEG_VIDEO_H264_LEVEL_2_0;
case 21: return V4L2_MPEG_VIDEO_H264_LEVEL_2_1;
case 22: return V4L2_MPEG_VIDEO_H264_LEVEL_2_2;
case 30: return V4L2_MPEG_VIDEO_H264_LEVEL_3_0;
case 31: return V4L2_MPEG_VIDEO_H264_LEVEL_3_1;
case 32: return V4L2_MPEG_VIDEO_H264_LEVEL_3_2;
case 40: return V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
case 41: return V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
case 42: return V4L2_MPEG_VIDEO_H264_LEVEL_4_2;
case 50: return V4L2_MPEG_VIDEO_H264_LEVEL_5_0;
case 51: return V4L2_MPEG_VIDEO_H264_LEVEL_5_1;
default: return -EINVAL;
}
}
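/*
 * Bit-level reader/writer state for an RBSP (raw byte sequence payload):
 * @buf points to the payload, @size is its length in bytes and @pos is the
 * current read/write position in bits.
 */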
struct rbsp {
char *buf;
int size;
int pos;
};
static inline int rbsp_read_bit(struct rbsp *rbsp)
{
int shift = 7 - (rbsp->pos % 8);
int ofs = rbsp->pos++ / 8;
if (ofs >= rbsp->size)
return -EINVAL;
return (rbsp->buf[ofs] >> shift) & 1;
}
static inline int rbsp_write_bit(struct rbsp *rbsp, int bit)
{
int shift = 7 - (rbsp->pos % 8);
int ofs = rbsp->pos++ / 8;
if (ofs >= rbsp->size)
return -EINVAL;
rbsp->buf[ofs] &= ~(1 << shift);
rbsp->buf[ofs] |= bit << shift;
return 0;
}
static inline int rbsp_read_bits(struct rbsp *rbsp, int num, int *val)
{
int i, ret;
int tmp = 0;
if (num > 32)
return -EINVAL;
for (i = 0; i < num; i++) {
ret = rbsp_read_bit(rbsp);
if (ret < 0)
return ret;
tmp |= ret << (num - i - 1);
}
if (val)
*val = tmp;
return 0;
}
static int rbsp_write_bits(struct rbsp *rbsp, int num, int value)
{
int ret;
while (num--) {
ret = rbsp_write_bit(rbsp, (value >> num) & 1);
if (ret)
return ret;
}
return 0;
}
static int rbsp_read_uev(struct rbsp *rbsp, unsigned int *val)
{
int leading_zero_bits = 0;
unsigned int tmp = 0;
int ret;
while ((ret = rbsp_read_bit(rbsp)) == 0)
leading_zero_bits++;
if (ret < 0)
return ret;
if (leading_zero_bits > 0) {
ret = rbsp_read_bits(rbsp, leading_zero_bits, &tmp);
if (ret)
return ret;
}
if (val)
*val = (1 << leading_zero_bits) - 1 + tmp;
return 0;
}
static int rbsp_write_uev(struct rbsp *rbsp, unsigned int value)
{
int i;
int ret;
int tmp = value + 1;
int leading_zero_bits = fls(tmp) - 1;
for (i = 0; i < leading_zero_bits; i++) {
ret = rbsp_write_bit(rbsp, 0);
if (ret)
return ret;
}
return rbsp_write_bits(rbsp, leading_zero_bits + 1, tmp);
}
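/*
 * Worked ue(v) example: value 2 gives tmp = 3 and fls(3) - 1 = 1 leading
 * zero bit, so rbsp_write_uev() emits "0" followed by the two bits of tmp,
 * i.e. the codeword 011. rbsp_read_uev() reverses this: one leading zero,
 * one further bit read (tmp = 1), result (1 << 1) - 1 + 1 = 2.
 */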
static int rbsp_read_sev(struct rbsp *rbsp, int *val)
{
unsigned int tmp;
int ret;
ret = rbsp_read_uev(rbsp, &tmp);
if (ret)
return ret;
if (val) {
if (tmp & 1)
*val = (tmp + 1) / 2;
else
*val = -(tmp / 2);
}
return 0;
}
/**
* coda_h264_sps_fixup - fixes frame cropping values in h.264 SPS
* @ctx: encoder context
* @width: visible width
* @height: visible height
* @buf: buffer containing h.264 SPS RBSP, starting with NAL header
* @size: modified RBSP size return value
* @max_size: available size in buf
*
* Rewrites the frame cropping values in an h.264 SPS RBSP correctly for the
* given visible width and height.
*/
int coda_h264_sps_fixup(struct coda_ctx *ctx, int width, int height, char *buf,
int *size, int max_size)
{
int profile_idc;
unsigned int pic_order_cnt_type;
unsigned int pic_width_in_mbs_minus1, pic_height_in_map_units_minus1;
int frame_mbs_only_flag, frame_cropping_flag;
int vui_parameters_present_flag;
unsigned int crop_right, crop_bottom;
struct rbsp sps;
int pos;
int ret;
if (*size < 8 || *size >= max_size)
return -EINVAL;
sps.buf = buf + 5; /* Skip NAL header */
sps.size = *size - 5;
profile_idc = sps.buf[0];
/* Skip constraint_set[0-5]_flag, reserved_zero_2bits */
/* Skip level_idc */
sps.pos = 24;
/* seq_parameter_set_id */
ret = rbsp_read_uev(&sps, NULL);
if (ret)
return ret;
if (profile_idc == 100 || profile_idc == 110 || profile_idc == 122 ||
profile_idc == 244 || profile_idc == 44 || profile_idc == 83 ||
profile_idc == 86 || profile_idc == 118 || profile_idc == 128 ||
profile_idc == 138 || profile_idc == 139 || profile_idc == 134 ||
profile_idc == 135) {
dev_err(ctx->fh.vdev->dev_parent,
"%s: Handling profile_idc %d not implemented\n",
__func__, profile_idc);
return -EINVAL;
}
/* log2_max_frame_num_minus4 */
ret = rbsp_read_uev(&sps, NULL);
if (ret)
return ret;
ret = rbsp_read_uev(&sps, &pic_order_cnt_type);
if (ret)
return ret;
if (pic_order_cnt_type == 0) {
/* log2_max_pic_order_cnt_lsb_minus4 */
ret = rbsp_read_uev(&sps, NULL);
if (ret)
return ret;
} else if (pic_order_cnt_type == 1) {
unsigned int i, num_ref_frames_in_pic_order_cnt_cycle;
/* delta_pic_order_always_zero_flag */
ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
/* offset_for_non_ref_pic */
ret = rbsp_read_sev(&sps, NULL);
if (ret)
return ret;
/* offset_for_top_to_bottom_field */
ret = rbsp_read_sev(&sps, NULL);
if (ret)
return ret;
ret = rbsp_read_uev(&sps,
&num_ref_frames_in_pic_order_cnt_cycle);
if (ret)
return ret;
for (i = 0; i < num_ref_frames_in_pic_order_cnt_cycle; i++) {
/* offset_for_ref_frame */
ret = rbsp_read_sev(&sps, NULL);
if (ret)
return ret;
}
}
/* max_num_ref_frames */
ret = rbsp_read_uev(&sps, NULL);
if (ret)
return ret;
/* gaps_in_frame_num_value_allowed_flag */
ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
ret = rbsp_read_uev(&sps, &pic_width_in_mbs_minus1);
if (ret)
return ret;
ret = rbsp_read_uev(&sps, &pic_height_in_map_units_minus1);
if (ret)
return ret;
frame_mbs_only_flag = ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
if (!frame_mbs_only_flag) {
/* mb_adaptive_frame_field_flag */
ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
}
/* direct_8x8_inference_flag */
ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
/* Mark position of the frame cropping flag */
pos = sps.pos;
frame_cropping_flag = ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
if (frame_cropping_flag) {
unsigned int crop_left, crop_top;
ret = rbsp_read_uev(&sps, &crop_left);
if (ret)
return ret;
ret = rbsp_read_uev(&sps, &crop_right);
if (ret)
return ret;
ret = rbsp_read_uev(&sps, &crop_top);
if (ret)
return ret;
ret = rbsp_read_uev(&sps, &crop_bottom);
if (ret)
return ret;
}
vui_parameters_present_flag = ret = rbsp_read_bit(&sps);
if (ret < 0)
return ret;
if (vui_parameters_present_flag) {
dev_err(ctx->fh.vdev->dev_parent,
"%s: Handling vui_parameters not implemented\n",
__func__);
return -EINVAL;
}
crop_right = round_up(width, 16) - width;
crop_bottom = round_up(height, 16) - height;
crop_right /= 2;
if (frame_mbs_only_flag)
crop_bottom /= 2;
else
crop_bottom /= 4;
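/*
 * Example: a 1920x1080 stream is coded as 1920x1088 luma lines, so
 * crop_bottom = 8 lines, i.e. 4 cropping units for frame-coded 4:2:0
 * content (the offsets are expressed in 2-line units for frames and
 * 4-line units for field-coded streams).
 */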
sps.size = max_size - 5;
sps.pos = pos;
frame_cropping_flag = 1;
ret = rbsp_write_bit(&sps, frame_cropping_flag);
if (ret)
return ret;
ret = rbsp_write_uev(&sps, 0); /* crop_left */
if (ret)
return ret;
ret = rbsp_write_uev(&sps, crop_right);
if (ret)
return ret;
ret = rbsp_write_uev(&sps, 0); /* crop_top */
if (ret)
return ret;
ret = rbsp_write_uev(&sps, crop_bottom);
if (ret)
return ret;
ret = rbsp_write_bit(&sps, 0); /* vui_parameters_present_flag */
if (ret)
return ret;
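/* rbsp_stop_one_bit: start of the rbsp_trailing_bits() after the SPS */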
ret = rbsp_write_bit(&sps, 1);
if (ret)
return ret;
*size = 5 + DIV_ROUND_UP(sps.pos, 8);
return 0;
}
| linux-master | drivers/media/platform/chips-media/coda-h264.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Coda multi-standard codec IP
*
* Copyright (C) 2014 Philipp Zabel, Pengutronix
*/
#include <linux/bitops.h>
#include "coda.h"
#define XY2_INVERT BIT(7)
#define XY2_ZERO BIT(6)
#define XY2_TB_XOR BIT(5)
#define XY2_XYSEL BIT(4)
#define XY2_Y (1 << 4)
#define XY2_X (0 << 4)
#define XY2(luma_sel, luma_bit, chroma_sel, chroma_bit) \
(((XY2_##luma_sel) | (luma_bit)) << 8 | \
(XY2_##chroma_sel) | (chroma_bit))
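/*
 * For example, XY2(Y, 3, X, 3) expands to ((XY2_Y | 3) << 8) | (XY2_X | 3)
 * = 0x1303: the luma address bit selects Y bit 3, the chroma address bit
 * selects X bit 3.
 */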
static const u16 xy2ca_zero_map[16] = {
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
};
static const u16 xy2ca_tiled_map[16] = {
XY2(Y, 0, Y, 0),
XY2(Y, 1, Y, 1),
XY2(Y, 2, Y, 2),
XY2(Y, 3, X, 3),
XY2(X, 3, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
XY2(ZERO, 0, ZERO, 0),
};
/*
* RA[15:0], CA[15:8] are hardwired to contain the 24-bit macroblock
* start offset (macroblock size is 16x16 for luma, 16x8 for chroma).
* Bits CA[4:0] are set using XY2CA above. BA[3:0] seems to be unused.
*/
#define RBC_CA (0 << 4)
#define RBC_BA (1 << 4)
#define RBC_RA (2 << 4)
#define RBC_ZERO (3 << 4)
#define RBC(luma_sel, luma_bit, chroma_sel, chroma_bit) \
(((RBC_##luma_sel) | (luma_bit)) << 6 | \
(RBC_##chroma_sel) | (chroma_bit))
static const u16 rbc2axi_tiled_map[32] = {
RBC(ZERO, 0, ZERO, 0),
RBC(ZERO, 0, ZERO, 0),
RBC(ZERO, 0, ZERO, 0),
RBC(CA, 0, CA, 0),
RBC(CA, 1, CA, 1),
RBC(CA, 2, CA, 2),
RBC(CA, 3, CA, 3),
RBC(CA, 4, CA, 8),
RBC(CA, 8, CA, 9),
RBC(CA, 9, CA, 10),
RBC(CA, 10, CA, 11),
RBC(CA, 11, CA, 12),
RBC(CA, 12, CA, 13),
RBC(CA, 13, CA, 14),
RBC(CA, 14, CA, 15),
RBC(CA, 15, RA, 0),
RBC(RA, 0, RA, 1),
RBC(RA, 1, RA, 2),
RBC(RA, 2, RA, 3),
RBC(RA, 3, RA, 4),
RBC(RA, 4, RA, 5),
RBC(RA, 5, RA, 6),
RBC(RA, 6, RA, 7),
RBC(RA, 7, RA, 8),
RBC(RA, 8, RA, 9),
RBC(RA, 9, RA, 10),
RBC(RA, 10, RA, 11),
RBC(RA, 11, RA, 12),
RBC(RA, 12, RA, 13),
RBC(RA, 13, RA, 14),
RBC(RA, 14, RA, 15),
RBC(RA, 15, ZERO, 0),
};
void coda_set_gdi_regs(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
const u16 *xy2ca_map;
u32 xy2rbc_config;
int i;
switch (ctx->tiled_map_type) {
case GDI_LINEAR_FRAME_MAP:
default:
xy2ca_map = xy2ca_zero_map;
xy2rbc_config = 0;
break;
case GDI_TILED_FRAME_MB_RASTER_MAP:
xy2ca_map = xy2ca_tiled_map;
xy2rbc_config = CODA9_XY2RBC_TILED_MAP |
CODA9_XY2RBC_CA_INC_HOR |
(16 - 1) << 12 | (8 - 1) << 4;
break;
}
for (i = 0; i < 16; i++)
coda_write(dev, xy2ca_map[i],
CODA9_GDI_XY2_CAS_0 + 4 * i);
for (i = 0; i < 4; i++)
coda_write(dev, XY2(ZERO, 0, ZERO, 0),
CODA9_GDI_XY2_BA_0 + 4 * i);
for (i = 0; i < 16; i++)
coda_write(dev, XY2(ZERO, 0, ZERO, 0),
CODA9_GDI_XY2_RAS_0 + 4 * i);
coda_write(dev, xy2rbc_config, CODA9_GDI_XY2_RBC_CONFIG);
if (xy2rbc_config) {
for (i = 0; i < 32; i++)
coda_write(dev, rbc2axi_tiled_map[i],
CODA9_GDI_RBC2_AXI_0 + 4 * i);
}
}
| linux-master | drivers/media/platform/chips-media/coda-gdi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* i.MX6 Video Data Order Adapter (VDOA)
*
* Copyright (C) 2014 Philipp Zabel
* Copyright (C) 2016 Pengutronix, Michael Tretter <[email protected]>
*/
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include "imx-vdoa.h"
#define VDOA_NAME "imx-vdoa"
#define VDOAC 0x00
#define VDOASRR 0x04
#define VDOAIE 0x08
#define VDOAIST 0x0c
#define VDOAFP 0x10
#define VDOAIEBA00 0x14
#define VDOAIEBA01 0x18
#define VDOAIEBA02 0x1c
#define VDOAIEBA10 0x20
#define VDOAIEBA11 0x24
#define VDOAIEBA12 0x28
#define VDOASL 0x2c
#define VDOAIUBO 0x30
#define VDOAVEBA0 0x34
#define VDOAVEBA1 0x38
#define VDOAVEBA2 0x3c
#define VDOAVUBO 0x40
#define VDOASR 0x44
#define VDOAC_ISEL BIT(6)
#define VDOAC_PFS BIT(5)
#define VDOAC_SO BIT(4)
#define VDOAC_SYNC BIT(3)
#define VDOAC_NF BIT(2)
#define VDOAC_BNDM_MASK 0x3
#define VDOAC_BAND_HEIGHT_8 0x0
#define VDOAC_BAND_HEIGHT_16 0x1
#define VDOAC_BAND_HEIGHT_32 0x2
#define VDOASRR_START BIT(1)
#define VDOASRR_SWRST BIT(0)
#define VDOAIE_EITERR BIT(1)
#define VDOAIE_EIEOT BIT(0)
#define VDOAIST_TERR BIT(1)
#define VDOAIST_EOT BIT(0)
#define VDOAFP_FH_MASK (0x1fff << 16)
#define VDOAFP_FW_MASK (0x3fff)
#define VDOASL_VSLY_MASK (0x3fff << 16)
#define VDOASL_ISLY_MASK (0x7fff)
#define VDOASR_ERRW BIT(4)
#define VDOASR_EOB BIT(3)
#define VDOASR_CURRENT_FRAME (0x3 << 1)
#define VDOASR_CURRENT_BUFFER BIT(1)
enum {
V4L2_M2M_SRC = 0,
V4L2_M2M_DST = 1,
};
struct vdoa_data {
struct vdoa_ctx *curr_ctx;
struct device *dev;
struct clk *vdoa_clk;
void __iomem *regs;
};
struct vdoa_q_data {
unsigned int width;
unsigned int height;
unsigned int bytesperline;
unsigned int sizeimage;
u32 pixelformat;
};
struct vdoa_ctx {
struct vdoa_data *vdoa;
struct completion completion;
struct vdoa_q_data q_data[2];
unsigned int submitted_job;
unsigned int completed_job;
};
static irqreturn_t vdoa_irq_handler(int irq, void *data)
{
struct vdoa_data *vdoa = data;
struct vdoa_ctx *curr_ctx;
u32 val;
/* Disable interrupts */
writel(0, vdoa->regs + VDOAIE);
curr_ctx = vdoa->curr_ctx;
if (!curr_ctx) {
dev_warn(vdoa->dev,
"Instance released before the end of transaction\n");
return IRQ_HANDLED;
}
val = readl(vdoa->regs + VDOAIST);
writel(val, vdoa->regs + VDOAIST);
if (val & VDOAIST_TERR) {
val = readl(vdoa->regs + VDOASR) & VDOASR_ERRW;
dev_err(vdoa->dev, "AXI %s error\n", val ? "write" : "read");
} else if (!(val & VDOAIST_EOT)) {
dev_warn(vdoa->dev, "Spurious interrupt\n");
}
curr_ctx->completed_job++;
complete(&curr_ctx->completion);
return IRQ_HANDLED;
}
int vdoa_wait_for_completion(struct vdoa_ctx *ctx)
{
struct vdoa_data *vdoa = ctx->vdoa;
if (ctx->submitted_job == ctx->completed_job)
return 0;
if (!wait_for_completion_timeout(&ctx->completion,
msecs_to_jiffies(300))) {
dev_err(vdoa->dev,
"Timeout waiting for transfer result\n");
return -ETIMEDOUT;
}
return 0;
}
EXPORT_SYMBOL(vdoa_wait_for_completion);
void vdoa_device_run(struct vdoa_ctx *ctx, dma_addr_t dst, dma_addr_t src)
{
struct vdoa_q_data *src_q_data, *dst_q_data;
struct vdoa_data *vdoa = ctx->vdoa;
u32 val;
if (vdoa->curr_ctx)
vdoa_wait_for_completion(vdoa->curr_ctx);
vdoa->curr_ctx = ctx;
reinit_completion(&ctx->completion);
ctx->submitted_job++;
src_q_data = &ctx->q_data[V4L2_M2M_SRC];
dst_q_data = &ctx->q_data[V4L2_M2M_DST];
/* Progressive, no sync, 1 frame per run */
if (dst_q_data->pixelformat == V4L2_PIX_FMT_YUYV)
val = VDOAC_PFS;
else
val = 0;
writel(val, vdoa->regs + VDOAC);
writel(dst_q_data->height << 16 | dst_q_data->width,
vdoa->regs + VDOAFP);
val = dst;
writel(val, vdoa->regs + VDOAIEBA00);
writel(src_q_data->bytesperline << 16 | dst_q_data->bytesperline,
vdoa->regs + VDOASL);
if (dst_q_data->pixelformat == V4L2_PIX_FMT_NV12 ||
dst_q_data->pixelformat == V4L2_PIX_FMT_NV21)
val = dst_q_data->bytesperline * dst_q_data->height;
else
val = 0;
writel(val, vdoa->regs + VDOAIUBO);
val = src;
writel(val, vdoa->regs + VDOAVEBA0);
val = round_up(src_q_data->bytesperline * src_q_data->height, 4096);
writel(val, vdoa->regs + VDOAVUBO);
/* Enable interrupts and start transfer */
writel(VDOAIE_EITERR | VDOAIE_EIEOT, vdoa->regs + VDOAIE);
writel(VDOASRR_START, vdoa->regs + VDOASRR);
}
EXPORT_SYMBOL(vdoa_device_run);
struct vdoa_ctx *vdoa_context_create(struct vdoa_data *vdoa)
{
struct vdoa_ctx *ctx;
int err;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
err = clk_prepare_enable(vdoa->vdoa_clk);
if (err) {
kfree(ctx);
return NULL;
}
init_completion(&ctx->completion);
ctx->vdoa = vdoa;
return ctx;
}
EXPORT_SYMBOL(vdoa_context_create);
void vdoa_context_destroy(struct vdoa_ctx *ctx)
{
struct vdoa_data *vdoa = ctx->vdoa;
if (vdoa->curr_ctx == ctx) {
vdoa_wait_for_completion(vdoa->curr_ctx);
vdoa->curr_ctx = NULL;
}
clk_disable_unprepare(vdoa->vdoa_clk);
kfree(ctx);
}
EXPORT_SYMBOL(vdoa_context_destroy);
int vdoa_context_configure(struct vdoa_ctx *ctx,
unsigned int width, unsigned int height,
u32 pixelformat)
{
struct vdoa_q_data *src_q_data;
struct vdoa_q_data *dst_q_data;
if (width < 16 || width > 8192 || width % 16 != 0 ||
height < 16 || height > 4096 || height % 16 != 0)
return -EINVAL;
if (pixelformat != V4L2_PIX_FMT_YUYV &&
pixelformat != V4L2_PIX_FMT_NV12)
return -EINVAL;
/* If no context is passed, only check if the format is valid */
if (!ctx)
return 0;
src_q_data = &ctx->q_data[V4L2_M2M_SRC];
dst_q_data = &ctx->q_data[V4L2_M2M_DST];
src_q_data->width = width;
src_q_data->height = height;
src_q_data->bytesperline = width;
src_q_data->sizeimage =
round_up(src_q_data->bytesperline * height, 4096) +
src_q_data->bytesperline * height / 2;
dst_q_data->width = width;
dst_q_data->height = height;
dst_q_data->pixelformat = pixelformat;
switch (pixelformat) {
case V4L2_PIX_FMT_YUYV:
dst_q_data->bytesperline = width * 2;
dst_q_data->sizeimage = dst_q_data->bytesperline * height;
break;
case V4L2_PIX_FMT_NV12:
default:
dst_q_data->bytesperline = width;
dst_q_data->sizeimage =
dst_q_data->bytesperline * height * 3 / 2;
break;
}
return 0;
}
EXPORT_SYMBOL(vdoa_context_configure);
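/*
 * For example, configuring 1920x1088 NV12 output yields bytesperline = 1920
 * and sizeimage = 1920 * 1088 * 3 / 2 = 3133440 bytes, while the tiled
 * source plane is sized with the luma plane rounded up to a 4096-byte
 * boundary before the chroma plane, matching the VDOAVUBO value programmed
 * in vdoa_device_run().
 */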
static int vdoa_probe(struct platform_device *pdev)
{
struct vdoa_data *vdoa;
int ret;
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "DMA enable failed\n");
return ret;
}
vdoa = devm_kzalloc(&pdev->dev, sizeof(*vdoa), GFP_KERNEL);
if (!vdoa)
return -ENOMEM;
vdoa->dev = &pdev->dev;
vdoa->vdoa_clk = devm_clk_get(vdoa->dev, NULL);
if (IS_ERR(vdoa->vdoa_clk)) {
dev_err(vdoa->dev, "Failed to get clock\n");
return PTR_ERR(vdoa->vdoa_clk);
}
vdoa->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vdoa->regs))
return PTR_ERR(vdoa->regs);
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
ret = devm_request_threaded_irq(&pdev->dev, ret, NULL,
vdoa_irq_handler, IRQF_ONESHOT,
"vdoa", vdoa);
if (ret < 0) {
dev_err(vdoa->dev, "Failed to get irq\n");
return ret;
}
platform_set_drvdata(pdev, vdoa);
return 0;
}
static const struct of_device_id vdoa_dt_ids[] = {
{ .compatible = "fsl,imx6q-vdoa" },
{}
};
MODULE_DEVICE_TABLE(of, vdoa_dt_ids);
static struct platform_driver vdoa_driver = {
.probe = vdoa_probe,
.driver = {
.name = VDOA_NAME,
.of_match_table = vdoa_dt_ids,
},
};
module_platform_driver(vdoa_driver);
MODULE_DESCRIPTION("Video Data Order Adapter");
MODULE_AUTHOR("Philipp Zabel <[email protected]>");
MODULE_ALIAS("platform:imx-vdoa");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/platform/chips-media/imx-vdoa.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Coda multi-standard codec IP
*
* Copyright (C) 2012 Vista Silicon S.L.
* Javier Martin, <[email protected]>
* Xavier Duret
*/
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/gcd.h>
#include <linux/genalloc.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/ratelimit.h>
#include <linux/reset.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-vmalloc.h>
#include "coda.h"
#include "imx-vdoa.h"
#define CODA_NAME "coda"
#define CODADX6_MAX_INSTANCES 4
#define CODA_MAX_FORMATS 5
#define CODA_ISRAM_SIZE (2048 * 2)
#define MIN_W 48
#define MIN_H 16
#define S_ALIGN 1 /* multiple of 2 */
#define W_ALIGN 1 /* multiple of 2 */
#define H_ALIGN 1 /* multiple of 2 */
#define fh_to_ctx(__fh) container_of(__fh, struct coda_ctx, fh)
int coda_debug;
module_param(coda_debug, int, 0644);
MODULE_PARM_DESC(coda_debug, "Debug level (0-2)");
static int disable_tiling;
module_param(disable_tiling, int, 0644);
MODULE_PARM_DESC(disable_tiling, "Disable tiled frame buffers");
static int disable_vdoa;
module_param(disable_vdoa, int, 0644);
MODULE_PARM_DESC(disable_vdoa, "Disable Video Data Order Adapter tiled to raster-scan conversion");
static int enable_bwb = 0;
module_param(enable_bwb, int, 0644);
MODULE_PARM_DESC(enable_bwb, "Enable BWB unit for decoding, may crash on certain streams");
void coda_write(struct coda_dev *dev, u32 data, u32 reg)
{
v4l2_dbg(3, coda_debug, &dev->v4l2_dev,
"%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
writel(data, dev->regs_base + reg);
}
unsigned int coda_read(struct coda_dev *dev, u32 reg)
{
u32 data;
data = readl(dev->regs_base + reg);
v4l2_dbg(3, coda_debug, &dev->v4l2_dev,
"%s: data=0x%x, reg=0x%x\n", __func__, data, reg);
return data;
}
void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data,
struct vb2_v4l2_buffer *buf, unsigned int reg_y)
{
u32 base_y = vb2_dma_contig_plane_dma_addr(&buf->vb2_buf, 0);
u32 base_cb, base_cr;
switch (q_data->fourcc) {
case V4L2_PIX_FMT_YUYV:
/* Fallthrough: IN -H264-> CODA -NV12 MB-> VDOA -YUYV-> OUT */
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_YUV420:
default:
base_cb = base_y + q_data->bytesperline * q_data->height;
base_cr = base_cb + q_data->bytesperline * q_data->height / 4;
break;
case V4L2_PIX_FMT_YVU420:
/* Switch Cb and Cr for YVU420 format */
base_cr = base_y + q_data->bytesperline * q_data->height;
base_cb = base_cr + q_data->bytesperline * q_data->height / 4;
break;
case V4L2_PIX_FMT_YUV422P:
base_cb = base_y + q_data->bytesperline * q_data->height;
base_cr = base_cb + q_data->bytesperline * q_data->height / 2;
}
coda_write(ctx->dev, base_y, reg_y);
coda_write(ctx->dev, base_cb, reg_y + 4);
coda_write(ctx->dev, base_cr, reg_y + 8);
}
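/*
 * For contiguous YUV420 this yields the standard planar layout: with
 * bytesperline = 1920 and height = 1088, Cb starts 1920 * 1088 = 2088960
 * bytes after Y and Cr a further 522240 bytes (a quarter of the luma
 * plane) after Cb.
 */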
#define CODA_CODEC(mode, src_fourcc, dst_fourcc, max_w, max_h) \
{ mode, src_fourcc, dst_fourcc, max_w, max_h }
/*
* Arrays of codecs supported by each given version of Coda:
* i.MX27 -> codadx6
* i.MX51 -> codahx4
* i.MX53 -> coda7
* i.MX6 -> coda960
* Use V4L2_PIX_FMT_YUV420 as placeholder for all supported YUV 4:2:0 variants
*/
static const struct coda_codec codadx6_codecs[] = {
CODA_CODEC(CODADX6_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 720, 576),
CODA_CODEC(CODADX6_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 720, 576),
};
static const struct coda_codec codahx4_codecs[] = {
CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 720, 576),
CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA7_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1280, 720),
};
static const struct coda_codec coda7_codecs[] = {
CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1280, 720),
CODA_CODEC(CODA7_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1280, 720),
CODA_CODEC(CODA7_MODE_ENCODE_MJPG, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_JPEG, 8192, 8192),
CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA7_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA7_MODE_DECODE_MJPG, V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420, 8192, 8192),
};
static const struct coda_codec coda9_codecs[] = {
CODA_CODEC(CODA9_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1920, 1088),
CODA_CODEC(CODA9_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1920, 1088),
CODA_CODEC(CODA9_MODE_ENCODE_MJPG, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_JPEG, 8192, 8192),
CODA_CODEC(CODA9_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA9_MODE_DECODE_MP2, V4L2_PIX_FMT_MPEG2, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA9_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1088),
CODA_CODEC(CODA9_MODE_DECODE_MJPG, V4L2_PIX_FMT_JPEG, V4L2_PIX_FMT_YUV420, 8192, 8192),
};
struct coda_video_device {
const char *name;
enum coda_inst_type type;
const struct coda_context_ops *ops;
bool direct;
u32 src_formats[CODA_MAX_FORMATS];
u32 dst_formats[CODA_MAX_FORMATS];
};
static const struct coda_video_device coda_bit_encoder = {
.name = "coda-video-encoder",
.type = CODA_INST_ENCODER,
.ops = &coda_bit_encode_ops,
.src_formats = {
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
},
.dst_formats = {
V4L2_PIX_FMT_H264,
V4L2_PIX_FMT_MPEG4,
},
};
static const struct coda_video_device coda_bit_jpeg_encoder = {
.name = "coda-jpeg-encoder",
.type = CODA_INST_ENCODER,
.ops = &coda_bit_encode_ops,
.src_formats = {
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
V4L2_PIX_FMT_YUV422P,
},
.dst_formats = {
V4L2_PIX_FMT_JPEG,
},
};
static const struct coda_video_device coda_bit_decoder = {
.name = "coda-video-decoder",
.type = CODA_INST_DECODER,
.ops = &coda_bit_decode_ops,
.src_formats = {
V4L2_PIX_FMT_H264,
V4L2_PIX_FMT_MPEG2,
V4L2_PIX_FMT_MPEG4,
},
.dst_formats = {
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
/*
* If V4L2_PIX_FMT_YUYV should be default,
* set_default_params() must be adjusted.
*/
V4L2_PIX_FMT_YUYV,
},
};
static const struct coda_video_device coda_bit_jpeg_decoder = {
.name = "coda-jpeg-decoder",
.type = CODA_INST_DECODER,
.ops = &coda_bit_decode_ops,
.src_formats = {
V4L2_PIX_FMT_JPEG,
},
.dst_formats = {
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
V4L2_PIX_FMT_YUV422P,
},
};
static const struct coda_video_device coda9_jpeg_encoder = {
.name = "coda-jpeg-encoder",
.type = CODA_INST_ENCODER,
.ops = &coda9_jpeg_encode_ops,
.direct = true,
.src_formats = {
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
V4L2_PIX_FMT_YUV422P,
V4L2_PIX_FMT_GREY,
},
.dst_formats = {
V4L2_PIX_FMT_JPEG,
},
};
static const struct coda_video_device coda9_jpeg_decoder = {
.name = "coda-jpeg-decoder",
.type = CODA_INST_DECODER,
.ops = &coda9_jpeg_decode_ops,
.direct = true,
.src_formats = {
V4L2_PIX_FMT_JPEG,
},
.dst_formats = {
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
V4L2_PIX_FMT_YUV422P,
},
};
static const struct coda_video_device *codadx6_video_devices[] = {
&coda_bit_encoder,
};
static const struct coda_video_device *codahx4_video_devices[] = {
&coda_bit_encoder,
&coda_bit_decoder,
};
static const struct coda_video_device *coda7_video_devices[] = {
&coda_bit_jpeg_encoder,
&coda_bit_jpeg_decoder,
&coda_bit_encoder,
&coda_bit_decoder,
};
static const struct coda_video_device *coda9_video_devices[] = {
&coda9_jpeg_encoder,
&coda9_jpeg_decoder,
&coda_bit_encoder,
&coda_bit_decoder,
};
/*
* Normalize all supported YUV 4:2:0 formats to the value used in the codec
* tables.
*/
static u32 coda_format_normalize_yuv(u32 fourcc)
{
switch (fourcc) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_YUV422P:
case V4L2_PIX_FMT_YUYV:
return V4L2_PIX_FMT_YUV420;
default:
return fourcc;
}
}
static const struct coda_codec *coda_find_codec(struct coda_dev *dev,
int src_fourcc, int dst_fourcc)
{
const struct coda_codec *codecs = dev->devtype->codecs;
int num_codecs = dev->devtype->num_codecs;
int k;
src_fourcc = coda_format_normalize_yuv(src_fourcc);
dst_fourcc = coda_format_normalize_yuv(dst_fourcc);
if (src_fourcc == dst_fourcc)
return NULL;
for (k = 0; k < num_codecs; k++) {
if (codecs[k].src_fourcc == src_fourcc &&
codecs[k].dst_fourcc == dst_fourcc)
break;
}
if (k == num_codecs)
return NULL;
return &codecs[k];
}
static void coda_get_max_dimensions(struct coda_dev *dev,
const struct coda_codec *codec,
int *max_w, int *max_h)
{
const struct coda_codec *codecs = dev->devtype->codecs;
int num_codecs = dev->devtype->num_codecs;
unsigned int w, h;
int k;
if (codec) {
w = codec->max_w;
h = codec->max_h;
} else {
for (k = 0, w = 0, h = 0; k < num_codecs; k++) {
w = max(w, codecs[k].max_w);
h = max(h, codecs[k].max_h);
}
}
if (max_w)
*max_w = w;
if (max_h)
*max_h = h;
}
static const struct coda_video_device *to_coda_video_device(struct video_device
*vdev)
{
struct coda_dev *dev = video_get_drvdata(vdev);
unsigned int i = vdev - dev->vfd;
if (i >= dev->devtype->num_vdevs)
return NULL;
return dev->devtype->vdevs[i];
}
const char *coda_product_name(int product)
{
static char buf[9];
switch (product) {
case CODA_DX6:
return "CodaDx6";
case CODA_HX4:
return "CodaHx4";
case CODA_7541:
return "CODA7541";
case CODA_960:
return "CODA960";
default:
snprintf(buf, sizeof(buf), "(0x%04x)", product);
return buf;
}
}
static struct vdoa_data *coda_get_vdoa_data(void)
{
struct device_node *vdoa_node;
struct platform_device *vdoa_pdev;
struct vdoa_data *vdoa_data = NULL;
vdoa_node = of_find_compatible_node(NULL, NULL, "fsl,imx6q-vdoa");
if (!vdoa_node)
return NULL;
vdoa_pdev = of_find_device_by_node(vdoa_node);
if (!vdoa_pdev)
goto out;
vdoa_data = platform_get_drvdata(vdoa_pdev);
if (!vdoa_data)
vdoa_data = ERR_PTR(-EPROBE_DEFER);
put_device(&vdoa_pdev->dev);
out:
of_node_put(vdoa_node);
return vdoa_data;
}
/*
* V4L2 ioctl() operations.
*/
static int coda_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
strscpy(cap->driver, CODA_NAME, sizeof(cap->driver));
strscpy(cap->card, coda_product_name(ctx->dev->devtype->product),
sizeof(cap->card));
strscpy(cap->bus_info, "platform:" CODA_NAME, sizeof(cap->bus_info));
return 0;
}
static const u32 coda_formats_420[CODA_MAX_FORMATS] = {
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_YVU420,
};
static int coda_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct video_device *vdev = video_devdata(file);
const struct coda_video_device *cvd = to_coda_video_device(vdev);
struct coda_ctx *ctx = fh_to_ctx(priv);
const u32 *formats;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
formats = cvd->src_formats;
else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
struct coda_q_data *q_data_src;
struct vb2_queue *src_vq;
formats = cvd->dst_formats;
/*
* If the source format is already fixed, only allow the same
* chroma subsampling.
*/
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (q_data_src->fourcc == V4L2_PIX_FMT_JPEG &&
vb2_is_streaming(src_vq)) {
if (ctx->params.jpeg_chroma_subsampling ==
V4L2_JPEG_CHROMA_SUBSAMPLING_420) {
formats = coda_formats_420;
} else if (ctx->params.jpeg_chroma_subsampling ==
V4L2_JPEG_CHROMA_SUBSAMPLING_422) {
f->pixelformat = V4L2_PIX_FMT_YUV422P;
return f->index ? -EINVAL : 0;
}
}
} else {
return -EINVAL;
}
if (f->index >= CODA_MAX_FORMATS || formats[f->index] == 0)
return -EINVAL;
/* Skip YUYV if the vdoa is not available */
if (!ctx->vdoa && f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
formats[f->index] == V4L2_PIX_FMT_YUYV)
return -EINVAL;
f->pixelformat = formats[f->index];
return 0;
}
static int coda_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_q_data *q_data;
struct coda_ctx *ctx = fh_to_ctx(priv);
q_data = get_q_data(ctx, f->type);
if (!q_data)
return -EINVAL;
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.pixelformat = q_data->fourcc;
f->fmt.pix.width = q_data->width;
f->fmt.pix.height = q_data->height;
f->fmt.pix.bytesperline = q_data->bytesperline;
f->fmt.pix.sizeimage = q_data->sizeimage;
f->fmt.pix.colorspace = ctx->colorspace;
f->fmt.pix.xfer_func = ctx->xfer_func;
f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
f->fmt.pix.quantization = ctx->quantization;
return 0;
}
static int coda_try_pixelformat(struct coda_ctx *ctx, struct v4l2_format *f)
{
struct coda_q_data *q_data;
const u32 *formats;
int i;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
formats = ctx->cvd->src_formats;
else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
formats = ctx->cvd->dst_formats;
else
return -EINVAL;
for (i = 0; i < CODA_MAX_FORMATS; i++) {
/* Skip YUYV if the vdoa is not available */
if (!ctx->vdoa && f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
formats[i] == V4L2_PIX_FMT_YUYV)
continue;
if (formats[i] == f->fmt.pix.pixelformat) {
f->fmt.pix.pixelformat = formats[i];
return 0;
}
}
/* Fall back to currently set pixelformat */
q_data = get_q_data(ctx, f->type);
f->fmt.pix.pixelformat = q_data->fourcc;
return 0;
}
static int coda_try_fmt_vdoa(struct coda_ctx *ctx, struct v4l2_format *f,
bool *use_vdoa)
{
int err;
if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (!use_vdoa)
return -EINVAL;
if (!ctx->vdoa) {
*use_vdoa = false;
return 0;
}
err = vdoa_context_configure(NULL, round_up(f->fmt.pix.width, 16),
f->fmt.pix.height, f->fmt.pix.pixelformat);
if (err) {
*use_vdoa = false;
return 0;
}
*use_vdoa = true;
return 0;
}
static unsigned int coda_estimate_sizeimage(struct coda_ctx *ctx, u32 sizeimage,
u32 width, u32 height)
{
/*
* This is a rough estimate for sensible compressed buffer
* sizes (between 1 and 16 bits per pixel). This could be
* improved by better format specific worst case estimates.
*/
return round_up(clamp(sizeimage, width * height / 8,
width * height * 2), PAGE_SIZE);
}
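/*
 * For example, a 1920x1088 stream is clamped to between 1920 * 1088 / 8 =
 * 261120 and 1920 * 1088 * 2 = 4177920 bytes, so a zero sizeimage hint is
 * rounded up to 262144 bytes (64 pages with 4 KiB pages).
 */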
static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec,
struct v4l2_format *f)
{
struct coda_dev *dev = ctx->dev;
unsigned int max_w, max_h;
enum v4l2_field field;
field = f->fmt.pix.field;
if (field == V4L2_FIELD_ANY)
field = V4L2_FIELD_NONE;
else if (V4L2_FIELD_NONE != field)
return -EINVAL;
/* V4L2 specification suggests the driver corrects the format struct
* if any of the dimensions is unsupported */
f->fmt.pix.field = field;
coda_get_max_dimensions(dev, codec, &max_w, &max_h);
v4l_bound_align_image(&f->fmt.pix.width, MIN_W, max_w, W_ALIGN,
&f->fmt.pix.height, MIN_H, max_h, H_ALIGN,
S_ALIGN);
switch (f->fmt.pix.pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
/*
* Frame stride must be at least multiple of 8,
* but multiple of 16 for h.264 or JPEG 4:2:x
*/
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height * 3 / 2;
break;
case V4L2_PIX_FMT_YUYV:
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16) * 2;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height;
break;
case V4L2_PIX_FMT_YUV422P:
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height * 2;
break;
case V4L2_PIX_FMT_GREY:
/* keep 16 pixel alignment of 8-bit pixel data */
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16);
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
break;
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_MPEG4:
case V4L2_PIX_FMT_MPEG2:
f->fmt.pix.bytesperline = 0;
f->fmt.pix.sizeimage = coda_estimate_sizeimage(ctx,
f->fmt.pix.sizeimage,
f->fmt.pix.width,
f->fmt.pix.height);
break;
default:
BUG();
}
return 0;
}
static int coda_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
const struct coda_q_data *q_data_src;
const struct coda_codec *codec;
struct vb2_queue *src_vq;
int hscale = 0;
int vscale = 0;
int ret;
bool use_vdoa;
ret = coda_try_pixelformat(ctx, f);
if (ret < 0)
return ret;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
/*
* If the source format is already fixed, only allow the same output
* resolution. When decoding JPEG images, we also have to make sure to
* use the same chroma subsampling.
*/
src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (vb2_is_streaming(src_vq)) {
if (q_data_src->fourcc == V4L2_PIX_FMT_JPEG &&
ctx->dev->devtype->product == CODA_960) {
hscale = coda_jpeg_scale(q_data_src->width, f->fmt.pix.width);
vscale = coda_jpeg_scale(q_data_src->height, f->fmt.pix.height);
}
f->fmt.pix.width = q_data_src->width >> hscale;
f->fmt.pix.height = q_data_src->height >> vscale;
if (q_data_src->fourcc == V4L2_PIX_FMT_JPEG) {
if (ctx->params.jpeg_chroma_subsampling ==
V4L2_JPEG_CHROMA_SUBSAMPLING_420 &&
f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P)
f->fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
else if (ctx->params.jpeg_chroma_subsampling ==
V4L2_JPEG_CHROMA_SUBSAMPLING_422)
f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P;
}
}
f->fmt.pix.colorspace = ctx->colorspace;
f->fmt.pix.xfer_func = ctx->xfer_func;
f->fmt.pix.ycbcr_enc = ctx->ycbcr_enc;
f->fmt.pix.quantization = ctx->quantization;
codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
f->fmt.pix.pixelformat);
if (!codec)
return -EINVAL;
ret = coda_try_fmt(ctx, codec, f);
if (ret < 0)
return ret;
/* The decoders always write complete macroblocks or MCUs */
if (ctx->inst_type == CODA_INST_DECODER) {
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16 >> hscale);
f->fmt.pix.height = round_up(f->fmt.pix.height, 16 >> vscale);
if (codec->src_fourcc == V4L2_PIX_FMT_JPEG &&
f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV422P) {
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height * 2;
} else {
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height * 3 / 2;
}
ret = coda_try_fmt_vdoa(ctx, f, &use_vdoa);
if (ret < 0)
return ret;
if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUYV) {
if (!use_vdoa)
return -EINVAL;
f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 16) * 2;
f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
f->fmt.pix.height;
}
}
return 0;
}
static void coda_set_default_colorspace(struct v4l2_pix_format *fmt)
{
enum v4l2_colorspace colorspace;
if (fmt->pixelformat == V4L2_PIX_FMT_JPEG)
colorspace = V4L2_COLORSPACE_JPEG;
else if (fmt->width <= 720 && fmt->height <= 576)
colorspace = V4L2_COLORSPACE_SMPTE170M;
else
colorspace = V4L2_COLORSPACE_REC709;
fmt->colorspace = colorspace;
fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
}
static int coda_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
struct coda_dev *dev = ctx->dev;
const struct coda_q_data *q_data_dst;
const struct coda_codec *codec;
int ret;
ret = coda_try_pixelformat(ctx, f);
if (ret < 0)
return ret;
if (f->fmt.pix.colorspace == V4L2_COLORSPACE_DEFAULT)
coda_set_default_colorspace(&f->fmt.pix);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
codec = coda_find_codec(dev, f->fmt.pix.pixelformat, q_data_dst->fourcc);
return coda_try_fmt(ctx, codec, f);
}
static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f,
struct v4l2_rect *r)
{
struct coda_q_data *q_data;
struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
q_data = get_q_data(ctx, f->type);
if (!q_data)
return -EINVAL;
if (vb2_is_busy(vq)) {
v4l2_err(&ctx->dev->v4l2_dev, "%s: %s queue busy: %d\n",
__func__, v4l2_type_names[f->type], vq->num_buffers);
return -EBUSY;
}
q_data->fourcc = f->fmt.pix.pixelformat;
q_data->width = f->fmt.pix.width;
q_data->height = f->fmt.pix.height;
q_data->bytesperline = f->fmt.pix.bytesperline;
q_data->sizeimage = f->fmt.pix.sizeimage;
if (r) {
q_data->rect = *r;
} else {
q_data->rect.left = 0;
q_data->rect.top = 0;
q_data->rect.width = f->fmt.pix.width;
q_data->rect.height = f->fmt.pix.height;
}
switch (f->fmt.pix.pixelformat) {
case V4L2_PIX_FMT_YUYV:
ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
break;
case V4L2_PIX_FMT_NV12:
if (!disable_tiling && ctx->use_bit &&
ctx->dev->devtype->product == CODA_960) {
ctx->tiled_map_type = GDI_TILED_FRAME_MB_RASTER_MAP;
break;
}
fallthrough;
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_YUV422P:
ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP;
break;
default:
break;
}
if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP &&
!coda_try_fmt_vdoa(ctx, f, &ctx->use_vdoa) &&
ctx->use_vdoa)
vdoa_context_configure(ctx->vdoa,
round_up(f->fmt.pix.width, 16),
f->fmt.pix.height,
f->fmt.pix.pixelformat);
else
ctx->use_vdoa = false;
coda_dbg(1, ctx, "Setting %s format, wxh: %dx%d, fmt: %4.4s %c\n",
v4l2_type_names[f->type], q_data->width, q_data->height,
(char *)&q_data->fourcc,
(ctx->tiled_map_type == GDI_LINEAR_FRAME_MAP) ? 'L' : 'T');
return 0;
}
static int coda_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
struct coda_q_data *q_data_src;
const struct coda_codec *codec;
struct v4l2_rect r;
int hscale = 0;
int vscale = 0;
int ret;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (q_data_src->fourcc == V4L2_PIX_FMT_JPEG &&
ctx->dev->devtype->product == CODA_960) {
hscale = coda_jpeg_scale(q_data_src->width, f->fmt.pix.width);
vscale = coda_jpeg_scale(q_data_src->height, f->fmt.pix.height);
}
ret = coda_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
r.left = 0;
r.top = 0;
r.width = q_data_src->width >> hscale;
r.height = q_data_src->height >> vscale;
ret = coda_s_fmt(ctx, f, &r);
if (ret)
return ret;
if (ctx->inst_type != CODA_INST_ENCODER)
return 0;
/* Setting the coded format determines the selected codec */
codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
f->fmt.pix.pixelformat);
if (!codec) {
v4l2_err(&ctx->dev->v4l2_dev, "failed to determine codec\n");
return -EINVAL;
}
ctx->codec = codec;
ctx->colorspace = f->fmt.pix.colorspace;
ctx->xfer_func = f->fmt.pix.xfer_func;
ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
ctx->quantization = f->fmt.pix.quantization;
return 0;
}
static int coda_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
const struct coda_codec *codec;
struct v4l2_format f_cap;
struct vb2_queue *dst_vq;
int ret;
ret = coda_try_fmt_vid_out(file, priv, f);
if (ret)
return ret;
ret = coda_s_fmt(ctx, f, NULL);
if (ret)
return ret;
ctx->colorspace = f->fmt.pix.colorspace;
ctx->xfer_func = f->fmt.pix.xfer_func;
ctx->ycbcr_enc = f->fmt.pix.ycbcr_enc;
ctx->quantization = f->fmt.pix.quantization;
if (ctx->inst_type != CODA_INST_DECODER)
return 0;
/* Setting the coded format determines the selected codec */
codec = coda_find_codec(ctx->dev, f->fmt.pix.pixelformat,
V4L2_PIX_FMT_YUV420);
if (!codec) {
v4l2_err(&ctx->dev->v4l2_dev, "failed to determine codec\n");
return -EINVAL;
}
ctx->codec = codec;
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
if (!dst_vq)
return -EINVAL;
/*
* Setting the capture queue format is not possible while the capture
* queue is still busy. This is not an error, but the user will have to
* make sure themselves that the capture format is set correctly before
* starting the output queue again.
*/
if (vb2_is_busy(dst_vq))
return 0;
memset(&f_cap, 0, sizeof(f_cap));
f_cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
coda_g_fmt(file, priv, &f_cap);
f_cap.fmt.pix.width = f->fmt.pix.width;
f_cap.fmt.pix.height = f->fmt.pix.height;
return coda_s_fmt_vid_cap(file, priv, &f_cap);
}
static int coda_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
int ret;
ret = v4l2_m2m_reqbufs(file, ctx->fh.m2m_ctx, rb);
if (ret)
return ret;
/*
* Allow to allocate instance specific per-context buffers, such as
* bitstream ringbuffer, slice buffer, work buffer, etc. if needed.
*/
if (rb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && ctx->ops->reqbufs)
return ctx->ops->reqbufs(ctx, rb);
return 0;
}
static int coda_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
if (ctx->inst_type == CODA_INST_DECODER &&
buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
buf->flags &= ~V4L2_BUF_FLAG_LAST;
return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf);
}
static int coda_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
int ret;
ret = v4l2_m2m_dqbuf(file, ctx->fh.m2m_ctx, buf);
if (ctx->inst_type == CODA_INST_DECODER &&
buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
buf->flags &= ~V4L2_BUF_FLAG_LAST;
return ret;
}
void coda_m2m_buf_done(struct coda_ctx *ctx, struct vb2_v4l2_buffer *buf,
enum vb2_buffer_state state)
{
const struct v4l2_event eos_event = {
.type = V4L2_EVENT_EOS
};
if (buf->flags & V4L2_BUF_FLAG_LAST)
v4l2_event_queue_fh(&ctx->fh, &eos_event);
v4l2_m2m_buf_done(buf, state);
}
static int coda_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
struct coda_q_data *q_data;
struct v4l2_rect r, *rsel;
q_data = get_q_data(ctx, s->type);
if (!q_data)
return -EINVAL;
r.left = 0;
r.top = 0;
r.width = q_data->width;
r.height = q_data->height;
rsel = &q_data->rect;
switch (s->target) {
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
rsel = &r;
fallthrough;
case V4L2_SEL_TGT_CROP:
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
ctx->inst_type == CODA_INST_DECODER)
return -EINVAL;
break;
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
case V4L2_SEL_TGT_COMPOSE_PADDED:
rsel = &r;
fallthrough;
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
ctx->inst_type == CODA_INST_ENCODER)
return -EINVAL;
break;
default:
return -EINVAL;
}
s->r = *rsel;
return 0;
}
static int coda_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
struct coda_q_data *q_data;
switch (s->target) {
case V4L2_SEL_TGT_CROP:
if (ctx->inst_type == CODA_INST_ENCODER &&
s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
q_data = get_q_data(ctx, s->type);
if (!q_data)
return -EINVAL;
s->r.left = 0;
s->r.top = 0;
s->r.width = clamp(s->r.width, 2U, q_data->width);
s->r.height = clamp(s->r.height, 2U, q_data->height);
if (s->flags & V4L2_SEL_FLAG_LE) {
s->r.width = round_up(s->r.width, 2);
s->r.height = round_up(s->r.height, 2);
} else {
s->r.width = round_down(s->r.width, 2);
s->r.height = round_down(s->r.height, 2);
}
q_data->rect = s->r;
coda_dbg(1, ctx, "Setting crop rectangle: %dx%d\n",
s->r.width, s->r.height);
return 0;
}
fallthrough;
case V4L2_SEL_TGT_NATIVE_SIZE:
case V4L2_SEL_TGT_COMPOSE:
return coda_g_selection(file, fh, s);
default:
/* v4l2-compliance expects this to fail for read-only targets */
return -EINVAL;
}
}
static void coda_wake_up_capture_queue(struct coda_ctx *ctx)
{
struct vb2_queue *dst_vq;
coda_dbg(1, ctx, "waking up capture queue\n");
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
dst_vq->last_buffer_dequeued = true;
wake_up(&dst_vq->done_wq);
}
static int coda_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
struct vb2_v4l2_buffer *buf;
int ret;
ret = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, ec);
if (ret < 0)
return ret;
mutex_lock(&ctx->wakeup_mutex);
buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx);
if (buf) {
/*
 * If the last output buffer is still on the queue, make sure
 * that encoder finish_run will see the last flag and report it
 * to userspace.
 */
buf->flags |= V4L2_BUF_FLAG_LAST;
} else {
/* Set the stream-end flag on this context */
ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
/*
* If the last output buffer has already been taken from the
* queue, wake up the capture queue and signal end of stream
* via the -EPIPE mechanism.
*/
coda_wake_up_capture_queue(ctx);
}
mutex_unlock(&ctx->wakeup_mutex);
return 0;
}
static bool coda_mark_last_meta(struct coda_ctx *ctx)
{
struct coda_buffer_meta *meta;
coda_dbg(1, ctx, "marking last meta\n");
spin_lock(&ctx->buffer_meta_lock);
if (list_empty(&ctx->buffer_meta_list)) {
spin_unlock(&ctx->buffer_meta_lock);
return false;
}
meta = list_last_entry(&ctx->buffer_meta_list, struct coda_buffer_meta,
list);
meta->last = true;
spin_unlock(&ctx->buffer_meta_lock);
return true;
}
static bool coda_mark_last_dst_buf(struct coda_ctx *ctx)
{
struct vb2_v4l2_buffer *buf;
struct vb2_buffer *dst_vb;
struct vb2_queue *dst_vq;
unsigned long flags;
coda_dbg(1, ctx, "marking last capture buffer\n");
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
spin_lock_irqsave(&dst_vq->done_lock, flags);
if (list_empty(&dst_vq->done_list)) {
spin_unlock_irqrestore(&dst_vq->done_lock, flags);
return false;
}
dst_vb = list_last_entry(&dst_vq->done_list, struct vb2_buffer,
done_entry);
buf = to_vb2_v4l2_buffer(dst_vb);
buf->flags |= V4L2_BUF_FLAG_LAST;
spin_unlock_irqrestore(&dst_vq->done_lock, flags);
return true;
}
static int coda_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
struct coda_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *buf;
struct vb2_queue *dst_vq;
bool stream_end;
bool wakeup;
int ret;
ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, dc);
if (ret < 0)
return ret;
switch (dc->cmd) {
case V4L2_DEC_CMD_START:
mutex_lock(&dev->coda_mutex);
mutex_lock(&ctx->bitstream_mutex);
coda_bitstream_flush(ctx);
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
vb2_clear_last_buffer_dequeued(dst_vq);
ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG;
coda_fill_bitstream(ctx, NULL);
mutex_unlock(&ctx->bitstream_mutex);
mutex_unlock(&dev->coda_mutex);
break;
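/*
 * The STOP command below has three possible outcomes: if the last
 * queued output buffer has not been processed yet, it is flagged as
 * LAST; if it has, the last pending buffer meta (BIT decoders) or the
 * last buffer on the capture done list is marked instead; and if
 * nothing is in flight at all, the capture queue is woken up so that
 * end of stream is signalled via the -EPIPE mechanism.
 */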
case V4L2_DEC_CMD_STOP:
stream_end = false;
wakeup = false;
mutex_lock(&ctx->wakeup_mutex);
buf = v4l2_m2m_last_src_buf(ctx->fh.m2m_ctx);
if (buf) {
coda_dbg(1, ctx, "marking last pending buffer\n");
/* Mark last buffer */
buf->flags |= V4L2_BUF_FLAG_LAST;
if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) == 0) {
coda_dbg(1, ctx, "all remaining buffers queued\n");
stream_end = true;
}
} else {
if (ctx->use_bit) {
if (coda_mark_last_meta(ctx))
stream_end = true;
else
wakeup = true;
} else {
if (!coda_mark_last_dst_buf(ctx))
wakeup = true;
}
}
if (stream_end) {
coda_dbg(1, ctx, "all remaining buffers queued\n");
/* Set the stream-end flag on this context */
coda_bit_stream_end_flag(ctx);
ctx->hold = false;
v4l2_m2m_try_schedule(ctx->fh.m2m_ctx);
}
if (wakeup) {
/* If there is no buffer in flight, wake up */
coda_wake_up_capture_queue(ctx);
}
mutex_unlock(&ctx->wakeup_mutex);
break;
default:
return -EINVAL;
}
return 0;
}
static int coda_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
struct coda_q_data *q_data_dst;
const struct coda_codec *codec;
if (fsize->index)
return -EINVAL;
if (coda_format_normalize_yuv(fsize->pixel_format) ==
V4L2_PIX_FMT_YUV420) {
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
codec = coda_find_codec(ctx->dev, fsize->pixel_format,
q_data_dst->fourcc);
} else {
codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
fsize->pixel_format);
}
if (!codec)
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
fsize->stepwise.min_width = MIN_W;
fsize->stepwise.max_width = codec->max_w;
fsize->stepwise.step_width = 1;
fsize->stepwise.min_height = MIN_H;
fsize->stepwise.max_height = codec->max_h;
fsize->stepwise.step_height = 1;
return 0;
}
static int coda_enum_frameintervals(struct file *file, void *fh,
struct v4l2_frmivalenum *f)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
struct coda_q_data *q_data;
const struct coda_codec *codec;
if (f->index)
return -EINVAL;
/* Disallow YUYV if the vdoa is not available */
if (!ctx->vdoa && f->pixel_format == V4L2_PIX_FMT_YUYV)
return -EINVAL;
if (coda_format_normalize_yuv(f->pixel_format) == V4L2_PIX_FMT_YUV420) {
q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
codec = coda_find_codec(ctx->dev, f->pixel_format,
q_data->fourcc);
} else {
codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
f->pixel_format);
}
if (!codec)
return -EINVAL;
if (f->width < MIN_W || f->width > codec->max_w ||
f->height < MIN_H || f->height > codec->max_h)
return -EINVAL;
f->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
f->stepwise.min.numerator = 1;
f->stepwise.min.denominator = 65535;
f->stepwise.max.numerator = 65536;
f->stepwise.max.denominator = 1;
f->stepwise.step.numerator = 1;
f->stepwise.step.denominator = 1;
return 0;
}
static int coda_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
struct v4l2_fract *tpf;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
tpf = &a->parm.output.timeperframe;
tpf->denominator = ctx->params.framerate & CODA_FRATE_RES_MASK;
tpf->numerator = 1 + (ctx->params.framerate >>
CODA_FRATE_DIV_OFFSET);
return 0;
}
/*
* Approximate timeperframe v4l2_fract with values that can be written
* into the 16-bit CODA_FRATE_DIV and CODA_FRATE_RES fields.
*/
static void coda_approximate_timeperframe(struct v4l2_fract *timeperframe)
{
struct v4l2_fract s = *timeperframe;
struct v4l2_fract f0;
struct v4l2_fract f1 = { 1, 0 };
struct v4l2_fract f2 = { 0, 1 };
unsigned int i, div, s_denominator;
/* Lower bound is 1/65535 */
if (s.numerator == 0 || s.denominator / s.numerator > 65535) {
timeperframe->numerator = 1;
timeperframe->denominator = 65535;
return;
}
/* Upper bound is 65536/1 */
if (s.denominator == 0 || s.numerator / s.denominator > 65536) {
timeperframe->numerator = 65536;
timeperframe->denominator = 1;
return;
}
/* Reduce fraction to lowest terms */
div = gcd(s.numerator, s.denominator);
if (div > 1) {
s.numerator /= div;
s.denominator /= div;
}
if (s.numerator <= 65536 && s.denominator < 65536) {
*timeperframe = s;
return;
}
/* Find successive convergents from continued fraction expansion */
while (f2.numerator <= 65536 && f2.denominator < 65536) {
f0 = f1;
f1 = f2;
/* Stop when f2 exactly equals timeperframe */
if (s.numerator == 0)
break;
i = s.denominator / s.numerator;
/* Convergent recurrence: h(n) = a(n) * h(n-1) + h(n-2), same for k(n) */
f2.numerator = f0.numerator + i * f1.numerator;
f2.denominator = f0.denominator + i * f1.denominator;
s_denominator = s.numerator;
s.numerator = s.denominator % s.numerator;
s.denominator = s_denominator;
}
*timeperframe = f1;
}
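/*
 * Worked example: a requested timeperframe of 33333/1000000 (30.00003 fps)
 * does not fit the 16-bit fields. The loop produces the convergents 0/1
 * and 1/30; the next convergent, 3333/99991, overflows the denominator
 * bound, so the function settles on 1/30.
 */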
static uint32_t coda_timeperframe_to_frate(struct v4l2_fract *timeperframe)
{
return ((timeperframe->numerator - 1) << CODA_FRATE_DIV_OFFSET) |
timeperframe->denominator;
}
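/*
 * The register encoding packs the frame rate divisor minus one into the
 * upper CODA_FRATE_DIV field and the residual denominator into the lower
 * CODA_FRATE_RES field; e.g. a timeperframe of 1/30 encodes as just 30.
 * coda_g_parm() above performs the inverse unpacking.
 */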
static int coda_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
struct v4l2_fract *tpf;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
tpf = &a->parm.output.timeperframe;
coda_approximate_timeperframe(tpf);
ctx->params.framerate = coda_timeperframe_to_frate(tpf);
ctx->params.framerate_changed = true;
return 0;
}
static int coda_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
struct coda_ctx *ctx = fh_to_ctx(fh);
switch (sub->type) {
case V4L2_EVENT_EOS:
return v4l2_event_subscribe(fh, sub, 0, NULL);
case V4L2_EVENT_SOURCE_CHANGE:
if (ctx->inst_type == CODA_INST_DECODER)
return v4l2_event_subscribe(fh, sub, 0, NULL);
else
return -EINVAL;
default:
return v4l2_ctrl_subscribe_event(fh, sub);
}
}
static const struct v4l2_ioctl_ops coda_ioctl_ops = {
.vidioc_querycap = coda_querycap,
.vidioc_enum_fmt_vid_cap = coda_enum_fmt,
.vidioc_g_fmt_vid_cap = coda_g_fmt,
.vidioc_try_fmt_vid_cap = coda_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = coda_s_fmt_vid_cap,
.vidioc_enum_fmt_vid_out = coda_enum_fmt,
.vidioc_g_fmt_vid_out = coda_g_fmt,
.vidioc_try_fmt_vid_out = coda_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = coda_s_fmt_vid_out,
.vidioc_reqbufs = coda_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_qbuf = coda_qbuf,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_dqbuf = coda_dqbuf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_selection = coda_g_selection,
.vidioc_s_selection = coda_s_selection,
.vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
.vidioc_encoder_cmd = coda_encoder_cmd,
.vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
.vidioc_decoder_cmd = coda_decoder_cmd,
.vidioc_g_parm = coda_g_parm,
.vidioc_s_parm = coda_s_parm,
.vidioc_enum_framesizes = coda_enum_framesizes,
.vidioc_enum_frameintervals = coda_enum_frameintervals,
.vidioc_subscribe_event = coda_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/*
* Mem-to-mem operations.
*/
static void coda_device_run(void *m2m_priv)
{
struct coda_ctx *ctx = m2m_priv;
struct coda_dev *dev = ctx->dev;
queue_work(dev->workqueue, &ctx->pic_run_work);
}
static void coda_pic_run_work(struct work_struct *work)
{
struct coda_ctx *ctx = container_of(work, struct coda_ctx, pic_run_work);
struct coda_dev *dev = ctx->dev;
int ret;
mutex_lock(&ctx->buffer_mutex);
mutex_lock(&dev->coda_mutex);
ret = ctx->ops->prepare_run(ctx);
if (ret < 0 && ctx->inst_type == CODA_INST_DECODER)
goto out;
if (!wait_for_completion_timeout(&ctx->completion,
msecs_to_jiffies(1000))) {
if (ctx->use_bit) {
dev_err(dev->dev, "CODA PIC_RUN timeout\n");
ctx->hold = true;
coda_hw_reset(ctx);
}
if (ctx->ops->run_timeout)
ctx->ops->run_timeout(ctx);
} else {
ctx->ops->finish_run(ctx);
}
if ((ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out)) &&
ctx->ops->seq_end_work)
queue_work(dev->workqueue, &ctx->seq_end_work);
out:
mutex_unlock(&dev->coda_mutex);
mutex_unlock(&ctx->buffer_mutex);
v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx);
}
static int coda_job_ready(void *m2m_priv)
{
struct coda_ctx *ctx = m2m_priv;
int src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx);
/*
 * For both 'P' and 'key' frame cases one input picture and
 * one output frame are needed. In the decoder case, the
 * compressed frame can already be in the bitstream.
 */
if (!src_bufs && ctx->inst_type != CODA_INST_DECODER) {
coda_dbg(1, ctx, "not ready: not enough vid-out buffers.\n");
return 0;
}
if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) {
coda_dbg(1, ctx, "not ready: not enough vid-cap buffers.\n");
return 0;
}
if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) {
bool stream_end = ctx->bit_stream_param &
CODA_BIT_STREAM_END_FLAG;
int num_metas = ctx->num_metas;
struct coda_buffer_meta *meta;
unsigned int count;
count = hweight32(ctx->frm_dis_flg);
if (ctx->use_vdoa && count >= (ctx->num_internal_frames - 1)) {
coda_dbg(1, ctx,
"not ready: all internal buffers in use: %d/%d (0x%x)",
count, ctx->num_internal_frames,
ctx->frm_dis_flg);
return 0;
}
if (ctx->hold && !src_bufs) {
coda_dbg(1, ctx,
"not ready: on hold for more buffers.\n");
return 0;
}
if (!stream_end && (num_metas + src_bufs) < 2) {
coda_dbg(1, ctx,
"not ready: need 2 buffers available (queue:%d + bitstream:%d)\n",
num_metas, src_bufs);
return 0;
}
meta = list_first_entry(&ctx->buffer_meta_list,
struct coda_buffer_meta, list);
if (!coda_bitstream_can_fetch_past(ctx, meta->end) &&
!stream_end) {
coda_dbg(1, ctx,
"not ready: not enough bitstream data to read past %u (%u)\n",
meta->end, ctx->bitstream_fifo.kfifo.in);
return 0;
}
}
if (ctx->aborting) {
coda_dbg(1, ctx, "not ready: aborting\n");
return 0;
}
coda_dbg(2, ctx, "job ready\n");
return 1;
}
static void coda_job_abort(void *priv)
{
struct coda_ctx *ctx = priv;
ctx->aborting = 1;
coda_dbg(1, ctx, "job abort\n");
}
static const struct v4l2_m2m_ops coda_m2m_ops = {
.device_run = coda_device_run,
.job_ready = coda_job_ready,
.job_abort = coda_job_abort,
};
static void set_default_params(struct coda_ctx *ctx)
{
unsigned int max_w, max_h, usize, csize;
ctx->codec = coda_find_codec(ctx->dev, ctx->cvd->src_formats[0],
ctx->cvd->dst_formats[0]);
max_w = min(ctx->codec->max_w, 1920U);
max_h = min(ctx->codec->max_h, 1088U);
usize = max_w * max_h * 3 / 2;
csize = coda_estimate_sizeimage(ctx, usize, max_w, max_h);
ctx->params.codec_mode = ctx->codec->mode;
if (ctx->cvd->src_formats[0] == V4L2_PIX_FMT_JPEG ||
ctx->cvd->dst_formats[0] == V4L2_PIX_FMT_JPEG) {
ctx->colorspace = V4L2_COLORSPACE_SRGB;
ctx->xfer_func = V4L2_XFER_FUNC_SRGB;
ctx->ycbcr_enc = V4L2_YCBCR_ENC_601;
ctx->quantization = V4L2_QUANTIZATION_FULL_RANGE;
} else {
ctx->colorspace = V4L2_COLORSPACE_REC709;
ctx->xfer_func = V4L2_XFER_FUNC_DEFAULT;
ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
}
ctx->params.framerate = 30;
/* Default formats for output and input queues */
ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->cvd->src_formats[0];
ctx->q_data[V4L2_M2M_DST].fourcc = ctx->cvd->dst_formats[0];
ctx->q_data[V4L2_M2M_SRC].width = max_w;
ctx->q_data[V4L2_M2M_SRC].height = max_h;
ctx->q_data[V4L2_M2M_DST].width = max_w;
ctx->q_data[V4L2_M2M_DST].height = max_h;
if (ctx->codec->src_fourcc == V4L2_PIX_FMT_YUV420) {
ctx->q_data[V4L2_M2M_SRC].bytesperline = max_w;
ctx->q_data[V4L2_M2M_SRC].sizeimage = usize;
ctx->q_data[V4L2_M2M_DST].bytesperline = 0;
ctx->q_data[V4L2_M2M_DST].sizeimage = csize;
} else {
ctx->q_data[V4L2_M2M_SRC].bytesperline = 0;
ctx->q_data[V4L2_M2M_SRC].sizeimage = csize;
ctx->q_data[V4L2_M2M_DST].bytesperline = max_w;
ctx->q_data[V4L2_M2M_DST].sizeimage = usize;
}
ctx->q_data[V4L2_M2M_SRC].rect.width = max_w;
ctx->q_data[V4L2_M2M_SRC].rect.height = max_h;
ctx->q_data[V4L2_M2M_DST].rect.width = max_w;
ctx->q_data[V4L2_M2M_DST].rect.height = max_h;
/*
 * Since the RBC2AXI logic only supports a single chroma plane,
 * macroblock tiling only works for the NV12 pixel format.
 */
ctx->tiled_map_type = GDI_LINEAR_FRAME_MAP;
}
/*
* Queue operations
*/
static int coda_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct coda_ctx *ctx = vb2_get_drv_priv(vq);
struct coda_q_data *q_data;
unsigned int size;
q_data = get_q_data(ctx, vq->type);
size = q_data->sizeimage;
if (*nplanes)
return sizes[0] < size ? -EINVAL : 0;
*nplanes = 1;
sizes[0] = size;
coda_dbg(1, ctx, "get %d buffer(s) of size %d each.\n", *nbuffers,
size);
return 0;
}
static int coda_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct coda_q_data *q_data;
q_data = get_q_data(ctx, vb->vb2_queue->type);
if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
if (vbuf->field == V4L2_FIELD_ANY)
vbuf->field = V4L2_FIELD_NONE;
if (vbuf->field != V4L2_FIELD_NONE) {
v4l2_warn(&ctx->dev->v4l2_dev,
"%s field isn't supported\n", __func__);
return -EINVAL;
}
}
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
v4l2_warn(&ctx->dev->v4l2_dev,
"%s data will not fit into plane (%lu < %lu)\n",
__func__, vb2_plane_size(vb, 0),
(long)q_data->sizeimage);
return -EINVAL;
}
return 0;
}
static void coda_update_menu_ctrl(struct v4l2_ctrl *ctrl, int value)
{
if (!ctrl)
return;
v4l2_ctrl_lock(ctrl);
/*
* Extend the control range if the parsed stream contains a known but
* unsupported value or level.
*/
if (value > ctrl->maximum) {
__v4l2_ctrl_modify_range(ctrl, ctrl->minimum, value,
ctrl->menu_skip_mask & ~(1 << value),
ctrl->default_value);
} else if (value < ctrl->minimum) {
__v4l2_ctrl_modify_range(ctrl, value, ctrl->maximum,
ctrl->menu_skip_mask & ~(1 << value),
ctrl->default_value);
}
__v4l2_ctrl_s_ctrl(ctrl, value);
v4l2_ctrl_unlock(ctrl);
}
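/*
 * For example (assuming a CODA960 decoder, whose h264_level_ctrl is
 * created with a maximum of level 4.1 in coda_decode_ctrls() below), a
 * parsed stream at level 4.2 would extend the control's range upward so
 * the read-only control can still report the actual level to userspace.
 */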
void coda_update_profile_level_ctrls(struct coda_ctx *ctx, u8 profile_idc,
u8 level_idc)
{
const char * const *profile_names;
const char * const *level_names;
struct v4l2_ctrl *profile_ctrl;
struct v4l2_ctrl *level_ctrl;
const char *codec_name;
u32 profile_cid;
u32 level_cid;
int profile;
int level;
switch (ctx->codec->src_fourcc) {
case V4L2_PIX_FMT_H264:
codec_name = "H264";
profile_cid = V4L2_CID_MPEG_VIDEO_H264_PROFILE;
level_cid = V4L2_CID_MPEG_VIDEO_H264_LEVEL;
profile_ctrl = ctx->h264_profile_ctrl;
level_ctrl = ctx->h264_level_ctrl;
profile = coda_h264_profile(profile_idc);
level = coda_h264_level(level_idc);
break;
case V4L2_PIX_FMT_MPEG2:
codec_name = "MPEG-2";
profile_cid = V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE;
level_cid = V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL;
profile_ctrl = ctx->mpeg2_profile_ctrl;
level_ctrl = ctx->mpeg2_level_ctrl;
profile = coda_mpeg2_profile(profile_idc);
level = coda_mpeg2_level(level_idc);
break;
case V4L2_PIX_FMT_MPEG4:
codec_name = "MPEG-4";
profile_cid = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE;
level_cid = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL;
profile_ctrl = ctx->mpeg4_profile_ctrl;
level_ctrl = ctx->mpeg4_level_ctrl;
profile = coda_mpeg4_profile(profile_idc);
level = coda_mpeg4_level(level_idc);
break;
default:
return;
}
profile_names = v4l2_ctrl_get_menu(profile_cid);
level_names = v4l2_ctrl_get_menu(level_cid);
if (profile < 0) {
v4l2_warn(&ctx->dev->v4l2_dev, "Invalid %s profile: %u\n",
codec_name, profile_idc);
} else {
coda_dbg(1, ctx, "Parsed %s profile: %s\n", codec_name,
profile_names[profile]);
coda_update_menu_ctrl(profile_ctrl, profile);
}
if (level < 0) {
v4l2_warn(&ctx->dev->v4l2_dev, "Invalid %s level: %u\n",
codec_name, level_idc);
} else {
coda_dbg(1, ctx, "Parsed %s level: %s\n", codec_name,
level_names[level]);
coda_update_menu_ctrl(level_ctrl, level);
}
}
static void coda_queue_source_change_event(struct coda_ctx *ctx)
{
static const struct v4l2_event source_change_event = {
.type = V4L2_EVENT_SOURCE_CHANGE,
.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
};
v4l2_event_queue_fh(&ctx->fh, &source_change_event);
}
static void coda_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_queue *vq = vb->vb2_queue;
struct coda_q_data *q_data;
q_data = get_q_data(ctx, vb->vb2_queue->type);
/*
* In the decoder case, immediately try to copy the buffer into the
* bitstream ringbuffer and mark it as ready to be dequeued.
*/
if (ctx->bitstream.size && vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
/*
* For backwards compatibility, queuing an empty buffer marks
* the stream end
*/
if (vb2_get_plane_payload(vb, 0) == 0)
coda_bit_stream_end_flag(ctx);
if (q_data->fourcc == V4L2_PIX_FMT_H264) {
/*
 * Unless already done, try to obtain profile_idc and
 * level_idc from the SPS header. This allows deciding
 * whether to enable reordering during sequence
 * initialization.
 */
if (!ctx->params.h264_profile_idc) {
coda_sps_parse_profile(ctx, vb);
coda_update_profile_level_ctrls(ctx,
ctx->params.h264_profile_idc,
ctx->params.h264_level_idc);
}
}
mutex_lock(&ctx->bitstream_mutex);
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
if (vb2_is_streaming(vb->vb2_queue))
/* This sets buf->sequence = ctx->qsequence++ */
coda_fill_bitstream(ctx, NULL);
mutex_unlock(&ctx->bitstream_mutex);
if (!ctx->initialized) {
/*
* Run sequence initialization in case the queued
* buffer contained headers.
*/
if (vb2_is_streaming(vb->vb2_queue) &&
ctx->ops->seq_init_work) {
queue_work(ctx->dev->workqueue,
&ctx->seq_init_work);
flush_work(&ctx->seq_init_work);
}
if (ctx->initialized)
coda_queue_source_change_event(ctx);
}
} else {
if ((ctx->inst_type == CODA_INST_ENCODER || !ctx->use_bit) &&
vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
vbuf->sequence = ctx->qsequence++;
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
}
int coda_alloc_aux_buf(struct coda_dev *dev, struct coda_aux_buf *buf,
size_t size, const char *name, struct dentry *parent)
{
buf->vaddr = dma_alloc_coherent(dev->dev, size, &buf->paddr,
GFP_KERNEL);
if (!buf->vaddr) {
v4l2_err(&dev->v4l2_dev,
"Failed to allocate %s buffer of size %zu\n",
name, size);
return -ENOMEM;
}
buf->size = size;
if (name && parent) {
buf->blob.data = buf->vaddr;
buf->blob.size = size;
buf->dentry = debugfs_create_blob(name, 0444, parent,
&buf->blob);
}
return 0;
}
void coda_free_aux_buf(struct coda_dev *dev,
struct coda_aux_buf *buf)
{
if (buf->vaddr) {
dma_free_coherent(dev->dev, buf->size, buf->vaddr, buf->paddr);
buf->vaddr = NULL;
buf->size = 0;
debugfs_remove(buf->dentry);
buf->dentry = NULL;
}
}
static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct coda_ctx *ctx = vb2_get_drv_priv(q);
struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev;
struct coda_q_data *q_data_src, *q_data_dst;
struct v4l2_m2m_buffer *m2m_buf, *tmp;
struct vb2_v4l2_buffer *buf;
struct list_head list;
int ret = 0;
if (count < 1)
return -EINVAL;
coda_dbg(1, ctx, "start streaming %s\n", v4l2_type_names[q->type]);
INIT_LIST_HEAD(&list);
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) {
/* copy the buffers that were queued before streamon */
mutex_lock(&ctx->bitstream_mutex);
coda_fill_bitstream(ctx, &list);
mutex_unlock(&ctx->bitstream_mutex);
if (ctx->dev->devtype->product != CODA_960 &&
coda_get_bitstream_payload(ctx) < 512) {
v4l2_err(v4l2_dev, "start payload < 512\n");
ret = -EINVAL;
goto err;
}
if (!ctx->initialized) {
/* Run sequence initialization */
if (ctx->ops->seq_init_work) {
queue_work(ctx->dev->workqueue,
&ctx->seq_init_work);
flush_work(&ctx->seq_init_work);
}
}
}
/*
* Check the first input JPEG buffer to determine chroma
* subsampling.
*/
if (q_data_src->fourcc == V4L2_PIX_FMT_JPEG) {
buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
coda_jpeg_decode_header(ctx, &buf->vb2_buf);
/*
* We have to start streaming even if the first buffer
* does not contain a valid JPEG image. The error will
* be caught during device run and will be signalled
* via the capture buffer error flag.
*/
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
q_data_dst->width = round_up(q_data_src->width, 16);
q_data_dst->height = round_up(q_data_src->height, 16);
q_data_dst->bytesperline = q_data_dst->width;
if (ctx->params.jpeg_chroma_subsampling ==
V4L2_JPEG_CHROMA_SUBSAMPLING_420) {
q_data_dst->sizeimage =
q_data_dst->bytesperline *
q_data_dst->height * 3 / 2;
if (q_data_dst->fourcc != V4L2_PIX_FMT_YUV420)
q_data_dst->fourcc = V4L2_PIX_FMT_NV12;
} else {
q_data_dst->sizeimage =
q_data_dst->bytesperline *
q_data_dst->height * 2;
q_data_dst->fourcc = V4L2_PIX_FMT_YUV422P;
}
q_data_dst->rect.left = 0;
q_data_dst->rect.top = 0;
q_data_dst->rect.width = q_data_src->width;
q_data_dst->rect.height = q_data_src->height;
}
ctx->streamon_out = 1;
} else {
ctx->streamon_cap = 1;
}
/* Don't start the coda unless both queues are on */
if (!(ctx->streamon_out && ctx->streamon_cap))
goto out;
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
if ((q_data_src->rect.width != q_data_dst->width &&
round_up(q_data_src->rect.width, 16) != q_data_dst->width) ||
(q_data_src->rect.height != q_data_dst->height &&
round_up(q_data_src->rect.height, 16) != q_data_dst->height)) {
v4l2_err(v4l2_dev, "can't convert %dx%d to %dx%d\n",
q_data_src->rect.width, q_data_src->rect.height,
q_data_dst->width, q_data_dst->height);
ret = -EINVAL;
goto err;
}
/* Allow BIT decoder device_run with no new buffers queued */
if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit)
v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true);
if (q_data_dst->fourcc == V4L2_PIX_FMT_JPEG)
ctx->params.gop_size = 1;
ctx->gopcounter = ctx->params.gop_size - 1;
/* Only decoders have this control */
if (ctx->mb_err_cnt_ctrl)
v4l2_ctrl_s_ctrl(ctx->mb_err_cnt_ctrl, 0);
ret = ctx->ops->start_streaming(ctx);
if (ctx->inst_type == CODA_INST_DECODER) {
if (ret == -EAGAIN)
goto out;
}
if (ret < 0)
goto err;
out:
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
list_for_each_entry_safe(m2m_buf, tmp, &list, list) {
list_del(&m2m_buf->list);
v4l2_m2m_buf_done(&m2m_buf->vb, VB2_BUF_STATE_DONE);
}
}
return 0;
err:
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
list_for_each_entry_safe(m2m_buf, tmp, &list, list) {
list_del(&m2m_buf->list);
v4l2_m2m_buf_done(&m2m_buf->vb, VB2_BUF_STATE_QUEUED);
}
while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
} else {
while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
}
return ret;
}
static void coda_stop_streaming(struct vb2_queue *q)
{
struct coda_ctx *ctx = vb2_get_drv_priv(q);
struct coda_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *buf;
bool stop;
stop = ctx->streamon_out && ctx->streamon_cap;
coda_dbg(1, ctx, "stop streaming %s\n", v4l2_type_names[q->type]);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
ctx->streamon_out = 0;
coda_bit_stream_end_flag(ctx);
ctx->qsequence = 0;
while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
} else {
ctx->streamon_cap = 0;
ctx->osequence = 0;
ctx->sequence_offset = 0;
while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
}
if (stop) {
struct coda_buffer_meta *meta;
if (ctx->ops->seq_end_work) {
queue_work(dev->workqueue, &ctx->seq_end_work);
flush_work(&ctx->seq_end_work);
}
spin_lock(&ctx->buffer_meta_lock);
while (!list_empty(&ctx->buffer_meta_list)) {
meta = list_first_entry(&ctx->buffer_meta_list,
struct coda_buffer_meta, list);
list_del(&meta->list);
kfree(meta);
}
ctx->num_metas = 0;
spin_unlock(&ctx->buffer_meta_lock);
kfifo_init(&ctx->bitstream_fifo,
ctx->bitstream.vaddr, ctx->bitstream.size);
ctx->runcounter = 0;
ctx->aborting = 0;
ctx->hold = false;
}
if (!ctx->streamon_out && !ctx->streamon_cap)
ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG;
}
static const struct vb2_ops coda_qops = {
.queue_setup = coda_queue_setup,
.buf_prepare = coda_buf_prepare,
.buf_queue = coda_buf_queue,
.start_streaming = coda_start_streaming,
.stop_streaming = coda_stop_streaming,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
{
const char * const *val_names = v4l2_ctrl_get_menu(ctrl->id);
struct coda_ctx *ctx =
container_of(ctrl->handler, struct coda_ctx, ctrls);
if (val_names)
coda_dbg(2, ctx, "s_ctrl: id = 0x%x, name = \"%s\", val = %d (\"%s\")\n",
ctrl->id, ctrl->name, ctrl->val, val_names[ctrl->val]);
else
coda_dbg(2, ctx, "s_ctrl: id = 0x%x, name = \"%s\", val = %d\n",
ctrl->id, ctrl->name, ctrl->val);
switch (ctrl->id) {
case V4L2_CID_HFLIP:
if (ctrl->val)
ctx->params.rot_mode |= CODA_MIR_HOR;
else
ctx->params.rot_mode &= ~CODA_MIR_HOR;
break;
case V4L2_CID_VFLIP:
if (ctrl->val)
ctx->params.rot_mode |= CODA_MIR_VER;
else
ctx->params.rot_mode &= ~CODA_MIR_VER;
break;
case V4L2_CID_MPEG_VIDEO_BITRATE:
ctx->params.bitrate = ctrl->val / 1000;
ctx->params.bitrate_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
ctx->params.gop_size = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
ctx->params.h264_intra_qp = ctrl->val;
ctx->params.h264_intra_qp_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
ctx->params.h264_inter_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
ctx->params.h264_min_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
ctx->params.h264_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
ctx->params.h264_slice_alpha_c0_offset_div2 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
ctx->params.h264_slice_beta_offset_div2 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
ctx->params.h264_disable_deblocking_filter_idc = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION:
ctx->params.h264_constrained_intra_pred_flag = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
ctx->params.frame_rc_enable = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:
ctx->params.mb_rc_enable = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET:
ctx->params.h264_chroma_qp_index_offset = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
/* TODO: switch between baseline and constrained baseline */
if (ctx->inst_type == CODA_INST_ENCODER)
ctx->params.h264_profile_idc = 66;
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
/* nothing to do, this is set by the encoder */
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:
ctx->params.mpeg4_intra_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:
ctx->params.mpeg4_inter_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
/* nothing to do, these are fixed */
break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
ctx->params.slice_mode = ctrl->val;
ctx->params.slice_mode_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
ctx->params.slice_max_mb = ctrl->val;
ctx->params.slice_mode_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
ctx->params.slice_max_bits = ctrl->val * 8;
ctx->params.slice_mode_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
break;
case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:
ctx->params.intra_refresh = ctrl->val;
ctx->params.intra_refresh_changed = true;
break;
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
ctx->params.force_ipicture = true;
break;
case V4L2_CID_JPEG_COMPRESSION_QUALITY:
coda_set_jpeg_compression_quality(ctx, ctrl->val);
break;
case V4L2_CID_JPEG_RESTART_INTERVAL:
ctx->params.jpeg_restart_interval = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_VBV_DELAY:
ctx->params.vbv_delay = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_VBV_SIZE:
ctx->params.vbv_size = min(ctrl->val * 8192, 0x7fffffff);
break;
default:
coda_dbg(1, ctx, "Invalid control, id=%d, val=%d\n",
ctrl->id, ctrl->val);
return -EINVAL;
}
return 0;
}
static const struct v4l2_ctrl_ops coda_ctrl_ops = {
.s_ctrl = coda_s_ctrl,
};
static void coda_encode_ctrls(struct coda_ctx *ctx)
{
int max_gop_size = (ctx->dev->devtype->product == CODA_DX6) ? 60 : 99;
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE, 0, 32767000, 1000, 0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, max_gop_size, 1, 16);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 0, 51, 1, 25);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 0, 51, 1, 25);
if (ctx->dev->devtype->product != CODA_960) {
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 0, 51, 1, 12);
}
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION, 0, 1, 1,
0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 0, 1, 1, 1);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE, 0, 1, 1, 1);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET, -12, 12, 1, 0);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE, 0x0,
V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE);
if (ctx->dev->devtype->product == CODA_HX4 ||
ctx->dev->devtype->product == CODA_7541) {
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LEVEL,
V4L2_MPEG_VIDEO_H264_LEVEL_3_1,
~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1)),
V4L2_MPEG_VIDEO_H264_LEVEL_3_1);
}
if (ctx->dev->devtype->product == CODA_960) {
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LEVEL,
V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
~((1 << V4L2_MPEG_VIDEO_H264_LEVEL_1_0) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_2_0) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_0) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_1) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_3_2) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_0) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
(1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2)),
V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
}
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP, 1, 31, 1, 2);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP, 1, 31, 1, 2);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE, 0x0,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE);
if (ctx->dev->devtype->product == CODA_HX4 ||
ctx->dev->devtype->product == CODA_7541 ||
ctx->dev->devtype->product == CODA_960) {
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
~(1 << V4L2_MPEG_VIDEO_MPEG4_LEVEL_5),
V4L2_MPEG_VIDEO_MPEG4_LEVEL_5);
}
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES, 0x0,
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1, 0x3fffffff, 1, 1);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, 1, 0x3fffffff, 1,
500);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEADER_MODE,
V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
(1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE),
V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB, 0,
1920 * 1088 / 256, 1, 0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_VBV_DELAY, 0, 0x7fff, 1, 0);
/*
* The maximum VBV size value is 0x7fffffff bits,
* one bit less than 262144 KiB
*/
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_VBV_SIZE, 0, 262144, 1, 0);
}
static void coda_jpeg_encode_ctrls(struct coda_ctx *ctx)
{
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_JPEG_COMPRESSION_QUALITY, 5, 100, 1, 50);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_JPEG_RESTART_INTERVAL, 0, 100, 1, 0);
}
static void coda_decode_ctrls(struct coda_ctx *ctx)
{
u8 max;
ctx->h264_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
&coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH)),
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
if (ctx->h264_profile_ctrl)
ctx->h264_profile_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
if (ctx->dev->devtype->product == CODA_HX4 ||
ctx->dev->devtype->product == CODA_7541)
max = V4L2_MPEG_VIDEO_H264_LEVEL_4_0;
else if (ctx->dev->devtype->product == CODA_960)
max = V4L2_MPEG_VIDEO_H264_LEVEL_4_1;
else
return;
ctx->h264_level_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
&coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, max, 0, max);
if (ctx->h264_level_ctrl)
ctx->h264_level_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ctx->mpeg2_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
&coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE,
V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH, 0,
V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH);
if (ctx->mpeg2_profile_ctrl)
ctx->mpeg2_profile_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ctx->mpeg2_level_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
&coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG2_LEVEL,
V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH, 0,
V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH);
if (ctx->mpeg2_level_ctrl)
ctx->mpeg2_level_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ctx->mpeg4_profile_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
&coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY, 0,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY);
if (ctx->mpeg4_profile_ctrl)
ctx->mpeg4_profile_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
ctx->mpeg4_level_ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrls,
&coda_ctrl_ops, V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_5, 0,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_5);
if (ctx->mpeg4_level_ctrl)
ctx->mpeg4_level_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
}
static const struct v4l2_ctrl_config coda_mb_err_cnt_ctrl_config = {
.id = V4L2_CID_CODA_MB_ERR_CNT,
.name = "Macroblocks Error Count",
.type = V4L2_CTRL_TYPE_INTEGER,
.min = 0,
.max = 0x7fffffff,
.step = 1,
};
static int coda_ctrls_setup(struct coda_ctx *ctx)
{
v4l2_ctrl_handler_init(&ctx->ctrls, 2);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
if (ctx->inst_type == CODA_INST_ENCODER) {
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
1, 1, 1, 1);
if (ctx->cvd->dst_formats[0] == V4L2_PIX_FMT_JPEG)
coda_jpeg_encode_ctrls(ctx);
else
coda_encode_ctrls(ctx);
} else {
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
1, 1, 1, 1);
if (ctx->cvd->src_formats[0] == V4L2_PIX_FMT_H264)
coda_decode_ctrls(ctx);
ctx->mb_err_cnt_ctrl = v4l2_ctrl_new_custom(&ctx->ctrls,
&coda_mb_err_cnt_ctrl_config,
NULL);
if (ctx->mb_err_cnt_ctrl)
ctx->mb_err_cnt_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
}
if (ctx->ctrls.error) {
v4l2_err(&ctx->dev->v4l2_dev,
"control initialization error (%d)",
ctx->ctrls.error);
return -EINVAL;
}
return v4l2_ctrl_handler_setup(&ctx->ctrls);
}
static int coda_queue_init(struct coda_ctx *ctx, struct vb2_queue *vq)
{
vq->drv_priv = ctx;
vq->ops = &coda_qops;
vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
vq->lock = &ctx->dev->dev_mutex;
/* One way to indicate end-of-stream for coda is to set
 * bytesused == 0. However, by default videobuf2 handles bytesused
 * equal to 0 as a special case and changes its value to the size
 * of the buffer. Set the allow_zero_bytesused flag, so
 * that videobuf2 will keep the value of bytesused intact.
 */
vq->allow_zero_bytesused = 1;
/*
* We might be fine with no buffers on some of the queues, but that
* would need to be reflected in job_ready(). Currently we expect all
* queues to have at least one buffer queued.
*/
vq->min_buffers_needed = 1;
vq->dev = ctx->dev->dev;
return vb2_queue_init(vq);
}
int coda_encoder_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
src_vq->mem_ops = &vb2_dma_contig_memops;
ret = coda_queue_init(priv, src_vq);
if (ret)
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
dst_vq->mem_ops = &vb2_dma_contig_memops;
return coda_queue_init(priv, dst_vq);
}
int coda_decoder_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_DMABUF | VB2_MMAP | VB2_USERPTR;
src_vq->mem_ops = &vb2_vmalloc_memops;
ret = coda_queue_init(priv, src_vq);
if (ret)
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
dst_vq->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING;
dst_vq->mem_ops = &vb2_dma_contig_memops;
return coda_queue_init(priv, dst_vq);
}
/*
* File operations
*/
static int coda_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct coda_dev *dev = video_get_drvdata(vdev);
struct coda_ctx *ctx;
unsigned int max = ~0;
char *name;
int ret;
int idx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
if (dev->devtype->product == CODA_DX6)
max = CODADX6_MAX_INSTANCES - 1;
idx = ida_alloc_max(&dev->ida, max, GFP_KERNEL);
if (idx < 0) {
ret = idx;
goto err_coda_max;
}
name = kasprintf(GFP_KERNEL, "context%d", idx);
if (!name) {
ret = -ENOMEM;
goto err_coda_name_init;
}
ctx->debugfs_entry = debugfs_create_dir(name, dev->debugfs_root);
kfree(name);
ctx->cvd = to_coda_video_device(vdev);
ctx->inst_type = ctx->cvd->type;
ctx->ops = ctx->cvd->ops;
ctx->use_bit = !ctx->cvd->direct;
init_completion(&ctx->completion);
INIT_WORK(&ctx->pic_run_work, coda_pic_run_work);
if (ctx->ops->seq_init_work)
INIT_WORK(&ctx->seq_init_work, ctx->ops->seq_init_work);
if (ctx->ops->seq_end_work)
INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work);
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
ctx->dev = dev;
ctx->idx = idx;
coda_dbg(1, ctx, "open instance (%p)\n", ctx);
switch (dev->devtype->product) {
case CODA_960:
/*
 * Enabling the BWB when decoding can hang the firmware with
 * certain streams. The issue was tracked as ENGR00293425 by
 * Freescale. As a workaround, disable BWB for all decoders.
 * The enable_bwb module parameter allows overriding this.
 */
if (enable_bwb || ctx->inst_type == CODA_INST_ENCODER)
ctx->frame_mem_ctrl = CODA9_FRAME_ENABLE_BWB;
fallthrough;
case CODA_HX4:
case CODA_7541:
ctx->reg_idx = 0;
break;
default:
ctx->reg_idx = idx;
}
if (ctx->dev->vdoa && !disable_vdoa) {
ctx->vdoa = vdoa_context_create(dev->vdoa);
if (!ctx->vdoa)
v4l2_warn(&dev->v4l2_dev,
"Failed to create vdoa context: not using vdoa");
}
ctx->use_vdoa = false;
/* Power up and upload firmware if necessary */
ret = pm_runtime_resume_and_get(dev->dev);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "failed to power up: %d\n", ret);
goto err_pm_get;
}
ret = clk_prepare_enable(dev->clk_per);
if (ret)
goto err_clk_enable;
ret = clk_prepare_enable(dev->clk_ahb);
if (ret)
goto err_clk_ahb;
set_default_params(ctx);
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
ctx->ops->queue_init);
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
v4l2_err(&dev->v4l2_dev, "%s return error (%d)\n",
__func__, ret);
goto err_ctx_init;
}
ret = coda_ctrls_setup(ctx);
if (ret) {
v4l2_err(&dev->v4l2_dev, "failed to setup coda controls\n");
goto err_ctrls_setup;
}
ctx->fh.ctrl_handler = &ctx->ctrls;
mutex_init(&ctx->bitstream_mutex);
mutex_init(&ctx->buffer_mutex);
mutex_init(&ctx->wakeup_mutex);
INIT_LIST_HEAD(&ctx->buffer_meta_list);
spin_lock_init(&ctx->buffer_meta_lock);
return 0;
err_ctrls_setup:
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
err_ctx_init:
clk_disable_unprepare(dev->clk_ahb);
err_clk_ahb:
clk_disable_unprepare(dev->clk_per);
err_clk_enable:
pm_runtime_put_sync(dev->dev);
err_pm_get:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
err_coda_name_init:
ida_free(&dev->ida, ctx->idx);
err_coda_max:
kfree(ctx);
return ret;
}
static int coda_release(struct file *file)
{
struct coda_dev *dev = video_drvdata(file);
struct coda_ctx *ctx = fh_to_ctx(file->private_data);
coda_dbg(1, ctx, "release instance (%p)\n", ctx);
if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit)
coda_bit_stream_end_flag(ctx);
/* If this instance is running, call .job_abort and wait for it to end */
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
if (ctx->vdoa)
vdoa_context_destroy(ctx->vdoa);
/* In case the instance was not running, we still need to call SEQ_END */
if (ctx->ops->seq_end_work) {
queue_work(dev->workqueue, &ctx->seq_end_work);
flush_work(&ctx->seq_end_work);
}
if (ctx->dev->devtype->product == CODA_DX6)
coda_free_aux_buf(dev, &ctx->workbuf);
v4l2_ctrl_handler_free(&ctx->ctrls);
clk_disable_unprepare(dev->clk_ahb);
clk_disable_unprepare(dev->clk_per);
pm_runtime_put_sync(dev->dev);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
ida_free(&dev->ida, ctx->idx);
if (ctx->ops->release)
ctx->ops->release(ctx);
debugfs_remove_recursive(ctx->debugfs_entry);
kfree(ctx);
return 0;
}
static const struct v4l2_file_operations coda_fops = {
.owner = THIS_MODULE,
.open = coda_open,
.release = coda_release,
.poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = v4l2_m2m_fop_mmap,
};
static int coda_hw_init(struct coda_dev *dev)
{
u32 data;
u16 *p;
int i, ret;
ret = clk_prepare_enable(dev->clk_per);
if (ret)
goto err_clk_per;
ret = clk_prepare_enable(dev->clk_ahb);
if (ret)
goto err_clk_ahb;
reset_control_reset(dev->rstc);
/*
 * Copy the first CODA_ISRAM_SIZE bytes into the internal SRAM.
 * The 16-bit halfwords in the code buffer are in memory access
 * order; re-sort them to CODA order for register download.
 * Data in this SRAM survives a reboot.
 */
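/*
 * On non-DX6 parts the halfwords are downloaded in groups of four in
 * reverse order: for i = 0..3 the loop below reads p[3], p[2], p[1],
 * p[0], then p[7]..p[4] for i = 4..7, and so on.
 */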
p = (u16 *)dev->codebuf.vaddr;
if (dev->devtype->product == CODA_DX6) {
for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) {
data = CODA_DOWN_ADDRESS_SET(i) |
CODA_DOWN_DATA_SET(p[i ^ 1]);
coda_write(dev, data, CODA_REG_BIT_CODE_DOWN);
}
} else {
for (i = 0; i < (CODA_ISRAM_SIZE / 2); i++) {
data = CODA_DOWN_ADDRESS_SET(i) |
CODA_DOWN_DATA_SET(p[round_down(i, 4) +
3 - (i % 4)]);
coda_write(dev, data, CODA_REG_BIT_CODE_DOWN);
}
}
/* Clear registers */
for (i = 0; i < 64; i++)
coda_write(dev, 0, CODA_REG_BIT_CODE_BUF_ADDR + i * 4);
/* Tell the BIT where to find everything it needs */
if (dev->devtype->product == CODA_960 ||
dev->devtype->product == CODA_7541 ||
dev->devtype->product == CODA_HX4) {
coda_write(dev, dev->tempbuf.paddr,
CODA_REG_BIT_TEMP_BUF_ADDR);
coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
} else {
coda_write(dev, dev->workbuf.paddr,
CODA_REG_BIT_WORK_BUF_ADDR);
}
coda_write(dev, dev->codebuf.paddr,
CODA_REG_BIT_CODE_BUF_ADDR);
coda_write(dev, 0, CODA_REG_BIT_CODE_RUN);
/* Set default values */
switch (dev->devtype->product) {
case CODA_DX6:
coda_write(dev, CODADX6_STREAM_BUF_PIC_FLUSH,
CODA_REG_BIT_STREAM_CTRL);
break;
default:
coda_write(dev, CODA7_STREAM_BUF_PIC_FLUSH,
CODA_REG_BIT_STREAM_CTRL);
}
if (dev->devtype->product == CODA_960)
coda_write(dev, CODA9_FRAME_ENABLE_BWB,
CODA_REG_BIT_FRAME_MEM_CTRL);
else
coda_write(dev, 0, CODA_REG_BIT_FRAME_MEM_CTRL);
if (dev->devtype->product != CODA_DX6)
coda_write(dev, 0, CODA7_REG_BIT_AXI_SRAM_USE);
coda_write(dev, CODA_INT_INTERRUPT_ENABLE,
CODA_REG_BIT_INT_ENABLE);
/* Reset VPU and start processor */
data = coda_read(dev, CODA_REG_BIT_CODE_RESET);
data |= CODA_REG_RESET_ENABLE;
coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
udelay(10);
data &= ~CODA_REG_RESET_ENABLE;
coda_write(dev, data, CODA_REG_BIT_CODE_RESET);
coda_write(dev, CODA_REG_RUN_ENABLE, CODA_REG_BIT_CODE_RUN);
clk_disable_unprepare(dev->clk_ahb);
clk_disable_unprepare(dev->clk_per);
return 0;
err_clk_ahb:
clk_disable_unprepare(dev->clk_per);
err_clk_per:
return ret;
}
static int coda_register_device(struct coda_dev *dev, int i)
{
struct video_device *vfd = &dev->vfd[i];
const char *name;
int ret;
if (i >= dev->devtype->num_vdevs)
return -EINVAL;
name = dev->devtype->vdevs[i]->name;
strscpy(vfd->name, dev->devtype->vdevs[i]->name, sizeof(vfd->name));
vfd->fops = &coda_fops;
vfd->ioctl_ops = &coda_ioctl_ops;
vfd->release = video_device_release_empty;
vfd->lock = &dev->dev_mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->vfl_dir = VFL_DIR_M2M;
vfd->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
video_set_drvdata(vfd, dev);
/* Not applicable, use the selection API instead */
v4l2_disable_ioctl(vfd, VIDIOC_CROPCAP);
v4l2_disable_ioctl(vfd, VIDIOC_G_CROP);
v4l2_disable_ioctl(vfd, VIDIOC_S_CROP);
if (dev->devtype->vdevs[i]->type == CODA_INST_ENCODER) {
v4l2_disable_ioctl(vfd, VIDIOC_DECODER_CMD);
v4l2_disable_ioctl(vfd, VIDIOC_TRY_DECODER_CMD);
if (dev->devtype->vdevs[i]->dst_formats[0] == V4L2_PIX_FMT_JPEG) {
v4l2_disable_ioctl(vfd, VIDIOC_ENUM_FRAMEINTERVALS);
v4l2_disable_ioctl(vfd, VIDIOC_G_PARM);
v4l2_disable_ioctl(vfd, VIDIOC_S_PARM);
}
} else {
v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
v4l2_disable_ioctl(vfd, VIDIOC_ENUM_FRAMESIZES);
v4l2_disable_ioctl(vfd, VIDIOC_ENUM_FRAMEINTERVALS);
v4l2_disable_ioctl(vfd, VIDIOC_G_PARM);
v4l2_disable_ioctl(vfd, VIDIOC_S_PARM);
}
ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (!ret)
v4l2_info(&dev->v4l2_dev, "%s registered as %s\n",
name, video_device_node_name(vfd));
return ret;
}
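/*
 * Copy the firmware image into the code buffer, reordering it into the
 * layout the BIT processor fetches from memory. Within each 32-bit word
 * the 16-bit halves are swapped, and on everything newer than CodaDx6
 * the two words of each 64-bit group are exchanged as well. A sketch of
 * the resulting byte order on a little-endian host (illustrative, not
 * normative):
 *
 *   native:  b0 b1 b2 b3 b4 b5 b6 b7
 *   CodaDx6: b2 b3 b0 b1 b6 b7 b4 b5
 *   CODA9:   b6 b7 b4 b5 b2 b3 b0 b1
 */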
static void coda_copy_firmware(struct coda_dev *dev, const u8 * const buf,
size_t size)
{
u32 *src = (u32 *)buf;
/* If the firmware has a 16-byte Freescale header, skip it */
if (buf[0] == 'M' && buf[1] == 'X')
src += 4;
/*
 * Check whether the firmware is in native order or pre-reordered for
 * memory access. The first instruction opcode is always 0xe40e.
 */
if (__le16_to_cpup((__le16 *)src) == 0xe40e) {
u32 *dst = dev->codebuf.vaddr;
int i;
/* Firmware in native order, reorder while copying */
if (dev->devtype->product == CODA_DX6) {
for (i = 0; i < (size - 16) / 4; i++)
dst[i] = (src[i] << 16) | (src[i] >> 16);
} else {
for (i = 0; i < (size - 16) / 4; i += 2) {
dst[i] = (src[i + 1] << 16) | (src[i + 1] >> 16);
dst[i + 1] = (src[i] << 16) | (src[i] >> 16);
}
}
} else {
/* Copy the already reordered firmware image */
memcpy(dev->codebuf.vaddr, src, size);
}
}
static void coda_fw_callback(const struct firmware *fw, void *context);
static int coda_firmware_request(struct coda_dev *dev)
{
char *fw;
if (dev->firmware >= ARRAY_SIZE(dev->devtype->firmware))
return -EINVAL;
fw = dev->devtype->firmware[dev->firmware];
dev_dbg(dev->dev, "requesting firmware '%s' for %s\n", fw,
coda_product_name(dev->devtype->product));
return request_firmware_nowait(THIS_MODULE, true, fw, dev->dev,
GFP_KERNEL, dev, coda_fw_callback);
}
static void coda_fw_callback(const struct firmware *fw, void *context)
{
struct coda_dev *dev = context;
int i, ret;
if (!fw) {
dev->firmware++;
ret = coda_firmware_request(dev);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "firmware request failed\n");
goto put_pm;
}
return;
}
if (dev->firmware > 0) {
/*
* Since we can't suppress warnings for failed asynchronous
* firmware requests, report that the fallback firmware was
* found.
*/
dev_info(dev->dev, "Using fallback firmware %s\n",
dev->devtype->firmware[dev->firmware]);
}
/* allocate auxiliary per-device code buffer for the BIT processor */
ret = coda_alloc_aux_buf(dev, &dev->codebuf, fw->size, "codebuf",
dev->debugfs_root);
if (ret < 0)
goto put_pm;
coda_copy_firmware(dev, fw->data, fw->size);
release_firmware(fw);
ret = coda_hw_init(dev);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "HW initialization failed\n");
goto put_pm;
}
ret = coda_check_firmware(dev);
if (ret < 0)
goto put_pm;
dev->m2m_dev = v4l2_m2m_init(&coda_m2m_ops);
if (IS_ERR(dev->m2m_dev)) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
goto put_pm;
}
for (i = 0; i < dev->devtype->num_vdevs; i++) {
ret = coda_register_device(dev, i);
if (ret) {
v4l2_err(&dev->v4l2_dev,
"Failed to register %s video device: %d\n",
dev->devtype->vdevs[i]->name, ret);
goto rel_vfd;
}
}
pm_runtime_put_sync(dev->dev);
return;
rel_vfd:
while (--i >= 0)
video_unregister_device(&dev->vfd[i]);
v4l2_m2m_release(dev->m2m_dev);
put_pm:
pm_runtime_put_sync(dev->dev);
}
enum coda_platform {
CODA_IMX27,
CODA_IMX51,
CODA_IMX53,
CODA_IMX6Q,
CODA_IMX6DL,
};
static const struct coda_devtype coda_devdata[] = {
[CODA_IMX27] = {
.firmware = {
"vpu_fw_imx27_TO2.bin",
"vpu/vpu_fw_imx27_TO2.bin",
"v4l-codadx6-imx27.bin"
},
.product = CODA_DX6,
.codecs = codadx6_codecs,
.num_codecs = ARRAY_SIZE(codadx6_codecs),
.vdevs = codadx6_video_devices,
.num_vdevs = ARRAY_SIZE(codadx6_video_devices),
.workbuf_size = 288 * 1024 + FMO_SLICE_SAVE_BUF_SIZE * 8 * 1024,
.iram_size = 0xb000,
},
[CODA_IMX51] = {
.firmware = {
"vpu_fw_imx51.bin",
"vpu/vpu_fw_imx51.bin",
"v4l-codahx4-imx51.bin"
},
.product = CODA_HX4,
.codecs = codahx4_codecs,
.num_codecs = ARRAY_SIZE(codahx4_codecs),
.vdevs = codahx4_video_devices,
.num_vdevs = ARRAY_SIZE(codahx4_video_devices),
.workbuf_size = 128 * 1024,
.tempbuf_size = 304 * 1024,
.iram_size = 0x14000,
},
[CODA_IMX53] = {
.firmware = {
"vpu_fw_imx53.bin",
"vpu/vpu_fw_imx53.bin",
"v4l-coda7541-imx53.bin"
},
.product = CODA_7541,
.codecs = coda7_codecs,
.num_codecs = ARRAY_SIZE(coda7_codecs),
.vdevs = coda7_video_devices,
.num_vdevs = ARRAY_SIZE(coda7_video_devices),
.workbuf_size = 128 * 1024,
.tempbuf_size = 304 * 1024,
.iram_size = 0x14000,
},
[CODA_IMX6Q] = {
.firmware = {
"vpu_fw_imx6q.bin",
"vpu/vpu_fw_imx6q.bin",
"v4l-coda960-imx6q.bin"
},
.product = CODA_960,
.codecs = coda9_codecs,
.num_codecs = ARRAY_SIZE(coda9_codecs),
.vdevs = coda9_video_devices,
.num_vdevs = ARRAY_SIZE(coda9_video_devices),
.workbuf_size = 80 * 1024,
.tempbuf_size = 204 * 1024,
.iram_size = 0x21000,
},
[CODA_IMX6DL] = {
.firmware = {
"vpu_fw_imx6d.bin",
"vpu/vpu_fw_imx6d.bin",
"v4l-coda960-imx6dl.bin"
},
.product = CODA_960,
.codecs = coda9_codecs,
.num_codecs = ARRAY_SIZE(coda9_codecs),
.vdevs = coda9_video_devices,
.num_vdevs = ARRAY_SIZE(coda9_video_devices),
.workbuf_size = 80 * 1024,
.tempbuf_size = 204 * 1024,
.iram_size = 0x1f000, /* leave 4k for suspend code */
},
};
static const struct of_device_id coda_dt_ids[] = {
{ .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
{ .compatible = "fsl,imx51-vpu", .data = &coda_devdata[CODA_IMX51] },
{ .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
{ .compatible = "fsl,imx6q-vpu", .data = &coda_devdata[CODA_IMX6Q] },
{ .compatible = "fsl,imx6dl-vpu", .data = &coda_devdata[CODA_IMX6DL] },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, coda_dt_ids);
static int coda_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct gen_pool *pool;
struct coda_dev *dev;
int ret, irq;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->devtype = of_device_get_match_data(&pdev->dev);
dev->dev = &pdev->dev;
dev->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(dev->clk_per)) {
dev_err(&pdev->dev, "Could not get per clock\n");
return PTR_ERR(dev->clk_per);
}
dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(dev->clk_ahb)) {
dev_err(&pdev->dev, "Could not get ahb clock\n");
return PTR_ERR(dev->clk_ahb);
}
/* Get memory for physical registers */
dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
/* IRQ */
irq = platform_get_irq_byname(pdev, "bit");
if (irq < 0)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
ret = devm_request_irq(&pdev->dev, irq, coda_irq_handler, 0,
CODA_NAME "-video", dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
return ret;
}
/* JPEG IRQ */
if (dev->devtype->product == CODA_960) {
irq = platform_get_irq_byname(pdev, "jpeg");
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
coda9_jpeg_irq_handler,
IRQF_ONESHOT, CODA_NAME "-jpeg",
dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request jpeg irq\n");
return ret;
}
}
dev->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev,
NULL);
if (IS_ERR(dev->rstc)) {
ret = PTR_ERR(dev->rstc);
dev_err(&pdev->dev, "failed get reset control: %d\n", ret);
return ret;
}
/* Get IRAM pool from device tree */
pool = of_gen_pool_get(np, "iram", 0);
if (!pool) {
dev_err(&pdev->dev, "iram pool not available\n");
return -ENOMEM;
}
dev->iram_pool = pool;
/* Get vdoa_data if supported by the platform */
dev->vdoa = coda_get_vdoa_data();
if (PTR_ERR(dev->vdoa) == -EPROBE_DEFER)
return -EPROBE_DEFER;
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
return ret;
ratelimit_default_init(&dev->mb_err_rs);
mutex_init(&dev->dev_mutex);
mutex_init(&dev->coda_mutex);
ida_init(&dev->ida);
dev->debugfs_root = debugfs_create_dir("coda", NULL);
/* allocate auxiliary per-device buffers for the BIT processor */
if (dev->devtype->product == CODA_DX6) {
ret = coda_alloc_aux_buf(dev, &dev->workbuf,
dev->devtype->workbuf_size, "workbuf",
dev->debugfs_root);
if (ret < 0)
goto err_v4l2_register;
}
if (dev->devtype->tempbuf_size) {
ret = coda_alloc_aux_buf(dev, &dev->tempbuf,
dev->devtype->tempbuf_size, "tempbuf",
dev->debugfs_root);
if (ret < 0)
goto err_v4l2_register;
}
dev->iram.size = dev->devtype->iram_size;
dev->iram.vaddr = gen_pool_dma_alloc(dev->iram_pool, dev->iram.size,
&dev->iram.paddr);
if (!dev->iram.vaddr) {
dev_warn(&pdev->dev, "unable to alloc iram\n");
} else {
memset(dev->iram.vaddr, 0, dev->iram.size);
dev->iram.blob.data = dev->iram.vaddr;
dev->iram.blob.size = dev->iram.size;
dev->iram.dentry = debugfs_create_blob("iram", 0444,
dev->debugfs_root,
&dev->iram.blob);
}
dev->workqueue = alloc_ordered_workqueue("coda", WQ_MEM_RECLAIM);
if (!dev->workqueue) {
dev_err(&pdev->dev, "unable to alloc workqueue\n");
ret = -ENOMEM;
goto err_v4l2_register;
}
platform_set_drvdata(pdev, dev);
/*
* Start activated so we can directly call coda_hw_init in
* coda_fw_callback regardless of whether CONFIG_PM is
* enabled or whether the device is associated with a PM domain.
*/
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = coda_firmware_request(dev);
if (ret)
goto err_alloc_workqueue;
return 0;
err_alloc_workqueue:
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
destroy_workqueue(dev->workqueue);
err_v4l2_register:
v4l2_device_unregister(&dev->v4l2_dev);
return ret;
}
static void coda_remove(struct platform_device *pdev)
{
struct coda_dev *dev = platform_get_drvdata(pdev);
int i;
for (i = 0; i < ARRAY_SIZE(dev->vfd); i++) {
if (video_get_drvdata(&dev->vfd[i]))
video_unregister_device(&dev->vfd[i]);
}
if (dev->m2m_dev)
v4l2_m2m_release(dev->m2m_dev);
pm_runtime_disable(&pdev->dev);
v4l2_device_unregister(&dev->v4l2_dev);
destroy_workqueue(dev->workqueue);
if (dev->iram.vaddr)
gen_pool_free(dev->iram_pool, (unsigned long)dev->iram.vaddr,
dev->iram.size);
coda_free_aux_buf(dev, &dev->codebuf);
coda_free_aux_buf(dev, &dev->tempbuf);
coda_free_aux_buf(dev, &dev->workbuf);
debugfs_remove_recursive(dev->debugfs_root);
ida_destroy(&dev->ida);
}
#ifdef CONFIG_PM
static int coda_runtime_resume(struct device *dev)
{
struct coda_dev *cdev = dev_get_drvdata(dev);
int ret = 0;
if (dev->pm_domain && cdev->codebuf.vaddr) {
ret = coda_hw_init(cdev);
if (ret)
v4l2_err(&cdev->v4l2_dev, "HW initialization failed\n");
}
return ret;
}
#endif
static const struct dev_pm_ops coda_pm_ops = {
SET_RUNTIME_PM_OPS(NULL, coda_runtime_resume, NULL)
};
static struct platform_driver coda_driver = {
.probe = coda_probe,
.remove_new = coda_remove,
.driver = {
.name = CODA_NAME,
.of_match_table = coda_dt_ids,
.pm = &coda_pm_ops,
},
};
module_platform_driver(coda_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <[email protected]>");
MODULE_DESCRIPTION("Coda multi-standard codec V4L2 driver");
| linux-master | drivers/media/platform/chips-media/coda-common.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Coda multi-standard codec IP - JPEG support functions
*
* Copyright (C) 2014 Philipp Zabel, Pengutronix
*/
#include <asm/unaligned.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-jpeg.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include "coda.h"
#include "trace.h"
#define SOI_MARKER 0xffd8
#define APP9_MARKER 0xffe9
#define DRI_MARKER 0xffdd
#define DQT_MARKER 0xffdb
#define DHT_MARKER 0xffc4
#define SOF_MARKER 0xffc0
#define SOS_MARKER 0xffda
#define EOI_MARKER 0xffd9
enum {
CODA9_JPEG_FORMAT_420,
CODA9_JPEG_FORMAT_422,
CODA9_JPEG_FORMAT_224,
CODA9_JPEG_FORMAT_444,
CODA9_JPEG_FORMAT_400,
};
struct coda_huff_tab {
u8 luma_dc[16 + 12];
u8 chroma_dc[16 + 12];
u8 luma_ac[16 + 162];
u8 chroma_ac[16 + 162];
/* DC Luma, DC Chroma, AC Luma, AC Chroma */
s16 min[4 * 16];
s16 max[4 * 16];
s8 ptr[4 * 16];
};
#define CODA9_JPEG_ENC_HUFF_DATA_SIZE (256 + 256 + 16 + 16)
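/*
 * 256 + 256 + 16 + 16 32-bit lookup entries: AC luma, AC chroma,
 * DC luma and DC chroma, in the order the hardware expects (see
 * coda9_jpeg_load_huff_tab() below).
 */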
/*
* Typical Huffman tables for 8-bit precision luminance and
* chrominance from JPEG ITU-T.81 (ISO/IEC 10918-1) Annex K.3
*/
static const unsigned char luma_dc[16 + 12] = {
/* bits */
0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* values */
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b,
};
static const unsigned char chroma_dc[16 + 12] = {
/* bits */
0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
/* values */
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
0x08, 0x09, 0x0a, 0x0b,
};
static const unsigned char luma_ac[16 + 162 + 2] = {
/* bits */
0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
/* values */
0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
0xf9, 0xfa, /* padded to 32-bit */
};
static const unsigned char chroma_ac[16 + 162 + 2] = {
/* bits */
0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
/* values */
0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
0xf9, 0xfa, /* padded to 32-bit */
};
/*
* Quantization tables for luminance and chrominance components in
* zig-zag scan order from the Freescale i.MX VPU libraries
*/
static unsigned char luma_q[64] = {
0x06, 0x04, 0x04, 0x04, 0x05, 0x04, 0x06, 0x05,
0x05, 0x06, 0x09, 0x06, 0x05, 0x06, 0x09, 0x0b,
0x08, 0x06, 0x06, 0x08, 0x0b, 0x0c, 0x0a, 0x0a,
0x0b, 0x0a, 0x0a, 0x0c, 0x10, 0x0c, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0x10, 0x0c, 0x0c, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
};
static unsigned char chroma_q[64] = {
0x07, 0x07, 0x07, 0x0d, 0x0c, 0x0d, 0x18, 0x10,
0x10, 0x18, 0x14, 0x0e, 0x0e, 0x0e, 0x14, 0x14,
0x0e, 0x0e, 0x0e, 0x0e, 0x14, 0x11, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0x11, 0x11, 0x0c, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0x11, 0x0c, 0x0c, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c,
};
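/*
 * Per-format MCU dimensions: 4:2:0 uses 16x16 MCUs, 4:2:2 uses 16x8,
 * the vertical-only 2:2:4 variant uses 8x16, and 4:4:4 and grayscale
 * use 8x8. Image dimensions are rounded up to these below.
 */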
static const unsigned char width_align[] = {
[CODA9_JPEG_FORMAT_420] = 16,
[CODA9_JPEG_FORMAT_422] = 16,
[CODA9_JPEG_FORMAT_224] = 8,
[CODA9_JPEG_FORMAT_444] = 8,
[CODA9_JPEG_FORMAT_400] = 8,
};
static const unsigned char height_align[] = {
[CODA9_JPEG_FORMAT_420] = 16,
[CODA9_JPEG_FORMAT_422] = 8,
[CODA9_JPEG_FORMAT_224] = 16,
[CODA9_JPEG_FORMAT_444] = 8,
[CODA9_JPEG_FORMAT_400] = 8,
};
static int coda9_jpeg_chroma_format(u32 pixfmt)
{
switch (pixfmt) {
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_NV12:
return CODA9_JPEG_FORMAT_420;
case V4L2_PIX_FMT_YUV422P:
return CODA9_JPEG_FORMAT_422;
case V4L2_PIX_FMT_YUV444:
return CODA9_JPEG_FORMAT_444;
case V4L2_PIX_FMT_GREY:
return CODA9_JPEG_FORMAT_400;
}
return -EINVAL;
}
struct coda_memcpy_desc {
int offset;
const void *src;
size_t len;
};
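/*
 * The BIT processor presumably reads the parameter buffer as big-endian
 * 64-bit words: the swab32() plus word-swap in the copy below reverses
 * each group of eight bytes on a little-endian host
 * (b0..b7 -> b7 b6 b5 b4 b3 b2 b1 b0).
 */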
static void coda_memcpy_parabuf(void *parabuf,
const struct coda_memcpy_desc *desc)
{
u32 *dst = parabuf + desc->offset;
const u32 *src = desc->src;
int len = desc->len / 4;
int i;
for (i = 0; i < len; i += 2) {
dst[i + 1] = swab32(src[i]);
dst[i] = swab32(src[i + 1]);
}
}
int coda_jpeg_write_tables(struct coda_ctx *ctx)
{
int i;
static const struct coda_memcpy_desc huff[8] = {
{ 0, luma_dc, sizeof(luma_dc) },
{ 32, luma_ac, sizeof(luma_ac) },
{ 216, chroma_dc, sizeof(chroma_dc) },
{ 248, chroma_ac, sizeof(chroma_ac) },
};
struct coda_memcpy_desc qmat[3] = {
{ 512, ctx->params.jpeg_qmat_tab[0], 64 },
{ 576, ctx->params.jpeg_qmat_tab[1], 64 },
{ 640, ctx->params.jpeg_qmat_tab[1], 64 },
};
/* Write huffman tables to parameter memory */
for (i = 0; i < ARRAY_SIZE(huff); i++)
coda_memcpy_parabuf(ctx->parabuf.vaddr, huff + i);
/* Write Q-matrix to parameter memory */
for (i = 0; i < ARRAY_SIZE(qmat); i++)
coda_memcpy_parabuf(ctx->parabuf.vaddr, qmat + i);
return 0;
}
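/*
 * Sanity-check a JPEG bitstream buffer: it must start with an SOI
 * marker, and the EOI marker is searched for within the last 32 bytes
 * to tolerate trailing padding; if found earlier than the final two
 * bytes, the payload is trimmed so the stream ends right after EOI.
 */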
bool coda_jpeg_check_buffer(struct coda_ctx *ctx, struct vb2_buffer *vb)
{
void *vaddr = vb2_plane_vaddr(vb, 0);
u16 soi, eoi;
int len, i;
soi = be16_to_cpup((__be16 *)vaddr);
if (soi != SOI_MARKER)
return false;
len = vb2_get_plane_payload(vb, 0);
vaddr += len - 2;
for (i = 0; i < 32; i++) {
eoi = be16_to_cpup((__be16 *)(vaddr - i));
if (eoi == EOI_MARKER) {
if (i > 0)
vb2_set_plane_payload(vb, 0, len - i);
return true;
}
}
return false;
}
static int coda9_jpeg_gen_dec_huff_tab(struct coda_ctx *ctx, int tab_num);
int coda_jpeg_decode_header(struct coda_ctx *ctx, struct vb2_buffer *vb)
{
struct coda_dev *dev = ctx->dev;
u8 *buf = vb2_plane_vaddr(vb, 0);
size_t len = vb2_get_plane_payload(vb, 0);
struct v4l2_jpeg_scan_header scan_header;
struct v4l2_jpeg_reference quantization_tables[4] = { };
struct v4l2_jpeg_reference huffman_tables[4] = { };
struct v4l2_jpeg_header header = {
.scan = &scan_header,
.quantization_tables = quantization_tables,
.huffman_tables = huffman_tables,
};
struct coda_q_data *q_data_src;
struct coda_huff_tab *huff_tab;
int i, j, ret;
ret = v4l2_jpeg_parse_header(buf, len, &header);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "failed to parse JPEG header: %pe\n",
ERR_PTR(ret));
return ret;
}
ctx->params.jpeg_restart_interval = header.restart_interval;
/* check frame header */
if (header.frame.height > ctx->codec->max_h ||
header.frame.width > ctx->codec->max_w) {
v4l2_err(&dev->v4l2_dev, "invalid dimensions: %dx%d\n",
header.frame.width, header.frame.height);
return -EINVAL;
}
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (header.frame.height != q_data_src->height ||
header.frame.width != q_data_src->width) {
v4l2_err(&dev->v4l2_dev,
"dimensions don't match format: %dx%d\n",
header.frame.width, header.frame.height);
return -EINVAL;
}
if (header.frame.num_components != 3) {
v4l2_err(&dev->v4l2_dev,
"unsupported number of components: %d\n",
header.frame.num_components);
return -EINVAL;
}
/* install quantization tables */
if (quantization_tables[3].start) {
v4l2_err(&dev->v4l2_dev,
"only 3 quantization tables supported\n");
return -EINVAL;
}
for (i = 0; i < 3; i++) {
if (!quantization_tables[i].start)
continue;
if (quantization_tables[i].length != 64) {
v4l2_err(&dev->v4l2_dev,
"only 8-bit quantization tables supported\n");
continue;
}
if (!ctx->params.jpeg_qmat_tab[i]) {
ctx->params.jpeg_qmat_tab[i] = kmalloc(64, GFP_KERNEL);
if (!ctx->params.jpeg_qmat_tab[i])
return -ENOMEM;
}
memcpy(ctx->params.jpeg_qmat_tab[i],
quantization_tables[i].start, 64);
}
/* install Huffman tables */
for (i = 0; i < 4; i++) {
if (!huffman_tables[i].start) {
v4l2_err(&dev->v4l2_dev, "missing Huffman table\n");
return -EINVAL;
}
/* AC table lengths must be between 17 and 178 bytes, DC between 17 and 28 */
if (huffman_tables[i].length < 17 ||
huffman_tables[i].length > 178 ||
((i & 2) == 0 && huffman_tables[i].length > 28)) {
v4l2_err(&dev->v4l2_dev,
"invalid Huffman table %d length: %zu\n",
i, huffman_tables[i].length);
return -EINVAL;
}
}
huff_tab = ctx->params.jpeg_huff_tab;
if (!huff_tab) {
huff_tab = kzalloc(sizeof(struct coda_huff_tab), GFP_KERNEL);
if (!huff_tab)
return -ENOMEM;
ctx->params.jpeg_huff_tab = huff_tab;
}
memset(huff_tab, 0, sizeof(*huff_tab));
memcpy(huff_tab->luma_dc, huffman_tables[0].start, huffman_tables[0].length);
memcpy(huff_tab->chroma_dc, huffman_tables[1].start, huffman_tables[1].length);
memcpy(huff_tab->luma_ac, huffman_tables[2].start, huffman_tables[2].length);
memcpy(huff_tab->chroma_ac, huffman_tables[3].start, huffman_tables[3].length);
/* check scan header */
for (i = 0; i < scan_header.num_components; i++) {
struct v4l2_jpeg_scan_component_spec *scan_component;
scan_component = &scan_header.component[i];
for (j = 0; j < header.frame.num_components; j++) {
if (header.frame.component[j].component_identifier ==
scan_component->component_selector)
break;
}
if (j == header.frame.num_components)
continue;
ctx->params.jpeg_huff_dc_index[j] =
scan_component->dc_entropy_coding_table_selector;
ctx->params.jpeg_huff_ac_index[j] =
scan_component->ac_entropy_coding_table_selector;
}
/* Generate Huffman table information */
for (i = 0; i < 4; i++)
coda9_jpeg_gen_dec_huff_tab(ctx, i);
/* start of entropy coded segment */
ctx->jpeg_ecs_offset = header.ecs_offset;
switch (header.frame.subsampling) {
case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
ctx->params.jpeg_chroma_subsampling = header.frame.subsampling;
break;
default:
v4l2_err(&dev->v4l2_dev, "chroma subsampling not supported: %d",
header.frame.subsampling);
return -EINVAL;
}
return 0;
}
static inline void coda9_jpeg_write_huff_values(struct coda_dev *dev, u8 *bits,
int num_values)
{
s8 *values = (s8 *)(bits + 16);
int huff_length, i;
for (huff_length = 0, i = 0; i < 16; i++)
huff_length += bits[i];
for (i = huff_length; i < num_values; i++)
values[i] = -1;
for (i = 0; i < num_values; i++)
coda_write(dev, (s32)values[i], CODA9_REG_JPEG_HUFF_DATA);
}
static void coda9_jpeg_dec_huff_setup(struct coda_ctx *ctx)
{
struct coda_huff_tab *huff_tab = ctx->params.jpeg_huff_tab;
struct coda_dev *dev = ctx->dev;
s16 *huff_min = huff_tab->min;
s16 *huff_max = huff_tab->max;
s8 *huff_ptr = huff_tab->ptr;
int i;
/* MIN Tables */
coda_write(dev, 0x003, CODA9_REG_JPEG_HUFF_CTRL);
coda_write(dev, 0x000, CODA9_REG_JPEG_HUFF_ADDR);
for (i = 0; i < 4 * 16; i++)
coda_write(dev, (s32)huff_min[i], CODA9_REG_JPEG_HUFF_DATA);
/* MAX Tables */
coda_write(dev, 0x403, CODA9_REG_JPEG_HUFF_CTRL);
coda_write(dev, 0x440, CODA9_REG_JPEG_HUFF_ADDR);
for (i = 0; i < 4 * 16; i++)
coda_write(dev, (s32)huff_max[i], CODA9_REG_JPEG_HUFF_DATA);
/* PTR Tables */
coda_write(dev, 0x803, CODA9_REG_JPEG_HUFF_CTRL);
coda_write(dev, 0x880, CODA9_REG_JPEG_HUFF_ADDR);
for (i = 0; i < 4 * 16; i++)
coda_write(dev, (s32)huff_ptr[i], CODA9_REG_JPEG_HUFF_DATA);
/* VAL Tables: DC Luma, DC Chroma, AC Luma, AC Chroma */
coda_write(dev, 0xc03, CODA9_REG_JPEG_HUFF_CTRL);
coda9_jpeg_write_huff_values(dev, huff_tab->luma_dc, 12);
coda9_jpeg_write_huff_values(dev, huff_tab->chroma_dc, 12);
coda9_jpeg_write_huff_values(dev, huff_tab->luma_ac, 162);
coda9_jpeg_write_huff_values(dev, huff_tab->chroma_ac, 162);
coda_write(dev, 0x000, CODA9_REG_JPEG_HUFF_CTRL);
}
static inline void coda9_jpeg_write_qmat_tab(struct coda_dev *dev,
u8 *qmat, int index)
{
int i;
coda_write(dev, index | 0x3, CODA9_REG_JPEG_QMAT_CTRL);
for (i = 0; i < 64; i++)
coda_write(dev, qmat[i], CODA9_REG_JPEG_QMAT_DATA);
coda_write(dev, 0, CODA9_REG_JPEG_QMAT_CTRL);
}
static void coda9_jpeg_qmat_setup(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
int *qmat_index = ctx->params.jpeg_qmat_index;
u8 **qmat_tab = ctx->params.jpeg_qmat_tab;
coda9_jpeg_write_qmat_tab(dev, qmat_tab[qmat_index[0]], 0x00);
coda9_jpeg_write_qmat_tab(dev, qmat_tab[qmat_index[1]], 0x40);
coda9_jpeg_write_qmat_tab(dev, qmat_tab[qmat_index[2]], 0x80);
}
static void coda9_jpeg_dec_bbc_gbu_setup(struct coda_ctx *ctx,
struct vb2_buffer *buf, u32 ecs_offset)
{
struct coda_dev *dev = ctx->dev;
int page_ptr, word_ptr, bit_ptr;
u32 bbc_base_addr, end_addr;
int bbc_cur_pos;
int ret, val;
bbc_base_addr = vb2_dma_contig_plane_dma_addr(buf, 0);
end_addr = bbc_base_addr + vb2_get_plane_payload(buf, 0);
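/*
 * Decompose the entropy-coded segment offset into the BBC 256-byte
 * page / GBU 64-bit word addressing scheme. The GBU window spans two
 * pages (128 words), so an odd page starts at word 64 and an odd word
 * index adds 32 to the bit pointer; two pages are prefetched below.
 * Worked example: ecs_offset = 261 gives page_ptr = 1,
 * word_ptr = 1 + 64 = 65 (odd, so bit_ptr = 8 + 32 = 40 and word_ptr
 * rounds down to 64), i.e. bit 40 of word 64 = byte 261.
 */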
page_ptr = ecs_offset / 256;
word_ptr = (ecs_offset % 256) / 4;
if (page_ptr & 1)
word_ptr += 64;
bit_ptr = (ecs_offset % 4) * 8;
if (word_ptr & 1)
bit_ptr += 32;
word_ptr &= ~0x1;
coda_write(dev, end_addr, CODA9_REG_JPEG_BBC_WR_PTR);
coda_write(dev, bbc_base_addr, CODA9_REG_JPEG_BBC_BAS_ADDR);
/* Leave a margin of three 256-byte pages to avoid a BBC interrupt */
coda_write(dev, end_addr + 256 * 3 + 256, CODA9_REG_JPEG_BBC_END_ADDR);
val = DIV_ROUND_UP(vb2_plane_size(buf, 0), 256) + 3;
coda_write(dev, BIT(31) | val, CODA9_REG_JPEG_BBC_STRM_CTRL);
bbc_cur_pos = page_ptr;
coda_write(dev, bbc_cur_pos, CODA9_REG_JPEG_BBC_CUR_POS);
coda_write(dev, bbc_base_addr + (bbc_cur_pos << 8),
CODA9_REG_JPEG_BBC_EXT_ADDR);
coda_write(dev, (bbc_cur_pos & 1) << 6, CODA9_REG_JPEG_BBC_INT_ADDR);
coda_write(dev, 64, CODA9_REG_JPEG_BBC_DATA_CNT);
coda_write(dev, 0, CODA9_REG_JPEG_BBC_COMMAND);
do {
ret = coda_read(dev, CODA9_REG_JPEG_BBC_BUSY);
} while (ret == 1);
bbc_cur_pos++;
coda_write(dev, bbc_cur_pos, CODA9_REG_JPEG_BBC_CUR_POS);
coda_write(dev, bbc_base_addr + (bbc_cur_pos << 8),
CODA9_REG_JPEG_BBC_EXT_ADDR);
coda_write(dev, (bbc_cur_pos & 1) << 6, CODA9_REG_JPEG_BBC_INT_ADDR);
coda_write(dev, 64, CODA9_REG_JPEG_BBC_DATA_CNT);
coda_write(dev, 0, CODA9_REG_JPEG_BBC_COMMAND);
do {
ret = coda_read(dev, CODA9_REG_JPEG_BBC_BUSY);
} while (ret == 1);
bbc_cur_pos++;
coda_write(dev, bbc_cur_pos, CODA9_REG_JPEG_BBC_CUR_POS);
coda_write(dev, 1, CODA9_REG_JPEG_BBC_CTRL);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_TT_CNT);
coda_write(dev, word_ptr, CODA9_REG_JPEG_GBU_WD_PTR);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBSR);
coda_write(dev, 127, CODA9_REG_JPEG_GBU_BBER);
if (page_ptr & 1) {
coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBIR);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBHR);
} else {
coda_write(dev, 64, CODA9_REG_JPEG_GBU_BBIR);
coda_write(dev, 64, CODA9_REG_JPEG_GBU_BBHR);
}
coda_write(dev, 4, CODA9_REG_JPEG_GBU_CTRL);
coda_write(dev, bit_ptr, CODA9_REG_JPEG_GBU_FF_RPTR);
coda_write(dev, 3, CODA9_REG_JPEG_GBU_CTRL);
}
static const int bus_req_num[] = {
[CODA9_JPEG_FORMAT_420] = 2,
[CODA9_JPEG_FORMAT_422] = 3,
[CODA9_JPEG_FORMAT_224] = 3,
[CODA9_JPEG_FORMAT_444] = 4,
[CODA9_JPEG_FORMAT_400] = 4,
};
#define MCU_INFO(mcu_block_num, comp_num, comp0_info, comp1_info, comp2_info) \
(((mcu_block_num) << CODA9_JPEG_MCU_BLOCK_NUM_OFFSET) | \
((comp_num) << CODA9_JPEG_COMP_NUM_OFFSET) | \
((comp0_info) << CODA9_JPEG_COMP0_INFO_OFFSET) | \
((comp1_info) << CODA9_JPEG_COMP1_INFO_OFFSET) | \
((comp2_info) << CODA9_JPEG_COMP2_INFO_OFFSET))
static const u32 mcu_info[] = {
[CODA9_JPEG_FORMAT_420] = MCU_INFO(6, 3, 10, 5, 5),
[CODA9_JPEG_FORMAT_422] = MCU_INFO(4, 3, 9, 5, 5),
[CODA9_JPEG_FORMAT_224] = MCU_INFO(4, 3, 6, 5, 5),
[CODA9_JPEG_FORMAT_444] = MCU_INFO(3, 3, 5, 5, 5),
[CODA9_JPEG_FORMAT_400] = MCU_INFO(1, 1, 5, 0, 0),
};
/*
 * Convert Huffman table specifications to tables of codes and code lengths.
 * For reference, see JPEG ITU-T.81 (ISO/IEC 10918-1) [1]
 *
 * [1] https://www.w3.org/Graphics/JPEG/itu-t81.pdf
 */
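/*
 * For illustration: the standard luma DC table above (one code of
 * length 2, five of length 3, ...) yields the canonical codes "00" for
 * symbol 0x00, "010".."110" for symbols 0x01..0x05, "1110" for symbol
 * 0x06, and so on, following Figures C.1 through C.3.
 */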
static int coda9_jpeg_gen_enc_huff_tab(struct coda_ctx *ctx, int tab_num,
int *ehufsi, int *ehufco)
{
int i, j, k, lastk, si, code, maxsymbol;
const u8 *bits, *huffval;
struct {
int size[256];
int code[256];
} *huff;
static const unsigned char *huff_tabs[4] = {
luma_dc, luma_ac, chroma_dc, chroma_ac,
};
int ret = -EINVAL;
huff = kzalloc(sizeof(*huff), GFP_KERNEL);
if (!huff)
return -ENOMEM;
bits = huff_tabs[tab_num];
huffval = huff_tabs[tab_num] + 16;
maxsymbol = tab_num & 1 ? 256 : 16;
/* Figure C.1 - Generation of table of Huffman code sizes */
k = 0;
for (i = 1; i <= 16; i++) {
j = bits[i - 1];
if (k + j > maxsymbol)
goto out;
while (j--)
huff->size[k++] = i;
}
lastk = k;
/* Figure C.2 - Generation of table of Huffman codes */
k = 0;
code = 0;
si = huff->size[0];
while (k < lastk) {
while (huff->size[k] == si) {
huff->code[k++] = code;
code++;
}
if (code >= (1 << si))
goto out;
code <<= 1;
si++;
}
/* Figure C.3 - Ordering procedure for encoding procedure code tables */
for (k = 0; k < lastk; k++) {
i = huffval[k];
if (i >= maxsymbol || ehufsi[i])
goto out;
ehufco[i] = huff->code[k];
ehufsi[i] = huff->size[k];
}
ret = 0;
out:
kfree(huff);
return ret;
}
#define DC_TABLE_INDEX0 0
#define AC_TABLE_INDEX0 1
#define DC_TABLE_INDEX1 2
#define AC_TABLE_INDEX1 3
static u8 *coda9_jpeg_get_huff_bits(struct coda_ctx *ctx, int tab_num)
{
struct coda_huff_tab *huff_tab = ctx->params.jpeg_huff_tab;
if (!huff_tab)
return NULL;
switch (tab_num) {
case DC_TABLE_INDEX0: return huff_tab->luma_dc;
case AC_TABLE_INDEX0: return huff_tab->luma_ac;
case DC_TABLE_INDEX1: return huff_tab->chroma_dc;
case AC_TABLE_INDEX1: return huff_tab->chroma_ac;
}
return NULL;
}
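/*
 * Build the canonical-Huffman decode helper tables: for each code
 * length, huff_min/huff_max hold the smallest/largest code of that
 * length (-1 if none) and huff_ptr the index of its first symbol in
 * the values array. For illustration, the standard chroma DC table
 * (three codes of length 2, then one per length) gives min/max = 0/2
 * and ptr = 0 at length 2, and min = max = 6 (binary 110), ptr = 3 at
 * length 3.
 */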
static int coda9_jpeg_gen_dec_huff_tab(struct coda_ctx *ctx, int tab_num)
{
int ptr_cnt = 0, huff_code = 0, zero_flag = 0, data_flag = 0;
u8 *huff_bits;
s16 *huff_max;
s16 *huff_min;
s8 *huff_ptr;
int ofs;
int i;
huff_bits = coda9_jpeg_get_huff_bits(ctx, tab_num);
if (!huff_bits)
return -EINVAL;
/* DC/AC Luma, DC/AC Chroma -> DC Luma/Chroma, AC Luma/Chroma */
ofs = ((tab_num & 1) << 1) | ((tab_num >> 1) & 1);
ofs *= 16;
huff_ptr = ctx->params.jpeg_huff_tab->ptr + ofs;
huff_max = ctx->params.jpeg_huff_tab->max + ofs;
huff_min = ctx->params.jpeg_huff_tab->min + ofs;
for (i = 0; i < 16; i++) {
if (huff_bits[i]) {
huff_ptr[i] = ptr_cnt;
ptr_cnt += huff_bits[i];
huff_min[i] = huff_code;
huff_max[i] = huff_code + (huff_bits[i] - 1);
data_flag = 1;
zero_flag = 0;
} else {
huff_ptr[i] = -1;
huff_min[i] = -1;
huff_max[i] = -1;
zero_flag = 1;
}
if (data_flag == 1) {
if (zero_flag == 1)
huff_code <<= 1;
else
huff_code = (huff_max[i] + 1) << 1;
}
}
return 0;
}
static int coda9_jpeg_load_huff_tab(struct coda_ctx *ctx)
{
struct {
int size[4][256];
int code[4][256];
} *huff;
u32 *huff_data;
int i, j;
int ret;
huff = kzalloc(sizeof(*huff), GFP_KERNEL);
if (!huff)
return -ENOMEM;
/* Generate all four (luma/chroma DC/AC) code/size lookup tables */
for (i = 0; i < 4; i++) {
ret = coda9_jpeg_gen_enc_huff_tab(ctx, i, huff->size[i],
huff->code[i]);
if (ret)
goto out;
}
if (!ctx->params.jpeg_huff_data) {
ctx->params.jpeg_huff_data =
kzalloc(sizeof(u32) * CODA9_JPEG_ENC_HUFF_DATA_SIZE,
GFP_KERNEL);
if (!ctx->params.jpeg_huff_data) {
ret = -ENOMEM;
goto out;
}
}
huff_data = ctx->params.jpeg_huff_data;
for (j = 0; j < 4; j++) {
/* Store Huffman lookup tables in AC0, AC1, DC0, DC1 order */
int t = (j == 0) ? AC_TABLE_INDEX0 :
(j == 1) ? AC_TABLE_INDEX1 :
(j == 2) ? DC_TABLE_INDEX0 :
DC_TABLE_INDEX1;
/* DC tables only have 16 entries */
int len = (j < 2) ? 256 : 16;
for (i = 0; i < len; i++) {
if (huff->size[t][i] == 0 && huff->code[t][i] == 0)
*(huff_data++) = 0;
else
*(huff_data++) =
((huff->size[t][i] - 1) << 16) |
huff->code[t][i];
}
}
ret = 0;
out:
kfree(huff);
return ret;
}
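/*
 * Each lookup entry packed above is ((size - 1) << 16) | code, with
 * zero meaning "no code for this symbol". For illustration, a symbol
 * encoded with the 3-bit code 110 is stored as 0x00020006.
 */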
static void coda9_jpeg_write_huff_tab(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
u32 *huff_data = ctx->params.jpeg_huff_data;
int i;
/* Write Huffman size/code lookup tables in AC0, AC1, DC0, DC1 order */
coda_write(dev, 0x3, CODA9_REG_JPEG_HUFF_CTRL);
for (i = 0; i < CODA9_JPEG_ENC_HUFF_DATA_SIZE; i++)
coda_write(dev, *(huff_data++), CODA9_REG_JPEG_HUFF_DATA);
coda_write(dev, 0x0, CODA9_REG_JPEG_HUFF_CTRL);
}
static inline void coda9_jpeg_write_qmat_quotients(struct coda_dev *dev,
u8 *qmat, int index)
{
int i;
coda_write(dev, index | 0x3, CODA9_REG_JPEG_QMAT_CTRL);
for (i = 0; i < 64; i++)
coda_write(dev, 0x80000 / qmat[i], CODA9_REG_JPEG_QMAT_DATA);
coda_write(dev, index, CODA9_REG_JPEG_QMAT_CTRL);
}
static void coda9_jpeg_load_qmat_tab(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
u8 *luma_tab;
u8 *chroma_tab;
luma_tab = ctx->params.jpeg_qmat_tab[0];
if (!luma_tab)
luma_tab = luma_q;
chroma_tab = ctx->params.jpeg_qmat_tab[1];
if (!chroma_tab)
chroma_tab = chroma_q;
coda9_jpeg_write_qmat_quotients(dev, luma_tab, 0x00);
coda9_jpeg_write_qmat_quotients(dev, chroma_tab, 0x40);
coda9_jpeg_write_qmat_quotients(dev, chroma_tab, 0x80);
}
struct coda_jpeg_stream {
u8 *curr;
u8 *end;
};
static inline int coda_jpeg_put_byte(u8 byte, struct coda_jpeg_stream *stream)
{
if (stream->curr >= stream->end)
return -EINVAL;
*stream->curr++ = byte;
return 0;
}
static inline int coda_jpeg_put_word(u16 word, struct coda_jpeg_stream *stream)
{
if (stream->curr + sizeof(__be16) > stream->end)
return -EINVAL;
put_unaligned_be16(word, stream->curr);
stream->curr += sizeof(__be16);
return 0;
}
static int coda_jpeg_put_table(u16 marker, u8 index, const u8 *table,
size_t len, struct coda_jpeg_stream *stream)
{
int i, ret;
ret = coda_jpeg_put_word(marker, stream);
if (ret < 0)
return ret;
ret = coda_jpeg_put_word(3 + len, stream);
if (ret < 0)
return ret;
ret = coda_jpeg_put_byte(index, stream);
for (i = 0; i < len && ret == 0; i++)
ret = coda_jpeg_put_byte(table[i], stream);
return ret;
}
static int coda_jpeg_define_quantization_table(struct coda_ctx *ctx, u8 index,
struct coda_jpeg_stream *stream)
{
return coda_jpeg_put_table(DQT_MARKER, index,
ctx->params.jpeg_qmat_tab[index], 64,
stream);
}
static int coda_jpeg_define_huffman_table(u8 index, const u8 *table, size_t len,
struct coda_jpeg_stream *stream)
{
return coda_jpeg_put_table(DHT_MARKER, index, table, len, stream);
}
static int coda9_jpeg_encode_header(struct coda_ctx *ctx, int len, u8 *buf)
{
struct coda_jpeg_stream stream = { buf, buf + len };
struct coda_q_data *q_data_src;
int chroma_format, comp_num;
int i, ret, pad;
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
chroma_format = coda9_jpeg_chroma_format(q_data_src->fourcc);
if (chroma_format < 0)
return 0;
/* Start Of Image */
ret = coda_jpeg_put_word(SOI_MARKER, &stream);
if (ret < 0)
return ret;
/* Define Restart Interval */
if (ctx->params.jpeg_restart_interval) {
ret = coda_jpeg_put_word(DRI_MARKER, &stream);
if (ret < 0)
return ret;
ret = coda_jpeg_put_word(4, &stream);
if (ret < 0)
return ret;
ret = coda_jpeg_put_word(ctx->params.jpeg_restart_interval,
&stream);
if (ret < 0)
return ret;
}
/* Define Quantization Tables */
ret = coda_jpeg_define_quantization_table(ctx, 0x00, &stream);
if (ret < 0)
return ret;
if (chroma_format != CODA9_JPEG_FORMAT_400) {
ret = coda_jpeg_define_quantization_table(ctx, 0x01, &stream);
if (ret < 0)
return ret;
}
/* Define Huffman Tables */
ret = coda_jpeg_define_huffman_table(0x00, luma_dc, 16 + 12, &stream);
if (ret < 0)
return ret;
ret = coda_jpeg_define_huffman_table(0x10, luma_ac, 16 + 162, &stream);
if (ret < 0)
return ret;
if (chroma_format != CODA9_JPEG_FORMAT_400) {
ret = coda_jpeg_define_huffman_table(0x01, chroma_dc, 16 + 12,
&stream);
if (ret < 0)
return ret;
ret = coda_jpeg_define_huffman_table(0x11, chroma_ac, 16 + 162,
&stream);
if (ret < 0)
return ret;
}
/* Start Of Frame */
ret = coda_jpeg_put_word(SOF_MARKER, &stream);
if (ret < 0)
return ret;
comp_num = (chroma_format == CODA9_JPEG_FORMAT_400) ? 1 : 3;
ret = coda_jpeg_put_word(8 + comp_num * 3, &stream);
if (ret < 0)
return ret;
ret = coda_jpeg_put_byte(0x08, &stream);
if (ret < 0)
return ret;
ret = coda_jpeg_put_word(q_data_src->height, &stream);
if (ret < 0)
return ret;
ret = coda_jpeg_put_word(q_data_src->width, &stream);
if (ret < 0)
return ret;
ret = coda_jpeg_put_byte(comp_num, &stream);
if (ret < 0)
return ret;
for (i = 0; i < comp_num; i++) {
static unsigned char subsampling[5][3] = {
[CODA9_JPEG_FORMAT_420] = { 0x22, 0x11, 0x11 },
[CODA9_JPEG_FORMAT_422] = { 0x21, 0x11, 0x11 },
[CODA9_JPEG_FORMAT_224] = { 0x12, 0x11, 0x11 },
[CODA9_JPEG_FORMAT_444] = { 0x11, 0x11, 0x11 },
[CODA9_JPEG_FORMAT_400] = { 0x11 },
};
/* Component identifier, matches SOS */
ret = coda_jpeg_put_byte(i + 1, &stream);
if (ret < 0)
return ret;
ret = coda_jpeg_put_byte(subsampling[chroma_format][i],
&stream);
if (ret < 0)
return ret;
/* Quantization table selector: 0 for luma, 1 for chroma */
ret = coda_jpeg_put_byte((i == 0) ? 0 : 1, &stream);
if (ret < 0)
return ret;
}
/* Pad to multiple of 8 bytes */
pad = (stream.curr - buf) % 8;
if (pad) {
pad = 8 - pad;
while (pad--) {
ret = coda_jpeg_put_byte(0x00, &stream);
if (ret < 0)
return ret;
}
}
return stream.curr - buf;
}
/*
 * Scale a quantization table by a nonlinear factor given in percent
 * (u8 q_tab[64]; scaled entries are clamped to [1, 255])
 */
static void coda_scale_quant_table(u8 *q_tab, int scale)
{
unsigned int temp;
int i;
for (i = 0; i < 64; i++) {
temp = DIV_ROUND_CLOSEST((unsigned int)q_tab[i] * scale, 100);
if (temp <= 0)
temp = 1;
if (temp > 255)
temp = 255;
q_tab[i] = (unsigned char)temp;
}
}
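/*
 * Worked example of the quality mapping below: quality 25 gives scale
 * 5000 / 25 = 200, so a quantization table entry of 16 becomes 32
 * (coarser); quality 75 gives scale 200 - 150 = 50, so 16 becomes 8
 * (finer). Quality 50 maps to scale 100 and leaves the tables
 * unchanged.
 */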
void coda_set_jpeg_compression_quality(struct coda_ctx *ctx, int quality)
{
unsigned int scale;
ctx->params.jpeg_quality = quality;
/* Clip quality setting to [5,100] interval */
if (quality > 100)
quality = 100;
if (quality < 5)
quality = 5;
/*
* Non-linear scaling factor:
* [5,50] -> [1000..100], [51,100] -> [98..0]
*/
if (quality < 50)
scale = 5000 / quality;
else
scale = 200 - 2 * quality;
if (ctx->params.jpeg_qmat_tab[0]) {
memcpy(ctx->params.jpeg_qmat_tab[0], luma_q, 64);
coda_scale_quant_table(ctx->params.jpeg_qmat_tab[0], scale);
}
if (ctx->params.jpeg_qmat_tab[1]) {
memcpy(ctx->params.jpeg_qmat_tab[1], chroma_q, 64);
coda_scale_quant_table(ctx->params.jpeg_qmat_tab[1], scale);
}
}
/*
* Encoder context operations
*/
static int coda9_jpeg_start_encoding(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
int ret;
ret = coda9_jpeg_load_huff_tab(ctx);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "error loading Huffman tables\n");
return ret;
}
if (!ctx->params.jpeg_qmat_tab[0]) {
ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL);
if (!ctx->params.jpeg_qmat_tab[0])
return -ENOMEM;
}
if (!ctx->params.jpeg_qmat_tab[1]) {
ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL);
if (!ctx->params.jpeg_qmat_tab[1])
return -ENOMEM;
}
coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality);
return 0;
}
static int coda9_jpeg_prepare_encode(struct coda_ctx *ctx)
{
struct coda_q_data *q_data_src;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct coda_dev *dev = ctx->dev;
u32 start_addr, end_addr;
u16 aligned_width, aligned_height;
bool chroma_interleave;
int chroma_format;
int header_len;
int ret;
ktime_t timeout;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
if (vb2_get_plane_payload(&src_buf->vb2_buf, 0) == 0)
vb2_set_plane_payload(&src_buf->vb2_buf, 0,
vb2_plane_size(&src_buf->vb2_buf, 0));
src_buf->sequence = ctx->osequence;
dst_buf->sequence = ctx->osequence;
ctx->osequence++;
src_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
src_buf->flags &= ~V4L2_BUF_FLAG_PFRAME;
coda_set_gdi_regs(ctx);
start_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
end_addr = start_addr + vb2_plane_size(&dst_buf->vb2_buf, 0);
chroma_format = coda9_jpeg_chroma_format(q_data_src->fourcc);
if (chroma_format < 0)
return chroma_format;
/* Round image dimensions to multiple of MCU size */
aligned_width = round_up(q_data_src->width, width_align[chroma_format]);
aligned_height = round_up(q_data_src->height,
height_align[chroma_format]);
if (aligned_width != q_data_src->bytesperline) {
v4l2_err(&dev->v4l2_dev, "wrong stride: %d instead of %d\n",
aligned_width, q_data_src->bytesperline);
}
header_len =
coda9_jpeg_encode_header(ctx,
vb2_plane_size(&dst_buf->vb2_buf, 0),
vb2_plane_vaddr(&dst_buf->vb2_buf, 0));
if (header_len < 0)
return header_len;
coda_write(dev, start_addr + header_len, CODA9_REG_JPEG_BBC_BAS_ADDR);
coda_write(dev, end_addr, CODA9_REG_JPEG_BBC_END_ADDR);
coda_write(dev, start_addr + header_len, CODA9_REG_JPEG_BBC_WR_PTR);
coda_write(dev, start_addr + header_len, CODA9_REG_JPEG_BBC_RD_PTR);
coda_write(dev, 0, CODA9_REG_JPEG_BBC_CUR_POS);
/* 64 words per 256-byte page */
coda_write(dev, 64, CODA9_REG_JPEG_BBC_DATA_CNT);
coda_write(dev, start_addr, CODA9_REG_JPEG_BBC_EXT_ADDR);
coda_write(dev, 0, CODA9_REG_JPEG_BBC_INT_ADDR);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_BT_PTR);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_WD_PTR);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_BBSR);
coda_write(dev, BIT(31) | ((end_addr - start_addr - header_len) / 256),
CODA9_REG_JPEG_BBC_STRM_CTRL);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_CTRL);
coda_write(dev, 0, CODA9_REG_JPEG_GBU_FF_RPTR);
coda_write(dev, 127, CODA9_REG_JPEG_GBU_BBER);
coda_write(dev, 64, CODA9_REG_JPEG_GBU_BBIR);
coda_write(dev, 64, CODA9_REG_JPEG_GBU_BBHR);
chroma_interleave = (q_data_src->fourcc == V4L2_PIX_FMT_NV12);
coda_write(dev, CODA9_JPEG_PIC_CTRL_TC_DIRECTION |
CODA9_JPEG_PIC_CTRL_ENCODER_EN, CODA9_REG_JPEG_PIC_CTRL);
coda_write(dev, 0, CODA9_REG_JPEG_SCL_INFO);
coda_write(dev, chroma_interleave, CODA9_REG_JPEG_DPB_CONFIG);
coda_write(dev, ctx->params.jpeg_restart_interval,
CODA9_REG_JPEG_RST_INTVAL);
coda_write(dev, 1, CODA9_REG_JPEG_BBC_CTRL);
coda_write(dev, bus_req_num[chroma_format], CODA9_REG_JPEG_OP_INFO);
coda9_jpeg_write_huff_tab(ctx);
coda9_jpeg_load_qmat_tab(ctx);
if (ctx->params.rot_mode & CODA_ROT_90) {
aligned_width = aligned_height;
aligned_height = q_data_src->bytesperline;
if (chroma_format == CODA9_JPEG_FORMAT_422)
chroma_format = CODA9_JPEG_FORMAT_224;
else if (chroma_format == CODA9_JPEG_FORMAT_224)
chroma_format = CODA9_JPEG_FORMAT_422;
}
/* These need to be multiples of MCU size */
coda_write(dev, aligned_width << 16 | aligned_height,
CODA9_REG_JPEG_PIC_SIZE);
coda_write(dev, ctx->params.rot_mode ?
(CODA_ROT_MIR_ENABLE | ctx->params.rot_mode) : 0,
CODA9_REG_JPEG_ROT_INFO);
coda_write(dev, mcu_info[chroma_format], CODA9_REG_JPEG_MCU_INFO);
coda_write(dev, 1, CODA9_GDI_CONTROL);
timeout = ktime_add_us(ktime_get(), 100000);
do {
ret = coda_read(dev, CODA9_GDI_STATUS);
if (ktime_compare(ktime_get(), timeout) > 0) {
v4l2_err(&dev->v4l2_dev, "timeout waiting for GDI\n");
return -ETIMEDOUT;
}
} while (!ret);
coda_write(dev, (chroma_format << 17) | (chroma_interleave << 16) |
q_data_src->bytesperline, CODA9_GDI_INFO_CONTROL);
/* The content of this register seems to be irrelevant: */
coda_write(dev, aligned_width << 16 | aligned_height,
CODA9_GDI_INFO_PIC_SIZE);
coda_write_base(ctx, q_data_src, src_buf, CODA9_GDI_INFO_BASE_Y);
coda_write(dev, 0, CODA9_REG_JPEG_DPB_BASE00);
coda_write(dev, 0, CODA9_GDI_CONTROL);
coda_write(dev, 1, CODA9_GDI_PIC_INIT_HOST);
coda_write(dev, 1, CODA9_GDI_WPROT_ERR_CLR);
coda_write(dev, 0, CODA9_GDI_WPROT_RGN_EN);
trace_coda_jpeg_run(ctx, src_buf);
coda_write(dev, 1, CODA9_REG_JPEG_PIC_START);
return 0;
}
static void coda9_jpeg_finish_encode(struct coda_ctx *ctx)
{
struct vb2_v4l2_buffer *src_buf, *dst_buf;
struct coda_dev *dev = ctx->dev;
u32 wr_ptr, start_ptr;
u32 err_mb;
if (ctx->aborting) {
coda_write(ctx->dev, 0, CODA9_REG_JPEG_BBC_FLUSH_CMD);
return;
}
/*
* Lock to make sure that an encoder stop command running in parallel
* will either already have marked src_buf as last, or it will wake up
* the capture queue after the buffers are returned.
*/
mutex_lock(&ctx->wakeup_mutex);
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
trace_coda_jpeg_done(ctx, dst_buf);
/*
* Set plane payload to the number of bytes written out
* by the JPEG processing unit
*/
start_ptr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
wr_ptr = coda_read(dev, CODA9_REG_JPEG_BBC_WR_PTR);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr);
err_mb = coda_read(dev, CODA9_REG_JPEG_PIC_ERRMB);
if (err_mb)
coda_dbg(1, ctx, "ERRMB: 0x%x\n", err_mb);
coda_write(dev, 0, CODA9_REG_JPEG_BBC_FLUSH_CMD);
dst_buf->flags &= ~(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_LAST);
dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_LAST;
v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
coda_m2m_buf_done(ctx, dst_buf, err_mb ? VB2_BUF_STATE_ERROR :
VB2_BUF_STATE_DONE);
mutex_unlock(&ctx->wakeup_mutex);
coda_dbg(1, ctx, "job finished: encoded frame (%u)%s\n",
dst_buf->sequence,
(dst_buf->flags & V4L2_BUF_FLAG_LAST) ? " (last)" : "");
/*
* Reset JPEG processing unit after each encode run to work
* around hangups when switching context between encoder and
* decoder.
*/
coda_hw_reset(ctx);
}
static void coda9_jpeg_encode_timeout(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
u32 end_addr, wr_ptr;
/* Handle missing BBC overflow interrupt via timeout */
end_addr = coda_read(dev, CODA9_REG_JPEG_BBC_END_ADDR);
wr_ptr = coda_read(dev, CODA9_REG_JPEG_BBC_WR_PTR);
if (wr_ptr >= end_addr - 256) {
v4l2_err(&dev->v4l2_dev, "JPEG too large for capture buffer\n");
coda9_jpeg_finish_encode(ctx);
return;
}
coda_hw_reset(ctx);
}
static void coda9_jpeg_release(struct coda_ctx *ctx)
{
int i;
if (ctx->params.jpeg_qmat_tab[0] == luma_q)
ctx->params.jpeg_qmat_tab[0] = NULL;
if (ctx->params.jpeg_qmat_tab[1] == chroma_q)
ctx->params.jpeg_qmat_tab[1] = NULL;
for (i = 0; i < 3; i++)
kfree(ctx->params.jpeg_qmat_tab[i]);
kfree(ctx->params.jpeg_huff_data);
kfree(ctx->params.jpeg_huff_tab);
}
const struct coda_context_ops coda9_jpeg_encode_ops = {
.queue_init = coda_encoder_queue_init,
.start_streaming = coda9_jpeg_start_encoding,
.prepare_run = coda9_jpeg_prepare_encode,
.finish_run = coda9_jpeg_finish_encode,
.run_timeout = coda9_jpeg_encode_timeout,
.release = coda9_jpeg_release,
};
/*
* Decoder context operations
*/
static int coda9_jpeg_start_decoding(struct coda_ctx *ctx)
{
ctx->params.jpeg_qmat_index[0] = 0;
ctx->params.jpeg_qmat_index[1] = 1;
ctx->params.jpeg_qmat_index[2] = 1;
ctx->params.jpeg_qmat_tab[0] = luma_q;
ctx->params.jpeg_qmat_tab[1] = chroma_q;
/*
 * Nothing more to do here.
 * TODO: we could already scan the first header to get the chroma
 * format.
 */
return 0;
}
static int coda9_jpeg_prepare_decode(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
int aligned_width, aligned_height;
int chroma_format;
int ret;
u32 val, dst_fourcc;
struct coda_q_data *q_data_src, *q_data_dst;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
int chroma_interleave;
int scl_hor_mode, scl_ver_mode;
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
dst_fourcc = q_data_dst->fourcc;
scl_hor_mode = coda_jpeg_scale(q_data_src->width, q_data_dst->width);
scl_ver_mode = coda_jpeg_scale(q_data_src->height, q_data_dst->height);
if (vb2_get_plane_payload(&src_buf->vb2_buf, 0) == 0)
vb2_set_plane_payload(&src_buf->vb2_buf, 0,
vb2_plane_size(&src_buf->vb2_buf, 0));
chroma_format = coda9_jpeg_chroma_format(q_data_dst->fourcc);
if (chroma_format < 0)
return chroma_format;
ret = coda_jpeg_decode_header(ctx, &src_buf->vb2_buf);
if (ret < 0) {
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
return ret;
}
/* Round image dimensions to multiple of MCU size */
aligned_width = round_up(q_data_src->width, width_align[chroma_format]);
aligned_height = round_up(q_data_src->height, height_align[chroma_format]);
if (aligned_width != q_data_dst->bytesperline) {
v4l2_err(&dev->v4l2_dev, "stride mismatch: %d != %d\n",
aligned_width, q_data_dst->bytesperline);
}
coda_set_gdi_regs(ctx);
val = ctx->params.jpeg_huff_ac_index[0] << 12 |
ctx->params.jpeg_huff_ac_index[1] << 11 |
ctx->params.jpeg_huff_ac_index[2] << 10 |
ctx->params.jpeg_huff_dc_index[0] << 9 |
ctx->params.jpeg_huff_dc_index[1] << 8 |
ctx->params.jpeg_huff_dc_index[2] << 7;
if (ctx->params.jpeg_huff_tab)
val |= CODA9_JPEG_PIC_CTRL_USER_HUFFMAN_EN;
coda_write(dev, val, CODA9_REG_JPEG_PIC_CTRL);
coda_write(dev, aligned_width << 16 | aligned_height,
CODA9_REG_JPEG_PIC_SIZE);
chroma_interleave = (dst_fourcc == V4L2_PIX_FMT_NV12);
coda_write(dev, 0, CODA9_REG_JPEG_ROT_INFO);
coda_write(dev, bus_req_num[chroma_format], CODA9_REG_JPEG_OP_INFO);
coda_write(dev, mcu_info[chroma_format], CODA9_REG_JPEG_MCU_INFO);
if (scl_hor_mode || scl_ver_mode)
val = CODA9_JPEG_SCL_ENABLE | (scl_hor_mode << 2) | scl_ver_mode;
else
val = 0;
coda_write(dev, val, CODA9_REG_JPEG_SCL_INFO);
coda_write(dev, chroma_interleave, CODA9_REG_JPEG_DPB_CONFIG);
coda_write(dev, ctx->params.jpeg_restart_interval,
CODA9_REG_JPEG_RST_INTVAL);
if (ctx->params.jpeg_huff_tab)
coda9_jpeg_dec_huff_setup(ctx);
coda9_jpeg_qmat_setup(ctx);
coda9_jpeg_dec_bbc_gbu_setup(ctx, &src_buf->vb2_buf,
ctx->jpeg_ecs_offset);
coda_write(dev, 0, CODA9_REG_JPEG_RST_INDEX);
coda_write(dev, 0, CODA9_REG_JPEG_RST_COUNT);
coda_write(dev, 0, CODA9_REG_JPEG_DPCM_DIFF_Y);
coda_write(dev, 0, CODA9_REG_JPEG_DPCM_DIFF_CB);
coda_write(dev, 0, CODA9_REG_JPEG_DPCM_DIFF_CR);
coda_write(dev, 0, CODA9_REG_JPEG_ROT_INFO);
coda_write(dev, 1, CODA9_GDI_CONTROL);
do {
ret = coda_read(dev, CODA9_GDI_STATUS);
} while (!ret);
val = (chroma_format << 17) | (chroma_interleave << 16) |
q_data_dst->bytesperline;
if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
val |= 3 << 20;
coda_write(dev, val, CODA9_GDI_INFO_CONTROL);
coda_write(dev, aligned_width << 16 | aligned_height,
CODA9_GDI_INFO_PIC_SIZE);
coda_write_base(ctx, q_data_dst, dst_buf, CODA9_GDI_INFO_BASE_Y);
coda_write(dev, 0, CODA9_REG_JPEG_DPB_BASE00);
coda_write(dev, 0, CODA9_GDI_CONTROL);
coda_write(dev, 1, CODA9_GDI_PIC_INIT_HOST);
trace_coda_jpeg_run(ctx, src_buf);
coda_write(dev, 1, CODA9_REG_JPEG_PIC_START);
return 0;
}
static void coda9_jpeg_finish_decode(struct coda_ctx *ctx)
{
struct coda_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *dst_buf, *src_buf;
struct coda_q_data *q_data_dst;
u32 err_mb;
err_mb = coda_read(dev, CODA9_REG_JPEG_PIC_ERRMB);
if (err_mb)
v4l2_err(&dev->v4l2_dev, "ERRMB: 0x%x\n", err_mb);
coda_write(dev, 0, CODA9_REG_JPEG_BBC_FLUSH_CMD);
/*
* Lock to make sure that a decoder stop command running in parallel
* will either already have marked src_buf as last, or it will wake up
* the capture queue after the buffers are returned.
*/
mutex_lock(&ctx->wakeup_mutex);
src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
dst_buf->sequence = ctx->osequence++;
trace_coda_jpeg_done(ctx, dst_buf);
dst_buf->flags &= ~(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_LAST);
dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
dst_buf->flags |= src_buf->flags & V4L2_BUF_FLAG_LAST;
v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, q_data_dst->sizeimage);
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
coda_m2m_buf_done(ctx, dst_buf, err_mb ? VB2_BUF_STATE_ERROR :
VB2_BUF_STATE_DONE);
mutex_unlock(&ctx->wakeup_mutex);
coda_dbg(1, ctx, "job finished: decoded frame (%u)%s\n",
dst_buf->sequence,
(dst_buf->flags & V4L2_BUF_FLAG_LAST) ? " (last)" : "");
/*
* Reset JPEG processing unit after each decode run to work
* around hangups when switching context between encoder and
* decoder.
*/
coda_hw_reset(ctx);
}
const struct coda_context_ops coda9_jpeg_decode_ops = {
.queue_init = coda_encoder_queue_init, /* non-bitstream operation */
.start_streaming = coda9_jpeg_start_decoding,
.prepare_run = coda9_jpeg_prepare_decode,
.finish_run = coda9_jpeg_finish_decode,
.release = coda9_jpeg_release,
};
irqreturn_t coda9_jpeg_irq_handler(int irq, void *data)
{
struct coda_dev *dev = data;
struct coda_ctx *ctx;
int status;
int err_mb;
status = coda_read(dev, CODA9_REG_JPEG_PIC_STATUS);
if (status == 0)
return IRQ_HANDLED;
coda_write(dev, status, CODA9_REG_JPEG_PIC_STATUS);
if (status & CODA9_JPEG_STATUS_OVERFLOW)
v4l2_err(&dev->v4l2_dev, "JPEG overflow\n");
if (status & CODA9_JPEG_STATUS_BBC_INT)
v4l2_err(&dev->v4l2_dev, "JPEG BBC interrupt\n");
if (status & CODA9_JPEG_STATUS_ERROR) {
v4l2_err(&dev->v4l2_dev, "JPEG error\n");
err_mb = coda_read(dev, CODA9_REG_JPEG_PIC_ERRMB);
if (err_mb) {
v4l2_err(&dev->v4l2_dev,
"ERRMB: 0x%x: rst idx %d, mcu pos (%d,%d)\n",
err_mb, err_mb >> 24, (err_mb >> 12) & 0xfff,
err_mb & 0xfff);
}
}
ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
if (!ctx) {
v4l2_err(&dev->v4l2_dev,
"Instance released before the end of transaction\n");
mutex_unlock(&dev->coda_mutex);
return IRQ_HANDLED;
}
complete(&ctx->completion);
return IRQ_HANDLED;
}
| linux-master | drivers/media/platform/chips-media/coda-jpeg.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Coda multi-standard codec IP - MPEG-2 helper functions
*
* Copyright (C) 2019 Pengutronix, Philipp Zabel
*/
#include <linux/kernel.h>
#include <linux/videodev2.h>
#include "coda.h"
int coda_mpeg2_profile(int profile_idc)
{
switch (profile_idc) {
case 5:
return V4L2_MPEG_VIDEO_MPEG2_PROFILE_SIMPLE;
case 4:
return V4L2_MPEG_VIDEO_MPEG2_PROFILE_MAIN;
case 3:
return V4L2_MPEG_VIDEO_MPEG2_PROFILE_SNR_SCALABLE;
case 2:
return V4L2_MPEG_VIDEO_MPEG2_PROFILE_SPATIALLY_SCALABLE;
case 1:
return V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH;
default:
return -EINVAL;
}
}
int coda_mpeg2_level(int level_idc)
{
switch (level_idc) {
case 10:
return V4L2_MPEG_VIDEO_MPEG2_LEVEL_LOW;
case 8:
return V4L2_MPEG_VIDEO_MPEG2_LEVEL_MAIN;
case 6:
return V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH_1440;
case 4:
return V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH;
default:
return -EINVAL;
}
}
/*
* Check if the buffer starts with the MPEG-2 sequence header (with or without
* quantization matrix) and extension header, for example:
*
* 00 00 01 b3 2d 01 e0 34 08 8b a3 81
* 10 11 11 12 12 12 13 13 13 13 14 14 14 14 14 15
* 15 15 15 15 15 16 16 16 16 16 16 16 17 17 17 17
* 17 17 17 17 18 18 18 19 18 18 18 19 1a 1a 1a 1a
* 19 1b 1b 1b 1b 1b 1c 1c 1c 1c 1e 1e 1e 1f 1f 21
* 00 00 01 b5 14 8a 00 01 00 00
*
* or:
*
* 00 00 01 b3 08 00 40 15 ff ff e0 28
* 00 00 01 b5 14 8a 00 01 00 00
*
* Returns the detected header size in bytes or 0.
*/
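/*
 * The two accepted sizes follow from the fixed layout: a 12-byte
 * sequence header immediately followed by a 10-byte sequence extension
 * gives 22 bytes; with the optional 64-byte intra quantization matrix
 * in between, the extension starts at offset 76 and the total is 86.
 */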
u32 coda_mpeg2_parse_headers(struct coda_ctx *ctx, u8 *buf, u32 size)
{
static const u8 sequence_header_start[4] = { 0x00, 0x00, 0x01, 0xb3 };
static const union {
u8 extension_start[4];
u8 start_code_prefix[3];
} u = { { 0x00, 0x00, 0x01, 0xb5 } };
if (size < 22 ||
memcmp(buf, sequence_header_start, 4) != 0)
return 0;
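/*
 * A sequence header without quantizer matrices is 12 bytes and the
 * sequence extension is 10 bytes, hence the extension start code at
 * offset 12 and a total of 22 bytes. With a 64-byte intra quantizer
 * matrix the extension starts at offset 76, for a total of 86 bytes.
 */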
if ((size == 22 ||
(size >= 25 && memcmp(buf + 22, u.start_code_prefix, 3) == 0)) &&
memcmp(buf + 12, u.extension_start, 4) == 0)
return 22;
if ((size == 86 ||
(size > 89 && memcmp(buf + 86, u.start_code_prefix, 3) == 0)) &&
memcmp(buf + 76, u.extension_start, 4) == 0)
return 86;
return 0;
}
| linux-master | drivers/media/platform/chips-media/coda-mpeg2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/videodev2.h>
#include "hfi.h"
#include "hfi_plat_bufs.h"
#include "helpers.h"
#define MIN_INPUT_BUFFERS 4
#define MIN_ENC_OUTPUT_BUFFERS 4
#define NV12_UBWC_Y_TILE_WIDTH 32
#define NV12_UBWC_Y_TILE_HEIGHT 8
#define NV12_UBWC_UV_TILE_WIDTH 16
#define NV12_UBWC_UV_TILE_HEIGHT 8
#define TP10_UBWC_Y_TILE_WIDTH 48
#define TP10_UBWC_Y_TILE_HEIGHT 4
#define METADATA_STRIDE_MULTIPLE 64
#define METADATA_HEIGHT_MULTIPLE 16
#define HFI_DMA_ALIGNMENT 256
#define MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE 64
#define MAX_FE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE 64
#define MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE 64
#define MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE 640
#define MAX_FE_NBR_DATA_CB_LINE_BUFFER_SIZE 320
#define MAX_FE_NBR_DATA_CR_LINE_BUFFER_SIZE 320
#define MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE (128 / 8)
#define MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE (128 / 8)
#define MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE (128 / 8)
#define MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE (64 * 2 * 3)
#define MAX_PE_NBR_DATA_LCU32_LINE_BUFFER_SIZE (32 * 2 * 3)
#define MAX_PE_NBR_DATA_LCU16_LINE_BUFFER_SIZE (16 * 2 * 3)
#define MAX_TILE_COLUMNS 32 /* 8K/256 */
#define VPP_CMD_MAX_SIZE BIT(20)
#define NUM_HW_PIC_BUF 32
#define BIN_BUFFER_THRESHOLD (1280 * 736)
#define H264D_MAX_SLICE 1800
/* sizeof(h264d_buftab_t) aligned to 256 */
#define SIZE_H264D_BUFTAB_T 256
/* sizeof(h264d_hw_pic_t) aligned to 32 */
#define SIZE_H264D_HW_PIC_T BIT(11)
#define SIZE_H264D_BSE_CMD_PER_BUF (32 * 4)
#define SIZE_H264D_VPP_CMD_PER_BUF 512
/* Line buffer definitions: one for luma and 1/2 for each chroma */
#define SIZE_H264D_LB_FE_TOP_DATA(width, height) \
(MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * ALIGN((width), 16) * 3)
#define SIZE_H264D_LB_FE_TOP_CTRL(width, height) \
(MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((width) + 15) >> 4))
#define SIZE_H264D_LB_FE_LEFT_CTRL(width, height) \
(MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((height) + 15) >> 4))
#define SIZE_H264D_LB_SE_TOP_CTRL(width, height) \
(MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((width) + 15) >> 4))
#define SIZE_H264D_LB_SE_LEFT_CTRL(width, height) \
(MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * (((height) + 15) >> 4))
#define SIZE_H264D_LB_PE_TOP_DATA(width, height) \
(MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE * (((width) + 15) >> 4))
#define SIZE_H264D_LB_VSP_TOP(width, height) (((((width) + 15) >> 4) << 7))
#define SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height) \
(ALIGN((height), 16) * 32)
#define SIZE_H264D_QP(width, height) \
((((width) + 63) >> 6) * (((height) + 63) >> 6) * 128)
#define SIZE_HW_PIC(size_per_buf) (NUM_HW_PIC_BUF * (size_per_buf))
#define H264_CABAC_HDR_RATIO_HD_TOT 1
#define H264_CABAC_RES_RATIO_HD_TOT 3
/*
* Some content needs more bin buffers, but limit the buffer
* size for high resolutions
*/
#define NUM_SLIST_BUF_H264 (256 + 32)
#define SIZE_SLIST_BUF_H264 512
#define LCU_MAX_SIZE_PELS 64
#define LCU_MIN_SIZE_PELS 16
#define SIZE_SEI_USERDATA 4096
#define H265D_MAX_SLICE 3600
#define SIZE_H265D_HW_PIC_T SIZE_H264D_HW_PIC_T
#define SIZE_H265D_BSE_CMD_PER_BUF (16 * sizeof(u32))
#define SIZE_H265D_VPP_CMD_PER_BUF 256
#define SIZE_H265D_LB_FE_TOP_DATA(width, height) \
(MAX_FE_NBR_DATA_LUMA_LINE_BUFFER_SIZE * (ALIGN(width, 64) + 8) * 2)
#define SIZE_H265D_LB_FE_TOP_CTRL(width, height) \
(MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \
(ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS))
#define SIZE_H265D_LB_FE_LEFT_CTRL(width, height) \
(MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE * \
(ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS))
#define SIZE_H265D_LB_SE_TOP_CTRL(width, height) \
((LCU_MAX_SIZE_PELS / 8 * (128 / 8)) * (((width) + 15) >> 4))
static inline u32 size_h265d_lb_se_left_ctrl(u32 width, u32 height)
{
u32 x, y, z;
x = ((height + 16 - 1) / 8) * MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE;
y = ((height + 32 - 1) / 8) * MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE;
z = ((height + 64 - 1) / 8) * MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE;
return max3(x, y, z);
}
#define SIZE_H265D_LB_PE_TOP_DATA(width, height) \
(MAX_PE_NBR_DATA_LCU64_LINE_BUFFER_SIZE * \
(ALIGN(width, LCU_MIN_SIZE_PELS) / LCU_MIN_SIZE_PELS))
#define SIZE_H265D_LB_VSP_TOP(width, height) ((((width) + 63) >> 6) * 128)
#define SIZE_H265D_LB_VSP_LEFT(width, height) ((((height) + 63) >> 6) * 128)
#define SIZE_H265D_LB_RECON_DMA_METADATA_WR(width, height) \
SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height)
#define SIZE_H265D_QP(width, height) SIZE_H264D_QP(width, height)
#define H265_CABAC_HDR_RATIO_HD_TOT 2
#define H265_CABAC_RES_RATIO_HD_TOT 2
/*
* Some content needs more bin buffers, but limit the buffer size
* for high resolutions
*/
#define SIZE_SLIST_BUF_H265 BIT(10)
#define NUM_SLIST_BUF_H265 (80 + 20)
#define H265_NUM_TILE_COL 32
#define H265_NUM_TILE_ROW 128
#define H265_NUM_TILE (H265_NUM_TILE_ROW * H265_NUM_TILE_COL + 1)
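/*
 * The VPX LCU size is not known up front, so the left-control line
 * buffers are sized for the worst case of the 16/32/64-pel variants.
 */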
static inline u32 size_vpxd_lb_fe_left_ctrl(u32 width, u32 height)
{
u32 x, y, z;
x = ((height + 15) >> 4) * MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE;
y = ((height + 31) >> 5) * MAX_FE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE;
z = ((height + 63) >> 6) * MAX_FE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE;
return max3(x, y, z);
}
#define SIZE_VPXD_LB_FE_TOP_CTRL(width, height) \
(((ALIGN(width, 64) + 8) * 10 * 2)) /* small line */
#define SIZE_VPXD_LB_SE_TOP_CTRL(width, height) \
((((width) + 15) >> 4) * MAX_FE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE)
static inline u32 size_vpxd_lb_se_left_ctrl(u32 width, u32 height)
{
u32 x, y, z;
x = ((height + 15) >> 4) * MAX_SE_NBR_CTRL_LCU16_LINE_BUFFER_SIZE;
y = ((height + 31) >> 5) * MAX_SE_NBR_CTRL_LCU32_LINE_BUFFER_SIZE;
z = ((height + 63) >> 6) * MAX_SE_NBR_CTRL_LCU64_LINE_BUFFER_SIZE;
return max3(x, y, z);
}
#define SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height) \
ALIGN((ALIGN(height, 16) / (4 / 2)) * 64, 32)
#define SIZE_VP8D_LB_FE_TOP_DATA(width, height) \
((ALIGN(width, 16) + 8) * 10 * 2)
#define SIZE_VP9D_LB_FE_TOP_DATA(width, height) \
((ALIGN(ALIGN(width, 16), 64) + 8) * 10 * 2)
#define SIZE_VP8D_LB_PE_TOP_DATA(width, height) \
((ALIGN(width, 16) >> 4) * 64)
#define SIZE_VP9D_LB_PE_TOP_DATA(width, height) \
((ALIGN(ALIGN(width, 16), 64) >> 6) * 176)
#define SIZE_VP8D_LB_VSP_TOP(width, height) \
(((ALIGN(width, 16) >> 4) * 64 / 2) + 256)
#define SIZE_VP9D_LB_VSP_TOP(width, height) \
(((ALIGN(ALIGN(width, 16), 64) >> 6) * 64 * 8) + 256)
#define HFI_IRIS2_VP9D_COMV_SIZE \
((((8192 + 63) >> 6) * ((4320 + 63) >> 6) * 8 * 8 * 2 * 8))
#define VPX_DECODER_FRAME_CONCURRENCY_LVL 2
#define VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_NUM 1
#define VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_DEN 2
#define VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_NUM 3
#define VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_DEN 2
#define VP8_NUM_FRAME_INFO_BUF (5 + 1)
#define VP9_NUM_FRAME_INFO_BUF 32
#define VP8_NUM_PROBABILITY_TABLE_BUF VP8_NUM_FRAME_INFO_BUF
#define VP9_NUM_PROBABILITY_TABLE_BUF (VP9_NUM_FRAME_INFO_BUF + 4)
#define VP8_PROB_TABLE_SIZE 3840
#define VP9_PROB_TABLE_SIZE 3840
#define VP9_UDC_HEADER_BUF_SIZE (3 * 128)
#define MAX_SUPERFRAME_HEADER_LEN 34
#define CCE_TILE_OFFSET_SIZE ALIGN(32 * 4 * 4, 32)
#define QMATRIX_SIZE (sizeof(u32) * 128 + 256)
#define MP2D_QPDUMP_SIZE 115200
#define HFI_IRIS2_ENC_PERSIST_SIZE 204800
#define HFI_MAX_COL_FRAME 6
#define HFI_VENUS_VENC_TRE_WB_BUFF_SIZE (65 << 4) /* in Bytes */
#define HFI_VENUS_VENC_DB_LINE_BUFF_PER_MB 512
#define HFI_VENUS_VPPSG_MAX_REGISTERS 2048
#define HFI_VENUS_WIDTH_ALIGNMENT 128
#define HFI_VENUS_WIDTH_TEN_BIT_ALIGNMENT 192
#define HFI_VENUS_HEIGHT_ALIGNMENT 32
#define SYSTEM_LAL_TILE10 192
#define NUM_MBS_720P (((1280 + 15) >> 4) * ((720 + 15) >> 4))
#define NUM_MBS_4K (((4096 + 15) >> 4) * ((2304 + 15) >> 4))
#define MB_SIZE_IN_PIXEL (16 * 16)
#define HDR10PLUS_PAYLOAD_SIZE 1024
#define HDR10_HIST_EXTRADATA_SIZE 4096
static u32 size_vpss_lb(u32 width, u32 height, u32 num_vpp_pipes)
{
u32 vpss_4tap_top_buffer_size, vpss_div2_top_buffer_size;
u32 vpss_4tap_left_buffer_size, vpss_div2_left_buffer_size;
u32 opb_wr_top_line_luma_buf_size, opb_wr_top_line_chroma_buf_size;
u32 opb_lb_wr_llb_y_buffer_size, opb_lb_wr_llb_uv_buffer_size;
u32 macrotiling_size;
u32 size = 0;
vpss_4tap_top_buffer_size = 0;
vpss_div2_top_buffer_size = 0;
vpss_4tap_left_buffer_size = 0;
vpss_div2_left_buffer_size = 0;
macrotiling_size = 32;
opb_wr_top_line_luma_buf_size =
ALIGN(width, macrotiling_size) / macrotiling_size * 256;
opb_wr_top_line_luma_buf_size =
ALIGN(opb_wr_top_line_luma_buf_size, HFI_DMA_ALIGNMENT) +
(MAX_TILE_COLUMNS - 1) * 256;
opb_wr_top_line_luma_buf_size =
max(opb_wr_top_line_luma_buf_size, (32 * ALIGN(height, 16)));
opb_wr_top_line_chroma_buf_size = opb_wr_top_line_luma_buf_size;
opb_lb_wr_llb_y_buffer_size = ALIGN((ALIGN(height, 16) / 2) * 64, 32);
opb_lb_wr_llb_uv_buffer_size = opb_lb_wr_llb_y_buffer_size;
size = num_vpp_pipes *
2 * (vpss_4tap_top_buffer_size + vpss_div2_top_buffer_size) +
2 * (vpss_4tap_left_buffer_size + vpss_div2_left_buffer_size) +
opb_wr_top_line_luma_buf_size +
opb_wr_top_line_chroma_buf_size +
opb_lb_wr_llb_uv_buffer_size +
opb_lb_wr_llb_y_buffer_size;
return size;
}
static u32 size_h264d_hw_bin_buffer(u32 width, u32 height)
{
u32 size_yuv, size_bin_hdr, size_bin_res;
u32 size = 0;
u32 product;
product = width * height;
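/*
 * The bin buffers scale with the YUV frame size, floored at the
 * 1280x736 BIN_BUFFER_THRESHOLD, and are split into CABAC header and
 * residual parts using the HD ratios above.
 */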
size_yuv = (product <= BIN_BUFFER_THRESHOLD) ?
((BIN_BUFFER_THRESHOLD * 3) >> 1) : ((product * 3) >> 1);
size_bin_hdr = size_yuv * H264_CABAC_HDR_RATIO_HD_TOT;
size_bin_res = size_yuv * H264_CABAC_RES_RATIO_HD_TOT;
size_bin_hdr = ALIGN(size_bin_hdr, HFI_DMA_ALIGNMENT);
size_bin_res = ALIGN(size_bin_res, HFI_DMA_ALIGNMENT);
size = size_bin_hdr + size_bin_res;
return size;
}
static u32 h264d_scratch_size(u32 width, u32 height, bool is_interlaced)
{
u32 aligned_width = ALIGN(width, 16);
u32 aligned_height = ALIGN(height, 16);
u32 size = 0;
if (!is_interlaced)
size = size_h264d_hw_bin_buffer(aligned_width, aligned_height);
return size;
}
static u32 size_h265d_hw_bin_buffer(u32 width, u32 height)
{
u32 size_yuv, size_bin_hdr, size_bin_res;
u32 size = 0;
u32 product;
product = width * height;
size_yuv = (product <= BIN_BUFFER_THRESHOLD) ?
((BIN_BUFFER_THRESHOLD * 3) >> 1) : ((product * 3) >> 1);
size_bin_hdr = size_yuv * H265_CABAC_HDR_RATIO_HD_TOT;
size_bin_res = size_yuv * H265_CABAC_RES_RATIO_HD_TOT;
size_bin_hdr = ALIGN(size_bin_hdr, HFI_DMA_ALIGNMENT);
size_bin_res = ALIGN(size_bin_res, HFI_DMA_ALIGNMENT);
size = size_bin_hdr + size_bin_res;
return size;
}
static u32 h265d_scratch_size(u32 width, u32 height, bool is_interlaced)
{
u32 aligned_width = ALIGN(width, 16);
u32 aligned_height = ALIGN(height, 16);
u32 size = 0;
if (!is_interlaced)
size = size_h265d_hw_bin_buffer(aligned_width, aligned_height);
return size;
}
static u32 vpxd_scratch_size(u32 width, u32 height, bool is_interlaced)
{
u32 aligned_width = ALIGN(width, 16);
u32 aligned_height = ALIGN(height, 16);
u32 size_yuv = aligned_width * aligned_height * 3 / 2;
u32 size = 0;
if (!is_interlaced) {
u32 binbuffer1_size, binbuffer2_size;
binbuffer1_size = max_t(u32, size_yuv,
((BIN_BUFFER_THRESHOLD * 3) >> 1));
binbuffer1_size *= VPX_DECODER_FRAME_CONCURRENCY_LVL *
VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_NUM /
VPX_DECODER_FRAME_BIN_HDR_BUDGET_RATIO_DEN;
binbuffer2_size = max_t(u32, size_yuv,
((BIN_BUFFER_THRESHOLD * 3) >> 1));
binbuffer2_size *= VPX_DECODER_FRAME_CONCURRENCY_LVL *
VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_NUM /
VPX_DECODER_FRAME_BIN_RES_BUDGET_RATIO_DEN;
size = ALIGN(binbuffer1_size + binbuffer2_size,
HFI_DMA_ALIGNMENT);
}
return size;
}
static u32 mpeg2d_scratch_size(u32 width, u32 height, bool is_interlaced)
{
return 0;
}
static u32 calculate_enc_output_frame_size(u32 width, u32 height, u32 rc_type)
{
u32 aligned_width, aligned_height;
u32 mbs_per_frame;
u32 frame_size;
/*
* Encoder output size calculation: align width/height to 32
* For resolution < 720p: YUVsize * 4
* For resolution > 720p and <= 4K: YUVsize / 2
* For resolution > 4K: YUVsize / 4
* Initially frame_size = YUVsize * 2;
*/
aligned_width = ALIGN(width, 32);
aligned_height = ALIGN(height, 32);
mbs_per_frame = (ALIGN(aligned_height, 16) *
ALIGN(aligned_width, 16)) / 256;
frame_size = width * height * 3;
if (mbs_per_frame < NUM_MBS_720P)
frame_size = frame_size << 1;
else if (mbs_per_frame <= NUM_MBS_4K)
frame_size = frame_size >> 2;
else
frame_size = frame_size >> 3;
if (rc_type == HFI_RATE_CONTROL_OFF || rc_type == HFI_RATE_CONTROL_CQ)
frame_size = frame_size << 1;
/*
* In the case of an opaque color format the bit depth is only known
* with the first ETB, and buffers already allocated for 8 bit won't
* be sufficient for 10 bit, so calculate the size assuming 10 bit
* by default: for the 10-bit case, size = size * 1.25
*/
frame_size *= 5;
frame_size /= 4;
return ALIGN(frame_size, SZ_4K);
}
static u32 calculate_enc_scratch_size(u32 width, u32 height, u32 work_mode,
u32 lcu_size, u32 num_vpp_pipes,
u32 rc_type)
{
u32 aligned_width, aligned_height, bitstream_size;
u32 total_bitbin_buffers, size_single_pipe, bitbin_size;
u32 sao_bin_buffer_size, padded_bin_size, size;
aligned_width = ALIGN(width, lcu_size);
aligned_height = ALIGN(height, lcu_size);
bitstream_size =
calculate_enc_output_frame_size(width, height, rc_type);
bitstream_size = ALIGN(bitstream_size, HFI_DMA_ALIGNMENT);
if (work_mode == VIDC_WORK_MODE_2) {
total_bitbin_buffers = 3;
bitbin_size = bitstream_size * 17 / 10;
bitbin_size = ALIGN(bitbin_size, HFI_DMA_ALIGNMENT);
} else {
total_bitbin_buffers = 1;
bitstream_size = aligned_width * aligned_height * 3;
bitbin_size = ALIGN(bitstream_size, HFI_DMA_ALIGNMENT);
}
if (num_vpp_pipes > 2)
size_single_pipe = bitbin_size / 2;
else
size_single_pipe = bitbin_size;
size_single_pipe = ALIGN(size_single_pipe, HFI_DMA_ALIGNMENT);
sao_bin_buffer_size =
(64 * (((width + 32) * (height + 32)) >> 10)) + 384;
padded_bin_size = ALIGN(size_single_pipe, HFI_DMA_ALIGNMENT);
size_single_pipe = sao_bin_buffer_size + padded_bin_size;
size_single_pipe = ALIGN(size_single_pipe, HFI_DMA_ALIGNMENT);
bitbin_size = size_single_pipe * num_vpp_pipes;
size = ALIGN(bitbin_size, HFI_DMA_ALIGNMENT) *
total_bitbin_buffers + 512;
return size;
}
static u32 h264e_scratch_size(u32 width, u32 height, u32 work_mode,
u32 num_vpp_pipes, u32 rc_type)
{
return calculate_enc_scratch_size(width, height, work_mode, 16,
num_vpp_pipes, rc_type);
}
static u32 h265e_scratch_size(u32 width, u32 height, u32 work_mode,
u32 num_vpp_pipes, u32 rc_type)
{
return calculate_enc_scratch_size(width, height, work_mode, 32,
num_vpp_pipes, rc_type);
}
static u32 vp8e_scratch_size(u32 width, u32 height, u32 work_mode,
u32 num_vpp_pipes, u32 rc_type)
{
return calculate_enc_scratch_size(width, height, work_mode, 16,
num_vpp_pipes, rc_type);
}
static u32 hfi_iris2_h264d_comv_size(u32 width, u32 height,
u32 yuv_buf_min_count)
{
u32 frame_width_in_mbs = ((width + 15) >> 4);
u32 frame_height_in_mbs = ((height + 15) >> 4);
u32 col_mv_aligned_width = (frame_width_in_mbs << 7);
u32 col_zero_aligned_width = (frame_width_in_mbs << 2);
u32 col_zero_size = 0, size_colloc = 0, comv_size = 0;
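/*
 * Per macroblock row pair the hardware keeps 128 bytes of co-located
 * MV data and 4 bytes of col-zero flags; both totals are doubled
 * (presumably to cover both fields) and padded to 512 bytes.
 */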
col_mv_aligned_width = ALIGN(col_mv_aligned_width, 16);
col_zero_aligned_width = ALIGN(col_zero_aligned_width, 16);
col_zero_size =
col_zero_aligned_width * ((frame_height_in_mbs + 1) >> 1);
col_zero_size = ALIGN(col_zero_size, 64);
col_zero_size <<= 1;
col_zero_size = ALIGN(col_zero_size, 512);
size_colloc = col_mv_aligned_width * ((frame_height_in_mbs + 1) >> 1);
size_colloc = ALIGN(size_colloc, 64);
size_colloc <<= 1;
size_colloc = ALIGN(size_colloc, 512);
size_colloc += (col_zero_size + SIZE_H264D_BUFTAB_T * 2);
comv_size = size_colloc * yuv_buf_min_count;
comv_size += 512;
return comv_size;
}
static u32 size_h264d_bse_cmd_buf(u32 height)
{
u32 aligned_height = ALIGN(height, 32);
return min_t(u32, (((aligned_height + 15) >> 4) * 3 * 4),
H264D_MAX_SLICE) * SIZE_H264D_BSE_CMD_PER_BUF;
}
static u32 size_h264d_vpp_cmd_buf(u32 height)
{
u32 aligned_height = ALIGN(height, 32);
u32 size;
size = min_t(u32, (((aligned_height + 15) >> 4) * 3 * 4),
H264D_MAX_SLICE) * SIZE_H264D_VPP_CMD_PER_BUF;
if (size > VPP_CMD_MAX_SIZE)
size = VPP_CMD_MAX_SIZE;
return size;
}
static u32 hfi_iris2_h264d_non_comv_size(u32 width, u32 height,
u32 num_vpp_pipes)
{
u32 size_bse, size_vpp, size;
size_bse = size_h264d_bse_cmd_buf(height);
size_vpp = size_h264d_vpp_cmd_buf(height);
size =
ALIGN(size_bse, HFI_DMA_ALIGNMENT) +
ALIGN(size_vpp, HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_HW_PIC(SIZE_H264D_HW_PIC_T), HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H264D_LB_FE_TOP_DATA(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H264D_LB_FE_TOP_CTRL(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H264D_LB_FE_LEFT_CTRL(width, height),
HFI_DMA_ALIGNMENT) * num_vpp_pipes +
ALIGN(SIZE_H264D_LB_SE_TOP_CTRL(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H264D_LB_SE_LEFT_CTRL(width, height),
HFI_DMA_ALIGNMENT) * num_vpp_pipes +
ALIGN(SIZE_H264D_LB_PE_TOP_DATA(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H264D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H264D_LB_RECON_DMA_METADATA_WR(width, height),
HFI_DMA_ALIGNMENT) * 2 +
ALIGN(SIZE_H264D_QP(width, height), HFI_DMA_ALIGNMENT);
return ALIGN(size, HFI_DMA_ALIGNMENT);
}
static u32 size_h265d_bse_cmd_buf(u32 width, u32 height)
{
u32 size;
size = (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
(ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
NUM_HW_PIC_BUF;
size = min_t(u32, size, H265D_MAX_SLICE + 1);
size = 2 * size * SIZE_H265D_BSE_CMD_PER_BUF;
return ALIGN(size, HFI_DMA_ALIGNMENT);
}
static u32 size_h265d_vpp_cmd_buf(u32 width, u32 height)
{
u32 size;
size = (ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
(ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
NUM_HW_PIC_BUF;
size = min_t(u32, size, H265D_MAX_SLICE + 1);
size = ALIGN(size, 4);
size = 2 * size * SIZE_H265D_VPP_CMD_PER_BUF;
size = ALIGN(size, HFI_DMA_ALIGNMENT);
if (size > VPP_CMD_MAX_SIZE)
size = VPP_CMD_MAX_SIZE;
return size;
}
static u32 hfi_iris2_h265d_comv_size(u32 width, u32 height,
u32 yuv_buf_count_min)
{
u32 size;
size = ALIGN(((((width + 15) >> 4) * ((height + 15) >> 4)) << 8), 512);
size *= yuv_buf_count_min;
size += 512;
return size;
}
static u32 hfi_iris2_h265d_non_comv_size(u32 width, u32 height,
u32 num_vpp_pipes)
{
u32 size_bse, size_vpp, size;
size_bse = size_h265d_bse_cmd_buf(width, height);
size_vpp = size_h265d_vpp_cmd_buf(width, height);
size =
ALIGN(size_bse, HFI_DMA_ALIGNMENT) +
ALIGN(size_vpp, HFI_DMA_ALIGNMENT) +
ALIGN(NUM_HW_PIC_BUF * 20 * 22 * 4, HFI_DMA_ALIGNMENT) +
ALIGN(2 * sizeof(u16) *
(ALIGN(width, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS) *
(ALIGN(height, LCU_MAX_SIZE_PELS) / LCU_MIN_SIZE_PELS),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_HW_PIC(SIZE_H265D_HW_PIC_T), HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H265D_LB_FE_TOP_DATA(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H265D_LB_FE_TOP_CTRL(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H265D_LB_FE_LEFT_CTRL(width, height),
HFI_DMA_ALIGNMENT) * num_vpp_pipes +
ALIGN(size_h265d_lb_se_left_ctrl(width, height),
HFI_DMA_ALIGNMENT) * num_vpp_pipes +
ALIGN(SIZE_H265D_LB_SE_TOP_CTRL(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H265D_LB_PE_TOP_DATA(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H265D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_H265D_LB_VSP_LEFT(width, height),
HFI_DMA_ALIGNMENT) * num_vpp_pipes +
ALIGN(SIZE_H265D_LB_RECON_DMA_METADATA_WR(width, height),
HFI_DMA_ALIGNMENT)
* 4 +
ALIGN(SIZE_H265D_QP(width, height), HFI_DMA_ALIGNMENT);
return ALIGN(size, HFI_DMA_ALIGNMENT);
}
static u32 hfi_iris2_vp8d_comv_size(u32 width, u32 height,
u32 yuv_min_buf_count)
{
return (((width + 15) >> 4) * ((height + 15) >> 4) * 8 * 2);
}
static u32 h264d_scratch1_size(u32 width, u32 height, u32 min_buf_count,
bool split_mode_enabled, u32 num_vpp_pipes)
{
u32 co_mv_size, nonco_mv_size, vpss_lb_size = 0;
co_mv_size = hfi_iris2_h264d_comv_size(width, height, min_buf_count);
nonco_mv_size = hfi_iris2_h264d_non_comv_size(width, height,
num_vpp_pipes);
if (split_mode_enabled)
vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);
return co_mv_size + nonco_mv_size + vpss_lb_size;
}
static u32 h265d_scratch1_size(u32 width, u32 height, u32 min_buf_count,
bool split_mode_enabled, u32 num_vpp_pipes)
{
u32 co_mv_size, nonco_mv_size, vpss_lb_size = 0;
co_mv_size = hfi_iris2_h265d_comv_size(width, height, min_buf_count);
nonco_mv_size = hfi_iris2_h265d_non_comv_size(width, height,
num_vpp_pipes);
if (split_mode_enabled)
vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);
return co_mv_size + nonco_mv_size + vpss_lb_size +
HDR10_HIST_EXTRADATA_SIZE;
}
static u32 vp8d_scratch1_size(u32 width, u32 height, u32 min_buf_count,
bool split_mode_enabled, u32 num_vpp_pipes)
{
u32 vpss_lb_size = 0, size;
size = hfi_iris2_vp8d_comv_size(width, height, 0);
size += ALIGN(size_vpxd_lb_fe_left_ctrl(width, height),
HFI_DMA_ALIGNMENT) * num_vpp_pipes +
ALIGN(size_vpxd_lb_se_left_ctrl(width, height),
HFI_DMA_ALIGNMENT) * num_vpp_pipes +
ALIGN(SIZE_VP8D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height),
HFI_DMA_ALIGNMENT) +
2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VP8D_LB_PE_TOP_DATA(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VP8D_LB_FE_TOP_DATA(width, height),
HFI_DMA_ALIGNMENT);
if (split_mode_enabled)
vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);
size += vpss_lb_size;
return size;
}
static u32 vp9d_scratch1_size(u32 width, u32 height, u32 min_buf_count,
bool split_mode_enabled, u32 num_vpp_pipes)
{
u32 vpss_lb_size = 0;
u32 size;
size =
ALIGN(size_vpxd_lb_fe_left_ctrl(width, height),
HFI_DMA_ALIGNMENT) * num_vpp_pipes +
ALIGN(size_vpxd_lb_se_left_ctrl(width, height),
HFI_DMA_ALIGNMENT) * num_vpp_pipes +
ALIGN(SIZE_VP9D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height),
HFI_DMA_ALIGNMENT) +
2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VP9D_LB_PE_TOP_DATA(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VP9D_LB_FE_TOP_DATA(width, height),
HFI_DMA_ALIGNMENT);
if (split_mode_enabled)
vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);
size += vpss_lb_size + HDR10_HIST_EXTRADATA_SIZE;
return size;
}
static u32 mpeg2d_scratch1_size(u32 width, u32 height, u32 min_buf_count,
bool split_mode_enabled, u32 num_vpp_pipes)
{
u32 vpss_lb_size = 0;
u32 size;
size =
ALIGN(size_vpxd_lb_fe_left_ctrl(width, height),
HFI_DMA_ALIGNMENT) * num_vpp_pipes +
ALIGN(size_vpxd_lb_se_left_ctrl(width, height),
HFI_DMA_ALIGNMENT) * num_vpp_pipes +
ALIGN(SIZE_VP8D_LB_VSP_TOP(width, height), HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VPXD_LB_FE_TOP_CTRL(width, height),
HFI_DMA_ALIGNMENT) +
2 * ALIGN(SIZE_VPXD_LB_RECON_DMA_METADATA_WR(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VPXD_LB_SE_TOP_CTRL(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VP8D_LB_PE_TOP_DATA(width, height),
HFI_DMA_ALIGNMENT) +
ALIGN(SIZE_VP8D_LB_FE_TOP_DATA(width, height),
HFI_DMA_ALIGNMENT);
if (split_mode_enabled)
vpss_lb_size = size_vpss_lb(width, height, num_vpp_pipes);
size += vpss_lb_size;
return size;
}
static u32
calculate_enc_scratch1_size(u32 width, u32 height, u32 lcu_size, u32 num_ref,
bool ten_bit, u32 num_vpp_pipes, bool is_h265)
{
u32 line_buf_ctrl_size, line_buf_data_size, leftline_buf_ctrl_size;
u32 line_buf_sde_size, sps_pps_slice_hdr, topline_buf_ctrl_size_FE;
u32 leftline_buf_ctrl_size_FE, line_buf_recon_pix_size;
u32 leftline_buf_recon_pix_size, lambda_lut_size, override_buffer_size;
u32 col_mv_buf_size, vpp_reg_buffer_size, ir_buffer_size;
u32 vpss_line_buf, leftline_buf_meta_recony, h265e_colrcbuf_size;
u32 h265e_framerc_bufsize, h265e_lcubitcnt_bufsize;
u32 h265e_lcubitmap_bufsize, se_stats_bufsize;
u32 bse_reg_buffer_size, bse_slice_cmd_buffer_size, slice_info_bufsize;
u32 line_buf_ctrl_size_buffid2, slice_cmd_buffer_size;
u32 width_lcu_num, height_lcu_num, width_coded, height_coded;
u32 frame_num_lcu, linebuf_meta_recon_uv, topline_bufsize_fe_1stg_sao;
u32 size, bit_depth, num_lcu_mb;
u32 vpss_line_buffer_size_1;
width_lcu_num = (width + lcu_size - 1) / lcu_size;
height_lcu_num = (height + lcu_size - 1) / lcu_size;
frame_num_lcu = width_lcu_num * height_lcu_num;
width_coded = width_lcu_num * lcu_size;
height_coded = height_lcu_num * lcu_size;
num_lcu_mb = (height_coded / lcu_size) *
((width_coded + lcu_size * 8) / lcu_size);
slice_info_bufsize = 256 + (frame_num_lcu << 4);
slice_info_bufsize = ALIGN(slice_info_bufsize, HFI_DMA_ALIGNMENT);
line_buf_ctrl_size = ALIGN(width_coded, HFI_DMA_ALIGNMENT);
line_buf_ctrl_size_buffid2 = ALIGN(width_coded, HFI_DMA_ALIGNMENT);
bit_depth = ten_bit ? 10 : 8;
line_buf_data_size =
(((((bit_depth * width_coded + 1024) +
(HFI_DMA_ALIGNMENT - 1)) & (~(HFI_DMA_ALIGNMENT - 1))) * 1) +
(((((bit_depth * width_coded + 1024) >> 1) +
(HFI_DMA_ALIGNMENT - 1)) & (~(HFI_DMA_ALIGNMENT - 1))) * 2));
leftline_buf_ctrl_size = is_h265 ?
((height_coded + 32) / 32 * 4 * 16) :
((height_coded + 15) / 16 * 5 * 16);
if (num_vpp_pipes > 1) {
leftline_buf_ctrl_size += 512;
leftline_buf_ctrl_size =
ALIGN(leftline_buf_ctrl_size, 512) * num_vpp_pipes;
}
leftline_buf_ctrl_size =
ALIGN(leftline_buf_ctrl_size, HFI_DMA_ALIGNMENT);
leftline_buf_recon_pix_size = (((ten_bit + 1) * 2 *
(height_coded) + HFI_DMA_ALIGNMENT) +
(HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1) &
(~((HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1)) * 1;
topline_buf_ctrl_size_FE = is_h265 ? (64 * (width_coded >> 5)) :
(HFI_DMA_ALIGNMENT + 16 * (width_coded >> 4));
topline_buf_ctrl_size_FE =
ALIGN(topline_buf_ctrl_size_FE, HFI_DMA_ALIGNMENT);
leftline_buf_ctrl_size_FE =
(((HFI_DMA_ALIGNMENT + 64 * (height_coded >> 4)) +
(HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1) &
(~((HFI_DMA_ALIGNMENT << (num_vpp_pipes - 1)) - 1)) * 1) *
num_vpp_pipes;
leftline_buf_meta_recony = (HFI_DMA_ALIGNMENT + 64 *
((height_coded) / (8 * (ten_bit ? 4 : 8))));
leftline_buf_meta_recony =
ALIGN(leftline_buf_meta_recony, HFI_DMA_ALIGNMENT);
leftline_buf_meta_recony = leftline_buf_meta_recony * num_vpp_pipes;
linebuf_meta_recon_uv = (HFI_DMA_ALIGNMENT + 64 *
((height_coded) / (4 * (ten_bit ? 4 : 8))));
linebuf_meta_recon_uv = ALIGN(linebuf_meta_recon_uv, HFI_DMA_ALIGNMENT);
linebuf_meta_recon_uv = linebuf_meta_recon_uv * num_vpp_pipes;
line_buf_recon_pix_size = ((ten_bit ? 3 : 2) * width_coded);
line_buf_recon_pix_size =
ALIGN(line_buf_recon_pix_size, HFI_DMA_ALIGNMENT);
slice_cmd_buffer_size = ALIGN(20480, HFI_DMA_ALIGNMENT);
sps_pps_slice_hdr = 2048 + 4096;
col_mv_buf_size = is_h265 ? (16 * ((frame_num_lcu << 2) + 32)) :
(3 * 16 * (width_lcu_num * height_lcu_num + 32));
col_mv_buf_size =
ALIGN(col_mv_buf_size, HFI_DMA_ALIGNMENT) * (num_ref + 1);
h265e_colrcbuf_size =
(((width_lcu_num + 7) >> 3) * 16 * 2 * height_lcu_num);
if (num_vpp_pipes > 1)
h265e_colrcbuf_size =
ALIGN(h265e_colrcbuf_size, HFI_DMA_ALIGNMENT) *
num_vpp_pipes;
h265e_colrcbuf_size = ALIGN(h265e_colrcbuf_size, HFI_DMA_ALIGNMENT) *
HFI_MAX_COL_FRAME;
h265e_framerc_bufsize = (is_h265) ? (256 + 16 *
(14 + (((height_coded >> 5) + 7) >> 3))) :
(256 + 16 * (14 + (((height_coded >> 4) + 7) >> 3)));
h265e_framerc_bufsize *= 6; /* multiply by the max number of tile columns */
if (num_vpp_pipes > 1)
h265e_framerc_bufsize =
ALIGN(h265e_framerc_bufsize, HFI_DMA_ALIGNMENT) *
num_vpp_pipes;
h265e_framerc_bufsize = ALIGN(h265e_framerc_bufsize, 512) *
HFI_MAX_COL_FRAME;
h265e_lcubitcnt_bufsize = 256 + 4 * frame_num_lcu;
h265e_lcubitcnt_bufsize =
ALIGN(h265e_lcubitcnt_bufsize, HFI_DMA_ALIGNMENT);
h265e_lcubitmap_bufsize = 256 + (frame_num_lcu >> 3);
h265e_lcubitmap_bufsize =
ALIGN(h265e_lcubitmap_bufsize, HFI_DMA_ALIGNMENT);
line_buf_sde_size = 256 + 16 * (width_coded >> 4);
line_buf_sde_size = ALIGN(line_buf_sde_size, HFI_DMA_ALIGNMENT);
if ((width_coded * height_coded) > (4096 * 2160))
se_stats_bufsize = 0;
else if ((width_coded * height_coded) > (1920 * 1088))
se_stats_bufsize = (40 * 4 * frame_num_lcu + 256 + 256);
else
se_stats_bufsize = (1024 * frame_num_lcu + 256 + 256);
se_stats_bufsize = ALIGN(se_stats_bufsize, HFI_DMA_ALIGNMENT) * 2;
bse_slice_cmd_buffer_size = (((8192 << 2) + 7) & (~7)) * 6;
bse_reg_buffer_size = (((512 << 3) + 7) & (~7)) * 4;
vpp_reg_buffer_size =
(((HFI_VENUS_VPPSG_MAX_REGISTERS << 3) + 31) & (~31)) * 10;
lambda_lut_size = 256 * 11;
override_buffer_size = 16 * ((num_lcu_mb + 7) >> 3);
override_buffer_size =
ALIGN(override_buffer_size, HFI_DMA_ALIGNMENT) * 2;
ir_buffer_size = (((frame_num_lcu << 1) + 7) & (~7)) * 3;
vpss_line_buffer_size_1 = (((8192 >> 2) << 5) * num_vpp_pipes) + 64;
vpss_line_buf =
(((((max(width_coded, height_coded) + 3) >> 2) << 5) + 256) *
16) + vpss_line_buffer_size_1;
topline_bufsize_fe_1stg_sao = 16 * (width_coded >> 5);
topline_bufsize_fe_1stg_sao =
ALIGN(topline_bufsize_fe_1stg_sao, HFI_DMA_ALIGNMENT);
size =
line_buf_ctrl_size + line_buf_data_size +
line_buf_ctrl_size_buffid2 + leftline_buf_ctrl_size +
vpss_line_buf + col_mv_buf_size + topline_buf_ctrl_size_FE +
leftline_buf_ctrl_size_FE + line_buf_recon_pix_size +
leftline_buf_recon_pix_size +
leftline_buf_meta_recony + linebuf_meta_recon_uv +
h265e_colrcbuf_size + h265e_framerc_bufsize +
h265e_lcubitcnt_bufsize + h265e_lcubitmap_bufsize +
line_buf_sde_size +
topline_bufsize_fe_1stg_sao + override_buffer_size +
bse_reg_buffer_size + vpp_reg_buffer_size + sps_pps_slice_hdr +
slice_cmd_buffer_size + bse_slice_cmd_buffer_size +
ir_buffer_size + slice_info_bufsize + lambda_lut_size +
se_stats_bufsize + 1024;
return size;
}
static u32 h264e_scratch1_size(u32 width, u32 height, u32 num_ref, bool ten_bit,
u32 num_vpp_pipes)
{
return calculate_enc_scratch1_size(width, height, 16, num_ref, ten_bit,
num_vpp_pipes, false);
}
static u32 h265e_scratch1_size(u32 width, u32 height, u32 num_ref, bool ten_bit,
u32 num_vpp_pipes)
{
return calculate_enc_scratch1_size(width, height, 32, num_ref, ten_bit,
num_vpp_pipes, true);
}
static u32 vp8e_scratch1_size(u32 width, u32 height, u32 num_ref, bool ten_bit,
u32 num_vpp_pipes)
{
return calculate_enc_scratch1_size(width, height, 16, num_ref, ten_bit,
1, false);
}
static u32 ubwc_metadata_plane_stride(u32 width, u32 metadata_stride_multi,
u32 tile_width_pels)
{
return ALIGN(((width + (tile_width_pels - 1)) / tile_width_pels),
metadata_stride_multi);
}
static u32 ubwc_metadata_plane_bufheight(u32 height, u32 metadata_height_multi,
u32 tile_height_pels)
{
return ALIGN(((height + (tile_height_pels - 1)) / tile_height_pels),
metadata_height_multi);
}
static u32 ubwc_metadata_plane_buffer_size(u32 metadata_stride,
u32 metadata_buf_height)
{
return ALIGN(metadata_stride * metadata_buf_height, SZ_4K);
}
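/*
 * Worked example (hypothetical input): for NV12 UBWC at 1920x1080 the
 * Y metadata stride is ALIGN(1920 / 32, 64) = 64 and the buffer height
 * is ALIGN(DIV_ROUND_UP(1080, 8), 16) = 144, so one metadata plane
 * takes ALIGN(64 * 144, SZ_4K) = 12288 bytes.
 */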
static u32 enc_scratch2_size(u32 width, u32 height, u32 num_ref, bool ten_bit)
{
u32 aligned_width, aligned_height, chroma_height, ref_buf_height;
u32 luma_size, chroma_size;
u32 metadata_stride, meta_buf_height, meta_size_y, meta_size_c;
u32 ref_luma_stride_bytes, ref_chroma_height_bytes;
u32 ref_buf_size, ref_stride;
u32 size;
if (!ten_bit) {
aligned_height = ALIGN(height, HFI_VENUS_HEIGHT_ALIGNMENT);
chroma_height = height >> 1;
chroma_height = ALIGN(chroma_height,
HFI_VENUS_HEIGHT_ALIGNMENT);
aligned_width = ALIGN(width, HFI_VENUS_WIDTH_ALIGNMENT);
metadata_stride =
ubwc_metadata_plane_stride(width, 64,
NV12_UBWC_Y_TILE_WIDTH);
meta_buf_height =
ubwc_metadata_plane_bufheight(height, 16,
NV12_UBWC_Y_TILE_HEIGHT);
meta_size_y = ubwc_metadata_plane_buffer_size(metadata_stride,
meta_buf_height);
meta_size_c = ubwc_metadata_plane_buffer_size(metadata_stride,
meta_buf_height);
size = (aligned_height + chroma_height) * aligned_width +
meta_size_y + meta_size_c;
size = (size * (num_ref + 3)) + 4096;
} else {
ref_buf_height = (height + (HFI_VENUS_HEIGHT_ALIGNMENT - 1))
& (~(HFI_VENUS_HEIGHT_ALIGNMENT - 1));
ref_luma_stride_bytes =
((width + SYSTEM_LAL_TILE10 - 1) / SYSTEM_LAL_TILE10) *
SYSTEM_LAL_TILE10;
ref_stride = 4 * (ref_luma_stride_bytes / 3);
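/* TP10 stores three 10-bit samples in four bytes, hence the 4/3 factor */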
ref_stride = (ref_stride + (128 - 1)) & (~(128 - 1));
luma_size = ref_buf_height * ref_stride;
ref_chroma_height_bytes = (((height + 1) >> 1) +
(32 - 1)) & (~(32 - 1));
chroma_size = ref_stride * ref_chroma_height_bytes;
luma_size = (luma_size + (SZ_4K - 1)) & (~(SZ_4K - 1));
chroma_size = (chroma_size + (SZ_4K - 1)) & (~(SZ_4K - 1));
ref_buf_size = luma_size + chroma_size;
metadata_stride =
ubwc_metadata_plane_stride(width,
METADATA_STRIDE_MULTIPLE,
TP10_UBWC_Y_TILE_WIDTH);
meta_buf_height =
ubwc_metadata_plane_bufheight(height,
METADATA_HEIGHT_MULTIPLE,
TP10_UBWC_Y_TILE_HEIGHT);
meta_size_y = ubwc_metadata_plane_buffer_size(metadata_stride,
meta_buf_height);
meta_size_c = ubwc_metadata_plane_buffer_size(metadata_stride,
meta_buf_height);
size = ref_buf_size + meta_size_y + meta_size_c;
size = (size * (num_ref + 3)) + 4096;
}
return size;
}
static u32 enc_persist_size(void)
{
return HFI_IRIS2_ENC_PERSIST_SIZE;
}
static u32 h264d_persist1_size(void)
{
return ALIGN((SIZE_SLIST_BUF_H264 * NUM_SLIST_BUF_H264
+ NUM_HW_PIC_BUF * SIZE_SEI_USERDATA), HFI_DMA_ALIGNMENT);
}
static u32 h265d_persist1_size(void)
{
return ALIGN((SIZE_SLIST_BUF_H265 * NUM_SLIST_BUF_H265 + H265_NUM_TILE
* sizeof(u32) + NUM_HW_PIC_BUF * SIZE_SEI_USERDATA), HFI_DMA_ALIGNMENT);
}
static u32 vp8d_persist1_size(void)
{
return ALIGN(VP8_NUM_PROBABILITY_TABLE_BUF * VP8_PROB_TABLE_SIZE,
HFI_DMA_ALIGNMENT);
}
static u32 vp9d_persist1_size(void)
{
return
ALIGN(VP9_NUM_PROBABILITY_TABLE_BUF * VP9_PROB_TABLE_SIZE,
HFI_DMA_ALIGNMENT) +
ALIGN(HFI_IRIS2_VP9D_COMV_SIZE, HFI_DMA_ALIGNMENT) +
ALIGN(MAX_SUPERFRAME_HEADER_LEN, HFI_DMA_ALIGNMENT) +
ALIGN(VP9_UDC_HEADER_BUF_SIZE, HFI_DMA_ALIGNMENT) +
ALIGN(VP9_NUM_FRAME_INFO_BUF * CCE_TILE_OFFSET_SIZE,
HFI_DMA_ALIGNMENT);
}
static u32 mpeg2d_persist1_size(void)
{
return QMATRIX_SIZE + MP2D_QPDUMP_SIZE;
}
struct dec_bufsize_ops {
u32 (*scratch)(u32 width, u32 height, bool is_interlaced);
u32 (*scratch1)(u32 width, u32 height, u32 min_buf_count,
bool split_mode_enabled, u32 num_vpp_pipes);
u32 (*persist1)(void);
};
struct enc_bufsize_ops {
u32 (*scratch)(u32 width, u32 height, u32 work_mode, u32 num_vpp_pipes,
u32 rc_type);
u32 (*scratch1)(u32 width, u32 height, u32 num_ref, bool ten_bit,
u32 num_vpp_pipes);
u32 (*scratch2)(u32 width, u32 height, u32 num_ref, bool ten_bit);
u32 (*persist)(void);
};
static struct dec_bufsize_ops dec_h264_ops = {
.scratch = h264d_scratch_size,
.scratch1 = h264d_scratch1_size,
.persist1 = h264d_persist1_size,
};
static struct dec_bufsize_ops dec_h265_ops = {
.scratch = h265d_scratch_size,
.scratch1 = h265d_scratch1_size,
.persist1 = h265d_persist1_size,
};
static struct dec_bufsize_ops dec_vp8_ops = {
.scratch = vpxd_scratch_size,
.scratch1 = vp8d_scratch1_size,
.persist1 = vp8d_persist1_size,
};
static struct dec_bufsize_ops dec_vp9_ops = {
.scratch = vpxd_scratch_size,
.scratch1 = vp9d_scratch1_size,
.persist1 = vp9d_persist1_size,
};
static struct dec_bufsize_ops dec_mpeg2_ops = {
.scratch = mpeg2d_scratch_size,
.scratch1 = mpeg2d_scratch1_size,
.persist1 = mpeg2d_persist1_size,
};
static struct enc_bufsize_ops enc_h264_ops = {
.scratch = h264e_scratch_size,
.scratch1 = h264e_scratch1_size,
.scratch2 = enc_scratch2_size,
.persist = enc_persist_size,
};
static struct enc_bufsize_ops enc_h265_ops = {
.scratch = h265e_scratch_size,
.scratch1 = h265e_scratch1_size,
.scratch2 = enc_scratch2_size,
.persist = enc_persist_size,
};
static struct enc_bufsize_ops enc_vp8_ops = {
.scratch = vp8e_scratch_size,
.scratch1 = vp8e_scratch1_size,
.scratch2 = enc_scratch2_size,
.persist = enc_persist_size,
};
static u32
calculate_dec_input_frame_size(u32 width, u32 height, u32 codec,
u32 max_mbs_per_frame, u32 buffer_size_limit)
{
u32 frame_size, num_mbs;
u32 div_factor = 1;
u32 base_res_mbs = NUM_MBS_4K;
/*
* Decoder input size calculation:
* If the clip is 8K, the buffer size is calculated for 8K: 8K MBs / 4.
* For 8K cases we expect width/height to always be set.
* In all other cases the size is calculated for 4K:
* 4K MBs for VP8/VP9 and 4K / 2 for the remaining codecs.
*/
num_mbs = (ALIGN(height, 16) * ALIGN(width, 16)) / 256;
if (num_mbs > NUM_MBS_4K) {
div_factor = 4;
base_res_mbs = max_mbs_per_frame;
} else {
base_res_mbs = NUM_MBS_4K;
if (codec == V4L2_PIX_FMT_VP9)
div_factor = 1;
else
div_factor = 2;
}
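/*
 * Worked example (hypothetical input): a 1080p H.264 stream has
 * 8160 MBs, below NUM_MBS_4K, so base_res_mbs = 36864 and
 * div_factor = 2, giving 36864 * 256 * 3 / 2 / 2 = 7077888 bytes.
 */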
frame_size = base_res_mbs * MB_SIZE_IN_PIXEL * 3 / 2 / div_factor;
/* multiply by 10/8 (1.25) to get size for 10 bit case */
if (codec == V4L2_PIX_FMT_VP9 || codec == V4L2_PIX_FMT_HEVC)
frame_size = frame_size + (frame_size >> 2);
if (buffer_size_limit && buffer_size_limit < frame_size)
frame_size = buffer_size_limit;
return ALIGN(frame_size, SZ_4K);
}
static int output_buffer_count(u32 session_type, u32 codec)
{
u32 output_min_count;
if (session_type == VIDC_SESSION_TYPE_DEC) {
switch (codec) {
case V4L2_PIX_FMT_MPEG2:
case V4L2_PIX_FMT_VP8:
output_min_count = 6;
break;
case V4L2_PIX_FMT_VP9:
output_min_count = 11;
break;
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_HEVC:
default:
output_min_count = 18;
break;
}
} else {
output_min_count = MIN_ENC_OUTPUT_BUFFERS;
}
return output_min_count;
}
static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
struct hfi_buffer_requirements *bufreq)
{
enum hfi_version version = params->version;
u32 codec = params->codec;
u32 width = params->width, height = params->height, out_min_count;
u32 out_width = params->out_width, out_height = params->out_height;
struct dec_bufsize_ops *dec_ops;
bool is_secondary_output = params->dec.is_secondary_output;
bool is_interlaced = params->dec.is_interlaced;
u32 max_mbs_per_frame = params->dec.max_mbs_per_frame;
u32 buffer_size_limit = params->dec.buffer_size_limit;
u32 num_vpp_pipes = params->num_vpp_pipes;
switch (codec) {
case V4L2_PIX_FMT_H264:
dec_ops = &dec_h264_ops;
break;
case V4L2_PIX_FMT_HEVC:
dec_ops = &dec_h265_ops;
break;
case V4L2_PIX_FMT_VP8:
dec_ops = &dec_vp8_ops;
break;
case V4L2_PIX_FMT_VP9:
dec_ops = &dec_vp9_ops;
break;
case V4L2_PIX_FMT_MPEG2:
dec_ops = &dec_mpeg2_ops;
break;
default:
return -EINVAL;
}
out_min_count = output_buffer_count(VIDC_SESSION_TYPE_DEC, codec);
/* Max of driver and FW count */
out_min_count = max(out_min_count, hfi_bufreq_get_count_min(bufreq, version));
bufreq->type = buftype;
bufreq->region_size = 0;
bufreq->count_actual = 1;
hfi_bufreq_set_count_min(bufreq, version, 1);
hfi_bufreq_set_hold_count(bufreq, version, 1);
bufreq->contiguous = 1;
bufreq->alignment = 256;
if (buftype == HFI_BUFFER_INPUT) {
hfi_bufreq_set_count_min(bufreq, version, MIN_INPUT_BUFFERS);
bufreq->size =
calculate_dec_input_frame_size(width, height, codec,
max_mbs_per_frame,
buffer_size_limit);
} else if (buftype == HFI_BUFFER_OUTPUT || buftype == HFI_BUFFER_OUTPUT2) {
hfi_bufreq_set_count_min(bufreq, version, out_min_count);
bufreq->size =
venus_helper_get_framesz_raw(params->hfi_color_fmt,
out_width, out_height);
if (buftype == HFI_BUFFER_OUTPUT &&
params->dec.is_secondary_output)
bufreq->size =
venus_helper_get_framesz_raw(params->hfi_dpb_color_fmt,
out_width, out_height);
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH(version)) {
bufreq->size = dec_ops->scratch(width, height, is_interlaced);
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_1(version)) {
bufreq->size = dec_ops->scratch1(width, height, VB2_MAX_FRAME,
is_secondary_output,
num_vpp_pipes);
} else if (buftype == HFI_BUFFER_INTERNAL_PERSIST_1) {
bufreq->size = dec_ops->persist1();
} else {
bufreq->size = 0;
}
return 0;
}
static int bufreq_enc(struct hfi_plat_buffers_params *params, u32 buftype,
struct hfi_buffer_requirements *bufreq)
{
enum hfi_version version = params->version;
struct enc_bufsize_ops *enc_ops;
u32 width = params->width;
u32 height = params->height;
bool is_tenbit = params->enc.is_tenbit;
u32 num_bframes = params->enc.num_b_frames;
u32 codec = params->codec;
u32 work_mode = params->enc.work_mode;
u32 rc_type = params->enc.rc_type;
u32 num_vpp_pipes = params->num_vpp_pipes;
u32 num_ref, count_min;
switch (codec) {
case V4L2_PIX_FMT_H264:
enc_ops = &enc_h264_ops;
break;
case V4L2_PIX_FMT_HEVC:
enc_ops = &enc_h265_ops;
break;
case V4L2_PIX_FMT_VP8:
enc_ops = &enc_vp8_ops;
break;
default:
return -EINVAL;
}
num_ref = num_bframes > 0 ? num_bframes + 1 : 1;
bufreq->type = buftype;
bufreq->region_size = 0;
bufreq->count_actual = 1;
hfi_bufreq_set_count_min(bufreq, version, 1);
hfi_bufreq_set_hold_count(bufreq, version, 1);
bufreq->contiguous = 1;
bufreq->alignment = 256;
if (buftype == HFI_BUFFER_INPUT) {
hfi_bufreq_set_count_min(bufreq, version, MIN_INPUT_BUFFERS);
bufreq->size =
venus_helper_get_framesz_raw(params->hfi_color_fmt,
width, height);
} else if (buftype == HFI_BUFFER_OUTPUT ||
buftype == HFI_BUFFER_OUTPUT2) {
count_min = output_buffer_count(VIDC_SESSION_TYPE_ENC, codec);
hfi_bufreq_set_count_min(bufreq, version, count_min);
bufreq->size = calculate_enc_output_frame_size(width, height,
rc_type);
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH(version)) {
bufreq->size = enc_ops->scratch(width, height, work_mode,
num_vpp_pipes, rc_type);
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_1(version)) {
bufreq->size = enc_ops->scratch1(width, height, num_ref,
is_tenbit, num_vpp_pipes);
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_2(version)) {
bufreq->size = enc_ops->scratch2(width, height, num_ref,
is_tenbit);
} else if (buftype == HFI_BUFFER_INTERNAL_PERSIST) {
bufreq->size = enc_ops->persist();
} else {
bufreq->size = 0;
}
return 0;
}
int hfi_plat_bufreq_v6(struct hfi_plat_buffers_params *params, u32 session_type,
u32 buftype, struct hfi_buffer_requirements *bufreq)
{
if (session_type == VIDC_SESSION_TYPE_DEC)
return bufreq_dec(params, buftype, bufreq);
else
return bufreq_enc(params, buftype, bufreq);
}
| linux-master | drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/types.h>
#include <media/v4l2-ctrls.h>
#include "core.h"
#include "helpers.h"
#include "vdec.h"
static int vdec_op_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct venus_inst *inst = ctrl_to_inst(ctrl);
struct vdec_controls *ctr = &inst->controls.dec;
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
ctr->post_loop_deb_mode = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
ctr->profile = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
ctr->level = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY:
ctr->display_delay = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
ctr->display_delay_enable = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR:
ctr->conceal_color = *ctrl->p_new.p_s64;
break;
default:
return -EINVAL;
}
return 0;
}
static int vdec_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct venus_inst *inst = ctrl_to_inst(ctrl);
struct vdec_controls *ctr = &inst->controls.dec;
struct hfi_buffer_requirements bufreq;
enum hfi_version ver = inst->core->res->hfi_version;
u32 profile, level;
int ret;
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
ret = venus_helper_get_profile_level(inst, &profile, &level);
if (!ret)
ctr->profile = profile;
ctrl->val = ctr->profile;
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
case V4L2_CID_MPEG_VIDEO_VP9_LEVEL:
ret = venus_helper_get_profile_level(inst, &profile, &level);
if (!ret)
ctr->level = level;
ctrl->val = ctr->level;
break;
case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
ctrl->val = ctr->post_loop_deb_mode;
break;
case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
if (!ret)
ctrl->val = hfi_bufreq_get_count_min(&bufreq, ver);
break;
default:
return -EINVAL;
}
return 0;
}
static const struct v4l2_ctrl_ops vdec_ctrl_ops = {
.s_ctrl = vdec_op_s_ctrl,
.g_volatile_ctrl = vdec_op_g_volatile_ctrl,
};
int vdec_ctrl_init(struct venus_inst *inst)
{
struct v4l2_ctrl *ctrl;
int ret;
ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 12);
if (ret)
return ret;
ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY,
~((1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE) |
(1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE)),
V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH)),
V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LEVEL,
V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
0, V4L2_MPEG_VIDEO_H264_LEVEL_1_0);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
V4L2_MPEG_VIDEO_VP8_PROFILE_3,
0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
V4L2_MPEG_VIDEO_VP9_PROFILE_3,
0, V4L2_MPEG_VIDEO_VP9_PROFILE_0);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
ctrl = v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_VP9_LEVEL,
V4L2_MPEG_VIDEO_VP9_LEVEL_6_2,
0, V4L2_MPEG_VIDEO_VP9_LEVEL_1_0);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER, 0, 1, 1, 0);
ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 32, 1, 1);
if (ctrl)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY,
0, 16383, 1, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE,
0, 1, 1, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &vdec_ctrl_ops,
V4L2_CID_MPEG_VIDEO_DEC_CONCEAL_COLOR, 0,
0xffffffffffffLL, 1, 0x8000800010LL);
ret = inst->ctrl_handler.error;
if (ret) {
v4l2_ctrl_handler_free(&inst->ctrl_handler);
return ret;
}
return 0;
}
void vdec_ctrl_deinit(struct venus_inst *inst)
{
v4l2_ctrl_handler_free(&inst->ctrl_handler);
}
| linux-master | drivers/media/platform/qcom/venus/vdec_ctrls.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Linaro Ltd.
*
* Author: Stanimir Varbanov <[email protected]>
*/
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/types.h>
#include <media/v4l2-mem2mem.h>
#include "core.h"
#include "hfi_parser.h"
#include "hfi_venus_io.h"
#include "pm_helpers.h"
#include "hfi_platform.h"
static bool legacy_binding;
static int core_clks_get(struct venus_core *core)
{
const struct venus_resources *res = core->res;
struct device *dev = core->dev;
unsigned int i;
for (i = 0; i < res->clks_num; i++) {
core->clks[i] = devm_clk_get(dev, res->clks[i]);
if (IS_ERR(core->clks[i]))
return PTR_ERR(core->clks[i]);
}
return 0;
}
static int core_clks_enable(struct venus_core *core)
{
const struct venus_resources *res = core->res;
const struct freq_tbl *freq_tbl = core->res->freq_tbl;
unsigned int freq_tbl_size = core->res->freq_tbl_size;
unsigned long freq;
unsigned int i;
int ret;
if (!freq_tbl)
return -EINVAL;
freq = freq_tbl[freq_tbl_size - 1].freq;
for (i = 0; i < res->clks_num; i++) {
if (IS_V6(core)) {
ret = clk_set_rate(core->clks[i], freq);
if (ret)
goto err;
}
ret = clk_prepare_enable(core->clks[i]);
if (ret)
goto err;
}
return 0;
err:
while (i--)
clk_disable_unprepare(core->clks[i]);
return ret;
}
static void core_clks_disable(struct venus_core *core)
{
const struct venus_resources *res = core->res;
unsigned int i = res->clks_num;
while (i--)
clk_disable_unprepare(core->clks[i]);
}
static int core_clks_set_rate(struct venus_core *core, unsigned long freq)
{
int ret;
ret = dev_pm_opp_set_rate(core->dev, freq);
if (ret)
return ret;
ret = clk_set_rate(core->vcodec0_clks[0], freq);
if (ret)
return ret;
ret = clk_set_rate(core->vcodec1_clks[0], freq);
if (ret)
return ret;
return 0;
}
static int vcodec_clks_get(struct venus_core *core, struct device *dev,
struct clk **clks, const char * const *id)
{
const struct venus_resources *res = core->res;
unsigned int i;
for (i = 0; i < res->vcodec_clks_num; i++) {
if (!id[i])
continue;
clks[i] = devm_clk_get(dev, id[i]);
if (IS_ERR(clks[i]))
return PTR_ERR(clks[i]);
}
return 0;
}
static int vcodec_clks_enable(struct venus_core *core, struct clk **clks)
{
const struct venus_resources *res = core->res;
unsigned int i;
int ret;
for (i = 0; i < res->vcodec_clks_num; i++) {
ret = clk_prepare_enable(clks[i]);
if (ret)
goto err;
}
return 0;
err:
while (i--)
clk_disable_unprepare(clks[i]);
return ret;
}
static void vcodec_clks_disable(struct venus_core *core, struct clk **clks)
{
const struct venus_resources *res = core->res;
unsigned int i = res->vcodec_clks_num;
while (i--)
clk_disable_unprepare(clks[i]);
}
static u32 load_per_instance(struct venus_inst *inst)
{
u32 mbs;
if (!inst || !(inst->state >= INST_INIT && inst->state < INST_STOP))
return 0;
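/* e.g. 1080p at 30 fps loads 120 * 68 * 30 = 244800 macroblocks/s */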
mbs = (ALIGN(inst->width, 16) / 16) * (ALIGN(inst->height, 16) / 16);
return mbs * inst->fps;
}
static u32 load_per_type(struct venus_core *core, u32 session_type)
{
struct venus_inst *inst = NULL;
u32 mbs_per_sec = 0;
list_for_each_entry(inst, &core->instances, list) {
if (inst->session_type != session_type)
continue;
mbs_per_sec += load_per_instance(inst);
}
return mbs_per_sec;
}
static void mbs_to_bw(struct venus_inst *inst, u32 mbs, u32 *avg, u32 *peak)
{
const struct venus_resources *res = inst->core->res;
const struct bw_tbl *bw_tbl;
unsigned int num_rows, i;
*avg = 0;
*peak = 0;
if (mbs == 0)
return;
if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
num_rows = res->bw_tbl_enc_size;
bw_tbl = res->bw_tbl_enc;
} else if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
num_rows = res->bw_tbl_dec_size;
bw_tbl = res->bw_tbl_dec;
} else {
return;
}
if (!bw_tbl || num_rows == 0)
return;
for (i = 0; i < num_rows; i++) {
if (i != 0 && mbs > bw_tbl[i].mbs_per_sec)
break;
if (inst->dpb_fmt & HFI_COLOR_FORMAT_10_BIT_BASE) {
*avg = bw_tbl[i].avg_10bit;
*peak = bw_tbl[i].peak_10bit;
} else {
*avg = bw_tbl[i].avg;
*peak = bw_tbl[i].peak;
}
}
}
static int load_scale_bw(struct venus_core *core)
{
struct venus_inst *inst = NULL;
u32 mbs_per_sec, avg, peak, total_avg = 0, total_peak = 0;
list_for_each_entry(inst, &core->instances, list) {
mbs_per_sec = load_per_instance(inst);
mbs_to_bw(inst, mbs_per_sec, &avg, &peak);
total_avg += avg;
total_peak += peak;
}
/*
* Keep a minimum bandwidth vote for the "video-mem" path,
* so that the clocks can be disabled during vdec_session_release().
* The actual bandwidth drop will be done during device suspend,
* so that the device can power down without any warnings.
*/
if (!total_avg && !total_peak)
total_avg = kbps_to_icc(1000);
dev_dbg(core->dev, VDBGL "total: avg_bw: %u, peak_bw: %u\n",
total_avg, total_peak);
return icc_set_bw(core->video_path, total_avg, total_peak);
}
static int load_scale_v1(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
const struct freq_tbl *table = core->res->freq_tbl;
unsigned int num_rows = core->res->freq_tbl_size;
unsigned long freq = table[0].freq;
struct device *dev = core->dev;
u32 mbs_per_sec;
unsigned int i;
int ret = 0;
mutex_lock(&core->lock);
mbs_per_sec = load_per_type(core, VIDC_SESSION_TYPE_ENC) +
load_per_type(core, VIDC_SESSION_TYPE_DEC);
if (mbs_per_sec > core->res->max_load)
dev_warn(dev, "HW is overloaded, needed: %d max: %d\n",
mbs_per_sec, core->res->max_load);
if (!mbs_per_sec && num_rows > 1) {
freq = table[num_rows - 1].freq;
goto set_freq;
}
for (i = 0; i < num_rows; i++) {
if (mbs_per_sec > table[i].load)
break;
freq = table[i].freq;
}
set_freq:
ret = core_clks_set_rate(core, freq);
if (ret) {
dev_err(dev, "failed to set clock rate %lu (%d)\n",
freq, ret);
goto exit;
}
ret = load_scale_bw(core);
if (ret) {
dev_err(dev, "failed to set bandwidth (%d)\n",
ret);
goto exit;
}
exit:
mutex_unlock(&core->lock);
return ret;
}
static int core_get_v1(struct venus_core *core)
{
int ret;
ret = core_clks_get(core);
if (ret)
return ret;
ret = devm_pm_opp_set_clkname(core->dev, "core");
if (ret)
return ret;
return 0;
}
static void core_put_v1(struct venus_core *core)
{
}
static int core_power_v1(struct venus_core *core, int on)
{
int ret = 0;
if (on == POWER_ON)
ret = core_clks_enable(core);
else
core_clks_disable(core);
return ret;
}
static const struct venus_pm_ops pm_ops_v1 = {
.core_get = core_get_v1,
.core_put = core_put_v1,
.core_power = core_power_v1,
.load_scale = load_scale_v1,
};
static void
vcodec_control_v3(struct venus_core *core, u32 session_type, bool enable)
{
void __iomem *ctrl;
if (session_type == VIDC_SESSION_TYPE_DEC)
ctrl = core->wrapper_base + WRAPPER_VDEC_VCODEC_POWER_CONTROL;
else
ctrl = core->wrapper_base + WRAPPER_VENC_VCODEC_POWER_CONTROL;
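/* The power control register is active-low: writing 0 enables the codec */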
if (enable)
writel(0, ctrl);
else
writel(1, ctrl);
}
static int vdec_get_v3(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
return vcodec_clks_get(core, dev, core->vcodec0_clks,
core->res->vcodec0_clks);
}
static int vdec_power_v3(struct device *dev, int on)
{
struct venus_core *core = dev_get_drvdata(dev);
int ret = 0;
vcodec_control_v3(core, VIDC_SESSION_TYPE_DEC, true);
if (on == POWER_ON)
ret = vcodec_clks_enable(core, core->vcodec0_clks);
else
vcodec_clks_disable(core, core->vcodec0_clks);
vcodec_control_v3(core, VIDC_SESSION_TYPE_DEC, false);
return ret;
}
static int venc_get_v3(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
return vcodec_clks_get(core, dev, core->vcodec1_clks,
core->res->vcodec1_clks);
}
static int venc_power_v3(struct device *dev, int on)
{
struct venus_core *core = dev_get_drvdata(dev);
int ret = 0;
vcodec_control_v3(core, VIDC_SESSION_TYPE_ENC, true);
if (on == POWER_ON)
ret = vcodec_clks_enable(core, core->vcodec1_clks);
else
vcodec_clks_disable(core, core->vcodec1_clks);
vcodec_control_v3(core, VIDC_SESSION_TYPE_ENC, false);
return ret;
}
static const struct venus_pm_ops pm_ops_v3 = {
.core_get = core_get_v1,
.core_put = core_put_v1,
.core_power = core_power_v1,
.vdec_get = vdec_get_v3,
.vdec_power = vdec_power_v3,
.venc_get = venc_get_v3,
.venc_power = venc_power_v3,
.load_scale = load_scale_v1,
};
static int vcodec_control_v4(struct venus_core *core, u32 coreid, bool enable)
{
void __iomem *ctrl, *stat;
u32 val;
int ret;
if (IS_V6(core)) {
ctrl = core->wrapper_base + WRAPPER_CORE_POWER_CONTROL_V6;
stat = core->wrapper_base + WRAPPER_CORE_POWER_STATUS_V6;
} else if (coreid == VIDC_CORE_ID_1) {
ctrl = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_CONTROL;
stat = core->wrapper_base + WRAPPER_VCODEC0_MMCC_POWER_STATUS;
} else {
ctrl = core->wrapper_base + WRAPPER_VCODEC1_MMCC_POWER_CONTROL;
stat = core->wrapper_base + WRAPPER_VCODEC1_MMCC_POWER_STATUS;
}
if (enable) {
writel(0, ctrl);
ret = readl_poll_timeout(stat, val, val & BIT(1), 1, 100);
if (ret)
return ret;
} else {
writel(1, ctrl);
ret = readl_poll_timeout(stat, val, !(val & BIT(1)), 1, 100);
if (ret)
return ret;
}
return 0;
}
static int poweroff_coreid(struct venus_core *core, unsigned int coreid_mask)
{
int ret;
if (coreid_mask & VIDC_CORE_ID_1) {
ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
if (ret)
return ret;
vcodec_clks_disable(core, core->vcodec0_clks);
ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
if (ret)
return ret;
ret = pm_runtime_put_sync(core->pmdomains[1]);
if (ret < 0)
return ret;
}
if (coreid_mask & VIDC_CORE_ID_2) {
ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
if (ret)
return ret;
vcodec_clks_disable(core, core->vcodec1_clks);
ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
if (ret)
return ret;
ret = pm_runtime_put_sync(core->pmdomains[2]);
if (ret < 0)
return ret;
}
return 0;
}
static int poweron_coreid(struct venus_core *core, unsigned int coreid_mask)
{
int ret;
if (coreid_mask & VIDC_CORE_ID_1) {
ret = pm_runtime_get_sync(core->pmdomains[1]);
if (ret < 0)
return ret;
ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
if (ret)
return ret;
ret = vcodec_clks_enable(core, core->vcodec0_clks);
if (ret)
return ret;
ret = vcodec_control_v4(core, VIDC_CORE_ID_1, false);
if (ret < 0)
return ret;
}
if (coreid_mask & VIDC_CORE_ID_2) {
ret = pm_runtime_get_sync(core->pmdomains[2]);
if (ret < 0)
return ret;
ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
if (ret)
return ret;
ret = vcodec_clks_enable(core, core->vcodec1_clks);
if (ret)
return ret;
ret = vcodec_control_v4(core, VIDC_CORE_ID_2, false);
if (ret < 0)
return ret;
}
return 0;
}
static inline int power_save_mode_enable(struct venus_inst *inst,
bool enable)
{
struct venc_controls *enc_ctr = &inst->controls.enc;
const u32 ptype = HFI_PROPERTY_CONFIG_VENC_PERF_MODE;
u32 venc_mode;
int ret = 0;
if (inst->session_type != VIDC_SESSION_TYPE_ENC)
return 0;
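/* Never use power-save mode for constant-quality rate control */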
if (enc_ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
enable = false;
venc_mode = enable ? HFI_VENC_PERFMODE_POWER_SAVE :
HFI_VENC_PERFMODE_MAX_QUALITY;
ret = hfi_session_set_property(inst, ptype, &venc_mode);
if (ret)
return ret;
inst->flags = enable ? inst->flags | VENUS_LOW_POWER :
inst->flags & ~VENUS_LOW_POWER;
return ret;
}
static int move_core_to_power_save_mode(struct venus_core *core,
u32 core_id)
{
struct venus_inst *inst = NULL;
mutex_lock(&core->lock);
list_for_each_entry(inst, &core->instances, list) {
if (inst->clk_data.core_id == core_id &&
inst->session_type == VIDC_SESSION_TYPE_ENC)
power_save_mode_enable(inst, true);
}
mutex_unlock(&core->lock);
return 0;
}
static void
min_loaded_core(struct venus_inst *inst, u32 *min_coreid, u32 *min_load, bool low_power)
{
u32 mbs_per_sec, load, core1_load = 0, core2_load = 0;
u32 cores_max = core_num_max(inst);
struct venus_core *core = inst->core;
struct venus_inst *inst_pos;
unsigned long vpp_freq;
u32 coreid;
mutex_lock(&core->lock);
list_for_each_entry(inst_pos, &core->instances, list) {
if (inst_pos == inst)
continue;
if (inst_pos->state != INST_START)
continue;
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
vpp_freq = inst_pos->clk_data.vpp_freq;
else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
vpp_freq = low_power ? inst_pos->clk_data.low_power_freq :
inst_pos->clk_data.vpp_freq;
else
continue;
coreid = inst_pos->clk_data.core_id;
mbs_per_sec = load_per_instance(inst_pos);
load = mbs_per_sec * vpp_freq;
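/* VIDC_CORE_ID_3 means the session runs on both cores; split its load evenly */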
if ((coreid & VIDC_CORE_ID_3) == VIDC_CORE_ID_3) {
core1_load += load / 2;
core2_load += load / 2;
} else if (coreid & VIDC_CORE_ID_1) {
core1_load += load;
} else if (coreid & VIDC_CORE_ID_2) {
core2_load += load;
}
}
*min_coreid = core1_load <= core2_load ?
VIDC_CORE_ID_1 : VIDC_CORE_ID_2;
*min_load = min(core1_load, core2_load);
if (cores_max < VIDC_CORE_ID_2 || core->res->vcodec_num < 2) {
*min_coreid = VIDC_CORE_ID_1;
*min_load = core1_load;
}
mutex_unlock(&core->lock);
}
static int decide_core(struct venus_inst *inst)
{
const u32 ptype = HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE;
struct venus_core *core = inst->core;
u32 min_coreid, min_load, cur_inst_load;
u32 min_lp_coreid, min_lp_load, cur_inst_lp_load;
struct hfi_videocores_usage_type cu;
unsigned long max_freq;
if (legacy_binding) {
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
cu.video_core_enable_mask = VIDC_CORE_ID_1;
else
cu.video_core_enable_mask = VIDC_CORE_ID_2;
goto done;
}
if (inst->clk_data.core_id != VIDC_CORE_ID_DEFAULT)
return 0;
cur_inst_load = load_per_instance(inst);
cur_inst_load *= inst->clk_data.vpp_freq;
/*TODO : divide this inst->load by work_route */
cur_inst_lp_load = load_per_instance(inst);
cur_inst_lp_load *= inst->clk_data.low_power_freq;
/*TODO : divide this inst->load by work_route */
max_freq = core->res->freq_tbl[0].freq;
min_loaded_core(inst, &min_coreid, &min_load, false);
min_loaded_core(inst, &min_lp_coreid, &min_lp_load, true);
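/*
 * Prefer the least-loaded core at nominal frequency; failing that, try
 * running this instance in low-power mode, and finally move all encoder
 * instances on the chosen core to low-power mode.
 */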
if (cur_inst_load + min_load <= max_freq) {
inst->clk_data.core_id = min_coreid;
cu.video_core_enable_mask = min_coreid;
} else if (cur_inst_lp_load + min_load <= max_freq) {
/* Move current instance to LP and return */
inst->clk_data.core_id = min_coreid;
cu.video_core_enable_mask = min_coreid;
power_save_mode_enable(inst, true);
} else if (cur_inst_lp_load + min_lp_load <= max_freq) {
/* Move all instances to LP mode and return */
inst->clk_data.core_id = min_lp_coreid;
cu.video_core_enable_mask = min_lp_coreid;
move_core_to_power_save_mode(core, min_lp_coreid);
} else {
dev_warn(core->dev, "HW can't support this load\n");
return -EINVAL;
}
done:
return hfi_session_set_property(inst, ptype, &cu);
}
static int acquire_core(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
unsigned int coreid_mask = 0;
if (inst->core_acquired)
return 0;
inst->core_acquired = true;
if (inst->clk_data.core_id & VIDC_CORE_ID_1) {
if (core->core0_usage_count++)
return 0;
coreid_mask = VIDC_CORE_ID_1;
}
if (inst->clk_data.core_id & VIDC_CORE_ID_2) {
if (core->core1_usage_count++)
return 0;
coreid_mask |= VIDC_CORE_ID_2;
}
return poweron_coreid(core, coreid_mask);
}
static int release_core(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
unsigned int coreid_mask = 0;
int ret;
if (!inst->core_acquired)
return 0;
if (inst->clk_data.core_id & VIDC_CORE_ID_1) {
if (--core->core0_usage_count)
goto done;
coreid_mask = VIDC_CORE_ID_1;
}
if (inst->clk_data.core_id & VIDC_CORE_ID_2) {
if (--core->core1_usage_count)
goto done;
coreid_mask |= VIDC_CORE_ID_2;
}
ret = poweroff_coreid(core, coreid_mask);
if (ret)
return ret;
done:
inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
inst->core_acquired = false;
return 0;
}
static int coreid_power_v4(struct venus_inst *inst, int on)
{
struct venus_core *core = inst->core;
int ret;
if (legacy_binding)
return 0;
if (on == POWER_ON) {
ret = decide_core(inst);
if (ret)
return ret;
mutex_lock(&core->lock);
ret = acquire_core(inst);
mutex_unlock(&core->lock);
} else {
mutex_lock(&core->lock);
ret = release_core(inst);
mutex_unlock(&core->lock);
}
return ret;
}
static int vdec_get_v4(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
if (!legacy_binding)
return 0;
return vcodec_clks_get(core, dev, core->vcodec0_clks,
core->res->vcodec0_clks);
}
static void vdec_put_v4(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
unsigned int i;
if (!legacy_binding)
return;
for (i = 0; i < core->res->vcodec_clks_num; i++)
core->vcodec0_clks[i] = NULL;
}
static int vdec_power_v4(struct device *dev, int on)
{
struct venus_core *core = dev_get_drvdata(dev);
int ret;
if (!legacy_binding)
return 0;
ret = vcodec_control_v4(core, VIDC_CORE_ID_1, true);
if (ret)
return ret;
if (on == POWER_ON)
ret = vcodec_clks_enable(core, core->vcodec0_clks);
else
vcodec_clks_disable(core, core->vcodec0_clks);
vcodec_control_v4(core, VIDC_CORE_ID_1, false);
return ret;
}
static int venc_get_v4(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
if (!legacy_binding)
return 0;
return vcodec_clks_get(core, dev, core->vcodec1_clks,
core->res->vcodec1_clks);
}
static void venc_put_v4(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
unsigned int i;
if (!legacy_binding)
return;
for (i = 0; i < core->res->vcodec_clks_num; i++)
core->vcodec1_clks[i] = NULL;
}
static int venc_power_v4(struct device *dev, int on)
{
struct venus_core *core = dev_get_drvdata(dev);
int ret;
if (!legacy_binding)
return 0;
ret = vcodec_control_v4(core, VIDC_CORE_ID_2, true);
if (ret)
return ret;
if (on == POWER_ON)
ret = vcodec_clks_enable(core, core->vcodec1_clks);
else
vcodec_clks_disable(core, core->vcodec1_clks);
vcodec_control_v4(core, VIDC_CORE_ID_2, false);
return ret;
}
static int vcodec_domains_get(struct venus_core *core)
{
int ret;
struct device **opp_virt_dev;
struct device *dev = core->dev;
const struct venus_resources *res = core->res;
struct device *pd;
unsigned int i;
if (!res->vcodec_pmdomains_num)
goto skip_pmdomains;
for (i = 0; i < res->vcodec_pmdomains_num; i++) {
pd = dev_pm_domain_attach_by_name(dev,
res->vcodec_pmdomains[i]);
if (IS_ERR_OR_NULL(pd))
return PTR_ERR(pd) ? : -ENODATA;
core->pmdomains[i] = pd;
}
skip_pmdomains:
if (!core->res->opp_pmdomain)
return 0;
/* Attach the power domain for setting performance state */
ret = devm_pm_opp_attach_genpd(dev, res->opp_pmdomain, &opp_virt_dev);
if (ret)
goto opp_attach_err;
core->opp_pmdomain = *opp_virt_dev;
core->opp_dl_venus = device_link_add(dev, core->opp_pmdomain,
DL_FLAG_RPM_ACTIVE |
DL_FLAG_PM_RUNTIME |
DL_FLAG_STATELESS);
if (!core->opp_dl_venus) {
ret = -ENODEV;
goto opp_attach_err;
}
return 0;
opp_attach_err:
for (i = 0; i < res->vcodec_pmdomains_num; i++) {
if (IS_ERR_OR_NULL(core->pmdomains[i]))
continue;
dev_pm_domain_detach(core->pmdomains[i], true);
}
return ret;
}
static void vcodec_domains_put(struct venus_core *core)
{
const struct venus_resources *res = core->res;
unsigned int i;
if (!res->vcodec_pmdomains_num)
goto skip_pmdomains;
for (i = 0; i < res->vcodec_pmdomains_num; i++) {
if (IS_ERR_OR_NULL(core->pmdomains[i]))
continue;
dev_pm_domain_detach(core->pmdomains[i], true);
}
skip_pmdomains:
if (!core->has_opp_table)
return;
if (core->opp_dl_venus)
device_link_del(core->opp_dl_venus);
}
static int core_resets_reset(struct venus_core *core)
{
const struct venus_resources *res = core->res;
unsigned int i;
int ret;
if (!res->resets_num)
return 0;
for (i = 0; i < res->resets_num; i++) {
ret = reset_control_assert(core->resets[i]);
if (ret)
goto err;
usleep_range(150, 250);
ret = reset_control_deassert(core->resets[i]);
if (ret)
goto err;
}
err:
return ret;
}
static int core_resets_get(struct venus_core *core)
{
struct device *dev = core->dev;
const struct venus_resources *res = core->res;
unsigned int i;
int ret;
if (!res->resets_num)
return 0;
for (i = 0; i < res->resets_num; i++) {
core->resets[i] =
devm_reset_control_get_exclusive(dev, res->resets[i]);
if (IS_ERR(core->resets[i])) {
ret = PTR_ERR(core->resets[i]);
return ret;
}
}
return 0;
}
static int core_get_v4(struct venus_core *core)
{
struct device *dev = core->dev;
const struct venus_resources *res = core->res;
int ret;
ret = core_clks_get(core);
if (ret)
return ret;
if (!res->vcodec_pmdomains_num)
legacy_binding = true;
dev_info(dev, "%s binding\n", legacy_binding ? "legacy" : "non-legacy");
ret = vcodec_clks_get(core, dev, core->vcodec0_clks, res->vcodec0_clks);
if (ret)
return ret;
ret = vcodec_clks_get(core, dev, core->vcodec1_clks, res->vcodec1_clks);
if (ret)
return ret;
ret = core_resets_get(core);
if (ret)
return ret;
if (legacy_binding)
return 0;
ret = devm_pm_opp_set_clkname(dev, "core");
if (ret)
return ret;
ret = vcodec_domains_get(core);
if (ret)
return ret;
if (core->res->opp_pmdomain) {
ret = devm_pm_opp_of_add_table(dev);
if (!ret) {
core->has_opp_table = true;
} else if (ret != -ENODEV) {
dev_err(dev, "invalid OPP table in device tree\n");
return ret;
}
}
return 0;
}
static void core_put_v4(struct venus_core *core)
{
if (legacy_binding)
return;
vcodec_domains_put(core);
}
static int core_power_v4(struct venus_core *core, int on)
{
struct device *dev = core->dev;
struct device *pmctrl = core->pmdomains[0];
int ret = 0;
if (on == POWER_ON) {
if (pmctrl) {
ret = pm_runtime_resume_and_get(pmctrl);
if (ret < 0)
return ret;
}
ret = core_resets_reset(core);
if (ret) {
if (pmctrl)
pm_runtime_put_sync(pmctrl);
return ret;
}
ret = core_clks_enable(core);
if (ret < 0 && pmctrl)
pm_runtime_put_sync(pmctrl);
} else {
/* Drop the performance state vote */
if (core->opp_pmdomain)
dev_pm_opp_set_rate(dev, 0);
core_clks_disable(core);
ret = core_resets_reset(core);
if (pmctrl)
pm_runtime_put_sync(pmctrl);
}
return ret;
}
static unsigned long calculate_inst_freq(struct venus_inst *inst,
unsigned long filled_len)
{
unsigned long vpp_freq_per_mb = 0, vpp_freq = 0, vsp_freq = 0;
u32 fps = (u32)inst->fps;
u32 mbs_per_sec;
mbs_per_sec = load_per_instance(inst);
if (inst->state != INST_START)
return 0;
if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
vpp_freq_per_mb = inst->flags & VENUS_LOW_POWER ?
inst->clk_data.low_power_freq :
inst->clk_data.vpp_freq;
vpp_freq = mbs_per_sec * vpp_freq_per_mb;
} else {
vpp_freq = mbs_per_sec * inst->clk_data.vpp_freq;
}
/* 21 / 20 is overhead factor */
vpp_freq += vpp_freq / 20;
vsp_freq = mbs_per_sec * inst->clk_data.vsp_freq;
/* 10 / 7 is overhead factor */
if (inst->session_type == VIDC_SESSION_TYPE_ENC)
vsp_freq += (inst->controls.enc.bitrate * 10) / 7;
else
vsp_freq += ((fps * filled_len * 8) * 10) / 7;
return max(vpp_freq, vsp_freq);
}
static int load_scale_v4(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
const struct freq_tbl *table = core->res->freq_tbl;
unsigned int num_rows = core->res->freq_tbl_size;
struct device *dev = core->dev;
unsigned long freq = 0, freq_core1 = 0, freq_core2 = 0;
unsigned long filled_len = 0;
int i, ret = 0;
for (i = 0; i < inst->num_input_bufs; i++)
filled_len = max(filled_len, inst->payloads[i]);
if (inst->session_type == VIDC_SESSION_TYPE_DEC && !filled_len)
return ret;
freq = calculate_inst_freq(inst, filled_len);
inst->clk_data.freq = freq;
mutex_lock(&core->lock);
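/* Sum the requested rates per core across all active instances */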
list_for_each_entry(inst, &core->instances, list) {
if (inst->clk_data.core_id == VIDC_CORE_ID_1) {
freq_core1 += inst->clk_data.freq;
} else if (inst->clk_data.core_id == VIDC_CORE_ID_2) {
freq_core2 += inst->clk_data.freq;
} else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
freq_core1 += inst->clk_data.freq;
freq_core2 += inst->clk_data.freq;
}
}
freq = max(freq_core1, freq_core2);
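/* freq_tbl is sorted in descending order, so table[0] is the maximum rate */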
if (freq > table[0].freq) {
dev_dbg(dev, VDBGL "requested clock rate %lu exceeds maximum, clamping to %lu\n",
freq, table[0].freq);
freq = table[0].freq;
goto set_freq;
}
for (i = num_rows - 1; i >= 0; i--) {
if (freq <= table[i].freq) {
freq = table[i].freq;
break;
}
}
set_freq:
ret = core_clks_set_rate(core, freq);
if (ret) {
dev_err(dev, "failed to set clock rate %lu (%d)\n",
freq, ret);
goto exit;
}
ret = load_scale_bw(core);
if (ret) {
dev_err(dev, "failed to set bandwidth (%d)\n",
ret);
goto exit;
}
exit:
mutex_unlock(&core->lock);
return ret;
}
static const struct venus_pm_ops pm_ops_v4 = {
.core_get = core_get_v4,
.core_put = core_put_v4,
.core_power = core_power_v4,
.vdec_get = vdec_get_v4,
.vdec_put = vdec_put_v4,
.vdec_power = vdec_power_v4,
.venc_get = venc_get_v4,
.venc_put = venc_put_v4,
.venc_power = venc_power_v4,
.coreid_power = coreid_power_v4,
.load_scale = load_scale_v4,
};
const struct venus_pm_ops *venus_pm_get(enum hfi_version version)
{
switch (version) {
case HFI_VERSION_1XX:
default:
return &pm_ops_v1;
case HFI_VERSION_3XX:
return &pm_ops_v3;
case HFI_VERSION_4XX:
case HFI_VERSION_6XX:
return &pm_ops_v4;
}
return NULL;
}
| linux-master | drivers/media/platform/qcom/venus/pm_helpers.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Linaro Ltd.
*/
#include <linux/debugfs.h>
#include <linux/fault-inject.h>
#include "core.h"
#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(venus_ssr_attr);
#endif
void venus_dbgfs_init(struct venus_core *core)
{
core->root = debugfs_create_dir("venus", NULL);
debugfs_create_x32("fw_level", 0644, core->root, &venus_fw_debug);
#ifdef CONFIG_FAULT_INJECTION
fault_create_debugfs_attr("fail_ssr", core->root, &venus_ssr_attr);
#endif
}
void venus_dbgfs_deinit(struct venus_core *core)
{
debugfs_remove_recursive(core->root);
}
| linux-master | drivers/media/platform/qcom/venus/dbgfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#include "hfi_platform.h"
static const struct hfi_plat_caps caps[] = {
{
.codec = HFI_VIDEO_CODEC_H264,
.domain = VIDC_SESSION_TYPE_DEC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
.caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
.num_caps = 10,
.pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52},
.pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52},
.pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52},
.pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52},
.pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52},
.num_pl = 5,
.fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
.fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
.num_fmts = 4,
}, {
.codec = HFI_VIDEO_CODEC_HEVC,
.domain = VIDC_SESSION_TYPE_DEC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
.caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
.num_caps = 10,
.pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0 << 28},
.pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0 << 28},
.num_pl = 2,
.fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
.fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
.fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010},
.fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.num_fmts = 7,
}, {
.codec = HFI_VIDEO_CODEC_VP8,
.domain = VIDC_SESSION_TYPE_DEC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
.caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
.num_caps = 10,
.pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0},
.pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1},
.pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2},
.pl[3] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_3},
.num_pl = 4,
.fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
.fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
.num_fmts = 4,
}, {
.codec = HFI_VIDEO_CODEC_VP9,
.domain = VIDC_SESSION_TYPE_DEC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 1},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 1},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 2073600, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
.caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
.num_caps = 10,
.pl[0] = {HFI_VP9_PROFILE_P0, 200},
.pl[1] = {HFI_VP9_PROFILE_P2_10B, 200},
.num_pl = 2,
.fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
.fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
.fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010},
.fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.num_fmts = 7,
}, {
.codec = HFI_VIDEO_CODEC_MPEG2,
.domain = VIDC_SESSION_TYPE_DEC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 1920, 1},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 1920, 1},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 8160, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 40000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 4096, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 4096, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 244800, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 30, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 2, 1},
.caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 1, 1},
.num_caps = 10,
.pl[0] = {HFI_MPEG2_PROFILE_SIMPLE, HFI_MPEG2_LEVEL_H14},
.pl[1] = {HFI_MPEG2_PROFILE_MAIN, HFI_MPEG2_LEVEL_H14},
.num_pl = 2,
.fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
.fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
.num_fmts = 4,
}, {
.codec = HFI_VIDEO_CODEC_H264,
.domain = VIDC_SESSION_TYPE_ENC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1},
.caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
.caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 5, 1},
.caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1},
.caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1},
.caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1},
.caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1},
.caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1},
.caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1},
.caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1},
.caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
.caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
.caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
.num_caps = 21,
.pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52},
.pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52},
.pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52},
.pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52},
.pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52},
.num_pl = 5,
.fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
.fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
.num_fmts = 4,
}, {
.codec = HFI_VIDEO_CODEC_HEVC,
.domain = VIDC_SESSION_TYPE_ENC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 480, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1},
.caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
.caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 5, 1},
.caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 4, 1},
.caps[12] = {HFI_CAPABILITY_LCU_SIZE, 32, 32, 1},
.caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1},
.caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1},
.caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 63, 1},
.caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 63, 1},
.caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 63, 1},
.caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
.caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
.caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
.caps[21] = {HFI_CAPABILITY_ROTATION, 1, 4, 90},
.caps[22] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16},
.caps[23] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16},
.num_caps = 24,
.pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
.pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
.num_pl = 2,
.fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
.fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
.num_fmts = 4,
}, {
.codec = HFI_VIDEO_CODEC_VP8,
.domain = VIDC_SESSION_TYPE_ENC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 96, 4096, 16},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 96, 4096, 16},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 1, 36864, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 120000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 1, 1036800, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 240, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 1, 3, 1},
.caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
.caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 3, 1},
.caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 2, 1},
.caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1},
.caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1},
.caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1},
.caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 127, 1},
.caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 127, 1},
.caps[17] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
.caps[18] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
.caps[19] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16},
.caps[20] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16},
.caps[21] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
.caps[22] = {HFI_CAPABILITY_ROTATION, 1, 4, 90},
.num_caps = 23,
.pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0},
.pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1},
.pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2},
.pl[3] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_3},
.num_pl = 4,
.fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
.fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
.num_fmts = 4,
} };
static const struct hfi_plat_caps *get_capabilities(unsigned int *entries)
{
*entries = ARRAY_SIZE(caps);
return caps;
}
static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count)
{
*enc_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
HFI_VIDEO_CODEC_VP8;
*dec_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
HFI_VIDEO_CODEC_VP8 | HFI_VIDEO_CODEC_VP9 |
HFI_VIDEO_CODEC_MPEG2;
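/* three encode codecs plus five decode codecs */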
*count = 8;
}
static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
{ V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 10, 320 },
{ V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 10, 320 },
{ V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 10, 320 },
{ V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
{ V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
{ V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
{ V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
{ V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 10, 200 },
};
static const struct hfi_platform_codec_freq_data *
get_codec_freq_data(u32 session_type, u32 pixfmt)
{
const struct hfi_platform_codec_freq_data *data = codec_freq_data;
unsigned int i, data_size = ARRAY_SIZE(codec_freq_data);
const struct hfi_platform_codec_freq_data *found = NULL;
for (i = 0; i < data_size; i++) {
if (data[i].pixfmt == pixfmt && data[i].session_type == session_type) {
found = &data[i];
break;
}
}
return found;
}
static unsigned long codec_vpp_freq(u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
data = get_codec_freq_data(session_type, codec);
if (data)
return data->vpp_freq;
return 0;
}
static unsigned long codec_vsp_freq(u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
data = get_codec_freq_data(session_type, codec);
if (data)
return data->vsp_freq;
return 0;
}
static unsigned long codec_lp_freq(u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
data = get_codec_freq_data(session_type, codec);
if (data)
return data->low_power_freq;
return 0;
}
const struct hfi_platform hfi_plat_v4 = {
.codec_vpp_freq = codec_vpp_freq,
.codec_vsp_freq = codec_vsp_freq,
.codec_lp_freq = codec_lp_freq,
.codecs = get_codecs,
.capabilities = get_capabilities,
};
| linux-master | drivers/media/platform/qcom/venus/hfi_platform_v4.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/sizes.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "core.h"
#include "firmware.h"
#include "hfi_venus_io.h"
#define VENUS_PAS_ID 9
#define VENUS_FW_MEM_SIZE (6 * SZ_1M)
#define VENUS_FW_START_ADDR 0x0
static void venus_reset_cpu(struct venus_core *core)
{
u32 fw_size = core->fw.mapped_mem_size;
void __iomem *wrapper_base;
if (IS_IRIS2_1(core))
wrapper_base = core->wrapper_tz_base;
else
wrapper_base = core->wrapper_base;
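/* Program the firmware, CPA and non-pixel memory windows seen by the video CPU */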
writel(0, wrapper_base + WRAPPER_FW_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_FW_END_ADDR);
writel(0, wrapper_base + WRAPPER_CPA_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_CPA_END_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
if (IS_IRIS2_1(core)) {
/* Bring XTSS out of reset */
writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET);
} else {
writel(0x0, wrapper_base + WRAPPER_CPU_CGC_DIS);
writel(0x0, wrapper_base + WRAPPER_CPU_CLOCK_CONFIG);
/* Bring ARM9 out of reset */
writel(0, wrapper_base + WRAPPER_A9SS_SW_RESET);
}
}
int venus_set_hw_state(struct venus_core *core, bool resume)
{
int ret;
if (core->use_tz) {
ret = qcom_scm_set_remote_state(resume, 0);
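/* On resume, tolerate -EINVAL from the secure world and report success */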
if (resume && ret == -EINVAL)
ret = 0;
return ret;
}
if (resume) {
venus_reset_cpu(core);
} else {
if (IS_IRIS2_1(core))
writel(WRAPPER_XTSS_SW_RESET_BIT,
core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
else
writel(WRAPPER_A9SS_SW_RESET_BIT,
core->wrapper_base + WRAPPER_A9SS_SW_RESET);
}
return 0;
}
static int venus_load_fw(struct venus_core *core, const char *fwname,
phys_addr_t *mem_phys, size_t *mem_size)
{
const struct firmware *mdt;
struct reserved_mem *rmem;
struct device_node *node;
struct device *dev;
ssize_t fw_size;
void *mem_va;
int ret;
*mem_phys = 0;
*mem_size = 0;
dev = core->dev;
node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (!node) {
dev_err(dev, "no memory-region specified\n");
return -EINVAL;
}
rmem = of_reserved_mem_lookup(node);
of_node_put(node);
if (!rmem) {
dev_err(dev, "failed to lookup reserved memory-region\n");
return -EINVAL;
}
ret = request_firmware(&mdt, fwname, dev);
if (ret < 0)
return ret;
fw_size = qcom_mdt_get_size(mdt);
if (fw_size < 0) {
ret = fw_size;
goto err_release_fw;
}
*mem_phys = rmem->base;
*mem_size = rmem->size;
if (*mem_size < fw_size || fw_size > VENUS_FW_MEM_SIZE) {
ret = -EINVAL;
goto err_release_fw;
}
mem_va = memremap(*mem_phys, *mem_size, MEMREMAP_WC);
if (!mem_va) {
dev_err(dev, "unable to map memory region %pa size %#zx\n", mem_phys, *mem_size);
ret = -ENOMEM;
goto err_release_fw;
}
if (core->use_tz)
ret = qcom_mdt_load(dev, mdt, fwname, VENUS_PAS_ID,
mem_va, *mem_phys, *mem_size, NULL);
else
ret = qcom_mdt_load_no_init(dev, mdt, fwname, VENUS_PAS_ID,
mem_va, *mem_phys, *mem_size, NULL);
memunmap(mem_va);
err_release_fw:
release_firmware(mdt);
return ret;
}
static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys,
size_t mem_size)
{
struct iommu_domain *iommu;
struct device *dev;
int ret;
dev = core->fw.dev;
if (!dev)
return -EPROBE_DEFER;
iommu = core->fw.iommu_domain;
core->fw.mapped_mem_size = mem_size;
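/* The video CPU expects its firmware mapped at IOVA 0 (VENUS_FW_START_ADDR) */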
ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size,
IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV, GFP_KERNEL);
if (ret) {
dev_err(dev, "could not map video firmware region\n");
return ret;
}
venus_reset_cpu(core);
return 0;
}
static int venus_shutdown_no_tz(struct venus_core *core)
{
const size_t mapped = core->fw.mapped_mem_size;
struct iommu_domain *iommu;
size_t unmapped;
u32 reg;
struct device *dev = core->fw.dev;
void __iomem *wrapper_base = core->wrapper_base;
void __iomem *wrapper_tz_base = core->wrapper_tz_base;
if (IS_IRIS2_1(core)) {
/* Assert the reset to XTSS */
reg = readl(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
reg |= WRAPPER_XTSS_SW_RESET_BIT;
writel(reg, wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
} else {
/* Assert the reset to ARM9 */
reg = readl(wrapper_base + WRAPPER_A9SS_SW_RESET);
reg |= WRAPPER_A9SS_SW_RESET_BIT;
writel(reg, wrapper_base + WRAPPER_A9SS_SW_RESET);
}
iommu = core->fw.iommu_domain;
if (core->fw.mapped_mem_size && iommu) {
unmapped = iommu_unmap(iommu, VENUS_FW_START_ADDR, mapped);
if (unmapped != mapped)
dev_err(dev, "failed to unmap firmware\n");
else
core->fw.mapped_mem_size = 0;
}
return 0;
}
int venus_boot(struct venus_core *core)
{
struct device *dev = core->dev;
const struct venus_resources *res = core->res;
const char *fwpath = NULL;
phys_addr_t mem_phys;
size_t mem_size;
int ret;
if (!IS_ENABLED(CONFIG_QCOM_MDT_LOADER) ||
(core->use_tz && !qcom_scm_is_available()))
return -EPROBE_DEFER;
ret = of_property_read_string_index(dev->of_node, "firmware-name", 0,
&fwpath);
if (ret)
fwpath = core->res->fwname;
ret = venus_load_fw(core, fwpath, &mem_phys, &mem_size);
if (ret) {
dev_err(dev, "failed to load video firmware\n");
return -EINVAL;
}
core->fw.mem_size = mem_size;
core->fw.mem_phys = mem_phys;
if (core->use_tz)
ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID);
else
ret = venus_boot_no_tz(core, mem_phys, mem_size);
if (ret)
return ret;
if (core->use_tz && res->cp_size) {
/*
* Clues for porting using downstream data:
* cp_start = 0
* cp_size = venus_ns/virtual-addr-pool[0] - yes, address and not size!
* This works, as the non-secure context bank is placed
* contiguously right after the Content Protection region.
*
* cp_nonpixel_start = venus_sec_non_pixel/virtual-addr-pool[0]
* cp_nonpixel_size = venus_sec_non_pixel/virtual-addr-pool[1]
*/
ret = qcom_scm_mem_protect_video_var(res->cp_start,
res->cp_size,
res->cp_nonpixel_start,
res->cp_nonpixel_size);
if (ret) {
qcom_scm_pas_shutdown(VENUS_PAS_ID);
dev_err(dev, "set virtual address ranges fail (%d)\n",
ret);
return ret;
}
}
return 0;
}
int venus_shutdown(struct venus_core *core)
{
int ret;
if (core->use_tz)
ret = qcom_scm_pas_shutdown(VENUS_PAS_ID);
else
ret = venus_shutdown_no_tz(core);
return ret;
}
int venus_firmware_init(struct venus_core *core)
{
struct platform_device_info info;
struct iommu_domain *iommu_dom;
struct platform_device *pdev;
struct device_node *np;
int ret;
np = of_get_child_by_name(core->dev->of_node, "video-firmware");
if (!np) {
core->use_tz = true;
return 0;
}
memset(&info, 0, sizeof(info));
info.fwnode = &np->fwnode;
info.parent = core->dev;
info.name = np->name;
info.dma_mask = DMA_BIT_MASK(32);
pdev = platform_device_register_full(&info);
if (IS_ERR(pdev)) {
of_node_put(np);
return PTR_ERR(pdev);
}
pdev->dev.of_node = np;
ret = of_dma_configure(&pdev->dev, np, true);
if (ret) {
dev_err(core->dev, "dma configure failed\n");
goto err_unregister;
}
core->fw.dev = &pdev->dev;
iommu_dom = iommu_domain_alloc(&platform_bus_type);
if (!iommu_dom) {
dev_err(core->fw.dev, "Failed to allocate iommu domain\n");
ret = -ENOMEM;
goto err_unregister;
}
ret = iommu_attach_device(iommu_dom, core->fw.dev);
if (ret) {
dev_err(core->fw.dev, "could not attach device\n");
goto err_iommu_free;
}
core->fw.iommu_domain = iommu_dom;
of_node_put(np);
return 0;
err_iommu_free:
iommu_domain_free(iommu_dom);
err_unregister:
platform_device_unregister(pdev);
of_node_put(np);
return ret;
}
void venus_firmware_deinit(struct venus_core *core)
{
struct iommu_domain *iommu;
if (!core->fw.dev)
return;
iommu = core->fw.iommu_domain;
iommu_detach_device(iommu, core->fw.dev);
if (core->fw.iommu_domain) {
iommu_domain_free(iommu);
core->fw.iommu_domain = NULL;
}
platform_device_unregister(to_platform_device(core->fw.dev));
}
| linux-master | drivers/media/platform/qcom/venus/firmware.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
#include <media/videobuf2-v4l2.h>
#include "core.h"
#include "hfi.h"
#include "hfi_helper.h"
#include "hfi_msgs.h"
#include "hfi_parser.h"
#define SMEM_IMG_VER_TBL 469
#define VER_STR_SZ 128
#define SMEM_IMG_OFFSET_VENUS (14 * 128)
static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
struct hfi_msg_event_notify_pkt *pkt)
{
enum hfi_version ver = core->res->hfi_version;
struct hfi_event_data event = {0};
int num_properties_changed;
struct hfi_framesize *frame_sz;
struct hfi_profile_level *profile_level;
struct hfi_bit_depth *pixel_depth;
struct hfi_pic_struct *pic_struct;
struct hfi_colour_space *colour_info;
struct hfi_buffer_requirements *bufreq;
struct hfi_extradata_input_crop *crop;
struct hfi_dpb_counts *dpb_count;
u8 *data_ptr;
u32 ptype;
inst->error = HFI_ERR_NONE;
switch (pkt->event_data1) {
case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES:
case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES:
break;
default:
inst->error = HFI_ERR_SESSION_INVALID_PARAMETER;
goto done;
}
event.event_type = pkt->event_data1;
num_properties_changed = pkt->event_data2;
if (!num_properties_changed) {
inst->error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
goto done;
}
data_ptr = (u8 *)&pkt->ext_event_data[0];
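/* Walk the list of changed properties packed after the packet header */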
do {
ptype = *((u32 *)data_ptr);
switch (ptype) {
case HFI_PROPERTY_PARAM_FRAME_SIZE:
data_ptr += sizeof(u32);
frame_sz = (struct hfi_framesize *)data_ptr;
event.width = frame_sz->width;
event.height = frame_sz->height;
data_ptr += sizeof(*frame_sz);
break;
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
data_ptr += sizeof(u32);
profile_level = (struct hfi_profile_level *)data_ptr;
event.profile = profile_level->profile;
event.level = profile_level->level;
data_ptr += sizeof(*profile_level);
break;
case HFI_PROPERTY_PARAM_VDEC_PIXEL_BITDEPTH:
data_ptr += sizeof(u32);
pixel_depth = (struct hfi_bit_depth *)data_ptr;
event.bit_depth = pixel_depth->bit_depth;
data_ptr += sizeof(*pixel_depth);
break;
case HFI_PROPERTY_PARAM_VDEC_PIC_STRUCT:
data_ptr += sizeof(u32);
pic_struct = (struct hfi_pic_struct *)data_ptr;
event.pic_struct = pic_struct->progressive_only;
data_ptr += sizeof(*pic_struct);
break;
case HFI_PROPERTY_PARAM_VDEC_COLOUR_SPACE:
data_ptr += sizeof(u32);
colour_info = (struct hfi_colour_space *)data_ptr;
event.colour_space = colour_info->colour_space;
data_ptr += sizeof(*colour_info);
break;
case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
data_ptr += sizeof(u32);
event.entropy_mode = *(u32 *)data_ptr;
data_ptr += sizeof(u32);
break;
case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
data_ptr += sizeof(u32);
bufreq = (struct hfi_buffer_requirements *)data_ptr;
event.buf_count = hfi_bufreq_get_count_min(bufreq, ver);
data_ptr += sizeof(*bufreq);
break;
case HFI_INDEX_EXTRADATA_INPUT_CROP:
data_ptr += sizeof(u32);
crop = (struct hfi_extradata_input_crop *)data_ptr;
event.input_crop.left = crop->left;
event.input_crop.top = crop->top;
event.input_crop.width = crop->width;
event.input_crop.height = crop->height;
data_ptr += sizeof(*crop);
break;
case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS:
data_ptr += sizeof(u32);
dpb_count = (struct hfi_dpb_counts *)data_ptr;
event.buf_count = dpb_count->fw_min_cnt;
data_ptr += sizeof(*dpb_count);
break;
default:
break;
}
num_properties_changed--;
} while (num_properties_changed > 0);
done:
inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
}
static void event_release_buffer_ref(struct venus_core *core,
struct venus_inst *inst,
struct hfi_msg_event_notify_pkt *pkt)
{
struct hfi_event_data event = {0};
struct hfi_msg_event_release_buffer_ref_pkt *data;
data = (struct hfi_msg_event_release_buffer_ref_pkt *)
pkt->ext_event_data;
event.event_type = HFI_EVENT_RELEASE_BUFFER_REFERENCE;
event.packet_buffer = data->packet_buffer;
event.extradata_buffer = data->extradata_buffer;
event.tag = data->output_tag;
inst->error = HFI_ERR_NONE;
inst->ops->event_notify(inst, EVT_SYS_EVENT_CHANGE, &event);
}
static void event_sys_error(struct venus_core *core, u32 event,
struct hfi_msg_event_notify_pkt *pkt)
{
if (pkt)
dev_dbg(core->dev, VDBGH
"sys error (session id:%x, data1:%x, data2:%x)\n",
pkt->shdr.session_id, pkt->event_data1,
pkt->event_data2);
core->core_ops->event_notify(core, event);
}
static void
event_session_error(struct venus_core *core, struct venus_inst *inst,
struct hfi_msg_event_notify_pkt *pkt)
{
struct device *dev = core->dev;
dev_dbg(dev, VDBGH "session error: event id:%x, session id:%x\n",
pkt->event_data1, pkt->shdr.session_id);
if (!inst)
return;
switch (pkt->event_data1) {
/* non fatal session errors */
case HFI_ERR_SESSION_INVALID_SCALE_FACTOR:
case HFI_ERR_SESSION_UNSUPPORT_BUFFERTYPE:
case HFI_ERR_SESSION_UNSUPPORTED_SETTING:
case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
inst->error = HFI_ERR_NONE;
break;
default:
dev_err(dev, "session error: event id:%x (%x), session id:%x\n",
pkt->event_data1, pkt->event_data2,
pkt->shdr.session_id);
inst->error = pkt->event_data1;
inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL);
break;
}
}
static void hfi_event_notify(struct venus_core *core, struct venus_inst *inst,
void *packet)
{
struct hfi_msg_event_notify_pkt *pkt = packet;
if (!packet)
return;
switch (pkt->event_id) {
case HFI_EVENT_SYS_ERROR:
event_sys_error(core, EVT_SYS_ERROR, pkt);
break;
case HFI_EVENT_SESSION_ERROR:
event_session_error(core, inst, pkt);
break;
case HFI_EVENT_SESSION_SEQUENCE_CHANGED:
event_seq_changed(core, inst, pkt);
break;
case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
event_release_buffer_ref(core, inst, pkt);
break;
case HFI_EVENT_SESSION_PROPERTY_CHANGED:
break;
default:
break;
}
}
static void hfi_sys_init_done(struct venus_core *core, struct venus_inst *inst,
void *packet)
{
struct hfi_msg_sys_init_done_pkt *pkt = packet;
int rem_bytes;
u32 error;
error = pkt->error_type;
if (error != HFI_ERR_NONE)
goto done;
if (!pkt->num_properties) {
error = HFI_ERR_SYS_INVALID_PARAMETER;
goto done;
}
rem_bytes = pkt->hdr.size - sizeof(*pkt);
if (rem_bytes <= 0) {
/* missing property data */
error = HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
goto done;
}
error = hfi_parser(core, inst, pkt->data, rem_bytes);
done:
core->error = error;
complete(&core->done);
}
static void
sys_get_prop_image_version(struct venus_core *core,
struct hfi_msg_sys_property_info_pkt *pkt)
{
struct device *dev = core->dev;
u8 *smem_tbl_ptr;
u8 *img_ver;
int req_bytes;
size_t smem_blk_sz;
int ret;
req_bytes = pkt->hdr.size - sizeof(*pkt);
if (req_bytes < VER_STR_SZ || !pkt->data[0] || pkt->num_properties > 1)
/* bad packet */
return;
img_ver = pkt->data;
if (!img_ver)
return;
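/*
 * Probe the known version-string layouts in turn; sscanf() returns the
 * number of converted fields, so any match stops the probing.
 */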
ret = sscanf(img_ver, "14:video-firmware.%u.%u-%u",
&core->venus_ver.major, &core->venus_ver.minor, &core->venus_ver.rev);
if (ret)
goto done;
ret = sscanf(img_ver, "14:VIDEO.VPU.%u.%u-%u",
&core->venus_ver.major, &core->venus_ver.minor, &core->venus_ver.rev);
if (ret)
goto done;
ret = sscanf(img_ver, "14:VIDEO.VE.%u.%u-%u",
&core->venus_ver.major, &core->venus_ver.minor, &core->venus_ver.rev);
if (ret)
goto done;
dev_err(dev, VDBGL "error reading F/W version\n");
return;
done:
dev_dbg(dev, VDBGL "F/W version: %s, major %u, minor %u, revision %u\n",
img_ver, core->venus_ver.major, core->venus_ver.minor, core->venus_ver.rev);
smem_tbl_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
SMEM_IMG_VER_TBL, &smem_blk_sz);
if (!IS_ERR(smem_tbl_ptr) && smem_blk_sz >= SMEM_IMG_OFFSET_VENUS + VER_STR_SZ)
memcpy(smem_tbl_ptr + SMEM_IMG_OFFSET_VENUS,
img_ver, VER_STR_SZ);
}
static void hfi_sys_property_info(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_sys_property_info_pkt *pkt = packet;
struct device *dev = core->dev;
if (!pkt->num_properties) {
dev_dbg(dev, VDBGL "no properties\n");
return;
}
switch (pkt->property) {
case HFI_PROPERTY_SYS_IMAGE_VERSION:
sys_get_prop_image_version(core, pkt);
break;
default:
dev_dbg(dev, VDBGL "unknown property data\n");
break;
}
}
static void hfi_sys_rel_resource_done(struct venus_core *core,
struct venus_inst *inst,
void *packet)
{
struct hfi_msg_sys_release_resource_done_pkt *pkt = packet;
core->error = pkt->error_type;
complete(&core->done);
}
static void hfi_sys_ping_done(struct venus_core *core, struct venus_inst *inst,
void *packet)
{
struct hfi_msg_sys_ping_ack_pkt *pkt = packet;
core->error = HFI_ERR_NONE;
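/* Verify the ack echoes the client data sent with the ping */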
if (pkt->client_data != 0xbeef)
core->error = HFI_ERR_SYS_FATAL;
complete(&core->done);
}
static void hfi_sys_idle_done(struct venus_core *core, struct venus_inst *inst,
void *packet)
{
dev_dbg(core->dev, VDBGL "sys idle\n");
}
static void hfi_sys_pc_prepare_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_sys_pc_prep_done_pkt *pkt = packet;
dev_dbg(core->dev, VDBGL "pc prepare done (error %x)\n",
pkt->error_type);
}
static unsigned int
session_get_prop_profile_level(struct hfi_msg_session_property_info_pkt *pkt,
struct hfi_profile_level *profile_level)
{
struct hfi_profile_level *hfi;
u32 req_bytes;
req_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
if (!req_bytes || req_bytes % sizeof(struct hfi_profile_level))
/* bad packet */
return HFI_ERR_SESSION_INVALID_PARAMETER;
hfi = (struct hfi_profile_level *)&pkt->data[0];
profile_level->profile = hfi->profile;
profile_level->level = hfi->level;
return HFI_ERR_NONE;
}
static unsigned int
session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
struct hfi_buffer_requirements *bufreq)
{
struct hfi_buffer_requirements *buf_req;
u32 req_bytes;
unsigned int idx = 0;
req_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
if (!req_bytes || req_bytes % sizeof(*buf_req) || !pkt->data[0])
/* bad packet */
return HFI_ERR_SESSION_INVALID_PARAMETER;
buf_req = (struct hfi_buffer_requirements *)&pkt->data[0];
if (!buf_req)
return HFI_ERR_SESSION_INVALID_PARAMETER;
while (req_bytes) {
memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
idx++;
if (idx > HFI_BUFFER_TYPE_MAX)
return HFI_ERR_SESSION_INVALID_PARAMETER;
req_bytes -= sizeof(struct hfi_buffer_requirements);
buf_req++;
}
return HFI_ERR_NONE;
}
static void hfi_session_prop_info(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_property_info_pkt *pkt = packet;
struct device *dev = core->dev;
union hfi_get_property *hprop = &inst->hprop;
unsigned int error = HFI_ERR_NONE;
if (!pkt->num_properties) {
error = HFI_ERR_SESSION_INVALID_PARAMETER;
dev_err(dev, "%s: no properties\n", __func__);
goto done;
}
switch (pkt->property) {
case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
memset(hprop->bufreq, 0, sizeof(hprop->bufreq));
error = session_get_prop_buf_req(pkt, hprop->bufreq);
break;
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
memset(&hprop->profile_level, 0, sizeof(hprop->profile_level));
error = session_get_prop_profile_level(pkt,
&hprop->profile_level);
break;
case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
break;
default:
dev_dbg(dev, VDBGM "unknown property id:%x\n", pkt->property);
return;
}
done:
inst->error = error;
complete(&inst->done);
}
static void hfi_session_init_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_init_done_pkt *pkt = packet;
int rem_bytes;
u32 error;
error = pkt->error_type;
if (error != HFI_ERR_NONE)
goto done;
if (!IS_V1(core))
goto done;
rem_bytes = pkt->shdr.hdr.size - sizeof(*pkt);
if (rem_bytes <= 0) {
error = HFI_ERR_SESSION_INSUFFICIENT_RESOURCES;
goto done;
}
error = hfi_parser(core, inst, pkt->data, rem_bytes);
done:
inst->error = error;
complete(&inst->done);
}
static void hfi_session_load_res_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_load_resources_done_pkt *pkt = packet;
inst->error = pkt->error_type;
complete(&inst->done);
}
static void hfi_session_flush_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_flush_done_pkt *pkt = packet;
inst->error = pkt->error_type;
complete(&inst->done);
if (inst->ops->flush_done)
inst->ops->flush_done(inst);
}
static void hfi_session_etb_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_empty_buffer_done_pkt *pkt = packet;
inst->error = pkt->error_type;
inst->ops->buf_done(inst, HFI_BUFFER_INPUT, pkt->input_tag,
pkt->filled_len, pkt->offset, 0, 0, 0);
}
static void hfi_session_ftb_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
u32 session_type = inst->session_type;
u64 timestamp_us = 0;
u32 timestamp_hi = 0, timestamp_lo = 0;
unsigned int error;
u32 flags = 0, hfi_flags = 0, offset = 0, filled_len = 0;
u32 pic_type = 0, buffer_type = 0, output_tag = -1;
if (session_type == VIDC_SESSION_TYPE_ENC) {
struct hfi_msg_session_fbd_compressed_pkt *pkt = packet;
timestamp_hi = pkt->time_stamp_hi;
timestamp_lo = pkt->time_stamp_lo;
hfi_flags = pkt->flags;
offset = pkt->offset;
filled_len = pkt->filled_len;
pic_type = pkt->picture_type;
output_tag = pkt->output_tag;
buffer_type = HFI_BUFFER_OUTPUT;
error = pkt->error_type;
} else if (session_type == VIDC_SESSION_TYPE_DEC) {
struct hfi_msg_session_fbd_uncompressed_plane0_pkt *pkt =
packet;
timestamp_hi = pkt->time_stamp_hi;
timestamp_lo = pkt->time_stamp_lo;
hfi_flags = pkt->flags;
offset = pkt->offset;
filled_len = pkt->filled_len;
pic_type = pkt->picture_type;
output_tag = pkt->output_tag;
if (pkt->stream_id == 0)
buffer_type = HFI_BUFFER_OUTPUT;
else if (pkt->stream_id == 1)
buffer_type = HFI_BUFFER_OUTPUT2;
error = pkt->error_type;
} else {
error = HFI_ERR_SESSION_INVALID_PARAMETER;
}
if (buffer_type != HFI_BUFFER_OUTPUT &&
buffer_type != HFI_BUFFER_OUTPUT2)
goto done;
if (hfi_flags & HFI_BUFFERFLAG_EOS)
flags |= V4L2_BUF_FLAG_LAST;
switch (pic_type) {
case HFI_PICTURE_IDR:
case HFI_PICTURE_I:
flags |= V4L2_BUF_FLAG_KEYFRAME;
break;
case HFI_PICTURE_P:
flags |= V4L2_BUF_FLAG_PFRAME;
break;
case HFI_PICTURE_B:
flags |= V4L2_BUF_FLAG_BFRAME;
break;
case HFI_FRAME_NOTCODED:
case HFI_UNUSED_PICT:
case HFI_FRAME_YUV:
default:
break;
}
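/* Reassemble the 64-bit timestamp from its hi/lo halves */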
if (!(hfi_flags & HFI_BUFFERFLAG_TIMESTAMPINVALID) && filled_len) {
timestamp_us = timestamp_hi;
timestamp_us = (timestamp_us << 32) | timestamp_lo;
}
done:
inst->error = error;
inst->ops->buf_done(inst, buffer_type, output_tag, filled_len,
offset, flags, hfi_flags, timestamp_us);
}
static void hfi_session_start_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_start_done_pkt *pkt = packet;
inst->error = pkt->error_type;
complete(&inst->done);
}
static void hfi_session_stop_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_stop_done_pkt *pkt = packet;
inst->error = pkt->error_type;
complete(&inst->done);
}
static void hfi_session_rel_res_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_release_resources_done_pkt *pkt = packet;
inst->error = pkt->error_type;
complete(&inst->done);
}
static void hfi_session_rel_buf_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_release_buffers_done_pkt *pkt = packet;
inst->error = pkt->error_type;
complete(&inst->done);
}
static void hfi_session_end_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_end_done_pkt *pkt = packet;
inst->error = pkt->error_type;
complete(&inst->done);
}
static void hfi_session_abort_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_sys_session_abort_done_pkt *pkt = packet;
inst->error = pkt->error_type;
complete(&inst->done);
}
static void hfi_session_get_seq_hdr_done(struct venus_core *core,
struct venus_inst *inst, void *packet)
{
struct hfi_msg_session_get_sequence_hdr_done_pkt *pkt = packet;
inst->error = pkt->error_type;
complete(&inst->done);
}
struct hfi_done_handler {
u32 pkt;
u32 pkt_sz;
u32 pkt_sz2;
void (*done)(struct venus_core *, struct venus_inst *, void *);
bool is_sys_pkt;
};
static const struct hfi_done_handler handlers[] = {
{.pkt = HFI_MSG_EVENT_NOTIFY,
.pkt_sz = sizeof(struct hfi_msg_event_notify_pkt),
.done = hfi_event_notify,
},
{.pkt = HFI_MSG_SYS_INIT,
.pkt_sz = sizeof(struct hfi_msg_sys_init_done_pkt),
.done = hfi_sys_init_done,
.is_sys_pkt = true,
},
{.pkt = HFI_MSG_SYS_PROPERTY_INFO,
.pkt_sz = sizeof(struct hfi_msg_sys_property_info_pkt),
.done = hfi_sys_property_info,
.is_sys_pkt = true,
},
{.pkt = HFI_MSG_SYS_RELEASE_RESOURCE,
.pkt_sz = sizeof(struct hfi_msg_sys_release_resource_done_pkt),
.done = hfi_sys_rel_resource_done,
.is_sys_pkt = true,
},
{.pkt = HFI_MSG_SYS_PING_ACK,
.pkt_sz = sizeof(struct hfi_msg_sys_ping_ack_pkt),
.done = hfi_sys_ping_done,
.is_sys_pkt = true,
},
{.pkt = HFI_MSG_SYS_IDLE,
.pkt_sz = sizeof(struct hfi_msg_sys_idle_pkt),
.done = hfi_sys_idle_done,
.is_sys_pkt = true,
},
{.pkt = HFI_MSG_SYS_PC_PREP,
.pkt_sz = sizeof(struct hfi_msg_sys_pc_prep_done_pkt),
.done = hfi_sys_pc_prepare_done,
.is_sys_pkt = true,
},
{.pkt = HFI_MSG_SYS_SESSION_INIT,
.pkt_sz = sizeof(struct hfi_msg_session_init_done_pkt),
.done = hfi_session_init_done,
},
{.pkt = HFI_MSG_SYS_SESSION_END,
.pkt_sz = sizeof(struct hfi_msg_session_end_done_pkt),
.done = hfi_session_end_done,
},
{.pkt = HFI_MSG_SESSION_LOAD_RESOURCES,
.pkt_sz = sizeof(struct hfi_msg_session_load_resources_done_pkt),
.done = hfi_session_load_res_done,
},
{.pkt = HFI_MSG_SESSION_START,
.pkt_sz = sizeof(struct hfi_msg_session_start_done_pkt),
.done = hfi_session_start_done,
},
{.pkt = HFI_MSG_SESSION_STOP,
.pkt_sz = sizeof(struct hfi_msg_session_stop_done_pkt),
.done = hfi_session_stop_done,
},
{.pkt = HFI_MSG_SYS_SESSION_ABORT,
.pkt_sz = sizeof(struct hfi_msg_sys_session_abort_done_pkt),
.done = hfi_session_abort_done,
},
{.pkt = HFI_MSG_SESSION_EMPTY_BUFFER,
.pkt_sz = sizeof(struct hfi_msg_session_empty_buffer_done_pkt),
.done = hfi_session_etb_done,
},
{.pkt = HFI_MSG_SESSION_FILL_BUFFER,
.pkt_sz = sizeof(struct hfi_msg_session_fbd_uncompressed_plane0_pkt),
.pkt_sz2 = sizeof(struct hfi_msg_session_fbd_compressed_pkt),
.done = hfi_session_ftb_done,
},
{.pkt = HFI_MSG_SESSION_FLUSH,
.pkt_sz = sizeof(struct hfi_msg_session_flush_done_pkt),
.done = hfi_session_flush_done,
},
{.pkt = HFI_MSG_SESSION_PROPERTY_INFO,
.pkt_sz = sizeof(struct hfi_msg_session_property_info_pkt),
.done = hfi_session_prop_info,
},
{.pkt = HFI_MSG_SESSION_RELEASE_RESOURCES,
.pkt_sz = sizeof(struct hfi_msg_session_release_resources_done_pkt),
.done = hfi_session_rel_res_done,
},
{.pkt = HFI_MSG_SESSION_GET_SEQUENCE_HEADER,
.pkt_sz = sizeof(struct hfi_msg_session_get_sequence_hdr_done_pkt),
.done = hfi_session_get_seq_hdr_done,
},
{.pkt = HFI_MSG_SESSION_RELEASE_BUFFERS,
.pkt_sz = sizeof(struct hfi_msg_session_release_buffers_done_pkt),
.done = hfi_session_rel_buf_done,
},
};
void hfi_process_watchdog_timeout(struct venus_core *core)
{
event_sys_error(core, EVT_SYS_WATCHDOG_TIMEOUT, NULL);
}
static struct venus_inst *to_instance(struct venus_core *core, u32 session_id)
{
struct venus_inst *inst;
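/* Session ids are hash32_ptr() hashes of the instance pointers */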
mutex_lock(&core->lock);
list_for_each_entry(inst, &core->instances, list)
if (hash32_ptr(inst) == session_id) {
mutex_unlock(&core->lock);
return inst;
}
mutex_unlock(&core->lock);
return NULL;
}
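/*
* Top-level response dispatcher: look up the handler for hdr->pkt_type,
* sanity-check the packet size, resolve the session id to a venus_inst
* for session packets (system packets run with inst == NULL) and invoke
* the handler. The packet type is returned to the caller in all cases.
*/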
u32 hfi_process_msg_packet(struct venus_core *core, struct hfi_pkt_hdr *hdr)
{
const struct hfi_done_handler *handler;
struct device *dev = core->dev;
struct venus_inst *inst;
bool found = false;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(handlers); i++) {
handler = &handlers[i];
if (handler->pkt != hdr->pkt_type)
continue;
found = true;
break;
}
if (!found)
return hdr->pkt_type;
if (hdr->size && hdr->size < handler->pkt_sz &&
hdr->size < handler->pkt_sz2) {
dev_err(dev, "bad packet size (%d should be %d, pkt type:%x)\n",
hdr->size, handler->pkt_sz, hdr->pkt_type);
return hdr->pkt_type;
}
if (handler->is_sys_pkt) {
inst = NULL;
} else {
struct hfi_session_pkt *pkt;
pkt = (struct hfi_session_pkt *)hdr;
inst = to_instance(core, pkt->shdr.session_id);
if (!inst)
dev_warn(dev, "no valid instance(pkt session_id:%x, pkt:%x)\n",
pkt->shdr.session_id,
handler ? handler->pkt : 0);
/*
* An event of type HFI_EVENT_SYS_ERROR has no session associated
* with it, so only non-event packets are rejected when the session
* id cannot be resolved.
*/
if (!inst && hdr->pkt_type != HFI_MSG_EVENT_NOTIFY) {
dev_err(dev, "got invalid session id:%x\n",
pkt->shdr.session_id);
goto invalid_session;
}
}
handler->done(core, inst, hdr);
invalid_session:
return hdr->pkt_type;
}
| linux-master | drivers/media/platform/qcom/venus/hfi_msgs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/types.h>
#include <media/v4l2-ctrls.h>
#include "core.h"
#include "venc.h"
#include "helpers.h"
#define BITRATE_MIN 32000
#define BITRATE_MAX 160000000
#define BITRATE_DEFAULT 1000000
#define BITRATE_DEFAULT_PEAK (BITRATE_DEFAULT * 2)
#define BITRATE_STEP 100
#define SLICE_BYTE_SIZE_MAX 1024
#define SLICE_BYTE_SIZE_MIN 1024
#define SLICE_MB_SIZE_MAX 300
#define AT_SLICE_BOUNDARY \
V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY
#define MAX_LTR_FRAME_COUNT 4
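/*
* Split a GOP into B- and P-frame counts for a requested number of
* consecutive B-frames (conseq_b). The search requires b % p == 0 with
* b / p == conseq_b and b + p + 1 == gop_size (the +1 is the leading
* I-frame), which is only solvable when gop_size == p * (conseq_b + 1) + 1.
* Example: gop_size = 31, conseq_b = 2 yields *bf = 20, *pf = 10
* (1 I + 10 P + 20 B, an IBBPBB... pattern). Combinations that cannot
* be split this way are rejected with -EINVAL.
*/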
static int venc_calc_bpframes(u32 gop_size, u32 conseq_b, u32 *bf, u32 *pf)
{
u32 half = (gop_size - 1) >> 1;
u32 b, p, ratio;
bool found = false;
if (!gop_size)
return -EINVAL;
*bf = *pf = 0;
if (!conseq_b) {
*pf = gop_size - 1;
return 0;
}
b = p = half;
for (; b <= gop_size - 1; b++, p--) {
if (b % p)
continue;
ratio = b / p;
if (ratio == conseq_b) {
found = true;
break;
}
if (ratio > conseq_b)
break;
}
if (!found)
return -EINVAL;
if (b + p + 1 != gop_size)
return -EINVAL;
*bf = b;
*pf = p;
return 0;
}
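/*
* s_ctrl mostly caches control values in inst->controls.enc, to be
* applied when the session starts. Controls that may change mid-stream
* (bitrate, header mode, LTR marking/usage, forced key frames) are
* additionally pushed via hfi_session_set_property() when both queues
* are streaming, under inst->lock.
*/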
static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct venus_inst *inst = ctrl_to_inst(ctrl);
struct venc_controls *ctr = &inst->controls.enc;
struct hfi_enable en = { .enable = 1 };
struct hfi_bitrate brate;
struct hfi_ltr_use ltr_use;
struct hfi_ltr_mark ltr_mark;
u32 bframes;
u32 ptype;
int ret;
switch (ctrl->id) {
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
ctr->bitrate_mode = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_BITRATE:
ctr->bitrate = ctrl->val;
mutex_lock(&inst->lock);
if (inst->streamon_out && inst->streamon_cap) {
ptype = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
brate.bitrate = ctr->bitrate;
brate.layer_id = 0;
ret = hfi_session_set_property(inst, ptype, &brate);
if (ret) {
mutex_unlock(&inst->lock);
return ret;
}
}
mutex_unlock(&inst->lock);
break;
case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
ctr->bitrate_peak = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
ctr->h264_entropy_mode = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
ctr->profile.mpeg4 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
ctr->profile.h264 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
ctr->profile.hevc = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
ctr->profile.vp8 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:
ctr->level.mpeg4 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
ctr->level.h264 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
ctr->level.hevc = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
ctr->h264_i_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
ctr->h264_p_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
ctr->h264_b_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:
ctr->h264_min_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP:
ctr->h264_i_min_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP:
ctr->h264_p_min_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP:
ctr->h264_b_min_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:
ctr->h264_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP:
ctr->h264_i_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP:
ctr->h264_p_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP:
ctr->h264_b_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
ctr->hevc_i_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
ctr->hevc_p_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
ctr->hevc_b_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
ctr->hevc_min_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP:
ctr->hevc_i_min_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP:
ctr->hevc_p_min_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP:
ctr->hevc_b_min_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
ctr->hevc_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP:
ctr->hevc_i_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP:
ctr->hevc_p_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP:
ctr->hevc_b_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:
ctr->multi_slice_mode = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:
ctr->multi_slice_max_bytes = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:
ctr->multi_slice_max_mb = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
ctr->h264_loop_filter_alpha = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
ctr->h264_loop_filter_beta = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
ctr->h264_loop_filter_mode = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_HEADER_MODE:
ctr->header_mode = ctrl->val;
mutex_lock(&inst->lock);
if (inst->streamon_out && inst->streamon_cap) {
if (ctrl->val == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE)
en.enable = 0;
else
en.enable = 1;
ptype = HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER;
ret = hfi_session_set_property(inst, ptype, &en);
if (ret) {
mutex_unlock(&inst->lock);
return ret;
}
}
mutex_unlock(&inst->lock);
break;
case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
ret = venc_calc_bpframes(ctrl->val, ctr->num_b_frames, &bframes,
&ctr->num_p_frames);
if (ret)
return ret;
ctr->gop_size = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:
ctr->h264_i_period = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_VPX_MIN_QP:
ctr->vp8_min_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP:
ctr->vp8_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_B_FRAMES:
ret = venc_calc_bpframes(ctr->gop_size, ctrl->val, &bframes,
&ctr->num_p_frames);
if (ret)
return ret;
ctr->num_b_frames = bframes;
break;
case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
mutex_lock(&inst->lock);
if (inst->streamon_out && inst->streamon_cap) {
ptype = HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME;
ret = hfi_session_set_property(inst, ptype, &en);
if (ret) {
mutex_unlock(&inst->lock);
return ret;
}
}
mutex_unlock(&inst->lock);
break;
case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
ctr->rc_enable = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY:
ctr->const_quality = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE:
ctr->frame_skip_mode = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID:
ctr->base_priority_id = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_AU_DELIMITER:
ctr->aud_enable = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_LTR_COUNT:
ctr->ltr_count = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:
mutex_lock(&inst->lock);
if (inst->streamon_out && inst->streamon_cap) {
ptype = HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME;
ltr_mark.mark_frame = ctrl->val;
ret = hfi_session_set_property(inst, ptype, &ltr_mark);
if (ret) {
mutex_unlock(&inst->lock);
return ret;
}
}
mutex_unlock(&inst->lock);
break;
case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:
mutex_lock(&inst->lock);
if (inst->streamon_out && inst->streamon_cap) {
ptype = HFI_PROPERTY_CONFIG_VENC_USELTRFRAME;
ltr_use.ref_ltr = ctrl->val;
ltr_use.use_constrnt = true;
ltr_use.frames = 0;
ret = hfi_session_set_property(inst, ptype, &ltr_use);
if (ret) {
mutex_unlock(&inst->lock);
return ret;
}
}
mutex_unlock(&inst->lock);
break;
case V4L2_CID_COLORIMETRY_HDR10_CLL_INFO:
ctr->cll = *ctrl->p_new.p_hdr10_cll;
break;
case V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY:
ctr->mastering = *ctrl->p_new.p_hdr10_mastering;
break;
case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE:
ctr->intra_refresh_type = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD:
ctr->intra_refresh_period = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:
if (ctr->profile.h264 != V4L2_MPEG_VIDEO_H264_PROFILE_HIGH &&
ctr->profile.h264 != V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
return -EINVAL;
/*
* In the video firmware, the 8x8 transform is supported only for
* high profile (HP) and constrained high profile (CHP). A client
* that wants to disable the 8x8 transform for HP/CHP should set
* main profile (MP) instead, since HP with the 8x8 transform
* disabled is equivalent to MP.
*/
ctr->h264_8x8_transform = ctrl->val;
break;
default:
return -EINVAL;
}
return 0;
}
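/*
* V4L2_CID_MIN_BUFFERS_FOR_OUTPUT is volatile: the minimum input buffer
* count is queried from the firmware buffer requirements on every read.
*/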
static int venc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct venus_inst *inst = ctrl_to_inst(ctrl);
struct hfi_buffer_requirements bufreq;
enum hfi_version ver = inst->core->res->hfi_version;
int ret;
switch (ctrl->id) {
case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
if (!ret)
ctrl->val = hfi_bufreq_get_count_min(&bufreq, ver);
break;
default:
return -EINVAL;
}
return 0;
}
static const struct v4l2_ctrl_ops venc_ctrl_ops = {
.s_ctrl = venc_op_s_ctrl,
.g_volatile_ctrl = venc_op_g_volatile_ctrl,
};
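/*
* Register all encoder controls with their defaults; the "59" passed to
* v4l2_ctrl_handler_init() is only a hint used to size the internal
* hash table. v4l2_ctrl_handler_setup() then invokes s_ctrl for every
* control so the cached values in inst->controls.enc start out
* consistent with the advertised defaults.
*/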
int venc_ctrl_init(struct venus_inst *inst)
{
int ret;
struct v4l2_ctrl_hdr10_mastering_display p_hdr10_mastering = {
{ 34000, 13250, 7500 },
{ 16000, 34500, 3000 }, 15635, 16450, 10000000, 500,
};
struct v4l2_ctrl_hdr10_cll_info p_hdr10_cll = { 1000, 400 };
ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 59);
if (ret)
return ret;
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
~((1 << V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) |
(1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) |
(1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)),
V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
0, V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE,
V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY,
~((1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE) |
(1 << V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE)),
V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
V4L2_MPEG_VIDEO_MPEG4_LEVEL_5,
0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
~((1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN) |
(1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE) |
(1 << V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10)),
V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2,
0, V4L2_MPEG_VIDEO_HEVC_LEVEL_1);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH,
~((1 << V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_MAIN) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_HIGH) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH) |
(1 << V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH)),
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LEVEL,
V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
0, V4L2_MPEG_VIDEO_H264_LEVEL_1_0);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
AT_SLICE_BOUNDARY,
0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEADER_MODE,
V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME,
~((1 << V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) |
(1 << V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)),
V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE,
V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES,
0, V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
V4L2_MPEG_VIDEO_VP8_PROFILE_3,
0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 4, 11, 1, 4);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE, BITRATE_MIN, BITRATE_MAX,
BITRATE_STEP, BITRATE_DEFAULT);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, BITRATE_MIN, BITRATE_MAX,
BITRATE_STEP, BITRATE_DEFAULT_PEAK);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 1, 51, 1, 26);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 1, 51, 1, 28);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, 1, 51, 1, 30);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 1, 51, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP, 1, 51, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM, 0, 1, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP, 1, 51, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MIN_QP, 1, 51, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 1, 51, 1, 51);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP, 1, 51, 1, 51);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP, 1, 51, 1, 51);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_B_FRAME_MAX_QP, 1, 51, 1, 51);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP, 1, 63, 1, 26);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP, 1, 63, 1, 28);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP, 1, 63, 1, 30);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, 1, 63, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MIN_QP, 1, 63, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MIN_QP, 1, 63, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MIN_QP, 1, 63, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP, 1, 63, 1, 63);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_MAX_QP, 1, 63, 1, 63);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_MAX_QP, 1, 63, 1, 63);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_MAX_QP, 1, 63, 1, 63);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, SLICE_BYTE_SIZE_MIN,
SLICE_BYTE_SIZE_MAX, 1, SLICE_BYTE_SIZE_MIN);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, 1,
SLICE_MB_SIZE_MAX, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, (1 << 16) - 1, 1, 30);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_VPX_MIN_QP, 1, 128, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_VPX_MAX_QP, 1, 128, 1, 128);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 4, 1, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_I_PERIOD, 0, (1 << 16) - 1, 1, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME, 0, 0, 0, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 0, 1, 1, 1);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY, 0, 100, 1, 0);
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_FRAME_SKIP_MODE,
V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT,
~((1 << V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED) |
(1 << V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT)),
V4L2_MPEG_VIDEO_FRAME_SKIP_MODE_DISABLED);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_BASELAYER_PRIORITY_ID, 0,
6, 1, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_AU_DELIMITER, 0, 1, 1, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES, 0,
((1 << MAX_LTR_FRAME_COUNT) - 1), 0, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_LTR_COUNT, 0,
MAX_LTR_FRAME_COUNT, 1, 0);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX, 0,
(MAX_LTR_FRAME_COUNT - 1), 1, 0);
v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_COLORIMETRY_HDR10_CLL_INFO,
v4l2_ctrl_ptr_create(&p_hdr10_cll));
v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY,
v4l2_ctrl_ptr_create((void *)&p_hdr10_mastering));
v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE,
V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC,
0, V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_RANDOM);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD, 0,
((4096 * 2304) >> 8), 1, 0);
ret = inst->ctrl_handler.error;
if (ret)
goto err;
ret = v4l2_ctrl_handler_setup(&inst->ctrl_handler);
if (ret)
goto err;
return 0;
err:
v4l2_ctrl_handler_free(&inst->ctrl_handler);
return ret;
}
void venc_ctrl_deinit(struct venus_inst *inst)
{
v4l2_ctrl_handler_free(&inst->ctrl_handler);
}
| linux-master | drivers/media/platform/qcom/venus/venc_ctrls.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-mem2mem.h>
#include <asm/div64.h>
#include "core.h"
#include "helpers.h"
#include "hfi_helper.h"
#include "pm_helpers.h"
#include "hfi_platform.h"
#include "hfi_parser.h"
#define NUM_MBS_720P (((ALIGN(1280, 16)) >> 4) * ((ALIGN(736, 16)) >> 4))
#define NUM_MBS_4K (((ALIGN(4096, 16)) >> 4) * ((ALIGN(2304, 16)) >> 4))
enum dpb_buf_owner {
DRIVER,
FIRMWARE,
};
struct intbuf {
struct list_head list;
u32 type;
size_t size;
void *va;
dma_addr_t da;
unsigned long attrs;
enum dpb_buf_owner owned_by;
u32 dpb_out_tag;
};
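/*
* Map a V4L2 compressed pixel format onto the HFI codec bit and check
* it against the codec mask the firmware reported for this session type
* (core->enc_codecs / core->dec_codecs).
*/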
bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
{
struct venus_core *core = inst->core;
u32 session_type = inst->session_type;
u32 codec;
switch (v4l2_pixfmt) {
case V4L2_PIX_FMT_H264:
codec = HFI_VIDEO_CODEC_H264;
break;
case V4L2_PIX_FMT_H263:
codec = HFI_VIDEO_CODEC_H263;
break;
case V4L2_PIX_FMT_MPEG1:
codec = HFI_VIDEO_CODEC_MPEG1;
break;
case V4L2_PIX_FMT_MPEG2:
codec = HFI_VIDEO_CODEC_MPEG2;
break;
case V4L2_PIX_FMT_MPEG4:
codec = HFI_VIDEO_CODEC_MPEG4;
break;
case V4L2_PIX_FMT_VC1_ANNEX_G:
case V4L2_PIX_FMT_VC1_ANNEX_L:
codec = HFI_VIDEO_CODEC_VC1;
break;
case V4L2_PIX_FMT_VP8:
codec = HFI_VIDEO_CODEC_VP8;
break;
case V4L2_PIX_FMT_VP9:
codec = HFI_VIDEO_CODEC_VP9;
break;
case V4L2_PIX_FMT_XVID:
codec = HFI_VIDEO_CODEC_DIVX;
break;
case V4L2_PIX_FMT_HEVC:
codec = HFI_VIDEO_CODEC_HEVC;
break;
default:
return false;
}
if (session_type == VIDC_SESSION_TYPE_ENC && core->enc_codecs & codec)
return true;
if (session_type == VIDC_SESSION_TYPE_DEC && core->dec_codecs & codec)
return true;
return false;
}
EXPORT_SYMBOL_GPL(venus_helper_check_codec);
static void free_dpb_buf(struct venus_inst *inst, struct intbuf *buf)
{
ida_free(&inst->dpb_ids, buf->dpb_out_tag);
list_del_init(&buf->list);
dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
buf->attrs);
kfree(buf);
}
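/*
* Queue all driver-owned DPB buffers back to the firmware. Buffers that
* are too small for the current sequence (left over from before a
* resolution change) are freed instead; ownership of successfully
* queued buffers moves to FIRMWARE until the firmware returns them.
*/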
int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
{
struct intbuf *buf, *next;
unsigned int dpb_size = 0;
int ret = 0;
if (inst->dpb_buftype == HFI_BUFFER_OUTPUT)
dpb_size = inst->output_buf_size;
else if (inst->dpb_buftype == HFI_BUFFER_OUTPUT2)
dpb_size = inst->output2_buf_size;
list_for_each_entry_safe(buf, next, &inst->dpbbufs, list) {
struct hfi_frame_data fdata;
memset(&fdata, 0, sizeof(fdata));
fdata.alloc_len = buf->size;
fdata.device_addr = buf->da;
fdata.buffer_type = buf->type;
if (buf->owned_by == FIRMWARE)
continue;
/* free too-small buffers left over from a previous sequence */
if (dpb_size > buf->size) {
free_dpb_buf(inst, buf);
continue;
}
fdata.clnt_data = buf->dpb_out_tag;
ret = hfi_session_process_buf(inst, &fdata);
if (ret)
goto fail;
buf->owned_by = FIRMWARE;
}
fail:
return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_queue_dpb_bufs);
int venus_helper_free_dpb_bufs(struct venus_inst *inst)
{
struct intbuf *buf, *n;
list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) {
if (buf->owned_by == FIRMWARE)
continue;
free_dpb_buf(inst, buf);
}
if (list_empty(&inst->dpbbufs))
INIT_LIST_HEAD(&inst->dpbbufs);
return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_free_dpb_bufs);
int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
struct device *dev = core->dev;
enum hfi_version ver = core->res->hfi_version;
struct hfi_buffer_requirements bufreq;
u32 buftype = inst->dpb_buftype;
unsigned int dpb_size = 0;
struct intbuf *buf;
unsigned int i;
u32 count;
int ret;
int id;
/* no need to allocate dpb buffers */
if (!inst->dpb_fmt)
return 0;
if (inst->dpb_buftype == HFI_BUFFER_OUTPUT)
dpb_size = inst->output_buf_size;
else if (inst->dpb_buftype == HFI_BUFFER_OUTPUT2)
dpb_size = inst->output2_buf_size;
if (!dpb_size)
return 0;
ret = venus_helper_get_bufreq(inst, buftype, &bufreq);
if (ret)
return ret;
count = hfi_bufreq_get_count_min(&bufreq, ver);
for (i = 0; i < count; i++) {
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto fail;
}
buf->type = buftype;
buf->size = dpb_size;
buf->attrs = DMA_ATTR_WRITE_COMBINE |
DMA_ATTR_NO_KERNEL_MAPPING;
buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
buf->attrs);
if (!buf->va) {
ret = -ENOMEM;
goto fail;
}
buf->owned_by = DRIVER;
id = ida_alloc_min(&inst->dpb_ids, VB2_MAX_FRAME, GFP_KERNEL);
if (id < 0) {
ret = id;
goto fail;
}
buf->dpb_out_tag = id;
list_add_tail(&buf->list, &inst->dpbbufs);
}
return 0;
fail:
kfree(buf);
venus_helper_free_dpb_bufs(inst);
return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_alloc_dpb_bufs);
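/*
* Allocate and register one class of internal (scratch/persist) buffers:
* query the buffer requirements for @type, dma_alloc each buffer with no
* kernel mapping, announce it with hfi_session_set_buffers() and track
* it on inst->internalbufs for later teardown.
*/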
static int intbufs_set_buffer(struct venus_inst *inst, u32 type)
{
struct venus_core *core = inst->core;
struct device *dev = core->dev;
struct hfi_buffer_requirements bufreq;
struct hfi_buffer_desc bd;
struct intbuf *buf;
unsigned int i;
int ret;
ret = venus_helper_get_bufreq(inst, type, &bufreq);
if (ret)
return 0;
if (!bufreq.size)
return 0;
for (i = 0; i < bufreq.count_actual; i++) {
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto fail;
}
buf->type = bufreq.type;
buf->size = bufreq.size;
buf->attrs = DMA_ATTR_WRITE_COMBINE |
DMA_ATTR_NO_KERNEL_MAPPING;
buf->va = dma_alloc_attrs(dev, buf->size, &buf->da, GFP_KERNEL,
buf->attrs);
if (!buf->va) {
ret = -ENOMEM;
goto fail;
}
memset(&bd, 0, sizeof(bd));
bd.buffer_size = buf->size;
bd.buffer_type = buf->type;
bd.num_buffers = 1;
bd.device_addr = buf->da;
ret = hfi_session_set_buffers(inst, &bd);
if (ret) {
dev_err(dev, "set session buffers failed\n");
goto dma_free;
}
list_add_tail(&buf->list, &inst->internalbufs);
}
return 0;
dma_free:
dma_free_attrs(dev, buf->size, buf->va, buf->da, buf->attrs);
fail:
kfree(buf);
return ret;
}
static int intbufs_unset_buffers(struct venus_inst *inst)
{
struct hfi_buffer_desc bd = {0};
struct intbuf *buf, *n;
int ret = 0;
list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
bd.buffer_size = buf->size;
bd.buffer_type = buf->type;
bd.num_buffers = 1;
bd.device_addr = buf->da;
bd.response_required = true;
ret = hfi_session_unset_buffers(inst, &bd);
list_del_init(&buf->list);
dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
buf->attrs);
kfree(buf);
}
return ret;
}
static const unsigned int intbuf_types_1xx[] = {
HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_1XX),
HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_1XX),
HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_1XX),
HFI_BUFFER_INTERNAL_PERSIST,
HFI_BUFFER_INTERNAL_PERSIST_1,
};
static const unsigned int intbuf_types_4xx[] = {
HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_4XX),
HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_4XX),
HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_4XX),
HFI_BUFFER_INTERNAL_PERSIST,
HFI_BUFFER_INTERNAL_PERSIST_1,
};
static const unsigned int intbuf_types_6xx[] = {
HFI_BUFFER_INTERNAL_SCRATCH(HFI_VERSION_6XX),
HFI_BUFFER_INTERNAL_SCRATCH_1(HFI_VERSION_6XX),
HFI_BUFFER_INTERNAL_SCRATCH_2(HFI_VERSION_6XX),
HFI_BUFFER_INTERNAL_PERSIST,
HFI_BUFFER_INTERNAL_PERSIST_1,
};
int venus_helper_intbufs_alloc(struct venus_inst *inst)
{
const unsigned int *intbuf;
size_t arr_sz, i;
int ret;
if (IS_V6(inst->core)) {
arr_sz = ARRAY_SIZE(intbuf_types_6xx);
intbuf = intbuf_types_6xx;
} else if (IS_V4(inst->core)) {
arr_sz = ARRAY_SIZE(intbuf_types_4xx);
intbuf = intbuf_types_4xx;
} else {
arr_sz = ARRAY_SIZE(intbuf_types_1xx);
intbuf = intbuf_types_1xx;
}
for (i = 0; i < arr_sz; i++) {
ret = intbufs_set_buffer(inst, intbuf[i]);
if (ret)
goto error;
}
return 0;
error:
intbufs_unset_buffers(inst);
return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_intbufs_alloc);
int venus_helper_intbufs_free(struct venus_inst *inst)
{
return intbufs_unset_buffers(inst);
}
EXPORT_SYMBOL_GPL(venus_helper_intbufs_free);
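/*
* On a resolution change only the scratch buffers are recycled: persist
* buffers survive the sequence, everything else is unset, freed and
* allocated again at the new size.
*/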
int venus_helper_intbufs_realloc(struct venus_inst *inst)
{
enum hfi_version ver = inst->core->res->hfi_version;
struct hfi_buffer_desc bd;
struct intbuf *buf, *n;
int ret;
list_for_each_entry_safe(buf, n, &inst->internalbufs, list) {
if (buf->type == HFI_BUFFER_INTERNAL_PERSIST ||
buf->type == HFI_BUFFER_INTERNAL_PERSIST_1)
continue;
memset(&bd, 0, sizeof(bd));
bd.buffer_size = buf->size;
bd.buffer_type = buf->type;
bd.num_buffers = 1;
bd.device_addr = buf->da;
bd.response_required = true;
ret = hfi_session_unset_buffers(inst, &bd);
dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
buf->attrs);
list_del_init(&buf->list);
kfree(buf);
}
ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH(ver));
if (ret)
goto err;
ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH_1(ver));
if (ret)
goto err;
ret = intbufs_set_buffer(inst, HFI_BUFFER_INTERNAL_SCRATCH_2(ver));
if (ret)
goto err;
return 0;
err:
return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_intbufs_realloc);
static void fill_buffer_desc(const struct venus_buffer *buf,
struct hfi_buffer_desc *bd, bool response)
{
memset(bd, 0, sizeof(*bd));
bd->buffer_type = HFI_BUFFER_OUTPUT;
bd->buffer_size = buf->size;
bd->num_buffers = 1;
bd->device_addr = buf->dma_addr;
bd->response_required = response;
}
static void return_buf_error(struct venus_inst *inst,
struct vb2_v4l2_buffer *vbuf)
{
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
v4l2_m2m_src_buf_remove_by_buf(m2m_ctx, vbuf);
else
v4l2_m2m_dst_buf_remove_by_buf(m2m_ctx, vbuf);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}
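/*
* Decoder timestamp bookkeeping: the firmware only round-trips a
* microsecond timestamp, so per-buffer metadata (flags, timecode and
* the original nanosecond timestamp) is stashed in a free slot of
* inst->tss[] on the way in and restored by
* venus_helper_get_ts_metadata() when a capture buffer with a matching
* microsecond timestamp comes back.
*/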
static void
put_ts_metadata(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
struct vb2_buffer *vb = &vbuf->vb2_buf;
unsigned int i;
int slot = -1;
u64 ts_us = vb->timestamp;
for (i = 0; i < ARRAY_SIZE(inst->tss); i++) {
if (!inst->tss[i].used) {
slot = i;
break;
}
}
if (slot == -1) {
dev_dbg(inst->core->dev, VDBGL "no free slot\n");
return;
}
do_div(ts_us, NSEC_PER_USEC);
inst->tss[slot].used = true;
inst->tss[slot].flags = vbuf->flags;
inst->tss[slot].tc = vbuf->timecode;
inst->tss[slot].ts_us = ts_us;
inst->tss[slot].ts_ns = vb->timestamp;
}
void venus_helper_get_ts_metadata(struct venus_inst *inst, u64 timestamp_us,
struct vb2_v4l2_buffer *vbuf)
{
struct vb2_buffer *vb = &vbuf->vb2_buf;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(inst->tss); ++i) {
if (!inst->tss[i].used)
continue;
if (inst->tss[i].ts_us != timestamp_us)
continue;
inst->tss[i].used = false;
vbuf->flags |= inst->tss[i].flags;
vbuf->timecode = inst->tss[i].tc;
vb->timestamp = inst->tss[i].ts_ns;
break;
}
}
EXPORT_SYMBOL_GPL(venus_helper_get_ts_metadata);
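/*
* Translate a vb2 buffer into an hfi_frame_data descriptor and hand it
* to the firmware: output (input-to-codec) buffers carry payload length,
* data offset, a microsecond timestamp and the EOS flag, while capture
* buffers are queued empty.
*/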
static int
session_process_buf(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
struct venus_buffer *buf = to_venus_buffer(vbuf);
struct vb2_buffer *vb = &vbuf->vb2_buf;
unsigned int type = vb->type;
struct hfi_frame_data fdata;
memset(&fdata, 0, sizeof(fdata));
fdata.alloc_len = buf->size;
fdata.device_addr = buf->dma_addr;
fdata.timestamp = vb->timestamp;
do_div(fdata.timestamp, NSEC_PER_USEC);
fdata.flags = 0;
fdata.clnt_data = vbuf->vb2_buf.index;
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
fdata.buffer_type = HFI_BUFFER_INPUT;
fdata.filled_len = vb2_get_plane_payload(vb, 0);
fdata.offset = vb->planes[0].data_offset;
if (vbuf->flags & V4L2_BUF_FLAG_LAST || !fdata.filled_len)
fdata.flags |= HFI_BUFFERFLAG_EOS;
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
put_ts_metadata(inst, vbuf);
venus_pm_load_scale(inst);
} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (inst->session_type == VIDC_SESSION_TYPE_ENC)
fdata.buffer_type = HFI_BUFFER_OUTPUT;
else
fdata.buffer_type = inst->opb_buftype;
fdata.filled_len = 0;
fdata.offset = 0;
}
return hfi_session_process_buf(inst, &fdata);
}
static bool is_dynamic_bufmode(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
struct hfi_plat_caps *caps;
/*
* v4 and v6 don't send the BUFFER_ALLOC_MODE_SUPPORTED property and
* support dynamic buffer mode by default for HFI_BUFFER_OUTPUT/OUTPUT2;
* older versions advertise it per codec via the platform capabilities.
*/
if (IS_V4(core) || IS_V6(core))
return true;
caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
if (!caps)
return false;
return caps->cap_bufs_mode_dynamic;
}
int venus_helper_unregister_bufs(struct venus_inst *inst)
{
struct venus_buffer *buf, *n;
struct hfi_buffer_desc bd;
int ret = 0;
if (is_dynamic_bufmode(inst))
return 0;
list_for_each_entry_safe(buf, n, &inst->registeredbufs, reg_list) {
fill_buffer_desc(buf, &bd, true);
ret = hfi_session_unset_buffers(inst, &bd);
list_del_init(&buf->reg_list);
}
return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_unregister_bufs);
static int session_register_bufs(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
struct device *dev = core->dev;
struct hfi_buffer_desc bd;
struct venus_buffer *buf;
int ret = 0;
if (is_dynamic_bufmode(inst))
return 0;
list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
fill_buffer_desc(buf, &bd, false);
ret = hfi_session_set_buffers(inst, &bd);
if (ret) {
dev_err(dev, "%s: set buffer failed\n", __func__);
break;
}
}
return ret;
}
static u32 to_hfi_raw_fmt(u32 v4l2_fmt)
{
switch (v4l2_fmt) {
case V4L2_PIX_FMT_NV12:
return HFI_COLOR_FORMAT_NV12;
case V4L2_PIX_FMT_NV21:
return HFI_COLOR_FORMAT_NV21;
case V4L2_PIX_FMT_QC08C:
return HFI_COLOR_FORMAT_NV12_UBWC;
case V4L2_PIX_FMT_QC10C:
return HFI_COLOR_FORMAT_YUV420_TP10_UBWC;
case V4L2_PIX_FMT_P010:
return HFI_COLOR_FORMAT_P010;
default:
break;
}
return 0;
}
static int platform_get_bufreq(struct venus_inst *inst, u32 buftype,
struct hfi_buffer_requirements *req)
{
enum hfi_version version = inst->core->res->hfi_version;
const struct hfi_platform *hfi_plat;
struct hfi_plat_buffers_params params;
bool is_dec = inst->session_type == VIDC_SESSION_TYPE_DEC;
struct venc_controls *enc_ctr = &inst->controls.enc;
hfi_plat = hfi_platform_get(version);
if (!hfi_plat || !hfi_plat->bufreq)
return -EINVAL;
params.version = version;
params.num_vpp_pipes = inst->core->res->num_vpp_pipes;
if (is_dec) {
params.width = inst->width;
params.height = inst->height;
params.out_width = inst->out_width;
params.out_height = inst->out_height;
params.codec = inst->fmt_out->pixfmt;
params.hfi_color_fmt = to_hfi_raw_fmt(inst->fmt_cap->pixfmt);
params.dec.max_mbs_per_frame = mbs_per_frame_max(inst);
params.dec.buffer_size_limit = 0;
params.dec.is_secondary_output =
inst->opb_buftype == HFI_BUFFER_OUTPUT2;
if (params.dec.is_secondary_output)
params.hfi_dpb_color_fmt = inst->dpb_fmt;
params.dec.is_interlaced =
inst->pic_struct != HFI_INTERLACE_FRAME_PROGRESSIVE;
} else {
params.width = inst->out_width;
params.height = inst->out_height;
params.codec = inst->fmt_cap->pixfmt;
params.hfi_color_fmt = to_hfi_raw_fmt(inst->fmt_out->pixfmt);
params.enc.work_mode = VIDC_WORK_MODE_2;
params.enc.rc_type = HFI_RATE_CONTROL_OFF;
if (enc_ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
params.enc.rc_type = HFI_RATE_CONTROL_CQ;
params.enc.num_b_frames = enc_ctr->num_b_frames;
params.enc.is_tenbit = inst->bit_depth == VIDC_BITDEPTH_10;
}
return hfi_plat->bufreq(&params, inst->session_type, buftype, req);
}
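/*
* Buffer requirements come from the platform tables when available; the
* cached firmware minimum (inst->fw_min_cnt) seeds the output buffer
* count and is refreshed on success. If no platform helper exists, the
* requirements are queried from the firmware via
* HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS instead.
*/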
int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
struct hfi_buffer_requirements *req)
{
u32 ptype = HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS;
enum hfi_version ver = inst->core->res->hfi_version;
union hfi_get_property hprop;
unsigned int i;
int ret;
memset(req, 0, sizeof(*req));
if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2)
hfi_bufreq_set_count_min(req, ver, inst->fw_min_cnt);
ret = platform_get_bufreq(inst, type, req);
if (!ret) {
if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2)
inst->fw_min_cnt = hfi_bufreq_get_count_min(req, ver);
return 0;
}
ret = hfi_session_get_property(inst, ptype, &hprop);
if (ret)
return ret;
ret = -EINVAL;
for (i = 0; i < HFI_BUFFER_TYPE_MAX; i++) {
if (hprop.bufreq[i].type != type)
continue;
memcpy(req, &hprop.bufreq[i], sizeof(*req));
ret = 0;
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_get_bufreq);
struct id_mapping {
u32 hfi_id;
u32 v4l2_id;
};
static const struct id_mapping mpeg4_profiles[] = {
{ HFI_MPEG4_PROFILE_SIMPLE, V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE },
{ HFI_MPEG4_PROFILE_ADVANCEDSIMPLE, V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE },
};
static const struct id_mapping mpeg4_levels[] = {
{ HFI_MPEG4_LEVEL_0, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0 },
{ HFI_MPEG4_LEVEL_0b, V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B },
{ HFI_MPEG4_LEVEL_1, V4L2_MPEG_VIDEO_MPEG4_LEVEL_1 },
{ HFI_MPEG4_LEVEL_2, V4L2_MPEG_VIDEO_MPEG4_LEVEL_2 },
{ HFI_MPEG4_LEVEL_3, V4L2_MPEG_VIDEO_MPEG4_LEVEL_3 },
{ HFI_MPEG4_LEVEL_4, V4L2_MPEG_VIDEO_MPEG4_LEVEL_4 },
{ HFI_MPEG4_LEVEL_5, V4L2_MPEG_VIDEO_MPEG4_LEVEL_5 },
};
static const struct id_mapping mpeg2_profiles[] = {
{ HFI_MPEG2_PROFILE_SIMPLE, V4L2_MPEG_VIDEO_MPEG2_PROFILE_SIMPLE },
{ HFI_MPEG2_PROFILE_MAIN, V4L2_MPEG_VIDEO_MPEG2_PROFILE_MAIN },
{ HFI_MPEG2_PROFILE_SNR, V4L2_MPEG_VIDEO_MPEG2_PROFILE_SNR_SCALABLE },
{ HFI_MPEG2_PROFILE_SPATIAL, V4L2_MPEG_VIDEO_MPEG2_PROFILE_SPATIALLY_SCALABLE },
{ HFI_MPEG2_PROFILE_HIGH, V4L2_MPEG_VIDEO_MPEG2_PROFILE_HIGH },
};
static const struct id_mapping mpeg2_levels[] = {
{ HFI_MPEG2_LEVEL_LL, V4L2_MPEG_VIDEO_MPEG2_LEVEL_LOW },
{ HFI_MPEG2_LEVEL_ML, V4L2_MPEG_VIDEO_MPEG2_LEVEL_MAIN },
{ HFI_MPEG2_LEVEL_H14, V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH_1440 },
{ HFI_MPEG2_LEVEL_HL, V4L2_MPEG_VIDEO_MPEG2_LEVEL_HIGH },
};
static const struct id_mapping h264_profiles[] = {
{ HFI_H264_PROFILE_BASELINE, V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE },
{ HFI_H264_PROFILE_MAIN, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN },
{ HFI_H264_PROFILE_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH },
{ HFI_H264_PROFILE_STEREO_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH },
{ HFI_H264_PROFILE_MULTIVIEW_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH },
{ HFI_H264_PROFILE_CONSTRAINED_BASE, V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE },
{ HFI_H264_PROFILE_CONSTRAINED_HIGH, V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH },
};
static const struct id_mapping h264_levels[] = {
{ HFI_H264_LEVEL_1, V4L2_MPEG_VIDEO_H264_LEVEL_1_0 },
{ HFI_H264_LEVEL_1b, V4L2_MPEG_VIDEO_H264_LEVEL_1B },
{ HFI_H264_LEVEL_11, V4L2_MPEG_VIDEO_H264_LEVEL_1_1 },
{ HFI_H264_LEVEL_12, V4L2_MPEG_VIDEO_H264_LEVEL_1_2 },
{ HFI_H264_LEVEL_13, V4L2_MPEG_VIDEO_H264_LEVEL_1_3 },
{ HFI_H264_LEVEL_2, V4L2_MPEG_VIDEO_H264_LEVEL_2_0 },
{ HFI_H264_LEVEL_21, V4L2_MPEG_VIDEO_H264_LEVEL_2_1 },
{ HFI_H264_LEVEL_22, V4L2_MPEG_VIDEO_H264_LEVEL_2_2 },
{ HFI_H264_LEVEL_3, V4L2_MPEG_VIDEO_H264_LEVEL_3_0 },
{ HFI_H264_LEVEL_31, V4L2_MPEG_VIDEO_H264_LEVEL_3_1 },
{ HFI_H264_LEVEL_32, V4L2_MPEG_VIDEO_H264_LEVEL_3_2 },
{ HFI_H264_LEVEL_4, V4L2_MPEG_VIDEO_H264_LEVEL_4_0 },
{ HFI_H264_LEVEL_41, V4L2_MPEG_VIDEO_H264_LEVEL_4_1 },
{ HFI_H264_LEVEL_42, V4L2_MPEG_VIDEO_H264_LEVEL_4_2 },
{ HFI_H264_LEVEL_5, V4L2_MPEG_VIDEO_H264_LEVEL_5_0 },
{ HFI_H264_LEVEL_51, V4L2_MPEG_VIDEO_H264_LEVEL_5_1 },
{ HFI_H264_LEVEL_52, V4L2_MPEG_VIDEO_H264_LEVEL_5_1 },
};
static const struct id_mapping hevc_profiles[] = {
{ HFI_HEVC_PROFILE_MAIN, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN },
{ HFI_HEVC_PROFILE_MAIN_STILL_PIC, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE },
{ HFI_HEVC_PROFILE_MAIN10, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10 },
};
static const struct id_mapping hevc_levels[] = {
{ HFI_HEVC_LEVEL_1, V4L2_MPEG_VIDEO_HEVC_LEVEL_1 },
{ HFI_HEVC_LEVEL_2, V4L2_MPEG_VIDEO_HEVC_LEVEL_2 },
{ HFI_HEVC_LEVEL_21, V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1 },
{ HFI_HEVC_LEVEL_3, V4L2_MPEG_VIDEO_HEVC_LEVEL_3 },
{ HFI_HEVC_LEVEL_31, V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1 },
{ HFI_HEVC_LEVEL_4, V4L2_MPEG_VIDEO_HEVC_LEVEL_4 },
{ HFI_HEVC_LEVEL_41, V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1 },
{ HFI_HEVC_LEVEL_5, V4L2_MPEG_VIDEO_HEVC_LEVEL_5 },
{ HFI_HEVC_LEVEL_51, V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1 },
{ HFI_HEVC_LEVEL_52, V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2 },
{ HFI_HEVC_LEVEL_6, V4L2_MPEG_VIDEO_HEVC_LEVEL_6 },
{ HFI_HEVC_LEVEL_61, V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1 },
{ HFI_HEVC_LEVEL_62, V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2 },
};
static const struct id_mapping vp8_profiles[] = {
{ HFI_VPX_PROFILE_VERSION_0, V4L2_MPEG_VIDEO_VP8_PROFILE_0 },
{ HFI_VPX_PROFILE_VERSION_1, V4L2_MPEG_VIDEO_VP8_PROFILE_1 },
{ HFI_VPX_PROFILE_VERSION_2, V4L2_MPEG_VIDEO_VP8_PROFILE_2 },
{ HFI_VPX_PROFILE_VERSION_3, V4L2_MPEG_VIDEO_VP8_PROFILE_3 },
};
static const struct id_mapping vp9_profiles[] = {
{ HFI_VP9_PROFILE_P0, V4L2_MPEG_VIDEO_VP9_PROFILE_0 },
{ HFI_VP9_PROFILE_P2_10B, V4L2_MPEG_VIDEO_VP9_PROFILE_2 },
};
static const struct id_mapping vp9_levels[] = {
{ HFI_VP9_LEVEL_1, V4L2_MPEG_VIDEO_VP9_LEVEL_1_0 },
{ HFI_VP9_LEVEL_11, V4L2_MPEG_VIDEO_VP9_LEVEL_1_1 },
{ HFI_VP9_LEVEL_2, V4L2_MPEG_VIDEO_VP9_LEVEL_2_0},
{ HFI_VP9_LEVEL_21, V4L2_MPEG_VIDEO_VP9_LEVEL_2_1 },
{ HFI_VP9_LEVEL_3, V4L2_MPEG_VIDEO_VP9_LEVEL_3_0},
{ HFI_VP9_LEVEL_31, V4L2_MPEG_VIDEO_VP9_LEVEL_3_1 },
{ HFI_VP9_LEVEL_4, V4L2_MPEG_VIDEO_VP9_LEVEL_4_0 },
{ HFI_VP9_LEVEL_41, V4L2_MPEG_VIDEO_VP9_LEVEL_4_1 },
{ HFI_VP9_LEVEL_5, V4L2_MPEG_VIDEO_VP9_LEVEL_5_0 },
{ HFI_VP9_LEVEL_51, V4L2_MPEG_VIDEO_VP9_LEVEL_5_1 },
{ HFI_VP9_LEVEL_6, V4L2_MPEG_VIDEO_VP9_LEVEL_6_0 },
{ HFI_VP9_LEVEL_61, V4L2_MPEG_VIDEO_VP9_LEVEL_6_1 },
};
static u32 find_v4l2_id(u32 hfi_id, const struct id_mapping *array, unsigned int array_sz)
{
unsigned int i;
if (!array || !array_sz)
return 0;
for (i = 0; i < array_sz; i++)
if (hfi_id == array[i].hfi_id)
return array[i].v4l2_id;
return 0;
}
static u32 find_hfi_id(u32 v4l2_id, const struct id_mapping *array, unsigned int array_sz)
{
unsigned int i;
if (!array || !array_sz)
return 0;
for (i = 0; i < array_sz; i++)
if (v4l2_id == array[i].v4l2_id)
return array[i].hfi_id;
return 0;
}
static void
v4l2_id_profile_level(u32 hfi_codec, struct hfi_profile_level *pl, u32 *profile, u32 *level)
{
u32 hfi_pf = pl->profile;
u32 hfi_lvl = pl->level;
switch (hfi_codec) {
case HFI_VIDEO_CODEC_H264:
*profile = find_v4l2_id(hfi_pf, h264_profiles, ARRAY_SIZE(h264_profiles));
*level = find_v4l2_id(hfi_lvl, h264_levels, ARRAY_SIZE(h264_levels));
break;
case HFI_VIDEO_CODEC_MPEG2:
*profile = find_v4l2_id(hfi_pf, mpeg2_profiles, ARRAY_SIZE(mpeg2_profiles));
*level = find_v4l2_id(hfi_lvl, mpeg2_levels, ARRAY_SIZE(mpeg2_levels));
break;
case HFI_VIDEO_CODEC_MPEG4:
*profile = find_v4l2_id(hfi_pf, mpeg4_profiles, ARRAY_SIZE(mpeg4_profiles));
*level = find_v4l2_id(hfi_lvl, mpeg4_levels, ARRAY_SIZE(mpeg4_levels));
break;
case HFI_VIDEO_CODEC_VP8:
*profile = find_v4l2_id(hfi_pf, vp8_profiles, ARRAY_SIZE(vp8_profiles));
*level = 0;
break;
case HFI_VIDEO_CODEC_VP9:
*profile = find_v4l2_id(hfi_pf, vp9_profiles, ARRAY_SIZE(vp9_profiles));
*level = find_v4l2_id(hfi_lvl, vp9_levels, ARRAY_SIZE(vp9_levels));
break;
case HFI_VIDEO_CODEC_HEVC:
*profile = find_v4l2_id(hfi_pf, hevc_profiles, ARRAY_SIZE(hevc_profiles));
*level = find_v4l2_id(hfi_lvl, hevc_levels, ARRAY_SIZE(hevc_levels));
break;
default:
break;
}
}
static void
hfi_id_profile_level(u32 hfi_codec, u32 v4l2_pf, u32 v4l2_lvl, struct hfi_profile_level *pl)
{
switch (hfi_codec) {
case HFI_VIDEO_CODEC_H264:
pl->profile = find_hfi_id(v4l2_pf, h264_profiles, ARRAY_SIZE(h264_profiles));
pl->level = find_hfi_id(v4l2_lvl, h264_levels, ARRAY_SIZE(h264_levels));
break;
case HFI_VIDEO_CODEC_MPEG2:
pl->profile = find_hfi_id(v4l2_pf, mpeg2_profiles, ARRAY_SIZE(mpeg2_profiles));
pl->level = find_hfi_id(v4l2_lvl, mpeg2_levels, ARRAY_SIZE(mpeg2_levels));
break;
case HFI_VIDEO_CODEC_MPEG4:
pl->profile = find_hfi_id(v4l2_pf, mpeg4_profiles, ARRAY_SIZE(mpeg4_profiles));
pl->level = find_hfi_id(v4l2_lvl, mpeg4_levels, ARRAY_SIZE(mpeg4_levels));
break;
case HFI_VIDEO_CODEC_VP8:
pl->profile = find_hfi_id(v4l2_pf, vp8_profiles, ARRAY_SIZE(vp8_profiles));
pl->level = 0;
break;
case HFI_VIDEO_CODEC_VP9:
pl->profile = find_hfi_id(v4l2_pf, vp9_profiles, ARRAY_SIZE(vp9_profiles));
pl->level = find_hfi_id(v4l2_lvl, vp9_levels, ARRAY_SIZE(vp9_levels));
break;
case HFI_VIDEO_CODEC_HEVC:
pl->profile = find_hfi_id(v4l2_pf, hevc_profiles, ARRAY_SIZE(hevc_profiles));
pl->level = find_hfi_id(v4l2_lvl, hevc_levels, ARRAY_SIZE(hevc_levels));
break;
default:
break;
}
}
int venus_helper_get_profile_level(struct venus_inst *inst, u32 *profile, u32 *level)
{
const u32 ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
union hfi_get_property hprop;
int ret;
ret = hfi_session_get_property(inst, ptype, &hprop);
if (ret)
return ret;
v4l2_id_profile_level(inst->hfi_codec, &hprop.profile_level, profile, level);
return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_get_profile_level);
int venus_helper_set_profile_level(struct venus_inst *inst, u32 profile, u32 level)
{
const u32 ptype = HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT;
struct hfi_profile_level pl;
hfi_id_profile_level(inst->hfi_codec, profile, level, &pl);
return hfi_session_set_property(inst, ptype, &pl);
}
EXPORT_SYMBOL_GPL(venus_helper_set_profile_level);
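/*
* The raw frame-size helpers below mirror the firmware's plane layout
* rules: strides and scanlines are aligned per format and planes are
* padded to 4K. E.g. for 1920x1080 NV12: y_stride = ALIGN(1920, 128) =
* 1920, y_sclines = ALIGN(1080, 32) = 1088, uv_sclines = ALIGN(540, 16)
* = 544, so size = 1920 * 1088 + (1920 * 544 + SZ_4K) + SZ_8K = 3 MiB.
*/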
static u32 get_framesize_raw_nv12(u32 width, u32 height)
{
u32 y_stride, uv_stride, y_plane;
u32 y_sclines, uv_sclines, uv_plane;
u32 size;
y_stride = ALIGN(width, 128);
uv_stride = ALIGN(width, 128);
y_sclines = ALIGN(height, 32);
uv_sclines = ALIGN(((height + 1) >> 1), 16);
y_plane = y_stride * y_sclines;
uv_plane = uv_stride * uv_sclines + SZ_4K;
size = y_plane + uv_plane + SZ_8K;
return ALIGN(size, SZ_4K);
}
static u32 get_framesize_raw_nv12_ubwc(u32 width, u32 height)
{
u32 y_meta_stride, y_meta_plane;
u32 y_stride, y_plane;
u32 uv_meta_stride, uv_meta_plane;
u32 uv_stride, uv_plane;
u32 extradata = SZ_16K;
y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64);
y_meta_plane = y_meta_stride * ALIGN(DIV_ROUND_UP(height, 8), 16);
y_meta_plane = ALIGN(y_meta_plane, SZ_4K);
y_stride = ALIGN(width, 128);
y_plane = ALIGN(y_stride * ALIGN(height, 32), SZ_4K);
uv_meta_stride = ALIGN(DIV_ROUND_UP(width / 2, 16), 64);
uv_meta_plane = uv_meta_stride * ALIGN(DIV_ROUND_UP(height / 2, 8), 16);
uv_meta_plane = ALIGN(uv_meta_plane, SZ_4K);
uv_stride = ALIGN(width, 128);
uv_plane = ALIGN(uv_stride * ALIGN(height / 2, 32), SZ_4K);
return ALIGN(y_meta_plane + y_plane + uv_meta_plane + uv_plane +
max(extradata, y_stride * 48), SZ_4K);
}
static u32 get_framesize_raw_p010(u32 width, u32 height)
{
u32 y_plane, uv_plane, y_stride, uv_stride, y_sclines, uv_sclines;
y_stride = ALIGN(width * 2, 128);
uv_stride = ALIGN(width * 2, 128);
y_sclines = ALIGN(height, 32);
uv_sclines = ALIGN((height + 1) >> 1, 16);
y_plane = y_stride * y_sclines;
uv_plane = uv_stride * uv_sclines;
return ALIGN((y_plane + uv_plane), SZ_4K);
}
static u32 get_framesize_raw_p010_ubwc(u32 width, u32 height)
{
u32 y_stride, uv_stride, y_sclines, uv_sclines;
u32 y_ubwc_plane, uv_ubwc_plane;
u32 y_meta_stride, y_meta_scanlines;
u32 uv_meta_stride, uv_meta_scanlines;
u32 y_meta_plane, uv_meta_plane;
u32 size;
y_stride = ALIGN(width * 2, 256);
uv_stride = ALIGN(width * 2, 256);
y_sclines = ALIGN(height, 16);
uv_sclines = ALIGN((height + 1) >> 1, 16);
y_ubwc_plane = ALIGN(y_stride * y_sclines, SZ_4K);
uv_ubwc_plane = ALIGN(uv_stride * uv_sclines, SZ_4K);
y_meta_stride = ALIGN(DIV_ROUND_UP(width, 32), 64);
y_meta_scanlines = ALIGN(DIV_ROUND_UP(height, 4), 16);
y_meta_plane = ALIGN(y_meta_stride * y_meta_scanlines, SZ_4K);
uv_meta_stride = ALIGN(DIV_ROUND_UP((width + 1) >> 1, 16), 64);
uv_meta_scanlines = ALIGN(DIV_ROUND_UP((height + 1) >> 1, 4), 16);
uv_meta_plane = ALIGN(uv_meta_stride * uv_meta_scanlines, SZ_4K);
size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + uv_meta_plane;
return ALIGN(size, SZ_4K);
}
static u32 get_framesize_raw_yuv420_tp10_ubwc(u32 width, u32 height)
{
u32 y_stride, uv_stride, y_sclines, uv_sclines;
u32 y_ubwc_plane, uv_ubwc_plane;
u32 y_meta_stride, y_meta_scanlines;
u32 uv_meta_stride, uv_meta_scanlines;
u32 y_meta_plane, uv_meta_plane;
u32 extradata = SZ_16K;
u32 size;
y_stride = ALIGN(width * 4 / 3, 256);
uv_stride = ALIGN(width * 4 / 3, 256);
y_sclines = ALIGN(height, 16);
uv_sclines = ALIGN((height + 1) >> 1, 16);
y_ubwc_plane = ALIGN(y_stride * y_sclines, SZ_4K);
uv_ubwc_plane = ALIGN(uv_stride * uv_sclines, SZ_4K);
y_meta_stride = ALIGN(DIV_ROUND_UP(width, 48), 64);
y_meta_scanlines = ALIGN(DIV_ROUND_UP(height, 4), 16);
y_meta_plane = ALIGN(y_meta_stride * y_meta_scanlines, SZ_4K);
uv_meta_stride = ALIGN(DIV_ROUND_UP((width + 1) >> 1, 24), 64);
uv_meta_scanlines = ALIGN(DIV_ROUND_UP((height + 1) >> 1, 4), 16);
uv_meta_plane = ALIGN(uv_meta_stride * uv_meta_scanlines, SZ_4K);
size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane + uv_meta_plane;
size += max(extradata + SZ_8K, y_stride * 48);
return ALIGN(size, SZ_4K);
}
u32 venus_helper_get_framesz_raw(u32 hfi_fmt, u32 width, u32 height)
{
switch (hfi_fmt) {
case HFI_COLOR_FORMAT_NV12:
case HFI_COLOR_FORMAT_NV21:
return get_framesize_raw_nv12(width, height);
case HFI_COLOR_FORMAT_NV12_UBWC:
return get_framesize_raw_nv12_ubwc(width, height);
case HFI_COLOR_FORMAT_P010:
return get_framesize_raw_p010(width, height);
case HFI_COLOR_FORMAT_P010_UBWC:
return get_framesize_raw_p010_ubwc(width, height);
case HFI_COLOR_FORMAT_YUV420_TP10_UBWC:
return get_framesize_raw_yuv420_tp10_ubwc(width, height);
default:
return 0;
}
}
EXPORT_SYMBOL_GPL(venus_helper_get_framesz_raw);
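/*
* Compressed formats have no exact size, so a heuristic is used: half
* of a 4:2:0 raw frame, bumped 8x below 1280x720 where the relative
* bitstream overhead is larger. Raw formats defer to the per-format
* helpers above.
*/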
u32 venus_helper_get_framesz(u32 v4l2_fmt, u32 width, u32 height)
{
u32 hfi_fmt, sz;
bool compressed;
switch (v4l2_fmt) {
case V4L2_PIX_FMT_MPEG:
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_H264_NO_SC:
case V4L2_PIX_FMT_H264_MVC:
case V4L2_PIX_FMT_H263:
case V4L2_PIX_FMT_MPEG1:
case V4L2_PIX_FMT_MPEG2:
case V4L2_PIX_FMT_MPEG4:
case V4L2_PIX_FMT_XVID:
case V4L2_PIX_FMT_VC1_ANNEX_G:
case V4L2_PIX_FMT_VC1_ANNEX_L:
case V4L2_PIX_FMT_VP8:
case V4L2_PIX_FMT_VP9:
case V4L2_PIX_FMT_HEVC:
compressed = true;
break;
default:
compressed = false;
break;
}
if (compressed) {
sz = ALIGN(height, 32) * ALIGN(width, 32) * 3 / 2 / 2;
if (width < 1280 || height < 720)
sz *= 8;
return ALIGN(sz, SZ_4K);
}
hfi_fmt = to_hfi_raw_fmt(v4l2_fmt);
if (!hfi_fmt)
return 0;
return venus_helper_get_framesz_raw(hfi_fmt, width, height);
}
EXPORT_SYMBOL_GPL(venus_helper_get_framesz);
int venus_helper_set_input_resolution(struct venus_inst *inst,
unsigned int width, unsigned int height)
{
u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
struct hfi_framesize fs;
fs.buffer_type = HFI_BUFFER_INPUT;
fs.width = width;
fs.height = height;
return hfi_session_set_property(inst, ptype, &fs);
}
EXPORT_SYMBOL_GPL(venus_helper_set_input_resolution);
int venus_helper_set_output_resolution(struct venus_inst *inst,
unsigned int width, unsigned int height,
u32 buftype)
{
u32 ptype = HFI_PROPERTY_PARAM_FRAME_SIZE;
struct hfi_framesize fs;
fs.buffer_type = buftype;
fs.width = width;
fs.height = height;
return hfi_session_set_property(inst, ptype, &fs);
}
EXPORT_SYMBOL_GPL(venus_helper_set_output_resolution);
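/*
* Pick the HFI work mode: VIDC_WORK_MODE_2 by default, with
* VIDC_WORK_MODE_1 selected for MPEG2, interlaced or <=720p decode
* sessions, and for VP8 encode sessions up to 4K.
*/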
static u32 venus_helper_get_work_mode(struct venus_inst *inst)
{
u32 mode;
u32 num_mbs;
mode = VIDC_WORK_MODE_2;
if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
num_mbs = (ALIGN(inst->height, 16) * ALIGN(inst->width, 16)) / 256;
if (inst->hfi_codec == HFI_VIDEO_CODEC_MPEG2 ||
inst->pic_struct != HFI_INTERLACE_FRAME_PROGRESSIVE ||
num_mbs <= NUM_MBS_720P)
mode = VIDC_WORK_MODE_1;
} else {
num_mbs = (ALIGN(inst->out_height, 16) * ALIGN(inst->out_width, 16)) / 256;
if (inst->hfi_codec == HFI_VIDEO_CODEC_VP8 &&
num_mbs <= NUM_MBS_4K)
mode = VIDC_WORK_MODE_1;
}
return mode;
}
int venus_helper_set_work_mode(struct venus_inst *inst)
{
const u32 ptype = HFI_PROPERTY_PARAM_WORK_MODE;
struct hfi_video_work_mode wm;
u32 mode;
if (!IS_V4(inst->core) && !IS_V6(inst->core))
return 0;
mode = venus_helper_get_work_mode(inst);
wm.video_work_mode = mode;
return hfi_session_set_property(inst, ptype, &wm);
}
EXPORT_SYMBOL_GPL(venus_helper_set_work_mode);
int venus_helper_set_format_constraints(struct venus_inst *inst)
{
const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO;
struct hfi_uncompressed_plane_actual_constraints_info pconstraint;
if (!IS_V6(inst->core))
return 0;
if (inst->opb_fmt == HFI_COLOR_FORMAT_NV12_UBWC ||
inst->opb_fmt == HFI_COLOR_FORMAT_YUV420_TP10_UBWC)
return 0;
pconstraint.buffer_type = HFI_BUFFER_OUTPUT2;
pconstraint.num_planes = 2;
pconstraint.plane_format[0].stride_multiples = 128;
pconstraint.plane_format[0].max_stride = 8192;
pconstraint.plane_format[0].min_plane_buffer_height_multiple = 32;
pconstraint.plane_format[0].buffer_alignment = 256;
pconstraint.plane_format[1].stride_multiples = 128;
pconstraint.plane_format[1].max_stride = 8192;
pconstraint.plane_format[1].min_plane_buffer_height_multiple = 16;
pconstraint.plane_format[1].buffer_alignment = 256;
return hfi_session_set_property(inst, ptype, &pconstraint);
}
EXPORT_SYMBOL_GPL(venus_helper_set_format_constraints);
int venus_helper_set_num_bufs(struct venus_inst *inst, unsigned int input_bufs,
unsigned int output_bufs,
unsigned int output2_bufs)
{
u32 ptype = HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL;
struct hfi_buffer_count_actual buf_count;
int ret;
buf_count.type = HFI_BUFFER_INPUT;
buf_count.count_actual = input_bufs;
ret = hfi_session_set_property(inst, ptype, &buf_count);
if (ret)
return ret;
buf_count.type = HFI_BUFFER_OUTPUT;
buf_count.count_actual = output_bufs;
ret = hfi_session_set_property(inst, ptype, &buf_count);
if (ret)
return ret;
if (output2_bufs) {
buf_count.type = HFI_BUFFER_OUTPUT2;
buf_count.count_actual = output2_bufs;
ret = hfi_session_set_property(inst, ptype, &buf_count);
}
return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_set_num_bufs);
int venus_helper_set_raw_format(struct venus_inst *inst, u32 hfi_format,
u32 buftype)
{
const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT;
struct hfi_uncompressed_format_select fmt;
fmt.buffer_type = buftype;
fmt.format = hfi_format;
return hfi_session_set_property(inst, ptype, &fmt);
}
EXPORT_SYMBOL_GPL(venus_helper_set_raw_format);
int venus_helper_set_color_format(struct venus_inst *inst, u32 pixfmt)
{
u32 hfi_format, buftype;
if (inst->session_type == VIDC_SESSION_TYPE_DEC)
buftype = HFI_BUFFER_OUTPUT;
else if (inst->session_type == VIDC_SESSION_TYPE_ENC)
buftype = HFI_BUFFER_INPUT;
else
return -EINVAL;
hfi_format = to_hfi_raw_fmt(pixfmt);
if (!hfi_format)
return -EINVAL;
return venus_helper_set_raw_format(inst, hfi_format, buftype);
}
EXPORT_SYMBOL_GPL(venus_helper_set_color_format);
int venus_helper_set_multistream(struct venus_inst *inst, bool out_en,
bool out2_en)
{
struct hfi_multi_stream multi = {0};
u32 ptype = HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM;
int ret;
multi.buffer_type = HFI_BUFFER_OUTPUT;
multi.enable = out_en;
ret = hfi_session_set_property(inst, ptype, &multi);
if (ret)
return ret;
multi.buffer_type = HFI_BUFFER_OUTPUT2;
multi.enable = out2_en;
return hfi_session_set_property(inst, ptype, &multi);
}
EXPORT_SYMBOL_GPL(venus_helper_set_multistream);
int venus_helper_set_dyn_bufmode(struct venus_inst *inst)
{
const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE;
struct hfi_buffer_alloc_mode mode;
int ret;
if (!is_dynamic_bufmode(inst))
return 0;
mode.type = HFI_BUFFER_OUTPUT;
mode.mode = HFI_BUFFER_MODE_DYNAMIC;
ret = hfi_session_set_property(inst, ptype, &mode);
if (ret)
return ret;
mode.type = HFI_BUFFER_OUTPUT2;
return hfi_session_set_property(inst, ptype, &mode);
}
EXPORT_SYMBOL_GPL(venus_helper_set_dyn_bufmode);
int venus_helper_set_bufsize(struct venus_inst *inst, u32 bufsize, u32 buftype)
{
const u32 ptype = HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL;
struct hfi_buffer_size_actual bufsz;
bufsz.type = buftype;
bufsz.size = bufsize;
return hfi_session_set_property(inst, ptype, &bufsz);
}
EXPORT_SYMBOL_GPL(venus_helper_set_bufsize);
unsigned int venus_helper_get_opb_size(struct venus_inst *inst)
{
/* the encoder has only one output */
if (inst->session_type == VIDC_SESSION_TYPE_ENC)
return inst->output_buf_size;
if (inst->opb_buftype == HFI_BUFFER_OUTPUT)
return inst->output_buf_size;
else if (inst->opb_buftype == HFI_BUFFER_OUTPUT2)
return inst->output2_buf_size;
return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_get_opb_size);
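/*
* Dynamic buffer mode lets the firmware keep a capture buffer as a
* reference: such buffers are flagged HFI_BUFFERFLAG_READONLY and parked
* on inst->delayed_process instead of being requeued. Once the firmware
* drops its reference (venus_helper_release_buf_ref() clears the flag),
* this work item requeues them.
*/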
static void delayed_process_buf_func(struct work_struct *work)
{
struct venus_buffer *buf, *n;
struct venus_inst *inst;
int ret;
inst = container_of(work, struct venus_inst, delayed_process_work);
mutex_lock(&inst->lock);
if (!(inst->streamon_out & inst->streamon_cap))
goto unlock;
list_for_each_entry_safe(buf, n, &inst->delayed_process, ref_list) {
if (buf->flags & HFI_BUFFERFLAG_READONLY)
continue;
ret = session_process_buf(inst, &buf->vb);
if (ret)
return_buf_error(inst, &buf->vb);
list_del_init(&buf->ref_list);
}
unlock:
mutex_unlock(&inst->lock);
}
void venus_helper_release_buf_ref(struct venus_inst *inst, unsigned int idx)
{
struct venus_buffer *buf;
list_for_each_entry(buf, &inst->registeredbufs, reg_list) {
if (buf->vb.vb2_buf.index == idx) {
buf->flags &= ~HFI_BUFFERFLAG_READONLY;
schedule_work(&inst->delayed_process_work);
break;
}
}
}
EXPORT_SYMBOL_GPL(venus_helper_release_buf_ref);
void venus_helper_acquire_buf_ref(struct vb2_v4l2_buffer *vbuf)
{
struct venus_buffer *buf = to_venus_buffer(vbuf);
buf->flags |= HFI_BUFFERFLAG_READONLY;
}
EXPORT_SYMBOL_GPL(venus_helper_acquire_buf_ref);
static int is_buf_refed(struct venus_inst *inst, struct vb2_v4l2_buffer *vbuf)
{
struct venus_buffer *buf = to_venus_buffer(vbuf);
if (buf->flags & HFI_BUFFERFLAG_READONLY) {
list_add_tail(&buf->ref_list, &inst->delayed_process);
schedule_work(&inst->delayed_process_work);
return 1;
}
return 0;
}
struct vb2_v4l2_buffer *
venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
{
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return v4l2_m2m_src_buf_remove_by_idx(m2m_ctx, idx);
else
return v4l2_m2m_dst_buf_remove_by_idx(m2m_ctx, idx);
}
EXPORT_SYMBOL_GPL(venus_helper_find_buf);
void venus_helper_change_dpb_owner(struct venus_inst *inst,
struct vb2_v4l2_buffer *vbuf, unsigned int type,
unsigned int buf_type, u32 tag)
{
struct intbuf *dpb_buf;
if (!V4L2_TYPE_IS_CAPTURE(type) ||
buf_type != inst->dpb_buftype)
return;
list_for_each_entry(dpb_buf, &inst->dpbbufs, list)
if (dpb_buf->dpb_out_tag == tag) {
dpb_buf->owned_by = DRIVER;
break;
}
}
EXPORT_SYMBOL_GPL(venus_helper_change_dpb_owner);
int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct venus_buffer *buf = to_venus_buffer(vbuf);
buf->size = vb2_plane_size(vb, 0);
buf->dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
list_add_tail(&buf->reg_list, &inst->registeredbufs);
return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_init);
int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
unsigned int out_buf_size = venus_helper_get_opb_size(inst);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
if (vbuf->field == V4L2_FIELD_ANY)
vbuf->field = V4L2_FIELD_NONE;
if (vbuf->field != V4L2_FIELD_NONE) {
dev_err(inst->core->dev, "%s field isn't supported\n",
__func__);
return -EINVAL;
}
}
if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
vb2_plane_size(vb, 0) < out_buf_size)
return -EINVAL;
if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
vb2_plane_size(vb, 0) < inst->input_buf_size)
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_prepare);
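/* Cache each OUTPUT queue buffer's plane payload, indexed by buffer index. */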
static void cache_payload(struct venus_inst *inst, struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
unsigned int idx = vbuf->vb2_buf.index;
if (vbuf->vb2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->payloads[idx] = vb2_get_plane_payload(vb, 0);
}
void venus_helper_vb2_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
int ret;
v4l2_m2m_buf_queue(m2m_ctx, vbuf);
/* Skip processing queued capture buffers after LAST flag */
if (inst->session_type == VIDC_SESSION_TYPE_DEC &&
V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
inst->codec_state == VENUS_DEC_STATE_DRC)
return;
cache_payload(inst, vb);
if (inst->session_type == VIDC_SESSION_TYPE_ENC &&
!(inst->streamon_out && inst->streamon_cap))
return;
if (vb2_start_streaming_called(vb->vb2_queue)) {
ret = is_buf_refed(inst, vbuf);
if (ret)
return;
ret = session_process_buf(inst, vbuf);
if (ret)
return_buf_error(inst, vbuf);
}
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_buf_queue);
void venus_helper_buffers_done(struct venus_inst *inst, unsigned int type,
enum vb2_buffer_state state)
{
struct vb2_v4l2_buffer *buf;
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
while ((buf = v4l2_m2m_src_buf_remove(inst->m2m_ctx)))
v4l2_m2m_buf_done(buf, state);
} else if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
v4l2_m2m_buf_done(buf, state);
}
}
EXPORT_SYMBOL_GPL(venus_helper_buffers_done);
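/*
 * Tear the session down once both queues were streaming: stop the
 * session, release its resources and internal buffers, then deinit.
 * Any failure (or a pending system error) falls back to a session
 * abort, and all still-queued buffers are returned in error state.
 */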
void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
{
struct venus_inst *inst = vb2_get_drv_priv(q);
struct venus_core *core = inst->core;
int ret;
mutex_lock(&inst->lock);
if (inst->streamon_out & inst->streamon_cap) {
ret = hfi_session_stop(inst);
ret |= hfi_session_unload_res(inst);
ret |= venus_helper_unregister_bufs(inst);
ret |= venus_helper_intbufs_free(inst);
ret |= hfi_session_deinit(inst);
if (inst->session_error || test_bit(0, &core->sys_error))
ret = -EIO;
if (ret)
hfi_session_abort(inst);
venus_helper_free_dpb_bufs(inst);
venus_pm_load_scale(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
}
venus_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
VB2_BUF_STATE_ERROR);
venus_helper_buffers_done(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
VB2_BUF_STATE_ERROR);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->streamon_out = 0;
else
inst->streamon_cap = 0;
venus_pm_release_core(inst);
inst->session_error = 0;
mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);
void venus_helper_vb2_queue_error(struct venus_inst *inst)
{
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
struct vb2_queue *q;
q = v4l2_m2m_get_src_vq(m2m_ctx);
vb2_queue_error(q);
q = v4l2_m2m_get_dst_vq(m2m_ctx);
vb2_queue_error(q);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_queue_error);
int venus_helper_process_initial_cap_bufs(struct venus_inst *inst)
{
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
struct v4l2_m2m_buffer *buf, *n;
int ret;
v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
ret = session_process_buf(inst, &buf->vb);
if (ret) {
return_buf_error(inst, &buf->vb);
return ret;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_process_initial_cap_bufs);
int venus_helper_process_initial_out_bufs(struct venus_inst *inst)
{
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
struct v4l2_m2m_buffer *buf, *n;
int ret;
v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
ret = session_process_buf(inst, &buf->vb);
if (ret) {
return_buf_error(inst, &buf->vb);
return ret;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_process_initial_out_bufs);
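/*
 * Bring a session up in the order the firmware expects: allocate and
 * register internal buffers, load resources, then start. The error
 * labels unwind in exactly the reverse order.
 */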
int venus_helper_vb2_start_streaming(struct venus_inst *inst)
{
int ret;
ret = venus_helper_intbufs_alloc(inst);
if (ret)
return ret;
ret = session_register_bufs(inst);
if (ret)
goto err_bufs_free;
venus_pm_load_scale(inst);
ret = hfi_session_load_res(inst);
if (ret)
goto err_unreg_bufs;
ret = hfi_session_start(inst);
if (ret)
goto err_unload_res;
return 0;
err_unload_res:
hfi_session_unload_res(inst);
err_unreg_bufs:
venus_helper_unregister_bufs(inst);
err_bufs_free:
venus_helper_intbufs_free(inst);
return ret;
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_start_streaming);
void venus_helper_m2m_device_run(void *priv)
{
struct venus_inst *inst = priv;
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
struct v4l2_m2m_buffer *buf, *n;
int ret;
mutex_lock(&inst->lock);
v4l2_m2m_for_each_dst_buf_safe(m2m_ctx, buf, n) {
ret = session_process_buf(inst, &buf->vb);
if (ret)
return_buf_error(inst, &buf->vb);
}
v4l2_m2m_for_each_src_buf_safe(m2m_ctx, buf, n) {
ret = session_process_buf(inst, &buf->vb);
if (ret)
return_buf_error(inst, &buf->vb);
}
mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_m2m_device_run);
void venus_helper_m2m_job_abort(void *priv)
{
struct venus_inst *inst = priv;
v4l2_m2m_job_finish(inst->m2m_dev, inst->m2m_ctx);
}
EXPORT_SYMBOL_GPL(venus_helper_m2m_job_abort);
int venus_helper_session_init(struct venus_inst *inst)
{
enum hfi_version version = inst->core->res->hfi_version;
u32 session_type = inst->session_type;
u32 codec;
int ret;
codec = inst->session_type == VIDC_SESSION_TYPE_DEC ?
inst->fmt_out->pixfmt : inst->fmt_cap->pixfmt;
ret = hfi_session_init(inst, codec);
if (ret)
return ret;
inst->clk_data.vpp_freq = hfi_platform_get_codec_vpp_freq(version, codec,
session_type);
inst->clk_data.vsp_freq = hfi_platform_get_codec_vsp_freq(version, codec,
session_type);
inst->clk_data.low_power_freq = hfi_platform_get_codec_lp_freq(version, codec,
session_type);
return 0;
}
EXPORT_SYMBOL_GPL(venus_helper_session_init);
void venus_helper_init_instance(struct venus_inst *inst)
{
if (inst->session_type == VIDC_SESSION_TYPE_DEC) {
INIT_LIST_HEAD(&inst->delayed_process);
INIT_WORK(&inst->delayed_process_work,
delayed_process_buf_func);
}
}
EXPORT_SYMBOL_GPL(venus_helper_init_instance);
static bool find_fmt_from_caps(struct hfi_plat_caps *caps, u32 buftype, u32 fmt)
{
unsigned int i;
for (i = 0; i < caps->num_fmts; i++) {
if (caps->fmts[i].buftype == buftype &&
caps->fmts[i].fmt == fmt)
return true;
}
return false;
}
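/*
 * Pick HFI raw formats for the two decoder output buffer types.
 * HFI_BUFFER_OUTPUT backs the decoder-internal DPB (reference frames),
 * while HFI_BUFFER_OUTPUT2 is handed to the application; when formats
 * for both are found, the session can run in multistream mode with a
 * UBWC DPB and a linear application-visible format.
 */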
int venus_helper_get_out_fmts(struct venus_inst *inst, u32 v4l2_fmt,
u32 *out_fmt, u32 *out2_fmt, bool ubwc)
{
struct venus_core *core = inst->core;
struct hfi_plat_caps *caps;
u32 ubwc_fmt, fmt = to_hfi_raw_fmt(v4l2_fmt);
bool found, found_ubwc;
*out_fmt = *out2_fmt = 0;
if (!fmt)
return -EINVAL;
caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
if (!caps)
return -EINVAL;
if (inst->bit_depth == VIDC_BITDEPTH_10 && inst->session_type == VIDC_SESSION_TYPE_DEC) {
found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
HFI_COLOR_FORMAT_YUV420_TP10_UBWC);
found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
if (found_ubwc && found) {
/*
 * Hard-code DPB buffers to be 10-bit UBWC until V4L2 is
 * able to expose compressed/tiled formats to applications.
 */
*out_fmt = HFI_COLOR_FORMAT_YUV420_TP10_UBWC;
*out2_fmt = fmt;
return 0;
}
}
if (ubwc) {
ubwc_fmt = fmt | HFI_COLOR_FORMAT_UBWC_BASE;
found_ubwc = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT,
ubwc_fmt);
found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
if (found_ubwc && found) {
*out_fmt = ubwc_fmt;
*out2_fmt = fmt;
return 0;
}
}
found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt);
if (found) {
*out_fmt = fmt;
*out2_fmt = 0;
return 0;
}
found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
if (found) {
*out_fmt = 0;
*out2_fmt = fmt;
return 0;
}
return -EINVAL;
}
EXPORT_SYMBOL_GPL(venus_helper_get_out_fmts);
bool venus_helper_check_format(struct venus_inst *inst, u32 v4l2_pixfmt)
{
struct venus_core *core = inst->core;
u32 fmt = to_hfi_raw_fmt(v4l2_pixfmt);
struct hfi_plat_caps *caps;
bool found;
if (!fmt)
return false;
caps = venus_caps_by_codec(core, inst->hfi_codec, inst->session_type);
if (!caps)
return false;
found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt);
if (found)
goto done;
found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt);
done:
return found;
}
EXPORT_SYMBOL_GPL(venus_helper_check_format);
int venus_helper_set_stride(struct venus_inst *inst,
unsigned int width, unsigned int height)
{
const u32 ptype = HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO;
struct hfi_uncompressed_plane_actual_info plane_actual_info;
plane_actual_info.buffer_type = HFI_BUFFER_INPUT;
plane_actual_info.num_planes = 2;
plane_actual_info.plane_format[0].actual_stride = width;
plane_actual_info.plane_format[0].actual_plane_buffer_height = height;
plane_actual_info.plane_format[1].actual_stride = width;
plane_actual_info.plane_format[1].actual_plane_buffer_height = height / 2;
return hfi_session_set_property(inst, ptype, &plane_actual_info);
}
EXPORT_SYMBOL_GPL(venus_helper_set_stride);
| linux-master | drivers/media/platform/qcom/venus/helpers.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include "core.h"
#include "hfi.h"
#include "hfi_cmds.h"
#include "hfi_venus.h"
#define TIMEOUT msecs_to_jiffies(1000)
static u32 to_codec_type(u32 pixfmt)
{
switch (pixfmt) {
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_H264_NO_SC:
return HFI_VIDEO_CODEC_H264;
case V4L2_PIX_FMT_H263:
return HFI_VIDEO_CODEC_H263;
case V4L2_PIX_FMT_MPEG1:
return HFI_VIDEO_CODEC_MPEG1;
case V4L2_PIX_FMT_MPEG2:
return HFI_VIDEO_CODEC_MPEG2;
case V4L2_PIX_FMT_MPEG4:
return HFI_VIDEO_CODEC_MPEG4;
case V4L2_PIX_FMT_VC1_ANNEX_G:
case V4L2_PIX_FMT_VC1_ANNEX_L:
return HFI_VIDEO_CODEC_VC1;
case V4L2_PIX_FMT_VP8:
return HFI_VIDEO_CODEC_VP8;
case V4L2_PIX_FMT_VP9:
return HFI_VIDEO_CODEC_VP9;
case V4L2_PIX_FMT_XVID:
return HFI_VIDEO_CODEC_DIVX;
case V4L2_PIX_FMT_HEVC:
return HFI_VIDEO_CODEC_HEVC;
default:
return 0;
}
}
int hfi_core_init(struct venus_core *core)
{
int ret = 0;
mutex_lock(&core->lock);
if (core->state >= CORE_INIT)
goto unlock;
reinit_completion(&core->done);
ret = core->ops->core_init(core);
if (ret)
goto unlock;
ret = wait_for_completion_timeout(&core->done, TIMEOUT);
if (!ret) {
ret = -ETIMEDOUT;
goto unlock;
}
ret = 0;
if (core->error != HFI_ERR_NONE) {
ret = -EIO;
goto unlock;
}
core->state = CORE_INIT;
unlock:
mutex_unlock(&core->lock);
return ret;
}
int hfi_core_deinit(struct venus_core *core, bool blocking)
{
int ret = 0, empty;
mutex_lock(&core->lock);
if (core->state == CORE_UNINIT)
goto unlock;
empty = list_empty(&core->instances);
if (!empty && !blocking) {
ret = -EBUSY;
goto unlock;
}
if (!empty) {
mutex_unlock(&core->lock);
wait_var_event(&core->insts_count,
!atomic_read(&core->insts_count));
mutex_lock(&core->lock);
}
if (!core->ops)
goto unlock;
ret = core->ops->core_deinit(core);
if (!ret)
core->state = CORE_UNINIT;
unlock:
mutex_unlock(&core->lock);
return ret;
}
int hfi_core_suspend(struct venus_core *core)
{
if (core->state != CORE_INIT)
return 0;
return core->ops->suspend(core);
}
int hfi_core_resume(struct venus_core *core, bool force)
{
if (!force && core->state != CORE_INIT)
return 0;
return core->ops->resume(core);
}
int hfi_core_trigger_ssr(struct venus_core *core, u32 type)
{
return core->ops->core_trigger_ssr(core, type);
}
int hfi_core_ping(struct venus_core *core)
{
int ret;
mutex_lock(&core->lock);
ret = core->ops->core_ping(core, 0xbeef);
if (ret)
goto unlock;
ret = wait_for_completion_timeout(&core->done, TIMEOUT);
if (!ret) {
ret = -ETIMEDOUT;
goto unlock;
}
ret = 0;
if (core->error != HFI_ERR_NONE)
ret = -ENODEV;
unlock:
mutex_unlock(&core->lock);
return ret;
}
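/*
 * Common helper for blocking session ops: wait up to TIMEOUT for the
 * firmware response, mapping a timeout to -ETIMEDOUT and a
 * firmware-reported error to -EIO.
 */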
static int wait_session_msg(struct venus_inst *inst)
{
int ret;
ret = wait_for_completion_timeout(&inst->done, TIMEOUT);
if (!ret)
return -ETIMEDOUT;
if (inst->error != HFI_ERR_NONE)
return -EIO;
return 0;
}
int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
{
struct venus_core *core = inst->core;
bool max;
int ret;
if (!ops)
return -EINVAL;
inst->state = INST_UNINIT;
init_completion(&inst->done);
inst->ops = ops;
mutex_lock(&core->lock);
if (test_bit(0, &inst->core->sys_error)) {
ret = -EIO;
goto unlock;
}
max = atomic_add_unless(&core->insts_count, 1,
core->max_sessions_supported);
if (!max) {
ret = -EAGAIN;
} else {
list_add_tail(&inst->list, &core->instances);
ret = 0;
}
unlock:
mutex_unlock(&core->lock);
return ret;
}
EXPORT_SYMBOL_GPL(hfi_session_create);
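/*
 * The session ops below drive the instance state machine:
 * INST_UNINIT -> INST_INIT -> INST_LOAD_RESOURCES -> INST_START ->
 * INST_STOP -> INST_RELEASE_RESOURCES, with hfi_session_deinit()
 * returning the instance to INST_UNINIT.
 */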
int hfi_session_init(struct venus_inst *inst, u32 pixfmt)
{
struct venus_core *core = inst->core;
const struct hfi_ops *ops = core->ops;
int ret;
/*
 * If core shutdown is in progress or we are in system error
 * recovery, return an error: session_init() cannot complete
 * successfully while recovery is running.
 */
mutex_lock(&core->lock);
if (!core->ops || test_bit(0, &inst->core->sys_error)) {
mutex_unlock(&core->lock);
return -EIO;
}
mutex_unlock(&core->lock);
if (inst->state != INST_UNINIT)
return -EALREADY;
inst->hfi_codec = to_codec_type(pixfmt);
reinit_completion(&inst->done);
ret = ops->session_init(inst, inst->session_type, inst->hfi_codec);
if (ret)
return ret;
ret = wait_session_msg(inst);
if (ret)
return ret;
inst->state = INST_INIT;
return 0;
}
EXPORT_SYMBOL_GPL(hfi_session_init);
void hfi_session_destroy(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
mutex_lock(&core->lock);
list_del_init(&inst->list);
if (atomic_dec_and_test(&core->insts_count))
wake_up_var(&core->insts_count);
mutex_unlock(&core->lock);
}
EXPORT_SYMBOL_GPL(hfi_session_destroy);
int hfi_session_deinit(struct venus_inst *inst)
{
const struct hfi_ops *ops = inst->core->ops;
int ret;
if (inst->state == INST_UNINIT)
return 0;
if (inst->state < INST_INIT)
return -EINVAL;
if (test_bit(0, &inst->core->sys_error))
goto done;
reinit_completion(&inst->done);
ret = ops->session_end(inst);
if (ret)
return ret;
ret = wait_session_msg(inst);
if (ret)
return ret;
done:
inst->state = INST_UNINIT;
return 0;
}
EXPORT_SYMBOL_GPL(hfi_session_deinit);
int hfi_session_start(struct venus_inst *inst)
{
const struct hfi_ops *ops = inst->core->ops;
int ret;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
if (inst->state != INST_LOAD_RESOURCES)
return -EINVAL;
reinit_completion(&inst->done);
ret = ops->session_start(inst);
if (ret)
return ret;
ret = wait_session_msg(inst);
if (ret)
return ret;
inst->state = INST_START;
return 0;
}
EXPORT_SYMBOL_GPL(hfi_session_start);
int hfi_session_stop(struct venus_inst *inst)
{
const struct hfi_ops *ops = inst->core->ops;
int ret;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
if (inst->state != INST_START)
return -EINVAL;
reinit_completion(&inst->done);
ret = ops->session_stop(inst);
if (ret)
return ret;
ret = wait_session_msg(inst);
if (ret)
return ret;
inst->state = INST_STOP;
return 0;
}
EXPORT_SYMBOL_GPL(hfi_session_stop);
int hfi_session_continue(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
if (core->res->hfi_version == HFI_VERSION_1XX)
return 0;
return core->ops->session_continue(inst);
}
EXPORT_SYMBOL_GPL(hfi_session_continue);
int hfi_session_abort(struct venus_inst *inst)
{
const struct hfi_ops *ops = inst->core->ops;
int ret;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
reinit_completion(&inst->done);
ret = ops->session_abort(inst);
if (ret)
return ret;
ret = wait_session_msg(inst);
if (ret)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(hfi_session_abort);
int hfi_session_load_res(struct venus_inst *inst)
{
const struct hfi_ops *ops = inst->core->ops;
int ret;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
if (inst->state != INST_INIT)
return -EINVAL;
reinit_completion(&inst->done);
ret = ops->session_load_res(inst);
if (ret)
return ret;
ret = wait_session_msg(inst);
if (ret)
return ret;
inst->state = INST_LOAD_RESOURCES;
return 0;
}
int hfi_session_unload_res(struct venus_inst *inst)
{
const struct hfi_ops *ops = inst->core->ops;
int ret;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
if (inst->state != INST_STOP)
return -EINVAL;
reinit_completion(&inst->done);
ret = ops->session_release_res(inst);
if (ret)
return ret;
ret = wait_session_msg(inst);
if (ret)
return ret;
inst->state = INST_RELEASE_RESOURCES;
return 0;
}
EXPORT_SYMBOL_GPL(hfi_session_unload_res);
int hfi_session_flush(struct venus_inst *inst, u32 type, bool block)
{
const struct hfi_ops *ops = inst->core->ops;
int ret;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
reinit_completion(&inst->done);
ret = ops->session_flush(inst, type);
if (ret)
return ret;
if (block) {
ret = wait_session_msg(inst);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(hfi_session_flush);
int hfi_session_set_buffers(struct venus_inst *inst, struct hfi_buffer_desc *bd)
{
const struct hfi_ops *ops = inst->core->ops;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
return ops->session_set_buffers(inst, bd);
}
int hfi_session_unset_buffers(struct venus_inst *inst,
struct hfi_buffer_desc *bd)
{
const struct hfi_ops *ops = inst->core->ops;
int ret;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
reinit_completion(&inst->done);
ret = ops->session_unset_buffers(inst, bd);
if (ret)
return ret;
if (!bd->response_required)
return 0;
ret = wait_session_msg(inst);
if (ret)
return ret;
return 0;
}
int hfi_session_get_property(struct venus_inst *inst, u32 ptype,
union hfi_get_property *hprop)
{
const struct hfi_ops *ops = inst->core->ops;
int ret;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
if (inst->state < INST_INIT || inst->state >= INST_STOP)
return -EINVAL;
reinit_completion(&inst->done);
ret = ops->session_get_property(inst, ptype);
if (ret)
return ret;
ret = wait_session_msg(inst);
if (ret)
return ret;
*hprop = inst->hprop;
return 0;
}
EXPORT_SYMBOL_GPL(hfi_session_get_property);
int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata)
{
const struct hfi_ops *ops = inst->core->ops;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
if (inst->state < INST_INIT || inst->state >= INST_STOP)
return -EINVAL;
return ops->session_set_property(inst, ptype, pdata);
}
EXPORT_SYMBOL_GPL(hfi_session_set_property);
int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
{
const struct hfi_ops *ops = inst->core->ops;
if (test_bit(0, &inst->core->sys_error))
return -EIO;
if (fd->buffer_type == HFI_BUFFER_INPUT)
return ops->session_etb(inst, fd);
else if (fd->buffer_type == HFI_BUFFER_OUTPUT ||
fd->buffer_type == HFI_BUFFER_OUTPUT2)
return ops->session_ftb(inst, fd);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(hfi_session_process_buf);
irqreturn_t hfi_isr_thread(int irq, void *dev_id)
{
struct venus_core *core = dev_id;
return core->ops->isr_thread(core);
}
irqreturn_t hfi_isr(int irq, void *dev)
{
struct venus_core *core = dev;
return core->ops->isr(core);
}
int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops)
{
if (!ops)
return -EINVAL;
atomic_set(&core->insts_count, 0);
core->core_ops = ops;
core->state = CORE_UNINIT;
init_completion(&core->done);
pkt_set_version(core->res->hfi_version);
return venus_hfi_create(core);
}
void hfi_destroy(struct venus_core *core)
{
venus_hfi_destroy(core);
}
void hfi_reinit(struct venus_core *core)
{
venus_hfi_queues_reinit(core);
}
| linux-master | drivers/media/platform/qcom/venus/hfi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/io.h>
#include <linux/ioctl.h>
#include <linux/delay.h>
#include <linux/devcoredump.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include "core.h"
#include "firmware.h"
#include "pm_helpers.h"
#include "hfi_venus_io.h"
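/*
 * Snapshot the firmware memory region into a devcoredump so the state
 * at the time of a system error can be inspected post-mortem.
 */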
static void venus_coredump(struct venus_core *core)
{
struct device *dev;
phys_addr_t mem_phys;
size_t mem_size;
void *mem_va;
void *data;
dev = core->dev;
mem_phys = core->fw.mem_phys;
mem_size = core->fw.mem_size;
mem_va = memremap(mem_phys, mem_size, MEMREMAP_WC);
if (!mem_va)
return;
data = vmalloc(mem_size);
if (!data) {
memunmap(mem_va);
return;
}
memcpy(data, mem_va, mem_size);
memunmap(mem_va);
dev_coredumpv(dev, data, mem_size, GFP_KERNEL);
}
static void venus_event_notify(struct venus_core *core, u32 event)
{
struct venus_inst *inst;
switch (event) {
case EVT_SYS_WATCHDOG_TIMEOUT:
case EVT_SYS_ERROR:
break;
default:
return;
}
mutex_lock(&core->lock);
set_bit(0, &core->sys_error);
list_for_each_entry(inst, &core->instances, list)
inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL);
mutex_unlock(&core->lock);
disable_irq_nosync(core->irq);
schedule_delayed_work(&core->work, msecs_to_jiffies(10));
}
static const struct hfi_core_ops venus_core_ops = {
.event_notify = venus_event_notify,
};
#define RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS 10
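/*
 * System error (SSR) recovery: deinit the core, wait for the decoder
 * and encoder devices to go idle, shut down and coredump the firmware,
 * then reinit the HFI queues and boot the firmware again. If any step
 * fails, the work is rescheduled and the warning is ratelimited.
 */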
static void venus_sys_error_handler(struct work_struct *work)
{
struct venus_core *core =
container_of(work, struct venus_core, work.work);
int ret, i, max_attempts = RPM_WAIT_FOR_IDLE_MAX_ATTEMPTS;
const char *err_msg = "";
bool failed = false;
ret = pm_runtime_get_sync(core->dev);
if (ret < 0) {
err_msg = "resume runtime PM";
max_attempts = 0;
failed = true;
}
core->ops->core_deinit(core);
core->state = CORE_UNINIT;
for (i = 0; i < max_attempts; i++) {
if (!pm_runtime_active(core->dev_dec) && !pm_runtime_active(core->dev_enc))
break;
msleep(10);
}
mutex_lock(&core->lock);
venus_shutdown(core);
venus_coredump(core);
pm_runtime_put_sync(core->dev);
for (i = 0; i < max_attempts; i++) {
if (!core->pmdomains[0] || !pm_runtime_active(core->pmdomains[0]))
break;
usleep_range(1000, 1500);
}
hfi_reinit(core);
ret = pm_runtime_get_sync(core->dev);
if (ret < 0) {
err_msg = "resume runtime PM";
failed = true;
}
ret = venus_boot(core);
if (ret && !failed) {
err_msg = "boot Venus";
failed = true;
}
ret = hfi_core_resume(core, true);
if (ret && !failed) {
err_msg = "resume HFI";
failed = true;
}
enable_irq(core->irq);
mutex_unlock(&core->lock);
ret = hfi_core_init(core);
if (ret && !failed) {
err_msg = "init HFI";
failed = true;
}
pm_runtime_put_sync(core->dev);
if (failed) {
disable_irq_nosync(core->irq);
dev_warn_ratelimited(core->dev,
"System error has occurred, recovery failed to %s\n",
err_msg);
schedule_delayed_work(&core->work, msecs_to_jiffies(10));
return;
}
dev_warn(core->dev, "system error has occurred (recovered)\n");
mutex_lock(&core->lock);
clear_bit(0, &core->sys_error);
wake_up_all(&core->sys_err_done);
mutex_unlock(&core->lock);
}
static u32 to_v4l2_codec_type(u32 codec)
{
switch (codec) {
case HFI_VIDEO_CODEC_H264:
return V4L2_PIX_FMT_H264;
case HFI_VIDEO_CODEC_H263:
return V4L2_PIX_FMT_H263;
case HFI_VIDEO_CODEC_MPEG1:
return V4L2_PIX_FMT_MPEG1;
case HFI_VIDEO_CODEC_MPEG2:
return V4L2_PIX_FMT_MPEG2;
case HFI_VIDEO_CODEC_MPEG4:
return V4L2_PIX_FMT_MPEG4;
case HFI_VIDEO_CODEC_VC1:
return V4L2_PIX_FMT_VC1_ANNEX_G;
case HFI_VIDEO_CODEC_VP8:
return V4L2_PIX_FMT_VP8;
case HFI_VIDEO_CODEC_VP9:
return V4L2_PIX_FMT_VP9;
case HFI_VIDEO_CODEC_DIVX:
case HFI_VIDEO_CODEC_DIVX_311:
return V4L2_PIX_FMT_XVID;
default:
return 0;
}
}
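/*
 * On HFI 1.x the per-codec capabilities are only reported once a
 * session for that codec exists, so run a short-lived dummy session
 * init/deinit cycle for every advertised codec.
 */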
static int venus_enumerate_codecs(struct venus_core *core, u32 type)
{
const struct hfi_inst_ops dummy_ops = {};
struct venus_inst *inst;
u32 codec, codecs;
unsigned int i;
int ret;
if (core->res->hfi_version != HFI_VERSION_1XX)
return 0;
inst = kzalloc(sizeof(*inst), GFP_KERNEL);
if (!inst)
return -ENOMEM;
mutex_init(&inst->lock);
inst->core = core;
inst->session_type = type;
if (type == VIDC_SESSION_TYPE_DEC)
codecs = core->dec_codecs;
else
codecs = core->enc_codecs;
ret = hfi_session_create(inst, &dummy_ops);
if (ret)
goto err;
for (i = 0; i < MAX_CODEC_NUM; i++) {
codec = (1UL << i) & codecs;
if (!codec)
continue;
ret = hfi_session_init(inst, to_v4l2_codec_type(codec));
if (ret)
goto done;
ret = hfi_session_deinit(inst);
if (ret)
goto done;
}
done:
hfi_session_destroy(inst);
err:
mutex_destroy(&inst->lock);
kfree(inst);
return ret;
}
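/*
 * IRIS2-based parts moved the CPU, wrapper and AON register blocks, so
 * resolve the per-block base addresses once at probe time instead of
 * hard-coding offsets at each access.
 */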
static void venus_assign_register_offsets(struct venus_core *core)
{
if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
core->vbif_base = core->base + VBIF_BASE;
core->cpu_base = core->base + CPU_BASE_V6;
core->cpu_cs_base = core->base + CPU_CS_BASE_V6;
core->cpu_ic_base = core->base + CPU_IC_BASE_V6;
core->wrapper_base = core->base + WRAPPER_BASE_V6;
core->wrapper_tz_base = core->base + WRAPPER_TZ_BASE_V6;
core->aon_base = core->base + AON_BASE_V6;
} else {
core->vbif_base = core->base + VBIF_BASE;
core->cpu_base = core->base + CPU_BASE;
core->cpu_cs_base = core->base + CPU_CS_BASE;
core->cpu_ic_base = core->base + CPU_IC_BASE;
core->wrapper_base = core->base + WRAPPER_BASE;
core->wrapper_tz_base = NULL;
core->aon_base = NULL;
}
}
static irqreturn_t venus_isr_thread(int irq, void *dev_id)
{
struct venus_core *core = dev_id;
irqreturn_t ret;
ret = hfi_isr_thread(irq, dev_id);
if (ret == IRQ_HANDLED && venus_fault_inject_ssr())
hfi_core_trigger_ssr(core, HFI_TEST_SSR_SW_ERR_FATAL);
return ret;
}
static int venus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct venus_core *core;
int ret;
core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
if (!core)
return -ENOMEM;
core->dev = dev;
core->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(core->base))
return PTR_ERR(core->base);
core->video_path = devm_of_icc_get(dev, "video-mem");
if (IS_ERR(core->video_path))
return PTR_ERR(core->video_path);
core->cpucfg_path = devm_of_icc_get(dev, "cpu-cfg");
if (IS_ERR(core->cpucfg_path))
return PTR_ERR(core->cpucfg_path);
core->irq = platform_get_irq(pdev, 0);
if (core->irq < 0)
return core->irq;
core->res = of_device_get_match_data(dev);
if (!core->res)
return -ENODEV;
mutex_init(&core->pm_lock);
core->pm_ops = venus_pm_get(core->res->hfi_version);
if (!core->pm_ops)
return -ENODEV;
if (core->pm_ops->core_get) {
ret = core->pm_ops->core_get(core);
if (ret)
return ret;
}
ret = dma_set_mask_and_coherent(dev, core->res->dma_mask);
if (ret)
goto err_core_put;
dma_set_max_seg_size(dev, UINT_MAX);
INIT_LIST_HEAD(&core->instances);
mutex_init(&core->lock);
INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
init_waitqueue_head(&core->sys_err_done);
ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, venus_isr_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"venus", core);
if (ret)
goto err_core_put;
ret = hfi_create(core, &venus_core_ops);
if (ret)
goto err_core_put;
venus_assign_register_offsets(core);
ret = v4l2_device_register(dev, &core->v4l2_dev);
if (ret)
goto err_core_deinit;
platform_set_drvdata(pdev, core);
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
goto err_runtime_disable;
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret)
goto err_runtime_disable;
ret = venus_firmware_init(core);
if (ret)
goto err_of_depopulate;
ret = venus_boot(core);
if (ret)
goto err_firmware_deinit;
ret = hfi_core_resume(core, true);
if (ret)
goto err_venus_shutdown;
ret = hfi_core_init(core);
if (ret)
goto err_venus_shutdown;
ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
if (ret)
goto err_venus_shutdown;
ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC);
if (ret)
goto err_venus_shutdown;
ret = pm_runtime_put_sync(dev);
if (ret) {
pm_runtime_get_noresume(dev);
goto err_dev_unregister;
}
venus_dbgfs_init(core);
return 0;
err_dev_unregister:
v4l2_device_unregister(&core->v4l2_dev);
err_venus_shutdown:
venus_shutdown(core);
err_firmware_deinit:
venus_firmware_deinit(core);
err_of_depopulate:
of_platform_depopulate(dev);
err_runtime_disable:
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
pm_runtime_disable(dev);
hfi_destroy(core);
err_core_deinit:
hfi_core_deinit(core, false);
err_core_put:
if (core->pm_ops->core_put)
core->pm_ops->core_put(core);
return ret;
}
static void venus_remove(struct platform_device *pdev)
{
struct venus_core *core = platform_get_drvdata(pdev);
const struct venus_pm_ops *pm_ops = core->pm_ops;
struct device *dev = core->dev;
int ret;
ret = pm_runtime_get_sync(dev);
WARN_ON(ret < 0);
ret = hfi_core_deinit(core, true);
WARN_ON(ret);
venus_shutdown(core);
of_platform_depopulate(dev);
venus_firmware_deinit(core);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
if (pm_ops->core_put)
pm_ops->core_put(core);
v4l2_device_unregister(&core->v4l2_dev);
hfi_destroy(core);
mutex_destroy(&core->pm_lock);
mutex_destroy(&core->lock);
venus_dbgfs_deinit(core);
}
static void venus_core_shutdown(struct platform_device *pdev)
{
struct venus_core *core = platform_get_drvdata(pdev);
pm_runtime_get_sync(core->dev);
venus_shutdown(core);
venus_firmware_deinit(core);
pm_runtime_put_sync(core->dev);
}
static __maybe_unused int venus_runtime_suspend(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
const struct venus_pm_ops *pm_ops = core->pm_ops;
int ret;
ret = hfi_core_suspend(core);
if (ret)
return ret;
if (pm_ops->core_power) {
ret = pm_ops->core_power(core, POWER_OFF);
if (ret)
return ret;
}
ret = icc_set_bw(core->cpucfg_path, 0, 0);
if (ret)
goto err_cpucfg_path;
ret = icc_set_bw(core->video_path, 0, 0);
if (ret)
goto err_video_path;
return ret;
err_video_path:
icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0);
err_cpucfg_path:
if (pm_ops->core_power)
pm_ops->core_power(core, POWER_ON);
return ret;
}
static __maybe_unused int venus_runtime_resume(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
const struct venus_pm_ops *pm_ops = core->pm_ops;
int ret;
ret = icc_set_bw(core->video_path, kbps_to_icc(20000), 0);
if (ret)
return ret;
ret = icc_set_bw(core->cpucfg_path, kbps_to_icc(1000), 0);
if (ret)
return ret;
if (pm_ops->core_power) {
ret = pm_ops->core_power(core, POWER_ON);
if (ret)
return ret;
}
return hfi_core_resume(core, false);
}
static const struct dev_pm_ops venus_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(venus_runtime_suspend, venus_runtime_resume, NULL)
};
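/*
 * Per-SoC resource tables. The first column of a freq_tbl entry is the
 * codec load in 16x16 macroblocks per second, e.g. for 1920x1088@30:
 * (1920 / 16) * (1088 / 16) * 30 = 244800; the second column is the
 * core clock rate needed to sustain that load.
 */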
static const struct freq_tbl msm8916_freq_table[] = {
{ 352800, 228570000 }, /* 1920x1088 @ 30 + 1280x720 @ 30 */
{ 244800, 160000000 }, /* 1920x1088 @ 30 */
{ 108000, 100000000 }, /* 1280x720 @ 30 */
};
static const struct reg_val msm8916_reg_preset[] = {
{ 0xe0020, 0x05555556 },
{ 0xe0024, 0x05555556 },
{ 0x80124, 0x00000003 },
};
static const struct venus_resources msm8916_res = {
.freq_tbl = msm8916_freq_table,
.freq_tbl_size = ARRAY_SIZE(msm8916_freq_table),
.reg_tbl = msm8916_reg_preset,
.reg_tbl_size = ARRAY_SIZE(msm8916_reg_preset),
.clks = { "core", "iface", "bus", },
.clks_num = 3,
.max_load = 352800, /* 720p@30 + 1080p@30 */
.hfi_version = HFI_VERSION_1XX,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xddc00000 - 1,
.fwname = "qcom/venus-1.8/venus.mdt",
};
static const struct freq_tbl msm8996_freq_table[] = {
{ 1944000, 520000000 }, /* 4k UHD @ 60 (decode only) */
{ 972000, 520000000 }, /* 4k UHD @ 30 */
{ 489600, 346666667 }, /* 1080p @ 60 */
{ 244800, 150000000 }, /* 1080p @ 30 */
{ 108000, 75000000 }, /* 720p @ 30 */
};
static const struct reg_val msm8996_reg_preset[] = {
{ 0x80010, 0xffffffff },
{ 0x80018, 0x00001556 },
{ 0x8001C, 0x00001556 },
};
static const struct venus_resources msm8996_res = {
.freq_tbl = msm8996_freq_table,
.freq_tbl_size = ARRAY_SIZE(msm8996_freq_table),
.reg_tbl = msm8996_reg_preset,
.reg_tbl_size = ARRAY_SIZE(msm8996_reg_preset),
.clks = {"core", "iface", "bus", "mbus" },
.clks_num = 4,
.vcodec0_clks = { "core" },
.vcodec1_clks = { "core" },
.vcodec_clks_num = 1,
.max_load = 2563200,
.hfi_version = HFI_VERSION_3XX,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xddc00000 - 1,
.fwname = "qcom/venus-4.2/venus.mdt",
};
static const struct freq_tbl sdm660_freq_table[] = {
{ 979200, 518400000 },
{ 489600, 441600000 },
{ 432000, 404000000 },
{ 244800, 320000000 },
{ 216000, 269330000 },
{ 108000, 133330000 },
};
static const struct reg_val sdm660_reg_preset[] = {
{ 0x80010, 0x001f001f },
{ 0x80018, 0x00000156 },
{ 0x8001c, 0x00000156 },
};
static const struct bw_tbl sdm660_bw_table_enc[] = {
{ 979200, 1044000, 0, 2446336, 0 }, /* 4k UHD @ 30 */
{ 864000, 887000, 0, 2108416, 0 }, /* 720p @ 240 */
{ 489600, 666000, 0, 1207296, 0 }, /* 1080p @ 60 */
{ 432000, 578000, 0, 1058816, 0 }, /* 720p @ 120 */
{ 244800, 346000, 0, 616448, 0 }, /* 1080p @ 30 */
{ 216000, 293000, 0, 534528, 0 }, /* 720p @ 60 */
{ 108000, 151000, 0, 271360, 0 }, /* 720p @ 30 */
};
static const struct bw_tbl sdm660_bw_table_dec[] = {
{ 979200, 2365000, 0, 1892000, 0 }, /* 4k UHD @ 30 */
{ 864000, 1978000, 0, 1554000, 0 }, /* 720p @ 240 */
{ 489600, 1133000, 0, 895000, 0 }, /* 1080p @ 60 */
{ 432000, 994000, 0, 781000, 0 }, /* 720p @ 120 */
{ 244800, 580000, 0, 460000, 0 }, /* 1080p @ 30 */
{ 216000, 501000, 0, 301000, 0 }, /* 720p @ 60 */
{ 108000, 255000, 0, 202000, 0 }, /* 720p @ 30 */
};
static const struct venus_resources sdm660_res = {
.freq_tbl = sdm660_freq_table,
.freq_tbl_size = ARRAY_SIZE(sdm660_freq_table),
.reg_tbl = sdm660_reg_preset,
.reg_tbl_size = ARRAY_SIZE(sdm660_reg_preset),
.bw_tbl_enc = sdm660_bw_table_enc,
.bw_tbl_enc_size = ARRAY_SIZE(sdm660_bw_table_enc),
.bw_tbl_dec = sdm660_bw_table_dec,
.bw_tbl_dec_size = ARRAY_SIZE(sdm660_bw_table_dec),
.clks = {"core", "iface", "bus", "bus_throttle" },
.clks_num = 4,
.vcodec0_clks = { "vcodec0_core" },
.vcodec1_clks = { "vcodec0_core" },
.vcodec_clks_num = 1,
.vcodec_num = 1,
.max_load = 1036800,
.hfi_version = HFI_VERSION_3XX,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
.cp_start = 0,
.cp_size = 0x79000000,
.cp_nonpixel_start = 0x1000000,
.cp_nonpixel_size = 0x28000000,
.dma_mask = 0xd9000000 - 1,
.fwname = "qcom/venus-4.4/venus.mdt",
};
static const struct freq_tbl sdm845_freq_table[] = {
{ 3110400, 533000000 }, /* 4096x2160@90 */
{ 2073600, 444000000 }, /* 4096x2160@60 */
{ 1944000, 404000000 }, /* 3840x2160@60 */
{ 972000, 330000000 }, /* 3840x2160@30 */
{ 489600, 200000000 }, /* 1920x1080@60 */
{ 244800, 100000000 }, /* 1920x1080@30 */
};
static const struct bw_tbl sdm845_bw_table_enc[] = {
{ 1944000, 1612000, 0, 2416000, 0 }, /* 3840x2160@60 */
{ 972000, 951000, 0, 1434000, 0 }, /* 3840x2160@30 */
{ 489600, 723000, 0, 973000, 0 }, /* 1920x1080@60 */
{ 244800, 370000, 0, 495000, 0 }, /* 1920x1080@30 */
};
static const struct bw_tbl sdm845_bw_table_dec[] = {
{ 2073600, 3929000, 0, 5551000, 0 }, /* 4096x2160@60 */
{ 1036800, 1987000, 0, 2797000, 0 }, /* 4096x2160@30 */
{ 489600, 1040000, 0, 1298000, 0 }, /* 1920x1080@60 */
{ 244800, 530000, 0, 659000, 0 }, /* 1920x1080@30 */
};
static const struct venus_resources sdm845_res = {
.freq_tbl = sdm845_freq_table,
.freq_tbl_size = ARRAY_SIZE(sdm845_freq_table),
.bw_tbl_enc = sdm845_bw_table_enc,
.bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc),
.bw_tbl_dec = sdm845_bw_table_dec,
.bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
.vcodec0_clks = { "core", "bus" },
.vcodec1_clks = { "core", "bus" },
.vcodec_clks_num = 2,
.max_load = 3110400, /* 4096x2160@90 */
.hfi_version = HFI_VERSION_4XX,
.vpu_version = VPU_VERSION_AR50,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
.fwname = "qcom/venus-5.2/venus.mdt",
};
static const struct venus_resources sdm845_res_v2 = {
.freq_tbl = sdm845_freq_table,
.freq_tbl_size = ARRAY_SIZE(sdm845_freq_table),
.bw_tbl_enc = sdm845_bw_table_enc,
.bw_tbl_enc_size = ARRAY_SIZE(sdm845_bw_table_enc),
.bw_tbl_dec = sdm845_bw_table_dec,
.bw_tbl_dec_size = ARRAY_SIZE(sdm845_bw_table_dec),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
.vcodec0_clks = { "vcodec0_core", "vcodec0_bus" },
.vcodec1_clks = { "vcodec1_core", "vcodec1_bus" },
.vcodec_clks_num = 2,
.vcodec_pmdomains = { "venus", "vcodec0", "vcodec1" },
.vcodec_pmdomains_num = 3,
.opp_pmdomain = (const char *[]) { "cx", NULL },
.vcodec_num = 2,
.max_load = 3110400, /* 4096x2160@90 */
.hfi_version = HFI_VERSION_4XX,
.vpu_version = VPU_VERSION_AR50,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
.cp_start = 0,
.cp_size = 0x70800000,
.cp_nonpixel_start = 0x1000000,
.cp_nonpixel_size = 0x24800000,
.fwname = "qcom/venus-5.2/venus.mdt",
};
static const struct freq_tbl sc7180_freq_table[] = {
{ 0, 500000000 },
{ 0, 434000000 },
{ 0, 340000000 },
{ 0, 270000000 },
{ 0, 150000000 },
};
static const struct bw_tbl sc7180_bw_table_enc[] = {
{ 972000, 750000, 0, 0, 0 }, /* 3840x2160@30 */
{ 489600, 451000, 0, 0, 0 }, /* 1920x1080@60 */
{ 244800, 234000, 0, 0, 0 }, /* 1920x1080@30 */
};
static const struct bw_tbl sc7180_bw_table_dec[] = {
{ 1036800, 1386000, 0, 1875000, 0 }, /* 4096x2160@30 */
{ 489600, 865000, 0, 1146000, 0 }, /* 1920x1080@60 */
{ 244800, 530000, 0, 583000, 0 }, /* 1920x1080@30 */
};
static const struct venus_resources sc7180_res = {
.freq_tbl = sc7180_freq_table,
.freq_tbl_size = ARRAY_SIZE(sc7180_freq_table),
.bw_tbl_enc = sc7180_bw_table_enc,
.bw_tbl_enc_size = ARRAY_SIZE(sc7180_bw_table_enc),
.bw_tbl_dec = sc7180_bw_table_dec,
.bw_tbl_dec_size = ARRAY_SIZE(sc7180_bw_table_dec),
.clks = {"core", "iface", "bus" },
.clks_num = 3,
.vcodec0_clks = { "vcodec0_core", "vcodec0_bus" },
.vcodec_clks_num = 2,
.vcodec_pmdomains = { "venus", "vcodec0" },
.vcodec_pmdomains_num = 2,
.opp_pmdomain = (const char *[]) { "cx", NULL },
.vcodec_num = 1,
.hfi_version = HFI_VERSION_4XX,
.vpu_version = VPU_VERSION_AR50,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
.cp_start = 0,
.cp_size = 0x70800000,
.cp_nonpixel_start = 0x1000000,
.cp_nonpixel_size = 0x24800000,
.fwname = "qcom/venus-5.4/venus.mdt",
};
static const struct freq_tbl sm8250_freq_table[] = {
{ 0, 444000000 },
{ 0, 366000000 },
{ 0, 338000000 },
{ 0, 240000000 },
};
static const struct bw_tbl sm8250_bw_table_enc[] = {
{ 1944000, 1954000, 0, 3711000, 0 }, /* 3840x2160@60 */
{ 972000, 996000, 0, 1905000, 0 }, /* 3840x2160@30 */
{ 489600, 645000, 0, 977000, 0 }, /* 1920x1080@60 */
{ 244800, 332000, 0, 498000, 0 }, /* 1920x1080@30 */
};
static const struct bw_tbl sm8250_bw_table_dec[] = {
{ 2073600, 2403000, 0, 4113000, 0 }, /* 4096x2160@60 */
{ 1036800, 1224000, 0, 2079000, 0 }, /* 4096x2160@30 */
{ 489600, 812000, 0, 998000, 0 }, /* 1920x1080@60 */
{ 244800, 416000, 0, 509000, 0 }, /* 1920x1080@30 */
};
static const struct reg_val sm8250_reg_preset[] = {
{ 0xb0088, 0 },
};
static const struct venus_resources sm8250_res = {
.freq_tbl = sm8250_freq_table,
.freq_tbl_size = ARRAY_SIZE(sm8250_freq_table),
.reg_tbl = sm8250_reg_preset,
.reg_tbl_size = ARRAY_SIZE(sm8250_reg_preset),
.bw_tbl_enc = sm8250_bw_table_enc,
.bw_tbl_enc_size = ARRAY_SIZE(sm8250_bw_table_enc),
.bw_tbl_dec = sm8250_bw_table_dec,
.bw_tbl_dec_size = ARRAY_SIZE(sm8250_bw_table_dec),
.clks = {"core", "iface"},
.clks_num = 2,
.resets = { "bus", "core" },
.resets_num = 2,
.vcodec0_clks = { "vcodec0_core" },
.vcodec_clks_num = 1,
.vcodec_pmdomains = { "venus", "vcodec0" },
.vcodec_pmdomains_num = 2,
.opp_pmdomain = (const char *[]) { "mx", NULL },
.vcodec_num = 1,
.max_load = 7833600,
.hfi_version = HFI_VERSION_6XX,
.vpu_version = VPU_VERSION_IRIS2,
.num_vpp_pipes = 4,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
.fwname = "qcom/vpu-1.0/venus.mbn",
};
static const struct freq_tbl sc7280_freq_table[] = {
{ 0, 460000000 },
{ 0, 424000000 },
{ 0, 335000000 },
{ 0, 240000000 },
{ 0, 133333333 },
};
static const struct bw_tbl sc7280_bw_table_enc[] = {
{ 1944000, 1896000, 0, 3657000, 0 }, /* 3840x2160@60 */
{ 972000, 968000, 0, 1848000, 0 }, /* 3840x2160@30 */
{ 489600, 618000, 0, 941000, 0 }, /* 1920x1080@60 */
{ 244800, 318000, 0, 480000, 0 }, /* 1920x1080@30 */
};
static const struct bw_tbl sc7280_bw_table_dec[] = {
{ 2073600, 2128000, 0, 3831000, 0 }, /* 4096x2160@60 */
{ 1036800, 1085000, 0, 1937000, 0 }, /* 4096x2160@30 */
{ 489600, 779000, 0, 998000, 0 }, /* 1920x1080@60 */
{ 244800, 400000, 0, 509000, 0 }, /* 1920x1080@30 */
};
static const struct reg_val sm7280_reg_preset[] = {
{ 0xb0088, 0 },
};
static const struct hfi_ubwc_config sc7280_ubwc_config = {
0, 0, {1, 1, 1, 0, 0, 0}, 8, 32, 14, 0, 0, {0, 0}
};
static const struct venus_resources sc7280_res = {
.freq_tbl = sc7280_freq_table,
.freq_tbl_size = ARRAY_SIZE(sc7280_freq_table),
.reg_tbl = sm7280_reg_preset,
.reg_tbl_size = ARRAY_SIZE(sm7280_reg_preset),
.bw_tbl_enc = sc7280_bw_table_enc,
.bw_tbl_enc_size = ARRAY_SIZE(sc7280_bw_table_enc),
.bw_tbl_dec = sc7280_bw_table_dec,
.bw_tbl_dec_size = ARRAY_SIZE(sc7280_bw_table_dec),
.ubwc_conf = &sc7280_ubwc_config,
.clks = {"core", "bus", "iface"},
.clks_num = 3,
.vcodec0_clks = {"vcodec_core", "vcodec_bus"},
.vcodec_clks_num = 2,
.vcodec_pmdomains = { "venus", "vcodec0" },
.vcodec_pmdomains_num = 2,
.opp_pmdomain = (const char *[]) { "cx", NULL },
.vcodec_num = 1,
.hfi_version = HFI_VERSION_6XX,
.vpu_version = VPU_VERSION_IRIS2_1,
.num_vpp_pipes = 1,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
.dma_mask = 0xe0000000 - 1,
.fwname = "qcom/vpu-2.0/venus.mbn",
};
static const struct of_device_id venus_dt_match[] = {
{ .compatible = "qcom,msm8916-venus", .data = &msm8916_res, },
{ .compatible = "qcom,msm8996-venus", .data = &msm8996_res, },
{ .compatible = "qcom,sdm660-venus", .data = &sdm660_res, },
{ .compatible = "qcom,sdm845-venus", .data = &sdm845_res, },
{ .compatible = "qcom,sdm845-venus-v2", .data = &sdm845_res_v2, },
{ .compatible = "qcom,sc7180-venus", .data = &sc7180_res, },
{ .compatible = "qcom,sc7280-venus", .data = &sc7280_res, },
{ .compatible = "qcom,sm8250-venus", .data = &sm8250_res, },
{ }
};
MODULE_DEVICE_TABLE(of, venus_dt_match);
static struct platform_driver qcom_venus_driver = {
.probe = venus_probe,
.remove_new = venus_remove,
.driver = {
.name = "qcom-venus",
.of_match_table = venus_dt_match,
.pm = &venus_pm_ops,
},
.shutdown = venus_core_shutdown,
};
module_platform_driver(qcom_venus_driver);
MODULE_ALIAS("platform:qcom-venus");
MODULE_DESCRIPTION("Qualcomm Venus video encoder and decoder driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/platform/qcom/venus/core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#include "hfi_platform.h"
static const struct hfi_plat_caps caps[] = {
{
.codec = HFI_VIDEO_CODEC_H264,
.domain = VIDC_SESSION_TYPE_DEC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 8192, 1},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 8192, 1},
/* ((8192 * 4320) / 256) */
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 138240, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 220000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 65536, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 65536, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 960, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
.num_caps = 9,
.pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52},
.pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52},
.pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52},
.pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52},
.pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52},
.num_pl = 5,
.fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
.fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
.num_fmts = 4,
}, {
.codec = HFI_VIDEO_CODEC_HEVC,
.domain = VIDC_SESSION_TYPE_DEC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 8192, 1},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 8192, 1},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 138240, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 220000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 65536, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 65536, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 960, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
.caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
.num_caps = 10,
.pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
.pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
.num_pl = 2,
.fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
.fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
.fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010},
.fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.num_fmts = 7,
}, {
.codec = HFI_VIDEO_CODEC_VP8,
.domain = VIDC_SESSION_TYPE_DEC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 4096, 1},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 4096, 1},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 36864, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 100000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 65536, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 65536, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 4423680, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
.caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
.num_caps = 10,
.pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0},
.pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1},
.pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2},
.pl[3] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_3},
.num_pl = 4,
.fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
.fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
.num_fmts = 4,
}, {
.codec = HFI_VIDEO_CODEC_VP9,
.domain = VIDC_SESSION_TYPE_DEC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 8192, 1},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 8192, 1},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 138240, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 220000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 65536, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 65536, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 960, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
.caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 3, 1},
.num_caps = 10,
.pl[0] = {HFI_VP9_PROFILE_P0, 200},
.pl[1] = {HFI_VP9_PROFILE_P2_10B, 200},
.num_pl = 2,
.fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[1] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
.fmts[4] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
.fmts[5] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_P010},
.fmts[6] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.num_fmts = 7,
}, {
.codec = HFI_VIDEO_CODEC_MPEG2,
.domain = VIDC_SESSION_TYPE_DEC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 1920, 1},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 1920, 1},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 8160, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 40000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 65536, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 65536, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 30, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
.caps[9] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 1, 1},
.num_caps = 10,
.pl[0] = {HFI_MPEG2_PROFILE_SIMPLE, HFI_MPEG2_LEVEL_H14},
.pl[1] = {HFI_MPEG2_PROFILE_MAIN, HFI_MPEG2_LEVEL_H14},
.num_pl = 2,
.fmts[0] = {HFI_BUFFER_OUTPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[1] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV12},
.fmts[3] = {HFI_BUFFER_OUTPUT2, HFI_COLOR_FORMAT_NV21},
.num_fmts = 4,
}, {
.codec = HFI_VIDEO_CODEC_H264,
.domain = VIDC_SESSION_TYPE_ENC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 8192, 1},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 8192, 1},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 138240, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 220000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 960, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
.caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
.caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 6, 1},
.caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 2, 1},
.caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1},
.caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1},
.caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 6, 1},
.caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1},
.caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1},
.caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1},
.caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
.caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
.caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
.num_caps = 21,
.pl[0] = {HFI_H264_PROFILE_BASELINE, HFI_H264_LEVEL_52},
.pl[1] = {HFI_H264_PROFILE_MAIN, HFI_H264_LEVEL_52},
.pl[2] = {HFI_H264_PROFILE_HIGH, HFI_H264_LEVEL_52},
.pl[3] = {HFI_H264_PROFILE_CONSTRAINED_BASE, HFI_H264_LEVEL_52},
.pl[4] = {HFI_H264_PROFILE_CONSTRAINED_HIGH, HFI_H264_LEVEL_52},
.num_pl = 5,
.fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
.fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
.num_fmts = 4,
}, {
.codec = HFI_VIDEO_CODEC_HEVC,
.domain = VIDC_SESSION_TYPE_ENC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 8192, 16},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 8192, 16},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 138240, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 160000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 7833600, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 960, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
.caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
.caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 5, 1},
.caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 2, 1},
.caps[12] = {HFI_CAPABILITY_LCU_SIZE, 32, 32, 1},
.caps[13] = {HFI_CAPABILITY_BFRAME, 0, 1, 1},
.caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1},
.caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 51, 1},
.caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 51, 1},
.caps[17] = {HFI_CAPABILITY_B_FRAME_QP, 0, 51, 1},
.caps[18] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
.caps[19] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
.caps[20] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
.caps[21] = {HFI_CAPABILITY_ROTATION, 1, 4, 90},
.caps[22] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16},
.caps[23] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16},
.num_caps = 24,
.pl[0] = {HFI_HEVC_PROFILE_MAIN, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
.pl[1] = {HFI_HEVC_PROFILE_MAIN10, HFI_HEVC_LEVEL_6 | HFI_HEVC_TIER_HIGH0},
.num_pl = 2,
.fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
.fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
.num_fmts = 4,
}, {
.codec = HFI_VIDEO_CODEC_VP8,
.domain = VIDC_SESSION_TYPE_ENC,
.cap_bufs_mode_dynamic = true,
.caps[0] = {HFI_CAPABILITY_FRAME_WIDTH, 128, 4096, 16},
.caps[1] = {HFI_CAPABILITY_FRAME_HEIGHT, 128, 4096, 16},
.caps[2] = {HFI_CAPABILITY_MBS_PER_FRAME, 64, 36864, 1},
.caps[3] = {HFI_CAPABILITY_BITRATE, 1, 74000000, 1},
.caps[4] = {HFI_CAPABILITY_SCALE_X, 8192, 65536, 1},
.caps[5] = {HFI_CAPABILITY_SCALE_Y, 8192, 65536, 1},
.caps[6] = {HFI_CAPABILITY_MBS_PER_SECOND, 64, 4423680, 1},
.caps[7] = {HFI_CAPABILITY_FRAMERATE, 1, 120, 1},
.caps[8] = {HFI_CAPABILITY_MAX_VIDEOCORES, 0, 1, 1},
.caps[9] = {HFI_CAPABILITY_PEAKBITRATE, 32000, 160000000, 1},
.caps[10] = {HFI_CAPABILITY_HIER_P_NUM_ENH_LAYERS, 0, 3, 1},
.caps[11] = {HFI_CAPABILITY_ENC_LTR_COUNT, 0, 2, 1},
.caps[12] = {HFI_CAPABILITY_LCU_SIZE, 16, 16, 1},
.caps[13] = {HFI_CAPABILITY_BFRAME, 0, 0, 1},
.caps[14] = {HFI_CAPABILITY_HIER_P_HYBRID_NUM_ENH_LAYERS, 0, 5, 1},
.caps[15] = {HFI_CAPABILITY_I_FRAME_QP, 0, 127, 1},
.caps[16] = {HFI_CAPABILITY_P_FRAME_QP, 0, 127, 1},
.caps[17] = {HFI_CAPABILITY_MAX_WORKMODES, 1, 2, 1},
.caps[18] = {HFI_CAPABILITY_RATE_CONTROL_MODES, 0x1000001, 0x1000005, 1},
.caps[19] = {HFI_CAPABILITY_BLUR_WIDTH, 96, 4096, 16},
.caps[20] = {HFI_CAPABILITY_BLUR_HEIGHT, 96, 4096, 16},
.caps[21] = {HFI_CAPABILITY_COLOR_SPACE_CONVERSION, 0, 2, 1},
.caps[22] = {HFI_CAPABILITY_ROTATION, 1, 4, 90},
.num_caps = 23,
.pl[0] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_0},
.pl[1] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_1},
.pl[2] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_2},
.pl[3] = {HFI_VPX_PROFILE_MAIN, HFI_VPX_LEVEL_VERSION_3},
.num_pl = 4,
.fmts[0] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12},
.fmts[1] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_NV12_UBWC},
.fmts[2] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_YUV420_TP10_UBWC},
.fmts[3] = {HFI_BUFFER_INPUT, HFI_COLOR_FORMAT_P010},
.num_fmts = 4,
} };
static const struct hfi_plat_caps *get_capabilities(unsigned int *entries)
{
*entries = ARRAY_SIZE(caps);
return caps;
}
static void get_codecs(u32 *enc_codecs, u32 *dec_codecs, u32 *count)
{
*enc_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
HFI_VIDEO_CODEC_VP8;
*dec_codecs = HFI_VIDEO_CODEC_H264 | HFI_VIDEO_CODEC_HEVC |
HFI_VIDEO_CODEC_VP8 | HFI_VIDEO_CODEC_VP9 |
HFI_VIDEO_CODEC_MPEG2;
*count = 8;
}
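/*
 * Per-codec frequency data; venus_helper_session_init() caches the
 * vpp/vsp/low-power values in inst->clk_data for clock scaling.
 */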
static const struct hfi_platform_codec_freq_data codec_freq_data[] = {
{ V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_ENC, 675, 25, 320 },
{ V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_ENC, 675, 25, 320 },
{ V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_ENC, 675, 60, 320 },
{ V4L2_PIX_FMT_MPEG2, VIDC_SESSION_TYPE_DEC, 200, 25, 200 },
{ V4L2_PIX_FMT_H264, VIDC_SESSION_TYPE_DEC, 200, 25, 200 },
{ V4L2_PIX_FMT_HEVC, VIDC_SESSION_TYPE_DEC, 200, 25, 200 },
{ V4L2_PIX_FMT_VP8, VIDC_SESSION_TYPE_DEC, 200, 60, 200 },
{ V4L2_PIX_FMT_VP9, VIDC_SESSION_TYPE_DEC, 200, 60, 200 },
};
static const struct hfi_platform_codec_freq_data *
get_codec_freq_data(u32 session_type, u32 pixfmt)
{
const struct hfi_platform_codec_freq_data *data = codec_freq_data;
unsigned int i, data_size = ARRAY_SIZE(codec_freq_data);
const struct hfi_platform_codec_freq_data *found = NULL;
for (i = 0; i < data_size; i++) {
if (data[i].pixfmt == pixfmt && data[i].session_type == session_type) {
found = &data[i];
break;
}
}
return found;
}
static unsigned long codec_vpp_freq(u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
data = get_codec_freq_data(session_type, codec);
if (data)
return data->vpp_freq;
return 0;
}
static unsigned long codec_vsp_freq(u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
data = get_codec_freq_data(session_type, codec);
if (data)
return data->vsp_freq;
return 0;
}
static unsigned long codec_lp_freq(u32 session_type, u32 codec)
{
const struct hfi_platform_codec_freq_data *data;
data = get_codec_freq_data(session_type, codec);
if (data)
return data->low_power_freq;
return 0;
}
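/*
 * Platform ops exported for Venus v6 hardware; the HFI core presumably
 * selects this table based on the detected platform/HFI version.
 */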
const struct hfi_platform hfi_plat_v6 = {
.codec_vpp_freq = codec_vpp_freq,
.codec_vsp_freq = codec_vsp_freq,
.codec_lp_freq = codec_lp_freq,
.codecs = get_codecs,
.capabilities = get_capabilities,
.bufreq = hfi_plat_bufreq_v6,
};
| linux-master | drivers/media/platform/qcom/venus/hfi_platform_v6.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
#include "hfi_venus_io.h"
#include "hfi_parser.h"
#include "core.h"
#include "helpers.h"
#include "vdec.h"
#include "pm_helpers.h"
/*
 * Three reasons to keep the MPLANE formats (despite the number of planes
 * currently being one):
 * - the MPLANE formats allow only one plane to be used
 * - the downstream driver uses MPLANE formats too
 * - future firmware versions could add support for >1 planes
 */
static const struct venus_format vdec_formats[] = {
[VENUS_FMT_NV12] = {
.pixfmt = V4L2_PIX_FMT_NV12,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
[VENUS_FMT_QC08C] = {
.pixfmt = V4L2_PIX_FMT_QC08C,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
[VENUS_FMT_QC10C] = {
.pixfmt = V4L2_PIX_FMT_QC10C,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
[VENUS_FMT_P010] = {
.pixfmt = V4L2_PIX_FMT_P010,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
[VENUS_FMT_H264] = {
.pixfmt = V4L2_PIX_FMT_H264,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
[VENUS_FMT_VP8] = {
.pixfmt = V4L2_PIX_FMT_VP8,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
[VENUS_FMT_VP9] = {
.pixfmt = V4L2_PIX_FMT_VP9,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
[VENUS_FMT_HEVC] = {
.pixfmt = V4L2_PIX_FMT_HEVC,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
[VENUS_FMT_VC1_ANNEX_G] = {
.pixfmt = V4L2_PIX_FMT_VC1_ANNEX_G,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
[VENUS_FMT_VC1_ANNEX_L] = {
.pixfmt = V4L2_PIX_FMT_VC1_ANNEX_L,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
[VENUS_FMT_MPEG4] = {
.pixfmt = V4L2_PIX_FMT_MPEG4,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
[VENUS_FMT_MPEG2] = {
.pixfmt = V4L2_PIX_FMT_MPEG2,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
[VENUS_FMT_H263] = {
.pixfmt = V4L2_PIX_FMT_H263,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
[VENUS_FMT_XVID] = {
.pixfmt = V4L2_PIX_FMT_XVID,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
.flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
},
};
static const struct venus_format *
find_format(struct venus_inst *inst, u32 pixfmt, u32 type)
{
const struct venus_format *fmt = vdec_formats;
unsigned int size = ARRAY_SIZE(vdec_formats);
unsigned int i;
for (i = 0; i < size; i++) {
if (fmt[i].pixfmt == pixfmt)
break;
}
if (i == size || fmt[i].type != type)
return NULL;
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
!venus_helper_check_codec(inst, fmt[i].pixfmt))
return NULL;
if (V4L2_TYPE_IS_CAPTURE(type) &&
!venus_helper_check_format(inst, fmt[i].pixfmt))
return NULL;
if (V4L2_TYPE_IS_CAPTURE(type) && fmt[i].pixfmt == V4L2_PIX_FMT_QC10C &&
!(inst->bit_depth == VIDC_BITDEPTH_10))
return NULL;
return &fmt[i];
}
static const struct venus_format *
find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type)
{
const struct venus_format *fmt = vdec_formats;
unsigned int size = ARRAY_SIZE(vdec_formats);
unsigned int i, k = 0;
if (index > size)
return NULL;
for (i = 0; i < size; i++) {
bool valid;
if (fmt[i].type != type)
continue;
if (V4L2_TYPE_IS_OUTPUT(type)) {
valid = venus_helper_check_codec(inst, fmt[i].pixfmt);
} else if (V4L2_TYPE_IS_CAPTURE(type)) {
valid = venus_helper_check_format(inst, fmt[i].pixfmt);
if (fmt[i].pixfmt == V4L2_PIX_FMT_QC10C &&
!(inst->bit_depth == VIDC_BITDEPTH_10))
valid = false;
}
if (k == index && valid)
break;
if (valid)
k++;
}
if (i == size)
return NULL;
return &fmt[i];
}
static const struct venus_format *
vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
const struct venus_format *fmt;
u32 szimage;
memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
fmt = find_format(inst, pixmp->pixelformat, f->type);
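/*
 * Per the V4L2 spec, TRY_FMT/S_FMT must not fail just because the
 * requested pixelformat is unsupported; adjust to a supported default
 * instead (NV12 for capture, H264 for output here).
 */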
if (!fmt) {
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
pixmp->pixelformat = V4L2_PIX_FMT_NV12;
else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
pixmp->pixelformat = V4L2_PIX_FMT_H264;
else
return NULL;
fmt = find_format(inst, pixmp->pixelformat, f->type);
if (!fmt)
return NULL;
}
pixmp->width = clamp(pixmp->width, frame_width_min(inst),
frame_width_max(inst));
pixmp->height = clamp(pixmp->height, frame_height_min(inst),
frame_height_max(inst));
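/*
 * Round the decoder capture height up to a multiple of 32, matching the
 * 32-line alignment used elsewhere in this driver (e.g. vdec_inst_init());
 * presumably a hardware macroblock/UBWC constraint.
 */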
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
pixmp->height = ALIGN(pixmp->height, 32);
if (pixmp->field == V4L2_FIELD_ANY)
pixmp->field = V4L2_FIELD_NONE;
pixmp->num_planes = fmt->num_planes;
pixmp->flags = 0;
szimage = venus_helper_get_framesz(pixmp->pixelformat, pixmp->width,
pixmp->height);
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
unsigned int stride = pixmp->width;
if (pixmp->pixelformat == V4L2_PIX_FMT_P010)
stride *= 2;
pfmt[0].sizeimage = szimage;
pfmt[0].bytesperline = ALIGN(stride, 128);
} else {
pfmt[0].sizeimage = clamp_t(u32, pfmt[0].sizeimage, 0, SZ_8M);
pfmt[0].sizeimage = max(pfmt[0].sizeimage, szimage);
pfmt[0].bytesperline = 0;
}
return fmt;
}
static int vdec_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct venus_inst *inst = to_inst(file);
vdec_try_fmt_common(inst, f);
return 0;
}
static int vdec_check_src_change(struct venus_inst *inst)
{
int ret;
if (inst->subscriptions & V4L2_EVENT_SOURCE_CHANGE &&
inst->codec_state == VENUS_DEC_STATE_INIT &&
!inst->reconfig)
return -EINVAL;
if (inst->subscriptions & V4L2_EVENT_SOURCE_CHANGE)
return 0;
/*
 * The code snippet below is a workaround for backward compatibility
 * with applications which don't support V4L2 events. It will be
 * dropped in the future once those applications are fixed.
 */
if (inst->codec_state != VENUS_DEC_STATE_INIT)
goto done;
ret = wait_event_timeout(inst->reconf_wait, inst->reconfig,
msecs_to_jiffies(100));
if (!ret)
return -EINVAL;
if (!(inst->codec_state == VENUS_DEC_STATE_CAPTURE_SETUP) ||
!inst->reconfig)
dev_dbg(inst->core->dev, VDBGH "wrong state\n");
done:
return 0;
}
static int vdec_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct venus_inst *inst = to_inst(file);
const struct venus_format *fmt = NULL;
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
int ret;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
fmt = inst->fmt_cap;
else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
fmt = inst->fmt_out;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
ret = vdec_check_src_change(inst);
if (ret)
return ret;
}
pixmp->pixelformat = fmt->pixfmt;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
pixmp->width = inst->width;
pixmp->height = inst->height;
pixmp->colorspace = inst->colorspace;
pixmp->ycbcr_enc = inst->ycbcr_enc;
pixmp->quantization = inst->quantization;
pixmp->xfer_func = inst->xfer_func;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pixmp->width = inst->out_width;
pixmp->height = inst->out_height;
}
vdec_try_fmt_common(inst, f);
return 0;
}
static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct venus_inst *inst = to_inst(file);
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_pix_format_mplane orig_pixmp;
const struct venus_format *fmt;
struct v4l2_format format;
u32 pixfmt_out = 0, pixfmt_cap = 0;
struct vb2_queue *q;
q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
if (!q)
return -EINVAL;
if (vb2_is_busy(q))
return -EBUSY;
orig_pixmp = *pixmp;
fmt = vdec_try_fmt_common(inst, f);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pixfmt_out = pixmp->pixelformat;
pixfmt_cap = inst->fmt_cap->pixfmt;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
pixfmt_cap = pixmp->pixelformat;
pixfmt_out = inst->fmt_out->pixfmt;
}
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
format.fmt.pix_mp.pixelformat = pixfmt_out;
format.fmt.pix_mp.width = orig_pixmp.width;
format.fmt.pix_mp.height = orig_pixmp.height;
vdec_try_fmt_common(inst, &format);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
inst->out_width = format.fmt.pix_mp.width;
inst->out_height = format.fmt.pix_mp.height;
inst->colorspace = pixmp->colorspace;
inst->ycbcr_enc = pixmp->ycbcr_enc;
inst->quantization = pixmp->quantization;
inst->xfer_func = pixmp->xfer_func;
inst->input_buf_size = pixmp->plane_fmt[0].sizeimage;
}
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
format.fmt.pix_mp.pixelformat = pixfmt_cap;
format.fmt.pix_mp.width = orig_pixmp.width;
format.fmt.pix_mp.height = orig_pixmp.height;
vdec_try_fmt_common(inst, &format);
inst->width = format.fmt.pix_mp.width;
inst->height = format.fmt.pix_mp.height;
inst->crop.top = 0;
inst->crop.left = 0;
inst->crop.width = inst->width;
inst->crop.height = inst->height;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->fmt_out = fmt;
else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
inst->fmt_cap = fmt;
inst->output2_buf_size =
venus_helper_get_framesz(pixfmt_cap, orig_pixmp.width, orig_pixmp.height);
}
return 0;
}
static int
vdec_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
struct venus_inst *inst = to_inst(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
s->r.top = 0;
s->r.left = 0;
switch (s->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP:
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
s->r.width = inst->out_width;
s->r.height = inst->out_height;
break;
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
case V4L2_SEL_TGT_COMPOSE_PADDED:
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
s->r.width = inst->width;
s->r.height = inst->height;
break;
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE:
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
s->r = inst->crop;
break;
default:
return -EINVAL;
}
return 0;
}
static int
vdec_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
strscpy(cap->driver, "qcom-venus", sizeof(cap->driver));
strscpy(cap->card, "Qualcomm Venus video decoder", sizeof(cap->card));
strscpy(cap->bus_info, "platform:qcom-venus", sizeof(cap->bus_info));
return 0;
}
static int vdec_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
struct venus_inst *inst = to_inst(file);
const struct venus_format *fmt;
memset(f->reserved, 0, sizeof(f->reserved));
fmt = find_format_by_index(inst, f->index, f->type);
if (!fmt)
return -EINVAL;
f->pixelformat = fmt->pixfmt;
f->flags = fmt->flags;
return 0;
}
static int vdec_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
struct venus_inst *inst = to_inst(file);
struct v4l2_captureparm *cap = &a->parm.capture;
struct v4l2_fract *timeperframe = &cap->timeperframe;
u64 us_per_frame, fps;
if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return -EINVAL;
memset(cap->reserved, 0, sizeof(cap->reserved));
if (!timeperframe->denominator)
timeperframe->denominator = inst->timeperframe.denominator;
if (!timeperframe->numerator)
timeperframe->numerator = inst->timeperframe.numerator;
cap->readbuffers = 0;
cap->extendedmode = 0;
cap->capability = V4L2_CAP_TIMEPERFRAME;
us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
do_div(us_per_frame, timeperframe->denominator);
if (!us_per_frame)
return -EINVAL;
fps = (u64)USEC_PER_SEC;
do_div(fps, us_per_frame);
inst->fps = fps;
inst->timeperframe = *timeperframe;
return 0;
}
static int vdec_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct venus_inst *inst = to_inst(file);
const struct venus_format *fmt;
fmt = find_format(inst, fsize->pixel_format,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (!fmt) {
fmt = find_format(inst, fsize->pixel_format,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
if (!fmt)
return -EINVAL;
}
if (fsize->index)
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
fsize->stepwise.min_width = frame_width_min(inst);
fsize->stepwise.max_width = frame_width_max(inst);
fsize->stepwise.step_width = frame_width_step(inst);
fsize->stepwise.min_height = frame_height_min(inst);
fsize->stepwise.max_height = frame_height_max(inst);
fsize->stepwise.step_height = frame_height_step(inst);
return 0;
}
static int vdec_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
struct venus_inst *inst = container_of(fh, struct venus_inst, fh);
int ret;
switch (sub->type) {
case V4L2_EVENT_EOS:
return v4l2_event_subscribe(fh, sub, 2, NULL);
case V4L2_EVENT_SOURCE_CHANGE:
ret = v4l2_src_change_event_subscribe(fh, sub);
if (ret)
return ret;
inst->subscriptions |= V4L2_EVENT_SOURCE_CHANGE;
return 0;
case V4L2_EVENT_CTRL:
return v4l2_ctrl_subscribe_event(fh, sub);
default:
return -EINVAL;
}
}
static int
vdec_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *cmd)
{
struct venus_inst *inst = to_inst(file);
struct vb2_queue *dst_vq;
struct hfi_frame_data fdata = {0};
int ret;
ret = v4l2_m2m_ioctl_try_decoder_cmd(file, fh, cmd);
if (ret)
return ret;
mutex_lock(&inst->lock);
if (cmd->cmd == V4L2_DEC_CMD_STOP) {
/*
 * Implement V4L2_DEC_CMD_STOP by enqueueing an empty buffer on the
 * decoder input to signal EOS.
 */
if (!(inst->streamon_out && inst->streamon_cap))
goto unlock;
fdata.buffer_type = HFI_BUFFER_INPUT;
fdata.flags |= HFI_BUFFERFLAG_EOS;
if (IS_V6(inst->core) && is_fw_rev_or_older(inst->core, 1, 0, 87))
fdata.device_addr = 0;
else
fdata.device_addr = 0xdeadb000;
ret = hfi_session_process_buf(inst, &fdata);
if (!ret && inst->codec_state == VENUS_DEC_STATE_DECODING) {
inst->codec_state = VENUS_DEC_STATE_DRAIN;
inst->drain_active = true;
}
} else if (cmd->cmd == V4L2_DEC_CMD_START &&
inst->codec_state == VENUS_DEC_STATE_STOPPED) {
dst_vq = v4l2_m2m_get_vq(inst->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
vb2_clear_last_buffer_dequeued(dst_vq);
inst->codec_state = VENUS_DEC_STATE_DECODING;
}
unlock:
mutex_unlock(&inst->lock);
return ret;
}
static const struct v4l2_ioctl_ops vdec_ioctl_ops = {
.vidioc_querycap = vdec_querycap,
.vidioc_enum_fmt_vid_cap = vdec_enum_fmt,
.vidioc_enum_fmt_vid_out = vdec_enum_fmt,
.vidioc_s_fmt_vid_cap_mplane = vdec_s_fmt,
.vidioc_s_fmt_vid_out_mplane = vdec_s_fmt,
.vidioc_g_fmt_vid_cap_mplane = vdec_g_fmt,
.vidioc_g_fmt_vid_out_mplane = vdec_g_fmt,
.vidioc_try_fmt_vid_cap_mplane = vdec_try_fmt,
.vidioc_try_fmt_vid_out_mplane = vdec_try_fmt,
.vidioc_g_selection = vdec_g_selection,
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_s_parm = vdec_s_parm,
.vidioc_enum_framesizes = vdec_enum_framesizes,
.vidioc_subscribe_event = vdec_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_try_decoder_cmd = v4l2_m2m_ioctl_try_decoder_cmd,
.vidioc_decoder_cmd = vdec_decoder_cmd,
};
static int vdec_pm_get(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
struct device *dev = core->dev_dec;
int ret;
mutex_lock(&core->pm_lock);
ret = pm_runtime_resume_and_get(dev);
mutex_unlock(&core->pm_lock);
return ret;
}
static int vdec_pm_put(struct venus_inst *inst, bool autosuspend)
{
struct venus_core *core = inst->core;
struct device *dev = core->dev_dec;
int ret;
mutex_lock(&core->pm_lock);
if (autosuspend)
ret = pm_runtime_put_autosuspend(dev);
else
ret = pm_runtime_put_sync(dev);
mutex_unlock(&core->pm_lock);
return ret < 0 ? ret : 0;
}
static int vdec_pm_get_put(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
struct device *dev = core->dev_dec;
int ret = 0;
mutex_lock(&core->pm_lock);
if (pm_runtime_suspended(dev)) {
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
goto error;
ret = pm_runtime_put_autosuspend(dev);
}
error:
mutex_unlock(&core->pm_lock);
return ret < 0 ? ret : 0;
}
static void vdec_pm_touch(struct venus_inst *inst)
{
pm_runtime_mark_last_busy(inst->core->dev_dec);
}
static int vdec_set_properties(struct venus_inst *inst)
{
struct vdec_controls *ctr = &inst->controls.dec;
struct hfi_enable en = { .enable = 1 };
u32 ptype, decode_order, conceal;
int ret;
if (ctr->post_loop_deb_mode) {
ptype = HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER;
ret = hfi_session_set_property(inst, ptype, &en);
if (ret)
return ret;
}
if (ctr->display_delay_enable && ctr->display_delay == 0) {
ptype = HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER;
decode_order = HFI_OUTPUT_ORDER_DECODE;
ret = hfi_session_set_property(inst, ptype, &decode_order);
if (ret)
return ret;
}
/* Enable sufficient sequence-change event support for VP9 */
if (is_fw_rev_or_newer(inst->core, 5, 4, 51)) {
ptype = HFI_PROPERTY_PARAM_VDEC_ENABLE_SUFFICIENT_SEQCHANGE_EVENT;
ret = hfi_session_set_property(inst, ptype, &en);
if (ret)
return ret;
}
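/*
 * The 64-bit conceal-colour control appears to carry one 16-bit field per
 * component (Y in bits 0..15, Cb in 16..31, Cr in 32..47), while the HFI
 * property packs 10 bits per component at bit offsets 0/10/20. The masking
 * below assumes the upper six bits of each 16-bit field are zero.
 */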
ptype = HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR;
conceal = ctr->conceal_color & 0xffff;
conceal |= ((ctr->conceal_color >> 16) & 0xffff) << 10;
conceal |= ((ctr->conceal_color >> 32) & 0xffff) << 20;
ret = hfi_session_set_property(inst, ptype, &conceal);
if (ret)
return ret;
return 0;
}
static int vdec_set_work_route(struct venus_inst *inst)
{
u32 ptype = HFI_PROPERTY_PARAM_WORK_ROUTE;
struct hfi_video_work_route wr;
if (!(IS_IRIS2(inst->core) || IS_IRIS2_1(inst->core)))
return 0;
wr.video_work_route = inst->core->res->num_vpp_pipes;
return hfi_session_set_property(inst, ptype, &wr);
}
#define is_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_UBWC_BASE))
#define is_10bit_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_10_BIT_BASE & \
HFI_COLOR_FORMAT_UBWC_BASE))
static int vdec_output_conf(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
struct hfi_enable en = { .enable = 1 };
struct hfi_buffer_requirements bufreq;
u32 width = inst->width;
u32 height = inst->height;
u32 out_fmt, out2_fmt;
bool ubwc = false;
u32 ptype;
int ret;
ret = venus_helper_set_work_mode(inst);
if (ret)
return ret;
if (core->res->hfi_version == HFI_VERSION_1XX) {
ptype = HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER;
ret = hfi_session_set_property(inst, ptype, &en);
if (ret)
return ret;
}
/* Force searching UBWC formats for resolutions bigger than HD */
if (width > 1920 && height > ALIGN(1080, 32))
ubwc = true;
/* For Venus v4/v6 the UBWC format is mandatory */
if (IS_V4(core) || IS_V6(core))
ubwc = true;
ret = venus_helper_get_out_fmts(inst, inst->fmt_cap->pixfmt, &out_fmt,
&out2_fmt, ubwc);
if (ret)
return ret;
inst->output_buf_size =
venus_helper_get_framesz_raw(out_fmt, width, height);
inst->output2_buf_size =
venus_helper_get_framesz_raw(out2_fmt, width, height);
if (is_ubwc_fmt(out_fmt)) {
inst->opb_buftype = HFI_BUFFER_OUTPUT2;
inst->opb_fmt = out2_fmt;
inst->dpb_buftype = HFI_BUFFER_OUTPUT;
inst->dpb_fmt = out_fmt;
} else if (is_ubwc_fmt(out2_fmt) || is_10bit_ubwc_fmt(out_fmt)) {
inst->opb_buftype = HFI_BUFFER_OUTPUT;
inst->opb_fmt = out_fmt;
inst->dpb_buftype = HFI_BUFFER_OUTPUT2;
inst->dpb_fmt = out2_fmt;
} else {
inst->opb_buftype = HFI_BUFFER_OUTPUT;
inst->opb_fmt = out_fmt;
inst->dpb_buftype = 0;
inst->dpb_fmt = 0;
}
ret = venus_helper_set_raw_format(inst, inst->opb_fmt,
inst->opb_buftype);
if (ret)
return ret;
ret = venus_helper_set_format_constraints(inst);
if (ret)
return ret;
if (inst->dpb_fmt) {
ret = venus_helper_set_multistream(inst, false, true);
if (ret)
return ret;
ret = venus_helper_set_raw_format(inst, inst->dpb_fmt,
inst->dpb_buftype);
if (ret)
return ret;
ret = venus_helper_set_output_resolution(inst, width, height,
HFI_BUFFER_OUTPUT2);
if (ret)
return ret;
}
if (IS_V3(core) || IS_V4(core) || IS_V6(core)) {
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
if (ret)
return ret;
if (bufreq.size > inst->output_buf_size)
return -EINVAL;
if (inst->dpb_fmt) {
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT2,
&bufreq);
if (ret)
return ret;
if (bufreq.size > inst->output2_buf_size)
return -EINVAL;
}
if (inst->output2_buf_size) {
ret = venus_helper_set_bufsize(inst,
inst->output2_buf_size,
HFI_BUFFER_OUTPUT2);
if (ret)
return ret;
}
if (inst->output_buf_size) {
ret = venus_helper_set_bufsize(inst,
inst->output_buf_size,
HFI_BUFFER_OUTPUT);
if (ret)
return ret;
}
}
ret = venus_helper_set_dyn_bufmode(inst);
if (ret)
return ret;
return 0;
}
static int vdec_session_init(struct venus_inst *inst)
{
int ret;
ret = venus_helper_session_init(inst);
if (ret == -EALREADY)
return 0;
else if (ret)
return ret;
ret = venus_helper_set_input_resolution(inst, frame_width_min(inst),
frame_height_min(inst));
if (ret)
goto deinit;
return 0;
deinit:
hfi_session_deinit(inst);
return ret;
}
static int vdec_num_buffers(struct venus_inst *inst, unsigned int *in_num,
unsigned int *out_num)
{
enum hfi_version ver = inst->core->res->hfi_version;
struct hfi_buffer_requirements bufreq;
int ret;
*in_num = *out_num = 0;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
if (ret)
return ret;
*in_num = hfi_bufreq_get_count_min(&bufreq, ver);
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
if (ret)
return ret;
*out_num = hfi_bufreq_get_count_min(&bufreq, ver);
return 0;
}
static int vdec_queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
struct venus_core *core = inst->core;
unsigned int in_num, out_num;
int ret = 0;
if (*num_planes) {
unsigned int output_buf_size = venus_helper_get_opb_size(inst);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
*num_planes != inst->fmt_out->num_planes)
return -EINVAL;
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
*num_planes != inst->fmt_cap->num_planes)
return -EINVAL;
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
sizes[0] < inst->input_buf_size)
return -EINVAL;
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
sizes[0] < output_buf_size)
return -EINVAL;
return 0;
}
if (test_bit(0, &core->sys_error)) {
if (inst->nonblock)
return -EAGAIN;
ret = wait_event_interruptible(core->sys_err_done,
!test_bit(0, &core->sys_error));
if (ret)
return ret;
}
ret = vdec_pm_get(inst);
if (ret)
return ret;
ret = vdec_session_init(inst);
if (ret)
goto put_power;
ret = vdec_num_buffers(inst, &in_num, &out_num);
if (ret)
goto put_power;
ret = vdec_pm_put(inst, false);
if (ret)
return ret;
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
*num_planes = inst->fmt_out->num_planes;
sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt,
inst->out_width,
inst->out_height);
sizes[0] = max(sizes[0], inst->input_buf_size);
inst->input_buf_size = sizes[0];
*num_buffers = max(*num_buffers, in_num);
inst->num_input_bufs = *num_buffers;
inst->num_output_bufs = out_num;
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
*num_planes = inst->fmt_cap->num_planes;
sizes[0] = venus_helper_get_framesz(inst->fmt_cap->pixfmt,
inst->width,
inst->height);
inst->output_buf_size = sizes[0];
*num_buffers = max(*num_buffers, out_num);
inst->num_output_bufs = *num_buffers;
mutex_lock(&inst->lock);
if (inst->codec_state == VENUS_DEC_STATE_CAPTURE_SETUP)
inst->codec_state = VENUS_DEC_STATE_STOPPED;
mutex_unlock(&inst->lock);
break;
default:
ret = -EINVAL;
break;
}
return ret;
put_power:
vdec_pm_put(inst, false);
return ret;
}
static int vdec_verify_conf(struct venus_inst *inst)
{
enum hfi_version ver = inst->core->res->hfi_version;
struct hfi_buffer_requirements bufreq;
int ret;
if (!inst->num_input_bufs || !inst->num_output_bufs)
return -EINVAL;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
if (ret)
return ret;
if (inst->num_output_bufs < bufreq.count_actual ||
inst->num_output_bufs < hfi_bufreq_get_count_min(&bufreq, ver))
return -EINVAL;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
if (ret)
return ret;
if (inst->num_input_bufs < hfi_bufreq_get_count_min(&bufreq, ver))
return -EINVAL;
return 0;
}
static int vdec_start_capture(struct venus_inst *inst)
{
int ret;
if (!inst->streamon_out)
return 0;
if (inst->codec_state == VENUS_DEC_STATE_DECODING) {
if (inst->reconfig)
goto reconfigure;
venus_helper_queue_dpb_bufs(inst);
venus_helper_process_initial_cap_bufs(inst);
inst->streamon_cap = 1;
return 0;
}
if (inst->codec_state != VENUS_DEC_STATE_STOPPED)
return 0;
reconfigure:
ret = vdec_output_conf(inst);
if (ret)
return ret;
ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
VB2_MAX_FRAME, VB2_MAX_FRAME);
if (ret)
return ret;
ret = venus_helper_intbufs_realloc(inst);
if (ret)
goto err;
venus_pm_load_scale(inst);
inst->next_buf_last = false;
ret = venus_helper_alloc_dpb_bufs(inst);
if (ret)
goto err;
ret = hfi_session_continue(inst);
if (ret)
goto free_dpb_bufs;
ret = venus_helper_queue_dpb_bufs(inst);
if (ret)
goto free_dpb_bufs;
ret = venus_helper_process_initial_cap_bufs(inst);
if (ret)
goto free_dpb_bufs;
inst->codec_state = VENUS_DEC_STATE_DECODING;
if (inst->drain_active)
inst->codec_state = VENUS_DEC_STATE_DRAIN;
inst->streamon_cap = 1;
inst->sequence_cap = 0;
inst->reconfig = false;
inst->drain_active = false;
return 0;
free_dpb_bufs:
venus_helper_free_dpb_bufs(inst);
err:
return ret;
}
static int vdec_start_output(struct venus_inst *inst)
{
int ret;
if (inst->codec_state == VENUS_DEC_STATE_SEEK) {
ret = venus_helper_process_initial_out_bufs(inst);
if (inst->next_buf_last)
inst->codec_state = VENUS_DEC_STATE_DRC;
else
inst->codec_state = VENUS_DEC_STATE_DECODING;
goto done;
}
if (inst->codec_state == VENUS_DEC_STATE_INIT ||
inst->codec_state == VENUS_DEC_STATE_CAPTURE_SETUP) {
ret = venus_helper_process_initial_out_bufs(inst);
goto done;
}
if (inst->codec_state != VENUS_DEC_STATE_DEINIT)
return -EINVAL;
venus_helper_init_instance(inst);
inst->sequence_out = 0;
inst->reconfig = false;
inst->next_buf_last = false;
ret = vdec_set_properties(inst);
if (ret)
return ret;
ret = vdec_set_work_route(inst);
if (ret)
return ret;
ret = vdec_output_conf(inst);
if (ret)
return ret;
ret = vdec_verify_conf(inst);
if (ret)
return ret;
ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
VB2_MAX_FRAME, VB2_MAX_FRAME);
if (ret)
return ret;
ret = venus_helper_vb2_start_streaming(inst);
if (ret)
return ret;
ret = venus_helper_process_initial_out_bufs(inst);
if (ret)
return ret;
inst->codec_state = VENUS_DEC_STATE_INIT;
done:
inst->streamon_out = 1;
return ret;
}
static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct venus_inst *inst = vb2_get_drv_priv(q);
int ret;
mutex_lock(&inst->lock);
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
ret = vdec_start_capture(inst);
} else {
ret = vdec_pm_get(inst);
if (ret)
goto error;
ret = venus_pm_acquire_core(inst);
if (ret)
goto put_power;
ret = vdec_pm_put(inst, true);
if (ret)
goto error;
ret = vdec_start_output(inst);
}
if (ret)
goto error;
mutex_unlock(&inst->lock);
return 0;
put_power:
vdec_pm_put(inst, false);
error:
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
mutex_unlock(&inst->lock);
return ret;
}
static void vdec_cancel_dst_buffers(struct venus_inst *inst)
{
struct vb2_v4l2_buffer *buf;
while ((buf = v4l2_m2m_dst_buf_remove(inst->m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
}
static int vdec_stop_capture(struct venus_inst *inst)
{
int ret = 0;
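/*
 * State machine note: DECODING flushes everything and then falls through
 * to the DRAIN handling (marking the instance STOPPED), which in turn
 * falls through to SEEK so the queued capture buffers are cancelled.
 */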
switch (inst->codec_state) {
case VENUS_DEC_STATE_DECODING:
ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true);
fallthrough;
case VENUS_DEC_STATE_DRAIN:
inst->codec_state = VENUS_DEC_STATE_STOPPED;
inst->drain_active = false;
fallthrough;
case VENUS_DEC_STATE_SEEK:
vdec_cancel_dst_buffers(inst);
break;
case VENUS_DEC_STATE_DRC:
ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, true);
inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP;
venus_helper_free_dpb_bufs(inst);
break;
default:
break;
}
return ret;
}
static int vdec_stop_output(struct venus_inst *inst)
{
int ret = 0;
switch (inst->codec_state) {
case VENUS_DEC_STATE_DECODING:
case VENUS_DEC_STATE_DRAIN:
case VENUS_DEC_STATE_STOPPED:
case VENUS_DEC_STATE_DRC:
ret = hfi_session_flush(inst, HFI_FLUSH_ALL, true);
inst->codec_state = VENUS_DEC_STATE_SEEK;
break;
case VENUS_DEC_STATE_INIT:
case VENUS_DEC_STATE_CAPTURE_SETUP:
ret = hfi_session_flush(inst, HFI_FLUSH_INPUT, true);
break;
default:
break;
}
return ret;
}
static void vdec_stop_streaming(struct vb2_queue *q)
{
struct venus_inst *inst = vb2_get_drv_priv(q);
int ret = -EINVAL;
vdec_pm_get_put(inst);
mutex_lock(&inst->lock);
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
ret = vdec_stop_capture(inst);
else
ret = vdec_stop_output(inst);
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_ERROR);
inst->session_error = 0;
if (ret)
goto unlock;
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->streamon_out = 0;
else
inst->streamon_cap = 0;
unlock:
mutex_unlock(&inst->lock);
}
static void vdec_session_release(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
int ret, abort = 0;
vdec_pm_get(inst);
mutex_lock(&inst->lock);
inst->codec_state = VENUS_DEC_STATE_DEINIT;
ret = hfi_session_stop(inst);
abort = (ret && ret != -EINVAL) ? 1 : 0;
ret = hfi_session_unload_res(inst);
abort = (ret && ret != -EINVAL) ? 1 : 0;
ret = venus_helper_unregister_bufs(inst);
abort = (ret && ret != -EINVAL) ? 1 : 0;
ret = venus_helper_intbufs_free(inst);
abort = (ret && ret != -EINVAL) ? 1 : 0;
ret = hfi_session_deinit(inst);
abort = (ret && ret != -EINVAL) ? 1 : 0;
if (inst->session_error || test_bit(0, &core->sys_error))
abort = 1;
if (abort)
hfi_session_abort(inst);
venus_helper_free_dpb_bufs(inst);
venus_pm_load_scale(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
mutex_unlock(&inst->lock);
venus_pm_release_core(inst);
vdec_pm_put(inst, false);
}
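/*
 * buf_count pairs buf_init with buf_cleanup: the HFI session is torn down
 * via vdec_session_release() only when the last vb2 buffer is cleaned up.
 */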
static int vdec_buf_init(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
inst->buf_count++;
return venus_helper_vb2_buf_init(vb);
}
static void vdec_buf_cleanup(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct venus_buffer *buf = to_venus_buffer(vbuf);
mutex_lock(&inst->lock);
if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
if (!list_empty(&inst->registeredbufs))
list_del_init(&buf->reg_list);
mutex_unlock(&inst->lock);
inst->buf_count--;
if (!inst->buf_count)
vdec_session_release(inst);
}
static void vdec_vb2_buf_queue(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
static const struct v4l2_event eos = { .type = V4L2_EVENT_EOS };
vdec_pm_get_put(inst);
mutex_lock(&inst->lock);
if (inst->next_buf_last && V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
inst->codec_state == VENUS_DEC_STATE_DRC) {
vbuf->flags |= V4L2_BUF_FLAG_LAST;
vbuf->sequence = inst->sequence_cap++;
vbuf->field = V4L2_FIELD_NONE;
vb2_set_plane_payload(vb, 0, 0);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
v4l2_event_queue_fh(&inst->fh, &eos);
inst->next_buf_last = false;
mutex_unlock(&inst->lock);
return;
}
venus_helper_vb2_buf_queue(vb);
mutex_unlock(&inst->lock);
}
static const struct vb2_ops vdec_vb2_ops = {
.queue_setup = vdec_queue_setup,
.buf_init = vdec_buf_init,
.buf_cleanup = vdec_buf_cleanup,
.buf_prepare = venus_helper_vb2_buf_prepare,
.start_streaming = vdec_start_streaming,
.stop_streaming = vdec_stop_streaming,
.buf_queue = vdec_vb2_buf_queue,
};
static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
u32 tag, u32 bytesused, u32 data_offset, u32 flags,
u32 hfi_flags, u64 timestamp_us)
{
enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
struct vb2_v4l2_buffer *vbuf;
struct vb2_buffer *vb;
unsigned int type;
vdec_pm_touch(inst);
if (buf_type == HFI_BUFFER_INPUT)
type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
else
type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
vbuf = venus_helper_find_buf(inst, type, tag);
if (!vbuf) {
venus_helper_change_dpb_owner(inst, vbuf, type, buf_type, tag);
return;
}
vbuf->flags = flags;
vbuf->field = V4L2_FIELD_NONE;
vb = &vbuf->vb2_buf;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
vb2_set_plane_payload(vb, 0, bytesused);
vb->planes[0].data_offset = data_offset;
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
if (vbuf->flags & V4L2_BUF_FLAG_LAST) {
const struct v4l2_event ev = { .type = V4L2_EVENT_EOS };
v4l2_event_queue_fh(&inst->fh, &ev);
if (inst->codec_state == VENUS_DEC_STATE_DRAIN) {
inst->drain_active = false;
inst->codec_state = VENUS_DEC_STATE_STOPPED;
}
}
if (!bytesused)
state = VB2_BUF_STATE_ERROR;
} else {
vbuf->sequence = inst->sequence_out++;
}
venus_helper_get_ts_metadata(inst, timestamp_us, vbuf);
if (hfi_flags & HFI_BUFFERFLAG_READONLY)
venus_helper_acquire_buf_ref(vbuf);
if (hfi_flags & HFI_BUFFERFLAG_DATACORRUPT)
state = VB2_BUF_STATE_ERROR;
if (hfi_flags & HFI_BUFFERFLAG_DROP_FRAME) {
state = VB2_BUF_STATE_ERROR;
vb2_set_plane_payload(vb, 0, 0);
vb->timestamp = 0;
}
v4l2_m2m_buf_done(vbuf, state);
}
static void vdec_event_change(struct venus_inst *inst,
struct hfi_event_data *ev_data, bool sufficient)
{
static const struct v4l2_event ev = {
.type = V4L2_EVENT_SOURCE_CHANGE,
.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION };
struct device *dev = inst->core->dev_dec;
struct v4l2_format format = {};
mutex_lock(&inst->lock);
format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
format.fmt.pix_mp.pixelformat = inst->fmt_cap->pixfmt;
format.fmt.pix_mp.width = ev_data->width;
format.fmt.pix_mp.height = ev_data->height;
vdec_try_fmt_common(inst, &format);
inst->width = format.fmt.pix_mp.width;
inst->height = format.fmt.pix_mp.height;
/*
* Some versions of the firmware do not report crop information for
* all codecs. For these cases, set the crop to the coded resolution.
*/
if (ev_data->input_crop.width > 0 && ev_data->input_crop.height > 0) {
inst->crop.left = ev_data->input_crop.left;
inst->crop.top = ev_data->input_crop.top;
inst->crop.width = ev_data->input_crop.width;
inst->crop.height = ev_data->input_crop.height;
} else {
inst->crop.left = 0;
inst->crop.top = 0;
inst->crop.width = ev_data->width;
inst->crop.height = ev_data->height;
}
inst->fw_min_cnt = ev_data->buf_count;
/* Override to 11 for VP9 due to a firmware bug */
if (inst->hfi_codec == HFI_VIDEO_CODEC_VP9)
inst->fw_min_cnt = 11;
inst->out_width = ev_data->width;
inst->out_height = ev_data->height;
if (inst->bit_depth != ev_data->bit_depth) {
inst->bit_depth = ev_data->bit_depth;
if (inst->bit_depth == VIDC_BITDEPTH_10)
inst->fmt_cap = &vdec_formats[VENUS_FMT_P010];
else
inst->fmt_cap = &vdec_formats[VENUS_FMT_NV12];
}
if (inst->pic_struct != ev_data->pic_struct)
inst->pic_struct = ev_data->pic_struct;
dev_dbg(dev, VDBGM "event %s sufficient resources (%ux%u)\n",
sufficient ? "" : "not", ev_data->width, ev_data->height);
switch (inst->codec_state) {
case VENUS_DEC_STATE_INIT:
inst->codec_state = VENUS_DEC_STATE_CAPTURE_SETUP;
break;
case VENUS_DEC_STATE_DECODING:
case VENUS_DEC_STATE_DRAIN:
inst->codec_state = VENUS_DEC_STATE_DRC;
break;
default:
break;
}
/*
 * The assumption is that the firmware has to return the last buffer
 * before this event is received in the v4l2 driver. Also, the firmware
 * itself doesn't mark the last decoder output buffer with the HFI EOS
 * flag.
 */
if (inst->codec_state == VENUS_DEC_STATE_DRC) {
int ret;
inst->next_buf_last = true;
ret = hfi_session_flush(inst, HFI_FLUSH_OUTPUT, false);
if (ret)
dev_dbg(dev, VDBGH "flush output error %d\n", ret);
}
inst->next_buf_last = true;
inst->reconfig = true;
v4l2_event_queue_fh(&inst->fh, &ev);
wake_up(&inst->reconf_wait);
mutex_unlock(&inst->lock);
}
static void vdec_event_notify(struct venus_inst *inst, u32 event,
struct hfi_event_data *data)
{
struct venus_core *core = inst->core;
struct device *dev = core->dev_dec;
vdec_pm_touch(inst);
switch (event) {
case EVT_SESSION_ERROR:
inst->session_error = true;
venus_helper_vb2_queue_error(inst);
dev_err(dev, "dec: event session error %x\n", inst->error);
break;
case EVT_SYS_EVENT_CHANGE:
switch (data->event_type) {
case HFI_EVENT_DATA_SEQUENCE_CHANGED_SUFFICIENT_BUF_RESOURCES:
vdec_event_change(inst, data, true);
break;
case HFI_EVENT_DATA_SEQUENCE_CHANGED_INSUFFICIENT_BUF_RESOURCES:
vdec_event_change(inst, data, false);
break;
case HFI_EVENT_RELEASE_BUFFER_REFERENCE:
venus_helper_release_buf_ref(inst, data->tag);
break;
default:
break;
}
break;
default:
break;
}
}
static void vdec_flush_done(struct venus_inst *inst)
{
dev_dbg(inst->core->dev_dec, VDBGH "flush done\n");
}
static const struct hfi_inst_ops vdec_hfi_ops = {
.buf_done = vdec_buf_done,
.event_notify = vdec_event_notify,
.flush_done = vdec_flush_done,
};
static void vdec_inst_init(struct venus_inst *inst)
{
inst->hfi_codec = HFI_VIDEO_CODEC_H264;
inst->fmt_out = &vdec_formats[VENUS_FMT_H264];
inst->fmt_cap = &vdec_formats[VENUS_FMT_NV12];
inst->width = frame_width_min(inst);
inst->height = ALIGN(frame_height_min(inst), 32);
inst->crop.left = 0;
inst->crop.top = 0;
inst->crop.width = inst->width;
inst->crop.height = inst->height;
inst->fw_min_cnt = 8;
inst->out_width = frame_width_min(inst);
inst->out_height = frame_height_min(inst);
inst->fps = 30;
inst->timeperframe.numerator = 1;
inst->timeperframe.denominator = 30;
inst->opb_buftype = HFI_BUFFER_OUTPUT;
}
static void vdec_m2m_device_run(void *priv)
{
}
static const struct v4l2_m2m_ops vdec_m2m_ops = {
.device_run = vdec_m2m_device_run,
.job_abort = venus_helper_m2m_job_abort,
};
static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct venus_inst *inst = priv;
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->ops = &vdec_vb2_ops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->drv_priv = inst;
src_vq->buf_struct_size = sizeof(struct venus_buffer);
src_vq->allow_zero_bytesused = 1;
src_vq->min_buffers_needed = 0;
src_vq->dev = inst->core->dev;
src_vq->lock = &inst->ctx_q_lock;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->ops = &vdec_vb2_ops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->drv_priv = inst;
dst_vq->buf_struct_size = sizeof(struct venus_buffer);
dst_vq->allow_zero_bytesused = 1;
dst_vq->min_buffers_needed = 0;
dst_vq->dev = inst->core->dev;
dst_vq->lock = &inst->ctx_q_lock;
return vb2_queue_init(dst_vq);
}
static int vdec_open(struct file *file)
{
struct venus_core *core = video_drvdata(file);
struct venus_inst *inst;
int ret;
inst = kzalloc(sizeof(*inst), GFP_KERNEL);
if (!inst)
return -ENOMEM;
INIT_LIST_HEAD(&inst->dpbbufs);
INIT_LIST_HEAD(&inst->registeredbufs);
INIT_LIST_HEAD(&inst->internalbufs);
INIT_LIST_HEAD(&inst->list);
mutex_init(&inst->lock);
mutex_init(&inst->ctx_q_lock);
inst->core = core;
inst->session_type = VIDC_SESSION_TYPE_DEC;
inst->num_output_bufs = 1;
inst->codec_state = VENUS_DEC_STATE_DEINIT;
inst->buf_count = 0;
inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
inst->core_acquired = false;
inst->bit_depth = VIDC_BITDEPTH_8;
inst->pic_struct = HFI_INTERLACE_FRAME_PROGRESSIVE;
init_waitqueue_head(&inst->reconf_wait);
inst->nonblock = file->f_flags & O_NONBLOCK;
venus_helper_init_instance(inst);
ret = vdec_ctrl_init(inst);
if (ret)
goto err_free;
ret = hfi_session_create(inst, &vdec_hfi_ops);
if (ret)
goto err_ctrl_deinit;
vdec_inst_init(inst);
ida_init(&inst->dpb_ids);
/*
 * Create an m2m device for every instance; the m2m context scheduling
 * is done on the firmware side, so we do not need to care about it.
 */
inst->m2m_dev = v4l2_m2m_init(&vdec_m2m_ops);
if (IS_ERR(inst->m2m_dev)) {
ret = PTR_ERR(inst->m2m_dev);
goto err_session_destroy;
}
inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
if (IS_ERR(inst->m2m_ctx)) {
ret = PTR_ERR(inst->m2m_ctx);
goto err_m2m_release;
}
v4l2_fh_init(&inst->fh, core->vdev_dec);
inst->fh.ctrl_handler = &inst->ctrl_handler;
v4l2_fh_add(&inst->fh);
inst->fh.m2m_ctx = inst->m2m_ctx;
file->private_data = &inst->fh;
return 0;
err_m2m_release:
v4l2_m2m_release(inst->m2m_dev);
err_session_destroy:
hfi_session_destroy(inst);
err_ctrl_deinit:
vdec_ctrl_deinit(inst);
err_free:
kfree(inst);
return ret;
}
static int vdec_close(struct file *file)
{
struct venus_inst *inst = to_inst(file);
vdec_pm_get(inst);
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
vdec_ctrl_deinit(inst);
ida_destroy(&inst->dpb_ids);
hfi_session_destroy(inst);
mutex_destroy(&inst->lock);
mutex_destroy(&inst->ctx_q_lock);
v4l2_fh_del(&inst->fh);
v4l2_fh_exit(&inst->fh);
vdec_pm_put(inst, false);
kfree(inst);
return 0;
}
static const struct v4l2_file_operations vdec_fops = {
.owner = THIS_MODULE,
.open = vdec_open,
.release = vdec_close,
.unlocked_ioctl = video_ioctl2,
.poll = v4l2_m2m_fop_poll,
.mmap = v4l2_m2m_fop_mmap,
};
static int vdec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct video_device *vdev;
struct venus_core *core;
int ret;
if (!dev->parent)
return -EPROBE_DEFER;
core = dev_get_drvdata(dev->parent);
if (!core)
return -EPROBE_DEFER;
platform_set_drvdata(pdev, core);
if (core->pm_ops->vdec_get) {
ret = core->pm_ops->vdec_get(dev);
if (ret)
return ret;
}
vdev = video_device_alloc();
if (!vdev)
return -ENOMEM;
strscpy(vdev->name, "qcom-venus-decoder", sizeof(vdev->name));
vdev->release = video_device_release;
vdev->fops = &vdec_fops;
vdev->ioctl_ops = &vdec_ioctl_ops;
vdev->vfl_dir = VFL_DIR_M2M;
vdev->v4l2_dev = &core->v4l2_dev;
vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret)
goto err_vdev_release;
core->vdev_dec = vdev;
core->dev_dec = dev;
video_set_drvdata(vdev, core);
pm_runtime_set_autosuspend_delay(dev, 2000);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return 0;
err_vdev_release:
video_device_release(vdev);
return ret;
}
static void vdec_remove(struct platform_device *pdev)
{
struct venus_core *core = dev_get_drvdata(pdev->dev.parent);
video_unregister_device(core->vdev_dec);
pm_runtime_disable(core->dev_dec);
if (core->pm_ops->vdec_put)
core->pm_ops->vdec_put(core->dev_dec);
}
static __maybe_unused int vdec_runtime_suspend(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
const struct venus_pm_ops *pm_ops = core->pm_ops;
int ret = 0;
if (pm_ops->vdec_power)
ret = pm_ops->vdec_power(dev, POWER_OFF);
return ret;
}
static __maybe_unused int vdec_runtime_resume(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
const struct venus_pm_ops *pm_ops = core->pm_ops;
int ret = 0;
if (pm_ops->vdec_power)
ret = pm_ops->vdec_power(dev, POWER_ON);
return ret;
}
static const struct dev_pm_ops vdec_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(vdec_runtime_suspend, vdec_runtime_resume, NULL)
};
static const struct of_device_id vdec_dt_match[] = {
{ .compatible = "venus-decoder" },
{ }
};
MODULE_DEVICE_TABLE(of, vdec_dt_match);
static struct platform_driver qcom_venus_dec_driver = {
.probe = vdec_probe,
.remove_new = vdec_remove,
.driver = {
.name = "qcom-venus-decoder",
.of_match_table = vdec_dt_match,
.pm = &vdec_pm_ops,
},
};
module_platform_driver(qcom_venus_dec_driver);
MODULE_ALIAS("platform:qcom-venus-decoder");
MODULE_DESCRIPTION("Qualcomm Venus video decoder driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/platform/qcom/venus/vdec.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/overflow.h>
#include <linux/errno.h>
#include <linux/hash.h>
#include "hfi_cmds.h"
static enum hfi_version hfi_ver;
void pkt_sys_init(struct hfi_sys_init_pkt *pkt, u32 arch_type)
{
pkt->hdr.size = sizeof(*pkt);
pkt->hdr.pkt_type = HFI_CMD_SYS_INIT;
pkt->arch_type = arch_type;
}
void pkt_sys_pc_prep(struct hfi_sys_pc_prep_pkt *pkt)
{
pkt->hdr.size = sizeof(*pkt);
pkt->hdr.pkt_type = HFI_CMD_SYS_PC_PREP;
}
void pkt_sys_idle_indicator(struct hfi_sys_set_property_pkt *pkt, u32 enable)
{
struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1];
pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi);
pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt->num_properties = 1;
pkt->data[0] = HFI_PROPERTY_SYS_IDLE_INDICATOR;
hfi->enable = enable;
}
void pkt_sys_debug_config(struct hfi_sys_set_property_pkt *pkt, u32 mode,
u32 config)
{
struct hfi_debug_config *hfi;
pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi);
pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt->num_properties = 1;
pkt->data[0] = HFI_PROPERTY_SYS_DEBUG_CONFIG;
hfi = (struct hfi_debug_config *)&pkt->data[1];
hfi->config = config;
hfi->mode = mode;
}
void pkt_sys_coverage_config(struct hfi_sys_set_property_pkt *pkt, u32 mode)
{
pkt->hdr.size = struct_size(pkt, data, 2);
pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt->num_properties = 1;
pkt->data[0] = HFI_PROPERTY_SYS_CONFIG_COVERAGE;
pkt->data[1] = mode;
}
void pkt_sys_ubwc_config(struct hfi_sys_set_property_pkt *pkt, const struct hfi_ubwc_config *hfi)
{
pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi);
pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt->num_properties = 1;
pkt->data[0] = HFI_PROPERTY_SYS_UBWC_CONFIG;
memcpy(&pkt->data[1], hfi, sizeof(*hfi));
}
int pkt_sys_set_resource(struct hfi_sys_set_resource_pkt *pkt, u32 id, u32 size,
u32 addr, void *cookie)
{
pkt->hdr.size = sizeof(*pkt);
pkt->hdr.pkt_type = HFI_CMD_SYS_SET_RESOURCE;
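/*
 * The firmware identifies the resource owner by a 32-bit handle; use a
 * hash of the cookie pointer rather than handing the raw kernel address
 * to the firmware.
 */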
pkt->resource_handle = hash32_ptr(cookie);
switch (id) {
case VIDC_RESOURCE_OCMEM:
case VIDC_RESOURCE_VMEM: {
struct hfi_resource_ocmem *res =
(struct hfi_resource_ocmem *)&pkt->resource_data[0];
res->size = size;
res->mem = addr;
pkt->resource_type = HFI_RESOURCE_OCMEM;
pkt->hdr.size += sizeof(*res);
break;
}
case VIDC_RESOURCE_NONE:
default:
return -ENOTSUPP;
}
return 0;
}
int pkt_sys_unset_resource(struct hfi_sys_release_resource_pkt *pkt, u32 id,
u32 size, void *cookie)
{
pkt->hdr.size = sizeof(*pkt);
pkt->hdr.pkt_type = HFI_CMD_SYS_RELEASE_RESOURCE;
pkt->resource_handle = hash32_ptr(cookie);
switch (id) {
case VIDC_RESOURCE_OCMEM:
case VIDC_RESOURCE_VMEM:
pkt->resource_type = HFI_RESOURCE_OCMEM;
break;
case VIDC_RESOURCE_NONE:
break;
default:
return -ENOTSUPP;
}
return 0;
}
void pkt_sys_ping(struct hfi_sys_ping_pkt *pkt, u32 cookie)
{
pkt->hdr.size = sizeof(*pkt);
pkt->hdr.pkt_type = HFI_CMD_SYS_PING;
pkt->client_data = cookie;
}
void pkt_sys_power_control(struct hfi_sys_set_property_pkt *pkt, u32 enable)
{
struct hfi_enable *hfi = (struct hfi_enable *)&pkt->data[1];
pkt->hdr.size = struct_size(pkt, data, 1) + sizeof(*hfi);
pkt->hdr.pkt_type = HFI_CMD_SYS_SET_PROPERTY;
pkt->num_properties = 1;
pkt->data[0] = HFI_PROPERTY_SYS_CODEC_POWER_PLANE_CTRL;
hfi->enable = enable;
}
int pkt_sys_ssr_cmd(struct hfi_sys_test_ssr_pkt *pkt, u32 trigger_type)
{
switch (trigger_type) {
case HFI_TEST_SSR_SW_ERR_FATAL:
case HFI_TEST_SSR_SW_DIV_BY_ZERO:
case HFI_TEST_SSR_HW_WDOG_IRQ:
break;
default:
return -EINVAL;
}
pkt->hdr.size = sizeof(*pkt);
pkt->hdr.pkt_type = HFI_CMD_SYS_TEST_SSR;
pkt->trigger_type = trigger_type;
return 0;
}
void pkt_sys_image_version(struct hfi_sys_get_property_pkt *pkt)
{
pkt->hdr.size = sizeof(*pkt);
pkt->hdr.pkt_type = HFI_CMD_SYS_GET_PROPERTY;
pkt->num_properties = 1;
pkt->data[0] = HFI_PROPERTY_SYS_IMAGE_VERSION;
}
int pkt_session_init(struct hfi_session_init_pkt *pkt, void *cookie,
u32 session_type, u32 codec)
{
if (!pkt || !cookie || !codec)
return -EINVAL;
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SYS_SESSION_INIT;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->session_domain = session_type;
pkt->session_codec = codec;
return 0;
}
void pkt_session_cmd(struct hfi_session_pkt *pkt, u32 pkt_type, void *cookie)
{
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = pkt_type;
pkt->shdr.session_id = hash32_ptr(cookie);
}
int pkt_session_set_buffers(struct hfi_session_set_buffers_pkt *pkt,
void *cookie, struct hfi_buffer_desc *bd)
{
unsigned int i;
if (!cookie || !pkt || !bd)
return -EINVAL;
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_BUFFERS;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->buffer_size = bd->buffer_size;
pkt->min_buffer_size = bd->buffer_size;
pkt->num_buffers = bd->num_buffers;
if (bd->buffer_type == HFI_BUFFER_OUTPUT ||
bd->buffer_type == HFI_BUFFER_OUTPUT2) {
struct hfi_buffer_info *bi;
pkt->extradata_size = bd->extradata_size;
pkt->shdr.hdr.size = sizeof(*pkt) +
bd->num_buffers * sizeof(*bi);
bi = (struct hfi_buffer_info *)pkt->buffer_info;
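/*
 * OUTPUT/OUTPUT2 payloads are struct hfi_buffer_info pairs (buffer
 * address + extradata address) rather than plain device addresses. Note
 * that 'bi' is not advanced in the loop below, so every iteration writes
 * the same entry; this is only safe when a single buffer is described per
 * packet, which appears to be how the callers use it.
 */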
for (i = 0; i < pkt->num_buffers; i++) {
bi->buffer_addr = bd->device_addr;
bi->extradata_addr = bd->extradata_addr;
}
} else {
pkt->extradata_size = 0;
pkt->shdr.hdr.size = struct_size(pkt, buffer_info,
bd->num_buffers);
for (i = 0; i < pkt->num_buffers; i++)
pkt->buffer_info[i] = bd->device_addr;
}
pkt->buffer_type = bd->buffer_type;
return 0;
}
int pkt_session_unset_buffers(struct hfi_session_release_buffer_pkt *pkt,
void *cookie, struct hfi_buffer_desc *bd)
{
unsigned int i;
if (!cookie || !pkt || !bd)
return -EINVAL;
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_RELEASE_BUFFERS;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->buffer_size = bd->buffer_size;
pkt->num_buffers = bd->num_buffers;
if (bd->buffer_type == HFI_BUFFER_OUTPUT ||
bd->buffer_type == HFI_BUFFER_OUTPUT2) {
struct hfi_buffer_info *bi;
bi = (struct hfi_buffer_info *)pkt->buffer_info;
for (i = 0; i < pkt->num_buffers; i++) {
bi->buffer_addr = bd->device_addr;
bi->extradata_addr = bd->extradata_addr;
}
pkt->shdr.hdr.size =
sizeof(struct hfi_session_set_buffers_pkt) +
bd->num_buffers * sizeof(*bi);
} else {
for (i = 0; i < pkt->num_buffers; i++)
pkt->buffer_info[i] = bd->device_addr;
pkt->extradata_size = 0;
pkt->shdr.hdr.size =
struct_size_t(struct hfi_session_set_buffers_pkt,
buffer_info, bd->num_buffers);
}
pkt->response_req = bd->response_required;
pkt->buffer_type = bd->buffer_type;
return 0;
}
int pkt_session_etb_decoder(struct hfi_session_empty_buffer_compressed_pkt *pkt,
void *cookie, struct hfi_frame_data *in_frame)
{
if (!cookie)
return -EINVAL;
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp);
pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp);
pkt->flags = in_frame->flags;
pkt->mark_target = in_frame->mark_target;
pkt->mark_data = in_frame->mark_data;
pkt->offset = in_frame->offset;
pkt->alloc_len = in_frame->alloc_len;
pkt->filled_len = in_frame->filled_len;
pkt->input_tag = in_frame->clnt_data;
pkt->packet_buffer = in_frame->device_addr;
return 0;
}
int pkt_session_etb_encoder(
struct hfi_session_empty_buffer_uncompressed_plane0_pkt *pkt,
void *cookie, struct hfi_frame_data *in_frame)
{
if (!cookie || !in_frame->device_addr)
return -EINVAL;
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_EMPTY_BUFFER;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->view_id = 0;
pkt->time_stamp_hi = upper_32_bits(in_frame->timestamp);
pkt->time_stamp_lo = lower_32_bits(in_frame->timestamp);
pkt->flags = in_frame->flags;
pkt->mark_target = in_frame->mark_target;
pkt->mark_data = in_frame->mark_data;
pkt->offset = in_frame->offset;
pkt->alloc_len = in_frame->alloc_len;
pkt->filled_len = in_frame->filled_len;
pkt->input_tag = in_frame->clnt_data;
pkt->packet_buffer = in_frame->device_addr;
pkt->extradata_buffer = in_frame->extradata_addr;
return 0;
}
int pkt_session_ftb(struct hfi_session_fill_buffer_pkt *pkt, void *cookie,
struct hfi_frame_data *out_frame)
{
if (!cookie || !out_frame || !out_frame->device_addr)
return -EINVAL;
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FILL_BUFFER;
pkt->shdr.session_id = hash32_ptr(cookie);
if (out_frame->buffer_type == HFI_BUFFER_OUTPUT)
pkt->stream_id = 0;
else if (out_frame->buffer_type == HFI_BUFFER_OUTPUT2)
pkt->stream_id = 1;
pkt->output_tag = out_frame->clnt_data;
pkt->packet_buffer = out_frame->device_addr;
pkt->extradata_buffer = out_frame->extradata_addr;
pkt->alloc_len = out_frame->alloc_len;
pkt->filled_len = out_frame->filled_len;
pkt->offset = out_frame->offset;
pkt->data[0] = out_frame->extradata_size;
return 0;
}
int pkt_session_parse_seq_header(
struct hfi_session_parse_sequence_header_pkt *pkt,
void *cookie, u32 seq_hdr, u32 seq_hdr_len)
{
if (!cookie || !seq_hdr || !seq_hdr_len)
return -EINVAL;
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_PARSE_SEQUENCE_HEADER;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->header_len = seq_hdr_len;
pkt->packet_buffer = seq_hdr;
return 0;
}
int pkt_session_get_seq_hdr(struct hfi_session_get_sequence_header_pkt *pkt,
void *cookie, u32 seq_hdr, u32 seq_hdr_len)
{
if (!cookie || !seq_hdr || !seq_hdr_len)
return -EINVAL;
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_SEQUENCE_HEADER;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->buffer_len = seq_hdr_len;
pkt->packet_buffer = seq_hdr;
return 0;
}
int pkt_session_flush(struct hfi_session_flush_pkt *pkt, void *cookie, u32 type)
{
switch (type) {
case HFI_FLUSH_INPUT:
case HFI_FLUSH_OUTPUT:
case HFI_FLUSH_OUTPUT2:
case HFI_FLUSH_ALL:
break;
default:
return -EINVAL;
}
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_FLUSH;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->flush_type = type;
return 0;
}
static int pkt_session_get_property_1x(struct hfi_session_get_property_pkt *pkt,
void *cookie, u32 ptype)
{
switch (ptype) {
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT:
case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
break;
default:
return -EINVAL;
}
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->num_properties = 1;
pkt->data[0] = ptype;
return 0;
}
static int pkt_session_set_property_1x(struct hfi_session_set_property_pkt *pkt,
void *cookie, u32 ptype, void *pdata)
{
void *prop_data;
int ret = 0;
if (!pkt || !cookie || !pdata)
return -EINVAL;
prop_data = &pkt->data[1];
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->num_properties = 1;
pkt->data[0] = ptype;
switch (ptype) {
case HFI_PROPERTY_CONFIG_FRAME_RATE: {
struct hfi_framerate *in = pdata, *frate = prop_data;
frate->buffer_type = in->buffer_type;
frate->framerate = in->framerate;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*frate);
break;
}
case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SELECT: {
struct hfi_uncompressed_format_select *in = pdata;
struct hfi_uncompressed_format_select *hfi = prop_data;
hfi->buffer_type = in->buffer_type;
hfi->format = in->format;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
break;
}
case HFI_PROPERTY_PARAM_FRAME_SIZE: {
struct hfi_framesize *in = pdata, *fsize = prop_data;
fsize->buffer_type = in->buffer_type;
fsize->height = in->height;
fsize->width = in->width;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fsize);
break;
}
case HFI_PROPERTY_CONFIG_REALTIME: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) * 2;
break;
}
case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: {
struct hfi_buffer_count_actual *in = pdata, *count = prop_data;
count->count_actual = in->count_actual;
count->type = in->type;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
break;
}
case HFI_PROPERTY_PARAM_BUFFER_SIZE_ACTUAL: {
struct hfi_buffer_size_actual *in = pdata, *sz = prop_data;
sz->size = in->size;
sz->type = in->type;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*sz);
break;
}
case HFI_PROPERTY_PARAM_BUFFER_DISPLAY_HOLD_COUNT_ACTUAL: {
struct hfi_buffer_display_hold_count_actual *in = pdata;
struct hfi_buffer_display_hold_count_actual *count = prop_data;
count->hold_count = in->hold_count;
count->type = in->type;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
break;
}
case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SELECT: {
struct hfi_nal_stream_format_select *in = pdata;
struct hfi_nal_stream_format_select *fmt = prop_data;
fmt->format = in->format;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*fmt);
break;
}
case HFI_PROPERTY_PARAM_VDEC_OUTPUT_ORDER: {
u32 *in = pdata;
switch (*in) {
case HFI_OUTPUT_ORDER_DECODE:
case HFI_OUTPUT_ORDER_DISPLAY:
break;
default:
ret = -EINVAL;
break;
}
pkt->data[1] = *in;
pkt->shdr.hdr.size += sizeof(u32) * 2;
break;
}
case HFI_PROPERTY_PARAM_VDEC_PICTURE_TYPE_DECODE: {
struct hfi_enable_picture *in = pdata, *en = prop_data;
en->picture_type = in->picture_type;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VDEC_OUTPUT2_KEEP_ASPECT_RATIO: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VDEC_ENABLE_SUFFICIENT_SEQCHANGE_EVENT:
case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER: {
struct hfi_enable *in = pdata;
struct hfi_enable *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: {
struct hfi_multi_stream *in = pdata, *multi = prop_data;
multi->buffer_type = in->buffer_type;
multi->enable = in->enable;
multi->width = in->width;
multi->height = in->height;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
break;
}
case HFI_PROPERTY_PARAM_VDEC_DISPLAY_PICTURE_BUFFER_COUNT: {
struct hfi_display_picture_buffer_count *in = pdata;
struct hfi_display_picture_buffer_count *count = prop_data;
count->count = in->count;
count->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
break;
}
case HFI_PROPERTY_PARAM_DIVX_FORMAT: {
u32 *in = pdata;
switch (*in) {
case HFI_DIVX_FORMAT_4:
case HFI_DIVX_FORMAT_5:
case HFI_DIVX_FORMAT_6:
break;
default:
ret = -EINVAL;
break;
}
pkt->data[1] = *in;
pkt->shdr.hdr.size += sizeof(u32) * 2;
break;
}
case HFI_PROPERTY_CONFIG_VDEC_MB_ERROR_MAP_REPORTING: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_CONFIG_VENC_REQUEST_SYNC_FRAME:
pkt->shdr.hdr.size += sizeof(u32);
break;
case HFI_PROPERTY_PARAM_VENC_MPEG4_SHORT_HEADER:
break;
case HFI_PROPERTY_PARAM_VENC_MPEG4_AC_PREDICTION:
break;
case HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE: {
struct hfi_bitrate *in = pdata, *brate = prop_data;
brate->bitrate = in->bitrate;
brate->layer_id = in->layer_id;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*brate);
break;
}
case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE: {
struct hfi_bitrate *in = pdata, *hfi = prop_data;
hfi->bitrate = in->bitrate;
hfi->layer_id = in->layer_id;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
break;
}
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_CURRENT: {
struct hfi_profile_level *in = pdata, *pl = prop_data;
pl->level = in->level;
pl->profile = in->profile;
if (!pl->profile)
/* Profile not supported, falling back to high */
pl->profile = HFI_H264_PROFILE_HIGH;
if (!pl->level)
/* Level not supported, falling back to 1 */
pl->level = 1;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*pl);
break;
}
case HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL: {
struct hfi_h264_entropy_control *in = pdata, *hfi = prop_data;
hfi->entropy_mode = in->entropy_mode;
if (hfi->entropy_mode == HFI_H264_ENTROPY_CABAC)
hfi->cabac_model = in->cabac_model;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hfi);
break;
}
case HFI_PROPERTY_PARAM_VENC_RATE_CONTROL: {
u32 *in = pdata;
switch (*in) {
case HFI_RATE_CONTROL_OFF:
case HFI_RATE_CONTROL_CBR_CFR:
case HFI_RATE_CONTROL_CBR_VFR:
case HFI_RATE_CONTROL_VBR_CFR:
case HFI_RATE_CONTROL_VBR_VFR:
case HFI_RATE_CONTROL_CQ:
break;
default:
ret = -EINVAL;
break;
}
pkt->data[1] = *in;
pkt->shdr.hdr.size += sizeof(u32) * 2;
break;
}
case HFI_PROPERTY_PARAM_VENC_MPEG4_TIME_RESOLUTION: {
struct hfi_mpeg4_time_resolution *in = pdata, *res = prop_data;
res->time_increment_resolution = in->time_increment_resolution;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*res);
break;
}
case HFI_PROPERTY_PARAM_VENC_MPEG4_HEADER_EXTENSION: {
struct hfi_mpeg4_header_extension *in = pdata, *ext = prop_data;
ext->header_extension = in->header_extension;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ext);
break;
}
case HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL: {
struct hfi_h264_db_control *in = pdata, *db = prop_data;
switch (in->mode) {
case HFI_H264_DB_MODE_DISABLE:
case HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY:
case HFI_H264_DB_MODE_ALL_BOUNDARY:
break;
default:
ret = -EINVAL;
break;
}
db->mode = in->mode;
db->slice_alpha_offset = in->slice_alpha_offset;
db->slice_beta_offset = in->slice_beta_offset;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*db);
break;
}
case HFI_PROPERTY_PARAM_VENC_SESSION_QP: {
struct hfi_quantization *in = pdata, *quant = prop_data;
quant->qp_i = in->qp_i;
quant->qp_p = in->qp_p;
quant->qp_b = in->qp_b;
quant->layer_id = in->layer_id;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant);
break;
}
case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE: {
struct hfi_quantization_range *in = pdata, *range = prop_data;
u32 min_qp, max_qp;
min_qp = in->min_qp;
max_qp = in->max_qp;
/* We'll be packing the qp values into bytes, so make sure we
* won't lose data when masking
*/
if (min_qp > 0xff || max_qp > 0xff) {
ret = -ERANGE;
break;
}
/* When creating the packet, pack the qp value as
* 0xiippbb, where ii = qp range for I-frames,
* pp = qp range for P-frames, etc.
*/
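/* e.g. min_qp = 10 (0x0a) packs to 0x000a0a0a, one byte per frame type */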
range->min_qp = min_qp | min_qp << 8 | min_qp << 16;
range->max_qp = max_qp | max_qp << 8 | max_qp << 16;
range->layer_id = in->layer_id;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*range);
break;
}
case HFI_PROPERTY_PARAM_VENC_VC1_PERF_CFG: {
struct hfi_vc1e_perf_cfg_type *in = pdata, *perf = prop_data;
memcpy(perf->search_range_x_subsampled,
in->search_range_x_subsampled,
sizeof(perf->search_range_x_subsampled));
memcpy(perf->search_range_y_subsampled,
in->search_range_y_subsampled,
sizeof(perf->search_range_y_subsampled));
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*perf);
break;
}
case HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES: {
struct hfi_max_num_b_frames *bframes = prop_data;
u32 *in = pdata;
bframes->max_num_b_frames = *in;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*bframes);
break;
}
case HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD: {
struct hfi_intra_period *in = pdata, *intra = prop_data;
intra->pframes = in->pframes;
intra->bframes = in->bframes;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
break;
}
case HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD: {
struct hfi_idr_period *in = pdata, *idr = prop_data;
idr->idr_period = in->idr_period;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*idr);
break;
}
case HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR: {
struct hfi_conceal_color *color = prop_data;
u32 *in = pdata;
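/*
 * The control value carries one color component per 10-bit field; 1x
 * firmware wants 8 bits per component, so the low 8 bits of each
 * 10-bit field become bytes 0, 1 and 2 of conceal_color.
 */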
color->conceal_color = *in & 0xff;
color->conceal_color |= ((*in >> 10) & 0xff) << 8;
color->conceal_color |= ((*in >> 20) & 0xff) << 16;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color);
break;
}
case HFI_PROPERTY_CONFIG_VPE_OPERATIONS: {
struct hfi_operations_type *in = pdata, *ops = prop_data;
switch (in->rotation) {
case HFI_ROTATE_NONE:
case HFI_ROTATE_90:
case HFI_ROTATE_180:
case HFI_ROTATE_270:
break;
default:
ret = -EINVAL;
break;
}
switch (in->flip) {
case HFI_FLIP_NONE:
case HFI_FLIP_HORIZONTAL:
case HFI_FLIP_VERTICAL:
break;
default:
ret = -EINVAL;
break;
}
ops->rotation = in->rotation;
ops->flip = in->flip;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ops);
break;
}
case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: {
struct hfi_intra_refresh *in = pdata, *intra = prop_data;
switch (in->mode) {
case HFI_INTRA_REFRESH_NONE:
case HFI_INTRA_REFRESH_ADAPTIVE:
case HFI_INTRA_REFRESH_CYCLIC:
case HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE:
case HFI_INTRA_REFRESH_RANDOM:
break;
default:
ret = -EINVAL;
break;
}
intra->mode = in->mode;
intra->air_mbs = in->air_mbs;
intra->air_ref = in->air_ref;
intra->cir_mbs = in->cir_mbs;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
break;
}
case HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_CONTROL: {
struct hfi_multi_slice_control *in = pdata, *multi = prop_data;
switch (in->multi_slice) {
case HFI_MULTI_SLICE_OFF:
case HFI_MULTI_SLICE_GOB:
case HFI_MULTI_SLICE_BY_MB_COUNT:
case HFI_MULTI_SLICE_BY_BYTE_COUNT:
break;
default:
ret = -EINVAL;
break;
}
multi->multi_slice = in->multi_slice;
multi->slice_size = in->slice_size;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
break;
}
case HFI_PROPERTY_PARAM_VENC_SLICE_DELIVERY_MODE: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO: {
struct hfi_h264_vui_timing_info *in = pdata, *vui = prop_data;
vui->enable = in->enable;
vui->fixed_framerate = in->fixed_framerate;
vui->time_scale = in->time_scale;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*vui);
break;
}
case HFI_PROPERTY_CONFIG_VPE_DEINTERLACE: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE: {
struct hfi_buffer_alloc_mode *in = pdata, *mode = prop_data;
mode->type = in->type;
mode->mode = in->mode;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mode);
break;
}
case HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VENC_H264_VUI_BITSTREAM_RESTRC: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VDEC_SCS_THRESHOLD: {
struct hfi_scs_threshold *thres = prop_data;
u32 *in = pdata;
thres->threshold_value = *in;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*thres);
break;
}
case HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT: {
struct hfi_mvc_buffer_layout_descp_type *in = pdata;
struct hfi_mvc_buffer_layout_descp_type *mvc = prop_data;
switch (in->layout_type) {
case HFI_MVC_BUFFER_LAYOUT_TOP_BOTTOM:
case HFI_MVC_BUFFER_LAYOUT_SEQ:
break;
default:
ret = -EINVAL;
break;
}
mvc->layout_type = in->layout_type;
mvc->bright_view_first = in->bright_view_first;
mvc->ngap = in->ngap;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*mvc);
break;
}
case HFI_PROPERTY_PARAM_VENC_LTRMODE: {
struct hfi_ltr_mode *in = pdata, *ltr = prop_data;
switch (in->ltr_mode) {
case HFI_LTR_MODE_DISABLE:
case HFI_LTR_MODE_MANUAL:
case HFI_LTR_MODE_PERIODIC:
break;
default:
ret = -EINVAL;
break;
}
ltr->ltr_mode = in->ltr_mode;
ltr->ltr_count = in->ltr_count;
ltr->trust_mode = in->trust_mode;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr);
break;
}
case HFI_PROPERTY_CONFIG_VENC_USELTRFRAME: {
struct hfi_ltr_use *in = pdata, *ltr_use = prop_data;
ltr_use->frames = in->frames;
ltr_use->ref_ltr = in->ref_ltr;
ltr_use->use_constrnt = in->use_constrnt;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_use);
break;
}
case HFI_PROPERTY_CONFIG_VENC_MARKLTRFRAME: {
struct hfi_ltr_mark *in = pdata, *ltr_mark = prop_data;
ltr_mark->mark_frame = in->mark_frame;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*ltr_mark);
break;
}
case HFI_PROPERTY_PARAM_VENC_HIER_P_MAX_NUM_ENH_LAYER: {
u32 *in = pdata;
pkt->data[1] = *in;
pkt->shdr.hdr.size += sizeof(u32) * 2;
break;
}
case HFI_PROPERTY_CONFIG_VENC_HIER_P_ENH_LAYER: {
u32 *in = pdata;
pkt->data[1] = *in;
pkt->shdr.hdr.size += sizeof(u32) * 2;
break;
}
case HFI_PROPERTY_PARAM_VENC_DISABLE_RC_TIMESTAMP: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VENC_INITIAL_QP: {
struct hfi_initial_quantization *in = pdata, *quant = prop_data;
quant->init_qp_enable = in->init_qp_enable;
quant->qp_i = in->qp_i;
quant->qp_p = in->qp_p;
quant->qp_b = in->qp_b;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*quant);
break;
}
case HFI_PROPERTY_PARAM_VPE_COLOR_SPACE_CONVERSION: {
struct hfi_vpe_color_space_conversion *in = pdata;
struct hfi_vpe_color_space_conversion *csc = prop_data;
memcpy(csc->csc_matrix, in->csc_matrix,
sizeof(csc->csc_matrix));
memcpy(csc->csc_bias, in->csc_bias, sizeof(csc->csc_bias));
memcpy(csc->csc_limit, in->csc_limit, sizeof(csc->csc_limit));
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*csc);
break;
}
case HFI_PROPERTY_PARAM_VENC_VPX_ERROR_RESILIENCE_MODE: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VENC_H264_NAL_SVC_EXT: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_CONFIG_VENC_PERF_MODE: {
u32 *in = pdata;
pkt->data[1] = *in;
pkt->shdr.hdr.size += sizeof(u32) * 2;
break;
}
case HFI_PROPERTY_PARAM_VENC_HIER_B_MAX_NUM_ENH_LAYER: {
u32 *in = pdata;
pkt->data[1] = *in;
pkt->shdr.hdr.size += sizeof(u32) * 2;
break;
}
case HFI_PROPERTY_PARAM_VDEC_NONCP_OUTPUT2: {
struct hfi_enable *in = pdata, *en = prop_data;
en->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*en);
break;
}
case HFI_PROPERTY_PARAM_VENC_HIER_P_HYBRID_MODE: {
struct hfi_hybrid_hierp *in = pdata, *hierp = prop_data;
hierp->layers = in->layers;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hierp);
break;
}
case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_INFO: {
struct hfi_uncompressed_plane_actual_info *in = pdata;
struct hfi_uncompressed_plane_actual_info *info = prop_data;
info->buffer_type = in->buffer_type;
info->num_planes = in->num_planes;
info->plane_format[0] = in->plane_format[0];
if (in->num_planes > 1)
info->plane_format[1] = in->plane_format[1];
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
break;
}
case HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI:
return -ENOTSUPP;
/* FOLLOWING PROPERTIES ARE NOT IMPLEMENTED IN CORE YET */
case HFI_PROPERTY_CONFIG_BUFFER_REQUIREMENTS:
case HFI_PROPERTY_CONFIG_PRIORITY:
case HFI_PROPERTY_CONFIG_BATCH_INFO:
case HFI_PROPERTY_SYS_IDLE_INDICATOR:
case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
case HFI_PROPERTY_PARAM_INTERLACE_FORMAT_SUPPORTED:
case HFI_PROPERTY_PARAM_CHROMA_SITE:
case HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED:
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
case HFI_PROPERTY_PARAM_NAL_STREAM_FORMAT_SUPPORTED:
case HFI_PROPERTY_PARAM_MULTI_VIEW_FORMAT:
case HFI_PROPERTY_PARAM_MAX_SEQUENCE_HEADER_SIZE:
case HFI_PROPERTY_PARAM_CODEC_SUPPORTED:
case HFI_PROPERTY_PARAM_VDEC_MULTI_VIEW_SELECT:
case HFI_PROPERTY_PARAM_VDEC_MB_QUANTIZATION:
case HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB:
case HFI_PROPERTY_PARAM_VDEC_H264_ENTROPY_SWITCHING:
case HFI_PROPERTY_PARAM_VENC_MULTI_SLICE_INFO:
default:
return -EINVAL;
}
return ret;
}
static int
pkt_session_get_property_3xx(struct hfi_session_get_property_pkt *pkt,
void *cookie, u32 ptype)
{
int ret = 0;
if (!pkt || !cookie)
return -EINVAL;
pkt->shdr.hdr.size = sizeof(struct hfi_session_get_property_pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_GET_PROPERTY;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->num_properties = 1;
switch (ptype) {
case HFI_PROPERTY_CONFIG_VDEC_ENTROPY:
pkt->data[0] = HFI_PROPERTY_CONFIG_VDEC_ENTROPY;
break;
default:
ret = pkt_session_get_property_1x(pkt, cookie, ptype);
break;
}
return ret;
}
static int
pkt_session_set_property_3xx(struct hfi_session_set_property_pkt *pkt,
void *cookie, u32 ptype, void *pdata)
{
void *prop_data;
int ret = 0;
if (!pkt || !cookie || !pdata)
return -EINVAL;
prop_data = &pkt->data[1];
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->num_properties = 1;
pkt->data[0] = ptype;
/*
* Any session set property which is different in 3XX packetization
* should be added as a new case below. All unchanged session set
* properties will be handled in the default case.
*/
switch (ptype) {
case HFI_PROPERTY_PARAM_VDEC_MULTI_STREAM: {
struct hfi_multi_stream *in = pdata;
struct hfi_multi_stream_3x *multi = prop_data;
multi->buffer_type = in->buffer_type;
multi->enable = in->enable;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*multi);
break;
}
case HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH: {
struct hfi_intra_refresh *in = pdata;
struct hfi_intra_refresh_3x *intra = prop_data;
switch (in->mode) {
case HFI_INTRA_REFRESH_NONE:
case HFI_INTRA_REFRESH_ADAPTIVE:
case HFI_INTRA_REFRESH_CYCLIC:
case HFI_INTRA_REFRESH_CYCLIC_ADAPTIVE:
case HFI_INTRA_REFRESH_RANDOM:
break;
default:
ret = -EINVAL;
break;
}
intra->mode = in->mode;
intra->mbs = in->cir_mbs;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*intra);
break;
}
case HFI_PROPERTY_PARAM_VDEC_CONTINUE_DATA_TRANSFER:
/* for 3xx firmware versions, session_continue is used instead */
break;
default:
ret = pkt_session_set_property_1x(pkt, cookie, ptype, pdata);
break;
}
return ret;
}
static int
pkt_session_set_property_4xx(struct hfi_session_set_property_pkt *pkt,
void *cookie, u32 ptype, void *pdata)
{
void *prop_data;
if (!pkt || !cookie || !pdata)
return -EINVAL;
prop_data = &pkt->data[1];
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->num_properties = 1;
pkt->data[0] = ptype;
/*
* Any session set property which is different in 4XX packetization
* should be added as a new case below. All unchanged session set
* properties will be handled in the default case.
*/
switch (ptype) {
case HFI_PROPERTY_PARAM_BUFFER_COUNT_ACTUAL: {
struct hfi_buffer_count_actual *in = pdata;
struct hfi_buffer_count_actual_4xx *count = prop_data;
count->count_actual = in->count_actual;
count->type = in->type;
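/* 4xx additionally expects count_min_host; mirror the actual count */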
count->count_min_host = in->count_actual;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*count);
break;
}
case HFI_PROPERTY_PARAM_WORK_MODE: {
struct hfi_video_work_mode *in = pdata, *wm = prop_data;
wm->video_work_mode = in->video_work_mode;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*wm);
break;
}
case HFI_PROPERTY_CONFIG_VIDEOCORES_USAGE: {
struct hfi_videocores_usage_type *in = pdata, *cu = prop_data;
cu->video_core_enable_mask = in->video_core_enable_mask;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cu);
break;
}
case HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI: {
struct hfi_hdr10_pq_sei *in = pdata, *hdr10 = prop_data;
memcpy(hdr10, in, sizeof(*hdr10));
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*hdr10);
break;
}
case HFI_PROPERTY_PARAM_VDEC_CONCEAL_COLOR: {
struct hfi_conceal_color_v4 *color = prop_data;
u32 *in = pdata;
color->conceal_color_8bit = *in & 0xff;
color->conceal_color_8bit |= ((*in >> 10) & 0xff) << 8;
color->conceal_color_8bit |= ((*in >> 20) & 0xff) << 16;
color->conceal_color_10bit = *in;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*color);
break;
}
case HFI_PROPERTY_PARAM_VENC_H264_TRANSFORM_8X8: {
struct hfi_h264_8x8_transform *in = pdata, *tm = prop_data;
tm->enable_type = in->enable_type;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*tm);
break;
}
case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2: {
struct hfi_quantization_range_v2 *in = pdata, *range = prop_data;
u32 min_qp, max_qp;
min_qp = in->min_qp.qp_packed;
max_qp = in->max_qp.qp_packed;
/* We'll be packing the qp values into bytes, so make sure we
* won't lose data when masking
*/
if (min_qp > 0xff || max_qp > 0xff)
return -ERANGE;
range->min_qp.layer_id = 0xFF;
range->max_qp.layer_id = 0xFF;
range->min_qp.qp_packed = (min_qp & 0xFF) | ((min_qp & 0xFF) << 8) |
((min_qp & 0xFF) << 16);
range->max_qp.qp_packed = (max_qp & 0xFF) | ((max_qp & 0xFF) << 8) |
((max_qp & 0xFF) << 16);
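/* enable is presumably a per-frame-type bitmask; 0x7 covers I, P and B */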
range->min_qp.enable = 7;
range->max_qp.enable = 7;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*range);
break;
}
case HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE:
case HFI_PROPERTY_CONFIG_VDEC_POST_LOOP_DEBLOCKER:
case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE:
case HFI_PROPERTY_PARAM_VENC_SESSION_QP:
case HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE:
/* not implemented on Venus 4xx */
return -ENOTSUPP;
default:
return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
}
return 0;
}
static int
pkt_session_set_property_6xx(struct hfi_session_set_property_pkt *pkt,
void *cookie, u32 ptype, void *pdata)
{
void *prop_data;
if (!pkt || !cookie || !pdata)
return -EINVAL;
prop_data = &pkt->data[1];
pkt->shdr.hdr.size = sizeof(*pkt);
pkt->shdr.hdr.pkt_type = HFI_CMD_SESSION_SET_PROPERTY;
pkt->shdr.session_id = hash32_ptr(cookie);
pkt->num_properties = 1;
pkt->data[0] = ptype;
switch (ptype) {
case HFI_PROPERTY_PARAM_UNCOMPRESSED_PLANE_ACTUAL_CONSTRAINTS_INFO: {
struct hfi_uncompressed_plane_actual_constraints_info *in = pdata;
struct hfi_uncompressed_plane_actual_constraints_info *info = prop_data;
info->buffer_type = in->buffer_type;
info->num_planes = in->num_planes;
info->plane_format[0] = in->plane_format[0];
if (in->num_planes > 1)
info->plane_format[1] = in->plane_format[1];
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*info);
break;
}
case HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY: {
struct hfi_heic_frame_quality *in = pdata, *cq = prop_data;
cq->frame_quality = in->frame_quality;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cq);
break;
}
case HFI_PROPERTY_PARAM_WORK_ROUTE: {
struct hfi_video_work_route *in = pdata, *wr = prop_data;
wr->video_work_route = in->video_work_route;
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*wr);
break;
}
default:
return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
}
return 0;
}
int pkt_session_get_property(struct hfi_session_get_property_pkt *pkt,
void *cookie, u32 ptype)
{
if (hfi_ver == HFI_VERSION_1XX)
return pkt_session_get_property_1x(pkt, cookie, ptype);
return pkt_session_get_property_3xx(pkt, cookie, ptype);
}
int pkt_session_set_property(struct hfi_session_set_property_pkt *pkt,
void *cookie, u32 ptype, void *pdata)
{
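/*
 * Each newer packetization handler falls back to the previous one for
 * properties whose layout did not change, so dispatching to the newest
 * handler matching hfi_ver covers every property.
 */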
if (hfi_ver == HFI_VERSION_1XX)
return pkt_session_set_property_1x(pkt, cookie, ptype, pdata);
if (hfi_ver == HFI_VERSION_3XX)
return pkt_session_set_property_3xx(pkt, cookie, ptype, pdata);
if (hfi_ver == HFI_VERSION_4XX)
return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
return pkt_session_set_property_6xx(pkt, cookie, ptype, pdata);
}
void pkt_set_version(enum hfi_version version)
{
hfi_ver = version;
}
| linux-master | drivers/media/platform/qcom/venus/hfi_cmds.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#include <linux/of.h>
#include "hfi_platform.h"
#include "core.h"
const struct hfi_platform *hfi_platform_get(enum hfi_version version)
{
switch (version) {
case HFI_VERSION_4XX:
return &hfi_plat_v4;
case HFI_VERSION_6XX:
return &hfi_plat_v6;
default:
break;
}
return NULL;
}
unsigned long
hfi_platform_get_codec_vpp_freq(enum hfi_version version, u32 codec, u32 session_type)
{
const struct hfi_platform *plat;
unsigned long freq = 0;
plat = hfi_platform_get(version);
if (!plat)
return 0;
if (plat->codec_vpp_freq)
freq = plat->codec_vpp_freq(session_type, codec);
return freq;
}
unsigned long
hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 codec, u32 session_type)
{
const struct hfi_platform *plat;
unsigned long freq = 0;
plat = hfi_platform_get(version);
if (!plat)
return 0;
if (plat->codec_vsp_freq)
freq = plat->codec_vsp_freq(session_type, codec);
return freq;
}
unsigned long
hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, u32 session_type)
{
const struct hfi_platform *plat;
unsigned long freq = 0;
plat = hfi_platform_get(version);
if (!plat)
return 0;
if (plat->codec_lp_freq)
freq = plat->codec_lp_freq(session_type, codec);
return freq;
}
int
hfi_platform_get_codecs(struct venus_core *core, u32 *enc_codecs, u32 *dec_codecs, u32 *count)
{
const struct hfi_platform *plat;
plat = hfi_platform_get(core->res->hfi_version);
if (!plat)
return -EINVAL;
if (plat->codecs)
plat->codecs(enc_codecs, dec_codecs, count);
if (IS_IRIS2_1(core)) {
*enc_codecs &= ~HFI_VIDEO_CODEC_VP8;
*dec_codecs &= ~HFI_VIDEO_CODEC_VP8;
}
return 0;
}
| linux-master | drivers/media/platform/qcom/venus/hfi_platform.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ctrls.h>
#include "hfi_venus_io.h"
#include "hfi_parser.h"
#include "core.h"
#include "helpers.h"
#include "venc.h"
#include "pm_helpers.h"
#define NUM_B_FRAMES_MAX 4
/*
* Three reasons to keep the MPLANE formats (despite the number of planes
* currently being one):
* - the MPLANE formats allow only one plane to be used
* - the downstream driver uses MPLANE formats too
* - future firmware versions could add support for >1 planes
*/
static const struct venus_format venc_formats[] = {
[VENUS_FMT_NV12] = {
.pixfmt = V4L2_PIX_FMT_NV12,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
},
[VENUS_FMT_H264] = {
.pixfmt = V4L2_PIX_FMT_H264,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
[VENUS_FMT_VP8] = {
.pixfmt = V4L2_PIX_FMT_VP8,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
[VENUS_FMT_HEVC] = {
.pixfmt = V4L2_PIX_FMT_HEVC,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
[VENUS_FMT_MPEG4] = {
.pixfmt = V4L2_PIX_FMT_MPEG4,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
[VENUS_FMT_H263] = {
.pixfmt = V4L2_PIX_FMT_H263,
.num_planes = 1,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
},
};
static const struct venus_format *
find_format(struct venus_inst *inst, u32 pixfmt, u32 type)
{
const struct venus_format *fmt = venc_formats;
unsigned int size = ARRAY_SIZE(venc_formats);
unsigned int i;
for (i = 0; i < size; i++) {
if (fmt[i].pixfmt == pixfmt)
break;
}
if (i == size || fmt[i].type != type)
return NULL;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
!venus_helper_check_codec(inst, fmt[i].pixfmt))
return NULL;
return &fmt[i];
}
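/*
 * Return the index'th format of the given type, counting only codecs the
 * core actually supports, so V4L2 enumeration indices stay contiguous.
 */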
static const struct venus_format *
find_format_by_index(struct venus_inst *inst, unsigned int index, u32 type)
{
const struct venus_format *fmt = venc_formats;
unsigned int size = ARRAY_SIZE(venc_formats);
unsigned int i, k = 0;
if (index >= size)
return NULL;
for (i = 0; i < size; i++) {
bool valid;
if (fmt[i].type != type)
continue;
valid = type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
venus_helper_check_codec(inst, fmt[i].pixfmt);
if (k == index && valid)
break;
if (valid)
k++;
}
if (i == size)
return NULL;
return &fmt[i];
}
static int venc_v4l2_to_hfi(int id, int value)
{
switch (id) {
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
switch (value) {
case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC:
default:
return HFI_H264_ENTROPY_CAVLC;
case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC:
return HFI_H264_ENTROPY_CABAC;
}
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
switch (value) {
case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED:
default:
return HFI_H264_DB_MODE_ALL_BOUNDARY;
case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED:
return HFI_H264_DB_MODE_DISABLE;
case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY:
return HFI_H264_DB_MODE_SKIP_SLICE_BOUNDARY;
}
}
return 0;
}
static int
venc_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
strscpy(cap->driver, "qcom-venus", sizeof(cap->driver));
strscpy(cap->card, "Qualcomm Venus video encoder", sizeof(cap->card));
strscpy(cap->bus_info, "platform:qcom-venus", sizeof(cap->bus_info));
return 0;
}
static int venc_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
struct venus_inst *inst = to_inst(file);
const struct venus_format *fmt;
fmt = find_format_by_index(inst, f->index, f->type);
memset(f->reserved, 0, sizeof(f->reserved));
if (!fmt)
return -EINVAL;
f->pixelformat = fmt->pixfmt;
return 0;
}
static const struct venus_format *
venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_plane_pix_format *pfmt = pixmp->plane_fmt;
const struct venus_format *fmt;
u32 sizeimage;
memset(pfmt[0].reserved, 0, sizeof(pfmt[0].reserved));
memset(pixmp->reserved, 0, sizeof(pixmp->reserved));
fmt = find_format(inst, pixmp->pixelformat, f->type);
if (!fmt) {
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
pixmp->pixelformat = V4L2_PIX_FMT_H264;
else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
pixmp->pixelformat = V4L2_PIX_FMT_NV12;
else
return NULL;
fmt = find_format(inst, pixmp->pixelformat, f->type);
if (!fmt)
return NULL;
}
pixmp->width = clamp(pixmp->width, frame_width_min(inst),
frame_width_max(inst));
pixmp->height = clamp(pixmp->height, frame_height_min(inst),
frame_height_max(inst));
pixmp->width = ALIGN(pixmp->width, 128);
pixmp->height = ALIGN(pixmp->height, 32);
pixmp->width = ALIGN(pixmp->width, 2);
pixmp->height = ALIGN(pixmp->height, 2);
if (pixmp->field == V4L2_FIELD_ANY)
pixmp->field = V4L2_FIELD_NONE;
pixmp->num_planes = fmt->num_planes;
pixmp->flags = 0;
sizeimage = venus_helper_get_framesz(pixmp->pixelformat,
pixmp->width,
pixmp->height);
pfmt[0].sizeimage = max(ALIGN(pfmt[0].sizeimage, SZ_4K), sizeimage);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
pfmt[0].bytesperline = ALIGN(pixmp->width, 128);
else
pfmt[0].bytesperline = 0;
return fmt;
}
static int venc_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct venus_inst *inst = to_inst(file);
venc_try_fmt_common(inst, f);
return 0;
}
static int venc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct venus_inst *inst = to_inst(file);
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct v4l2_pix_format_mplane orig_pixmp;
const struct venus_format *fmt;
struct v4l2_format format;
u32 pixfmt_out = 0, pixfmt_cap = 0;
struct vb2_queue *q;
q = v4l2_m2m_get_vq(inst->m2m_ctx, f->type);
if (!q)
return -EINVAL;
if (vb2_is_busy(q))
return -EBUSY;
orig_pixmp = *pixmp;
fmt = venc_try_fmt_common(inst, f);
if (!fmt)
return -EINVAL;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pixfmt_out = pixmp->pixelformat;
pixfmt_cap = inst->fmt_cap->pixfmt;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
pixfmt_cap = pixmp->pixelformat;
pixfmt_out = inst->fmt_out->pixfmt;
}
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
format.fmt.pix_mp.pixelformat = pixfmt_out;
format.fmt.pix_mp.width = orig_pixmp.width;
format.fmt.pix_mp.height = orig_pixmp.height;
venc_try_fmt_common(inst, &format);
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
inst->out_width = format.fmt.pix_mp.width;
inst->out_height = format.fmt.pix_mp.height;
inst->colorspace = pixmp->colorspace;
inst->ycbcr_enc = pixmp->ycbcr_enc;
inst->quantization = pixmp->quantization;
inst->xfer_func = pixmp->xfer_func;
}
memset(&format, 0, sizeof(format));
format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
format.fmt.pix_mp.pixelformat = pixfmt_cap;
format.fmt.pix_mp.width = orig_pixmp.width;
format.fmt.pix_mp.height = orig_pixmp.height;
venc_try_fmt_common(inst, &format);
inst->width = format.fmt.pix_mp.width;
inst->height = format.fmt.pix_mp.height;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->fmt_out = fmt;
else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
inst->fmt_cap = fmt;
inst->output_buf_size = pixmp->plane_fmt[0].sizeimage;
}
return 0;
}
static int venc_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pixmp = &f->fmt.pix_mp;
struct venus_inst *inst = to_inst(file);
const struct venus_format *fmt;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
fmt = inst->fmt_cap;
else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
fmt = inst->fmt_out;
else
return -EINVAL;
pixmp->pixelformat = fmt->pixfmt;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
pixmp->width = inst->width;
pixmp->height = inst->height;
pixmp->colorspace = inst->colorspace;
pixmp->ycbcr_enc = inst->ycbcr_enc;
pixmp->quantization = inst->quantization;
pixmp->xfer_func = inst->xfer_func;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pixmp->width = inst->out_width;
pixmp->height = inst->out_height;
}
venc_try_fmt_common(inst, f);
return 0;
}
static int
venc_g_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
struct venus_inst *inst = to_inst(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
switch (s->target) {
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
s->r.width = inst->out_width;
s->r.height = inst->out_height;
break;
case V4L2_SEL_TGT_CROP:
s->r.width = inst->width;
s->r.height = inst->height;
break;
default:
return -EINVAL;
}
s->r.top = 0;
s->r.left = 0;
return 0;
}
static int
venc_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
struct venus_inst *inst = to_inst(file);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
if (s->r.width > inst->out_width ||
s->r.height > inst->out_height)
return -EINVAL;
s->r.width = ALIGN(s->r.width, 2);
s->r.height = ALIGN(s->r.height, 2);
switch (s->target) {
case V4L2_SEL_TGT_CROP:
s->r.top = 0;
s->r.left = 0;
inst->width = s->r.width;
inst->height = s->r.height;
break;
default:
return -EINVAL;
}
return 0;
}
static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
struct venus_inst *inst = to_inst(file);
struct v4l2_outputparm *out = &a->parm.output;
struct v4l2_fract *timeperframe = &out->timeperframe;
u64 us_per_frame, fps;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return -EINVAL;
memset(out->reserved, 0, sizeof(out->reserved));
if (!timeperframe->denominator)
timeperframe->denominator = inst->timeperframe.denominator;
if (!timeperframe->numerator)
timeperframe->numerator = inst->timeperframe.numerator;
out->capability = V4L2_CAP_TIMEPERFRAME;
us_per_frame = timeperframe->numerator * (u64)USEC_PER_SEC;
do_div(us_per_frame, timeperframe->denominator);
if (!us_per_frame)
return -EINVAL;
fps = (u64)USEC_PER_SEC;
do_div(fps, us_per_frame);
inst->timeperframe = *timeperframe;
inst->fps = fps;
return 0;
}
static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
struct venus_inst *inst = to_inst(file);
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
return -EINVAL;
a->parm.output.capability |= V4L2_CAP_TIMEPERFRAME;
a->parm.output.timeperframe = inst->timeperframe;
return 0;
}
static int venc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct venus_inst *inst = to_inst(file);
const struct venus_format *fmt;
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
fmt = find_format(inst, fsize->pixel_format,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (!fmt) {
fmt = find_format(inst, fsize->pixel_format,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
if (!fmt)
return -EINVAL;
}
if (fsize->index)
return -EINVAL;
fsize->stepwise.min_width = frame_width_min(inst);
fsize->stepwise.max_width = frame_width_max(inst);
fsize->stepwise.step_width = frame_width_step(inst);
fsize->stepwise.min_height = frame_height_min(inst);
fsize->stepwise.max_height = frame_height_max(inst);
fsize->stepwise.step_height = frame_height_step(inst);
return 0;
}
static int venc_enum_frameintervals(struct file *file, void *fh,
struct v4l2_frmivalenum *fival)
{
struct venus_inst *inst = to_inst(file);
const struct venus_format *fmt;
unsigned int framerate_factor = 1;
fival->type = V4L2_FRMIVAL_TYPE_STEPWISE;
fmt = find_format(inst, fival->pixel_format,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (!fmt) {
fmt = find_format(inst, fival->pixel_format,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
if (!fmt)
return -EINVAL;
}
if (fival->index)
return -EINVAL;
if (!fival->width || !fival->height)
return -EINVAL;
if (fival->width > frame_width_max(inst) ||
fival->width < frame_width_min(inst) ||
fival->height > frame_height_max(inst) ||
fival->height < frame_height_min(inst))
return -EINVAL;
if (IS_V1(inst->core)) {
/* framerate is reported in 1/65536 (Q16) fps units */
framerate_factor = (1 << 16);
}
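/* e.g. a v1 frate_max of 30 << 16 (1966080) corresponds to 30 fps */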
fival->stepwise.min.numerator = 1;
fival->stepwise.min.denominator = frate_max(inst) / framerate_factor;
fival->stepwise.max.numerator = 1;
fival->stepwise.max.denominator = frate_min(inst) / framerate_factor;
fival->stepwise.step.numerator = 1;
fival->stepwise.step.denominator = frate_max(inst) / framerate_factor;
return 0;
}
static int venc_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
switch (sub->type) {
case V4L2_EVENT_EOS:
return v4l2_event_subscribe(fh, sub, 2, NULL);
case V4L2_EVENT_CTRL:
return v4l2_ctrl_subscribe_event(fh, sub);
default:
return -EINVAL;
}
}
static int
venc_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *cmd)
{
struct venus_inst *inst = to_inst(file);
struct hfi_frame_data fdata = {0};
int ret = 0;
ret = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, cmd);
if (ret)
return ret;
mutex_lock(&inst->lock);
if (cmd->cmd == V4L2_ENC_CMD_STOP &&
inst->enc_state == VENUS_ENC_STATE_ENCODING) {
/*
* Implement V4L2_ENC_CMD_STOP by enqueuing an empty buffer on
* encoder input to signal EOS.
*/
if (!(inst->streamon_out && inst->streamon_cap))
goto unlock;
fdata.buffer_type = HFI_BUFFER_INPUT;
fdata.flags |= HFI_BUFFERFLAG_EOS;
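/* dummy address: the EOS buffer carries no payload, only the EOS flag */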
fdata.device_addr = 0xdeadb000;
ret = hfi_session_process_buf(inst, &fdata);
inst->enc_state = VENUS_ENC_STATE_DRAIN;
} else if (cmd->cmd == V4L2_ENC_CMD_START) {
if (inst->enc_state == VENUS_ENC_STATE_DRAIN) {
ret = -EBUSY;
goto unlock;
}
if (inst->enc_state == VENUS_ENC_STATE_STOPPED) {
vb2_clear_last_buffer_dequeued(&inst->fh.m2m_ctx->cap_q_ctx.q);
inst->enc_state = VENUS_ENC_STATE_ENCODING;
}
}
unlock:
mutex_unlock(&inst->lock);
return ret;
}
static const struct v4l2_ioctl_ops venc_ioctl_ops = {
.vidioc_querycap = venc_querycap,
.vidioc_enum_fmt_vid_cap = venc_enum_fmt,
.vidioc_enum_fmt_vid_out = venc_enum_fmt,
.vidioc_s_fmt_vid_cap_mplane = venc_s_fmt,
.vidioc_s_fmt_vid_out_mplane = venc_s_fmt,
.vidioc_g_fmt_vid_cap_mplane = venc_g_fmt,
.vidioc_g_fmt_vid_out_mplane = venc_g_fmt,
.vidioc_try_fmt_vid_cap_mplane = venc_try_fmt,
.vidioc_try_fmt_vid_out_mplane = venc_try_fmt,
.vidioc_g_selection = venc_g_selection,
.vidioc_s_selection = venc_s_selection,
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_s_parm = venc_s_parm,
.vidioc_g_parm = venc_g_parm,
.vidioc_enum_framesizes = venc_enum_framesizes,
.vidioc_enum_frameintervals = venc_enum_frameintervals,
.vidioc_subscribe_event = venc_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
.vidioc_encoder_cmd = venc_encoder_cmd,
};
static int venc_pm_get(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
struct device *dev = core->dev_enc;
int ret;
mutex_lock(&core->pm_lock);
ret = pm_runtime_resume_and_get(dev);
mutex_unlock(&core->pm_lock);
return ret < 0 ? ret : 0;
}
static int venc_pm_put(struct venus_inst *inst, bool autosuspend)
{
struct venus_core *core = inst->core;
struct device *dev = core->dev_enc;
int ret;
mutex_lock(&core->pm_lock);
if (autosuspend)
ret = pm_runtime_put_autosuspend(dev);
else
ret = pm_runtime_put_sync(dev);
mutex_unlock(&core->pm_lock);
return ret < 0 ? ret : 0;
}
static int venc_pm_get_put(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
struct device *dev = core->dev_enc;
int ret = 0;
mutex_lock(&core->pm_lock);
if (pm_runtime_suspended(dev)) {
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
goto error;
ret = pm_runtime_put_autosuspend(dev);
}
error:
mutex_unlock(&core->pm_lock);
return ret < 0 ? ret : 0;
}
static void venc_pm_touch(struct venus_inst *inst)
{
pm_runtime_mark_last_busy(inst->core->dev_enc);
}
static int venc_set_properties(struct venus_inst *inst)
{
struct venc_controls *ctr = &inst->controls.enc;
struct hfi_intra_period intra_period;
struct hfi_framerate frate;
struct hfi_bitrate brate;
struct hfi_idr_period idrp;
struct hfi_quantization quant;
struct hfi_quantization_range quant_range;
struct hfi_quantization_range_v2 quant_range_v2;
struct hfi_enable en;
struct hfi_ltr_mode ltr_mode;
struct hfi_intra_refresh intra_refresh = {};
u32 ptype, rate_control, bitrate;
u32 profile, level;
int ret;
ret = venus_helper_set_work_mode(inst);
if (ret)
return ret;
ptype = HFI_PROPERTY_CONFIG_FRAME_RATE;
frate.buffer_type = HFI_BUFFER_OUTPUT;
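/* the firmware takes the frame rate in Q16 fixed point, e.g. 30 fps -> 30 << 16 */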
frate.framerate = inst->fps * (1 << 16);
ret = hfi_session_set_property(inst, ptype, &frate);
if (ret)
return ret;
if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264) {
struct hfi_h264_vui_timing_info info;
struct hfi_h264_entropy_control entropy;
struct hfi_h264_db_control deblock;
struct hfi_h264_8x8_transform h264_transform;
ptype = HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO;
info.enable = 1;
info.fixed_framerate = 1;
info.time_scale = NSEC_PER_SEC;
ret = hfi_session_set_property(inst, ptype, &info);
if (ret)
return ret;
ptype = HFI_PROPERTY_PARAM_VENC_H264_ENTROPY_CONTROL;
entropy.entropy_mode = venc_v4l2_to_hfi(
V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE,
ctr->h264_entropy_mode);
entropy.cabac_model = HFI_H264_CABAC_MODEL_0;
ret = hfi_session_set_property(inst, ptype, &entropy);
if (ret)
return ret;
ptype = HFI_PROPERTY_PARAM_VENC_H264_DEBLOCK_CONTROL;
deblock.mode = venc_v4l2_to_hfi(
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
ctr->h264_loop_filter_mode);
deblock.slice_alpha_offset = ctr->h264_loop_filter_alpha;
deblock.slice_beta_offset = ctr->h264_loop_filter_beta;
ret = hfi_session_set_property(inst, ptype, &deblock);
if (ret)
return ret;
ptype = HFI_PROPERTY_PARAM_VENC_H264_TRANSFORM_8X8;
h264_transform.enable_type = 0;
if (ctr->profile.h264 == V4L2_MPEG_VIDEO_H264_PROFILE_HIGH ||
ctr->profile.h264 == V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
h264_transform.enable_type = ctr->h264_8x8_transform;
ret = hfi_session_set_property(inst, ptype, &h264_transform);
if (ret)
return ret;
}
if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264 ||
inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
/* IDR periodicity, n:
* n = 0 - only the first I-frame is an IDR frame
* n = 1 - all I-frames will be IDR frames
* n > 1 - every n-th I-frame will be an IDR frame
*/
ptype = HFI_PROPERTY_CONFIG_VENC_IDR_PERIOD;
idrp.idr_period = 0;
ret = hfi_session_set_property(inst, ptype, &idrp);
if (ret)
return ret;
}
if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC &&
ctr->profile.hevc == V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10) {
struct hfi_hdr10_pq_sei hdr10;
unsigned int c;
ptype = HFI_PROPERTY_PARAM_VENC_HDR10_PQ_SEI;
for (c = 0; c < 3; c++) {
hdr10.mastering.display_primaries_x[c] =
ctr->mastering.display_primaries_x[c];
hdr10.mastering.display_primaries_y[c] =
ctr->mastering.display_primaries_y[c];
}
hdr10.mastering.white_point_x = ctr->mastering.white_point_x;
hdr10.mastering.white_point_y = ctr->mastering.white_point_y;
hdr10.mastering.max_display_mastering_luminance =
ctr->mastering.max_display_mastering_luminance;
hdr10.mastering.min_display_mastering_luminance =
ctr->mastering.min_display_mastering_luminance;
hdr10.cll.max_content_light = ctr->cll.max_content_light_level;
hdr10.cll.max_pic_average_light =
ctr->cll.max_pic_average_light_level;
ret = hfi_session_set_property(inst, ptype, &hdr10);
if (ret)
return ret;
}
if (ctr->num_b_frames) {
u32 max_num_b_frames = NUM_B_FRAMES_MAX;
ptype = HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES;
ret = hfi_session_set_property(inst, ptype, &max_num_b_frames);
if (ret)
return ret;
}
ptype = HFI_PROPERTY_CONFIG_VENC_INTRA_PERIOD;
intra_period.pframes = ctr->num_p_frames;
intra_period.bframes = ctr->num_b_frames;
ret = hfi_session_set_property(inst, ptype, &intra_period);
if (ret)
return ret;
if (!ctr->rc_enable)
rate_control = HFI_RATE_CONTROL_OFF;
else if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR)
rate_control = ctr->frame_skip_mode ? HFI_RATE_CONTROL_VBR_VFR :
HFI_RATE_CONTROL_VBR_CFR;
else if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
rate_control = ctr->frame_skip_mode ? HFI_RATE_CONTROL_CBR_VFR :
HFI_RATE_CONTROL_CBR_CFR;
else if (ctr->bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
rate_control = HFI_RATE_CONTROL_CQ;
ptype = HFI_PROPERTY_PARAM_VENC_RATE_CONTROL;
ret = hfi_session_set_property(inst, ptype, &rate_control);
if (ret)
return ret;
if (rate_control == HFI_RATE_CONTROL_CQ && ctr->const_quality) {
struct hfi_heic_frame_quality quality = {};
ptype = HFI_PROPERTY_CONFIG_HEIC_FRAME_QUALITY;
quality.frame_quality = ctr->const_quality;
ret = hfi_session_set_property(inst, ptype, &quality);
if (ret)
return ret;
}
if (!ctr->bitrate)
bitrate = 64000;
else
bitrate = ctr->bitrate;
ptype = HFI_PROPERTY_CONFIG_VENC_TARGET_BITRATE;
brate.bitrate = bitrate;
brate.layer_id = 0;
ret = hfi_session_set_property(inst, ptype, &brate);
if (ret)
return ret;
if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264 ||
inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
ptype = HFI_PROPERTY_CONFIG_VENC_SYNC_FRAME_SEQUENCE_HEADER;
if (ctr->header_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE)
en.enable = 0;
else
en.enable = 1;
ret = hfi_session_set_property(inst, ptype, &en);
if (ret)
return ret;
}
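/* default the peak bitrate to twice the target when userspace sets none */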
if (!ctr->bitrate_peak)
bitrate *= 2;
else
bitrate = ctr->bitrate_peak;
ptype = HFI_PROPERTY_CONFIG_VENC_MAX_BITRATE;
brate.bitrate = bitrate;
brate.layer_id = 0;
ret = hfi_session_set_property(inst, ptype, &brate);
if (ret)
return ret;
ptype = HFI_PROPERTY_PARAM_VENC_SESSION_QP;
if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
quant.qp_i = ctr->hevc_i_qp;
quant.qp_p = ctr->hevc_p_qp;
quant.qp_b = ctr->hevc_b_qp;
} else {
quant.qp_i = ctr->h264_i_qp;
quant.qp_p = ctr->h264_p_qp;
quant.qp_b = ctr->h264_b_qp;
}
quant.layer_id = 0;
ret = hfi_session_set_property(inst, ptype, &quant);
if (ret)
return ret;
if (inst->core->res->hfi_version == HFI_VERSION_4XX ||
inst->core->res->hfi_version == HFI_VERSION_6XX) {
ptype = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE_V2;
if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
quant_range_v2.min_qp.qp_packed = ctr->hevc_min_qp;
quant_range_v2.max_qp.qp_packed = ctr->hevc_max_qp;
} else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_VP8) {
quant_range_v2.min_qp.qp_packed = ctr->vp8_min_qp;
quant_range_v2.max_qp.qp_packed = ctr->vp8_max_qp;
} else {
quant_range_v2.min_qp.qp_packed = ctr->h264_min_qp;
quant_range_v2.max_qp.qp_packed = ctr->h264_max_qp;
}
ret = hfi_session_set_property(inst, ptype, &quant_range_v2);
} else {
ptype = HFI_PROPERTY_PARAM_VENC_SESSION_QP_RANGE;
if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
quant_range.min_qp = ctr->hevc_min_qp;
quant_range.max_qp = ctr->hevc_max_qp;
} else if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_VP8) {
quant_range.min_qp = ctr->vp8_min_qp;
quant_range.max_qp = ctr->vp8_max_qp;
} else {
quant_range.min_qp = ctr->h264_min_qp;
quant_range.max_qp = ctr->h264_max_qp;
}
quant_range.layer_id = 0;
ret = hfi_session_set_property(inst, ptype, &quant_range);
}
if (ret)
return ret;
ptype = HFI_PROPERTY_PARAM_VENC_LTRMODE;
ltr_mode.ltr_count = ctr->ltr_count;
ltr_mode.ltr_mode = HFI_LTR_MODE_MANUAL;
ltr_mode.trust_mode = 1;
ret = hfi_session_set_property(inst, ptype, <r_mode);
if (ret)
return ret;
switch (inst->hfi_codec) {
case HFI_VIDEO_CODEC_H264:
profile = ctr->profile.h264;
level = ctr->level.h264;
break;
case HFI_VIDEO_CODEC_MPEG4:
profile = ctr->profile.mpeg4;
level = ctr->level.mpeg4;
break;
case HFI_VIDEO_CODEC_VP8:
profile = ctr->profile.vp8;
level = 0;
break;
case HFI_VIDEO_CODEC_VP9:
profile = ctr->profile.vp9;
level = ctr->level.vp9;
break;
case HFI_VIDEO_CODEC_HEVC:
profile = ctr->profile.hevc;
level = ctr->level.hevc;
break;
case HFI_VIDEO_CODEC_MPEG2:
default:
profile = 0;
level = 0;
break;
}
ret = venus_helper_set_profile_level(inst, profile, level);
if (ret)
return ret;
if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264 ||
inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) {
struct hfi_enable en = {};
ptype = HFI_PROPERTY_PARAM_VENC_H264_GENERATE_AUDNAL;
if (ctr->aud_enable)
en.enable = 1;
ret = hfi_session_set_property(inst, ptype, &en);
}
if ((inst->fmt_cap->pixfmt == V4L2_PIX_FMT_H264 ||
inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) &&
(rate_control == HFI_RATE_CONTROL_CBR_VFR ||
rate_control == HFI_RATE_CONTROL_CBR_CFR)) {
intra_refresh.mode = HFI_INTRA_REFRESH_NONE;
intra_refresh.cir_mbs = 0;
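/*
 * Spread the refresh over intra_refresh_period frames, refreshing roughly
 * total_mbs / period macroblocks per frame: e.g. 1920x1088 gives
 * 120 * 68 = 8160 MBs, and a period of 32 yields 255 MBs per frame.
 */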
if (ctr->intra_refresh_period) {
u32 mbs;
mbs = ALIGN(inst->width, 16) * ALIGN(inst->height, 16);
mbs /= 16 * 16;
if (mbs % ctr->intra_refresh_period)
mbs++;
mbs /= ctr->intra_refresh_period;
intra_refresh.cir_mbs = mbs;
if (ctr->intra_refresh_type ==
V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE_CYCLIC)
intra_refresh.mode = HFI_INTRA_REFRESH_CYCLIC;
else
intra_refresh.mode = HFI_INTRA_REFRESH_RANDOM;
}
ptype = HFI_PROPERTY_PARAM_VENC_INTRA_REFRESH;
ret = hfi_session_set_property(inst, ptype, &intra_refresh);
if (ret)
return ret;
}
return 0;
}
static int venc_init_session(struct venus_inst *inst)
{
int ret;
ret = venus_helper_session_init(inst);
if (ret == -EALREADY)
return 0;
else if (ret)
return ret;
ret = venus_helper_set_stride(inst, inst->out_width,
inst->out_height);
if (ret)
goto deinit;
ret = venus_helper_set_input_resolution(inst, inst->width,
inst->height);
if (ret)
goto deinit;
ret = venus_helper_set_output_resolution(inst, inst->width,
inst->height,
HFI_BUFFER_OUTPUT);
if (ret)
goto deinit;
ret = venus_helper_set_color_format(inst, inst->fmt_out->pixfmt);
if (ret)
goto deinit;
ret = venc_set_properties(inst);
if (ret)
goto deinit;
return 0;
deinit:
hfi_session_deinit(inst);
return ret;
}
static int venc_out_num_buffers(struct venus_inst *inst, unsigned int *num)
{
struct hfi_buffer_requirements bufreq;
int ret;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
if (ret)
return ret;
*num = bufreq.count_actual;
return 0;
}
static int venc_queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
struct venus_core *core = inst->core;
unsigned int num, min = 4;
int ret;
if (*num_planes) {
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
*num_planes != inst->fmt_out->num_planes)
return -EINVAL;
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
*num_planes != inst->fmt_cap->num_planes)
return -EINVAL;
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
sizes[0] < inst->input_buf_size)
return -EINVAL;
if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
sizes[0] < inst->output_buf_size)
return -EINVAL;
return 0;
}
if (test_bit(0, &core->sys_error)) {
if (inst->nonblock)
return -EAGAIN;
ret = wait_event_interruptible(core->sys_err_done,
!test_bit(0, &core->sys_error));
if (ret)
return ret;
}
ret = venc_pm_get(inst);
if (ret)
return ret;
mutex_lock(&inst->lock);
ret = venc_init_session(inst);
mutex_unlock(&inst->lock);
if (ret)
goto put_power;
ret = venc_pm_put(inst, false);
if (ret)
return ret;
switch (q->type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
*num_planes = inst->fmt_out->num_planes;
ret = venc_out_num_buffers(inst, &num);
if (ret)
break;
num = max(num, min);
*num_buffers = max(*num_buffers, num);
inst->num_input_bufs = *num_buffers;
sizes[0] = venus_helper_get_framesz(inst->fmt_out->pixfmt,
inst->out_width,
inst->out_height);
inst->input_buf_size = sizes[0];
break;
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
*num_planes = inst->fmt_cap->num_planes;
*num_buffers = max(*num_buffers, min);
inst->num_output_bufs = *num_buffers;
sizes[0] = venus_helper_get_framesz(inst->fmt_cap->pixfmt,
inst->width,
inst->height);
sizes[0] = max(sizes[0], inst->output_buf_size);
inst->output_buf_size = sizes[0];
break;
default:
ret = -EINVAL;
break;
}
return ret;
put_power:
venc_pm_put(inst, false);
return ret;
}
static int venc_buf_init(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
inst->buf_count++;
return venus_helper_vb2_buf_init(vb);
}
static void venc_release_session(struct venus_inst *inst)
{
int ret;
venc_pm_get(inst);
mutex_lock(&inst->lock);
ret = hfi_session_deinit(inst);
if (ret || inst->session_error)
hfi_session_abort(inst);
mutex_unlock(&inst->lock);
venus_pm_load_scale(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
venus_pm_release_core(inst);
venc_pm_put(inst, false);
}
static void venc_buf_cleanup(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct venus_buffer *buf = to_venus_buffer(vbuf);
mutex_lock(&inst->lock);
if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
if (!list_empty(&inst->registeredbufs))
list_del_init(&buf->reg_list);
mutex_unlock(&inst->lock);
inst->buf_count--;
if (!inst->buf_count)
venc_release_session(inst);
}
static int venc_verify_conf(struct venus_inst *inst)
{
enum hfi_version ver = inst->core->res->hfi_version;
struct hfi_buffer_requirements bufreq;
int ret;
if (!inst->num_input_bufs || !inst->num_output_bufs)
return -EINVAL;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_OUTPUT, &bufreq);
if (ret)
return ret;
if (inst->num_output_bufs < bufreq.count_actual ||
inst->num_output_bufs < hfi_bufreq_get_count_min(&bufreq, ver))
return -EINVAL;
ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq);
if (ret)
return ret;
if (inst->num_input_bufs < bufreq.count_actual ||
inst->num_input_bufs < hfi_bufreq_get_count_min(&bufreq, ver))
return -EINVAL;
return 0;
}
static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct venus_inst *inst = vb2_get_drv_priv(q);
int ret;
mutex_lock(&inst->lock);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->streamon_out = 1;
else
inst->streamon_cap = 1;
if (!(inst->streamon_out & inst->streamon_cap)) {
mutex_unlock(&inst->lock);
return 0;
}
venus_helper_init_instance(inst);
inst->sequence_cap = 0;
inst->sequence_out = 0;
ret = venc_pm_get(inst);
if (ret)
goto error;
ret = venus_pm_acquire_core(inst);
if (ret)
goto put_power;
ret = venc_pm_put(inst, true);
if (ret)
goto error;
ret = venc_set_properties(inst);
if (ret)
goto error;
ret = venc_verify_conf(inst);
if (ret)
goto error;
ret = venus_helper_set_num_bufs(inst, inst->num_input_bufs,
inst->num_output_bufs, 0);
if (ret)
goto error;
ret = venus_helper_vb2_start_streaming(inst);
if (ret)
goto error;
inst->enc_state = VENUS_ENC_STATE_ENCODING;
mutex_unlock(&inst->lock);
return 0;
put_power:
venc_pm_put(inst, false);
error:
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->streamon_out = 0;
else
inst->streamon_cap = 0;
mutex_unlock(&inst->lock);
return ret;
}
static void venc_vb2_buf_queue(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
venc_pm_get_put(inst);
mutex_lock(&inst->lock);
if (inst->enc_state == VENUS_ENC_STATE_STOPPED) {
vbuf->sequence = inst->sequence_cap++;
vbuf->field = V4L2_FIELD_NONE;
vb2_set_plane_payload(vb, 0, 0);
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
mutex_unlock(&inst->lock);
return;
}
venus_helper_vb2_buf_queue(vb);
mutex_unlock(&inst->lock);
}
static const struct vb2_ops venc_vb2_ops = {
.queue_setup = venc_queue_setup,
.buf_init = venc_buf_init,
.buf_cleanup = venc_buf_cleanup,
.buf_prepare = venus_helper_vb2_buf_prepare,
.start_streaming = venc_start_streaming,
.stop_streaming = venus_helper_vb2_stop_streaming,
.buf_queue = venc_vb2_buf_queue,
};
static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
u32 tag, u32 bytesused, u32 data_offset, u32 flags,
u32 hfi_flags, u64 timestamp_us)
{
struct vb2_v4l2_buffer *vbuf;
struct vb2_buffer *vb;
unsigned int type;
venc_pm_touch(inst);
if (buf_type == HFI_BUFFER_INPUT)
type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
else
type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
vbuf = venus_helper_find_buf(inst, type, tag);
if (!vbuf)
return;
vbuf->flags = flags;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
vb = &vbuf->vb2_buf;
vb2_set_plane_payload(vb, 0, bytesused + data_offset);
vb->planes[0].data_offset = data_offset;
vb->timestamp = timestamp_us * NSEC_PER_USEC;
vbuf->sequence = inst->sequence_cap++;
if ((vbuf->flags & V4L2_BUF_FLAG_LAST) &&
inst->enc_state == VENUS_ENC_STATE_DRAIN) {
inst->enc_state = VENUS_ENC_STATE_STOPPED;
}
} else {
vbuf->sequence = inst->sequence_out++;
}
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE);
}
static void venc_event_notify(struct venus_inst *inst, u32 event,
struct hfi_event_data *data)
{
struct device *dev = inst->core->dev_enc;
venc_pm_touch(inst);
if (event == EVT_SESSION_ERROR) {
inst->session_error = true;
venus_helper_vb2_queue_error(inst);
dev_err(dev, "enc: event session error %x\n", inst->error);
}
}
static const struct hfi_inst_ops venc_hfi_ops = {
.buf_done = venc_buf_done,
.event_notify = venc_event_notify,
};
static const struct v4l2_m2m_ops venc_m2m_ops = {
.device_run = venus_helper_m2m_device_run,
.job_abort = venus_helper_m2m_job_abort,
};
static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct venus_inst *inst = priv;
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->ops = &venc_vb2_ops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->drv_priv = inst;
src_vq->buf_struct_size = sizeof(struct venus_buffer);
src_vq->allow_zero_bytesused = 1;
src_vq->min_buffers_needed = 1;
src_vq->dev = inst->core->dev;
src_vq->lock = &inst->ctx_q_lock;
if (inst->core->res->hfi_version == HFI_VERSION_1XX)
src_vq->bidirectional = 1;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->ops = &venc_vb2_ops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->drv_priv = inst;
dst_vq->buf_struct_size = sizeof(struct venus_buffer);
dst_vq->allow_zero_bytesused = 1;
dst_vq->min_buffers_needed = 1;
dst_vq->dev = inst->core->dev;
dst_vq->lock = &inst->ctx_q_lock;
return vb2_queue_init(dst_vq);
}
static void venc_inst_init(struct venus_inst *inst)
{
inst->fmt_cap = &venc_formats[VENUS_FMT_H264];
inst->fmt_out = &venc_formats[VENUS_FMT_NV12];
inst->width = 1280;
inst->height = ALIGN(720, 32);
inst->out_width = 1280;
inst->out_height = 720;
inst->fps = 15;
inst->timeperframe.numerator = 1;
inst->timeperframe.denominator = 15;
inst->hfi_codec = HFI_VIDEO_CODEC_H264;
}
static int venc_open(struct file *file)
{
struct venus_core *core = video_drvdata(file);
struct venus_inst *inst;
int ret;
inst = kzalloc(sizeof(*inst), GFP_KERNEL);
if (!inst)
return -ENOMEM;
INIT_LIST_HEAD(&inst->dpbbufs);
INIT_LIST_HEAD(&inst->registeredbufs);
INIT_LIST_HEAD(&inst->internalbufs);
INIT_LIST_HEAD(&inst->list);
mutex_init(&inst->lock);
mutex_init(&inst->ctx_q_lock);
inst->core = core;
inst->session_type = VIDC_SESSION_TYPE_ENC;
inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
inst->core_acquired = false;
inst->nonblock = file->f_flags & O_NONBLOCK;
if (inst->enc_state == VENUS_ENC_STATE_DEINIT)
inst->enc_state = VENUS_ENC_STATE_INIT;
venus_helper_init_instance(inst);
ret = venc_ctrl_init(inst);
if (ret)
goto err_free;
ret = hfi_session_create(inst, &venc_hfi_ops);
if (ret)
goto err_ctrl_deinit;
venc_inst_init(inst);
/*
 * Create an m2m device for every instance; m2m context scheduling
 * is done on the firmware side, so we do not need to care about
 * it here.
 */
inst->m2m_dev = v4l2_m2m_init(&venc_m2m_ops);
if (IS_ERR(inst->m2m_dev)) {
ret = PTR_ERR(inst->m2m_dev);
goto err_session_destroy;
}
inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
if (IS_ERR(inst->m2m_ctx)) {
ret = PTR_ERR(inst->m2m_ctx);
goto err_m2m_release;
}
v4l2_fh_init(&inst->fh, core->vdev_enc);
inst->fh.ctrl_handler = &inst->ctrl_handler;
v4l2_fh_add(&inst->fh);
inst->fh.m2m_ctx = inst->m2m_ctx;
file->private_data = &inst->fh;
return 0;
err_m2m_release:
v4l2_m2m_release(inst->m2m_dev);
err_session_destroy:
hfi_session_destroy(inst);
err_ctrl_deinit:
venc_ctrl_deinit(inst);
err_free:
kfree(inst);
return ret;
}
static int venc_close(struct file *file)
{
struct venus_inst *inst = to_inst(file);
venc_pm_get(inst);
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
venc_ctrl_deinit(inst);
hfi_session_destroy(inst);
mutex_destroy(&inst->lock);
mutex_destroy(&inst->ctx_q_lock);
v4l2_fh_del(&inst->fh);
v4l2_fh_exit(&inst->fh);
inst->enc_state = VENUS_ENC_STATE_DEINIT;
venc_pm_put(inst, false);
kfree(inst);
return 0;
}
static const struct v4l2_file_operations venc_fops = {
.owner = THIS_MODULE,
.open = venc_open,
.release = venc_close,
.unlocked_ioctl = video_ioctl2,
.poll = v4l2_m2m_fop_poll,
.mmap = v4l2_m2m_fop_mmap,
};
static int venc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct video_device *vdev;
struct venus_core *core;
int ret;
if (!dev->parent)
return -EPROBE_DEFER;
core = dev_get_drvdata(dev->parent);
if (!core)
return -EPROBE_DEFER;
platform_set_drvdata(pdev, core);
if (core->pm_ops->venc_get) {
ret = core->pm_ops->venc_get(dev);
if (ret)
return ret;
}
vdev = video_device_alloc();
if (!vdev)
return -ENOMEM;
strscpy(vdev->name, "qcom-venus-encoder", sizeof(vdev->name));
vdev->release = video_device_release;
vdev->fops = &venc_fops;
vdev->ioctl_ops = &venc_ioctl_ops;
vdev->vfl_dir = VFL_DIR_M2M;
vdev->v4l2_dev = &core->v4l2_dev;
vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret)
goto err_vdev_release;
core->vdev_enc = vdev;
core->dev_enc = dev;
video_set_drvdata(vdev, core);
pm_runtime_set_autosuspend_delay(dev, 2000);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return 0;
err_vdev_release:
video_device_release(vdev);
return ret;
}
static void venc_remove(struct platform_device *pdev)
{
struct venus_core *core = dev_get_drvdata(pdev->dev.parent);
video_unregister_device(core->vdev_enc);
pm_runtime_disable(core->dev_enc);
if (core->pm_ops->venc_put)
core->pm_ops->venc_put(core->dev_enc);
}
static __maybe_unused int venc_runtime_suspend(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
const struct venus_pm_ops *pm_ops = core->pm_ops;
int ret = 0;
if (pm_ops->venc_power)
ret = pm_ops->venc_power(dev, POWER_OFF);
return ret;
}
static __maybe_unused int venc_runtime_resume(struct device *dev)
{
struct venus_core *core = dev_get_drvdata(dev);
const struct venus_pm_ops *pm_ops = core->pm_ops;
int ret = 0;
if (pm_ops->venc_power)
ret = pm_ops->venc_power(dev, POWER_ON);
return ret;
}
static const struct dev_pm_ops venc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(venc_runtime_suspend, venc_runtime_resume, NULL)
};
static const struct of_device_id venc_dt_match[] = {
{ .compatible = "venus-encoder" },
{ }
};
MODULE_DEVICE_TABLE(of, venc_dt_match);
static struct platform_driver qcom_venus_enc_driver = {
.probe = venc_probe,
.remove_new = venc_remove,
.driver = {
.name = "qcom-venus-encoder",
.of_match_table = venc_dt_match,
.pm = &venc_pm_ops,
},
};
module_platform_driver(qcom_venus_enc_driver);
MODULE_ALIAS("platform:qcom-venus-encoder");
MODULE_DESCRIPTION("Qualcomm Venus video encoder driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/platform/qcom/venus/venc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "core.h"
#include "hfi_cmds.h"
#include "hfi_msgs.h"
#include "hfi_venus.h"
#include "hfi_venus_io.h"
#include "firmware.h"
#define HFI_MASK_QHDR_TX_TYPE 0xff000000
#define HFI_MASK_QHDR_RX_TYPE 0x00ff0000
#define HFI_MASK_QHDR_PRI_TYPE 0x0000ff00
#define HFI_MASK_QHDR_ID_TYPE 0x000000ff
#define HFI_HOST_TO_CTRL_CMD_Q 0
#define HFI_CTRL_TO_HOST_MSG_Q 1
#define HFI_CTRL_TO_HOST_DBG_Q 2
#define HFI_MASK_QHDR_STATUS 0x000000ff
#define IFACEQ_NUM 3
#define IFACEQ_CMD_IDX 0
#define IFACEQ_MSG_IDX 1
#define IFACEQ_DBG_IDX 2
#define IFACEQ_MAX_BUF_COUNT 50
#define IFACEQ_MAX_PARALLEL_CLNTS 16
#define IFACEQ_DFLT_QHDR 0x01010000
#define POLL_INTERVAL_US 50
#define IFACEQ_MAX_PKT_SIZE 1024
#define IFACEQ_MED_PKT_SIZE 768
#define IFACEQ_MIN_PKT_SIZE 8
#define IFACEQ_VAR_SMALL_PKT_SIZE 100
#define IFACEQ_VAR_LARGE_PKT_SIZE 512
#define IFACEQ_VAR_HUGE_PKT_SIZE (1024 * 12)
struct hfi_queue_table_header {
u32 version;
u32 size;
u32 qhdr0_offset;
u32 qhdr_size;
u32 num_q;
u32 num_active_q;
};
struct hfi_queue_header {
u32 status;
u32 start_addr;
u32 type;
u32 q_size;
u32 pkt_size;
u32 pkt_drop_cnt;
u32 rx_wm;
u32 tx_wm;
u32 rx_req;
u32 tx_req;
u32 rx_irq_status;
u32 tx_irq_status;
u32 read_idx;
u32 write_idx;
};
#define IFACEQ_TABLE_SIZE \
(sizeof(struct hfi_queue_table_header) + \
sizeof(struct hfi_queue_header) * IFACEQ_NUM)
#define IFACEQ_QUEUE_SIZE (IFACEQ_MAX_PKT_SIZE * \
IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)
#define IFACEQ_GET_QHDR_START_ADDR(ptr, i) \
(void *)(((ptr) + sizeof(struct hfi_queue_table_header)) + \
((i) * sizeof(struct hfi_queue_header)))
#define QDSS_SIZE SZ_4K
#define SFR_SIZE SZ_4K
#define QUEUE_SIZE \
(IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))
#define ALIGNED_QDSS_SIZE ALIGN(QDSS_SIZE, SZ_4K)
#define ALIGNED_SFR_SIZE ALIGN(SFR_SIZE, SZ_4K)
#define ALIGNED_QUEUE_SIZE ALIGN(QUEUE_SIZE, SZ_4K)
#define SHARED_QSIZE ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
ALIGNED_QDSS_SIZE, SZ_1M)
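/*
 * Resulting shared-memory layout (a sketch derived from the sizes above;
 * the firmware addresses it through the UC_REGION registers):
 *
 *   ifaceq_table:  hfi_queue_table_header
 *                  hfi_queue_header[IFACEQ_NUM]
 *                  cmd/msg/dbg queue bodies (IFACEQ_QUEUE_SIZE each)
 *   sfr:           subsystem failure reason buffer (ALIGNED_SFR_SIZE)
 *
 * SHARED_QSIZE rounds queues + SFR + QDSS up to 1 MiB and is what gets
 * programmed as UC_REGION_SIZE in venus_run().
 */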
struct mem_desc {
dma_addr_t da; /* device address */
void *kva; /* kernel virtual address */
u32 size;
unsigned long attrs;
};
struct iface_queue {
struct hfi_queue_header *qhdr;
struct mem_desc qmem;
};
enum venus_state {
VENUS_STATE_DEINIT = 1,
VENUS_STATE_INIT,
};
struct venus_hfi_device {
struct venus_core *core;
u32 irq_status;
u32 last_packet_type;
bool power_enabled;
bool suspended;
enum venus_state state;
/* serialize read / write to the shared memory */
struct mutex lock;
struct completion pwr_collapse_prep;
struct completion release_resource;
struct mem_desc ifaceq_table;
struct mem_desc sfr;
struct iface_queue queues[IFACEQ_NUM];
u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
};
static bool venus_pkt_debug;
int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
static bool venus_fw_low_power_mode = true;
static int venus_hw_rsp_timeout = 1000;
static bool venus_fw_coverage;
static void venus_set_state(struct venus_hfi_device *hdev,
enum venus_state state)
{
mutex_lock(&hdev->lock);
hdev->state = state;
mutex_unlock(&hdev->lock);
}
static bool venus_is_valid_state(struct venus_hfi_device *hdev)
{
return hdev->state != VENUS_STATE_DEINIT;
}
static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
{
size_t pkt_size = *(u32 *)packet;
if (!venus_pkt_debug)
return;
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
pkt_size, true);
}
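/*
 * The host/firmware interface queues are single-producer single-consumer
 * ring buffers of u32 words in shared memory; the first word of every
 * packet holds its size in bytes.  Worked example of a wrapping write
 * (purely illustrative numbers): with q_size = 8 dwords, read_idx = 2,
 * write_idx = 6 and a 3-dword packet, empty_space = 8 - (6 - 2) = 4 > 3,
 * so the write proceeds; new_wr_idx = 9 wraps to 1, two dwords are
 * copied at offset 6 and the remaining dword at the start of the queue.
 */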
static int venus_write_queue(struct venus_hfi_device *hdev,
struct iface_queue *queue,
void *packet, u32 *rx_req)
{
struct hfi_queue_header *qhdr;
u32 dwords, new_wr_idx;
u32 empty_space, rd_idx, wr_idx, qsize;
u32 *wr_ptr;
if (!queue->qmem.kva)
return -EINVAL;
qhdr = queue->qhdr;
if (!qhdr)
return -EINVAL;
venus_dump_packet(hdev, packet);
dwords = (*(u32 *)packet) >> 2;
if (!dwords)
return -EINVAL;
rd_idx = qhdr->read_idx;
wr_idx = qhdr->write_idx;
qsize = qhdr->q_size;
/* ensure rd/wr indices are read from memory */
rmb();
if (wr_idx >= rd_idx)
empty_space = qsize - (wr_idx - rd_idx);
else
empty_space = rd_idx - wr_idx;
if (empty_space <= dwords) {
qhdr->tx_req = 1;
/* ensure tx_req is updated in memory */
wmb();
return -ENOSPC;
}
qhdr->tx_req = 0;
/* ensure tx_req is updated in memory */
wmb();
new_wr_idx = wr_idx + dwords;
wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
if (new_wr_idx < qsize) {
memcpy(wr_ptr, packet, dwords << 2);
} else {
size_t len;
new_wr_idx -= qsize;
len = (dwords - new_wr_idx) << 2;
memcpy(wr_ptr, packet, len);
memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
}
/* make sure packet is written before updating the write index */
wmb();
qhdr->write_idx = new_wr_idx;
*rx_req = qhdr->rx_req ? 1 : 0;
/* make sure write index is updated before an interrupt is raised */
mb();
return 0;
}
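/*
 * The read side mirrors the write side, with one extra recovery rule:
 * if a packet header advertises a size larger than the biggest legal
 * packet (or the read index is out of range), the whole queue content
 * is dropped by jumping read_idx straight to write_idx and -EBADMSG is
 * returned, rather than trying to resynchronize mid-stream.
 */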
static int venus_read_queue(struct venus_hfi_device *hdev,
struct iface_queue *queue, void *pkt, u32 *tx_req)
{
struct hfi_queue_header *qhdr;
u32 dwords, new_rd_idx;
u32 rd_idx, wr_idx, type, qsize;
u32 *rd_ptr;
u32 recv_request = 0;
int ret = 0;
if (!queue->qmem.kva)
return -EINVAL;
qhdr = queue->qhdr;
if (!qhdr)
return -EINVAL;
type = qhdr->type;
rd_idx = qhdr->read_idx;
wr_idx = qhdr->write_idx;
qsize = qhdr->q_size;
/* make sure data is valid before using it */
rmb();
/*
 * Do not set the receive request for the debug queue: if set, Venus
 * generates an interrupt for debug messages even when there is no
 * response message available. In general the debug queue will not
 * become full, as it is emptied on every interrupt from Venus, and
 * Venus generates an interrupt anyway if it does fill up.
 */
if (type & HFI_CTRL_TO_HOST_MSG_Q)
recv_request = 1;
if (rd_idx == wr_idx) {
qhdr->rx_req = recv_request;
*tx_req = 0;
/* update rx_req field in memory */
wmb();
return -ENODATA;
}
rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
dwords = *rd_ptr >> 2;
if (!dwords)
return -EINVAL;
new_rd_idx = rd_idx + dwords;
if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
if (new_rd_idx < qsize) {
memcpy(pkt, rd_ptr, dwords << 2);
} else {
size_t len;
new_rd_idx -= qsize;
len = (dwords - new_rd_idx) << 2;
memcpy(pkt, rd_ptr, len);
memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
}
} else {
/* bad packet received, dropping */
new_rd_idx = qhdr->write_idx;
ret = -EBADMSG;
}
/* ensure the packet is read before updating read index */
rmb();
qhdr->read_idx = new_rd_idx;
/* ensure the read index update reaches memory */
wmb();
rd_idx = qhdr->read_idx;
wr_idx = qhdr->write_idx;
/* ensure rd/wr indices are read from memory */
rmb();
if (rd_idx != wr_idx)
qhdr->rx_req = 0;
else
qhdr->rx_req = recv_request;
*tx_req = qhdr->tx_req ? 1 : 0;
/* ensure rx_req is stored to memory and tx_req is loaded from memory */
mb();
venus_dump_packet(hdev, pkt);
return ret;
}
static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
u32 size)
{
struct device *dev = hdev->core->dev;
desc->attrs = DMA_ATTR_WRITE_COMBINE;
desc->size = ALIGN(size, SZ_4K);
desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
desc->attrs);
if (!desc->kva)
return -ENOMEM;
return 0;
}
static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
{
struct device *dev = hdev->core->dev;
dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
}
static void venus_set_registers(struct venus_hfi_device *hdev)
{
const struct venus_resources *res = hdev->core->res;
const struct reg_val *tbl = res->reg_tbl;
unsigned int count = res->reg_tbl_size;
unsigned int i;
for (i = 0; i < count; i++)
writel(tbl[i].value, hdev->core->base + tbl[i].reg);
}
static void venus_soft_int(struct venus_hfi_device *hdev)
{
void __iomem *cpu_ic_base = hdev->core->cpu_ic_base;
u32 clear_bit;
if (IS_V6(hdev->core))
clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT_V6);
else
clear_bit = BIT(CPU_IC_SOFTINT_H2A_SHIFT);
writel(clear_bit, cpu_ic_base + CPU_IC_SOFTINT);
}
static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
void *pkt, bool sync)
{
struct device *dev = hdev->core->dev;
struct hfi_pkt_hdr *cmd_packet;
struct iface_queue *queue;
u32 rx_req;
int ret;
if (!venus_is_valid_state(hdev))
return -EINVAL;
cmd_packet = (struct hfi_pkt_hdr *)pkt;
hdev->last_packet_type = cmd_packet->pkt_type;
queue = &hdev->queues[IFACEQ_CMD_IDX];
ret = venus_write_queue(hdev, queue, pkt, &rx_req);
if (ret) {
dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
return ret;
}
if (sync) {
/*
* Inform video hardware to raise interrupt for synchronous
* commands
*/
queue = &hdev->queues[IFACEQ_MSG_IDX];
queue->qhdr->rx_req = 1;
/* ensure rx_req is updated in memory */
wmb();
}
if (rx_req)
venus_soft_int(hdev);
return 0;
}
static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt, bool sync)
{
int ret;
mutex_lock(&hdev->lock);
ret = venus_iface_cmdq_write_nolock(hdev, pkt, sync);
mutex_unlock(&hdev->lock);
return ret;
}
static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
u32 size, u32 addr, void *cookie)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct hfi_sys_set_resource_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
int ret;
if (id == VIDC_RESOURCE_NONE)
return 0;
pkt = (struct hfi_sys_set_resource_pkt *)packet;
ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
if (ret)
return ret;
ret = venus_iface_cmdq_write(hdev, pkt, false);
if (ret)
return ret;
return 0;
}
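/*
 * Kick firmware initialization via VIDC_CTRL_INIT and poll
 * CPU_CS_SCIACMDARG0 until the firmware reports a non-zero boot status
 * (up to 100 polls of 0.5-1 ms each).  An error status of 4
 * specifically means the UC_REGION address/size programmed in
 * venus_run() was rejected.
 */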
static int venus_boot_core(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
static const unsigned int max_tries = 100;
u32 ctrl_status = 0, mask_val = 0;
unsigned int count = 0;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
void __iomem *wrapper_base = hdev->core->wrapper_base;
int ret = 0;
if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
mask_val = readl(wrapper_base + WRAPPER_INTR_MASK);
mask_val &= ~(WRAPPER_INTR_MASK_A2HWD_BASK_V6 |
WRAPPER_INTR_MASK_A2HCPU_MASK);
} else {
mask_val = WRAPPER_INTR_MASK_A2HVCODEC_MASK;
}
writel(mask_val, wrapper_base + WRAPPER_INTR_MASK);
if (IS_V1(hdev->core))
writel(1, cpu_cs_base + CPU_CS_SCIACMDARG3);
writel(BIT(VIDC_CTRL_INIT_CTRL_SHIFT), cpu_cs_base + VIDC_CTRL_INIT);
while (!ctrl_status && count < max_tries) {
ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
dev_err(dev, "invalid setting for UC_REGION\n");
ret = -EINVAL;
break;
}
usleep_range(500, 1000);
count++;
}
if (count >= max_tries)
ret = -ETIMEDOUT;
if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
writel(0x1, cpu_cs_base + CPU_CS_H2XSOFTINTEN_V6);
writel(0x0, cpu_cs_base + CPU_CS_X2RPMH_V6);
}
return ret;
}
static u32 venus_hwversion(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
void __iomem *wrapper_base = hdev->core->wrapper_base;
u32 ver;
u32 major, minor, step;
ver = readl(wrapper_base + WRAPPER_HW_VERSION);
major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;
dev_dbg(dev, VDBGL "venus hw version %x.%x.%x\n", major, minor, step);
return major;
}
static int venus_run(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
int ret;
/*
* Re-program all of the registers that get reset as a result of
* regulator_disable() and _enable()
*/
venus_set_registers(hdev);
writel(hdev->ifaceq_table.da, cpu_cs_base + UC_REGION_ADDR);
writel(SHARED_QSIZE, cpu_cs_base + UC_REGION_SIZE);
writel(hdev->ifaceq_table.da, cpu_cs_base + CPU_CS_SCIACMDARG2);
writel(0x01, cpu_cs_base + CPU_CS_SCIACMDARG1);
if (hdev->sfr.da)
writel(hdev->sfr.da, cpu_cs_base + SFR_ADDR);
ret = venus_boot_core(hdev);
if (ret) {
dev_err(dev, "failed to reset venus core\n");
return ret;
}
venus_hwversion(hdev);
return 0;
}
static int venus_halt_axi(struct venus_hfi_device *hdev)
{
void __iomem *wrapper_base = hdev->core->wrapper_base;
void __iomem *vbif_base = hdev->core->vbif_base;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
void __iomem *aon_base = hdev->core->aon_base;
struct device *dev = hdev->core->dev;
u32 val;
u32 mask_val;
int ret;
if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core)) {
writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
if (IS_IRIS2_1(hdev->core))
goto skip_aon_mvp_noc;
writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
ret = readl_poll_timeout(aon_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
val,
val & BIT(0),
POLL_INTERVAL_US,
VBIF_AXI_HALT_ACK_TIMEOUT_US);
if (ret)
return -ETIMEDOUT;
skip_aon_mvp_noc:
mask_val = (BIT(2) | BIT(1) | BIT(0));
writel(mask_val, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
writel(0x00, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
ret = readl_poll_timeout(wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_STATUS_V6,
val,
val == 0,
POLL_INTERVAL_US,
VBIF_AXI_HALT_ACK_TIMEOUT_US);
if (ret) {
dev_err(dev, "DBLP Release: lpi_status %x\n", val);
return -ETIMEDOUT;
}
return 0;
}
if (IS_V4(hdev->core)) {
val = readl(wrapper_base + WRAPPER_CPU_AXI_HALT);
val |= WRAPPER_CPU_AXI_HALT_HALT;
writel(val, wrapper_base + WRAPPER_CPU_AXI_HALT);
ret = readl_poll_timeout(wrapper_base + WRAPPER_CPU_AXI_HALT_STATUS,
val,
val & WRAPPER_CPU_AXI_HALT_STATUS_IDLE,
POLL_INTERVAL_US,
VBIF_AXI_HALT_ACK_TIMEOUT_US);
if (ret) {
dev_err(dev, "AXI bus port halt timeout\n");
return ret;
}
return 0;
}
/* Halt AXI and AXI IMEM VBIF Access */
val = readl(vbif_base + VBIF_AXI_HALT_CTRL0);
val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
writel(val, vbif_base + VBIF_AXI_HALT_CTRL0);
/* Request for AXI bus port halt */
ret = readl_poll_timeout(vbif_base + VBIF_AXI_HALT_CTRL1, val,
val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
POLL_INTERVAL_US,
VBIF_AXI_HALT_ACK_TIMEOUT_US);
if (ret) {
dev_err(dev, "AXI bus port halt timeout\n");
return ret;
}
return 0;
}
static int venus_power_off(struct venus_hfi_device *hdev)
{
int ret;
if (!hdev->power_enabled)
return 0;
ret = venus_set_hw_state_suspend(hdev->core);
if (ret)
return ret;
ret = venus_halt_axi(hdev);
if (ret)
return ret;
hdev->power_enabled = false;
return 0;
}
static int venus_power_on(struct venus_hfi_device *hdev)
{
int ret;
if (hdev->power_enabled)
return 0;
ret = venus_set_hw_state_resume(hdev->core);
if (ret)
goto err;
ret = venus_run(hdev);
if (ret)
goto err_suspend;
hdev->power_enabled = true;
return 0;
err_suspend:
venus_set_hw_state_suspend(hdev->core);
err:
hdev->power_enabled = false;
return ret;
}
static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
void *pkt)
{
struct iface_queue *queue;
u32 tx_req;
int ret;
if (!venus_is_valid_state(hdev))
return -EINVAL;
queue = &hdev->queues[IFACEQ_MSG_IDX];
ret = venus_read_queue(hdev, queue, pkt, &tx_req);
if (ret)
return ret;
if (tx_req)
venus_soft_int(hdev);
return 0;
}
static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
{
int ret;
mutex_lock(&hdev->lock);
ret = venus_iface_msgq_read_nolock(hdev, pkt);
mutex_unlock(&hdev->lock);
return ret;
}
static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
void *pkt)
{
struct iface_queue *queue;
u32 tx_req;
int ret;
ret = venus_is_valid_state(hdev);
if (!ret)
return -EINVAL;
queue = &hdev->queues[IFACEQ_DBG_IDX];
ret = venus_read_queue(hdev, queue, pkt, &tx_req);
if (ret)
return ret;
if (tx_req)
venus_soft_int(hdev);
return 0;
}
static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
{
int ret;
if (!pkt)
return -EINVAL;
mutex_lock(&hdev->lock);
ret = venus_iface_dbgq_read_nolock(hdev, pkt);
mutex_unlock(&hdev->lock);
return ret;
}
static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
{
qhdr->status = 1;
qhdr->type = IFACEQ_DFLT_QHDR;
qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;
qhdr->pkt_size = 0;
qhdr->rx_wm = 1;
qhdr->tx_wm = 1;
qhdr->rx_req = 1;
qhdr->tx_req = 0;
qhdr->rx_irq_status = 0;
qhdr->tx_irq_status = 0;
qhdr->read_idx = 0;
qhdr->write_idx = 0;
}
static void venus_interface_queues_release(struct venus_hfi_device *hdev)
{
mutex_lock(&hdev->lock);
venus_free(hdev, &hdev->ifaceq_table);
venus_free(hdev, &hdev->sfr);
memset(hdev->queues, 0, sizeof(hdev->queues));
memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
memset(&hdev->sfr, 0, sizeof(hdev->sfr));
mutex_unlock(&hdev->lock);
}
static int venus_interface_queues_init(struct venus_hfi_device *hdev)
{
struct hfi_queue_table_header *tbl_hdr;
struct iface_queue *queue;
struct hfi_sfr *sfr;
struct mem_desc desc = {0};
unsigned int offset;
unsigned int i;
int ret;
ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
if (ret)
return ret;
hdev->ifaceq_table = desc;
offset = IFACEQ_TABLE_SIZE;
for (i = 0; i < IFACEQ_NUM; i++) {
queue = &hdev->queues[i];
queue->qmem.da = desc.da + offset;
queue->qmem.kva = desc.kva + offset;
queue->qmem.size = IFACEQ_QUEUE_SIZE;
offset += queue->qmem.size;
queue->qhdr =
IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
venus_set_qhdr_defaults(queue->qhdr);
queue->qhdr->start_addr = queue->qmem.da;
if (i == IFACEQ_CMD_IDX)
queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
else if (i == IFACEQ_MSG_IDX)
queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
else if (i == IFACEQ_DBG_IDX)
queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
}
tbl_hdr = hdev->ifaceq_table.kva;
tbl_hdr->version = 0;
tbl_hdr->size = IFACEQ_TABLE_SIZE;
tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
tbl_hdr->num_q = IFACEQ_NUM;
tbl_hdr->num_active_q = IFACEQ_NUM;
/*
 * Set the receive request to zero on the debug queue as there is no
 * need for an interrupt from the video hardware for debug messages.
 */
queue = &hdev->queues[IFACEQ_DBG_IDX];
queue->qhdr->rx_req = 0;
ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
if (ret) {
hdev->sfr.da = 0;
} else {
hdev->sfr = desc;
sfr = hdev->sfr.kva;
sfr->buf_size = ALIGNED_SFR_SIZE;
}
/* ensure table and queue header structs are settled in memory */
wmb();
return 0;
}
static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
{
struct hfi_sys_set_property_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);
return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
{
struct hfi_sys_set_property_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt_sys_coverage_config(pkt, mode);
return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
bool enable)
{
struct hfi_sys_set_property_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
if (!enable)
return 0;
pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt_sys_idle_indicator(pkt, enable);
return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
bool enable)
{
struct hfi_sys_set_property_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt_sys_power_control(pkt, enable);
return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_sys_set_ubwc_config(struct venus_hfi_device *hdev)
{
struct hfi_sys_set_property_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
const struct venus_resources *res = hdev->core->res;
int ret;
pkt = (struct hfi_sys_set_property_pkt *)packet;
pkt_sys_ubwc_config(pkt, res->ubwc_conf);
ret = venus_iface_cmdq_write(hdev, pkt, false);
if (ret)
return ret;
return 0;
}
static int venus_get_queue_size(struct venus_hfi_device *hdev,
unsigned int index)
{
struct hfi_queue_header *qhdr;
if (index >= IFACEQ_NUM)
return -EINVAL;
qhdr = hdev->queues[index].qhdr;
if (!qhdr)
return -EINVAL;
return abs(qhdr->read_idx - qhdr->write_idx);
}
static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
const struct venus_resources *res = hdev->core->res;
int ret;
ret = venus_sys_set_debug(hdev, venus_fw_debug);
if (ret)
dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);
/* HFI_PROPERTY_SYS_IDLE_INDICATOR is not supported beyond 8916 (HFI V1) */
if (IS_V1(hdev->core)) {
ret = venus_sys_set_idle_message(hdev, false);
if (ret)
dev_warn(dev, "setting idle response ON failed (%d)\n", ret);
}
ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
if (ret)
dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
ret);
/* For some Venus cores it is mandatory to set the UBWC configuration */
if (res->ubwc_conf) {
ret = venus_sys_set_ubwc_config(hdev);
if (ret)
dev_warn(dev, "setting ubwc config failed (%d)\n", ret);
}
return ret;
}
static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type, bool sync)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct hfi_session_pkt pkt;
pkt_session_cmd(&pkt, pkt_type, inst);
return venus_iface_cmdq_write(hdev, &pkt, sync);
}
static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
void *packet = hdev->dbg_buf;
while (!venus_iface_dbgq_read(hdev, packet)) {
struct hfi_msg_sys_coverage_pkt *pkt = packet;
if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
struct hfi_msg_sys_debug_pkt *pkt = packet;
dev_dbg(dev, VDBGFW "%s", pkt->msg_data);
}
}
}
static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
bool wait)
{
unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
struct hfi_sys_pc_prep_pkt pkt;
int ret;
init_completion(&hdev->pwr_collapse_prep);
pkt_sys_pc_prep(&pkt);
ret = venus_iface_cmdq_write(hdev, &pkt, false);
if (ret)
return ret;
if (!wait)
return 0;
ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout);
if (!ret) {
venus_flush_debug_queue(hdev);
return -ETIMEDOUT;
}
return 0;
}
static int venus_are_queues_empty(struct venus_hfi_device *hdev)
{
int ret1, ret2;
ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
if (ret1 < 0)
return ret1;
ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
if (ret2 < 0)
return ret2;
if (!ret1 && !ret2)
return 1;
return 0;
}
static void venus_sfr_print(struct venus_hfi_device *hdev)
{
struct device *dev = hdev->core->dev;
struct hfi_sfr *sfr = hdev->sfr.kva;
void *p;
if (!sfr)
return;
p = memchr(sfr->data, '\0', sfr->buf_size);
/*
* SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
* that Venus is in the process of crashing.
*/
if (!p)
sfr->data[sfr->buf_size - 1] = '\0';
dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
}
static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
void *packet)
{
struct hfi_msg_event_notify_pkt *event_pkt = packet;
if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
return;
venus_set_state(hdev, VENUS_STATE_DEINIT);
venus_sfr_print(hdev);
}
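/*
 * Interrupt handling is split in two: venus_isr() (hard IRQ context)
 * latches the wrapper interrupt status and acknowledges the hardware,
 * then wakes this thread, which drains the message queue, dispatches
 * each packet through hfi_process_msg_packet() and completes the
 * waiters for the resource-release and power-collapse acks.
 */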
static irqreturn_t venus_isr_thread(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
const struct venus_resources *res;
void *pkt;
u32 msg_ret;
if (!hdev)
return IRQ_NONE;
res = hdev->core->res;
pkt = hdev->pkt_buf;
while (!venus_iface_msgq_read(hdev, pkt)) {
msg_ret = hfi_process_msg_packet(core, pkt);
switch (msg_ret) {
case HFI_MSG_EVENT_NOTIFY:
venus_process_msg_sys_error(hdev, pkt);
break;
case HFI_MSG_SYS_INIT:
venus_hfi_core_set_resource(core, res->vmem_id,
res->vmem_size,
res->vmem_addr,
hdev);
break;
case HFI_MSG_SYS_RELEASE_RESOURCE:
complete(&hdev->release_resource);
break;
case HFI_MSG_SYS_PC_PREP:
complete(&hdev->pwr_collapse_prep);
break;
default:
break;
}
}
venus_flush_debug_queue(hdev);
return IRQ_HANDLED;
}
static irqreturn_t venus_isr(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
u32 status;
void __iomem *cpu_cs_base;
void __iomem *wrapper_base;
if (!hdev)
return IRQ_NONE;
cpu_cs_base = hdev->core->cpu_cs_base;
wrapper_base = hdev->core->wrapper_base;
status = readl(wrapper_base + WRAPPER_INTR_STATUS);
if (IS_IRIS2(core) || IS_IRIS2_1(core)) {
if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
status & WRAPPER_INTR_STATUS_A2HWD_MASK_V6 ||
status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
hdev->irq_status = status;
} else {
if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
hdev->irq_status = status;
}
writel(1, cpu_cs_base + CPU_CS_A2HSOFTINTCLR);
if (!(IS_IRIS2(core) || IS_IRIS2_1(core)))
writel(status, wrapper_base + WRAPPER_INTR_CLEAR);
return IRQ_WAKE_THREAD;
}
static int venus_core_init(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
struct hfi_sys_get_property_pkt version_pkt;
struct hfi_sys_init_pkt pkt;
int ret;
pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);
venus_set_state(hdev, VENUS_STATE_INIT);
ret = venus_iface_cmdq_write(hdev, &pkt, false);
if (ret)
return ret;
pkt_sys_image_version(&version_pkt);
ret = venus_iface_cmdq_write(hdev, &version_pkt, false);
if (ret)
dev_warn(dev, "failed to send image version pkt to fw\n");
ret = venus_sys_set_default_properties(hdev);
if (ret)
return ret;
return 0;
}
static int venus_core_deinit(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
venus_set_state(hdev, VENUS_STATE_DEINIT);
hdev->suspended = true;
hdev->power_enabled = false;
return 0;
}
static int venus_core_ping(struct venus_core *core, u32 cookie)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct hfi_sys_ping_pkt pkt;
pkt_sys_ping(&pkt, cookie);
return venus_iface_cmdq_write(hdev, &pkt, false);
}
static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct hfi_sys_test_ssr_pkt pkt;
int ret;
ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
if (ret)
return ret;
return venus_iface_cmdq_write(hdev, &pkt, false);
}
static int venus_session_init(struct venus_inst *inst, u32 session_type,
u32 codec)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct hfi_session_init_pkt pkt;
int ret;
ret = venus_sys_set_debug(hdev, venus_fw_debug);
if (ret)
goto err;
ret = pkt_session_init(&pkt, inst, session_type, codec);
if (ret)
goto err;
ret = venus_iface_cmdq_write(hdev, &pkt, true);
if (ret)
goto err;
return 0;
err:
venus_flush_debug_queue(hdev);
return ret;
}
static int venus_session_end(struct venus_inst *inst)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct device *dev = hdev->core->dev;
if (venus_fw_coverage) {
if (venus_sys_set_coverage(hdev, venus_fw_coverage))
dev_warn(dev, "fw coverage msg ON failed\n");
}
return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END, true);
}
static int venus_session_abort(struct venus_inst *inst)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
venus_flush_debug_queue(hdev);
return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT, true);
}
static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct hfi_session_flush_pkt pkt;
int ret;
ret = pkt_session_flush(&pkt, inst, flush_mode);
if (ret)
return ret;
return venus_iface_cmdq_write(hdev, &pkt, true);
}
static int venus_session_start(struct venus_inst *inst)
{
return venus_session_cmd(inst, HFI_CMD_SESSION_START, true);
}
static int venus_session_stop(struct venus_inst *inst)
{
return venus_session_cmd(inst, HFI_CMD_SESSION_STOP, true);
}
static int venus_session_continue(struct venus_inst *inst)
{
return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE, false);
}
static int venus_session_etb(struct venus_inst *inst,
struct hfi_frame_data *in_frame)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
u32 session_type = inst->session_type;
int ret;
if (session_type == VIDC_SESSION_TYPE_DEC) {
struct hfi_session_empty_buffer_compressed_pkt pkt;
ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
if (ret)
return ret;
ret = venus_iface_cmdq_write(hdev, &pkt, false);
} else if (session_type == VIDC_SESSION_TYPE_ENC) {
struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;
ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
if (ret)
return ret;
ret = venus_iface_cmdq_write(hdev, &pkt, false);
} else {
ret = -EINVAL;
}
return ret;
}
static int venus_session_ftb(struct venus_inst *inst,
struct hfi_frame_data *out_frame)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct hfi_session_fill_buffer_pkt pkt;
int ret;
ret = pkt_session_ftb(&pkt, inst, out_frame);
if (ret)
return ret;
return venus_iface_cmdq_write(hdev, &pkt, false);
}
static int venus_session_set_buffers(struct venus_inst *inst,
struct hfi_buffer_desc *bd)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct hfi_session_set_buffers_pkt *pkt;
u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
int ret;
if (bd->buffer_type == HFI_BUFFER_INPUT)
return 0;
pkt = (struct hfi_session_set_buffers_pkt *)packet;
ret = pkt_session_set_buffers(pkt, inst, bd);
if (ret)
return ret;
return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_session_unset_buffers(struct venus_inst *inst,
struct hfi_buffer_desc *bd)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct hfi_session_release_buffer_pkt *pkt;
u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
int ret;
if (bd->buffer_type == HFI_BUFFER_INPUT)
return 0;
pkt = (struct hfi_session_release_buffer_pkt *)packet;
ret = pkt_session_unset_buffers(pkt, inst, bd);
if (ret)
return ret;
return venus_iface_cmdq_write(hdev, pkt, true);
}
static int venus_session_load_res(struct venus_inst *inst)
{
return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES, true);
}
static int venus_session_release_res(struct venus_inst *inst)
{
return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES, true);
}
static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
u32 seq_hdr_len)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct hfi_session_parse_sequence_header_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
int ret;
pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;
ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
if (ret)
return ret;
ret = venus_iface_cmdq_write(hdev, pkt, false);
if (ret)
return ret;
return 0;
}
static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
u32 seq_hdr_len)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct hfi_session_get_sequence_header_pkt *pkt;
u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
int ret;
pkt = (struct hfi_session_get_sequence_header_pkt *)packet;
ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
if (ret)
return ret;
return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
void *pdata)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct hfi_session_set_property_pkt *pkt;
u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
int ret;
pkt = (struct hfi_session_set_property_pkt *)packet;
ret = pkt_session_set_property(pkt, inst, ptype, pdata);
if (ret == -ENOTSUPP)
return 0;
if (ret)
return ret;
return venus_iface_cmdq_write(hdev, pkt, false);
}
static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
{
struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
struct hfi_session_get_property_pkt pkt;
int ret;
ret = pkt_session_get_property(&pkt, inst, ptype);
if (ret)
return ret;
return venus_iface_cmdq_write(hdev, &pkt, true);
}
static int venus_resume(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
int ret = 0;
mutex_lock(&hdev->lock);
if (!hdev->suspended)
goto unlock;
ret = venus_power_on(hdev);
unlock:
if (!ret)
hdev->suspended = false;
mutex_unlock(&hdev->lock);
return ret;
}
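/*
 * Power collapse on 1xx firmware is fully handshaked: send SYS_PC_PREP
 * and wait for its ack, then refuse to suspend unless PC_PREP was the
 * last command written, both interface queues are empty and the
 * firmware has raised PC_READY in the control status register.
 */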
static int venus_suspend_1xx(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status;
int ret;
if (!hdev->power_enabled || hdev->suspended)
return 0;
mutex_lock(&hdev->lock);
ret = venus_is_valid_state(hdev);
mutex_unlock(&hdev->lock);
if (!ret) {
dev_err(dev, "bad state, cannot suspend\n");
return -EINVAL;
}
ret = venus_prepare_power_collapse(hdev, true);
if (ret) {
dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
return ret;
}
mutex_lock(&hdev->lock);
if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
mutex_unlock(&hdev->lock);
return -EINVAL;
}
ret = venus_are_queues_empty(hdev);
if (ret < 0 || !ret) {
mutex_unlock(&hdev->lock);
return -EINVAL;
}
ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
mutex_unlock(&hdev->lock);
return -EINVAL;
}
ret = venus_power_off(hdev);
if (ret) {
mutex_unlock(&hdev->lock);
return ret;
}
hdev->suspended = true;
mutex_unlock(&hdev->lock);
return 0;
}
static bool venus_cpu_and_video_core_idle(struct venus_hfi_device *hdev)
{
void __iomem *wrapper_base = hdev->core->wrapper_base;
void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status, cpu_status;
if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core))
cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
else
cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
ctrl_status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
return true;
return false;
}
static bool venus_cpu_idle_and_pc_ready(struct venus_hfi_device *hdev)
{
void __iomem *wrapper_base = hdev->core->wrapper_base;
void __iomem *wrapper_tz_base = hdev->core->wrapper_tz_base;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status, cpu_status;
if (IS_IRIS2(hdev->core) || IS_IRIS2_1(hdev->core))
cpu_status = readl(wrapper_tz_base + WRAPPER_TZ_CPU_STATUS_V6);
else
cpu_status = readl(wrapper_base + WRAPPER_CPU_STATUS);
ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
if (cpu_status & WRAPPER_CPU_STATUS_WFI &&
ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
return true;
return false;
}
static int venus_suspend_3xx(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct device *dev = core->dev;
void __iomem *cpu_cs_base = hdev->core->cpu_cs_base;
u32 ctrl_status;
bool val;
int ret;
if (!hdev->power_enabled || hdev->suspended)
return 0;
mutex_lock(&hdev->lock);
ret = venus_is_valid_state(hdev);
mutex_unlock(&hdev->lock);
if (!ret) {
dev_err(dev, "bad state, cannot suspend\n");
return -EINVAL;
}
ctrl_status = readl(cpu_cs_base + CPU_CS_SCIACMDARG0);
if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)
goto power_off;
/*
* Power collapse sequence for Venus 3xx and 4xx versions:
* 1. Check for ARM9 and video core to be idle by checking WFI bit
* (bit 0) in CPU status register and by checking Idle (bit 30) in
* Control status register for video core.
* 2. Send a command to prepare for power collapse.
* 3. Check for WFI and PC_READY bits.
*/
ret = readx_poll_timeout(venus_cpu_and_video_core_idle, hdev, val, val,
1500, 100 * 1500);
if (ret) {
dev_err(dev, "wait for cpu and video core idle fail (%d)\n", ret);
return ret;
}
ret = venus_prepare_power_collapse(hdev, false);
if (ret) {
dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
return ret;
}
ret = readx_poll_timeout(venus_cpu_idle_and_pc_ready, hdev, val, val,
1500, 100 * 1500);
if (ret)
return ret;
power_off:
mutex_lock(&hdev->lock);
ret = venus_power_off(hdev);
if (ret) {
dev_err(dev, "venus_power_off (%d)\n", ret);
mutex_unlock(&hdev->lock);
return ret;
}
hdev->suspended = true;
mutex_unlock(&hdev->lock);
return 0;
}
static int venus_suspend(struct venus_core *core)
{
if (IS_V3(core) || IS_V4(core) || IS_V6(core))
return venus_suspend_3xx(core);
return venus_suspend_1xx(core);
}
static const struct hfi_ops venus_hfi_ops = {
.core_init = venus_core_init,
.core_deinit = venus_core_deinit,
.core_ping = venus_core_ping,
.core_trigger_ssr = venus_core_trigger_ssr,
.session_init = venus_session_init,
.session_end = venus_session_end,
.session_abort = venus_session_abort,
.session_flush = venus_session_flush,
.session_start = venus_session_start,
.session_stop = venus_session_stop,
.session_continue = venus_session_continue,
.session_etb = venus_session_etb,
.session_ftb = venus_session_ftb,
.session_set_buffers = venus_session_set_buffers,
.session_unset_buffers = venus_session_unset_buffers,
.session_load_res = venus_session_load_res,
.session_release_res = venus_session_release_res,
.session_parse_seq_hdr = venus_session_parse_seq_hdr,
.session_get_seq_hdr = venus_session_get_seq_hdr,
.session_set_property = venus_session_set_property,
.session_get_property = venus_session_get_property,
.resume = venus_resume,
.suspend = venus_suspend,
.isr = venus_isr,
.isr_thread = venus_isr_thread,
};
void venus_hfi_destroy(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
core->priv = NULL;
venus_interface_queues_release(hdev);
mutex_destroy(&hdev->lock);
kfree(hdev);
core->ops = NULL;
}
int venus_hfi_create(struct venus_core *core)
{
struct venus_hfi_device *hdev;
int ret;
hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
if (!hdev)
return -ENOMEM;
mutex_init(&hdev->lock);
hdev->core = core;
hdev->suspended = true;
core->priv = hdev;
core->ops = &venus_hfi_ops;
ret = venus_interface_queues_init(hdev);
if (ret)
goto err_kfree;
return 0;
err_kfree:
kfree(hdev);
core->priv = NULL;
core->ops = NULL;
return ret;
}
void venus_hfi_queues_reinit(struct venus_core *core)
{
struct venus_hfi_device *hdev = to_hfi_priv(core);
struct hfi_queue_table_header *tbl_hdr;
struct iface_queue *queue;
struct hfi_sfr *sfr;
unsigned int i;
mutex_lock(&hdev->lock);
for (i = 0; i < IFACEQ_NUM; i++) {
queue = &hdev->queues[i];
queue->qhdr =
IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);
venus_set_qhdr_defaults(queue->qhdr);
queue->qhdr->start_addr = queue->qmem.da;
if (i == IFACEQ_CMD_IDX)
queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
else if (i == IFACEQ_MSG_IDX)
queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
else if (i == IFACEQ_DBG_IDX)
queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
}
tbl_hdr = hdev->ifaceq_table.kva;
tbl_hdr->version = 0;
tbl_hdr->size = IFACEQ_TABLE_SIZE;
tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
tbl_hdr->num_q = IFACEQ_NUM;
tbl_hdr->num_active_q = IFACEQ_NUM;
/*
 * Set the receive request to zero on the debug queue as there is no
 * need for an interrupt from the video hardware for debug messages.
 */
queue = &hdev->queues[IFACEQ_DBG_IDX];
queue->qhdr->rx_req = 0;
sfr = hdev->sfr.kva;
sfr->buf_size = ALIGNED_SFR_SIZE;
/* ensure table and queue header structs are settled in memory */
wmb();
mutex_unlock(&hdev->lock);
}
| linux-master | drivers/media/platform/qcom/venus/hfi_venus.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Linaro Ltd.
*
* Author: Stanimir Varbanov <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/kernel.h>
#include "core.h"
#include "hfi_helper.h"
#include "hfi_parser.h"
typedef void (*func)(struct hfi_plat_caps *cap, const void *data,
unsigned int size);
static void init_codecs(struct venus_core *core)
{
struct hfi_plat_caps *caps = core->caps, *cap;
unsigned long bit;
for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
cap = &caps[core->codecs_count++];
cap->codec = BIT(bit);
cap->domain = VIDC_SESSION_TYPE_DEC;
cap->valid = false;
}
for_each_set_bit(bit, &core->enc_codecs, MAX_CODEC_NUM) {
cap = &caps[core->codecs_count++];
cap->codec = BIT(bit);
cap->domain = VIDC_SESSION_TYPE_ENC;
cap->valid = false;
}
}
static void for_each_codec(struct hfi_plat_caps *caps, unsigned int caps_num,
u32 codecs, u32 domain, func cb, void *data,
unsigned int size)
{
struct hfi_plat_caps *cap;
unsigned int i;
for (i = 0; i < caps_num; i++) {
cap = &caps[i];
if (cap->valid && cap->domain == domain)
continue;
if (cap->codec & codecs && cap->domain == domain)
cb(cap, data, size);
}
}
static void
fill_buf_mode(struct hfi_plat_caps *cap, const void *data, unsigned int num)
{
const u32 *type = data;
if (*type == HFI_BUFFER_MODE_DYNAMIC)
cap->cap_bufs_mode_dynamic = true;
}
static void
parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_buffer_alloc_mode_supported *mode = data;
u32 num_entries = mode->num_entries;
u32 *type;
if (num_entries > MAX_ALLOC_MODE_ENTRIES)
return;
type = mode->data;
while (num_entries--) {
if (mode->buffer_type == HFI_BUFFER_OUTPUT ||
mode->buffer_type == HFI_BUFFER_OUTPUT2)
for_each_codec(core->caps, ARRAY_SIZE(core->caps),
codecs, domain, fill_buf_mode, type, 1);
type++;
}
}
static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
unsigned int num)
{
const struct hfi_profile_level *pl = data;
memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
cap->num_pl += num;
}
static void
parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_profile_level_supported *pl = data;
struct hfi_profile_level *proflevel = pl->profile_level;
struct hfi_profile_level pl_arr[HFI_MAX_PROFILE_COUNT] = {};
if (pl->profile_count > HFI_MAX_PROFILE_COUNT)
return;
memcpy(pl_arr, proflevel, pl->profile_count * sizeof(*proflevel));
for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
fill_profile_level, pl_arr, pl->profile_count);
}
static void
fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
{
const struct hfi_capability *caps = data;
memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
cap->num_caps += num;
}
static void
parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_capabilities *caps = data;
struct hfi_capability *cap = caps->data;
u32 num_caps = caps->num_capabilities;
struct hfi_capability caps_arr[MAX_CAP_ENTRIES] = {};
if (num_caps > MAX_CAP_ENTRIES)
return;
memcpy(caps_arr, cap, num_caps * sizeof(*cap));
for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
fill_caps, caps_arr, num_caps);
}
static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
unsigned int num_fmts)
{
const struct raw_formats *formats = fmts;
memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
cap->num_fmts += num_fmts;
}
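/*
 * hfi_uncompressed_format_supported embeds a variable-length list: each
 * plane-info entry is effectively two u32s (format, num_planes) followed
 * by num_planes hfi_uncompressed_plane_constraints, which is why the
 * walk below advances the pointer by hand instead of indexing a
 * fixed-size array.
 */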
static void
parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
{
struct hfi_uncompressed_format_supported *fmt = data;
struct hfi_uncompressed_plane_info *pinfo = fmt->plane_info;
struct hfi_uncompressed_plane_constraints *constr;
struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {};
u32 entries = fmt->format_entries;
unsigned int i = 0;
u32 num_planes;
while (entries) {
num_planes = pinfo->num_planes;
rawfmts[i].fmt = pinfo->format;
rawfmts[i].buftype = fmt->buffer_type;
i++;
if (pinfo->num_planes > MAX_PLANES)
break;
pinfo = (void *)pinfo + sizeof(*constr) * num_planes +
2 * sizeof(u32);
entries--;
}
for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain,
fill_raw_fmts, rawfmts, i);
}
static void parse_codecs(struct venus_core *core, void *data)
{
struct hfi_codec_supported *codecs = data;
core->dec_codecs = codecs->dec_codecs;
core->enc_codecs = codecs->enc_codecs;
if (IS_V1(core)) {
core->dec_codecs &= ~HFI_VIDEO_CODEC_HEVC;
core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK;
core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC;
}
}
static void parse_max_sessions(struct venus_core *core, const void *data)
{
const struct hfi_max_sessions_supported *sessions = data;
core->max_sessions_supported = sessions->max_sessions;
}
static void parse_codecs_mask(u32 *codecs, u32 *domain, void *data)
{
struct hfi_codec_mask_supported *mask = data;
*codecs = mask->codecs;
*domain = mask->video_domains;
}
static void parser_init(struct venus_inst *inst, u32 *codecs, u32 *domain)
{
if (!inst || !IS_V1(inst->core))
return;
*codecs = inst->hfi_codec;
*domain = inst->session_type;
}
static void parser_fini(struct venus_inst *inst, u32 codecs, u32 domain)
{
struct hfi_plat_caps *caps, *cap;
unsigned int i;
u32 dom;
if (!inst || !IS_V1(inst->core))
return;
caps = inst->core->caps;
dom = inst->session_type;
for (i = 0; i < MAX_CODEC_NUM; i++) {
cap = &caps[i];
if (cap->codec & codecs && cap->domain == dom)
cap->valid = true;
}
}
static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst)
{
const struct hfi_platform *plat;
const struct hfi_plat_caps *caps = NULL;
u32 enc_codecs, dec_codecs, count = 0;
unsigned int entries;
int ret;
plat = hfi_platform_get(core->res->hfi_version);
if (!plat)
return -EINVAL;
if (inst)
return 0;
ret = hfi_platform_get_codecs(core, &enc_codecs, &dec_codecs, &count);
if (ret)
return ret;
if (plat->capabilities)
caps = plat->capabilities(&entries);
if (!caps || !entries || !count)
return -EINVAL;
core->enc_codecs = enc_codecs;
core->dec_codecs = dec_codecs;
core->codecs_count = count;
core->max_sessions_supported = MAX_SESSIONS;
memset(core->caps, 0, sizeof(*caps) * MAX_CODEC_NUM);
memcpy(core->caps, caps, sizeof(*caps) * entries);
return 0;
}
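/*
 * The capability blob from the firmware is parsed as a flat stream of
 * u32 words: every word is tested against the known property IDs and,
 * on a match, the words following it are handed to the corresponding
 * parse_*() helper as the payload.  Note the cursor advances one word
 * at a time, so payload words are also scanned as potential IDs.
 */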
u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf,
u32 size)
{
unsigned int words_count = size >> 2;
u32 *word = buf, *data, codecs = 0, domain = 0;
int ret;
ret = hfi_platform_parser(core, inst);
if (!ret)
return HFI_ERR_NONE;
if (size % 4)
return HFI_ERR_SYS_INSUFFICIENT_RESOURCES;
parser_init(inst, &codecs, &domain);
if (core->res->hfi_version > HFI_VERSION_1XX) {
core->codecs_count = 0;
memset(core->caps, 0, sizeof(core->caps));
}
while (words_count) {
data = word + 1;
switch (*word) {
case HFI_PROPERTY_PARAM_CODEC_SUPPORTED:
parse_codecs(core, data);
init_codecs(core);
break;
case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED:
parse_max_sessions(core, data);
break;
case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED:
parse_codecs_mask(&codecs, &domain, data);
break;
case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED:
parse_raw_formats(core, codecs, domain, data);
break;
case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED:
parse_caps(core, codecs, domain, data);
break;
case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED:
parse_profile_level(core, codecs, domain, data);
break;
case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED:
parse_alloc_mode(core, codecs, domain, data);
break;
default:
break;
}
word++;
words_count--;
}
if (!core->max_sessions_supported)
core->max_sessions_supported = MAX_SESSIONS;
parser_fini(inst, codecs, domain);
return HFI_ERR_NONE;
}
| linux-master | drivers/media/platform/qcom/venus/hfi_parser.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-vfe-480.c
*
* Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v480 (SM8250)
*
* Copyright (C) 2020-2021 Linaro Ltd.
* Copyright (C) 2021 Jonathan Marek
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss.h"
#include "camss-vfe.h"
/* VFE 2/3 are lite and have a different register layout */
#define IS_LITE (vfe->id >= 2 ? 1 : 0)
#define VFE_HW_VERSION (0x00)
#define VFE_GLOBAL_RESET_CMD (IS_LITE ? 0x0c : 0x1c)
#define GLOBAL_RESET_HW_AND_REG (IS_LITE ? BIT(1) : BIT(0))
#define VFE_REG_UPDATE_CMD (IS_LITE ? 0x20 : 0x34)
static inline int reg_update_rdi(struct vfe_device *vfe, int n)
{
return IS_LITE ? BIT(n) : BIT(1 + (n));
}
#define REG_UPDATE_RDI reg_update_rdi
#define VFE_IRQ_CMD (IS_LITE ? 0x24 : 0x38)
#define IRQ_CMD_GLOBAL_CLEAR BIT(0)
#define VFE_IRQ_MASK(n) ((IS_LITE ? 0x28 : 0x3c) + (n) * 4)
#define IRQ_MASK_0_RESET_ACK (IS_LITE ? BIT(17) : BIT(0))
#define IRQ_MASK_0_BUS_TOP_IRQ (IS_LITE ? BIT(4) : BIT(7))
#define VFE_IRQ_CLEAR(n) ((IS_LITE ? 0x34 : 0x48) + (n) * 4)
#define VFE_IRQ_STATUS(n) ((IS_LITE ? 0x40 : 0x54) + (n) * 4)
#define BUS_REG_BASE (IS_LITE ? 0x1a00 : 0xaa00)
#define VFE_BUS_WM_CGC_OVERRIDE (BUS_REG_BASE + 0x08)
#define WM_CGC_OVERRIDE_ALL (0x3FFFFFF)
#define VFE_BUS_WM_TEST_BUS_CTRL (BUS_REG_BASE + 0xdc)
#define VFE_BUS_IRQ_MASK(n) (BUS_REG_BASE + 0x18 + (n) * 4)
static inline int bus_irq_mask_0_rdi_rup(struct vfe_device *vfe, int n)
{
return IS_LITE ? BIT(n) : BIT(3 + (n));
}
#define BUS_IRQ_MASK_0_RDI_RUP bus_irq_mask_0_rdi_rup
static inline int bus_irq_mask_0_comp_done(struct vfe_device *vfe, int n)
{
return IS_LITE ? BIT(4 + (n)) : BIT(6 + (n));
}
#define BUS_IRQ_MASK_0_COMP_DONE bus_irq_mask_0_comp_done
#define VFE_BUS_IRQ_CLEAR(n) (BUS_REG_BASE + 0x20 + (n) * 4)
#define VFE_BUS_IRQ_STATUS(n) (BUS_REG_BASE + 0x28 + (n) * 4)
#define VFE_BUS_IRQ_CLEAR_GLOBAL (BUS_REG_BASE + 0x30)
#define VFE_BUS_WM_CFG(n) (BUS_REG_BASE + 0x200 + (n) * 0x100)
#define WM_CFG_EN (0)
#define WM_CFG_MODE (16)
#define MODE_QCOM_PLAIN (0)
#define MODE_MIPI_RAW (1)
#define VFE_BUS_WM_IMAGE_ADDR(n) (BUS_REG_BASE + 0x204 + (n) * 0x100)
#define VFE_BUS_WM_FRAME_INCR(n) (BUS_REG_BASE + 0x208 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_CFG_0(n) (BUS_REG_BASE + 0x20c + (n) * 0x100)
#define WM_IMAGE_CFG_0_DEFAULT_WIDTH (0xFFFF)
#define VFE_BUS_WM_IMAGE_CFG_1(n) (BUS_REG_BASE + 0x210 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_CFG_2(n) (BUS_REG_BASE + 0x214 + (n) * 0x100)
#define VFE_BUS_WM_PACKER_CFG(n) (BUS_REG_BASE + 0x218 + (n) * 0x100)
#define VFE_BUS_WM_HEADER_ADDR(n) (BUS_REG_BASE + 0x220 + (n) * 0x100)
#define VFE_BUS_WM_HEADER_INCR(n) (BUS_REG_BASE + 0x224 + (n) * 0x100)
#define VFE_BUS_WM_HEADER_CFG(n) (BUS_REG_BASE + 0x228 + (n) * 0x100)
#define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n) (BUS_REG_BASE + 0x230 + (n) * 0x100)
#define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n) (BUS_REG_BASE + 0x234 + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PERIOD(n) (BUS_REG_BASE + 0x238 + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PATTERN(n) (BUS_REG_BASE + 0x23c + (n) * 0x100)
#define VFE_BUS_WM_SYSTEM_CACHE_CFG(n) (BUS_REG_BASE + 0x260 + (n) * 0x100)
#define VFE_BUS_WM_BURST_LIMIT(n) (BUS_REG_BASE + 0x264 + (n) * 0x100)
/* For Titan 480, each bus client is hardcoded to a specific path,
 * and each bus client is part of a hardcoded "comp group"
 */
#define RDI_WM(n) ((IS_LITE ? 0 : 23) + (n))
#define RDI_COMP_GROUP(n) ((IS_LITE ? 0 : 11) + (n))
#define MAX_VFE_OUTPUT_LINES 4
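/*
 * Worked example (ours, for illustration only): combining the helpers and the
 * hardcoded comp groups above, the bus IRQ bits for RDI0 on a full VFE are
 * BIT(3 + 0) for reg-update and BIT(6 + RDI_COMP_GROUP(0)) = BIT(17) for
 * comp-done; on a lite VFE they collapse to BIT(0) and BIT(4).
 */
static u32 __maybe_unused sketch_rdi0_bus_irq_bits(struct vfe_device *vfe)
{
return BUS_IRQ_MASK_0_RDI_RUP(vfe, 0) |
BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(0));
}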
static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
u32 gen = (hw_version >> 28) & 0xF;
u32 rev = (hw_version >> 16) & 0xFFF;
u32 step = hw_version & 0xFFFF;
dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n", gen, rev, step);
return hw_version;
}
static void vfe_global_reset(struct vfe_device *vfe)
{
writel_relaxed(IRQ_MASK_0_RESET_ACK, vfe->base + VFE_IRQ_MASK(0));
writel_relaxed(GLOBAL_RESET_HW_AND_REG, vfe->base + VFE_GLOBAL_RESET_CMD);
}
static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
{
struct v4l2_pix_format_mplane *pix =
&line->video_out.active_fmt.fmt.pix_mp;
wm = RDI_WM(wm); /* map the RDI index to the WM it is hardwired to */
/* no clock gating at bus input */
writel_relaxed(WM_CGC_OVERRIDE_ALL, vfe->base + VFE_BUS_WM_CGC_OVERRIDE);
writel_relaxed(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);
writel_relaxed(pix->plane_fmt[0].bytesperline * pix->height,
vfe->base + VFE_BUS_WM_FRAME_INCR(wm));
writel_relaxed(0xf, vfe->base + VFE_BUS_WM_BURST_LIMIT(wm));
writel_relaxed(WM_IMAGE_CFG_0_DEFAULT_WIDTH,
vfe->base + VFE_BUS_WM_IMAGE_CFG_0(wm));
writel_relaxed(pix->plane_fmt[0].bytesperline,
vfe->base + VFE_BUS_WM_IMAGE_CFG_2(wm));
writel_relaxed(0, vfe->base + VFE_BUS_WM_PACKER_CFG(wm));
/* no dropped frames, one irq per frame */
writel_relaxed(0, vfe->base + VFE_BUS_WM_FRAMEDROP_PERIOD(wm));
writel_relaxed(1, vfe->base + VFE_BUS_WM_FRAMEDROP_PATTERN(wm));
writel_relaxed(0, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(wm));
writel_relaxed(1, vfe->base + VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(wm));
writel_relaxed(1 << WM_CFG_EN | MODE_MIPI_RAW << WM_CFG_MODE,
vfe->base + VFE_BUS_WM_CFG(wm));
}
static void vfe_wm_stop(struct vfe_device *vfe, u8 wm)
{
wm = RDI_WM(wm); /* map the RDI index to the WM it is hardwired to */
writel_relaxed(0, vfe->base + VFE_BUS_WM_CFG(wm));
}
static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
struct vfe_line *line)
{
wm = RDI_WM(wm); /* map the RDI index to the WM it is hardwired to */
writel_relaxed(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
}
static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
vfe->reg_update |= REG_UPDATE_RDI(vfe, line_id);
writel_relaxed(vfe->reg_update, vfe->base + VFE_REG_UPDATE_CMD);
}
static inline void vfe_reg_update_clear(struct vfe_device *vfe,
enum vfe_line_id line_id)
{
vfe->reg_update &= ~REG_UPDATE_RDI(vfe, line_id);
}
static void vfe_enable_irq_common(struct vfe_device *vfe)
{
/* enable reset ack IRQ and top BUS status IRQ */
writel_relaxed(IRQ_MASK_0_RESET_ACK | IRQ_MASK_0_BUS_TOP_IRQ,
vfe->base + VFE_IRQ_MASK(0));
}
static void vfe_enable_lines_irq(struct vfe_device *vfe)
{
int i;
u32 bus_irq_mask = 0;
for (i = 0; i < MAX_VFE_OUTPUT_LINES; i++) {
/* Enable IRQs for newly added lines, but also keep the IRQs of already running lines */
if (vfe->line[i].output.state == VFE_OUTPUT_RESERVED ||
vfe->line[i].output.state == VFE_OUTPUT_ON) {
bus_irq_mask |= BUS_IRQ_MASK_0_RDI_RUP(vfe, i)
| BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i));
}
}
writel_relaxed(bus_irq_mask, vfe->base + VFE_BUS_IRQ_MASK(0));
}
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id);
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm);
/*
* vfe_isr - VFE module interrupt handler
* @irq: Interrupt line
* @dev: VFE device
*
* Return IRQ_HANDLED on success
*/
static irqreturn_t vfe_isr(int irq, void *dev)
{
struct vfe_device *vfe = dev;
u32 status;
int i;
status = readl_relaxed(vfe->base + VFE_IRQ_STATUS(0));
writel_relaxed(status, vfe->base + VFE_IRQ_CLEAR(0));
writel_relaxed(IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);
if (status & IRQ_MASK_0_RESET_ACK)
vfe_isr_reset_ack(vfe);
if (status & IRQ_MASK_0_BUS_TOP_IRQ) {
u32 status = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(0));
writel_relaxed(status, vfe->base + VFE_BUS_IRQ_CLEAR(0));
writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL);
/* Loop through all WMs IRQs */
for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++) {
if (status & BUS_IRQ_MASK_0_RDI_RUP(vfe, i))
vfe_isr_reg_update(vfe, i);
if (status & BUS_IRQ_MASK_0_COMP_DONE(vfe, RDI_COMP_GROUP(i)))
vfe_isr_wm_done(vfe, i);
}
}
return IRQ_HANDLED;
}
/*
* vfe_halt - Trigger halt on VFE module and wait to complete
* @vfe: VFE device
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_halt(struct vfe_device *vfe)
{
/* rely on vfe_disable_output() to stop the VFE */
return 0;
}
static int vfe_get_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
spin_lock_irqsave(&vfe->output_lock, flags);
output = &line->output;
if (output->state > VFE_OUTPUT_RESERVED) {
dev_err(vfe->camss->dev, "Output is running\n");
goto error;
}
output->wm_num = 1;
/* Correspondence between VFE line number and WM number.
* line 0 -> RDI 0, line 1 -> RDI 1, line 2 -> RDI 2, line 3 -> PIX/RDI 3
* Note this 1:1 mapping will not work for PIX streams.
*/
output->wm_idx[0] = line->id;
vfe->wm_output_map[line->id] = line->id;
output->drop_update_idx = 0;
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
error:
spin_unlock_irqrestore(&vfe->output_lock, flags);
output->state = VFE_OUTPUT_OFF;
return -EINVAL;
}
static int vfe_enable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&vfe->output_lock, flags);
vfe_reg_update_clear(vfe, line->id);
if (output->state > VFE_OUTPUT_RESERVED) {
dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
output->state);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return -EINVAL;
}
WARN_ON(output->gen2.active_num);
output->state = VFE_OUTPUT_ON;
output->sequence = 0;
output->wait_reg_update = 0;
reinit_completion(&output->reg_update);
vfe_wm_start(vfe, output->wm_idx[0], line);
for (i = 0; i < 2; i++) {
output->buf[i] = vfe_buf_get_pending(output);
if (!output->buf[i])
break;
output->gen2.active_num++;
vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line);
}
vfe_reg_update(vfe, line->id);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
static int vfe_disable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
unsigned long flags;
unsigned int i;
bool done;
int timeout = 0;
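/* poll for the WM to drain: up to 100 iterations of 10-20 ms, i.e. 1-2 s */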
do {
spin_lock_irqsave(&vfe->output_lock, flags);
done = !output->gen2.active_num;
spin_unlock_irqrestore(&vfe->output_lock, flags);
usleep_range(10000, 20000);
if (timeout++ == 100) {
dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
vfe_reset(vfe);
output->gen2.active_num = 0;
return 0;
}
} while (!done);
spin_lock_irqsave(&vfe->output_lock, flags);
for (i = 0; i < output->wm_num; i++)
vfe_wm_stop(vfe, output->wm_idx[i]);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
/*
* vfe_enable - Enable streaming on VFE line
* @line: VFE line
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_enable(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
int ret;
mutex_lock(&vfe->stream_lock);
if (!vfe->stream_count)
vfe_enable_irq_common(vfe);
vfe->stream_count++;
vfe_enable_lines_irq(vfe);
mutex_unlock(&vfe->stream_lock);
ret = vfe_get_output(line);
if (ret < 0)
goto error_get_output;
ret = vfe_enable_output(line);
if (ret < 0)
goto error_enable_output;
vfe->was_streaming = 1;
return 0;
error_enable_output:
vfe_put_output(line);
error_get_output:
mutex_lock(&vfe->stream_lock);
vfe->stream_count--;
mutex_unlock(&vfe->stream_lock);
return ret;
}
/*
* vfe_disable - Disable streaming on VFE line
* @line: VFE line
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_disable(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
vfe_disable_output(line);
vfe_put_output(line);
mutex_lock(&vfe->stream_lock);
vfe->stream_count--;
mutex_unlock(&vfe->stream_lock);
return 0;
}
/*
* vfe_isr_reg_update - Process reg update interrupt
* @vfe: VFE Device
* @line_id: VFE line
*/
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
struct vfe_output *output;
unsigned long flags;
spin_lock_irqsave(&vfe->output_lock, flags);
vfe_reg_update_clear(vfe, line_id);
output = &vfe->line[line_id].output;
if (output->wait_reg_update) {
output->wait_reg_update = 0;
complete(&output->reg_update);
}
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
/*
* vfe_isr_wm_done - Process write master done interrupt
* @vfe: VFE Device
* @wm: Write master id
*/
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
{
struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]];
struct camss_buffer *ready_buf;
struct vfe_output *output;
unsigned long flags;
u32 index;
u64 ts = ktime_get_ns();
spin_lock_irqsave(&vfe->output_lock, flags);
if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
dev_err_ratelimited(vfe->camss->dev,
"Received wm done for unmapped index\n");
goto out_unlock;
}
output = &vfe->line[vfe->wm_output_map[wm]].output;
ready_buf = output->buf[0];
if (!ready_buf) {
dev_err_ratelimited(vfe->camss->dev,
"Missing ready buf %d!\n", output->state);
goto out_unlock;
}
ready_buf->vb.vb2_buf.timestamp = ts;
ready_buf->vb.sequence = output->sequence++;
index = 0;
output->buf[0] = output->buf[1];
if (output->buf[0])
index = 1;
output->buf[index] = vfe_buf_get_pending(output);
if (output->buf[index])
vfe_wm_update(vfe, output->wm_idx[0], output->buf[index]->addr[0], line);
else
output->gen2.active_num--;
spin_unlock_irqrestore(&vfe->output_lock, flags);
vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
return;
out_unlock:
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
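/*
 * Illustrative sketch, not part of the driver: the two-slot buffer rotation
 * done under the lock in vfe_isr_wm_done() above, restated in isolation with
 * a hypothetical signature. Slot 0 is always the buffer the write master just
 * completed; slot 1, if populated, slides down, and a pending buffer
 * (possibly NULL) refills the freed slot, otherwise one fewer buffer is in
 * flight.
 */
static void __maybe_unused sketch_rotate_bufs(struct camss_buffer *bufs[2],
struct camss_buffer *pending,
unsigned int *active_num)
{
u32 index = 0;
bufs[0] = bufs[1]; /* the completed slot is recycled */
if (bufs[0])
index = 1; /* slot 0 still busy: refill slot 1 */
bufs[index] = pending;
if (!bufs[index])
(*active_num)--; /* nothing queued: hardware owns one buffer fewer */
}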
/*
* vfe_pm_domain_off - Disable power domains specific to this VFE.
* @vfe: VFE Device
*/
static void vfe_pm_domain_off(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
if (vfe->id >= camss->vfe_num)
return;
device_link_del(camss->genpd_link[vfe->id]);
}
/*
* vfe_pm_domain_on - Enable power domains specific to this VFE.
* @vfe: VFE Device
*/
static int vfe_pm_domain_on(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
enum vfe_line_id id = vfe->id;
if (id >= camss->vfe_num)
return 0;
camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!camss->genpd_link[id])
return -EINVAL;
return 0;
}
/*
* vfe_queue_buffer - Add empty buffer
* @vid: Video device structure
* @buf: Buffer to be enqueued
*
* Add an empty buffer - depending on the current number of buffers it will be
put in the pending buffer queue or given directly to the hardware to be filled.
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_queue_buffer(struct camss_video *vid,
struct camss_buffer *buf)
{
struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
output = &line->output;
spin_lock_irqsave(&vfe->output_lock, flags);
if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) {
output->buf[output->gen2.active_num++] = buf;
vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line);
} else {
vfe_buf_add_pending(output, buf);
}
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
static const struct camss_video_ops vfe_video_ops_480 = {
.queue_buffer = vfe_queue_buffer,
.flush_buffers = vfe_flush_buffers,
};
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
vfe->video_ops = vfe_video_ops_480;
vfe->line_num = MAX_VFE_OUTPUT_LINES;
}
const struct vfe_hw_ops vfe_ops_480 = {
.global_reset = vfe_global_reset,
.hw_version = vfe_hw_version,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
.pm_domain_on = vfe_pm_domain_on,
.subdev_init = vfe_subdev_init,
.vfe_disable = vfe_disable,
.vfe_enable = vfe_enable,
.vfe_halt = vfe_halt,
};
| linux-master | drivers/media/platform/qcom/camss/camss-vfe-480.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-csiphy.c
*
* Qualcomm MSM Camera Subsystem - CSIPHY Module
*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2016-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include "camss-csiphy.h"
#include "camss.h"
#define MSM_CSIPHY_NAME "msm_csiphy"
struct csiphy_format {
u32 code;
u8 bpp;
};
static const struct csiphy_format csiphy_formats_8x16[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
{ MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
{ MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
{ MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
{ MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
{ MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
{ MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
{ MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
{ MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
{ MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
{ MEDIA_BUS_FMT_Y10_1X10, 10 },
};
static const struct csiphy_format csiphy_formats_8x96[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
{ MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
{ MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
{ MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
{ MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
{ MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
{ MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
{ MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
{ MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
{ MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
{ MEDIA_BUS_FMT_SBGGR14_1X14, 14 },
{ MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
{ MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
{ MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
{ MEDIA_BUS_FMT_Y10_1X10, 10 },
};
static const struct csiphy_format csiphy_formats_sdm845[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
{ MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
{ MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
{ MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
{ MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
{ MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
{ MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
{ MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
{ MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
{ MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
{ MEDIA_BUS_FMT_SBGGR14_1X14, 14 },
{ MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
{ MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
{ MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
{ MEDIA_BUS_FMT_Y8_1X8, 8 },
{ MEDIA_BUS_FMT_Y10_1X10, 10 },
};
/*
* csiphy_get_bpp - map media bus format to bits per pixel
* @formats: supported media bus formats array
* @nformats: size of @formats array
* @code: media bus format code
*
* Return number of bits per pixel
*/
static u8 csiphy_get_bpp(const struct csiphy_format *formats,
unsigned int nformats, u32 code)
{
unsigned int i;
for (i = 0; i < nformats; i++)
if (code == formats[i].code)
return formats[i].bpp;
WARN(1, "Unknown format\n");
return formats[0].bpp;
}
/*
* csiphy_set_clock_rates - Calculate and set clock rates on CSIPHY module
* @csiphy: CSIPHY device
*/
static int csiphy_set_clock_rates(struct csiphy_device *csiphy)
{
struct device *dev = csiphy->camss->dev;
s64 link_freq;
int i, j;
int ret;
u8 bpp = csiphy_get_bpp(csiphy->formats, csiphy->nformats,
csiphy->fmt[MSM_CSIPHY_PAD_SINK].code);
u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data;
link_freq = camss_get_link_freq(&csiphy->subdev.entity, bpp, num_lanes);
if (link_freq < 0)
link_freq = 0;
for (i = 0; i < csiphy->nclocks; i++) {
struct camss_clock *clock = &csiphy->clock[i];
if (csiphy->rate_set[i]) {
u64 min_rate = link_freq / 4;
long round_rate;
camss_add_clock_margin(&min_rate);
for (j = 0; j < clock->nfreqs; j++)
if (min_rate < clock->freq[j])
break;
if (j == clock->nfreqs) {
dev_err(dev,
"Pixel clock is too high for CSIPHY\n");
return -EINVAL;
}
/*
 * If the sensor pixel clock is not available,
 * set the highest possible CSIPHY clock rate.
 */
if (min_rate == 0)
j = clock->nfreqs - 1;
round_rate = clk_round_rate(clock->clk, clock->freq[j]);
if (round_rate < 0) {
dev_err(dev, "clk round rate failed: %ld\n",
round_rate);
return -EINVAL;
}
csiphy->timer_clk_rate = round_rate;
ret = clk_set_rate(clock->clk, csiphy->timer_clk_rate);
if (ret < 0) {
dev_err(dev, "clk set rate failed: %d\n", ret);
return ret;
}
}
}
return 0;
}
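/*
 * Illustrative sketch, not part of the driver: the rate-selection rule from
 * csiphy_set_clock_rates() above with a hypothetical signature. The timer
 * clock floor is a quarter of the link frequency plus margin; the first
 * supported rate above the floor wins, a zero floor (no known link frequency)
 * falls back to the highest rate, and no qualifying rate at all is an error.
 */
static long __maybe_unused sketch_pick_timer_rate(const u32 *freqs, int nfreqs,
s64 link_freq)
{
u64 min_rate;
int j;
if (link_freq < 0)
link_freq = 0; /* unknown link frequency */
min_rate = link_freq / 4;
camss_add_clock_margin(&min_rate);
for (j = 0; j < nfreqs; j++)
if (min_rate < freqs[j])
break;
if (j == nfreqs)
return -EINVAL; /* pixel clock too high for CSIPHY */
if (min_rate == 0)
j = nfreqs - 1; /* no link frequency: pick the highest rate */
return freqs[j];
}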
/*
* csiphy_set_power - Power on/off CSIPHY module
* @sd: CSIPHY V4L2 subdevice
* @on: Requested power state
*
* Return 0 on success or a negative error code otherwise
*/
static int csiphy_set_power(struct v4l2_subdev *sd, int on)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
struct device *dev = csiphy->camss->dev;
if (on) {
int ret;
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
ret = csiphy_set_clock_rates(csiphy);
if (ret < 0) {
pm_runtime_put_sync(dev);
return ret;
}
ret = camss_enable_clocks(csiphy->nclocks, csiphy->clock, dev);
if (ret < 0) {
pm_runtime_put_sync(dev);
return ret;
}
enable_irq(csiphy->irq);
csiphy->ops->reset(csiphy);
csiphy->ops->hw_version_read(csiphy, dev);
} else {
disable_irq(csiphy->irq);
camss_disable_clocks(csiphy->nclocks, csiphy->clock);
pm_runtime_put_sync(dev);
}
return 0;
}
/*
* csiphy_stream_on - Enable streaming on CSIPHY module
* @csiphy: CSIPHY device
*
* Helper function to enable streaming on CSIPHY module.
* Main configuration of CSIPHY module is also done here.
*
* Return 0 on success or a negative error code otherwise
*/
static int csiphy_stream_on(struct csiphy_device *csiphy)
{
struct csiphy_config *cfg = &csiphy->cfg;
s64 link_freq;
u8 lane_mask = csiphy->ops->get_lane_mask(&cfg->csi2->lane_cfg);
u8 bpp = csiphy_get_bpp(csiphy->formats, csiphy->nformats,
csiphy->fmt[MSM_CSIPHY_PAD_SINK].code);
u8 num_lanes = csiphy->cfg.csi2->lane_cfg.num_data;
u8 val;
link_freq = camss_get_link_freq(&csiphy->subdev.entity, bpp, num_lanes);
if (link_freq < 0) {
dev_err(csiphy->camss->dev,
"Cannot get CSI2 transmitter's link frequency\n");
return -EINVAL;
}
if (csiphy->base_clk_mux) {
val = readl_relaxed(csiphy->base_clk_mux);
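/* in combo mode, lanes 3+4 (mask 0x18) steer the CSID id into the high nibble */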
if (cfg->combo_mode && (lane_mask & 0x18) == 0x18) {
val &= ~0xf0;
val |= cfg->csid_id << 4;
} else {
val &= ~0xf;
val |= cfg->csid_id;
}
writel_relaxed(val, csiphy->base_clk_mux);
/* Enforce reg write ordering between clk mux & lane enabling */
wmb();
}
csiphy->ops->lanes_enable(csiphy, cfg, link_freq, lane_mask);
return 0;
}
/*
* csiphy_stream_off - Disable streaming on CSIPHY module
* @csiphy: CSIPHY device
*
* Helper function to disable streaming on CSIPHY module
*/
static void csiphy_stream_off(struct csiphy_device *csiphy)
{
csiphy->ops->lanes_disable(csiphy, &csiphy->cfg);
}
/*
* csiphy_set_stream - Enable/disable streaming on CSIPHY module
* @sd: CSIPHY V4L2 subdevice
* @enable: Requested streaming state
*
* Return 0 on success or a negative error code otherwise
*/
static int csiphy_set_stream(struct v4l2_subdev *sd, int enable)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
int ret = 0;
if (enable)
ret = csiphy_stream_on(csiphy);
else
csiphy_stream_off(csiphy);
return ret;
}
/*
* __csiphy_get_format - Get pointer to format structure
* @csiphy: CSIPHY device
* @sd_state: V4L2 subdev state
* @pad: pad from which format is requested
* @which: TRY or ACTIVE format
*
* Return pointer to TRY or ACTIVE format structure
*/
static struct v4l2_mbus_framefmt *
__csiphy_get_format(struct csiphy_device *csiphy,
struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&csiphy->subdev, sd_state,
pad);
return &csiphy->fmt[pad];
}
/*
* csiphy_try_format - Handle try format by pad subdev method
* @csiphy: CSIPHY device
* @sd_state: V4L2 subdev state
* @pad: pad on which format is requested
* @fmt: pointer to v4l2 format structure
* @which: wanted subdev format
*/
static void csiphy_try_format(struct csiphy_device *csiphy,
struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
unsigned int i;
switch (pad) {
case MSM_CSIPHY_PAD_SINK:
/* Set format on sink pad */
for (i = 0; i < csiphy->nformats; i++)
if (fmt->code == csiphy->formats[i].code)
break;
/* If not found, use UYVY as default */
if (i >= csiphy->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
fmt->field = V4L2_FIELD_NONE;
fmt->colorspace = V4L2_COLORSPACE_SRGB;
break;
case MSM_CSIPHY_PAD_SRC:
/* Set and return the same format as on the sink pad */
*fmt = *__csiphy_get_format(csiphy, sd_state,
MSM_CSIPHY_PAD_SINK,
which);
break;
}
}
/*
* csiphy_enum_mbus_code - Handle pixel format enumeration
* @sd: CSIPHY V4L2 subdevice
* @sd_state: V4L2 subdev state
* @code: pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int csiphy_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
if (code->pad == MSM_CSIPHY_PAD_SINK) {
if (code->index >= csiphy->nformats)
return -EINVAL;
code->code = csiphy->formats[code->index].code;
} else {
if (code->index > 0)
return -EINVAL;
format = __csiphy_get_format(csiphy, sd_state,
MSM_CSIPHY_PAD_SINK,
code->which);
code->code = format->code;
}
return 0;
}
/*
* csiphy_enum_frame_size - Handle frame size enumeration
* @sd: CSIPHY V4L2 subdevice
* @sd_state: V4L2 subdev state
* @fse: pointer to v4l2_subdev_frame_size_enum structure
* return -EINVAL or zero on success
*/
static int csiphy_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt format;
if (fse->index != 0)
return -EINVAL;
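/* probe the minimum size: a 1x1 request is clamped up by try_format */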
format.code = fse->code;
format.width = 1;
format.height = 1;
csiphy_try_format(csiphy, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
if (format.code != fse->code)
return -EINVAL;
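/* probe the maximum size: a (-1)x(-1) request is clamped down to the limits */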
format.code = fse->code;
format.width = -1;
format.height = -1;
csiphy_try_format(csiphy, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
return 0;
}
/*
* csiphy_get_format - Handle get format by pads subdev method
* @sd: CSIPHY V4L2 subdevice
* @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
*/
static int csiphy_get_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
format = __csiphy_get_format(csiphy, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
fmt->format = *format;
return 0;
}
/*
* csiphy_set_format - Handle set format by pads subdev method
* @sd: CSIPHY V4L2 subdevice
* @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
*/
static int csiphy_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csiphy_device *csiphy = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
format = __csiphy_get_format(csiphy, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
csiphy_try_format(csiphy, sd_state, fmt->pad, &fmt->format,
fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == MSM_CSIPHY_PAD_SINK) {
format = __csiphy_get_format(csiphy, sd_state,
MSM_CSIPHY_PAD_SRC,
fmt->which);
*format = fmt->format;
csiphy_try_format(csiphy, sd_state, MSM_CSIPHY_PAD_SRC,
format,
fmt->which);
}
return 0;
}
/*
* csiphy_init_formats - Initialize formats on all pads
* @sd: CSIPHY V4L2 subdevice
* @fh: V4L2 subdev file handle
*
* Initialize all pad formats with default values.
*
* Return 0 on success or a negative error code otherwise
*/
static int csiphy_init_formats(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
struct v4l2_subdev_format format = {
.pad = MSM_CSIPHY_PAD_SINK,
.which = fh ? V4L2_SUBDEV_FORMAT_TRY :
V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
.code = MEDIA_BUS_FMT_UYVY8_2X8,
.width = 1920,
.height = 1080
}
};
return csiphy_set_format(sd, fh ? fh->state : NULL, &format);
}
/*
* msm_csiphy_subdev_init - Initialize CSIPHY device structure and resources
* @csiphy: CSIPHY device
* @res: CSIPHY module resources table
* @id: CSIPHY module id
*
* Return 0 on success or a negative error code otherwise
*/
int msm_csiphy_subdev_init(struct camss *camss,
struct csiphy_device *csiphy,
const struct resources *res, u8 id)
{
struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
int i, j;
int ret;
csiphy->camss = camss;
csiphy->id = id;
csiphy->cfg.combo_mode = 0;
if (camss->version == CAMSS_8x16) {
csiphy->ops = &csiphy_ops_2ph_1_0;
csiphy->formats = csiphy_formats_8x16;
csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x16);
} else if (camss->version == CAMSS_8x96 ||
camss->version == CAMSS_660) {
csiphy->ops = &csiphy_ops_3ph_1_0;
csiphy->formats = csiphy_formats_8x96;
csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x96);
} else if (camss->version == CAMSS_845 ||
camss->version == CAMSS_8250) {
csiphy->ops = &csiphy_ops_3ph_1_0;
csiphy->formats = csiphy_formats_sdm845;
csiphy->nformats = ARRAY_SIZE(csiphy_formats_sdm845);
} else {
return -EINVAL;
}
/* Memory */
csiphy->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
if (IS_ERR(csiphy->base))
return PTR_ERR(csiphy->base);
if (camss->version == CAMSS_8x16 ||
camss->version == CAMSS_8x96) {
csiphy->base_clk_mux =
devm_platform_ioremap_resource_byname(pdev, res->reg[1]);
if (IS_ERR(csiphy->base_clk_mux))
return PTR_ERR(csiphy->base_clk_mux);
} else {
csiphy->base_clk_mux = NULL;
}
/* Interrupt */
ret = platform_get_irq_byname(pdev, res->interrupt[0]);
if (ret < 0)
return ret;
csiphy->irq = ret;
snprintf(csiphy->irq_name, sizeof(csiphy->irq_name), "%s_%s%d",
dev_name(dev), MSM_CSIPHY_NAME, csiphy->id);
ret = devm_request_irq(dev, csiphy->irq, csiphy->ops->isr,
IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN,
csiphy->irq_name, csiphy);
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
return ret;
}
/* Clocks */
csiphy->nclocks = 0;
while (res->clock[csiphy->nclocks])
csiphy->nclocks++;
csiphy->clock = devm_kcalloc(dev,
csiphy->nclocks, sizeof(*csiphy->clock),
GFP_KERNEL);
if (!csiphy->clock)
return -ENOMEM;
csiphy->rate_set = devm_kcalloc(dev,
csiphy->nclocks,
sizeof(*csiphy->rate_set),
GFP_KERNEL);
if (!csiphy->rate_set)
return -ENOMEM;
for (i = 0; i < csiphy->nclocks; i++) {
struct camss_clock *clock = &csiphy->clock[i];
clock->clk = devm_clk_get(dev, res->clock[i]);
if (IS_ERR(clock->clk))
return PTR_ERR(clock->clk);
clock->name = res->clock[i];
clock->nfreqs = 0;
while (res->clock_rate[i][clock->nfreqs])
clock->nfreqs++;
if (!clock->nfreqs) {
clock->freq = NULL;
continue;
}
clock->freq = devm_kcalloc(dev,
clock->nfreqs,
sizeof(*clock->freq),
GFP_KERNEL);
if (!clock->freq)
return -ENOMEM;
for (j = 0; j < clock->nfreqs; j++)
clock->freq[j] = res->clock_rate[i][j];
if (!strcmp(clock->name, "csiphy0_timer") ||
!strcmp(clock->name, "csiphy1_timer") ||
!strcmp(clock->name, "csiphy2_timer") ||
!strcmp(clock->name, "csiphy3_timer") ||
!strcmp(clock->name, "csiphy4_timer") ||
!strcmp(clock->name, "csiphy5_timer"))
csiphy->rate_set[i] = true;
if (camss->version == CAMSS_660 &&
(!strcmp(clock->name, "csi0_phy") ||
!strcmp(clock->name, "csi1_phy") ||
!strcmp(clock->name, "csi2_phy")))
csiphy->rate_set[i] = true;
}
return 0;
}
/*
* csiphy_link_setup - Setup CSIPHY connections
* @entity: Pointer to media entity structure
* @local: Pointer to local pad
* @remote: Pointer to remote pad
* @flags: Link flags
*
* Return 0 on success
*/
static int csiphy_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
{
if ((local->flags & MEDIA_PAD_FL_SOURCE) &&
(flags & MEDIA_LNK_FL_ENABLED)) {
struct v4l2_subdev *sd;
struct csiphy_device *csiphy;
struct csid_device *csid;
if (media_pad_remote_pad_first(local))
return -EBUSY;
sd = media_entity_to_v4l2_subdev(entity);
csiphy = v4l2_get_subdevdata(sd);
sd = media_entity_to_v4l2_subdev(remote->entity);
csid = v4l2_get_subdevdata(sd);
csiphy->cfg.csid_id = csid->id;
}
return 0;
}
static const struct v4l2_subdev_core_ops csiphy_core_ops = {
.s_power = csiphy_set_power,
};
static const struct v4l2_subdev_video_ops csiphy_video_ops = {
.s_stream = csiphy_set_stream,
};
static const struct v4l2_subdev_pad_ops csiphy_pad_ops = {
.enum_mbus_code = csiphy_enum_mbus_code,
.enum_frame_size = csiphy_enum_frame_size,
.get_fmt = csiphy_get_format,
.set_fmt = csiphy_set_format,
};
static const struct v4l2_subdev_ops csiphy_v4l2_ops = {
.core = &csiphy_core_ops,
.video = &csiphy_video_ops,
.pad = &csiphy_pad_ops,
};
static const struct v4l2_subdev_internal_ops csiphy_v4l2_internal_ops = {
.open = csiphy_init_formats,
};
static const struct media_entity_operations csiphy_media_ops = {
.link_setup = csiphy_link_setup,
.link_validate = v4l2_subdev_link_validate,
};
/*
* msm_csiphy_register_entity - Register subdev node for CSIPHY module
* @csiphy: CSIPHY device
* @v4l2_dev: V4L2 device
*
* Return 0 on success or a negative error code otherwise
*/
int msm_csiphy_register_entity(struct csiphy_device *csiphy,
struct v4l2_device *v4l2_dev)
{
struct v4l2_subdev *sd = &csiphy->subdev;
struct media_pad *pads = csiphy->pads;
struct device *dev = csiphy->camss->dev;
int ret;
v4l2_subdev_init(sd, &csiphy_v4l2_ops);
sd->internal_ops = &csiphy_v4l2_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
MSM_CSIPHY_NAME, csiphy->id);
v4l2_set_subdevdata(sd, csiphy);
ret = csiphy_init_formats(sd, NULL);
if (ret < 0) {
dev_err(dev, "Failed to init format: %d\n", ret);
return ret;
}
pads[MSM_CSIPHY_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
pads[MSM_CSIPHY_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
sd->entity.ops = &csiphy_media_ops;
ret = media_entity_pads_init(&sd->entity, MSM_CSIPHY_PADS_NUM, pads);
if (ret < 0) {
dev_err(dev, "Failed to init media entity: %d\n", ret);
return ret;
}
ret = v4l2_device_register_subdev(v4l2_dev, sd);
if (ret < 0) {
dev_err(dev, "Failed to register subdev: %d\n", ret);
media_entity_cleanup(&sd->entity);
}
return ret;
}
/*
* msm_csiphy_unregister_entity - Unregister CSIPHY module subdev node
* @csiphy: CSIPHY device
*/
void msm_csiphy_unregister_entity(struct csiphy_device *csiphy)
{
v4l2_device_unregister_subdev(&csiphy->subdev);
media_entity_cleanup(&csiphy->subdev.entity);
}
| linux-master | drivers/media/platform/qcom/camss/camss-csiphy.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-vfe-4-8.c
*
* Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.8
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015-2021 Linaro Ltd.
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss.h"
#include "camss-vfe.h"
#include "camss-vfe-gen1.h"
#define VFE_0_HW_VERSION 0x000
#define VFE_0_GLOBAL_RESET_CMD 0x018
#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2)
#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3)
#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4)
#define VFE_0_GLOBAL_RESET_CMD_PM BIT(5)
#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(6)
#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(7)
#define VFE_0_GLOBAL_RESET_CMD_DSP BIT(8)
#define VFE_0_GLOBAL_RESET_CMD_IDLE_CGC BIT(9)
#define VFE_0_MODULE_LENS_EN 0x040
#define VFE_0_MODULE_LENS_EN_DEMUX BIT(2)
#define VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE BIT(3)
#define VFE_0_MODULE_ZOOM_EN 0x04c
#define VFE_0_MODULE_ZOOM_EN_SCALE_ENC BIT(1)
#define VFE_0_MODULE_ZOOM_EN_CROP_ENC BIT(2)
#define VFE_0_MODULE_ZOOM_EN_REALIGN_BUF BIT(9)
#define VFE_0_CORE_CFG 0x050
#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
#define VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN BIT(4)
#define VFE_0_IRQ_CMD 0x058
#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0)
#define VFE_0_IRQ_MASK_0 0x05c
#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0)
#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1)
#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31)
#define VFE_0_IRQ_MASK_1 0x060
#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0)
#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7)
#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8)
#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29)
#define VFE_0_IRQ_CLEAR_0 0x064
#define VFE_0_IRQ_CLEAR_1 0x068
#define VFE_0_IRQ_STATUS_0 0x06c
#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0)
#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31)
#define VFE_0_IRQ_STATUS_1 0x070
#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7)
#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8)
#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29)
#define VFE_0_IRQ_COMPOSITE_MASK_0 0x074
#define VFE_0_VIOLATION_STATUS 0x07c
#define VFE_0_BUS_CMD 0x80
#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x)
#define VFE_0_BUS_CFG 0x084
#define VFE_0_BUS_XBAR_CFG_x(x) (0x90 + 0x4 * ((x) / 2))
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(2)
#define VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN BIT(3)
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTRA (0x1 << 4)
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER (0x2 << 4)
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0x0
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 0xc
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 0xd
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 0xe
#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x0a0 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x0a4 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x0ac + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x0b4 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT 1
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2)
#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x0b8 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x0bc + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x0c0 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
(0x0c4 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
(0x0c8 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
#define VFE_0_BUS_PING_PONG_STATUS 0x338
#define VFE_0_BUS_BDG_CMD 0x400
#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
#define VFE_0_BUS_BDG_QOS_CFG_0 0x404
#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5
#define VFE_0_BUS_BDG_QOS_CFG_1 0x408
#define VFE_0_BUS_BDG_QOS_CFG_2 0x40c
#define VFE_0_BUS_BDG_QOS_CFG_3 0x410
#define VFE_0_BUS_BDG_QOS_CFG_3_CFG 0xaa55aaa5
#define VFE_0_BUS_BDG_QOS_CFG_4 0x414
#define VFE_0_BUS_BDG_QOS_CFG_4_CFG 0xaa55aa55
#define VFE_0_BUS_BDG_QOS_CFG_5 0x418
#define VFE_0_BUS_BDG_QOS_CFG_6 0x41c
#define VFE_0_BUS_BDG_QOS_CFG_7 0x420
#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0005aa55
#define VFE_0_BUS_BDG_DS_CFG_0 0x424
#define VFE_0_BUS_BDG_DS_CFG_0_CFG 0xcccc1111
#define VFE_0_BUS_BDG_DS_CFG_1 0x428
#define VFE_0_BUS_BDG_DS_CFG_2 0x42c
#define VFE_0_BUS_BDG_DS_CFG_3 0x430
#define VFE_0_BUS_BDG_DS_CFG_4 0x434
#define VFE_0_BUS_BDG_DS_CFG_5 0x438
#define VFE_0_BUS_BDG_DS_CFG_6 0x43c
#define VFE_0_BUS_BDG_DS_CFG_7 0x440
#define VFE_0_BUS_BDG_DS_CFG_8 0x444
#define VFE_0_BUS_BDG_DS_CFG_9 0x448
#define VFE_0_BUS_BDG_DS_CFG_10 0x44c
#define VFE_0_BUS_BDG_DS_CFG_11 0x450
#define VFE_0_BUS_BDG_DS_CFG_12 0x454
#define VFE_0_BUS_BDG_DS_CFG_13 0x458
#define VFE_0_BUS_BDG_DS_CFG_14 0x45c
#define VFE_0_BUS_BDG_DS_CFG_15 0x460
#define VFE_0_BUS_BDG_DS_CFG_16 0x464
#define VFE_0_BUS_BDG_DS_CFG_16_CFG 0x00000110
#define VFE_0_RDI_CFG_x(x) (0x46c + (0x4 * (x)))
#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2)
#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
#define VFE_0_CAMIF_CMD 0x478
#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
#define VFE_0_CAMIF_CMD_NO_CHANGE 3
#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2)
#define VFE_0_CAMIF_CFG 0x47c
#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6)
#define VFE_0_CAMIF_FRAME_CFG 0x484
#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x488
#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x48c
#define VFE_0_CAMIF_SUBSAMPLE_CFG 0x490
#define VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN 0x498
#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x49c
#define VFE_0_CAMIF_STATUS 0x4a4
#define VFE_0_CAMIF_STATUS_HALT BIT(31)
#define VFE_0_REG_UPDATE 0x4ac
#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n))
#define VFE_0_REG_UPDATE_line_n(n) \
((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
#define VFE_0_DEMUX_CFG 0x560
#define VFE_0_DEMUX_CFG_PERIOD 0x3
#define VFE_0_DEMUX_GAIN_0 0x564
#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
#define VFE_0_DEMUX_GAIN_1 0x568
#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
#define VFE_0_DEMUX_EVEN_CFG 0x574
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
#define VFE_0_DEMUX_ODD_CFG 0x578
#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
#define VFE_0_SCALE_ENC_Y_CFG 0x91c
#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x920
#define VFE_0_SCALE_ENC_Y_H_PHASE 0x924
#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x934
#define VFE_0_SCALE_ENC_Y_V_PHASE 0x938
#define VFE_0_SCALE_ENC_CBCR_CFG 0x948
#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x94c
#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x950
#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x960
#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x964
#define VFE_0_CROP_ENC_Y_WIDTH 0x974
#define VFE_0_CROP_ENC_Y_HEIGHT 0x978
#define VFE_0_CROP_ENC_CBCR_WIDTH 0x97c
#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x980
#define VFE_0_CLAMP_ENC_MAX_CFG 0x984
#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
#define VFE_0_CLAMP_ENC_MIN_CFG 0x988
#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
#define VFE_0_REALIGN_BUF_CFG 0xaac
#define VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL BIT(2)
#define VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL BIT(3)
#define VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE BIT(4)
#define VFE_0_BUS_IMAGE_MASTER_CMD 0xcec
#define VFE_0_BUS_IMAGE_MASTER_n_SHIFT(x) (2 * (x))
#define CAMIF_TIMEOUT_SLEEP_US 1000
#define CAMIF_TIMEOUT_ALL_US 1000000
#define MSM_VFE_VFE0_UB_SIZE 2047
#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
return hw_version;
}
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
writel_relaxed(bits & ~clr_bits, vfe->base + reg);
}
static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
writel_relaxed(bits | set_bits, vfe->base + reg);
}
static void vfe_global_reset(struct vfe_device *vfe)
{
u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_IDLE_CGC |
VFE_0_GLOBAL_RESET_CMD_DSP |
VFE_0_GLOBAL_RESET_CMD_TESTGEN |
VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
VFE_0_GLOBAL_RESET_CMD_PM |
VFE_0_GLOBAL_RESET_CMD_REGISTER |
VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
VFE_0_GLOBAL_RESET_CMD_BUS |
VFE_0_GLOBAL_RESET_CMD_CAMIF |
VFE_0_GLOBAL_RESET_CMD_CORE;
writel_relaxed(BIT(31), vfe->base + VFE_0_IRQ_MASK_0);
/* Enforce barrier between IRQ mask setup and global reset */
wmb();
writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
}
static void vfe_halt_request(struct vfe_device *vfe)
{
writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
vfe->base + VFE_0_BUS_BDG_CMD);
}
static void vfe_halt_clear(struct vfe_device *vfe)
{
writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
}
static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
{
if (enable)
vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm),
1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT);
else
vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm),
1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT);
}
#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
static int vfe_word_per_line_by_pixel(u32 format, u32 pixel_per_line)
{
int val = 0;
switch (format) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
val = CALC_WORD(pixel_per_line, 1, 8);
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_VYUY:
val = CALC_WORD(pixel_per_line, 2, 8);
break;
}
return val;
}
static int vfe_word_per_line_by_bytes(u32 bytes_per_line)
{
return CALC_WORD(bytes_per_line, 1, 8);
}
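/*
 * Worked example (ours, for illustration only): CALC_WORD is a ceiling
 * division by N scaled by M, i.e. the number of 64-bit (8-byte) bus words
 * needed per line.
 */
static u32 __maybe_unused sketch_words_per_line(void)
{
/* 1920 px of NV12 luma at 1 byte/px: (1920 * 1 + 7) / 8 = 240 words */
u32 nv12_luma = CALC_WORD(1920, 1, 8);
/* 1920 px of packed YUYV at 2 bytes/px: (1920 * 2 + 7) / 8 = 480 words */
u32 yuyv = CALC_WORD(1920, 2, 8);
return nv12_luma + yuyv;
}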
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
*width = pix->width;
*height = pix->height;
switch (pix->pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
*bytesperline = pix->plane_fmt[0].bytesperline;
if (plane == 1)
*height /= 2;
break;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
*bytesperline = pix->plane_fmt[0].bytesperline;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_UYVY:
*bytesperline = pix->plane_fmt[plane].bytesperline;
break;
}
}
static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
struct v4l2_pix_format_mplane *pix,
u8 plane, u32 enable)
{
u32 reg;
if (enable) {
u16 width = 0, height = 0, bytesperline = 0, wpl;
vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
wpl = vfe_word_per_line_by_pixel(pix->pixelformat, width);
reg = height - 1;
reg |= ((wpl + 3) / 4 - 1) << 16;
writel_relaxed(reg, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
wpl = vfe_word_per_line_by_bytes(bytesperline);
reg = 0x3;
reg |= (height - 1) << 2;
reg |= ((wpl + 1) / 2) << 16;
writel_relaxed(reg, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
} else {
writel_relaxed(0, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
writel_relaxed(0, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
}
}
static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
{
u32 reg;
reg = readl_relaxed(vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
& VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
writel_relaxed(reg,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
}
static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
u32 pattern)
{
writel_relaxed(pattern, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
}
static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm,
u16 offset, u16 depth)
{
u32 reg;
reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
depth;
writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
}
static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
{
/* Order the bus reload command after any outstanding register writes */
wmb();
writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
/* Use barrier to make sure bus reload is issued before anything else */
wmb();
}
static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
{
writel_relaxed(addr,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
}
static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
{
writel_relaxed(addr,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
}
static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
{
u32 reg;
reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
return (reg >> wm) & 0x1;
}
static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
{
if (enable)
writel_relaxed(0x101, vfe->base + VFE_0_BUS_CFG);
else
writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
}
static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
enum vfe_line_id id)
{
u32 reg;
reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
switch (id) {
case VFE_LINE_RDI0:
default:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI1:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI2:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
}
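/* each XBAR_CFG register packs two WMs; odd WMs occupy the high 16 bits */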
if (wm % 2 == 1)
reg <<= 16;
vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
}
static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
{
writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
}
static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
enum vfe_line_id id)
{
u32 reg;
reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
switch (id) {
case VFE_LINE_RDI0:
default:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI1:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI2:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
}
if (wm % 2 == 1)
reg <<= 16;
vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
}
static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
u8 enable)
{
struct vfe_line *line = container_of(output, struct vfe_line, output);
u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
u32 reg;
switch (p) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
if (output->wm_idx[0] % 2 == 1)
reg <<= 16;
if (enable)
vfe_reg_set(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
reg);
else
vfe_reg_clr(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
reg);
reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
if (output->wm_idx[1] % 2 == 1)
reg <<= 16;
if (enable)
vfe_reg_set(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]),
reg);
else
vfe_reg_clr(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]),
reg);
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_UYVY:
reg = VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN;
reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
if (p == V4L2_PIX_FMT_YUYV || p == V4L2_PIX_FMT_YVYU)
reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
if (output->wm_idx[0] % 2 == 1)
reg <<= 16;
if (enable)
vfe_reg_set(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
reg);
else
vfe_reg_clr(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
reg);
break;
default:
break;
}
}
static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line,
u8 enable)
{
u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
u32 val = VFE_0_MODULE_ZOOM_EN_REALIGN_BUF;
if (p != V4L2_PIX_FMT_YUYV && p != V4L2_PIX_FMT_YVYU &&
p != V4L2_PIX_FMT_VYUY && p != V4L2_PIX_FMT_UYVY)
return;
if (enable) {
vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val);
} else {
vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val);
return;
}
val = VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE;
if (p == V4L2_PIX_FMT_UYVY || p == V4L2_PIX_FMT_YUYV)
val |= VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL;
else
val |= VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL;
writel_relaxed(val, vfe->base + VFE_0_REALIGN_BUF_CFG);
}
static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
{
vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
}
static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
/* Enforce barrier between line update and commit */
wmb();
writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
/* Make sure register update is issued before further reg writes */
wmb();
}
static inline void vfe_reg_update_clear(struct vfe_device *vfe,
enum vfe_line_id line_id)
{
vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
}
static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
enum vfe_line_id line_id, u8 enable)
{
u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
if (enable) {
vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
} else {
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
}
}
static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
enum vfe_line_id line_id, u8 enable)
{
struct vfe_output *output = &vfe->line[line_id].output;
unsigned int i;
u32 irq_en0;
u32 irq_en1;
u32 comp_mask = 0;
irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
for (i = 0; i < output->wm_num; i++) {
irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(output->wm_idx[i]);
comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
}
if (enable) {
vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
} else {
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
}
}
static void vfe_enable_irq_common(struct vfe_device *vfe)
{
u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
}
static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 val, even_cfg, odd_cfg;
writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
case MEDIA_BUS_FMT_YUYV8_2X8:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
break;
case MEDIA_BUS_FMT_YVYU8_2X8:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
default:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
break;
case MEDIA_BUS_FMT_VYUY8_2X8:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
break;
}
writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
}
static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
u32 reg;
u16 input, output;
u8 interp_reso;
u32 phase_mult;
writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
input = line->fmt[MSM_VFE_PAD_SINK].width - 1;
output = line->compose.width - 1;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (14 + interp_reso)) / output;
reg = (interp_reso << 28) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
input = line->fmt[MSM_VFE_PAD_SINK].height - 1;
output = line->compose.height - 1;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (14 + interp_reso)) / output;
reg = (interp_reso << 28) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
input = line->fmt[MSM_VFE_PAD_SINK].width - 1;
output = line->compose.width / 2 - 1;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (14 + interp_reso)) / output;
reg = (interp_reso << 28) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
input = line->fmt[MSM_VFE_PAD_SINK].height - 1;
output = line->compose.height - 1;
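	/* NV12/NV21 are 4:2:0, so the chroma plane is also half height */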
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
output = line->compose.height / 2 - 1;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (14 + interp_reso)) / output;
reg = (interp_reso << 28) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
}
static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
u32 reg;
u16 first, last;
first = line->crop.left;
last = line->crop.left + line->crop.width - 1;
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
first = line->crop.top;
last = line->crop.top + line->crop.height - 1;
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
first = line->crop.left / 2;
last = line->crop.left / 2 + line->crop.width / 2 - 1;
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
first = line->crop.top;
last = line->crop.top + line->crop.height - 1;
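	/* For 4:2:0 formats the chroma crop window is halved vertically too */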
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
first = line->crop.top / 2;
last = line->crop.top / 2 + line->crop.height / 2 - 1;
}
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
}
static void vfe_set_clamp_cfg(struct vfe_device *vfe)
{
u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
VFE_0_CLAMP_ENC_MAX_CFG_CH2;
writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
VFE_0_CLAMP_ENC_MIN_CFG_CH2;
writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
}
static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
{
/* empty */
}
static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 val;
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
case MEDIA_BUS_FMT_YUYV8_2X8:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
break;
case MEDIA_BUS_FMT_YVYU8_2X8:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
default:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
break;
case MEDIA_BUS_FMT_VYUY8_2X8:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
break;
}
val |= VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN;
writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
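	/*
	 * CAMIF geometry is programmed in pixel-clock samples; the 2X8 YUV
	 * formats handled above carry two samples per pixel, hence width * 2.
	 */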
val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
val |= (line->fmt[MSM_VFE_PAD_SINK].height - 1) << 16;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
val = 0xffffffff;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG);
val = 0xffffffff;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN);
val = 0xffffffff;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
}
static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable)
{
u32 cmd;
cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE;
writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
	/* Make sure the camif command is written before it is changed again */
wmb();
if (enable)
cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY;
else
cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY;
writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
}
static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
{
u32 val_lens = VFE_0_MODULE_LENS_EN_DEMUX |
VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE;
u32 val_zoom = VFE_0_MODULE_ZOOM_EN_SCALE_ENC |
VFE_0_MODULE_ZOOM_EN_CROP_ENC;
if (enable) {
vfe_reg_set(vfe, VFE_0_MODULE_LENS_EN, val_lens);
vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom);
} else {
vfe_reg_clr(vfe, VFE_0_MODULE_LENS_EN, val_lens);
vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom);
}
}
static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev)
{
u32 val;
int ret;
ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
val,
(val & VFE_0_CAMIF_STATUS_HALT),
CAMIF_TIMEOUT_SLEEP_US,
CAMIF_TIMEOUT_ALL_US);
if (ret < 0)
dev_err(dev, "%s: camif stop timeout\n", __func__);
return ret;
}
/*
* vfe_isr - VFE module interrupt handler
* @irq: Interrupt line
* @dev: VFE device
*
* Return IRQ_HANDLED on success
*/
static irqreturn_t vfe_isr(int irq, void *dev)
{
struct vfe_device *vfe = dev;
u32 value0, value1;
int i, j;
vfe->ops->isr_read(vfe, &value0, &value1);
dev_dbg(vfe->camss->dev, "VFE: status0 = 0x%08x, status1 = 0x%08x\n",
value0, value1);
if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
vfe->isr_ops.reset_ack(vfe);
if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION)
vfe->ops->violation_read(vfe);
if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK)
vfe->isr_ops.halt_ack(vfe);
for (i = VFE_LINE_RDI0; i < vfe->line_num; i++)
if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
vfe->isr_ops.reg_update(vfe, i);
if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
vfe->isr_ops.sof(vfe, VFE_LINE_PIX);
for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
vfe->isr_ops.sof(vfe, i);
for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
vfe->isr_ops.comp_done(vfe, i);
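			/*
			 * comp_done() already covers the PIX write masters;
			 * clear their ping-pong bits so wm_done() is not
			 * signalled a second time below.
			 */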
for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
if (vfe->wm_output_map[j] == VFE_LINE_PIX)
value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
}
for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
vfe->isr_ops.wm_done(vfe, i);
return IRQ_HANDLED;
}
static u16 vfe_get_ub_size(u8 vfe_id)
{
/* On VFE4.8 the ub-size is the same on both instances */
return MSM_VFE_VFE0_UB_SIZE_RDI;
}
static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
{
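	/*
	 * Per-WM command field: writing 2 << shift enables the write master,
	 * writing 1 << shift disables it.
	 */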
if (enable)
writel_relaxed(2 << VFE_0_BUS_IMAGE_MASTER_n_SHIFT(wm),
vfe->base + VFE_0_BUS_IMAGE_MASTER_CMD);
else
writel_relaxed(1 << VFE_0_BUS_IMAGE_MASTER_n_SHIFT(wm),
vfe->base + VFE_0_BUS_IMAGE_MASTER_CMD);
/* The WM must be enabled before sending other commands */
wmb();
}
static void vfe_set_qos(struct vfe_device *vfe)
{
u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
u32 val3 = VFE_0_BUS_BDG_QOS_CFG_3_CFG;
u32 val4 = VFE_0_BUS_BDG_QOS_CFG_4_CFG;
u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
writel_relaxed(val3, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
writel_relaxed(val4, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
writel_relaxed(val4, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
writel_relaxed(val4, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
}
static void vfe_set_ds(struct vfe_device *vfe)
{
u32 val = VFE_0_BUS_BDG_DS_CFG_0_CFG;
u32 val16 = VFE_0_BUS_BDG_DS_CFG_16_CFG;
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_0);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_1);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_2);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_3);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_4);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_5);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_6);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_7);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_8);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_9);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_10);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_11);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_12);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_13);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_14);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_15);
writel_relaxed(val16, vfe->base + VFE_0_BUS_BDG_DS_CFG_16);
}
static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
{
*value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
*value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0);
writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1);
/* Enforce barrier between local & global IRQ clear */
wmb();
writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
}
/*
* vfe_pm_domain_off - Disable power domains specific to this VFE.
* @vfe: VFE Device
*/
static void vfe_pm_domain_off(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
device_link_del(camss->genpd_link[vfe->id]);
}
/*
* vfe_pm_domain_on - Enable power domains specific to this VFE.
* @vfe: VFE Device
*/
static int vfe_pm_domain_on(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
enum vfe_line_id id = vfe->id;
camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id], DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
if (!camss->genpd_link[id]) {
dev_err(vfe->camss->dev, "Failed to add VFE#%d to power domain\n", id);
return -EINVAL;
}
return 0;
}
static void vfe_violation_read(struct vfe_device *vfe)
{
u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
}
static const struct vfe_hw_ops_gen1 vfe_ops_gen1_4_8 = {
.bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi,
.bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi,
.bus_enable_wr_if = vfe_bus_enable_wr_if,
.bus_reload_wm = vfe_bus_reload_wm,
.camif_wait_for_stop = vfe_camif_wait_for_stop,
.enable_irq_common = vfe_enable_irq_common,
.enable_irq_pix_line = vfe_enable_irq_pix_line,
.enable_irq_wm_line = vfe_enable_irq_wm_line,
.get_ub_size = vfe_get_ub_size,
.halt_clear = vfe_halt_clear,
.halt_request = vfe_halt_request,
.set_camif_cfg = vfe_set_camif_cfg,
.set_camif_cmd = vfe_set_camif_cmd,
.set_cgc_override = vfe_set_cgc_override,
.set_clamp_cfg = vfe_set_clamp_cfg,
.set_crop_cfg = vfe_set_crop_cfg,
.set_demux_cfg = vfe_set_demux_cfg,
.set_ds = vfe_set_ds,
.set_module_cfg = vfe_set_module_cfg,
.set_qos = vfe_set_qos,
.set_rdi_cid = vfe_set_rdi_cid,
.set_realign_cfg = vfe_set_realign_cfg,
.set_scale_cfg = vfe_set_scale_cfg,
.set_xbar_cfg = vfe_set_xbar_cfg,
.wm_enable = vfe_wm_enable,
.wm_frame_based = vfe_wm_frame_based,
.wm_get_ping_pong_status = vfe_wm_get_ping_pong_status,
.wm_line_based = vfe_wm_line_based,
.wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern,
.wm_set_framedrop_period = vfe_wm_set_framedrop_period,
.wm_set_ping_addr = vfe_wm_set_ping_addr,
.wm_set_pong_addr = vfe_wm_set_pong_addr,
.wm_set_subsample = vfe_wm_set_subsample,
.wm_set_ub_cfg = vfe_wm_set_ub_cfg,
};
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
vfe->isr_ops = vfe_isr_ops_gen1;
vfe->ops_gen1 = &vfe_ops_gen1_4_8;
vfe->video_ops = vfe_video_ops_gen1;
vfe->line_num = VFE_LINE_NUM_GEN1;
}
const struct vfe_hw_ops vfe_ops_4_8 = {
.global_reset = vfe_global_reset,
.hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
.pm_domain_on = vfe_pm_domain_on,
.reg_update_clear = vfe_reg_update_clear,
.reg_update = vfe_reg_update,
.subdev_init = vfe_subdev_init,
.vfe_disable = vfe_gen1_disable,
.vfe_enable = vfe_gen1_enable,
.vfe_halt = vfe_gen1_halt,
.violation_read = vfe_violation_read,
};
| linux-master | drivers/media/platform/qcom/camss/camss-vfe-4-8.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-csid-4-1.c
*
* Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
*
* Copyright (C) 2020 Linaro Ltd.
*/
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include "camss-csid.h"
#include "camss-csid-gen1.h"
#include "camss.h"
#define CAMSS_CSID_HW_VERSION 0x0
#define CAMSS_CSID_CORE_CTRL_0 0x004
#define CAMSS_CSID_CORE_CTRL_1 0x008
#define CAMSS_CSID_RST_CMD 0x00c
#define CAMSS_CSID_CID_LUT_VC_n(n) (0x010 + 0x4 * (n))
#define CAMSS_CSID_CID_n_CFG(n) (0x020 + 0x4 * (n))
#define CAMSS_CSID_CID_n_CFG_ISPIF_EN BIT(0)
#define CAMSS_CSID_CID_n_CFG_RDI_EN BIT(1)
#define CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT 4
#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_8 (PLAIN_FORMAT_PLAIN8 << 8)
#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16 (PLAIN_FORMAT_PLAIN16 << 8)
#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB (0 << 9)
#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_MSB (1 << 9)
#define CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP (0 << 10)
#define CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING (1 << 10)
#define CAMSS_CSID_IRQ_CLEAR_CMD 0x060
#define CAMSS_CSID_IRQ_MASK 0x064
#define CAMSS_CSID_IRQ_STATUS 0x068
#define CAMSS_CSID_TG_CTRL 0x0a0
#define CAMSS_CSID_TG_CTRL_DISABLE 0xa06436
#define CAMSS_CSID_TG_CTRL_ENABLE 0xa06437
#define CAMSS_CSID_TG_VC_CFG 0x0a4
#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff
#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f
#define CAMSS_CSID_TG_DT_n_CGG_0(n) (0x0ac + 0xc * (n))
#define CAMSS_CSID_TG_DT_n_CGG_1(n) (0x0b0 + 0xc * (n))
#define CAMSS_CSID_TG_DT_n_CGG_2(n) (0x0b4 + 0xc * (n))
static const struct csid_format csid_formats[] = {
{
MEDIA_BUS_FMT_UYVY8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_VYUY8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YUYV8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YVYU8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_SBGGR8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGBRG8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGRBG8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SRGGB8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SBGGR10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGBRG10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGRBG10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SRGGB10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SBGGR12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGBRG12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGRBG12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SRGGB12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_Y10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
};
static void csid_configure_stream(struct csid_device *csid, u8 enable)
{
struct csid_testgen_config *tg = &csid->testgen;
u32 val;
if (enable) {
struct v4l2_mbus_framefmt *input_format;
const struct csid_format *format;
u8 vc = 0; /* Virtual Channel 0 */
u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */
u8 dt_shift;
if (tg->enabled) {
/* Config Test Generator */
u32 num_lines, num_bytes_per_line;
input_format = &csid->fmt[MSM_CSID_PAD_SRC];
format = csid_get_fmt_entry(csid->formats, csid->nformats,
input_format->code);
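			/* Line length in bytes: width * bits per pixel * samples per pixel */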
num_bytes_per_line = input_format->width * format->bpp * format->spp / 8;
num_lines = input_format->height;
			/* 31:24 V blank, 23:13 H blank, 3:2 num of active DT, 1:0 VC */
val = ((CAMSS_CSID_TG_VC_CFG_V_BLANKING & 0xff) << 24) |
((CAMSS_CSID_TG_VC_CFG_H_BLANKING & 0x7ff) << 13);
writel_relaxed(val, csid->base + CAMSS_CSID_TG_VC_CFG);
/* 28:16 bytes per lines, 12:0 num of lines */
val = ((num_bytes_per_line & 0x1fff) << 16) |
(num_lines & 0x1fff);
writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_0(0));
/* 5:0 data type */
val = format->data_type;
writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_1(0));
/* 2:0 output test pattern */
val = tg->mode - 1;
writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_2(0));
} else {
struct csid_phy_config *phy = &csid->phy;
input_format = &csid->fmt[MSM_CSID_PAD_SINK];
format = csid_get_fmt_entry(csid->formats, csid->nformats,
input_format->code);
val = phy->lane_cnt - 1;
val |= phy->lane_assign << 4;
writel_relaxed(val, csid->base + CAMSS_CSID_CORE_CTRL_0);
val = phy->csiphy_id << 17;
val |= 0x9;
writel_relaxed(val, csid->base + CAMSS_CSID_CORE_CTRL_1);
}
/* Config LUT */
dt_shift = (cid % 4) * 8;
val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
val &= ~(0xff << dt_shift);
val |= format->data_type << dt_shift;
writel_relaxed(val, csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
val = CAMSS_CSID_CID_n_CFG_ISPIF_EN;
val |= CAMSS_CSID_CID_n_CFG_RDI_EN;
val |= format->decode_format << CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT;
val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP;
writel_relaxed(val, csid->base + CAMSS_CSID_CID_n_CFG(cid));
if (tg->enabled) {
val = CAMSS_CSID_TG_CTRL_ENABLE;
writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL);
}
} else {
if (tg->enabled) {
val = CAMSS_CSID_TG_CTRL_DISABLE;
writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL);
}
}
}
static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
{
if (val > 0 && val <= csid->testgen.nmodes)
csid->testgen.mode = val;
return 0;
}
static u32 csid_hw_version(struct csid_device *csid)
{
u32 hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION);
dev_dbg(csid->camss->dev, "CSID HW Version = 0x%08x\n", hw_version);
return hw_version;
}
static irqreturn_t csid_isr(int irq, void *dev)
{
struct csid_device *csid = dev;
u32 value;
value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS);
writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD);
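	/* Bit 11 flags reset-done; it completes the wait in csid_reset() */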
if ((value >> 11) & 0x1)
complete(&csid->reset_complete);
return IRQ_HANDLED;
}
static int csid_reset(struct csid_device *csid)
{
unsigned long time;
reinit_completion(&csid->reset_complete);
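	/* 0x7fff requests a SW reset of all CSID sub-blocks (assumed mask) */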
writel_relaxed(0x7fff, csid->base + CAMSS_CSID_RST_CMD);
time = wait_for_completion_timeout(&csid->reset_complete,
msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
if (!time) {
dev_err(csid->camss->dev, "CSID reset timeout\n");
return -EIO;
}
return 0;
}
static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
unsigned int match_format_idx, u32 match_code)
{
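	/* The gen1 CSID does not convert formats: the source pad mirrors the sink code */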
if (match_format_idx > 0)
return 0;
return sink_code;
}
static void csid_subdev_init(struct csid_device *csid)
{
csid->formats = csid_formats;
csid->nformats = ARRAY_SIZE(csid_formats);
csid->testgen.modes = csid_testgen_modes;
csid->testgen.nmodes = CSID_PAYLOAD_MODE_NUM_SUPPORTED_GEN1;
}
const struct csid_hw_ops csid_ops_4_1 = {
.configure_stream = csid_configure_stream,
.configure_testgen_pattern = csid_configure_testgen_pattern,
.hw_version = csid_hw_version,
.isr = csid_isr,
.reset = csid_reset,
.src_pad_code = csid_src_pad_code,
.subdev_init = csid_subdev_init,
};
| linux-master | drivers/media/platform/qcom/camss/camss-csid-4-1.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-ispif.c
*
* Qualcomm MSM Camera Subsystem - ISPIF (ISP Interface) Module
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include "camss-ispif.h"
#include "camss.h"
#define MSM_ISPIF_NAME "msm_ispif"
#define ISPIF_RST_CMD_0 0x008
#define ISPIF_RST_CMD_1 0x00c
#define ISPIF_RST_CMD_0_STROBED_RST_EN (1 << 0)
#define ISPIF_RST_CMD_0_MISC_LOGIC_RST (1 << 1)
#define ISPIF_RST_CMD_0_SW_REG_RST (1 << 2)
#define ISPIF_RST_CMD_0_PIX_INTF_0_CSID_RST (1 << 3)
#define ISPIF_RST_CMD_0_PIX_INTF_0_VFE_RST (1 << 4)
#define ISPIF_RST_CMD_0_PIX_INTF_1_CSID_RST (1 << 5)
#define ISPIF_RST_CMD_0_PIX_INTF_1_VFE_RST (1 << 6)
#define ISPIF_RST_CMD_0_RDI_INTF_0_CSID_RST (1 << 7)
#define ISPIF_RST_CMD_0_RDI_INTF_0_VFE_RST (1 << 8)
#define ISPIF_RST_CMD_0_RDI_INTF_1_CSID_RST (1 << 9)
#define ISPIF_RST_CMD_0_RDI_INTF_1_VFE_RST (1 << 10)
#define ISPIF_RST_CMD_0_RDI_INTF_2_CSID_RST (1 << 11)
#define ISPIF_RST_CMD_0_RDI_INTF_2_VFE_RST (1 << 12)
#define ISPIF_RST_CMD_0_PIX_OUTPUT_0_MISR_RST (1 << 16)
#define ISPIF_RST_CMD_0_RDI_OUTPUT_0_MISR_RST (1 << 17)
#define ISPIF_RST_CMD_0_RDI_OUTPUT_1_MISR_RST (1 << 18)
#define ISPIF_RST_CMD_0_RDI_OUTPUT_2_MISR_RST (1 << 19)
#define ISPIF_IRQ_GLOBAL_CLEAR_CMD 0x01c
#define ISPIF_VFE_m_CTRL_0(m) (0x200 + 0x200 * (m))
#define ISPIF_VFE_m_CTRL_0_PIX0_LINE_BUF_EN (1 << 6)
#define ISPIF_VFE_m_IRQ_MASK_0(m) (0x208 + 0x200 * (m))
#define ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE 0x00001249
#define ISPIF_VFE_m_IRQ_MASK_0_PIX0_MASK 0x00001fff
#define ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE 0x02492000
#define ISPIF_VFE_m_IRQ_MASK_0_RDI0_MASK 0x03ffe000
#define ISPIF_VFE_m_IRQ_MASK_1(m) (0x20c + 0x200 * (m))
#define ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE 0x00001249
#define ISPIF_VFE_m_IRQ_MASK_1_PIX1_MASK 0x00001fff
#define ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE 0x02492000
#define ISPIF_VFE_m_IRQ_MASK_1_RDI1_MASK 0x03ffe000
#define ISPIF_VFE_m_IRQ_MASK_2(m) (0x210 + 0x200 * (m))
#define ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE 0x00001249
#define ISPIF_VFE_m_IRQ_MASK_2_RDI2_MASK 0x00001fff
#define ISPIF_VFE_m_IRQ_STATUS_0(m) (0x21c + 0x200 * (m))
#define ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW (1 << 12)
#define ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW (1 << 25)
#define ISPIF_VFE_m_IRQ_STATUS_1(m) (0x220 + 0x200 * (m))
#define ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW (1 << 12)
#define ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW (1 << 25)
#define ISPIF_VFE_m_IRQ_STATUS_2(m) (0x224 + 0x200 * (m))
#define ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW (1 << 12)
#define ISPIF_VFE_m_IRQ_CLEAR_0(m) (0x230 + 0x200 * (m))
#define ISPIF_VFE_m_IRQ_CLEAR_1(m) (0x234 + 0x200 * (m))
#define ISPIF_VFE_m_IRQ_CLEAR_2(m) (0x238 + 0x200 * (m))
#define ISPIF_VFE_m_INTF_INPUT_SEL(m) (0x244 + 0x200 * (m))
#define ISPIF_VFE_m_INTF_CMD_0(m) (0x248 + 0x200 * (m))
#define ISPIF_VFE_m_INTF_CMD_1(m) (0x24c + 0x200 * (m))
#define ISPIF_VFE_m_PIX_INTF_n_CID_MASK(m, n) \
(0x254 + 0x200 * (m) + 0x4 * (n))
#define ISPIF_VFE_m_RDI_INTF_n_CID_MASK(m, n) \
(0x264 + 0x200 * (m) + 0x4 * (n))
/* PACK_CFG registers are 8x96 only */
#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(m, n) \
(0x270 + 0x200 * (m) + 0x4 * (n))
#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(m, n) \
(0x27c + 0x200 * (m) + 0x4 * (n))
#define ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0_CID_c_PLAIN(c) \
			(1 << (((c) % 8) * 4))
#define ISPIF_VFE_m_PIX_INTF_n_STATUS(m, n) \
(0x2c0 + 0x200 * (m) + 0x4 * (n))
#define ISPIF_VFE_m_RDI_INTF_n_STATUS(m, n) \
(0x2d0 + 0x200 * (m) + 0x4 * (n))
#define CSI_PIX_CLK_MUX_SEL 0x000
#define CSI_RDI_CLK_MUX_SEL 0x008
#define ISPIF_TIMEOUT_SLEEP_US 1000
#define ISPIF_TIMEOUT_ALL_US 1000000
#define ISPIF_RESET_TIMEOUT_MS 500
enum ispif_intf_cmd {
CMD_DISABLE_FRAME_BOUNDARY = 0x0,
CMD_ENABLE_FRAME_BOUNDARY = 0x1,
CMD_DISABLE_IMMEDIATELY = 0x2,
CMD_ALL_DISABLE_IMMEDIATELY = 0xaaaaaaaa,
CMD_ALL_NO_CHANGE = 0xffffffff,
};
static const u32 ispif_formats_8x16[] = {
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_VYUY8_2X8,
MEDIA_BUS_FMT_YUYV8_2X8,
MEDIA_BUS_FMT_YVYU8_2X8,
MEDIA_BUS_FMT_SBGGR8_1X8,
MEDIA_BUS_FMT_SGBRG8_1X8,
MEDIA_BUS_FMT_SGRBG8_1X8,
MEDIA_BUS_FMT_SRGGB8_1X8,
MEDIA_BUS_FMT_SBGGR10_1X10,
MEDIA_BUS_FMT_SGBRG10_1X10,
MEDIA_BUS_FMT_SGRBG10_1X10,
MEDIA_BUS_FMT_SRGGB10_1X10,
MEDIA_BUS_FMT_SBGGR12_1X12,
MEDIA_BUS_FMT_SGBRG12_1X12,
MEDIA_BUS_FMT_SGRBG12_1X12,
MEDIA_BUS_FMT_SRGGB12_1X12,
MEDIA_BUS_FMT_Y10_1X10,
};
static const u32 ispif_formats_8x96[] = {
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_VYUY8_2X8,
MEDIA_BUS_FMT_YUYV8_2X8,
MEDIA_BUS_FMT_YVYU8_2X8,
MEDIA_BUS_FMT_SBGGR8_1X8,
MEDIA_BUS_FMT_SGBRG8_1X8,
MEDIA_BUS_FMT_SGRBG8_1X8,
MEDIA_BUS_FMT_SRGGB8_1X8,
MEDIA_BUS_FMT_SBGGR10_1X10,
MEDIA_BUS_FMT_SGBRG10_1X10,
MEDIA_BUS_FMT_SGRBG10_1X10,
MEDIA_BUS_FMT_SRGGB10_1X10,
MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
MEDIA_BUS_FMT_SBGGR12_1X12,
MEDIA_BUS_FMT_SGBRG12_1X12,
MEDIA_BUS_FMT_SGRBG12_1X12,
MEDIA_BUS_FMT_SRGGB12_1X12,
MEDIA_BUS_FMT_SBGGR14_1X14,
MEDIA_BUS_FMT_SGBRG14_1X14,
MEDIA_BUS_FMT_SGRBG14_1X14,
MEDIA_BUS_FMT_SRGGB14_1X14,
MEDIA_BUS_FMT_Y10_1X10,
MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
};
/*
* ispif_isr_8x96 - ISPIF module interrupt handler for 8x96
* @irq: Interrupt line
* @dev: ISPIF device
*
* Return IRQ_HANDLED on success
*/
static irqreturn_t ispif_isr_8x96(int irq, void *dev)
{
struct ispif_device *ispif = dev;
struct camss *camss = ispif->camss;
u32 value0, value1, value2, value3, value4, value5;
value0 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(0));
value1 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(0));
value2 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(0));
value3 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(1));
value4 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(1));
value5 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(1));
writel_relaxed(value0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(0));
writel_relaxed(value1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(0));
writel_relaxed(value2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(0));
writel_relaxed(value3, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(1));
writel_relaxed(value4, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(1));
writel_relaxed(value5, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(1));
writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD);
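	/* Bit 27 of IRQ_STATUS_0 signals ISPIF reset-done for that VFE */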
if ((value0 >> 27) & 0x1)
complete(&ispif->reset_complete[0]);
if ((value3 >> 27) & 0x1)
complete(&ispif->reset_complete[1]);
if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE0 pix0 overflow\n");
if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE0 rdi0 overflow\n");
if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE0 pix1 overflow\n");
if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE0 rdi1 overflow\n");
if (unlikely(value2 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE0 rdi2 overflow\n");
if (unlikely(value3 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE1 pix0 overflow\n");
if (unlikely(value3 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE1 rdi0 overflow\n");
if (unlikely(value4 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE1 pix1 overflow\n");
if (unlikely(value4 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE1 rdi1 overflow\n");
if (unlikely(value5 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE1 rdi2 overflow\n");
return IRQ_HANDLED;
}
/*
* ispif_isr_8x16 - ISPIF module interrupt handler for 8x16
* @irq: Interrupt line
* @dev: ISPIF device
*
* Return IRQ_HANDLED on success
*/
static irqreturn_t ispif_isr_8x16(int irq, void *dev)
{
struct ispif_device *ispif = dev;
struct camss *camss = ispif->camss;
u32 value0, value1, value2;
value0 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_0(0));
value1 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_1(0));
value2 = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_STATUS_2(0));
writel_relaxed(value0, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(0));
writel_relaxed(value1, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(0));
writel_relaxed(value2, ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(0));
writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD);
if ((value0 >> 27) & 0x1)
complete(&ispif->reset_complete[0]);
if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_PIX0_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE0 pix0 overflow\n");
if (unlikely(value0 & ISPIF_VFE_m_IRQ_STATUS_0_RDI0_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE0 rdi0 overflow\n");
if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_PIX1_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE0 pix1 overflow\n");
if (unlikely(value1 & ISPIF_VFE_m_IRQ_STATUS_1_RDI1_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE0 rdi1 overflow\n");
if (unlikely(value2 & ISPIF_VFE_m_IRQ_STATUS_2_RDI2_OVERFLOW))
dev_err_ratelimited(camss->dev, "VFE0 rdi2 overflow\n");
return IRQ_HANDLED;
}
static int ispif_vfe_reset(struct ispif_device *ispif, u8 vfe_id)
{
struct camss *camss = ispif->camss;
unsigned long time;
u32 val;
if (vfe_id > (camss->vfe_num - 1)) {
dev_err(camss->dev,
"Error: asked reset for invalid VFE%d\n", vfe_id);
return -ENOENT;
}
reinit_completion(&ispif->reset_complete[vfe_id]);
val = ISPIF_RST_CMD_0_STROBED_RST_EN |
ISPIF_RST_CMD_0_MISC_LOGIC_RST |
ISPIF_RST_CMD_0_SW_REG_RST |
ISPIF_RST_CMD_0_PIX_INTF_0_CSID_RST |
ISPIF_RST_CMD_0_PIX_INTF_0_VFE_RST |
ISPIF_RST_CMD_0_PIX_INTF_1_CSID_RST |
ISPIF_RST_CMD_0_PIX_INTF_1_VFE_RST |
ISPIF_RST_CMD_0_RDI_INTF_0_CSID_RST |
ISPIF_RST_CMD_0_RDI_INTF_0_VFE_RST |
ISPIF_RST_CMD_0_RDI_INTF_1_CSID_RST |
ISPIF_RST_CMD_0_RDI_INTF_1_VFE_RST |
ISPIF_RST_CMD_0_RDI_INTF_2_CSID_RST |
ISPIF_RST_CMD_0_RDI_INTF_2_VFE_RST |
ISPIF_RST_CMD_0_PIX_OUTPUT_0_MISR_RST |
ISPIF_RST_CMD_0_RDI_OUTPUT_0_MISR_RST |
ISPIF_RST_CMD_0_RDI_OUTPUT_1_MISR_RST |
ISPIF_RST_CMD_0_RDI_OUTPUT_2_MISR_RST;
if (vfe_id == 1)
writel_relaxed(val, ispif->base + ISPIF_RST_CMD_1);
else
writel_relaxed(val, ispif->base + ISPIF_RST_CMD_0);
time = wait_for_completion_timeout(&ispif->reset_complete[vfe_id],
msecs_to_jiffies(ISPIF_RESET_TIMEOUT_MS));
if (!time) {
dev_err(camss->dev,
"ISPIF for VFE%d reset timeout\n", vfe_id);
return -EIO;
}
return 0;
}
/*
* ispif_reset - Trigger reset on ISPIF module and wait to complete
 * @ispif: ISPIF device
 * @vfe_id: VFE HW module id
*
* Return 0 on success or a negative error code otherwise
*/
static int ispif_reset(struct ispif_device *ispif, u8 vfe_id)
{
struct camss *camss = ispif->camss;
int ret;
	ret = camss_pm_domain_on(camss, PM_DOMAIN_VFE0);
	if (ret < 0)
		return ret;
	ret = camss_pm_domain_on(camss, PM_DOMAIN_VFE1);
	if (ret < 0)
		goto err_vfe0_off;
	ret = camss_enable_clocks(ispif->nclocks_for_reset,
				  ispif->clock_for_reset,
				  camss->dev);
	if (ret < 0)
		goto err_vfe1_off;
	ret = ispif_vfe_reset(ispif, vfe_id);
	if (ret)
		dev_dbg(camss->dev, "ISPIF Reset failed\n");
	camss_disable_clocks(ispif->nclocks_for_reset, ispif->clock_for_reset);
err_vfe1_off:
	camss_pm_domain_off(camss, PM_DOMAIN_VFE1);
err_vfe0_off:
	camss_pm_domain_off(camss, PM_DOMAIN_VFE0);
	return ret;
}
/*
* ispif_set_power - Power on/off ISPIF module
* @sd: ISPIF V4L2 subdevice
* @on: Requested power state
*
* Return 0 on success or a negative error code otherwise
*/
static int ispif_set_power(struct v4l2_subdev *sd, int on)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
struct ispif_device *ispif = line->ispif;
struct device *dev = ispif->camss->dev;
int ret = 0;
mutex_lock(&ispif->power_lock);
if (on) {
if (ispif->power_count) {
/* Power is already on */
ispif->power_count++;
goto exit;
}
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
goto exit;
ret = camss_enable_clocks(ispif->nclocks, ispif->clock, dev);
if (ret < 0) {
pm_runtime_put_sync(dev);
goto exit;
}
ret = ispif_reset(ispif, line->vfe_id);
if (ret < 0) {
pm_runtime_put_sync(dev);
camss_disable_clocks(ispif->nclocks, ispif->clock);
goto exit;
}
ispif->intf_cmd[line->vfe_id].cmd_0 = CMD_ALL_NO_CHANGE;
ispif->intf_cmd[line->vfe_id].cmd_1 = CMD_ALL_NO_CHANGE;
ispif->power_count++;
} else {
if (ispif->power_count == 0) {
dev_err(dev, "ispif power off on power_count == 0\n");
goto exit;
} else if (ispif->power_count == 1) {
camss_disable_clocks(ispif->nclocks, ispif->clock);
pm_runtime_put_sync(dev);
}
ispif->power_count--;
}
exit:
mutex_unlock(&ispif->power_lock);
return ret;
}
/*
* ispif_select_clk_mux - Select clock for PIX/RDI interface
* @ispif: ISPIF device
* @intf: VFE interface
* @csid: CSID HW module id
* @vfe: VFE HW module id
* @enable: enable or disable the selected clock
*/
static void ispif_select_clk_mux(struct ispif_device *ispif,
enum ispif_intf intf, u8 csid,
u8 vfe, u8 enable)
{
u32 val;
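	/*
	 * Each mux field is 4 bits wide: the PIX mux packs two interfaces
	 * per VFE (8 bits), the RDI mux packs three (12 bits).
	 */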
switch (intf) {
case PIX0:
val = readl_relaxed(ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL);
val &= ~(0xf << (vfe * 8));
if (enable)
val |= (csid << (vfe * 8));
writel_relaxed(val, ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL);
break;
case RDI0:
val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
val &= ~(0xf << (vfe * 12));
if (enable)
val |= (csid << (vfe * 12));
writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
break;
case PIX1:
val = readl_relaxed(ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL);
val &= ~(0xf << (4 + (vfe * 8)));
if (enable)
val |= (csid << (4 + (vfe * 8)));
writel_relaxed(val, ispif->base_clk_mux + CSI_PIX_CLK_MUX_SEL);
break;
case RDI1:
val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
val &= ~(0xf << (4 + (vfe * 12)));
if (enable)
val |= (csid << (4 + (vfe * 12)));
writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
break;
case RDI2:
val = readl_relaxed(ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
val &= ~(0xf << (8 + (vfe * 12)));
if (enable)
val |= (csid << (8 + (vfe * 12)));
writel_relaxed(val, ispif->base_clk_mux + CSI_RDI_CLK_MUX_SEL);
break;
}
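	/* Make sure the mux update is observed before the interface is used */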
mb();
}
/*
* ispif_validate_intf_status - Validate current status of PIX/RDI interface
* @ispif: ISPIF device
* @intf: VFE interface
* @vfe: VFE HW module id
*
* Return 0 when interface is idle or -EBUSY otherwise
*/
static int ispif_validate_intf_status(struct ispif_device *ispif,
enum ispif_intf intf, u8 vfe)
{
int ret = 0;
u32 val = 0;
switch (intf) {
case PIX0:
val = readl_relaxed(ispif->base +
ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 0));
break;
case RDI0:
val = readl_relaxed(ispif->base +
ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0));
break;
case PIX1:
val = readl_relaxed(ispif->base +
ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 1));
break;
case RDI1:
val = readl_relaxed(ispif->base +
ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 1));
break;
case RDI2:
val = readl_relaxed(ispif->base +
ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 2));
break;
}
if ((val & 0xf) != 0xf) {
dev_err(ispif->camss->dev, "%s: ispif is busy: 0x%x\n",
__func__, val);
ret = -EBUSY;
}
return ret;
}
/*
* ispif_wait_for_stop - Wait for PIX/RDI interface to stop
* @ispif: ISPIF device
* @intf: VFE interface
* @vfe: VFE HW module id
*
* Return 0 on success or a negative error code otherwise
*/
static int ispif_wait_for_stop(struct ispif_device *ispif,
enum ispif_intf intf, u8 vfe)
{
u32 addr = 0;
u32 stop_flag = 0;
int ret;
switch (intf) {
case PIX0:
addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 0);
break;
case RDI0:
addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 0);
break;
case PIX1:
addr = ISPIF_VFE_m_PIX_INTF_n_STATUS(vfe, 1);
break;
case RDI1:
addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 1);
break;
case RDI2:
addr = ISPIF_VFE_m_RDI_INTF_n_STATUS(vfe, 2);
break;
}
ret = readl_poll_timeout(ispif->base + addr,
stop_flag,
(stop_flag & 0xf) == 0xf,
ISPIF_TIMEOUT_SLEEP_US,
ISPIF_TIMEOUT_ALL_US);
if (ret < 0)
dev_err(ispif->camss->dev, "%s: ispif stop timeout\n",
__func__);
return ret;
}
/*
 * ispif_select_csid - Select CSID HW module as input for a VFE interface
* @ispif: ISPIF device
* @intf: VFE interface
* @csid: CSID HW module id
* @vfe: VFE HW module id
* @enable: enable or disable the selected input
*/
static void ispif_select_csid(struct ispif_device *ispif, enum ispif_intf intf,
u8 csid, u8 vfe, u8 enable)
{
u32 val;
val = readl_relaxed(ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe));
switch (intf) {
case PIX0:
val &= ~(BIT(1) | BIT(0));
if (enable)
val |= csid;
break;
case RDI0:
val &= ~(BIT(5) | BIT(4));
if (enable)
val |= (csid << 4);
break;
case PIX1:
val &= ~(BIT(9) | BIT(8));
if (enable)
val |= (csid << 8);
break;
case RDI1:
val &= ~(BIT(13) | BIT(12));
if (enable)
val |= (csid << 12);
break;
case RDI2:
val &= ~(BIT(21) | BIT(20));
if (enable)
val |= (csid << 20);
break;
}
writel(val, ispif->base + ISPIF_VFE_m_INTF_INPUT_SEL(vfe));
}
/*
* ispif_select_cid - Enable/disable desired CID
* @ispif: ISPIF device
* @intf: VFE interface
* @cid: desired CID to enable/disable
* @vfe: VFE HW module id
* @enable: enable or disable the desired CID
*/
static void ispif_select_cid(struct ispif_device *ispif, enum ispif_intf intf,
u8 cid, u8 vfe, u8 enable)
{
u32 cid_mask = 1 << cid;
u32 addr = 0;
u32 val;
switch (intf) {
case PIX0:
addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 0);
break;
case RDI0:
addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 0);
break;
case PIX1:
addr = ISPIF_VFE_m_PIX_INTF_n_CID_MASK(vfe, 1);
break;
case RDI1:
addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 1);
break;
case RDI2:
addr = ISPIF_VFE_m_RDI_INTF_n_CID_MASK(vfe, 2);
break;
}
val = readl_relaxed(ispif->base + addr);
if (enable)
val |= cid_mask;
else
val &= ~cid_mask;
writel(val, ispif->base + addr);
}
/*
* ispif_config_irq - Enable/disable interrupts for PIX/RDI interface
* @ispif: ISPIF device
* @intf: VFE interface
* @vfe: VFE HW module id
* @enable: enable or disable
*/
static void ispif_config_irq(struct ispif_device *ispif, enum ispif_intf intf,
u8 vfe, u8 enable)
{
u32 val;
switch (intf) {
case PIX0:
val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe));
val &= ~ISPIF_VFE_m_IRQ_MASK_0_PIX0_MASK;
if (enable)
val |= ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE;
writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe));
writel_relaxed(ISPIF_VFE_m_IRQ_MASK_0_PIX0_ENABLE,
ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe));
break;
case RDI0:
val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe));
val &= ~ISPIF_VFE_m_IRQ_MASK_0_RDI0_MASK;
if (enable)
val |= ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE;
writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_0(vfe));
writel_relaxed(ISPIF_VFE_m_IRQ_MASK_0_RDI0_ENABLE,
ispif->base + ISPIF_VFE_m_IRQ_CLEAR_0(vfe));
break;
case PIX1:
val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe));
val &= ~ISPIF_VFE_m_IRQ_MASK_1_PIX1_MASK;
if (enable)
val |= ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE;
writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe));
writel_relaxed(ISPIF_VFE_m_IRQ_MASK_1_PIX1_ENABLE,
ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe));
break;
case RDI1:
val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe));
val &= ~ISPIF_VFE_m_IRQ_MASK_1_RDI1_MASK;
if (enable)
val |= ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE;
writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_1(vfe));
writel_relaxed(ISPIF_VFE_m_IRQ_MASK_1_RDI1_ENABLE,
ispif->base + ISPIF_VFE_m_IRQ_CLEAR_1(vfe));
break;
case RDI2:
val = readl_relaxed(ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe));
val &= ~ISPIF_VFE_m_IRQ_MASK_2_RDI2_MASK;
if (enable)
val |= ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE;
writel_relaxed(val, ispif->base + ISPIF_VFE_m_IRQ_MASK_2(vfe));
writel_relaxed(ISPIF_VFE_m_IRQ_MASK_2_RDI2_ENABLE,
ispif->base + ISPIF_VFE_m_IRQ_CLEAR_2(vfe));
break;
}
writel(0x1, ispif->base + ISPIF_IRQ_GLOBAL_CLEAR_CMD);
}
/*
* ispif_config_pack - Config packing for PRDI mode
* @ispif: ISPIF device
* @code: media bus format code
* @intf: VFE interface
* @cid: desired CID to handle
* @vfe: VFE HW module id
* @enable: enable or disable
*/
static void ispif_config_pack(struct ispif_device *ispif, u32 code,
enum ispif_intf intf, u8 cid, u8 vfe, u8 enable)
{
u32 addr, val;
if (code != MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE &&
code != MEDIA_BUS_FMT_Y10_2X8_PADHI_LE)
return;
switch (intf) {
case RDI0:
if (cid < 8)
addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 0);
else
addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 0);
break;
case RDI1:
if (cid < 8)
addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 1);
else
addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 1);
break;
case RDI2:
if (cid < 8)
addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0(vfe, 2);
else
addr = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_1(vfe, 2);
break;
default:
return;
}
if (enable)
val = ISPIF_VFE_m_RDI_INTF_n_PACK_CFG_0_CID_c_PLAIN(cid);
else
val = 0;
writel_relaxed(val, ispif->base + addr);
}
/*
* ispif_set_intf_cmd - Set command to enable/disable interface
* @ispif: ISPIF device
* @cmd: interface command
* @intf: VFE interface
* @vfe: VFE HW module id
* @vc: virtual channel
*/
static void ispif_set_intf_cmd(struct ispif_device *ispif, u8 cmd,
enum ispif_intf intf, u8 vfe, u8 vc)
{
u32 *val;
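	/*
	 * Each VC uses a 2-bit command field; RDI2 commands live in
	 * INTF_CMD_1 at an 8-bit offset, all others in INTF_CMD_0.
	 */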
if (intf == RDI2) {
val = &ispif->intf_cmd[vfe].cmd_1;
*val &= ~(0x3 << (vc * 2 + 8));
*val |= (cmd << (vc * 2 + 8));
wmb();
writel_relaxed(*val, ispif->base + ISPIF_VFE_m_INTF_CMD_1(vfe));
wmb();
} else {
val = &ispif->intf_cmd[vfe].cmd_0;
*val &= ~(0x3 << (vc * 2 + intf * 8));
*val |= (cmd << (vc * 2 + intf * 8));
wmb();
writel_relaxed(*val, ispif->base + ISPIF_VFE_m_INTF_CMD_0(vfe));
wmb();
}
}
/*
* ispif_set_stream - Enable/disable streaming on ISPIF module
* @sd: ISPIF V4L2 subdevice
* @enable: Requested streaming state
*
* Main configuration of ISPIF module is also done here.
*
* Return 0 on success or a negative error code otherwise
*/
static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
struct ispif_device *ispif = line->ispif;
struct camss *camss = ispif->camss;
enum ispif_intf intf = line->interface;
u8 csid = line->csid_id;
u8 vfe = line->vfe_id;
u8 vc = 0; /* Virtual Channel 0 */
u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */
int ret;
if (enable) {
if (!media_pad_remote_pad_first(&line->pads[MSM_ISPIF_PAD_SINK]))
return -ENOLINK;
/* Config */
mutex_lock(&ispif->config_lock);
ispif_select_clk_mux(ispif, intf, csid, vfe, 1);
ret = ispif_validate_intf_status(ispif, intf, vfe);
if (ret < 0) {
mutex_unlock(&ispif->config_lock);
return ret;
}
ispif_select_csid(ispif, intf, csid, vfe, 1);
ispif_select_cid(ispif, intf, cid, vfe, 1);
ispif_config_irq(ispif, intf, vfe, 1);
if (camss->version == CAMSS_8x96 ||
camss->version == CAMSS_660)
ispif_config_pack(ispif,
line->fmt[MSM_ISPIF_PAD_SINK].code,
intf, cid, vfe, 1);
ispif_set_intf_cmd(ispif, CMD_ENABLE_FRAME_BOUNDARY,
intf, vfe, vc);
} else {
mutex_lock(&ispif->config_lock);
ispif_set_intf_cmd(ispif, CMD_DISABLE_FRAME_BOUNDARY,
intf, vfe, vc);
mutex_unlock(&ispif->config_lock);
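		/*
		 * Poll for the stop without holding the lock: the interface
		 * only halts at the next frame boundary.
		 */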
ret = ispif_wait_for_stop(ispif, intf, vfe);
if (ret < 0)
return ret;
mutex_lock(&ispif->config_lock);
if (camss->version == CAMSS_8x96 ||
camss->version == CAMSS_660)
ispif_config_pack(ispif,
line->fmt[MSM_ISPIF_PAD_SINK].code,
intf, cid, vfe, 0);
ispif_config_irq(ispif, intf, vfe, 0);
ispif_select_cid(ispif, intf, cid, vfe, 0);
ispif_select_csid(ispif, intf, csid, vfe, 0);
ispif_select_clk_mux(ispif, intf, csid, vfe, 0);
}
mutex_unlock(&ispif->config_lock);
return 0;
}
/*
* __ispif_get_format - Get pointer to format structure
 * @line: ISPIF line
 * @sd_state: V4L2 subdev state
* @pad: pad from which format is requested
* @which: TRY or ACTIVE format
*
* Return pointer to TRY or ACTIVE format structure
*/
static struct v4l2_mbus_framefmt *
__ispif_get_format(struct ispif_line *line,
struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&line->subdev, sd_state,
pad);
return &line->fmt[pad];
}
/*
* ispif_try_format - Handle try format by pad subdev method
 * @line: ISPIF line
 * @sd_state: V4L2 subdev state
* @pad: pad on which format is requested
* @fmt: pointer to v4l2 format structure
* @which: wanted subdev format
*/
static void ispif_try_format(struct ispif_line *line,
struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
unsigned int i;
switch (pad) {
case MSM_ISPIF_PAD_SINK:
/* Set format on sink pad */
for (i = 0; i < line->nformats; i++)
if (fmt->code == line->formats[i])
break;
/* If not found, use UYVY as default */
if (i >= line->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
fmt->field = V4L2_FIELD_NONE;
fmt->colorspace = V4L2_COLORSPACE_SRGB;
break;
case MSM_ISPIF_PAD_SRC:
/* Set and return a format same as sink pad */
*fmt = *__ispif_get_format(line, sd_state, MSM_ISPIF_PAD_SINK,
which);
break;
}
}
/*
* ispif_enum_mbus_code - Handle pixel format enumeration
* @sd: ISPIF V4L2 subdevice
 * @sd_state: V4L2 subdev state
* @code: pointer to v4l2_subdev_mbus_code_enum structure
 * Return -EINVAL or zero on success
*/
static int ispif_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
if (code->pad == MSM_ISPIF_PAD_SINK) {
if (code->index >= line->nformats)
return -EINVAL;
code->code = line->formats[code->index];
} else {
if (code->index > 0)
return -EINVAL;
format = __ispif_get_format(line, sd_state,
MSM_ISPIF_PAD_SINK,
code->which);
code->code = format->code;
}
return 0;
}
/*
* ispif_enum_frame_size - Handle frame size enumeration
* @sd: ISPIF V4L2 subdevice
 * @sd_state: V4L2 subdev state
* @fse: pointer to v4l2_subdev_frame_size_enum structure
 * Return -EINVAL or zero on success
*/
static int ispif_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt format;
if (fse->index != 0)
return -EINVAL;
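	/* Let try_format clamp a 1x1 request to discover the minimum size */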
format.code = fse->code;
format.width = 1;
format.height = 1;
ispif_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
if (format.code != fse->code)
return -EINVAL;
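	/* Let try_format clamp an oversized (-1) request to discover the maximum */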
format.code = fse->code;
format.width = -1;
format.height = -1;
ispif_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
return 0;
}
/*
* ispif_get_format - Handle get format by pads subdev method
* @sd: ISPIF V4L2 subdevice
 * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
*/
static int ispif_get_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
format = __ispif_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
fmt->format = *format;
return 0;
}
/*
* ispif_set_format - Handle set format by pads subdev method
* @sd: ISPIF V4L2 subdevice
 * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
*/
static int ispif_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct ispif_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
format = __ispif_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
ispif_try_format(line, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source */
if (fmt->pad == MSM_ISPIF_PAD_SINK) {
format = __ispif_get_format(line, sd_state, MSM_ISPIF_PAD_SRC,
fmt->which);
*format = fmt->format;
ispif_try_format(line, sd_state, MSM_ISPIF_PAD_SRC, format,
fmt->which);
}
return 0;
}
/*
* ispif_init_formats - Initialize formats on all pads
* @sd: ISPIF V4L2 subdevice
* @fh: V4L2 subdev file handle
*
* Initialize all pad formats with default values.
*
* Return 0 on success or a negative error code otherwise
*/
static int ispif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_subdev_format format = {
.pad = MSM_ISPIF_PAD_SINK,
.which = fh ? V4L2_SUBDEV_FORMAT_TRY :
V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
.code = MEDIA_BUS_FMT_UYVY8_2X8,
.width = 1920,
.height = 1080
}
};
return ispif_set_format(sd, fh ? fh->state : NULL, &format);
}
/*
* msm_ispif_subdev_init - Initialize ISPIF device structure and resources
 * @camss: CAMSS device
* @res: ISPIF module resources table
*
* Return 0 on success or a negative error code otherwise
*/
int msm_ispif_subdev_init(struct camss *camss,
const struct resources_ispif *res)
{
struct device *dev = camss->dev;
struct ispif_device *ispif = camss->ispif;
struct platform_device *pdev = to_platform_device(dev);
int i;
int ret;
if (!camss->ispif)
return 0;
ispif->camss = camss;
/* Number of ISPIF lines - same as number of CSID hardware modules */
if (camss->version == CAMSS_8x16)
ispif->line_num = 2;
else if (camss->version == CAMSS_8x96 ||
camss->version == CAMSS_660)
ispif->line_num = 4;
else
return -EINVAL;
ispif->line = devm_kcalloc(dev, ispif->line_num,
sizeof(*ispif->line), GFP_KERNEL);
if (!ispif->line)
return -ENOMEM;
for (i = 0; i < ispif->line_num; i++) {
ispif->line[i].ispif = ispif;
ispif->line[i].id = i;
if (camss->version == CAMSS_8x16) {
ispif->line[i].formats = ispif_formats_8x16;
ispif->line[i].nformats =
ARRAY_SIZE(ispif_formats_8x16);
} else if (camss->version == CAMSS_8x96 ||
camss->version == CAMSS_660) {
ispif->line[i].formats = ispif_formats_8x96;
ispif->line[i].nformats =
ARRAY_SIZE(ispif_formats_8x96);
} else {
return -EINVAL;
}
}
/* Memory */
ispif->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
if (IS_ERR(ispif->base))
return PTR_ERR(ispif->base);
ispif->base_clk_mux = devm_platform_ioremap_resource_byname(pdev, res->reg[1]);
if (IS_ERR(ispif->base_clk_mux))
return PTR_ERR(ispif->base_clk_mux);
/* Interrupt */
ret = platform_get_irq_byname(pdev, res->interrupt);
if (ret < 0)
return ret;
ispif->irq = ret;
snprintf(ispif->irq_name, sizeof(ispif->irq_name), "%s_%s",
dev_name(dev), MSM_ISPIF_NAME);
if (camss->version == CAMSS_8x16)
ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x16,
IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
else if (camss->version == CAMSS_8x96 ||
camss->version == CAMSS_660)
ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x96,
IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
else
ret = -EINVAL;
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
return ret;
}
/* Clocks */
ispif->nclocks = 0;
while (res->clock[ispif->nclocks])
ispif->nclocks++;
ispif->clock = devm_kcalloc(dev,
ispif->nclocks, sizeof(*ispif->clock),
GFP_KERNEL);
if (!ispif->clock)
return -ENOMEM;
for (i = 0; i < ispif->nclocks; i++) {
struct camss_clock *clock = &ispif->clock[i];
clock->clk = devm_clk_get(dev, res->clock[i]);
if (IS_ERR(clock->clk))
return PTR_ERR(clock->clk);
clock->freq = NULL;
clock->nfreqs = 0;
}
ispif->nclocks_for_reset = 0;
while (res->clock_for_reset[ispif->nclocks_for_reset])
ispif->nclocks_for_reset++;
ispif->clock_for_reset = devm_kcalloc(dev,
ispif->nclocks_for_reset,
sizeof(*ispif->clock_for_reset),
GFP_KERNEL);
if (!ispif->clock_for_reset)
return -ENOMEM;
for (i = 0; i < ispif->nclocks_for_reset; i++) {
struct camss_clock *clock = &ispif->clock_for_reset[i];
clock->clk = devm_clk_get(dev, res->clock_for_reset[i]);
if (IS_ERR(clock->clk))
return PTR_ERR(clock->clk);
clock->freq = NULL;
clock->nfreqs = 0;
}
mutex_init(&ispif->power_lock);
ispif->power_count = 0;
mutex_init(&ispif->config_lock);
for (i = 0; i < MSM_ISPIF_VFE_NUM; i++)
init_completion(&ispif->reset_complete[i]);
return 0;
}
/*
* ispif_get_intf - Get ISPIF interface to use by VFE line id
* @line_id: VFE line id that the ISPIF line is connected to
*
* Return ISPIF interface to use
*/
static enum ispif_intf ispif_get_intf(enum vfe_line_id line_id)
{
switch (line_id) {
case (VFE_LINE_RDI0):
return RDI0;
case (VFE_LINE_RDI1):
return RDI1;
case (VFE_LINE_RDI2):
return RDI2;
case (VFE_LINE_PIX):
return PIX0;
default:
return RDI0;
}
}
/*
* ispif_get_vfe_id - Get VFE HW module id
* @entity: Pointer to VFE media entity structure
 * @id: Return VFE HW module id here
*/
static void ispif_get_vfe_id(struct media_entity *entity, u8 *id)
{
struct v4l2_subdev *sd;
struct vfe_line *line;
struct vfe_device *vfe;
sd = media_entity_to_v4l2_subdev(entity);
line = v4l2_get_subdevdata(sd);
vfe = to_vfe(line);
*id = vfe->id;
}
/*
* ispif_get_vfe_line_id - Get VFE line id by media entity
* @entity: Pointer to VFE media entity structure
* @id: Return VFE line id here
*/
static void ispif_get_vfe_line_id(struct media_entity *entity,
enum vfe_line_id *id)
{
struct v4l2_subdev *sd;
struct vfe_line *line;
sd = media_entity_to_v4l2_subdev(entity);
line = v4l2_get_subdevdata(sd);
*id = line->id;
}
/*
* ispif_link_setup - Setup ISPIF connections
* @entity: Pointer to media entity structure
* @local: Pointer to local pad
* @remote: Pointer to remote pad
* @flags: Link flags
*
* Return 0 on success
*/
static int ispif_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
{
if (flags & MEDIA_LNK_FL_ENABLED) {
if (media_pad_remote_pad_first(local))
return -EBUSY;
if (local->flags & MEDIA_PAD_FL_SINK) {
struct v4l2_subdev *sd;
struct ispif_line *line;
sd = media_entity_to_v4l2_subdev(entity);
line = v4l2_get_subdevdata(sd);
msm_csid_get_csid_id(remote->entity, &line->csid_id);
} else { /* MEDIA_PAD_FL_SOURCE */
struct v4l2_subdev *sd;
struct ispif_line *line;
enum vfe_line_id id;
sd = media_entity_to_v4l2_subdev(entity);
line = v4l2_get_subdevdata(sd);
ispif_get_vfe_id(remote->entity, &line->vfe_id);
ispif_get_vfe_line_id(remote->entity, &id);
line->interface = ispif_get_intf(id);
}
}
return 0;
}
static const struct v4l2_subdev_core_ops ispif_core_ops = {
.s_power = ispif_set_power,
};
static const struct v4l2_subdev_video_ops ispif_video_ops = {
.s_stream = ispif_set_stream,
};
static const struct v4l2_subdev_pad_ops ispif_pad_ops = {
.enum_mbus_code = ispif_enum_mbus_code,
.enum_frame_size = ispif_enum_frame_size,
.get_fmt = ispif_get_format,
.set_fmt = ispif_set_format,
};
static const struct v4l2_subdev_ops ispif_v4l2_ops = {
.core = &ispif_core_ops,
.video = &ispif_video_ops,
.pad = &ispif_pad_ops,
};
static const struct v4l2_subdev_internal_ops ispif_v4l2_internal_ops = {
.open = ispif_init_formats,
};
static const struct media_entity_operations ispif_media_ops = {
.link_setup = ispif_link_setup,
.link_validate = v4l2_subdev_link_validate,
};
/*
* msm_ispif_register_entities - Register subdev node for ISPIF module
* @ispif: ISPIF device
* @v4l2_dev: V4L2 device
*
* Return 0 on success or a negative error code otherwise
*/
int msm_ispif_register_entities(struct ispif_device *ispif,
struct v4l2_device *v4l2_dev)
{
struct camss *camss;
int ret;
int i;
if (!ispif)
return 0;
camss = ispif->camss;
for (i = 0; i < ispif->line_num; i++) {
struct v4l2_subdev *sd = &ispif->line[i].subdev;
struct media_pad *pads = ispif->line[i].pads;
v4l2_subdev_init(sd, &ispif_v4l2_ops);
sd->internal_ops = &ispif_v4l2_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
MSM_ISPIF_NAME, i);
v4l2_set_subdevdata(sd, &ispif->line[i]);
ret = ispif_init_formats(sd, NULL);
if (ret < 0) {
dev_err(camss->dev, "Failed to init format: %d\n", ret);
goto error;
}
pads[MSM_ISPIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
pads[MSM_ISPIF_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
sd->entity.ops = &ispif_media_ops;
ret = media_entity_pads_init(&sd->entity, MSM_ISPIF_PADS_NUM,
pads);
if (ret < 0) {
dev_err(camss->dev, "Failed to init media entity: %d\n",
ret);
goto error;
}
ret = v4l2_device_register_subdev(v4l2_dev, sd);
if (ret < 0) {
dev_err(camss->dev, "Failed to register subdev: %d\n",
ret);
media_entity_cleanup(&sd->entity);
goto error;
}
}
return 0;
error:
for (i--; i >= 0; i--) {
struct v4l2_subdev *sd = &ispif->line[i].subdev;
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
}
return ret;
}
/*
* msm_ispif_unregister_entities - Unregister ISPIF module subdev node
* @ispif: ISPIF device
*/
void msm_ispif_unregister_entities(struct ispif_device *ispif)
{
int i;
if (!ispif)
return;
mutex_destroy(&ispif->power_lock);
mutex_destroy(&ispif->config_lock);
for (i = 0; i < ispif->line_num; i++) {
struct v4l2_subdev *sd = &ispif->line[i].subdev;
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
}
}
| linux-master | drivers/media/platform/qcom/camss/camss-ispif.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-video.c
*
* Qualcomm MSM Camera Subsystem - V4L2 device node
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/slab.h>
#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-dma-sg.h>
#include "camss-video.h"
#include "camss.h"
#define CAMSS_FRAME_MIN_WIDTH 1
#define CAMSS_FRAME_MAX_WIDTH 8191
#define CAMSS_FRAME_MIN_HEIGHT 1
#define CAMSS_FRAME_MAX_HEIGHT_RDI 8191
#define CAMSS_FRAME_MAX_HEIGHT_PIX 4096
/*
 * struct fract - Simple fraction type
 * @numerator: fraction numerator
 * @denominator: fraction denominator
 *
 * Used for the per-plane horizontal and vertical subsampling ratios.
 */
struct fract {
	u8 numerator;
	u8 denominator;
};
/*
* struct camss_format_info - ISP media bus format information
* @code: V4L2 media bus format code
* @pixelformat: V4L2 pixel format FCC identifier
* @planes: Number of planes
* @hsub: Horizontal subsampling (for each plane)
* @vsub: Vertical subsampling (for each plane)
* @bpp: Bits per pixel when stored in memory (for each plane)
*/
struct camss_format_info {
u32 code;
u32 pixelformat;
u8 planes;
struct fract hsub[3];
struct fract vsub[3];
unsigned int bpp[3];
};
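/*
 * Reading the tables below: the formats_pix_8x16 entry
 * { MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1,
 *   { { 1, 1 } }, { { 2, 3 } }, { 8 } }
 * describes NV12 stored in a single plane with no horizontal
 * subsampling, a vertical fraction of 2/3 (the plane spans
 * height * 3 / 2 lines of luma plus interleaved chroma) and 8 bits
 * per pixel per line.
 */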
static const struct camss_format_info formats_rdi_8x16[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
};
static const struct camss_format_info formats_rdi_8x96[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_PIX_FMT_SBGGR10, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SBGGR14_1X14, V4L2_PIX_FMT_SBGGR14P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 14 } },
{ MEDIA_BUS_FMT_SGBRG14_1X14, V4L2_PIX_FMT_SGBRG14P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 14 } },
{ MEDIA_BUS_FMT_SGRBG14_1X14, V4L2_PIX_FMT_SGRBG14P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 14 } },
{ MEDIA_BUS_FMT_SRGGB14_1X14, V4L2_PIX_FMT_SRGGB14P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 14 } },
{ MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, V4L2_PIX_FMT_Y10, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
};
static const struct camss_format_info formats_rdi_845[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_PIX_FMT_SBGGR10, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 12 } },
{ MEDIA_BUS_FMT_SBGGR14_1X14, V4L2_PIX_FMT_SBGGR14P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 14 } },
{ MEDIA_BUS_FMT_SGBRG14_1X14, V4L2_PIX_FMT_SGBRG14P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 14 } },
{ MEDIA_BUS_FMT_SGRBG14_1X14, V4L2_PIX_FMT_SGRBG14P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 14 } },
{ MEDIA_BUS_FMT_SRGGB14_1X14, V4L2_PIX_FMT_SRGGB14P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 14 } },
{ MEDIA_BUS_FMT_Y8_1X8, V4L2_PIX_FMT_GREY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 8 } },
{ MEDIA_BUS_FMT_Y10_1X10, V4L2_PIX_FMT_Y10P, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 10 } },
{ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, V4L2_PIX_FMT_Y10, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
};
static const struct camss_format_info formats_pix_8x16[] = {
{ MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV21, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV21, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
};
static const struct camss_format_info formats_pix_8x96[] = {
{ MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV12, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV12, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV12, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_YUYV8_1_5X8, V4L2_PIX_FMT_NV21, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_YVYU8_1_5X8, V4L2_PIX_FMT_NV21, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_UYVY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
{ { 1, 1 } }, { { 2, 3 } }, { 8 } },
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1,
{ { 1, 1 } }, { { 1, 2 } }, { 8 } },
{ MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
{ MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
{ { 1, 1 } }, { { 1, 1 } }, { 16 } },
};
/* -----------------------------------------------------------------------------
* Helper functions
*/
static int video_find_format(u32 code, u32 pixelformat,
const struct camss_format_info *formats,
unsigned int nformats)
{
int i;
for (i = 0; i < nformats; i++) {
if (formats[i].code == code &&
formats[i].pixelformat == pixelformat)
return i;
}
for (i = 0; i < nformats; i++)
if (formats[i].code == code)
return i;
WARN_ON(1);
return -EINVAL;
}
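/*
 * video_find_format() above prefers an exact (code, pixelformat) pair
 * and falls back to the first entry with a matching media bus code.
 * Looking up (MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_UYVY) in
 * formats_pix_8x16, for example, has no exact match, so the NV16
 * entry - the first with that bus code - is returned.
 */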
/*
* video_mbus_to_pix_mp - Convert v4l2_mbus_framefmt to v4l2_pix_format_mplane
* @mbus: v4l2_mbus_framefmt format (input)
* @pix: v4l2_pix_format_mplane format (output)
* @f: a pointer to formats array element to be used for the conversion
* @alignment: bytesperline alignment value
*
* Fill the output pix structure with information from the input mbus format.
*
* Return 0 on success or a negative error code otherwise
*/
static int video_mbus_to_pix_mp(const struct v4l2_mbus_framefmt *mbus,
struct v4l2_pix_format_mplane *pix,
const struct camss_format_info *f,
unsigned int alignment)
{
unsigned int i;
u32 bytesperline;
memset(pix, 0, sizeof(*pix));
v4l2_fill_pix_format_mplane(pix, mbus);
pix->pixelformat = f->pixelformat;
pix->num_planes = f->planes;
for (i = 0; i < pix->num_planes; i++) {
bytesperline = pix->width / f->hsub[i].numerator *
f->hsub[i].denominator * f->bpp[i] / 8;
bytesperline = ALIGN(bytesperline, alignment);
pix->plane_fmt[i].bytesperline = bytesperline;
pix->plane_fmt[i].sizeimage = pix->height /
f->vsub[i].numerator * f->vsub[i].denominator *
bytesperline;
}
return 0;
}
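/*
 * Sizing example for video_mbus_to_pix_mp() above: NV12 (hsub = 1/1,
 * vsub = 2/3, bpp = 8) at 1920x1080 with 8-byte bytesperline
 * alignment gives
 *
 *   bytesperline = 1920 / 1 * 1 * 8 / 8 = 1920 (already aligned)
 *   sizeimage    = 1080 / 2 * 3 * 1920  = 3110400
 *
 * the 2/3 vertical fraction accounting for the 1.5x plane height of
 * luma plus chroma.
 */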
static struct v4l2_subdev *video_remote_subdev(struct camss_video *video,
u32 *pad)
{
struct media_pad *remote;
remote = media_pad_remote_pad_first(&video->pad);
if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
return NULL;
if (pad)
*pad = remote->index;
return media_entity_to_v4l2_subdev(remote->entity);
}
static int video_get_subdev_format(struct camss_video *video,
struct v4l2_format *format)
{
struct v4l2_subdev_format fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
struct v4l2_subdev *subdev;
u32 pad;
int ret;
subdev = video_remote_subdev(video, &pad);
if (subdev == NULL)
return -EPIPE;
fmt.pad = pad;
ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
if (ret)
return ret;
ret = video_find_format(fmt.format.code,
format->fmt.pix_mp.pixelformat,
video->formats, video->nformats);
if (ret < 0)
return ret;
format->type = video->type;
return video_mbus_to_pix_mp(&fmt.format, &format->fmt.pix_mp,
&video->formats[ret], video->bpl_alignment);
}
/* -----------------------------------------------------------------------------
* Video queue operations
*/
static int video_queue_setup(struct vb2_queue *q,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct camss_video *video = vb2_get_drv_priv(q);
const struct v4l2_pix_format_mplane *format =
&video->active_fmt.fmt.pix_mp;
unsigned int i;
if (*num_planes) {
if (*num_planes != format->num_planes)
return -EINVAL;
for (i = 0; i < *num_planes; i++)
if (sizes[i] < format->plane_fmt[i].sizeimage)
return -EINVAL;
return 0;
}
*num_planes = format->num_planes;
for (i = 0; i < *num_planes; i++)
sizes[i] = format->plane_fmt[i].sizeimage;
return 0;
}
static int video_buf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue);
struct camss_buffer *buffer = container_of(vbuf, struct camss_buffer,
vb);
const struct v4l2_pix_format_mplane *format =
&video->active_fmt.fmt.pix_mp;
struct sg_table *sgt;
unsigned int i;
for (i = 0; i < format->num_planes; i++) {
sgt = vb2_dma_sg_plane_desc(vb, i);
if (!sgt)
return -EFAULT;
buffer->addr[i] = sg_dma_address(sgt->sgl);
}
if (format->pixelformat == V4L2_PIX_FMT_NV12 ||
format->pixelformat == V4L2_PIX_FMT_NV21 ||
format->pixelformat == V4L2_PIX_FMT_NV16 ||
format->pixelformat == V4L2_PIX_FMT_NV61)
buffer->addr[1] = buffer->addr[0] +
format->plane_fmt[0].bytesperline *
format->height;
return 0;
}
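/*
 * For the single-allocation NV12/NV21/NV16/NV61 formats handled above
 * the chroma plane immediately follows the luma plane, e.g. NV12 at
 * 1920x1080 with bytesperline = 1920 gives
 * addr[1] = addr[0] + 1920 * 1080.
 */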
static int video_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue);
const struct v4l2_pix_format_mplane *format =
&video->active_fmt.fmt.pix_mp;
unsigned int i;
for (i = 0; i < format->num_planes; i++) {
if (format->plane_fmt[i].sizeimage > vb2_plane_size(vb, i))
return -EINVAL;
vb2_set_plane_payload(vb, i, format->plane_fmt[i].sizeimage);
}
vbuf->field = V4L2_FIELD_NONE;
return 0;
}
static void video_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue);
struct camss_buffer *buffer = container_of(vbuf, struct camss_buffer,
vb);
video->ops->queue_buffer(video, buffer);
}
static int video_check_format(struct camss_video *video)
{
struct v4l2_pix_format_mplane *pix = &video->active_fmt.fmt.pix_mp;
struct v4l2_format format;
struct v4l2_pix_format_mplane *sd_pix = &format.fmt.pix_mp;
int ret;
sd_pix->pixelformat = pix->pixelformat;
ret = video_get_subdev_format(video, &format);
if (ret < 0)
return ret;
if (pix->pixelformat != sd_pix->pixelformat ||
pix->height != sd_pix->height ||
pix->width != sd_pix->width ||
pix->num_planes != sd_pix->num_planes ||
pix->field != format.fmt.pix_mp.field)
return -EPIPE;
return 0;
}
static int video_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct camss_video *video = vb2_get_drv_priv(q);
struct video_device *vdev = &video->vdev;
struct media_entity *entity;
struct media_pad *pad;
struct v4l2_subdev *subdev;
int ret;
ret = video_device_pipeline_alloc_start(vdev);
if (ret < 0) {
dev_err(video->camss->dev, "Failed to start media pipeline: %d\n", ret);
goto flush_buffers;
}
ret = video_check_format(video);
if (ret < 0)
goto error;
entity = &vdev->entity;
while (1) {
pad = &entity->pads[0];
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
entity = pad->entity;
subdev = media_entity_to_v4l2_subdev(entity);
ret = v4l2_subdev_call(subdev, video, s_stream, 1);
if (ret < 0 && ret != -ENOIOCTLCMD)
goto error;
}
return 0;
error:
video_device_pipeline_stop(vdev);
flush_buffers:
video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED);
return ret;
}
static void video_stop_streaming(struct vb2_queue *q)
{
struct camss_video *video = vb2_get_drv_priv(q);
struct video_device *vdev = &video->vdev;
struct media_entity *entity;
struct media_pad *pad;
struct v4l2_subdev *subdev;
int ret;
entity = &vdev->entity;
while (1) {
pad = &entity->pads[0];
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
entity = pad->entity;
subdev = media_entity_to_v4l2_subdev(entity);
ret = v4l2_subdev_call(subdev, video, s_stream, 0);
if (entity->use_count > 1) {
/* Don't stop if other instances of the pipeline are still running */
dev_dbg(video->camss->dev, "Video pipeline still used, don't stop streaming.\n");
return;
}
if (ret) {
dev_err(video->camss->dev, "Video pipeline stop failed: %d\n", ret);
return;
}
}
video_device_pipeline_stop(vdev);
video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR);
}
static const struct vb2_ops msm_video_vb2_q_ops = {
.queue_setup = video_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.buf_init = video_buf_init,
.buf_prepare = video_buf_prepare,
.buf_queue = video_buf_queue,
.start_streaming = video_start_streaming,
.stop_streaming = video_stop_streaming,
};
/* -----------------------------------------------------------------------------
* V4L2 ioctls
*/
static int video_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
strscpy(cap->driver, "qcom-camss", sizeof(cap->driver));
strscpy(cap->card, "Qualcomm Camera Subsystem", sizeof(cap->card));
return 0;
}
static int video_enum_fmt(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
struct camss_video *video = video_drvdata(file);
int i, j, k;
u32 mcode = f->mbus_code;
if (f->type != video->type)
return -EINVAL;
if (f->index >= video->nformats)
return -EINVAL;
/*
* Find index "i" of "k"th unique pixelformat in formats array.
*
* If f->mbus_code passed to video_enum_fmt() is not zero, a device
* with V4L2_CAP_IO_MC capability restricts enumeration to only the
* pixel formats that can be produced from that media bus code.
* This is implemented by skipping video->formats[] entries with
* code != f->mbus_code (if f->mbus_code is not zero).
* If the f->mbus_code passed to video_enum_fmt() is not supported,
* -EINVAL is returned.
* If f->mbus_code is zero, all the pixel formats are enumerated.
*/
k = -1;
for (i = 0; i < video->nformats; i++) {
if (mcode != 0 && video->formats[i].code != mcode)
continue;
for (j = 0; j < i; j++) {
if (mcode != 0 && video->formats[j].code != mcode)
continue;
if (video->formats[i].pixelformat ==
video->formats[j].pixelformat)
break;
}
if (j == i)
k++;
if (k == f->index)
break;
}
if (k == -1 || k < f->index)
/*
* All the unique pixel formats matching the arguments
* have been enumerated (k >= 0 and f->index > 0), or
* no pixel formats match the non-zero f->mbus_code (k == -1).
*/
return -EINVAL;
f->pixelformat = video->formats[i].pixelformat;
return 0;
}
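/*
 * Enumeration example with f->mbus_code == 0 on formats_pix_8x16: the
 * table has 16 entries but only four unique pixel formats, so
 * f->index 0-3 return NV12, NV21, NV16 and NV61 and f->index >= 4
 * fails with -EINVAL.
 */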
static int video_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
struct camss_video *video = video_drvdata(file);
int i;
if (fsize->index)
return -EINVAL;
	/* Only accept a pixel format present in the formats[] table */
for (i = 0; i < video->nformats; i++) {
if (video->formats[i].pixelformat == fsize->pixel_format)
break;
}
if (i == video->nformats)
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
fsize->stepwise.min_width = CAMSS_FRAME_MIN_WIDTH;
fsize->stepwise.max_width = CAMSS_FRAME_MAX_WIDTH;
fsize->stepwise.min_height = CAMSS_FRAME_MIN_HEIGHT;
fsize->stepwise.max_height = (video->line_based) ?
CAMSS_FRAME_MAX_HEIGHT_PIX : CAMSS_FRAME_MAX_HEIGHT_RDI;
fsize->stepwise.step_width = 1;
fsize->stepwise.step_height = 1;
return 0;
}
static int video_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct camss_video *video = video_drvdata(file);
*f = video->active_fmt;
return 0;
}
static int __video_try_fmt(struct camss_video *video, struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp;
const struct camss_format_info *fi;
struct v4l2_plane_pix_format *p;
u32 bytesperline[3] = { 0 };
u32 sizeimage[3] = { 0 };
u32 width, height;
u32 bpl, lines;
int i, j;
pix_mp = &f->fmt.pix_mp;
if (video->line_based)
for (i = 0; i < pix_mp->num_planes && i < 3; i++) {
p = &pix_mp->plane_fmt[i];
bytesperline[i] = clamp_t(u32, p->bytesperline,
1, 65528);
sizeimage[i] = clamp_t(u32, p->sizeimage,
bytesperline[i],
bytesperline[i] * CAMSS_FRAME_MAX_HEIGHT_PIX);
}
for (j = 0; j < video->nformats; j++)
if (pix_mp->pixelformat == video->formats[j].pixelformat)
break;
if (j == video->nformats)
j = 0; /* default format */
fi = &video->formats[j];
width = pix_mp->width;
height = pix_mp->height;
memset(pix_mp, 0, sizeof(*pix_mp));
pix_mp->pixelformat = fi->pixelformat;
pix_mp->width = clamp_t(u32, width, 1, CAMSS_FRAME_MAX_WIDTH);
pix_mp->height = clamp_t(u32, height, 1, CAMSS_FRAME_MAX_HEIGHT_RDI);
pix_mp->num_planes = fi->planes;
for (i = 0; i < pix_mp->num_planes; i++) {
bpl = pix_mp->width / fi->hsub[i].numerator *
fi->hsub[i].denominator * fi->bpp[i] / 8;
bpl = ALIGN(bpl, video->bpl_alignment);
pix_mp->plane_fmt[i].bytesperline = bpl;
pix_mp->plane_fmt[i].sizeimage = pix_mp->height /
fi->vsub[i].numerator * fi->vsub[i].denominator * bpl;
}
pix_mp->field = V4L2_FIELD_NONE;
pix_mp->colorspace = V4L2_COLORSPACE_SRGB;
pix_mp->flags = 0;
pix_mp->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix_mp->colorspace);
pix_mp->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true,
pix_mp->colorspace, pix_mp->ycbcr_enc);
pix_mp->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix_mp->colorspace);
if (video->line_based)
for (i = 0; i < pix_mp->num_planes; i++) {
p = &pix_mp->plane_fmt[i];
p->bytesperline = clamp_t(u32, p->bytesperline,
1, 65528);
p->sizeimage = clamp_t(u32, p->sizeimage,
p->bytesperline,
p->bytesperline * CAMSS_FRAME_MAX_HEIGHT_PIX);
lines = p->sizeimage / p->bytesperline;
if (p->bytesperline < bytesperline[i])
p->bytesperline = ALIGN(bytesperline[i], 8);
if (p->sizeimage < p->bytesperline * lines)
p->sizeimage = p->bytesperline * lines;
if (p->sizeimage < sizeimage[i])
p->sizeimage = sizeimage[i];
}
return 0;
}
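/*
 * For line-based (PIX) outputs __video_try_fmt() above lets userspace
 * ask for a larger bytesperline/sizeimage than the minimum: the
 * requested values are clamped on entry, defaults are computed from
 * the format table, and the final values are the larger of the two
 * (bytesperline aligned to 8). E.g. requesting bytesperline = 2048
 * for 1920-wide NV12 yields 2048 rather than the minimal 1920.
 */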
static int video_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct camss_video *video = video_drvdata(file);
return __video_try_fmt(video, f);
}
static int video_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct camss_video *video = video_drvdata(file);
int ret;
if (vb2_is_busy(&video->vb2_q))
return -EBUSY;
ret = __video_try_fmt(video, f);
if (ret < 0)
return ret;
video->active_fmt = *f;
return 0;
}
static int video_enum_input(struct file *file, void *fh,
struct v4l2_input *input)
{
if (input->index > 0)
return -EINVAL;
strscpy(input->name, "camera", sizeof(input->name));
input->type = V4L2_INPUT_TYPE_CAMERA;
return 0;
}
static int video_g_input(struct file *file, void *fh, unsigned int *input)
{
*input = 0;
return 0;
}
static int video_s_input(struct file *file, void *fh, unsigned int input)
{
return input == 0 ? 0 : -EINVAL;
}
static const struct v4l2_ioctl_ops msm_vid_ioctl_ops = {
.vidioc_querycap = video_querycap,
.vidioc_enum_fmt_vid_cap = video_enum_fmt,
.vidioc_enum_framesizes = video_enum_framesizes,
.vidioc_g_fmt_vid_cap_mplane = video_g_fmt,
.vidioc_s_fmt_vid_cap_mplane = video_s_fmt,
.vidioc_try_fmt_vid_cap_mplane = video_try_fmt,
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_enum_input = video_enum_input,
.vidioc_g_input = video_g_input,
.vidioc_s_input = video_s_input,
};
/* -----------------------------------------------------------------------------
* V4L2 file operations
*/
static int video_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct camss_video *video = video_drvdata(file);
struct v4l2_fh *vfh;
int ret;
mutex_lock(&video->lock);
vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
if (vfh == NULL) {
ret = -ENOMEM;
goto error_alloc;
}
v4l2_fh_init(vfh, vdev);
v4l2_fh_add(vfh);
file->private_data = vfh;
ret = v4l2_pipeline_pm_get(&vdev->entity);
if (ret < 0) {
dev_err(video->camss->dev, "Failed to power up pipeline: %d\n",
ret);
goto error_pm_use;
}
mutex_unlock(&video->lock);
return 0;
error_pm_use:
v4l2_fh_release(file);
error_alloc:
mutex_unlock(&video->lock);
return ret;
}
static int video_release(struct file *file)
{
struct video_device *vdev = video_devdata(file);
vb2_fop_release(file);
v4l2_pipeline_pm_put(&vdev->entity);
file->private_data = NULL;
return 0;
}
static const struct v4l2_file_operations msm_vid_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = video_ioctl2,
.open = video_open,
.release = video_release,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
.read = vb2_fop_read,
};
/* -----------------------------------------------------------------------------
* CAMSS video core
*/
static void msm_video_release(struct video_device *vdev)
{
struct camss_video *video = video_get_drvdata(vdev);
media_entity_cleanup(&vdev->entity);
mutex_destroy(&video->q_lock);
mutex_destroy(&video->lock);
if (atomic_dec_and_test(&video->camss->ref_count))
camss_delete(video->camss);
}
/*
* msm_video_init_format - Helper function to initialize format
* @video: struct camss_video
*
* Initialize pad format with default value.
*
* Return 0 on success or a negative error code otherwise
*/
static int msm_video_init_format(struct camss_video *video)
{
int ret;
struct v4l2_format format = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
.fmt.pix_mp = {
.width = 1920,
.height = 1080,
.pixelformat = video->formats[0].pixelformat,
},
};
ret = __video_try_fmt(video, &format);
if (ret < 0)
return ret;
video->active_fmt = format;
return 0;
}
/*
* msm_video_register - Register a video device node
* @video: struct camss_video
* @v4l2_dev: V4L2 device
* @name: name to be used for the video device node
*
* Initialize and register a video device node to a V4L2 device. Also
* initialize the vb2 queue.
*
* Return 0 on success or a negative error code otherwise
*/
int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
const char *name, int is_pix)
{
struct media_pad *pad = &video->pad;
struct video_device *vdev;
struct vb2_queue *q;
int ret;
vdev = &video->vdev;
mutex_init(&video->q_lock);
q = &video->vb2_q;
q->drv_priv = video;
q->mem_ops = &vb2_dma_sg_memops;
q->ops = &msm_video_vb2_q_ops;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
q->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->buf_struct_size = sizeof(struct camss_buffer);
q->dev = video->camss->dev;
q->lock = &video->q_lock;
ret = vb2_queue_init(q);
if (ret < 0) {
dev_err(v4l2_dev->dev, "Failed to init vb2 queue: %d\n", ret);
goto error_vb2_init;
}
pad->flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vdev->entity, 1, pad);
if (ret < 0) {
dev_err(v4l2_dev->dev, "Failed to init video entity: %d\n",
ret);
goto error_vb2_init;
}
mutex_init(&video->lock);
if (video->camss->version == CAMSS_8x16) {
if (is_pix) {
video->formats = formats_pix_8x16;
video->nformats = ARRAY_SIZE(formats_pix_8x16);
} else {
video->formats = formats_rdi_8x16;
video->nformats = ARRAY_SIZE(formats_rdi_8x16);
}
} else if (video->camss->version == CAMSS_8x96 ||
video->camss->version == CAMSS_660) {
if (is_pix) {
video->formats = formats_pix_8x96;
video->nformats = ARRAY_SIZE(formats_pix_8x96);
} else {
video->formats = formats_rdi_8x96;
video->nformats = ARRAY_SIZE(formats_rdi_8x96);
}
} else if (video->camss->version == CAMSS_845 ||
video->camss->version == CAMSS_8250) {
video->formats = formats_rdi_845;
video->nformats = ARRAY_SIZE(formats_rdi_845);
} else {
ret = -EINVAL;
goto error_video_register;
}
ret = msm_video_init_format(video);
if (ret < 0) {
dev_err(v4l2_dev->dev, "Failed to init format: %d\n", ret);
goto error_video_register;
}
vdev->fops = &msm_vid_fops;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING
| V4L2_CAP_READWRITE | V4L2_CAP_IO_MC;
vdev->ioctl_ops = &msm_vid_ioctl_ops;
vdev->release = msm_video_release;
vdev->v4l2_dev = v4l2_dev;
vdev->vfl_dir = VFL_DIR_RX;
vdev->queue = &video->vb2_q;
vdev->lock = &video->lock;
strscpy(vdev->name, name, sizeof(vdev->name));
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
dev_err(v4l2_dev->dev, "Failed to register video device: %d\n",
ret);
goto error_video_register;
}
video_set_drvdata(vdev, video);
atomic_inc(&video->camss->ref_count);
return 0;
error_video_register:
media_entity_cleanup(&vdev->entity);
mutex_destroy(&video->lock);
error_vb2_init:
mutex_destroy(&video->q_lock);
return ret;
}
void msm_video_unregister(struct camss_video *video)
{
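	/*
	 * Take a temporary extra reference so that msm_video_release()
	 * does not drop the last reference and delete the camss device
	 * while the video device node is being unregistered.
	 */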
atomic_inc(&video->camss->ref_count);
vb2_video_unregister_device(&video->vdev);
atomic_dec(&video->camss->ref_count);
}
| linux-master | drivers/media/platform/qcom/camss/camss-video.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-vfe-170.c
*
* Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v170
*
* Copyright (C) 2020-2021 Linaro Ltd.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss.h"
#include "camss-vfe.h"
#define VFE_HW_VERSION (0x000)
#define VFE_GLOBAL_RESET_CMD (0x018)
#define GLOBAL_RESET_CMD_CORE BIT(0)
#define GLOBAL_RESET_CMD_CAMIF BIT(1)
#define GLOBAL_RESET_CMD_BUS BIT(2)
#define GLOBAL_RESET_CMD_BUS_BDG BIT(3)
#define GLOBAL_RESET_CMD_REGISTER BIT(4)
#define GLOBAL_RESET_CMD_PM BIT(5)
#define GLOBAL_RESET_CMD_BUS_MISR BIT(6)
#define GLOBAL_RESET_CMD_TESTGEN BIT(7)
#define GLOBAL_RESET_CMD_DSP BIT(8)
#define GLOBAL_RESET_CMD_IDLE_CGC BIT(9)
#define GLOBAL_RESET_CMD_RDI0 BIT(10)
#define GLOBAL_RESET_CMD_RDI1 BIT(11)
#define GLOBAL_RESET_CMD_RDI2 BIT(12)
#define GLOBAL_RESET_CMD_RDI3 BIT(13)
#define GLOBAL_RESET_CMD_VFE_DOMAIN BIT(30)
#define GLOBAL_RESET_CMD_RESET_BYPASS BIT(31)
#define VFE_CORE_CFG (0x050)
#define CFG_PIXEL_PATTERN_YCBYCR (0x4)
#define CFG_PIXEL_PATTERN_YCRYCB (0x5)
#define CFG_PIXEL_PATTERN_CBYCRY (0x6)
#define CFG_PIXEL_PATTERN_CRYCBY (0x7)
#define CFG_COMPOSITE_REG_UPDATE_EN BIT(4)
#define VFE_IRQ_CMD (0x058)
#define CMD_GLOBAL_CLEAR BIT(0)
#define VFE_IRQ_MASK_0 (0x05c)
#define MASK_0_CAMIF_SOF BIT(0)
#define MASK_0_CAMIF_EOF BIT(1)
#define MASK_0_RDI_REG_UPDATE(n) BIT((n) + 5)
#define MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
#define MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
#define MASK_0_RESET_ACK BIT(31)
#define VFE_IRQ_MASK_1 (0x060)
#define MASK_1_CAMIF_ERROR BIT(0)
#define MASK_1_VIOLATION BIT(7)
#define MASK_1_BUS_BDG_HALT_ACK BIT(8)
#define MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
#define MASK_1_RDI_SOF(n) BIT((n) + 29)
#define VFE_IRQ_CLEAR_0 (0x064)
#define VFE_IRQ_CLEAR_1 (0x068)
#define VFE_IRQ_STATUS_0 (0x06c)
#define STATUS_0_CAMIF_SOF BIT(0)
#define STATUS_0_RDI_REG_UPDATE(n) BIT((n) + 5)
#define STATUS_0_IMAGE_MASTER_PING_PONG(n) BIT((n) + 8)
#define STATUS_0_IMAGE_COMPOSITE_DONE(n) BIT((n) + 25)
#define STATUS_0_RESET_ACK BIT(31)
#define VFE_IRQ_STATUS_1 (0x070)
#define STATUS_1_VIOLATION BIT(7)
#define STATUS_1_BUS_BDG_HALT_ACK BIT(8)
#define STATUS_1_RDI_SOF(n) BIT((n) + 27)
#define VFE_VIOLATION_STATUS (0x07c)
#define VFE_CAMIF_CMD (0x478)
#define CMD_CLEAR_CAMIF_STATUS BIT(2)
#define VFE_CAMIF_CFG (0x47c)
#define CFG_VSYNC_SYNC_EDGE (0)
#define VSYNC_ACTIVE_HIGH (0)
#define VSYNC_ACTIVE_LOW (1)
#define CFG_HSYNC_SYNC_EDGE (1)
#define HSYNC_ACTIVE_HIGH (0)
#define HSYNC_ACTIVE_LOW (1)
#define CFG_VFE_SUBSAMPLE_ENABLE BIT(4)
#define CFG_BUS_SUBSAMPLE_ENABLE BIT(5)
#define CFG_VFE_OUTPUT_EN BIT(6)
#define CFG_BUS_OUTPUT_EN BIT(7)
#define CFG_BINNING_EN BIT(9)
#define CFG_FRAME_BASED_EN BIT(10)
#define CFG_RAW_CROP_EN BIT(22)
#define VFE_REG_UPDATE_CMD (0x4ac)
#define REG_UPDATE_RDI(n) BIT(1 + (n))
#define VFE_BUS_IRQ_MASK(n) (0x2044 + (n) * 4)
#define VFE_BUS_IRQ_CLEAR(n) (0x2050 + (n) * 4)
#define VFE_BUS_IRQ_STATUS(n) (0x205c + (n) * 4)
#define STATUS0_COMP_RESET_DONE BIT(0)
#define STATUS0_COMP_REG_UPDATE0_DONE BIT(1)
#define STATUS0_COMP_REG_UPDATE1_DONE BIT(2)
#define STATUS0_COMP_REG_UPDATE2_DONE BIT(3)
#define STATUS0_COMP_REG_UPDATE3_DONE BIT(4)
#define STATUS0_COMP_REG_UPDATE_DONE(n) BIT((n) + 1)
#define STATUS0_COMP0_BUF_DONE BIT(5)
#define STATUS0_COMP1_BUF_DONE BIT(6)
#define STATUS0_COMP2_BUF_DONE BIT(7)
#define STATUS0_COMP3_BUF_DONE BIT(8)
#define STATUS0_COMP4_BUF_DONE BIT(9)
#define STATUS0_COMP5_BUF_DONE BIT(10)
#define STATUS0_COMP_BUF_DONE(n) BIT((n) + 5)
#define STATUS0_COMP_ERROR BIT(11)
#define STATUS0_COMP_OVERWRITE BIT(12)
#define STATUS0_OVERFLOW BIT(13)
#define STATUS0_VIOLATION BIT(14)
/* WM_CLIENT_BUF_DONE defined for buffers 0:19 */
#define STATUS1_WM_CLIENT_BUF_DONE(n) BIT(n)
#define STATUS1_EARLY_DONE BIT(24)
#define STATUS2_DUAL_COMP0_BUF_DONE BIT(0)
#define STATUS2_DUAL_COMP1_BUF_DONE BIT(1)
#define STATUS2_DUAL_COMP2_BUF_DONE BIT(2)
#define STATUS2_DUAL_COMP3_BUF_DONE BIT(3)
#define STATUS2_DUAL_COMP4_BUF_DONE BIT(4)
#define STATUS2_DUAL_COMP5_BUF_DONE BIT(5)
#define STATUS2_DUAL_COMP_BUF_DONE(n) BIT(n)
#define STATUS2_DUAL_COMP_ERROR BIT(6)
#define STATUS2_DUAL_COMP_OVERWRITE BIT(7)
#define VFE_BUS_IRQ_CLEAR_GLOBAL (0x2068)
#define VFE_BUS_WM_DEBUG_STATUS_CFG (0x226c)
#define DEBUG_STATUS_CFG_STATUS0(n) BIT(n)
#define DEBUG_STATUS_CFG_STATUS1(n) BIT(8 + (n))
#define VFE_BUS_WM_ADDR_SYNC_FRAME_HEADER (0x2080)
#define VFE_BUS_WM_ADDR_SYNC_NO_SYNC (0x2084)
#define BUS_VER2_MAX_CLIENTS (24)
#define WM_ADDR_NO_SYNC_DEFAULT_VAL \
((1 << BUS_VER2_MAX_CLIENTS) - 1)
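/*
 * With BUS_VER2_MAX_CLIENTS == 24, WM_ADDR_NO_SYNC_DEFAULT_VAL
 * evaluates to (1 << 24) - 1 == 0x00ffffff, i.e. one bit set per
 * write master client.
 */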
#define VFE_BUS_WM_CGC_OVERRIDE (0x200c)
#define WM_CGC_OVERRIDE_ALL (0xFFFFF)
#define VFE_BUS_WM_TEST_BUS_CTRL (0x211c)
#define VFE_BUS_WM_STATUS0(n) (0x2200 + (n) * 0x100)
#define VFE_BUS_WM_STATUS1(n) (0x2204 + (n) * 0x100)
#define VFE_BUS_WM_CFG(n) (0x2208 + (n) * 0x100)
#define WM_CFG_EN (0)
#define WM_CFG_MODE (1)
#define MODE_QCOM_PLAIN (0)
#define MODE_MIPI_RAW (1)
#define WM_CFG_VIRTUALFRAME (2)
#define VFE_BUS_WM_HEADER_ADDR(n) (0x220c + (n) * 0x100)
#define VFE_BUS_WM_HEADER_CFG(n) (0x2210 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_ADDR(n) (0x2214 + (n) * 0x100)
#define VFE_BUS_WM_IMAGE_ADDR_OFFSET(n) (0x2218 + (n) * 0x100)
#define VFE_BUS_WM_BUFFER_WIDTH_CFG(n) (0x221c + (n) * 0x100)
#define WM_BUFFER_DEFAULT_WIDTH (0xFF01)
#define VFE_BUS_WM_BUFFER_HEIGHT_CFG(n) (0x2220 + (n) * 0x100)
#define VFE_BUS_WM_PACKER_CFG(n) (0x2224 + (n) * 0x100)
#define VFE_BUS_WM_STRIDE(n) (0x2228 + (n) * 0x100)
#define WM_STRIDE_DEFAULT_STRIDE (0xFF01)
#define VFE_BUS_WM_IRQ_SUBSAMPLE_PERIOD(n) (0x2248 + (n) * 0x100)
#define VFE_BUS_WM_IRQ_SUBSAMPLE_PATTERN(n) (0x224c + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PERIOD(n) (0x2250 + (n) * 0x100)
#define VFE_BUS_WM_FRAMEDROP_PATTERN(n) (0x2254 + (n) * 0x100)
#define VFE_BUS_WM_FRAME_INC(n) (0x2258 + (n) * 0x100)
#define VFE_BUS_WM_BURST_LIMIT(n) (0x225c + (n) * 0x100)
static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
u32 gen = (hw_version >> 28) & 0xF;
u32 rev = (hw_version >> 16) & 0xFFF;
u32 step = hw_version & 0xFFFF;
dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n",
gen, rev, step);
return hw_version;
}
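/*
 * In vfe_hw_version() above a raw value of 0x20600000, for instance,
 * would decode as gen 2, rev 0x060, step 0.
 */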
static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
writel_relaxed(bits | set_bits, vfe->base + reg);
}
static void vfe_global_reset(struct vfe_device *vfe)
{
u32 reset_bits = GLOBAL_RESET_CMD_CORE |
GLOBAL_RESET_CMD_CAMIF |
GLOBAL_RESET_CMD_BUS |
GLOBAL_RESET_CMD_BUS_BDG |
GLOBAL_RESET_CMD_REGISTER |
GLOBAL_RESET_CMD_TESTGEN |
GLOBAL_RESET_CMD_DSP |
GLOBAL_RESET_CMD_IDLE_CGC |
GLOBAL_RESET_CMD_RDI0 |
GLOBAL_RESET_CMD_RDI1 |
GLOBAL_RESET_CMD_RDI2;
	writel_relaxed(BIT(31), vfe->base + VFE_IRQ_MASK_0); /* MASK_0_RESET_ACK */
/* Make sure IRQ mask has been written before resetting */
wmb();
writel_relaxed(reset_bits, vfe->base + VFE_GLOBAL_RESET_CMD);
}
static void vfe_wm_start(struct vfe_device *vfe, u8 wm, struct vfe_line *line)
{
u32 val;
	/* Set debug registers */
val = DEBUG_STATUS_CFG_STATUS0(1) |
DEBUG_STATUS_CFG_STATUS0(7);
writel_relaxed(val, vfe->base + VFE_BUS_WM_DEBUG_STATUS_CFG);
/* BUS_WM_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
writel_relaxed(0, vfe->base + VFE_BUS_WM_ADDR_SYNC_FRAME_HEADER);
/* no clock gating at bus input */
val = WM_CGC_OVERRIDE_ALL;
writel_relaxed(val, vfe->base + VFE_BUS_WM_CGC_OVERRIDE);
writel_relaxed(0x0, vfe->base + VFE_BUS_WM_TEST_BUS_CTRL);
	/*
	 * If addr_no_sync still has its default value, configure the
	 * address no-sync register.
	 */
val = WM_ADDR_NO_SYNC_DEFAULT_VAL;
writel_relaxed(val, vfe->base + VFE_BUS_WM_ADDR_SYNC_NO_SYNC);
writel_relaxed(0xf, vfe->base + VFE_BUS_WM_BURST_LIMIT(wm));
val = WM_BUFFER_DEFAULT_WIDTH;
writel_relaxed(val, vfe->base + VFE_BUS_WM_BUFFER_WIDTH_CFG(wm));
val = 0;
writel_relaxed(val, vfe->base + VFE_BUS_WM_BUFFER_HEIGHT_CFG(wm));
val = 0;
	writel_relaxed(val, vfe->base + VFE_BUS_WM_PACKER_CFG(wm)); /* XXX: 1 for PLAIN8? */
/* Configure stride for RDIs */
val = WM_STRIDE_DEFAULT_STRIDE;
writel_relaxed(val, vfe->base + VFE_BUS_WM_STRIDE(wm));
/* Enable WM */
val = 1 << WM_CFG_EN |
MODE_MIPI_RAW << WM_CFG_MODE;
writel_relaxed(val, vfe->base + VFE_BUS_WM_CFG(wm));
}
static void vfe_wm_stop(struct vfe_device *vfe, u8 wm)
{
/* Disable WM */
writel_relaxed(0, vfe->base + VFE_BUS_WM_CFG(wm));
}
static void vfe_wm_update(struct vfe_device *vfe, u8 wm, u32 addr,
struct vfe_line *line)
{
struct v4l2_pix_format_mplane *pix =
&line->video_out.active_fmt.fmt.pix_mp;
u32 stride = pix->plane_fmt[0].bytesperline;
writel_relaxed(addr, vfe->base + VFE_BUS_WM_IMAGE_ADDR(wm));
writel_relaxed(stride * pix->height, vfe->base + VFE_BUS_WM_FRAME_INC(wm));
}
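/*
 * The frame increment written above is the byte distance between the
 * starts of two consecutive frames in memory; e.g. with
 * bytesperline = 3840 (1920 pixels, 16bpp packed) and height = 1080
 * it is 3840 * 1080 = 4147200 bytes.
 */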
static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
vfe->reg_update |= REG_UPDATE_RDI(line_id);
/* Enforce ordering between previous reg writes and reg update */
wmb();
writel_relaxed(vfe->reg_update, vfe->base + VFE_REG_UPDATE_CMD);
/* Enforce ordering between reg update and subsequent reg writes */
wmb();
}
static inline void vfe_reg_update_clear(struct vfe_device *vfe,
enum vfe_line_id line_id)
{
vfe->reg_update &= ~REG_UPDATE_RDI(line_id);
}
static void vfe_enable_irq_common(struct vfe_device *vfe)
{
vfe_reg_set(vfe, VFE_IRQ_MASK_0, ~0u);
vfe_reg_set(vfe, VFE_IRQ_MASK_1, ~0u);
writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(0));
writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(1));
writel_relaxed(~0u, vfe->base + VFE_BUS_IRQ_MASK(2));
}
static void vfe_isr_halt_ack(struct vfe_device *vfe)
{
complete(&vfe->halt_complete);
}
static void vfe_isr_read(struct vfe_device *vfe, u32 *status0, u32 *status1)
{
*status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0);
*status1 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_1);
writel_relaxed(*status0, vfe->base + VFE_IRQ_CLEAR_0);
writel_relaxed(*status1, vfe->base + VFE_IRQ_CLEAR_1);
/* Enforce ordering between IRQ Clear and Global IRQ Clear */
wmb();
writel_relaxed(CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);
}
static void vfe_violation_read(struct vfe_device *vfe)
{
u32 violation = readl_relaxed(vfe->base + VFE_VIOLATION_STATUS);
pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
}
/*
* vfe_isr - VFE module interrupt handler
* @irq: Interrupt line
* @dev: VFE device
*
* Return IRQ_HANDLED on success
*/
static irqreturn_t vfe_isr(int irq, void *dev)
{
struct vfe_device *vfe = dev;
u32 status0, status1, vfe_bus_status[3];
int i, wm;
status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0);
status1 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_1);
writel_relaxed(status0, vfe->base + VFE_IRQ_CLEAR_0);
writel_relaxed(status1, vfe->base + VFE_IRQ_CLEAR_1);
for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) {
vfe_bus_status[i] = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(i));
writel_relaxed(vfe_bus_status[i], vfe->base + VFE_BUS_IRQ_CLEAR(i));
}
/* Enforce ordering between IRQ reading and interpretation */
wmb();
writel_relaxed(CMD_GLOBAL_CLEAR, vfe->base + VFE_IRQ_CMD);
writel_relaxed(1, vfe->base + VFE_BUS_IRQ_CLEAR_GLOBAL);
if (status0 & STATUS_0_RESET_ACK)
vfe->isr_ops.reset_ack(vfe);
for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
if (status0 & STATUS_0_RDI_REG_UPDATE(i))
vfe->isr_ops.reg_update(vfe, i);
for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
if (status0 & STATUS_1_RDI_SOF(i))
vfe->isr_ops.sof(vfe, i);
for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
if (vfe_bus_status[0] & STATUS0_COMP_BUF_DONE(i))
vfe->isr_ops.comp_done(vfe, i);
for (wm = 0; wm < MSM_VFE_IMAGE_MASTERS_NUM; wm++)
		if (status0 & BIT(9)) /* == STATUS_0_IMAGE_MASTER_PING_PONG(1) */
if (vfe_bus_status[1] & STATUS1_WM_CLIENT_BUF_DONE(wm))
vfe->isr_ops.wm_done(vfe, wm);
return IRQ_HANDLED;
}
/*
* vfe_halt - Trigger halt on VFE module and wait to complete
* @vfe: VFE device
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_halt(struct vfe_device *vfe)
{
/* rely on vfe_disable_output() to stop the VFE */
return 0;
}
static int vfe_get_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
int wm_idx;
spin_lock_irqsave(&vfe->output_lock, flags);
output = &line->output;
if (output->state > VFE_OUTPUT_RESERVED) {
dev_err(vfe->camss->dev, "Output is running\n");
goto error;
}
output->wm_num = 1;
wm_idx = vfe_reserve_wm(vfe, line->id);
if (wm_idx < 0) {
dev_err(vfe->camss->dev, "Can not reserve wm\n");
goto error_get_wm;
}
output->wm_idx[0] = wm_idx;
output->drop_update_idx = 0;
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
error_get_wm:
vfe_release_wm(vfe, output->wm_idx[0]);
output->state = VFE_OUTPUT_OFF;
error:
spin_unlock_irqrestore(&vfe->output_lock, flags);
return -EINVAL;
}
static int vfe_enable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
const struct vfe_hw_ops *ops = vfe->ops;
struct media_entity *sensor;
unsigned long flags;
unsigned int frame_skip = 0;
unsigned int i;
sensor = camss_find_sensor(&line->subdev.entity);
if (sensor) {
struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor);
v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
/* Max frame skip is 29 frames */
if (frame_skip > VFE_FRAME_DROP_VAL - 1)
frame_skip = VFE_FRAME_DROP_VAL - 1;
}
spin_lock_irqsave(&vfe->output_lock, flags);
ops->reg_update_clear(vfe, line->id);
if (output->state > VFE_OUTPUT_RESERVED) {
dev_err(vfe->camss->dev, "Output is not in reserved state %d\n",
output->state);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return -EINVAL;
}
WARN_ON(output->gen2.active_num);
output->state = VFE_OUTPUT_ON;
output->sequence = 0;
output->wait_reg_update = 0;
reinit_completion(&output->reg_update);
vfe_wm_start(vfe, output->wm_idx[0], line);
for (i = 0; i < 2; i++) {
output->buf[i] = vfe_buf_get_pending(output);
if (!output->buf[i])
break;
output->gen2.active_num++;
vfe_wm_update(vfe, output->wm_idx[0], output->buf[i]->addr[0], line);
}
ops->reg_update(vfe, line->id);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
static int vfe_disable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
unsigned long flags;
unsigned int i;
bool done;
int timeout = 0;
do {
spin_lock_irqsave(&vfe->output_lock, flags);
done = !output->gen2.active_num;
spin_unlock_irqrestore(&vfe->output_lock, flags);
usleep_range(10000, 20000);
if (timeout++ == 100) {
dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
vfe_reset(vfe);
output->gen2.active_num = 0;
return 0;
}
} while (!done);
spin_lock_irqsave(&vfe->output_lock, flags);
for (i = 0; i < output->wm_num; i++)
vfe_wm_stop(vfe, output->wm_idx[i]);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
/*
* vfe_enable - Enable streaming on VFE line
* @line: VFE line
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_enable(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
int ret;
mutex_lock(&vfe->stream_lock);
if (!vfe->stream_count)
vfe_enable_irq_common(vfe);
vfe->stream_count++;
mutex_unlock(&vfe->stream_lock);
ret = vfe_get_output(line);
if (ret < 0)
goto error_get_output;
ret = vfe_enable_output(line);
if (ret < 0)
goto error_enable_output;
vfe->was_streaming = 1;
return 0;
error_enable_output:
vfe_put_output(line);
error_get_output:
mutex_lock(&vfe->stream_lock);
vfe->stream_count--;
mutex_unlock(&vfe->stream_lock);
return ret;
}
/*
* vfe_disable - Disable streaming on VFE line
* @line: VFE line
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_disable(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
vfe_disable_output(line);
vfe_put_output(line);
mutex_lock(&vfe->stream_lock);
vfe->stream_count--;
mutex_unlock(&vfe->stream_lock);
return 0;
}
/*
* vfe_isr_sof - Process start of frame interrupt
* @vfe: VFE Device
* @line_id: VFE line
*/
static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
{
/* nop */
}
/*
* vfe_isr_reg_update - Process reg update interrupt
* @vfe: VFE Device
* @line_id: VFE line
*/
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
struct vfe_output *output;
unsigned long flags;
spin_lock_irqsave(&vfe->output_lock, flags);
vfe->ops->reg_update_clear(vfe, line_id);
output = &vfe->line[line_id].output;
if (output->wait_reg_update) {
output->wait_reg_update = 0;
complete(&output->reg_update);
}
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
/*
* vfe_isr_wm_done - Process write master done interrupt
* @vfe: VFE Device
* @wm: Write master id
*/
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
{
struct vfe_line *line = &vfe->line[vfe->wm_output_map[wm]];
struct camss_buffer *ready_buf;
struct vfe_output *output;
unsigned long flags;
u32 index;
u64 ts = ktime_get_ns();
spin_lock_irqsave(&vfe->output_lock, flags);
if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
dev_err_ratelimited(vfe->camss->dev,
"Received wm done for unmapped index\n");
goto out_unlock;
}
output = &vfe->line[vfe->wm_output_map[wm]].output;
ready_buf = output->buf[0];
if (!ready_buf) {
dev_err_ratelimited(vfe->camss->dev,
"Missing ready buf %d!\n", output->state);
goto out_unlock;
}
ready_buf->vb.vb2_buf.timestamp = ts;
ready_buf->vb.sequence = output->sequence++;
index = 0;
output->buf[0] = output->buf[1];
if (output->buf[0])
index = 1;
output->buf[index] = vfe_buf_get_pending(output);
if (output->buf[index])
vfe_wm_update(vfe, output->wm_idx[0], output->buf[index]->addr[0], line);
else
output->gen2.active_num--;
spin_unlock_irqrestore(&vfe->output_lock, flags);
vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
return;
out_unlock:
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
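/*
 * vfe_isr_wm_done() above rotates a two-slot ping-pong buffer set: the
 * completed buffer in buf[0] is returned to vb2, buf[1] slides into
 * buf[0], and a pending buffer (if any) is programmed into the freed
 * slot via vfe_wm_update(). When no pending buffer is available the
 * slot stays empty and gen2.active_num drops, which is what
 * vfe_disable_output() polls for when stopping the line.
 */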
/*
* vfe_pm_domain_off - Disable power domains specific to this VFE.
* @vfe: VFE Device
*/
static void vfe_pm_domain_off(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
if (vfe->id >= camss->vfe_num)
return;
device_link_del(camss->genpd_link[vfe->id]);
}
/*
* vfe_pm_domain_on - Enable power domains specific to this VFE.
* @vfe: VFE Device
*/
static int vfe_pm_domain_on(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
enum vfe_line_id id = vfe->id;
if (id >= camss->vfe_num)
return 0;
camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!camss->genpd_link[id])
return -EINVAL;
return 0;
}
/*
* vfe_queue_buffer - Add empty buffer
* @vid: Video device structure
* @buf: Buffer to be enqueued
*
 * Add an empty buffer - depending on the current number of active buffers it
 * is either added to the pending buffer queue or given directly to the
 * hardware to be filled.
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_queue_buffer(struct camss_video *vid,
struct camss_buffer *buf)
{
struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
output = &line->output;
spin_lock_irqsave(&vfe->output_lock, flags);
if (output->state == VFE_OUTPUT_ON && output->gen2.active_num < 2) {
output->buf[output->gen2.active_num++] = buf;
vfe_wm_update(vfe, output->wm_idx[0], buf->addr[0], line);
} else {
vfe_buf_add_pending(output, buf);
}
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
static const struct vfe_isr_ops vfe_isr_ops_170 = {
.reset_ack = vfe_isr_reset_ack,
.halt_ack = vfe_isr_halt_ack,
.reg_update = vfe_isr_reg_update,
.sof = vfe_isr_sof,
.comp_done = vfe_isr_comp_done,
.wm_done = vfe_isr_wm_done,
};
static const struct camss_video_ops vfe_video_ops_170 = {
.queue_buffer = vfe_queue_buffer,
.flush_buffers = vfe_flush_buffers,
};
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
vfe->isr_ops = vfe_isr_ops_170;
vfe->video_ops = vfe_video_ops_170;
vfe->line_num = VFE_LINE_NUM_GEN2;
}
const struct vfe_hw_ops vfe_ops_170 = {
.global_reset = vfe_global_reset,
.hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
.pm_domain_on = vfe_pm_domain_on,
.reg_update_clear = vfe_reg_update_clear,
.reg_update = vfe_reg_update,
.subdev_init = vfe_subdev_init,
.vfe_disable = vfe_disable,
.vfe_enable = vfe_enable,
.vfe_halt = vfe_halt,
.violation_read = vfe_violation_read,
};
| linux-master | drivers/media/platform/qcom/camss/camss-vfe-170.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-vfe-4-1.c
*
* Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.1
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss.h"
#include "camss-vfe.h"
#include "camss-vfe-gen1.h"
#define VFE_0_HW_VERSION 0x000
#define VFE_0_GLOBAL_RESET_CMD 0x00c
#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2)
#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3)
#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4)
#define VFE_0_GLOBAL_RESET_CMD_TIMER BIT(5)
#define VFE_0_GLOBAL_RESET_CMD_PM BIT(6)
#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(7)
#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(8)
#define VFE_0_MODULE_CFG 0x018
#define VFE_0_MODULE_CFG_DEMUX BIT(2)
#define VFE_0_MODULE_CFG_CHROMA_UPSAMPLE BIT(3)
#define VFE_0_MODULE_CFG_SCALE_ENC BIT(23)
#define VFE_0_MODULE_CFG_CROP_ENC BIT(27)
#define VFE_0_CORE_CFG 0x01c
#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
#define VFE_0_IRQ_CMD 0x024
#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0)
#define VFE_0_IRQ_MASK_0 0x028
#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0)
#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1)
#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31)
#define VFE_0_IRQ_MASK_1 0x02c
#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0)
#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7)
#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8)
#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29)
#define VFE_0_IRQ_CLEAR_0 0x030
#define VFE_0_IRQ_CLEAR_1 0x034
#define VFE_0_IRQ_STATUS_0 0x038
#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0)
#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31)
#define VFE_0_IRQ_STATUS_1 0x03c
#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7)
#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8)
#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29)
#define VFE_0_IRQ_COMPOSITE_MASK_0 0x40
#define VFE_0_VIOLATION_STATUS 0x48
#define VFE_0_BUS_CMD 0x4c
#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x)
#define VFE_0_BUS_CFG 0x050
#define VFE_0_BUS_XBAR_CFG_x(x) (0x58 + 0x4 * ((x) / 2))
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(1)
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 5
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 6
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 7
#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x06c + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT 1
#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x070 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x074 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x078 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2)
#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x07c + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x080 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x084 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
(0x088 + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
(0x08c + 0x24 * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
#define VFE_0_BUS_PING_PONG_STATUS 0x268
#define VFE_0_BUS_BDG_CMD 0x2c0
#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
#define VFE_0_BUS_BDG_QOS_CFG_0 0x2c4
#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5
#define VFE_0_BUS_BDG_QOS_CFG_1 0x2c8
#define VFE_0_BUS_BDG_QOS_CFG_2 0x2cc
#define VFE_0_BUS_BDG_QOS_CFG_3 0x2d0
#define VFE_0_BUS_BDG_QOS_CFG_4 0x2d4
#define VFE_0_BUS_BDG_QOS_CFG_5 0x2d8
#define VFE_0_BUS_BDG_QOS_CFG_6 0x2dc
#define VFE_0_BUS_BDG_QOS_CFG_7 0x2e0
#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa5
#define VFE_0_RDI_CFG_x(x) (0x2e8 + (0x4 * (x)))
#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2)
#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
#define VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(r) BIT(16 + (r))
#define VFE_0_CAMIF_CMD 0x2f4
#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
#define VFE_0_CAMIF_CMD_NO_CHANGE 3
#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2)
#define VFE_0_CAMIF_CFG 0x2f8
#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6)
#define VFE_0_CAMIF_FRAME_CFG 0x300
#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x304
#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x308
#define VFE_0_CAMIF_SUBSAMPLE_CFG_0 0x30c
#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x314
#define VFE_0_CAMIF_STATUS 0x31c
#define VFE_0_CAMIF_STATUS_HALT BIT(31)
#define VFE_0_REG_UPDATE 0x378
#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n))
#define VFE_0_REG_UPDATE_line_n(n) \
((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
#define VFE_0_DEMUX_CFG 0x424
#define VFE_0_DEMUX_CFG_PERIOD 0x3
#define VFE_0_DEMUX_GAIN_0 0x428
#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
#define VFE_0_DEMUX_GAIN_1 0x42c
#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
#define VFE_0_DEMUX_EVEN_CFG 0x438
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
#define VFE_0_DEMUX_ODD_CFG 0x43c
#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
#define VFE_0_SCALE_ENC_Y_CFG 0x75c
#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x760
#define VFE_0_SCALE_ENC_Y_H_PHASE 0x764
#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x76c
#define VFE_0_SCALE_ENC_Y_V_PHASE 0x770
#define VFE_0_SCALE_ENC_CBCR_CFG 0x778
#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x77c
#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x780
#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x790
#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x794
#define VFE_0_CROP_ENC_Y_WIDTH 0x854
#define VFE_0_CROP_ENC_Y_HEIGHT 0x858
#define VFE_0_CROP_ENC_CBCR_WIDTH 0x85c
#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x860
#define VFE_0_CLAMP_ENC_MAX_CFG 0x874
#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
#define VFE_0_CLAMP_ENC_MIN_CFG 0x878
#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
#define VFE_0_CGC_OVERRIDE_1 0x974
#define VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(x) BIT(x)
#define CAMIF_TIMEOUT_SLEEP_US 1000
#define CAMIF_TIMEOUT_ALL_US 1000000
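/* Total bus write master UB size on VFE0, split equally between the three RDI interfaces */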
#define MSM_VFE_VFE0_UB_SIZE 1023
#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
return hw_version;
}
static u16 vfe_get_ub_size(u8 vfe_id)
{
if (vfe_id == 0)
return MSM_VFE_VFE0_UB_SIZE_RDI;
return 0;
}
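/* Read-modify-write helpers for clearing/setting bits in VFE registers */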
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
writel_relaxed(bits & ~clr_bits, vfe->base + reg);
}
static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
writel_relaxed(bits | set_bits, vfe->base + reg);
}
static void vfe_global_reset(struct vfe_device *vfe)
{
u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_TESTGEN |
VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
VFE_0_GLOBAL_RESET_CMD_PM |
VFE_0_GLOBAL_RESET_CMD_TIMER |
VFE_0_GLOBAL_RESET_CMD_REGISTER |
VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
VFE_0_GLOBAL_RESET_CMD_BUS |
VFE_0_GLOBAL_RESET_CMD_CAMIF |
VFE_0_GLOBAL_RESET_CMD_CORE;
writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
}
static void vfe_halt_request(struct vfe_device *vfe)
{
writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
vfe->base + VFE_0_BUS_BDG_CMD);
}
static void vfe_halt_clear(struct vfe_device *vfe)
{
writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
}
static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
{
if (enable)
vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
else
vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
}
static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
{
if (enable)
vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
else
vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_FRM_BASED_SHIFT);
}
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
*width = pix->width;
*height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
if (pix->pixelformat == V4L2_PIX_FMT_NV12 ||
pix->pixelformat == V4L2_PIX_FMT_NV21)
if (plane == 1)
*height /= 2;
}
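/*
 * Configure a write master for line-based operation. Both registers pack
 * the frame height (minus one) together with line widths expressed in bus
 * words, as computed by vfe_word_per_line().
 */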
static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
struct v4l2_pix_format_mplane *pix,
u8 plane, u32 enable)
{
u32 reg;
if (enable) {
u16 width = 0, height = 0, bytesperline = 0, wpl;
vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
wpl = vfe_word_per_line(pix->pixelformat, width);
reg = height - 1;
reg |= ((wpl + 1) / 2 - 1) << 16;
writel_relaxed(reg, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
wpl = vfe_word_per_line(pix->pixelformat, bytesperline);
reg = 0x3;
reg |= (height - 1) << 4;
reg |= wpl << 16;
writel_relaxed(reg, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
} else {
writel_relaxed(0, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
writel_relaxed(0, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
}
}
static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
{
u32 reg;
reg = readl_relaxed(vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
& VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
writel_relaxed(reg,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
}
static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
u32 pattern)
{
writel_relaxed(pattern,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
}
static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm,
u16 offset, u16 depth)
{
u32 reg;
reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
depth;
writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
}
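/* Issue a reload command so the write master picks up newly programmed ping/pong addresses */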
static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
{
wmb();
writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
wmb();
}
static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
{
writel_relaxed(addr,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
}
static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
{
writel_relaxed(addr,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
}
static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
{
u32 reg;
reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
return (reg >> wm) & 0x1;
}
static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
{
if (enable)
writel_relaxed(0x10000009, vfe->base + VFE_0_BUS_CFG);
else
writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
}
static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
enum vfe_line_id id)
{
u32 reg;
reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
reg |= VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
switch (id) {
case VFE_LINE_RDI0:
default:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI1:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI2:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
}
if (wm % 2 == 1)
reg <<= 16;
vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
}
static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
{
writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
}
static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
enum vfe_line_id id)
{
u32 reg;
reg = VFE_0_RDI_CFG_x_RDI_Mr_FRAME_BASED_EN(id);
vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(0), reg);
reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
switch (id) {
case VFE_LINE_RDI0:
default:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI1:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI2:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
}
if (wm % 2 == 1)
reg <<= 16;
vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
}
static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
u8 enable)
{
struct vfe_line *line = container_of(output, struct vfe_line, output);
u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
u32 reg;
unsigned int i;
for (i = 0; i < output->wm_num; i++) {
if (i == 0) {
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
} else if (i == 1) {
reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
} else {
/* On current devices output->wm_num is always <= 2 */
break;
}
if (output->wm_idx[i] % 2 == 1)
reg <<= 16;
if (enable)
vfe_reg_set(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
reg);
else
vfe_reg_clr(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[i]),
reg);
}
}
static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line,
u8 enable)
{
/* empty */
}
static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
{
vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
}
static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
wmb();
writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
wmb();
}
static inline void vfe_reg_update_clear(struct vfe_device *vfe,
enum vfe_line_id line_id)
{
vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
}
static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
enum vfe_line_id line_id, u8 enable)
{
u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
if (enable) {
vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
} else {
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
}
}
static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
enum vfe_line_id line_id, u8 enable)
{
struct vfe_output *output = &vfe->line[line_id].output;
unsigned int i;
u32 irq_en0;
u32 irq_en1;
u32 comp_mask = 0;
irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
for (i = 0; i < output->wm_num; i++) {
irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
output->wm_idx[i]);
comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
}
if (enable) {
vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
} else {
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
}
}
static void vfe_enable_irq_common(struct vfe_device *vfe)
{
u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
}
static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 val, even_cfg, odd_cfg;
writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
case MEDIA_BUS_FMT_YUYV8_2X8:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
break;
case MEDIA_BUS_FMT_YVYU8_2X8:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
default:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
break;
case MEDIA_BUS_FMT_VYUY8_2X8:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
break;
}
writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
}
static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
u32 reg;
u16 input, output;
u8 interp_reso;
u32 phase_mult;
writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
input = line->fmt[MSM_VFE_PAD_SINK].width;
output = line->compose.width;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (13 + interp_reso)) / output;
reg = (interp_reso << 20) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
input = line->fmt[MSM_VFE_PAD_SINK].height;
output = line->compose.height;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (13 + interp_reso)) / output;
reg = (interp_reso << 20) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
input = line->fmt[MSM_VFE_PAD_SINK].width;
output = line->compose.width / 2;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (13 + interp_reso)) / output;
reg = (interp_reso << 20) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
input = line->fmt[MSM_VFE_PAD_SINK].height;
output = line->compose.height;
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
output = line->compose.height / 2;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (13 + interp_reso)) / output;
reg = (interp_reso << 20) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
}
static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
u32 reg;
u16 first, last;
first = line->crop.left;
last = line->crop.left + line->crop.width - 1;
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
first = line->crop.top;
last = line->crop.top + line->crop.height - 1;
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
first = line->crop.left / 2;
last = line->crop.left / 2 + line->crop.width / 2 - 1;
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
first = line->crop.top;
last = line->crop.top + line->crop.height - 1;
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
first = line->crop.top / 2;
last = line->crop.top / 2 + line->crop.height / 2 - 1;
}
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
}
static void vfe_set_clamp_cfg(struct vfe_device *vfe)
{
u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
VFE_0_CLAMP_ENC_MAX_CFG_CH2;
writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
VFE_0_CLAMP_ENC_MIN_CFG_CH2;
writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
}
static void vfe_set_qos(struct vfe_device *vfe)
{
u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
}
static void vfe_set_ds(struct vfe_device *vfe)
{
/* empty */
}
static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
{
u32 val = VFE_0_CGC_OVERRIDE_1_IMAGE_Mx_CGC_OVERRIDE(wm);
if (enable)
vfe_reg_set(vfe, VFE_0_CGC_OVERRIDE_1, val);
else
vfe_reg_clr(vfe, VFE_0_CGC_OVERRIDE_1, val);
wmb();
}
static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 val;
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
case MEDIA_BUS_FMT_YUYV8_2X8:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
break;
case MEDIA_BUS_FMT_YVYU8_2X8:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
default:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
break;
case MEDIA_BUS_FMT_VYUY8_2X8:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
break;
}
writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
val = line->fmt[MSM_VFE_PAD_SINK].width * 2;
val |= line->fmt[MSM_VFE_PAD_SINK].height << 16;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
val = 0xffffffff;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG_0);
val = 0xffffffff;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
}
static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable)
{
u32 cmd;
cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE;
writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
wmb();
if (enable)
cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY;
else
cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY;
writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
}
static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
{
u32 val = VFE_0_MODULE_CFG_DEMUX |
VFE_0_MODULE_CFG_CHROMA_UPSAMPLE |
VFE_0_MODULE_CFG_SCALE_ENC |
VFE_0_MODULE_CFG_CROP_ENC;
if (enable)
writel_relaxed(val, vfe->base + VFE_0_MODULE_CFG);
else
writel_relaxed(0x0, vfe->base + VFE_0_MODULE_CFG);
}
static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev)
{
u32 val;
int ret;
ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
val,
(val & VFE_0_CAMIF_STATUS_HALT),
CAMIF_TIMEOUT_SLEEP_US,
CAMIF_TIMEOUT_ALL_US);
if (ret < 0)
dev_err(dev, "%s: camif stop timeout\n", __func__);
return ret;
}
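/* Read both IRQ status registers, clear the reported bits and issue a global clear command */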
static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
{
*value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
*value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0);
writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1);
wmb();
writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
}
static void vfe_violation_read(struct vfe_device *vfe)
{
u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
}
/*
* vfe_isr - VFE module interrupt handler
* @irq: Interrupt line
* @dev: VFE device
*
* Return IRQ_HANDLED on success
*/
static irqreturn_t vfe_isr(int irq, void *dev)
{
struct vfe_device *vfe = dev;
u32 value0, value1;
int i, j;
vfe->ops->isr_read(vfe, &value0, &value1);
dev_dbg(vfe->camss->dev, "VFE: status0 = 0x%08x, status1 = 0x%08x\n",
value0, value1);
if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
vfe->isr_ops.reset_ack(vfe);
if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION)
vfe->ops->violation_read(vfe);
if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK)
vfe->isr_ops.halt_ack(vfe);
for (i = VFE_LINE_RDI0; i <= VFE_LINE_PIX; i++)
if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
vfe->isr_ops.reg_update(vfe, i);
if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
vfe->isr_ops.sof(vfe, VFE_LINE_PIX);
for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
vfe->isr_ops.sof(vfe, i);
for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
vfe->isr_ops.comp_done(vfe, i);
for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
if (vfe->wm_output_map[j] == VFE_LINE_PIX)
value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
}
for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
vfe->isr_ops.wm_done(vfe, i);
return IRQ_HANDLED;
}
/*
* vfe_pm_domain_off - Disable power domains specific to this VFE.
* @vfe: VFE Device
*/
static void vfe_pm_domain_off(struct vfe_device *vfe)
{
/* nop */
}
/*
* vfe_pm_domain_on - Enable power domains specific to this VFE.
* @vfe: VFE Device
*/
static int vfe_pm_domain_on(struct vfe_device *vfe)
{
return 0;
}
static const struct vfe_hw_ops_gen1 vfe_ops_gen1_4_1 = {
.bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi,
.bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi,
.bus_enable_wr_if = vfe_bus_enable_wr_if,
.bus_reload_wm = vfe_bus_reload_wm,
.camif_wait_for_stop = vfe_camif_wait_for_stop,
.enable_irq_common = vfe_enable_irq_common,
.enable_irq_pix_line = vfe_enable_irq_pix_line,
.enable_irq_wm_line = vfe_enable_irq_wm_line,
.get_ub_size = vfe_get_ub_size,
.halt_clear = vfe_halt_clear,
.halt_request = vfe_halt_request,
.set_camif_cfg = vfe_set_camif_cfg,
.set_camif_cmd = vfe_set_camif_cmd,
.set_cgc_override = vfe_set_cgc_override,
.set_clamp_cfg = vfe_set_clamp_cfg,
.set_crop_cfg = vfe_set_crop_cfg,
.set_demux_cfg = vfe_set_demux_cfg,
.set_ds = vfe_set_ds,
.set_module_cfg = vfe_set_module_cfg,
.set_qos = vfe_set_qos,
.set_rdi_cid = vfe_set_rdi_cid,
.set_realign_cfg = vfe_set_realign_cfg,
.set_scale_cfg = vfe_set_scale_cfg,
.set_xbar_cfg = vfe_set_xbar_cfg,
.wm_enable = vfe_wm_enable,
.wm_frame_based = vfe_wm_frame_based,
.wm_get_ping_pong_status = vfe_wm_get_ping_pong_status,
.wm_line_based = vfe_wm_line_based,
.wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern,
.wm_set_framedrop_period = vfe_wm_set_framedrop_period,
.wm_set_ping_addr = vfe_wm_set_ping_addr,
.wm_set_pong_addr = vfe_wm_set_pong_addr,
.wm_set_subsample = vfe_wm_set_subsample,
.wm_set_ub_cfg = vfe_wm_set_ub_cfg,
};
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
vfe->isr_ops = vfe_isr_ops_gen1;
vfe->ops_gen1 = &vfe_ops_gen1_4_1;
vfe->video_ops = vfe_video_ops_gen1;
vfe->line_num = VFE_LINE_NUM_GEN1;
}
const struct vfe_hw_ops vfe_ops_4_1 = {
.global_reset = vfe_global_reset,
.hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
.pm_domain_on = vfe_pm_domain_on,
.reg_update_clear = vfe_reg_update_clear,
.reg_update = vfe_reg_update,
.subdev_init = vfe_subdev_init,
.vfe_disable = vfe_gen1_disable,
.vfe_enable = vfe_gen1_enable,
.vfe_halt = vfe_gen1_halt,
.violation_read = vfe_violation_read,
};
| linux-master | drivers/media/platform/qcom/camss/camss-vfe-4-1.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * camss-csid-gen2.c
*
* Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
*
* Copyright (C) 2020 Linaro Ltd.
*/
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include "camss-csid.h"
#include "camss-csid-gen2.h"
#include "camss.h"
/*
 * The CSID 2 IP block is different from the others: it is a bare-bones
 * "Lite" version with no PIX interface support and, as a result, has an
 * alternate register layout.
 */
#define IS_LITE (csid->id >= 2 ? 1 : 0)
#define CSID_HW_VERSION 0x0
#define HW_VERSION_STEPPING 0
#define HW_VERSION_REVISION 16
#define HW_VERSION_GENERATION 28
#define CSID_RST_STROBES 0x10
#define RST_STROBES 0
#define CSID_CSI2_RX_IRQ_STATUS 0x20
#define CSID_CSI2_RX_IRQ_MASK 0x24
#define CSID_CSI2_RX_IRQ_CLEAR 0x28
#define CSID_CSI2_RDIN_IRQ_STATUS(rdi) ((IS_LITE ? 0x30 : 0x40) \
+ 0x10 * (rdi))
#define CSID_CSI2_RDIN_IRQ_MASK(rdi) ((IS_LITE ? 0x34 : 0x44) \
+ 0x10 * (rdi))
#define CSID_CSI2_RDIN_IRQ_CLEAR(rdi) ((IS_LITE ? 0x38 : 0x48) \
+ 0x10 * (rdi))
#define CSID_CSI2_RDIN_IRQ_SET(rdi) ((IS_LITE ? 0x3C : 0x4C) \
+ 0x10 * (rdi))
#define CSID_TOP_IRQ_STATUS 0x70
#define TOP_IRQ_STATUS_RESET_DONE 0
#define CSID_TOP_IRQ_MASK 0x74
#define CSID_TOP_IRQ_CLEAR 0x78
#define CSID_TOP_IRQ_SET 0x7C
#define CSID_IRQ_CMD 0x80
#define IRQ_CMD_CLEAR 0
#define IRQ_CMD_SET 4
#define CSID_CSI2_RX_CFG0 0x100
#define CSI2_RX_CFG0_NUM_ACTIVE_LANES 0
#define CSI2_RX_CFG0_DL0_INPUT_SEL 4
#define CSI2_RX_CFG0_DL1_INPUT_SEL 8
#define CSI2_RX_CFG0_DL2_INPUT_SEL 12
#define CSI2_RX_CFG0_DL3_INPUT_SEL 16
#define CSI2_RX_CFG0_PHY_NUM_SEL 20
#define CSI2_RX_CFG0_PHY_TYPE_SEL 24
#define CSID_CSI2_RX_CFG1 0x104
#define CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN 0
#define CSI2_RX_CFG1_DE_SCRAMBLE_EN 1
#define CSI2_RX_CFG1_VC_MODE 2
#define CSI2_RX_CFG1_COMPLETE_STREAM_EN 4
#define CSI2_RX_CFG1_COMPLETE_STREAM_FRAME_TIMING 5
#define CSI2_RX_CFG1_MISR_EN 6
#define CSI2_RX_CFG1_CGC_MODE 7
#define CGC_MODE_DYNAMIC_GATING 0
#define CGC_MODE_ALWAYS_ON 1
#define CSID_RDI_CFG0(rdi) ((IS_LITE ? 0x200 : 0x300) \
+ 0x100 * (rdi))
#define RDI_CFG0_BYTE_CNTR_EN 0
#define RDI_CFG0_FORMAT_MEASURE_EN 1
#define RDI_CFG0_TIMESTAMP_EN 2
#define RDI_CFG0_DROP_H_EN 3
#define RDI_CFG0_DROP_V_EN 4
#define RDI_CFG0_CROP_H_EN 5
#define RDI_CFG0_CROP_V_EN 6
#define RDI_CFG0_MISR_EN 7
#define RDI_CFG0_CGC_MODE 8
#define CGC_MODE_DYNAMIC 0
#define CGC_MODE_ALWAYS_ON 1
#define RDI_CFG0_PLAIN_ALIGNMENT 9
#define PLAIN_ALIGNMENT_LSB 0
#define PLAIN_ALIGNMENT_MSB 1
#define RDI_CFG0_PLAIN_FORMAT 10
#define RDI_CFG0_DECODE_FORMAT 12
#define RDI_CFG0_DATA_TYPE 16
#define RDI_CFG0_VIRTUAL_CHANNEL 22
#define RDI_CFG0_DT_ID 27
#define RDI_CFG0_EARLY_EOF_EN 29
#define RDI_CFG0_PACKING_FORMAT 30
#define RDI_CFG0_ENABLE 31
#define CSID_RDI_CFG1(rdi) ((IS_LITE ? 0x204 : 0x304)\
+ 0x100 * (rdi))
#define RDI_CFG1_TIMESTAMP_STB_SEL 0
#define CSID_RDI_CTRL(rdi) ((IS_LITE ? 0x208 : 0x308)\
+ 0x100 * (rdi))
#define RDI_CTRL_HALT_CMD 0
#define HALT_CMD_HALT_AT_FRAME_BOUNDARY 0
#define HALT_CMD_RESUME_AT_FRAME_BOUNDARY 1
#define RDI_CTRL_HALT_MODE 2
#define CSID_RDI_FRM_DROP_PATTERN(rdi) ((IS_LITE ? 0x20C : 0x30C)\
+ 0x100 * (rdi))
#define CSID_RDI_FRM_DROP_PERIOD(rdi) ((IS_LITE ? 0x210 : 0x310)\
+ 0x100 * (rdi))
#define CSID_RDI_IRQ_SUBSAMPLE_PATTERN(rdi) ((IS_LITE ? 0x214 : 0x314)\
+ 0x100 * (rdi))
#define CSID_RDI_IRQ_SUBSAMPLE_PERIOD(rdi) ((IS_LITE ? 0x218 : 0x318)\
+ 0x100 * (rdi))
#define CSID_RDI_RPP_PIX_DROP_PATTERN(rdi) ((IS_LITE ? 0x224 : 0x324)\
+ 0x100 * (rdi))
#define CSID_RDI_RPP_PIX_DROP_PERIOD(rdi) ((IS_LITE ? 0x228 : 0x328)\
+ 0x100 * (rdi))
#define CSID_RDI_RPP_LINE_DROP_PATTERN(rdi) ((IS_LITE ? 0x22C : 0x32C)\
+ 0x100 * (rdi))
#define CSID_RDI_RPP_LINE_DROP_PERIOD(rdi) ((IS_LITE ? 0x230 : 0x330)\
+ 0x100 * (rdi))
#define CSID_TPG_CTRL 0x600
#define TPG_CTRL_TEST_EN 0
#define TPG_CTRL_FS_PKT_EN 1
#define TPG_CTRL_FE_PKT_EN 2
#define TPG_CTRL_NUM_ACTIVE_LANES 4
#define TPG_CTRL_CYCLES_BETWEEN_PKTS 8
#define TPG_CTRL_NUM_TRAIL_BYTES 20
#define CSID_TPG_VC_CFG0 0x604
#define TPG_VC_CFG0_VC_NUM 0
#define TPG_VC_CFG0_NUM_ACTIVE_SLOTS 8
#define NUM_ACTIVE_SLOTS_0_ENABLED 0
#define NUM_ACTIVE_SLOTS_0_1_ENABLED 1
#define NUM_ACTIVE_SLOTS_0_1_2_ENABLED 2
#define NUM_ACTIVE_SLOTS_0_1_3_ENABLED 3
#define TPG_VC_CFG0_LINE_INTERLEAVING_MODE 10
#define INTERLEAVING_MODE_INTERLEAVED 0
#define INTERLEAVING_MODE_ONE_SHOT 1
#define TPG_VC_CFG0_NUM_FRAMES 16
#define CSID_TPG_VC_CFG1 0x608
#define TPG_VC_CFG1_H_BLANKING_COUNT 0
#define TPG_VC_CFG1_V_BLANKING_COUNT 12
#define TPG_VC_CFG1_V_BLANK_FRAME_WIDTH_SEL 24
#define CSID_TPG_LFSR_SEED 0x60C
#define CSID_TPG_DT_n_CFG_0(n) (0x610 + (n) * 0xC)
#define TPG_DT_n_CFG_0_FRAME_HEIGHT 0
#define TPG_DT_n_CFG_0_FRAME_WIDTH 16
#define CSID_TPG_DT_n_CFG_1(n) (0x614 + (n) * 0xC)
#define TPG_DT_n_CFG_1_DATA_TYPE 0
#define TPG_DT_n_CFG_1_ECC_XOR_MASK 8
#define TPG_DT_n_CFG_1_CRC_XOR_MASK 16
#define CSID_TPG_DT_n_CFG_2(n) (0x618 + (n) * 0xC)
#define TPG_DT_n_CFG_2_PAYLOAD_MODE 0
#define TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD 4
#define TPG_DT_n_CFG_2_ENCODE_FORMAT 16
#define CSID_TPG_COLOR_BARS_CFG 0x640
#define TPG_COLOR_BARS_CFG_UNICOLOR_BAR_EN 0
#define TPG_COLOR_BARS_CFG_UNICOLOR_BAR_SEL 4
#define TPG_COLOR_BARS_CFG_SPLIT_EN 5
#define TPG_COLOR_BARS_CFG_ROTATE_PERIOD 8
#define CSID_TPG_COLOR_BOX_CFG 0x644
#define TPG_COLOR_BOX_CFG_MODE 0
#define TPG_COLOR_BOX_PATTERN_SEL 2
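/* Each entry: media bus code, CSI-2 data type, decode format, bits per pixel, samples per pixel */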
static const struct csid_format csid_formats[] = {
{
MEDIA_BUS_FMT_UYVY8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_VYUY8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YUYV8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YVYU8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_SBGGR8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGBRG8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGRBG8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SRGGB8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SBGGR10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGBRG10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGRBG10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SRGGB10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_Y8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_Y10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SBGGR12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGBRG12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGRBG12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SRGGB12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SBGGR14_1X14,
DATA_TYPE_RAW_14BIT,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGBRG14_1X14,
DATA_TYPE_RAW_14BIT,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGRBG14_1X14,
DATA_TYPE_RAW_14BIT,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SRGGB14_1X14,
DATA_TYPE_RAW_14BIT,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
};
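/*
 * __csid_configure_stream - Configure stream on one RDI channel
 * @csid: CSID device
 * @enable: enable/disable the stream
 * @vc: virtual channel to configure
 *
 * Programs the test generator (when enabled), the RDI path for the given
 * virtual channel and the CSI-2 receiver, then resumes or halts the RDI
 * at the next frame boundary.
 */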
static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
{
struct csid_testgen_config *tg = &csid->testgen;
u32 val;
u32 phy_sel = 0;
u8 lane_cnt = csid->phy.lane_cnt;
/* Source pads matching RDI channels on hardware. Pad 1 -> RDI0, Pad 2 -> RDI1, etc. */
struct v4l2_mbus_framefmt *input_format = &csid->fmt[MSM_CSID_PAD_FIRST_SRC + vc];
const struct csid_format *format = csid_get_fmt_entry(csid->formats, csid->nformats,
input_format->code);
if (!lane_cnt)
lane_cnt = 4;
if (!tg->enabled)
phy_sel = csid->phy.csiphy_id;
if (enable) {
u8 dt_id = vc;
if (tg->enabled) {
/* Config Test Generator */
vc = 0xa;
/* configure one DT, infinite frames */
val = vc << TPG_VC_CFG0_VC_NUM;
val |= INTERLEAVING_MODE_ONE_SHOT << TPG_VC_CFG0_LINE_INTERLEAVING_MODE;
val |= 0 << TPG_VC_CFG0_NUM_FRAMES;
writel_relaxed(val, csid->base + CSID_TPG_VC_CFG0);
val = 0x740 << TPG_VC_CFG1_H_BLANKING_COUNT;
val |= 0x3ff << TPG_VC_CFG1_V_BLANKING_COUNT;
writel_relaxed(val, csid->base + CSID_TPG_VC_CFG1);
writel_relaxed(0x12345678, csid->base + CSID_TPG_LFSR_SEED);
/* mask before shifting: '<<' binds tighter than '&' */
val = (input_format->height & 0x1fff) << TPG_DT_n_CFG_0_FRAME_HEIGHT;
val |= (input_format->width & 0x1fff) << TPG_DT_n_CFG_0_FRAME_WIDTH;
writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0));
val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE;
writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0));
val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE;
val |= 0xBE << TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD;
val |= format->decode_format << TPG_DT_n_CFG_2_ENCODE_FORMAT;
writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_2(0));
writel_relaxed(0, csid->base + CSID_TPG_COLOR_BARS_CFG);
writel_relaxed(0, csid->base + CSID_TPG_COLOR_BOX_CFG);
}
val = 1 << RDI_CFG0_BYTE_CNTR_EN;
val |= 1 << RDI_CFG0_FORMAT_MEASURE_EN;
val |= 1 << RDI_CFG0_TIMESTAMP_EN;
/* note: for non-RDI path, this should be format->decode_format */
val |= DECODE_FORMAT_PAYLOAD_ONLY << RDI_CFG0_DECODE_FORMAT;
val |= format->data_type << RDI_CFG0_DATA_TYPE;
val |= vc << RDI_CFG0_VIRTUAL_CHANNEL;
val |= dt_id << RDI_CFG0_DT_ID;
writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc));
/* CSID_TIMESTAMP_STB_POST_IRQ */
val = 2 << RDI_CFG1_TIMESTAMP_STB_SEL;
writel_relaxed(val, csid->base + CSID_RDI_CFG1(vc));
val = 1;
writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PERIOD(vc));
val = 0;
writel_relaxed(val, csid->base + CSID_RDI_FRM_DROP_PATTERN(vc));
val = 1;
writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PERIOD(vc));
val = 0;
writel_relaxed(val, csid->base + CSID_RDI_IRQ_SUBSAMPLE_PATTERN(vc));
val = 1;
writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PERIOD(vc));
val = 0;
writel_relaxed(val, csid->base + CSID_RDI_RPP_PIX_DROP_PATTERN(vc));
val = 1;
writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PERIOD(vc));
val = 0;
writel_relaxed(val, csid->base + CSID_RDI_RPP_LINE_DROP_PATTERN(vc));
val = 0;
writel_relaxed(val, csid->base + CSID_RDI_CTRL(vc));
val = readl_relaxed(csid->base + CSID_RDI_CFG0(vc));
val |= 1 << RDI_CFG0_ENABLE;
writel_relaxed(val, csid->base + CSID_RDI_CFG0(vc));
}
if (tg->enabled) {
val = enable << TPG_CTRL_TEST_EN;
val |= 1 << TPG_CTRL_FS_PKT_EN;
val |= 1 << TPG_CTRL_FE_PKT_EN;
val |= (lane_cnt - 1) << TPG_CTRL_NUM_ACTIVE_LANES;
val |= 0x64 << TPG_CTRL_CYCLES_BETWEEN_PKTS;
val |= 0xA << TPG_CTRL_NUM_TRAIL_BYTES;
writel_relaxed(val, csid->base + CSID_TPG_CTRL);
}
val = (lane_cnt - 1) << CSI2_RX_CFG0_NUM_ACTIVE_LANES;
val |= csid->phy.lane_assign << CSI2_RX_CFG0_DL0_INPUT_SEL;
val |= phy_sel << CSI2_RX_CFG0_PHY_NUM_SEL;
writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
val |= 1 << CSI2_RX_CFG1_MISR_EN;
writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
if (enable)
val = HALT_CMD_RESUME_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD;
else
val = HALT_CMD_HALT_AT_FRAME_BOUNDARY << RDI_CTRL_HALT_CMD;
writel_relaxed(val, csid->base + CSID_RDI_CTRL(vc));
}
static void csid_configure_stream(struct csid_device *csid, u8 enable)
{
u8 i;
/* Loop through all enabled VCs and configure stream for each */
for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
if (csid->phy.en_vc & BIT(i))
__csid_configure_stream(csid, enable, i);
}
static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
{
if (val > 0 && val <= csid->testgen.nmodes)
csid->testgen.mode = val;
return 0;
}
/*
* csid_hw_version - CSID hardware version query
* @csid: CSID device
*
 * Return HW version
*/
static u32 csid_hw_version(struct csid_device *csid)
{
u32 hw_version;
u32 hw_gen;
u32 hw_rev;
u32 hw_step;
hw_version = readl_relaxed(csid->base + CSID_HW_VERSION);
hw_gen = (hw_version >> HW_VERSION_GENERATION) & 0xF;
hw_rev = (hw_version >> HW_VERSION_REVISION) & 0xFFF;
hw_step = (hw_version >> HW_VERSION_STEPPING) & 0xFFFF;
dev_dbg(csid->camss->dev, "CSID HW Version = %u.%u.%u\n",
hw_gen, hw_rev, hw_step);
return hw_version;
}
/*
* csid_isr - CSID module interrupt service routine
* @irq: Interrupt line
* @dev: CSID device
*
* Return IRQ_HANDLED on success
*/
static irqreturn_t csid_isr(int irq, void *dev)
{
struct csid_device *csid = dev;
u32 val;
u8 reset_done;
int i;
val = readl_relaxed(csid->base + CSID_TOP_IRQ_STATUS);
writel_relaxed(val, csid->base + CSID_TOP_IRQ_CLEAR);
reset_done = val & BIT(TOP_IRQ_STATUS_RESET_DONE);
val = readl_relaxed(csid->base + CSID_CSI2_RX_IRQ_STATUS);
writel_relaxed(val, csid->base + CSID_CSI2_RX_IRQ_CLEAR);
/* Read and clear IRQ status for each enabled RDI channel */
for (i = 0; i < MSM_CSID_MAX_SRC_STREAMS; i++)
if (csid->phy.en_vc & BIT(i)) {
val = readl_relaxed(csid->base + CSID_CSI2_RDIN_IRQ_STATUS(i));
writel_relaxed(val, csid->base + CSID_CSI2_RDIN_IRQ_CLEAR(i));
}
val = 1 << IRQ_CMD_CLEAR;
writel_relaxed(val, csid->base + CSID_IRQ_CMD);
if (reset_done)
complete(&csid->reset_complete);
return IRQ_HANDLED;
}
/*
* csid_reset - Trigger reset on CSID module and wait to complete
* @csid: CSID device
*
* Return 0 on success or a negative error code otherwise
*/
static int csid_reset(struct csid_device *csid)
{
unsigned long time;
u32 val;
reinit_completion(&csid->reset_complete);
writel_relaxed(1, csid->base + CSID_TOP_IRQ_CLEAR);
writel_relaxed(1, csid->base + CSID_IRQ_CMD);
writel_relaxed(1, csid->base + CSID_TOP_IRQ_MASK);
writel_relaxed(1, csid->base + CSID_IRQ_CMD);
/* preserve registers */
val = 0x1e << RST_STROBES;
writel_relaxed(val, csid->base + CSID_RST_STROBES);
time = wait_for_completion_timeout(&csid->reset_complete,
msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
if (!time) {
dev_err(csid->camss->dev, "CSID reset timeout\n");
return -EIO;
}
return 0;
}
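/*
 * csid_src_pad_code - Pick the source pad code matching a sink pad code
 *
 * 10-bit Bayer and greyscale sink formats may also be produced in their
 * 2X8_PADHI_LE variants; all other sink codes pass through unchanged.
 */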
static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
unsigned int match_format_idx, u32 match_code)
{
switch (sink_code) {
case MEDIA_BUS_FMT_SBGGR10_1X10:
{
u32 src_code[] = {
MEDIA_BUS_FMT_SBGGR10_1X10,
MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
};
return csid_find_code(src_code, ARRAY_SIZE(src_code),
match_format_idx, match_code);
}
case MEDIA_BUS_FMT_Y10_1X10:
{
u32 src_code[] = {
MEDIA_BUS_FMT_Y10_1X10,
MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
};
return csid_find_code(src_code, ARRAY_SIZE(src_code),
match_format_idx, match_code);
}
default:
if (match_format_idx > 0)
return 0;
return sink_code;
}
}
static void csid_subdev_init(struct csid_device *csid)
{
csid->formats = csid_formats;
csid->nformats = ARRAY_SIZE(csid_formats);
csid->testgen.modes = csid_testgen_modes;
csid->testgen.nmodes = CSID_PAYLOAD_MODE_NUM_SUPPORTED_GEN2;
}
const struct csid_hw_ops csid_ops_gen2 = {
.configure_stream = csid_configure_stream,
.configure_testgen_pattern = csid_configure_testgen_pattern,
.hw_version = csid_hw_version,
.isr = csid_isr,
.reset = csid_reset,
.src_pad_code = csid_src_pad_code,
.subdev_init = csid_subdev_init,
};
| linux-master | drivers/media/platform/qcom/camss/camss-csid-gen2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-vfe-gen1.c
*
 * Qualcomm MSM Camera Subsystem - common VFE functionality for Gen 1 hardware versions (4.1, 4.7, ...)
*
* Copyright (C) 2020 Linaro Ltd.
*/
#include "camss.h"
#include "camss-vfe.h"
#include "camss-vfe-gen1.h"
/* Max number of frame drop updates per frame */
#define VFE_FRAME_DROP_UPDATES 2
#define VFE_NEXT_SOF_MS 500
int vfe_gen1_halt(struct vfe_device *vfe)
{
unsigned long time;
reinit_completion(&vfe->halt_complete);
vfe->ops_gen1->halt_request(vfe);
time = wait_for_completion_timeout(&vfe->halt_complete,
msecs_to_jiffies(VFE_HALT_TIMEOUT_MS));
if (!time) {
dev_err(vfe->camss->dev, "VFE halt timeout\n");
return -EIO;
}
return 0;
}
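/*
 * vfe_disable_output - Disable output on a VFE line and wait for idle
 * @line: VFE line
 *
 * Waits for the next SOF, disables the write masters, then waits for the
 * reg update ack before tearing down the RDI or PIX path configuration.
 */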
static int vfe_disable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
const struct vfe_hw_ops *ops = vfe->ops;
unsigned long flags;
unsigned long time;
unsigned int i;
spin_lock_irqsave(&vfe->output_lock, flags);
output->gen1.wait_sof = 1;
spin_unlock_irqrestore(&vfe->output_lock, flags);
time = wait_for_completion_timeout(&output->sof, msecs_to_jiffies(VFE_NEXT_SOF_MS));
if (!time)
dev_err(vfe->camss->dev, "VFE sof timeout\n");
spin_lock_irqsave(&vfe->output_lock, flags);
for (i = 0; i < output->wm_num; i++)
vfe->ops_gen1->wm_enable(vfe, output->wm_idx[i], 0);
ops->reg_update(vfe, line->id);
output->wait_reg_update = 1;
spin_unlock_irqrestore(&vfe->output_lock, flags);
time = wait_for_completion_timeout(&output->reg_update, msecs_to_jiffies(VFE_NEXT_SOF_MS));
if (!time)
dev_err(vfe->camss->dev, "VFE reg update timeout\n");
spin_lock_irqsave(&vfe->output_lock, flags);
if (line->id != VFE_LINE_PIX) {
vfe->ops_gen1->wm_frame_based(vfe, output->wm_idx[0], 0);
vfe->ops_gen1->bus_disconnect_wm_from_rdi(vfe, output->wm_idx[0], line->id);
vfe->ops_gen1->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 0);
vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[0], 0);
spin_unlock_irqrestore(&vfe->output_lock, flags);
} else {
for (i = 0; i < output->wm_num; i++) {
vfe->ops_gen1->wm_line_based(vfe, output->wm_idx[i], NULL, i, 0);
vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[i], 0);
}
vfe->ops_gen1->enable_irq_pix_line(vfe, 0, line->id, 0);
vfe->ops_gen1->set_module_cfg(vfe, 0);
vfe->ops_gen1->set_realign_cfg(vfe, line, 0);
vfe->ops_gen1->set_xbar_cfg(vfe, output, 0);
vfe->ops_gen1->set_camif_cmd(vfe, 0);
spin_unlock_irqrestore(&vfe->output_lock, flags);
vfe->ops_gen1->camif_wait_for_stop(vfe, vfe->camss->dev);
}
return 0;
}
/*
* vfe_gen1_disable - Disable streaming on VFE line
* @line: VFE line
*
* Return 0 on success or a negative error code otherwise
*/
int vfe_gen1_disable(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
vfe_disable_output(line);
vfe_put_output(line);
mutex_lock(&vfe->stream_lock);
if (vfe->stream_count == 1)
vfe->ops_gen1->bus_enable_wr_if(vfe, 0);
vfe->stream_count--;
mutex_unlock(&vfe->stream_lock);
return 0;
}
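/*
 * vfe_output_init_addrs - Initialize write master ping/pong addresses
 *
 * Ping gets the first queued buffer and pong the second; with only one
 * buffer available, pong mirrors the ping address.
 */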
static void vfe_output_init_addrs(struct vfe_device *vfe,
struct vfe_output *output, u8 sync,
struct vfe_line *line)
{
u32 ping_addr;
u32 pong_addr;
unsigned int i;
output->gen1.active_buf = 0;
for (i = 0; i < output->wm_num; i++) {
if (output->buf[0])
ping_addr = output->buf[0]->addr[i];
else
ping_addr = 0;
if (output->buf[1])
pong_addr = output->buf[1]->addr[i];
else
pong_addr = ping_addr;
vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], ping_addr);
vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], pong_addr);
if (sync)
vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
}
}
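/*
 * vfe_output_frame_drop - Program the write master frame drop pattern
 *
 * The drop pattern is a bitmask of frames to capture within the drop
 * period: 1 captures a single frame, 3 two consecutive frames, 0 none.
 */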
static void vfe_output_frame_drop(struct vfe_device *vfe,
struct vfe_output *output,
u32 drop_pattern)
{
u8 drop_period;
unsigned int i;
/* Toggle the update period so the new pattern takes effect on the next frame */
output->drop_update_idx++;
output->drop_update_idx %= VFE_FRAME_DROP_UPDATES;
drop_period = VFE_FRAME_DROP_VAL + output->drop_update_idx;
for (i = 0; i < output->wm_num; i++) {
vfe->ops_gen1->wm_set_framedrop_period(vfe, output->wm_idx[i], drop_period);
vfe->ops_gen1->wm_set_framedrop_pattern(vfe, output->wm_idx[i], drop_pattern);
}
vfe->ops->reg_update(vfe, container_of(output, struct vfe_line, output)->id);
}
static int vfe_enable_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
const struct vfe_hw_ops *ops = vfe->ops;
struct media_entity *sensor;
unsigned long flags;
unsigned int frame_skip = 0;
unsigned int i;
u16 ub_size;
ub_size = vfe->ops_gen1->get_ub_size(vfe->id);
if (!ub_size)
return -EINVAL;
sensor = camss_find_sensor(&line->subdev.entity);
if (sensor) {
struct v4l2_subdev *subdev = media_entity_to_v4l2_subdev(sensor);
v4l2_subdev_call(subdev, sensor, g_skip_frames, &frame_skip);
/* Max frame skip is 29 frames */
if (frame_skip > VFE_FRAME_DROP_VAL - 1)
frame_skip = VFE_FRAME_DROP_VAL - 1;
}
spin_lock_irqsave(&vfe->output_lock, flags);
ops->reg_update_clear(vfe, line->id);
if (output->state > VFE_OUTPUT_RESERVED) {
dev_err(vfe->camss->dev, "Output is not in reserved state %d\n", output->state);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return -EINVAL;
}
output->state = VFE_OUTPUT_IDLE;
output->buf[0] = vfe_buf_get_pending(output);
output->buf[1] = vfe_buf_get_pending(output);
if (!output->buf[0] && output->buf[1]) {
output->buf[0] = output->buf[1];
output->buf[1] = NULL;
}
if (output->buf[0])
output->state = VFE_OUTPUT_SINGLE;
if (output->buf[1])
output->state = VFE_OUTPUT_CONTINUOUS;
switch (output->state) {
case VFE_OUTPUT_SINGLE:
vfe_output_frame_drop(vfe, output, 1 << frame_skip);
break;
case VFE_OUTPUT_CONTINUOUS:
vfe_output_frame_drop(vfe, output, 3 << frame_skip);
break;
default:
vfe_output_frame_drop(vfe, output, 0);
break;
}
output->sequence = 0;
output->gen1.wait_sof = 0;
output->wait_reg_update = 0;
reinit_completion(&output->sof);
reinit_completion(&output->reg_update);
vfe_output_init_addrs(vfe, output, 0, line);
if (line->id != VFE_LINE_PIX) {
vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[0], 1);
vfe->ops_gen1->enable_irq_wm_line(vfe, output->wm_idx[0], line->id, 1);
vfe->ops_gen1->bus_connect_wm_to_rdi(vfe, output->wm_idx[0], line->id);
vfe->ops_gen1->wm_set_subsample(vfe, output->wm_idx[0]);
vfe->ops_gen1->set_rdi_cid(vfe, line->id, 0);
vfe->ops_gen1->wm_set_ub_cfg(vfe, output->wm_idx[0],
(ub_size + 1) * output->wm_idx[0], ub_size);
vfe->ops_gen1->wm_frame_based(vfe, output->wm_idx[0], 1);
vfe->ops_gen1->wm_enable(vfe, output->wm_idx[0], 1);
vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[0]);
} else {
ub_size /= output->wm_num;
for (i = 0; i < output->wm_num; i++) {
vfe->ops_gen1->set_cgc_override(vfe, output->wm_idx[i], 1);
vfe->ops_gen1->wm_set_subsample(vfe, output->wm_idx[i]);
vfe->ops_gen1->wm_set_ub_cfg(vfe, output->wm_idx[i],
(ub_size + 1) * output->wm_idx[i], ub_size);
vfe->ops_gen1->wm_line_based(vfe, output->wm_idx[i],
&line->video_out.active_fmt.fmt.pix_mp, i, 1);
vfe->ops_gen1->wm_enable(vfe, output->wm_idx[i], 1);
vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
}
vfe->ops_gen1->enable_irq_pix_line(vfe, 0, line->id, 1);
vfe->ops_gen1->set_module_cfg(vfe, 1);
vfe->ops_gen1->set_camif_cfg(vfe, line);
vfe->ops_gen1->set_realign_cfg(vfe, line, 1);
vfe->ops_gen1->set_xbar_cfg(vfe, output, 1);
vfe->ops_gen1->set_demux_cfg(vfe, line);
vfe->ops_gen1->set_scale_cfg(vfe, line);
vfe->ops_gen1->set_crop_cfg(vfe, line);
vfe->ops_gen1->set_clamp_cfg(vfe);
vfe->ops_gen1->set_camif_cmd(vfe, 1);
}
ops->reg_update(vfe, line->id);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
static int vfe_get_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
struct v4l2_format *f = &line->video_out.active_fmt;
unsigned long flags;
int i;
int wm_idx;
spin_lock_irqsave(&vfe->output_lock, flags);
output = &line->output;
if (output->state > VFE_OUTPUT_RESERVED) {
dev_err(vfe->camss->dev, "Output is running\n");
goto error;
}
output->state = VFE_OUTPUT_RESERVED;
output->gen1.active_buf = 0;
switch (f->fmt.pix_mp.pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
output->wm_num = 2;
break;
default:
output->wm_num = 1;
break;
}
for (i = 0; i < output->wm_num; i++) {
wm_idx = vfe_reserve_wm(vfe, line->id);
if (wm_idx < 0) {
dev_err(vfe->camss->dev, "Can not reserve wm\n");
goto error_get_wm;
}
output->wm_idx[i] = wm_idx;
}
output->drop_update_idx = 0;
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
error_get_wm:
for (i--; i >= 0; i--)
vfe_release_wm(vfe, output->wm_idx[i]);
output->state = VFE_OUTPUT_OFF;
error:
spin_unlock_irqrestore(&vfe->output_lock, flags);
return -EINVAL;
}
int vfe_gen1_enable(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
int ret;
mutex_lock(&vfe->stream_lock);
if (!vfe->stream_count) {
vfe->ops_gen1->enable_irq_common(vfe);
vfe->ops_gen1->bus_enable_wr_if(vfe, 1);
vfe->ops_gen1->set_qos(vfe);
vfe->ops_gen1->set_ds(vfe);
}
vfe->stream_count++;
mutex_unlock(&vfe->stream_lock);
ret = vfe_get_output(line);
if (ret < 0)
goto error_get_output;
ret = vfe_enable_output(line);
if (ret < 0)
goto error_enable_output;
vfe->was_streaming = 1;
return 0;
error_enable_output:
vfe_put_output(line);
error_get_output:
mutex_lock(&vfe->stream_lock);
if (vfe->stream_count == 1)
vfe->ops_gen1->bus_enable_wr_if(vfe, 0);
vfe->stream_count--;
mutex_unlock(&vfe->stream_lock);
return ret;
}
static void vfe_output_update_ping_addr(struct vfe_device *vfe,
struct vfe_output *output, u8 sync,
struct vfe_line *line)
{
u32 addr;
unsigned int i;
for (i = 0; i < output->wm_num; i++) {
if (output->buf[0])
addr = output->buf[0]->addr[i];
else
addr = 0;
vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], addr);
if (sync)
vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
}
}
static void vfe_output_update_pong_addr(struct vfe_device *vfe,
struct vfe_output *output, u8 sync,
struct vfe_line *line)
{
u32 addr;
unsigned int i;
for (i = 0; i < output->wm_num; i++) {
if (output->buf[1])
addr = output->buf[1]->addr[i];
else
addr = 0;
vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], addr);
if (sync)
vfe->ops_gen1->bus_reload_wm(vfe, output->wm_idx[i]);
}
}
static void vfe_buf_update_wm_on_next(struct vfe_device *vfe,
struct vfe_output *output)
{
switch (output->state) {
case VFE_OUTPUT_CONTINUOUS:
vfe_output_frame_drop(vfe, output, 3);
break;
case VFE_OUTPUT_SINGLE:
default:
dev_err_ratelimited(vfe->camss->dev,
"Next buf in wrong state! %d\n",
output->state);
break;
}
}
static void vfe_buf_update_wm_on_last(struct vfe_device *vfe,
struct vfe_output *output)
{
switch (output->state) {
case VFE_OUTPUT_CONTINUOUS:
output->state = VFE_OUTPUT_SINGLE;
vfe_output_frame_drop(vfe, output, 1);
break;
case VFE_OUTPUT_SINGLE:
output->state = VFE_OUTPUT_STOPPING;
vfe_output_frame_drop(vfe, output, 0);
break;
default:
dev_err_ratelimited(vfe->camss->dev,
"Last buff in wrong state! %d\n",
output->state);
break;
}
}
static void vfe_buf_update_wm_on_new(struct vfe_device *vfe,
struct vfe_output *output,
struct camss_buffer *new_buf,
struct vfe_line *line)
{
int inactive_idx;
switch (output->state) {
case VFE_OUTPUT_SINGLE:
inactive_idx = !output->gen1.active_buf;
if (!output->buf[inactive_idx]) {
output->buf[inactive_idx] = new_buf;
if (inactive_idx)
vfe_output_update_pong_addr(vfe, output, 0, line);
else
vfe_output_update_ping_addr(vfe, output, 0, line);
vfe_output_frame_drop(vfe, output, 3);
output->state = VFE_OUTPUT_CONTINUOUS;
} else {
vfe_buf_add_pending(output, new_buf);
dev_err_ratelimited(vfe->camss->dev,
"Inactive buffer is busy\n");
}
break;
case VFE_OUTPUT_IDLE:
if (!output->buf[0]) {
output->buf[0] = new_buf;
vfe_output_init_addrs(vfe, output, 1, line);
vfe_output_frame_drop(vfe, output, 1);
output->state = VFE_OUTPUT_SINGLE;
} else {
vfe_buf_add_pending(output, new_buf);
dev_err_ratelimited(vfe->camss->dev,
"Output idle with buffer set!\n");
}
break;
case VFE_OUTPUT_CONTINUOUS:
default:
vfe_buf_add_pending(output, new_buf);
break;
}
}
/*
* vfe_isr_halt_ack - Process halt ack
* @vfe: VFE Device
*/
static void vfe_isr_halt_ack(struct vfe_device *vfe)
{
complete(&vfe->halt_complete);
vfe->ops_gen1->halt_clear(vfe);
}
/*
* vfe_isr_sof - Process start of frame interrupt
* @vfe: VFE Device
* @line_id: VFE line
*/
static void vfe_isr_sof(struct vfe_device *vfe, enum vfe_line_id line_id)
{
struct vfe_output *output;
unsigned long flags;
spin_lock_irqsave(&vfe->output_lock, flags);
output = &vfe->line[line_id].output;
if (output->gen1.wait_sof) {
output->gen1.wait_sof = 0;
complete(&output->sof);
}
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
/*
* vfe_isr_reg_update - Process reg update interrupt
* @vfe: VFE Device
* @line_id: VFE line
*/
static void vfe_isr_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
struct vfe_output *output;
struct vfe_line *line = &vfe->line[line_id];
unsigned long flags;
spin_lock_irqsave(&vfe->output_lock, flags);
vfe->ops->reg_update_clear(vfe, line_id);
output = &line->output;
if (output->wait_reg_update) {
output->wait_reg_update = 0;
complete(&output->reg_update);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return;
}
if (output->state == VFE_OUTPUT_STOPPING) {
/* Release last buffer when hw is idle */
if (output->last_buffer) {
vb2_buffer_done(&output->last_buffer->vb.vb2_buf,
VB2_BUF_STATE_DONE);
output->last_buffer = NULL;
}
output->state = VFE_OUTPUT_IDLE;
/*
 * Buffers received in the stopping state are queued in the dma pending
 * queue; start the next capture here.
 */
output->buf[0] = vfe_buf_get_pending(output);
output->buf[1] = vfe_buf_get_pending(output);
if (!output->buf[0] && output->buf[1]) {
output->buf[0] = output->buf[1];
output->buf[1] = NULL;
}
if (output->buf[0])
output->state = VFE_OUTPUT_SINGLE;
if (output->buf[1])
output->state = VFE_OUTPUT_CONTINUOUS;
switch (output->state) {
case VFE_OUTPUT_SINGLE:
vfe_output_frame_drop(vfe, output, 2);
break;
case VFE_OUTPUT_CONTINUOUS:
vfe_output_frame_drop(vfe, output, 3);
break;
default:
vfe_output_frame_drop(vfe, output, 0);
break;
}
vfe_output_init_addrs(vfe, output, 1, &vfe->line[line_id]);
}
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
/*
* vfe_isr_wm_done - Process write master done interrupt
* @vfe: VFE Device
* @wm: Write master id
*/
static void vfe_isr_wm_done(struct vfe_device *vfe, u8 wm)
{
struct camss_buffer *ready_buf;
struct vfe_output *output;
dma_addr_t *new_addr;
unsigned long flags;
u32 active_index;
u64 ts = ktime_get_ns();
unsigned int i;
active_index = vfe->ops_gen1->wm_get_ping_pong_status(vfe, wm);
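/*
 * The write master alternates between the ping and pong addresses; the
 * buffer at the now inactive index has just been completed by hardware.
 */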
spin_lock_irqsave(&vfe->output_lock, flags);
if (vfe->wm_output_map[wm] == VFE_LINE_NONE) {
dev_err_ratelimited(vfe->camss->dev,
"Received wm done for unmapped index\n");
goto out_unlock;
}
output = &vfe->line[vfe->wm_output_map[wm]].output;
if (output->gen1.active_buf == active_index && 0) { /* disabled check */
dev_err_ratelimited(vfe->camss->dev,
"Active buffer mismatch!\n");
goto out_unlock;
}
output->gen1.active_buf = active_index;
ready_buf = output->buf[!active_index];
if (!ready_buf) {
dev_err_ratelimited(vfe->camss->dev,
"Missing ready buf %d %d!\n",
!active_index, output->state);
goto out_unlock;
}
ready_buf->vb.vb2_buf.timestamp = ts;
ready_buf->vb.sequence = output->sequence++;
/* Get next buffer */
output->buf[!active_index] = vfe_buf_get_pending(output);
if (!output->buf[!active_index]) {
/* No next buffer - set same address */
new_addr = ready_buf->addr;
vfe_buf_update_wm_on_last(vfe, output);
} else {
new_addr = output->buf[!active_index]->addr;
vfe_buf_update_wm_on_next(vfe, output);
}
if (active_index)
for (i = 0; i < output->wm_num; i++)
vfe->ops_gen1->wm_set_ping_addr(vfe, output->wm_idx[i], new_addr[i]);
else
for (i = 0; i < output->wm_num; i++)
vfe->ops_gen1->wm_set_pong_addr(vfe, output->wm_idx[i], new_addr[i]);
spin_unlock_irqrestore(&vfe->output_lock, flags);
if (output->state == VFE_OUTPUT_STOPPING)
output->last_buffer = ready_buf;
else
vb2_buffer_done(&ready_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
return;
out_unlock:
spin_unlock_irqrestore(&vfe->output_lock, flags);
}
/*
* vfe_queue_buffer - Add empty buffer
* @vid: Video device structure
* @buf: Buffer to be enqueued
*
* Add an empty buffer - depending on the current number of buffers it will be
* put in pending buffer queue or directly given to the hardware to be filled.
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_queue_buffer(struct camss_video *vid, struct camss_buffer *buf)
{
struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
output = &line->output;
spin_lock_irqsave(&vfe->output_lock, flags);
vfe_buf_update_wm_on_new(vfe, output, buf, line);
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
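/*
 * CALC_WORD(width, M, N) computes DIV_ROUND_UP(width * M, N). A bus word
 * is 8 bytes: planar NV formats carry 1 byte per pixel per plane (M=1,
 * N=8), packed YUYV variants 2 bytes per pixel (M=2, N=8).
 */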
#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
int vfe_word_per_line(u32 format, u32 width)
{
int val = 0;
switch (format) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
val = CALC_WORD(width, 1, 8);
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_VYUY:
val = CALC_WORD(width, 2, 8);
break;
}
return val;
}
const struct vfe_isr_ops vfe_isr_ops_gen1 = {
.reset_ack = vfe_isr_reset_ack,
.halt_ack = vfe_isr_halt_ack,
.reg_update = vfe_isr_reg_update,
.sof = vfe_isr_sof,
.comp_done = vfe_isr_comp_done,
.wm_done = vfe_isr_wm_done,
};
const struct camss_video_ops vfe_video_ops_gen1 = {
.queue_buffer = vfe_queue_buffer,
.flush_buffers = vfe_flush_buffers,
};
| linux-master | drivers/media/platform/qcom/camss/camss-vfe-gen1.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-csiphy-3ph-1-0.c
*
* Qualcomm MSM Camera Subsystem - CSIPHY Module 3phase v1.0
*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2016-2018 Linaro Ltd.
*/
#include "camss.h"
#include "camss-csiphy.h"
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6))
#define CSIPHY_3PH_LNn_CFG2(n) (0x004 + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG2_LP_REC_EN_INT BIT(3)
#define CSIPHY_3PH_LNn_CFG3(n) (0x008 + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG4(n) (0x00c + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS 0xa4
#define CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS_660 0xa5
#define CSIPHY_3PH_LNn_CFG5(n) (0x010 + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG5_T_HS_DTERM 0x02
#define CSIPHY_3PH_LNn_CFG5_HS_REC_EQ_FQ_INT 0x50
#define CSIPHY_3PH_LNn_TEST_IMP(n) (0x01c + 0x100 * (n))
#define CSIPHY_3PH_LNn_TEST_IMP_HS_TERM_IMP 0xa
#define CSIPHY_3PH_LNn_MISC1(n) (0x028 + 0x100 * (n))
#define CSIPHY_3PH_LNn_MISC1_IS_CLKLANE BIT(2)
#define CSIPHY_3PH_LNn_CFG6(n) (0x02c + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG6_SWI_FORCE_INIT_EXIT BIT(0)
#define CSIPHY_3PH_LNn_CFG7(n) (0x030 + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG7_SWI_T_INIT 0x2
#define CSIPHY_3PH_LNn_CFG8(n) (0x034 + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG8_SWI_SKIP_WAKEUP BIT(0)
#define CSIPHY_3PH_LNn_CFG8_SKEW_FILTER_ENABLE BIT(1)
#define CSIPHY_3PH_LNn_CFG9(n) (0x038 + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG9_SWI_T_WAKEUP 0x1
#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15(n) (0x03c + 0x100 * (n))
#define CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL 0xb8
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(n) (0x800 + 0x4 * (n))
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE BIT(7)
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B BIT(0)
#define CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID BIT(1)
#define CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(n) (0x8b0 + 0x4 * (n))
#define CSIPHY_DEFAULT_PARAMS 0
#define CSIPHY_LANE_ENABLE 1
#define CSIPHY_SETTLE_CNT_LOWER_BYTE 2
#define CSIPHY_SETTLE_CNT_HIGHER_BYTE 3
#define CSIPHY_DNP_PARAMS 4
#define CSIPHY_2PH_REGS 5
#define CSIPHY_3PH_REGS 6
struct csiphy_reg_t {
s32 reg_addr;
s32 reg_data;
s32 delay;
u32 csiphy_param_type;
};
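/*
 * The per-SoC tables below hold one register list per PHY lane; the five
 * rows presumably cover four data lanes plus the clock lane, matching the
 * five iterations in csiphy_gen2_config_lanes(). At configuration time,
 * CSIPHY_SETTLE_CNT_LOWER_BYTE entries are patched with the computed settle
 * count and CSIPHY_DNP_PARAMS ("do not program") entries are skipped.
 */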
/* GEN2 1.0 2PH */
static const struct csiphy_reg_t lane_regs_sdm845[5][14] = {
{
{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
},
};
/* GEN2 1.2.1 2PH */
static const struct csiphy_reg_t lane_regs_sm8250[5][20] = {
{
{0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0900, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0908, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0034, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0010, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x001C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0008, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x0000, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0024, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
{0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0C80, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0C88, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0734, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0710, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x071C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0708, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x070c, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0724, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
{0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0A00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0A08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0234, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0210, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x021C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0208, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x0200, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0224, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
{0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0B00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0B08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0434, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0410, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x041C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0408, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0424, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
},
{
{0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0C00, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0C08, 0x10, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0634, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0610, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x061C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0608, 0x10, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
{0x0600, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x060c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0624, 0x00, 0x00, CSIPHY_DNP_PARAMS},
{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
{0x0884, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
},
};
static void csiphy_hw_version_read(struct csiphy_device *csiphy,
struct device *dev)
{
u32 hw_version;
writel(CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_SHOW_REV_ID,
csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
hw_version = readl_relaxed(csiphy->base +
CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(12));
hw_version |= readl_relaxed(csiphy->base +
CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(13)) << 8;
hw_version |= readl_relaxed(csiphy->base +
CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(14)) << 16;
hw_version |= readl_relaxed(csiphy->base +
CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(15)) << 24;
dev_dbg(dev, "CSIPHY 3PH HW Version = 0x%08x\n", hw_version);
}
/*
* csiphy_reset - Perform software reset on CSIPHY module
* @csiphy: CSIPHY device
*/
static void csiphy_reset(struct csiphy_device *csiphy)
{
writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
usleep_range(5000, 8000);
writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
}
static irqreturn_t csiphy_isr(int irq, void *dev)
{
struct csiphy_device *csiphy = dev;
int i;
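	/*
	 * Each STATUS register i appears to pair with clear register i + 22:
	 * latch the pending bits into the clear registers, pulse the global
	 * clear via CTRL 10, then zero the clear registers again.
	 */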
for (i = 0; i < 11; i++) {
int c = i + 22;
u8 val = readl_relaxed(csiphy->base +
CSIPHY_3PH_CMN_CSI_COMMON_STATUSn(i));
writel_relaxed(val, csiphy->base +
CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(c));
}
writel_relaxed(0x1, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
writel_relaxed(0x0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(10));
for (i = 22; i < 33; i++)
writel_relaxed(0x0, csiphy->base +
CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i));
return IRQ_HANDLED;
}
/*
* csiphy_settle_cnt_calc - Calculate settle count value
*
* Helper function to calculate settle count value. This is
* based on the CSI2 T_hs_settle parameter which in turn
* is calculated based on the CSI2 transmitter link frequency.
*
* Return settle count value or 0 if the CSI2 link frequency
* is not available
*/
static u8 csiphy_settle_cnt_calc(s64 link_freq, u32 timer_clk_rate)
{
u32 ui; /* ps */
u32 timer_period; /* ps */
u32 t_hs_prepare_max; /* ps */
u32 t_hs_settle; /* ps */
u8 settle_cnt;
if (link_freq <= 0)
return 0;
ui = div_u64(1000000000000LL, link_freq);
ui /= 2;
t_hs_prepare_max = 85000 + 6 * ui;
t_hs_settle = t_hs_prepare_max;
timer_period = div_u64(1000000000000LL, timer_clk_rate);
settle_cnt = t_hs_settle / timer_period - 6;
return settle_cnt;
}
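/*
 * Editor's note, hypothetical worked example: for a 480 MHz link and a
 * 200 MHz timer clock, ui = 1000000000000 / 480000000 / 2 = 1041 ps,
 * t_hs_settle = 85000 + 6 * 1041 = 91246 ps, timer_period = 5000 ps and
 * settle_cnt = 91246 / 5000 - 6 = 12.
 */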
static void csiphy_gen1_config_lanes(struct csiphy_device *csiphy,
struct csiphy_config *cfg,
u8 settle_cnt)
{
struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
int i, l = 0;
u8 val;
for (i = 0; i <= c->num_data; i++) {
if (i == c->num_data)
l = 7;
else
l = c->data[i].pos * 2;
val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG;
val |= 0x17;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l));
val = CSIPHY_3PH_LNn_CFG2_LP_REC_EN_INT;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG2(l));
val = settle_cnt;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG3(l));
val = CSIPHY_3PH_LNn_CFG5_T_HS_DTERM |
CSIPHY_3PH_LNn_CFG5_HS_REC_EQ_FQ_INT;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG5(l));
val = CSIPHY_3PH_LNn_CFG6_SWI_FORCE_INIT_EXIT;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG6(l));
val = CSIPHY_3PH_LNn_CFG7_SWI_T_INIT;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG7(l));
val = CSIPHY_3PH_LNn_CFG8_SWI_SKIP_WAKEUP |
CSIPHY_3PH_LNn_CFG8_SKEW_FILTER_ENABLE;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG8(l));
val = CSIPHY_3PH_LNn_CFG9_SWI_T_WAKEUP;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG9(l));
val = CSIPHY_3PH_LNn_TEST_IMP_HS_TERM_IMP;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_TEST_IMP(l));
val = CSIPHY_3PH_LNn_CSI_LANE_CTRL15_SWI_SOT_SYMBOL;
writel_relaxed(val, csiphy->base +
CSIPHY_3PH_LNn_CSI_LANE_CTRL15(l));
}
val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l));
if (csiphy->camss->version == CAMSS_660)
val = CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS_660;
else
val = CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG4(l));
val = CSIPHY_3PH_LNn_MISC1_IS_CLKLANE;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_MISC1(l));
}
static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
u8 settle_cnt)
{
const struct csiphy_reg_t *r;
int i, l, array_size;
u32 val;
switch (csiphy->camss->version) {
case CAMSS_845:
r = &lane_regs_sdm845[0][0];
array_size = ARRAY_SIZE(lane_regs_sdm845[0]);
break;
case CAMSS_8250:
r = &lane_regs_sm8250[0][0];
array_size = ARRAY_SIZE(lane_regs_sm8250[0]);
break;
default:
WARN(1, "unknown cspi version\n");
return;
}
for (l = 0; l < 5; l++) {
for (i = 0; i < array_size; i++, r++) {
switch (r->csiphy_param_type) {
case CSIPHY_SETTLE_CNT_LOWER_BYTE:
val = settle_cnt & 0xff;
break;
case CSIPHY_DNP_PARAMS:
continue;
default:
val = r->reg_data;
break;
}
writel_relaxed(val, csiphy->base + r->reg_addr);
}
}
}
static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg)
{
u8 lane_mask;
int i;
lane_mask = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
for (i = 0; i < lane_cfg->num_data; i++)
lane_mask |= 1 << lane_cfg->data[i].pos;
return lane_mask;
}
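/*
 * Editor's note, hypothetical lane layout: with two data lanes at positions
 * 0 and 1 the result is BIT(7) | BIT(1) | BIT(0) = 0x83, i.e. the clock
 * enable bit plus one bit per data lane position.
 */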
static void csiphy_lanes_enable(struct csiphy_device *csiphy,
struct csiphy_config *cfg,
s64 link_freq, u8 lane_mask)
{
struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
bool is_gen2 = (csiphy->camss->version == CAMSS_845 ||
csiphy->camss->version == CAMSS_8250);
u8 settle_cnt;
u8 val;
int i;
settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate);
val = is_gen2 ? BIT(7) : CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
for (i = 0; i < c->num_data; i++)
val |= BIT(c->data[i].pos * 2);
writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL6_COMMON_PWRDN_B;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
val = 0x02;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(7));
val = 0x00;
writel_relaxed(val, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(0));
if (is_gen2)
csiphy_gen2_config_lanes(csiphy, settle_cnt);
else
csiphy_gen1_config_lanes(csiphy, cfg, settle_cnt);
/* IRQ_MASK registers - disable all interrupts */
for (i = 11; i < 22; i++)
writel_relaxed(0, csiphy->base + CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(i));
}
static void csiphy_lanes_disable(struct csiphy_device *csiphy,
struct csiphy_config *cfg)
{
writel_relaxed(0, csiphy->base +
CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(5));
writel_relaxed(0, csiphy->base +
CSIPHY_3PH_CMN_CSI_COMMON_CTRLn(6));
}
const struct csiphy_hw_ops csiphy_ops_3ph_1_0 = {
.get_lane_mask = csiphy_get_lane_mask,
.hw_version_read = csiphy_hw_version_read,
.reset = csiphy_reset,
.lanes_enable = csiphy_lanes_enable,
.lanes_disable = csiphy_lanes_disable,
.isr = csiphy_isr,
};
| linux-master | drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-vfe.c
*
* Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock_types.h>
#include <linux/spinlock.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include "camss-vfe.h"
#include "camss.h"
#define MSM_VFE_NAME "msm_vfe"
/* VFE reset timeout */
#define VFE_RESET_TIMEOUT_MS 50
#define SCALER_RATIO_MAX 16
struct vfe_format {
u32 code;
u8 bpp;
};
static const struct vfe_format formats_rdi_8x16[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
{ MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
{ MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
{ MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
{ MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
{ MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
{ MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
{ MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
{ MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
{ MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
{ MEDIA_BUS_FMT_Y10_1X10, 10 },
};
static const struct vfe_format formats_pix_8x16[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
};
static const struct vfe_format formats_rdi_8x96[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
{ MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
{ MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
{ MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
{ MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
{ MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
{ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, 16 },
{ MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
{ MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
{ MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
{ MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
{ MEDIA_BUS_FMT_SBGGR14_1X14, 14 },
{ MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
{ MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
{ MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
{ MEDIA_BUS_FMT_Y10_1X10, 10 },
{ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, 16 },
};
static const struct vfe_format formats_pix_8x96[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
};
static const struct vfe_format formats_rdi_845[] = {
{ MEDIA_BUS_FMT_UYVY8_2X8, 8 },
{ MEDIA_BUS_FMT_VYUY8_2X8, 8 },
{ MEDIA_BUS_FMT_YUYV8_2X8, 8 },
{ MEDIA_BUS_FMT_YVYU8_2X8, 8 },
{ MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
{ MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
{ MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
{ MEDIA_BUS_FMT_SRGGB8_1X8, 8 },
{ MEDIA_BUS_FMT_SBGGR10_1X10, 10 },
{ MEDIA_BUS_FMT_SGBRG10_1X10, 10 },
{ MEDIA_BUS_FMT_SGRBG10_1X10, 10 },
{ MEDIA_BUS_FMT_SRGGB10_1X10, 10 },
{ MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, 16 },
{ MEDIA_BUS_FMT_SBGGR12_1X12, 12 },
{ MEDIA_BUS_FMT_SGBRG12_1X12, 12 },
{ MEDIA_BUS_FMT_SGRBG12_1X12, 12 },
{ MEDIA_BUS_FMT_SRGGB12_1X12, 12 },
{ MEDIA_BUS_FMT_SBGGR14_1X14, 14 },
{ MEDIA_BUS_FMT_SGBRG14_1X14, 14 },
{ MEDIA_BUS_FMT_SGRBG14_1X14, 14 },
{ MEDIA_BUS_FMT_SRGGB14_1X14, 14 },
{ MEDIA_BUS_FMT_Y8_1X8, 8 },
{ MEDIA_BUS_FMT_Y10_1X10, 10 },
{ MEDIA_BUS_FMT_Y10_2X8_PADHI_LE, 16 },
};
/*
* vfe_get_bpp - map media bus format to bits per pixel
* @formats: supported media bus formats array
* @nformats: size of @formats array
* @code: media bus format code
*
* Return number of bits per pixel
*/
static u8 vfe_get_bpp(const struct vfe_format *formats,
unsigned int nformats, u32 code)
{
unsigned int i;
for (i = 0; i < nformats; i++)
if (code == formats[i].code)
return formats[i].bpp;
WARN(1, "Unknown format\n");
return formats[0].bpp;
}
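/*
 * vfe_find_code - Pick a source pad media bus code from a candidate list
 *
 * When @req_code is non-zero it is validated against @code and returned on a
 * match; otherwise the entry at @index is enumerated, with 0 returned once
 * @index runs past the end. A requested code that is not in the list falls
 * back to the first (preferred) candidate.
 */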
static u32 vfe_find_code(u32 *code, unsigned int n_code,
unsigned int index, u32 req_code)
{
int i;
if (!req_code && (index >= n_code))
return 0;
for (i = 0; i < n_code; i++)
if (req_code) {
if (req_code == code[i])
return req_code;
} else {
if (i == index)
return code[i];
}
return code[0];
}
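/*
 * On 8x16 a YUV sink code may only convert to its own 1_5X8 (subsampled)
 * variant, while 8x96 and later parts can additionally swap the chroma order
 * between the four 2X8 permutations; any other sink code passes through
 * unchanged at index 0.
 */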
static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
unsigned int index, u32 src_req_code)
{
struct vfe_device *vfe = to_vfe(line);
if (vfe->camss->version == CAMSS_8x16)
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_2X8:
{
u32 src_code[] = {
MEDIA_BUS_FMT_YUYV8_2X8,
MEDIA_BUS_FMT_YUYV8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
case MEDIA_BUS_FMT_YVYU8_2X8:
{
u32 src_code[] = {
MEDIA_BUS_FMT_YVYU8_2X8,
MEDIA_BUS_FMT_YVYU8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
case MEDIA_BUS_FMT_UYVY8_2X8:
{
u32 src_code[] = {
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_UYVY8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
case MEDIA_BUS_FMT_VYUY8_2X8:
{
u32 src_code[] = {
MEDIA_BUS_FMT_VYUY8_2X8,
MEDIA_BUS_FMT_VYUY8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
default:
if (index > 0)
return 0;
return sink_code;
}
else if (vfe->camss->version == CAMSS_8x96 ||
vfe->camss->version == CAMSS_660 ||
vfe->camss->version == CAMSS_845 ||
vfe->camss->version == CAMSS_8250)
switch (sink_code) {
case MEDIA_BUS_FMT_YUYV8_2X8:
{
u32 src_code[] = {
MEDIA_BUS_FMT_YUYV8_2X8,
MEDIA_BUS_FMT_YVYU8_2X8,
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_VYUY8_2X8,
MEDIA_BUS_FMT_YUYV8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
case MEDIA_BUS_FMT_YVYU8_2X8:
{
u32 src_code[] = {
MEDIA_BUS_FMT_YVYU8_2X8,
MEDIA_BUS_FMT_YUYV8_2X8,
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_VYUY8_2X8,
MEDIA_BUS_FMT_YVYU8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
case MEDIA_BUS_FMT_UYVY8_2X8:
{
u32 src_code[] = {
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_YUYV8_2X8,
MEDIA_BUS_FMT_YVYU8_2X8,
MEDIA_BUS_FMT_VYUY8_2X8,
MEDIA_BUS_FMT_UYVY8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
case MEDIA_BUS_FMT_VYUY8_2X8:
{
u32 src_code[] = {
MEDIA_BUS_FMT_VYUY8_2X8,
MEDIA_BUS_FMT_YUYV8_2X8,
MEDIA_BUS_FMT_YVYU8_2X8,
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_VYUY8_1_5X8,
};
return vfe_find_code(src_code, ARRAY_SIZE(src_code),
index, src_req_code);
}
default:
if (index > 0)
return 0;
return sink_code;
}
else
return 0;
}
int vfe_reset(struct vfe_device *vfe)
{
unsigned long time;
reinit_completion(&vfe->reset_complete);
vfe->ops->global_reset(vfe);
time = wait_for_completion_timeout(&vfe->reset_complete,
msecs_to_jiffies(VFE_RESET_TIMEOUT_MS));
if (!time) {
dev_err(vfe->camss->dev, "VFE reset timeout\n");
return -EIO;
}
return 0;
}
static void vfe_init_outputs(struct vfe_device *vfe)
{
int i;
for (i = 0; i < vfe->line_num; i++) {
struct vfe_output *output = &vfe->line[i].output;
output->state = VFE_OUTPUT_OFF;
output->buf[0] = NULL;
output->buf[1] = NULL;
INIT_LIST_HEAD(&output->pending_bufs);
}
}
static void vfe_reset_output_maps(struct vfe_device *vfe)
{
int i;
for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
vfe->wm_output_map[i] = VFE_LINE_NONE;
}
int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id)
{
int ret = -EBUSY;
int i;
for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++) {
if (vfe->wm_output_map[i] == VFE_LINE_NONE) {
vfe->wm_output_map[i] = line_id;
ret = i;
break;
}
}
return ret;
}
int vfe_release_wm(struct vfe_device *vfe, u8 wm)
{
if (wm >= ARRAY_SIZE(vfe->wm_output_map))
return -EINVAL;
vfe->wm_output_map[wm] = VFE_LINE_NONE;
return 0;
}
struct camss_buffer *vfe_buf_get_pending(struct vfe_output *output)
{
struct camss_buffer *buffer = NULL;
if (!list_empty(&output->pending_bufs)) {
buffer = list_first_entry(&output->pending_bufs,
struct camss_buffer,
queue);
list_del(&buffer->queue);
}
return buffer;
}
void vfe_buf_add_pending(struct vfe_output *output,
struct camss_buffer *buffer)
{
INIT_LIST_HEAD(&buffer->queue);
list_add_tail(&buffer->queue, &output->pending_bufs);
}
/*
* vfe_buf_flush_pending - Flush all pending buffers.
* @output: VFE output
* @state: vb2 buffer state
*/
static void vfe_buf_flush_pending(struct vfe_output *output,
enum vb2_buffer_state state)
{
struct camss_buffer *buf;
struct camss_buffer *t;
list_for_each_entry_safe(buf, t, &output->pending_bufs, queue) {
vb2_buffer_done(&buf->vb.vb2_buf, state);
list_del(&buf->queue);
}
}
int vfe_put_output(struct vfe_line *line)
{
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output = &line->output;
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&vfe->output_lock, flags);
for (i = 0; i < output->wm_num; i++)
vfe_release_wm(vfe, output->wm_idx[i]);
output->state = VFE_OUTPUT_OFF;
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
/**
* vfe_isr_comp_done() - Process composite image done interrupt
* @vfe: VFE Device
* @comp: Composite image id
*/
void vfe_isr_comp_done(struct vfe_device *vfe, u8 comp)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(vfe->wm_output_map); i++)
if (vfe->wm_output_map[i] == VFE_LINE_PIX) {
vfe->isr_ops.wm_done(vfe, i);
break;
}
}
void vfe_isr_reset_ack(struct vfe_device *vfe)
{
complete(&vfe->reset_complete);
}
/*
* vfe_set_clock_rates - Calculate and set clock rates on VFE module
* @vfe: VFE device
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_set_clock_rates(struct vfe_device *vfe)
{
struct device *dev = vfe->camss->dev;
u64 pixel_clock[VFE_LINE_NUM_MAX];
int i, j;
int ret;
for (i = VFE_LINE_RDI0; i < vfe->line_num; i++) {
ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity,
&pixel_clock[i]);
if (ret)
pixel_clock[i] = 0;
}
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
if (!strcmp(clock->name, "vfe0") ||
!strcmp(clock->name, "vfe1") ||
!strcmp(clock->name, "vfe_lite")) {
u64 min_rate = 0;
long rate;
for (j = VFE_LINE_RDI0; j < vfe->line_num; j++) {
u32 tmp;
u8 bpp;
if (j == VFE_LINE_PIX) {
tmp = pixel_clock[j];
} else {
struct vfe_line *l = &vfe->line[j];
bpp = vfe_get_bpp(l->formats,
l->nformats,
l->fmt[MSM_VFE_PAD_SINK].code);
tmp = pixel_clock[j] * bpp / 64;
}
if (min_rate < tmp)
min_rate = tmp;
}
camss_add_clock_margin(&min_rate);
for (j = 0; j < clock->nfreqs; j++)
if (min_rate < clock->freq[j])
break;
if (j == clock->nfreqs) {
				dev_err(dev,
					"Pixel clock is too high for VFE\n");
return -EINVAL;
}
			/*
			 * If the sensor pixel clock is not available, set the
			 * highest possible VFE clock rate.
			 */
if (min_rate == 0)
j = clock->nfreqs - 1;
rate = clk_round_rate(clock->clk, clock->freq[j]);
if (rate < 0) {
dev_err(dev, "clk round rate failed: %ld\n",
rate);
return -EINVAL;
}
ret = clk_set_rate(clock->clk, rate);
if (ret < 0) {
dev_err(dev, "clk set rate failed: %d\n", ret);
return ret;
}
}
}
return 0;
}
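/*
 * Editor's note, hypothetical worked example: an RDI line carrying 10-bit
 * raw data at a 200 MHz pixel clock needs at least 200000000 * 10 / 64 =
 * 31250000 Hz (the divisor presumably reflecting a 64-bit bus). A clock
 * margin is then added and the lowest sufficient entry in the clock
 * frequency table is selected.
 */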
/*
* vfe_check_clock_rates - Check current clock rates on VFE module
* @vfe: VFE device
*
* Return 0 if current clock rates are suitable for a new pipeline
* or a negative error code otherwise
*/
static int vfe_check_clock_rates(struct vfe_device *vfe)
{
u64 pixel_clock[VFE_LINE_NUM_MAX];
int i, j;
int ret;
for (i = VFE_LINE_RDI0; i < vfe->line_num; i++) {
ret = camss_get_pixel_clock(&vfe->line[i].subdev.entity,
&pixel_clock[i]);
if (ret)
pixel_clock[i] = 0;
}
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
if (!strcmp(clock->name, "vfe0") ||
!strcmp(clock->name, "vfe1")) {
u64 min_rate = 0;
unsigned long rate;
for (j = VFE_LINE_RDI0; j < vfe->line_num; j++) {
u32 tmp;
u8 bpp;
if (j == VFE_LINE_PIX) {
tmp = pixel_clock[j];
} else {
struct vfe_line *l = &vfe->line[j];
bpp = vfe_get_bpp(l->formats,
l->nformats,
l->fmt[MSM_VFE_PAD_SINK].code);
tmp = pixel_clock[j] * bpp / 64;
}
if (min_rate < tmp)
min_rate = tmp;
}
camss_add_clock_margin(&min_rate);
rate = clk_get_rate(clock->clk);
if (rate < min_rate)
return -EBUSY;
}
}
return 0;
}
/*
* vfe_get - Power up and reset VFE module
* @vfe: VFE Device
*
* Return 0 on success or a negative error code otherwise
*/
int vfe_get(struct vfe_device *vfe)
{
int ret;
mutex_lock(&vfe->power_lock);
if (vfe->power_count == 0) {
ret = vfe->ops->pm_domain_on(vfe);
if (ret < 0)
goto error_pm_domain;
ret = pm_runtime_resume_and_get(vfe->camss->dev);
if (ret < 0)
goto error_domain_off;
ret = vfe_set_clock_rates(vfe);
if (ret < 0)
goto error_pm_runtime_get;
ret = camss_enable_clocks(vfe->nclocks, vfe->clock,
vfe->camss->dev);
if (ret < 0)
goto error_pm_runtime_get;
ret = vfe_reset(vfe);
if (ret < 0)
goto error_reset;
vfe_reset_output_maps(vfe);
vfe_init_outputs(vfe);
vfe->ops->hw_version(vfe);
} else {
ret = vfe_check_clock_rates(vfe);
if (ret < 0)
goto error_pm_runtime_get;
}
vfe->power_count++;
mutex_unlock(&vfe->power_lock);
return 0;
error_reset:
camss_disable_clocks(vfe->nclocks, vfe->clock);
error_pm_runtime_get:
pm_runtime_put_sync(vfe->camss->dev);
error_domain_off:
vfe->ops->pm_domain_off(vfe);
error_pm_domain:
mutex_unlock(&vfe->power_lock);
return ret;
}
/*
* vfe_put - Power down VFE module
* @vfe: VFE Device
*/
void vfe_put(struct vfe_device *vfe)
{
mutex_lock(&vfe->power_lock);
if (vfe->power_count == 0) {
dev_err(vfe->camss->dev, "vfe power off on power_count == 0\n");
goto exit;
} else if (vfe->power_count == 1) {
if (vfe->was_streaming) {
vfe->was_streaming = 0;
vfe->ops->vfe_halt(vfe);
}
camss_disable_clocks(vfe->nclocks, vfe->clock);
pm_runtime_put_sync(vfe->camss->dev);
vfe->ops->pm_domain_off(vfe);
}
vfe->power_count--;
exit:
mutex_unlock(&vfe->power_lock);
}
/*
* vfe_flush_buffers - Return all vb2 buffers
* @vid: Video device structure
* @state: vb2 buffer state of the returned buffers
*
 * Return all buffers to vb2. This includes pending buffers still on the
 * queue as well as buffers already given to the hardware but not yet filled.
*
* Return 0 on success or a negative error code otherwise
*/
int vfe_flush_buffers(struct camss_video *vid,
enum vb2_buffer_state state)
{
struct vfe_line *line = container_of(vid, struct vfe_line, video_out);
struct vfe_device *vfe = to_vfe(line);
struct vfe_output *output;
unsigned long flags;
output = &line->output;
spin_lock_irqsave(&vfe->output_lock, flags);
vfe_buf_flush_pending(output, state);
if (output->buf[0])
vb2_buffer_done(&output->buf[0]->vb.vb2_buf, state);
if (output->buf[1])
vb2_buffer_done(&output->buf[1]->vb.vb2_buf, state);
if (output->last_buffer) {
vb2_buffer_done(&output->last_buffer->vb.vb2_buf, state);
output->last_buffer = NULL;
}
spin_unlock_irqrestore(&vfe->output_lock, flags);
return 0;
}
/*
* vfe_set_power - Power on/off VFE module
* @sd: VFE V4L2 subdevice
* @on: Requested power state
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_set_power(struct v4l2_subdev *sd, int on)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
struct vfe_device *vfe = to_vfe(line);
int ret;
if (on) {
ret = vfe_get(vfe);
if (ret < 0)
return ret;
} else {
vfe_put(vfe);
}
return 0;
}
/*
* vfe_set_stream - Enable/disable streaming on VFE module
* @sd: VFE V4L2 subdevice
* @enable: Requested streaming state
*
* Main configuration of VFE module is triggered here.
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_set_stream(struct v4l2_subdev *sd, int enable)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
struct vfe_device *vfe = to_vfe(line);
int ret;
if (enable) {
line->output.state = VFE_OUTPUT_RESERVED;
ret = vfe->ops->vfe_enable(line);
if (ret < 0)
dev_err(vfe->camss->dev,
"Failed to enable vfe outputs\n");
} else {
ret = vfe->ops->vfe_disable(line);
if (ret < 0)
dev_err(vfe->camss->dev,
"Failed to disable vfe outputs\n");
}
return ret;
}
/*
* __vfe_get_format - Get pointer to format structure
* @line: VFE line
 * @sd_state: V4L2 subdev state
* @pad: pad from which format is requested
* @which: TRY or ACTIVE format
*
* Return pointer to TRY or ACTIVE format structure
*/
static struct v4l2_mbus_framefmt *
__vfe_get_format(struct vfe_line *line,
struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&line->subdev, sd_state,
pad);
return &line->fmt[pad];
}
/*
* __vfe_get_compose - Get pointer to compose selection structure
* @line: VFE line
 * @sd_state: V4L2 subdev state
* @which: TRY or ACTIVE format
*
* Return pointer to TRY or ACTIVE compose rectangle structure
*/
static struct v4l2_rect *
__vfe_get_compose(struct vfe_line *line,
struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_compose(&line->subdev, sd_state,
MSM_VFE_PAD_SINK);
return &line->compose;
}
/*
* __vfe_get_crop - Get pointer to crop selection structure
* @line: VFE line
 * @sd_state: V4L2 subdev state
* @which: TRY or ACTIVE format
*
* Return pointer to TRY or ACTIVE crop rectangle structure
*/
static struct v4l2_rect *
__vfe_get_crop(struct vfe_line *line,
struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_crop(&line->subdev, sd_state,
MSM_VFE_PAD_SRC);
return &line->crop;
}
/*
* vfe_try_format - Handle try format by pad subdev method
* @line: VFE line
 * @sd_state: V4L2 subdev state
* @pad: pad on which format is requested
* @fmt: pointer to v4l2 format structure
* @which: wanted subdev format
*/
static void vfe_try_format(struct vfe_line *line,
struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
unsigned int i;
u32 code;
switch (pad) {
case MSM_VFE_PAD_SINK:
/* Set format on sink pad */
for (i = 0; i < line->nformats; i++)
if (fmt->code == line->formats[i].code)
break;
/* If not found, use UYVY as default */
if (i >= line->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
fmt->field = V4L2_FIELD_NONE;
fmt->colorspace = V4L2_COLORSPACE_SRGB;
break;
case MSM_VFE_PAD_SRC:
/* Set and return a format same as sink pad */
code = fmt->code;
*fmt = *__vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK,
which);
fmt->code = vfe_src_pad_code(line, fmt->code, 0, code);
if (line->id == VFE_LINE_PIX) {
struct v4l2_rect *rect;
rect = __vfe_get_crop(line, sd_state, which);
fmt->width = rect->width;
fmt->height = rect->height;
}
break;
}
fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
/*
* vfe_try_compose - Handle try compose selection by pad subdev method
* @line: VFE line
 * @sd_state: V4L2 subdev state
* @rect: pointer to v4l2 rect structure
* @which: wanted subdev format
*/
static void vfe_try_compose(struct vfe_line *line,
struct v4l2_subdev_state *sd_state,
struct v4l2_rect *rect,
enum v4l2_subdev_format_whence which)
{
struct v4l2_mbus_framefmt *fmt;
fmt = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK, which);
if (rect->width > fmt->width)
rect->width = fmt->width;
if (rect->height > fmt->height)
rect->height = fmt->height;
if (fmt->width > rect->width * SCALER_RATIO_MAX)
rect->width = (fmt->width + SCALER_RATIO_MAX - 1) /
SCALER_RATIO_MAX;
rect->width &= ~0x1;
if (fmt->height > rect->height * SCALER_RATIO_MAX)
rect->height = (fmt->height + SCALER_RATIO_MAX - 1) /
SCALER_RATIO_MAX;
if (rect->width < 16)
rect->width = 16;
if (rect->height < 4)
rect->height = 4;
}
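/*
 * Editor's note, hypothetical worked example: with a 4000x3000 sink format
 * and SCALER_RATIO_MAX = 16, the compose rectangle is clamped between
 * 4000x3000 and ceil(4000 / 16) x ceil(3000 / 16) = 250x188, after which
 * the width is rounded down to an even value and floors of 16x4 apply.
 */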
/*
* vfe_try_crop - Handle try crop selection by pad subdev method
* @line: VFE line
 * @sd_state: V4L2 subdev state
* @rect: pointer to v4l2 rect structure
* @which: wanted subdev format
*/
static void vfe_try_crop(struct vfe_line *line,
struct v4l2_subdev_state *sd_state,
struct v4l2_rect *rect,
enum v4l2_subdev_format_whence which)
{
struct v4l2_rect *compose;
compose = __vfe_get_compose(line, sd_state, which);
if (rect->width > compose->width)
rect->width = compose->width;
if (rect->width + rect->left > compose->width)
rect->left = compose->width - rect->width;
if (rect->height > compose->height)
rect->height = compose->height;
if (rect->height + rect->top > compose->height)
rect->top = compose->height - rect->height;
/* wm in line based mode writes multiple of 16 horizontally */
rect->left += (rect->width & 0xf) >> 1;
rect->width &= ~0xf;
if (rect->width < 16) {
rect->left = 0;
rect->width = 16;
}
if (rect->height < 4) {
rect->top = 0;
rect->height = 4;
}
}
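/*
 * Editor's note, hypothetical worked example: a requested crop width of 1930
 * leaves a remainder of 10 modulo 16, so left is shifted by 10 >> 1 = 5
 * pixels to keep the crop centred and the width is truncated to 1920.
 */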
/*
* vfe_enum_mbus_code - Handle pixel format enumeration
* @sd: VFE V4L2 subdevice
 * @sd_state: V4L2 subdev state
* @code: pointer to v4l2_subdev_mbus_code_enum structure
*
 * Return -EINVAL or zero on success
*/
static int vfe_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
if (code->pad == MSM_VFE_PAD_SINK) {
if (code->index >= line->nformats)
return -EINVAL;
code->code = line->formats[code->index].code;
} else {
struct v4l2_mbus_framefmt *sink_fmt;
sink_fmt = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SINK,
code->which);
code->code = vfe_src_pad_code(line, sink_fmt->code,
code->index, 0);
if (!code->code)
return -EINVAL;
}
return 0;
}
/*
* vfe_enum_frame_size - Handle frame size enumeration
* @sd: VFE V4L2 subdevice
 * @sd_state: V4L2 subdev state
* @fse: pointer to v4l2_subdev_frame_size_enum structure
*
* Return -EINVAL or zero on success
*/
static int vfe_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt format;
if (fse->index != 0)
return -EINVAL;
format.code = fse->code;
format.width = 1;
format.height = 1;
vfe_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
if (format.code != fse->code)
return -EINVAL;
format.code = fse->code;
format.width = -1;
format.height = -1;
vfe_try_format(line, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
return 0;
}
/*
* vfe_get_format - Handle get format by pads subdev method
* @sd: VFE V4L2 subdevice
 * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
*/
static int vfe_get_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
format = __vfe_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
fmt->format = *format;
return 0;
}
static int vfe_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel);
/*
* vfe_set_format - Handle set format by pads subdev method
* @sd: VFE V4L2 subdevice
 * @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
*/
static int vfe_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
format = __vfe_get_format(line, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
vfe_try_format(line, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
if (fmt->pad == MSM_VFE_PAD_SINK) {
struct v4l2_subdev_selection sel = { 0 };
int ret;
/* Propagate the format from sink to source */
format = __vfe_get_format(line, sd_state, MSM_VFE_PAD_SRC,
fmt->which);
*format = fmt->format;
vfe_try_format(line, sd_state, MSM_VFE_PAD_SRC, format,
fmt->which);
if (line->id != VFE_LINE_PIX)
return 0;
/* Reset sink pad compose selection */
sel.which = fmt->which;
sel.pad = MSM_VFE_PAD_SINK;
sel.target = V4L2_SEL_TGT_COMPOSE;
sel.r.width = fmt->format.width;
sel.r.height = fmt->format.height;
ret = vfe_set_selection(sd, sd_state, &sel);
if (ret < 0)
return ret;
}
return 0;
}
/*
* vfe_get_selection - Handle get selection by pads subdev method
* @sd: VFE V4L2 subdevice
 * @sd_state: V4L2 subdev state
* @sel: pointer to v4l2 subdev selection structure
*
* Return -EINVAL or zero on success
*/
static int vfe_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
struct v4l2_subdev_format fmt = { 0 };
struct v4l2_rect *rect;
int ret;
if (line->id != VFE_LINE_PIX)
return -EINVAL;
if (sel->pad == MSM_VFE_PAD_SINK)
switch (sel->target) {
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
fmt.pad = sel->pad;
fmt.which = sel->which;
ret = vfe_get_format(sd, sd_state, &fmt);
if (ret < 0)
return ret;
sel->r.left = 0;
sel->r.top = 0;
sel->r.width = fmt.format.width;
sel->r.height = fmt.format.height;
break;
case V4L2_SEL_TGT_COMPOSE:
rect = __vfe_get_compose(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
sel->r = *rect;
break;
default:
return -EINVAL;
}
else if (sel->pad == MSM_VFE_PAD_SRC)
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
rect = __vfe_get_compose(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
sel->r.left = rect->left;
sel->r.top = rect->top;
sel->r.width = rect->width;
sel->r.height = rect->height;
break;
case V4L2_SEL_TGT_CROP:
rect = __vfe_get_crop(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
sel->r = *rect;
break;
default:
return -EINVAL;
}
return 0;
}
/*
* vfe_set_selection - Handle set selection by pads subdev method
* @sd: VFE V4L2 subdevice
 * @sd_state: V4L2 subdev state
* @sel: pointer to v4l2 subdev selection structure
*
* Return -EINVAL or zero on success
*/
static int vfe_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct vfe_line *line = v4l2_get_subdevdata(sd);
struct v4l2_rect *rect;
int ret;
if (line->id != VFE_LINE_PIX)
return -EINVAL;
if (sel->target == V4L2_SEL_TGT_COMPOSE &&
sel->pad == MSM_VFE_PAD_SINK) {
struct v4l2_subdev_selection crop = { 0 };
rect = __vfe_get_compose(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
vfe_try_compose(line, sd_state, &sel->r, sel->which);
*rect = sel->r;
/* Reset source crop selection */
crop.which = sel->which;
crop.pad = MSM_VFE_PAD_SRC;
crop.target = V4L2_SEL_TGT_CROP;
crop.r = *rect;
ret = vfe_set_selection(sd, sd_state, &crop);
} else if (sel->target == V4L2_SEL_TGT_CROP &&
sel->pad == MSM_VFE_PAD_SRC) {
struct v4l2_subdev_format fmt = { 0 };
rect = __vfe_get_crop(line, sd_state, sel->which);
if (rect == NULL)
return -EINVAL;
vfe_try_crop(line, sd_state, &sel->r, sel->which);
*rect = sel->r;
/* Reset source pad format width and height */
fmt.which = sel->which;
fmt.pad = MSM_VFE_PAD_SRC;
ret = vfe_get_format(sd, sd_state, &fmt);
if (ret < 0)
return ret;
fmt.format.width = rect->width;
fmt.format.height = rect->height;
ret = vfe_set_format(sd, sd_state, &fmt);
} else {
ret = -EINVAL;
}
return ret;
}
/*
* vfe_init_formats - Initialize formats on all pads
* @sd: VFE V4L2 subdevice
* @fh: V4L2 subdev file handle
*
* Initialize all pad formats with default values.
*
* Return 0 on success or a negative error code otherwise
*/
static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_subdev_format format = {
.pad = MSM_VFE_PAD_SINK,
.which = fh ? V4L2_SUBDEV_FORMAT_TRY :
V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
.code = MEDIA_BUS_FMT_UYVY8_2X8,
.width = 1920,
.height = 1080
}
};
return vfe_set_format(sd, fh ? fh->state : NULL, &format);
}
/*
 * msm_vfe_subdev_init - Initialize VFE device structure and resources
 * @camss: Camera subsystem structure
 * @vfe: VFE device
 * @res: VFE module resources table
 * @id: VFE hardware instance id
*
* Return 0 on success or a negative error code otherwise
*/
int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
const struct resources *res, u8 id)
{
struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
int i, j;
int ret;
switch (camss->version) {
case CAMSS_8x16:
vfe->ops = &vfe_ops_4_1;
break;
case CAMSS_8x96:
vfe->ops = &vfe_ops_4_7;
break;
case CAMSS_660:
vfe->ops = &vfe_ops_4_8;
break;
case CAMSS_845:
vfe->ops = &vfe_ops_170;
break;
case CAMSS_8250:
vfe->ops = &vfe_ops_480;
break;
default:
return -EINVAL;
}
vfe->ops->subdev_init(dev, vfe);
/* Memory */
vfe->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
if (IS_ERR(vfe->base)) {
dev_err(dev, "could not map memory\n");
return PTR_ERR(vfe->base);
}
/* Interrupt */
ret = platform_get_irq_byname(pdev, res->interrupt[0]);
if (ret < 0)
return ret;
vfe->irq = ret;
snprintf(vfe->irq_name, sizeof(vfe->irq_name), "%s_%s%d",
dev_name(dev), MSM_VFE_NAME, id);
ret = devm_request_irq(dev, vfe->irq, vfe->ops->isr,
IRQF_TRIGGER_RISING, vfe->irq_name, vfe);
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
return ret;
}
/* Clocks */
vfe->nclocks = 0;
while (res->clock[vfe->nclocks])
vfe->nclocks++;
vfe->clock = devm_kcalloc(dev, vfe->nclocks, sizeof(*vfe->clock),
GFP_KERNEL);
if (!vfe->clock)
return -ENOMEM;
for (i = 0; i < vfe->nclocks; i++) {
struct camss_clock *clock = &vfe->clock[i];
clock->clk = devm_clk_get(dev, res->clock[i]);
if (IS_ERR(clock->clk))
return PTR_ERR(clock->clk);
clock->name = res->clock[i];
clock->nfreqs = 0;
while (res->clock_rate[i][clock->nfreqs])
clock->nfreqs++;
if (!clock->nfreqs) {
clock->freq = NULL;
continue;
}
clock->freq = devm_kcalloc(dev,
clock->nfreqs,
sizeof(*clock->freq),
GFP_KERNEL);
if (!clock->freq)
return -ENOMEM;
for (j = 0; j < clock->nfreqs; j++)
clock->freq[j] = res->clock_rate[i][j];
}
mutex_init(&vfe->power_lock);
vfe->power_count = 0;
mutex_init(&vfe->stream_lock);
vfe->stream_count = 0;
spin_lock_init(&vfe->output_lock);
vfe->camss = camss;
vfe->id = id;
vfe->reg_update = 0;
for (i = VFE_LINE_RDI0; i < vfe->line_num; i++) {
struct vfe_line *l = &vfe->line[i];
l->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
l->video_out.camss = camss;
l->id = i;
init_completion(&l->output.sof);
init_completion(&l->output.reg_update);
if (camss->version == CAMSS_8x16) {
if (i == VFE_LINE_PIX) {
l->formats = formats_pix_8x16;
l->nformats = ARRAY_SIZE(formats_pix_8x16);
} else {
l->formats = formats_rdi_8x16;
l->nformats = ARRAY_SIZE(formats_rdi_8x16);
}
} else if (camss->version == CAMSS_8x96 ||
camss->version == CAMSS_660) {
if (i == VFE_LINE_PIX) {
l->formats = formats_pix_8x96;
l->nformats = ARRAY_SIZE(formats_pix_8x96);
} else {
l->formats = formats_rdi_8x96;
l->nformats = ARRAY_SIZE(formats_rdi_8x96);
}
} else if (camss->version == CAMSS_845 ||
camss->version == CAMSS_8250) {
l->formats = formats_rdi_845;
l->nformats = ARRAY_SIZE(formats_rdi_845);
} else {
return -EINVAL;
}
}
init_completion(&vfe->reset_complete);
init_completion(&vfe->halt_complete);
return 0;
}
/*
* vfe_link_setup - Setup VFE connections
* @entity: Pointer to media entity structure
* @local: Pointer to local pad
* @remote: Pointer to remote pad
* @flags: Link flags
*
* Return 0 on success
*/
static int vfe_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
{
if (flags & MEDIA_LNK_FL_ENABLED)
if (media_pad_remote_pad_first(local))
return -EBUSY;
return 0;
}
static const struct v4l2_subdev_core_ops vfe_core_ops = {
.s_power = vfe_set_power,
};
static const struct v4l2_subdev_video_ops vfe_video_ops = {
.s_stream = vfe_set_stream,
};
static const struct v4l2_subdev_pad_ops vfe_pad_ops = {
.enum_mbus_code = vfe_enum_mbus_code,
.enum_frame_size = vfe_enum_frame_size,
.get_fmt = vfe_get_format,
.set_fmt = vfe_set_format,
.get_selection = vfe_get_selection,
.set_selection = vfe_set_selection,
};
static const struct v4l2_subdev_ops vfe_v4l2_ops = {
.core = &vfe_core_ops,
.video = &vfe_video_ops,
.pad = &vfe_pad_ops,
};
static const struct v4l2_subdev_internal_ops vfe_v4l2_internal_ops = {
.open = vfe_init_formats,
};
static const struct media_entity_operations vfe_media_ops = {
.link_setup = vfe_link_setup,
.link_validate = v4l2_subdev_link_validate,
};
/*
* msm_vfe_register_entities - Register subdev node for VFE module
* @vfe: VFE device
* @v4l2_dev: V4L2 device
*
 * Initialize and register a subdev node for the VFE module. Then call
 * msm_video_register() to register the video device node that will be
 * connected to this subdev node, and finally create the media link
 * between them.
*
* Return 0 on success or a negative error code otherwise
*/
int msm_vfe_register_entities(struct vfe_device *vfe,
struct v4l2_device *v4l2_dev)
{
struct device *dev = vfe->camss->dev;
struct v4l2_subdev *sd;
struct media_pad *pads;
struct camss_video *video_out;
int ret;
int i;
for (i = 0; i < vfe->line_num; i++) {
char name[32];
sd = &vfe->line[i].subdev;
pads = vfe->line[i].pads;
video_out = &vfe->line[i].video_out;
v4l2_subdev_init(sd, &vfe_v4l2_ops);
sd->internal_ops = &vfe_v4l2_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
if (i == VFE_LINE_PIX)
snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s",
MSM_VFE_NAME, vfe->id, "pix");
else
snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d_%s%d",
MSM_VFE_NAME, vfe->id, "rdi", i);
v4l2_set_subdevdata(sd, &vfe->line[i]);
ret = vfe_init_formats(sd, NULL);
if (ret < 0) {
dev_err(dev, "Failed to init format: %d\n", ret);
goto error_init;
}
pads[MSM_VFE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
pads[MSM_VFE_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
sd->entity.ops = &vfe_media_ops;
ret = media_entity_pads_init(&sd->entity, MSM_VFE_PADS_NUM,
pads);
if (ret < 0) {
dev_err(dev, "Failed to init media entity: %d\n", ret);
goto error_init;
}
ret = v4l2_device_register_subdev(v4l2_dev, sd);
if (ret < 0) {
dev_err(dev, "Failed to register subdev: %d\n", ret);
goto error_reg_subdev;
}
video_out->ops = &vfe->video_ops;
if (vfe->camss->version == CAMSS_845 ||
vfe->camss->version == CAMSS_8250)
video_out->bpl_alignment = 16;
else
video_out->bpl_alignment = 8;
video_out->line_based = 0;
if (i == VFE_LINE_PIX) {
video_out->bpl_alignment = 16;
video_out->line_based = 1;
}
snprintf(name, ARRAY_SIZE(name), "%s%d_%s%d",
MSM_VFE_NAME, vfe->id, "video", i);
ret = msm_video_register(video_out, v4l2_dev, name,
i == VFE_LINE_PIX ? 1 : 0);
if (ret < 0) {
dev_err(dev, "Failed to register video node: %d\n",
ret);
goto error_reg_video;
}
ret = media_create_pad_link(
&sd->entity, MSM_VFE_PAD_SRC,
&video_out->vdev.entity, 0,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret < 0) {
dev_err(dev, "Failed to link %s->%s entities: %d\n",
sd->entity.name, video_out->vdev.entity.name,
ret);
goto error_link;
}
}
return 0;
error_link:
msm_video_unregister(video_out);
error_reg_video:
v4l2_device_unregister_subdev(sd);
error_reg_subdev:
media_entity_cleanup(&sd->entity);
error_init:
for (i--; i >= 0; i--) {
sd = &vfe->line[i].subdev;
video_out = &vfe->line[i].video_out;
msm_video_unregister(video_out);
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
}
return ret;
}
/*
* msm_vfe_unregister_entities - Unregister VFE module subdev node
* @vfe: VFE device
*/
void msm_vfe_unregister_entities(struct vfe_device *vfe)
{
int i;
mutex_destroy(&vfe->power_lock);
mutex_destroy(&vfe->stream_lock);
for (i = 0; i < vfe->line_num; i++) {
struct v4l2_subdev *sd = &vfe->line[i].subdev;
struct camss_video *video_out = &vfe->line[i].video_out;
msm_video_unregister(video_out);
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
}
}
| linux-master | drivers/media/platform/qcom/camss/camss-vfe.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-csid-4-7.c
*
* Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
*
* Copyright (C) 2020 Linaro Ltd.
*/
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include "camss-csid.h"
#include "camss-csid-gen1.h"
#include "camss.h"
#define CAMSS_CSID_HW_VERSION 0x0
#define CAMSS_CSID_CORE_CTRL_0 0x004
#define CAMSS_CSID_CORE_CTRL_1 0x008
#define CAMSS_CSID_RST_CMD 0x010
#define CAMSS_CSID_CID_LUT_VC_n(n) (0x014 + 0x4 * (n))
#define CAMSS_CSID_CID_n_CFG(n) (0x024 + 0x4 * (n))
#define CAMSS_CSID_CID_n_CFG_ISPIF_EN BIT(0)
#define CAMSS_CSID_CID_n_CFG_RDI_EN BIT(1)
#define CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT 4
#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_8 (PLAIN_FORMAT_PLAIN8 << 8)
#define CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16 (PLAIN_FORMAT_PLAIN16 << 8)
#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB (0 << 9)
#define CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_MSB (1 << 9)
#define CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP (0 << 10)
#define CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING (1 << 10)
#define CAMSS_CSID_IRQ_CLEAR_CMD 0x064
#define CAMSS_CSID_IRQ_MASK 0x068
#define CAMSS_CSID_IRQ_STATUS 0x06c
#define CAMSS_CSID_TG_CTRL 0x0a8
#define CAMSS_CSID_TG_CTRL_DISABLE 0xa06436
#define CAMSS_CSID_TG_CTRL_ENABLE 0xa06437
#define CAMSS_CSID_TG_VC_CFG 0x0ac
#define CAMSS_CSID_TG_VC_CFG_H_BLANKING 0x3ff
#define CAMSS_CSID_TG_VC_CFG_V_BLANKING 0x7f
#define CAMSS_CSID_TG_DT_n_CGG_0(n) (0x0b4 + 0xc * (n))
#define CAMSS_CSID_TG_DT_n_CGG_1(n) (0x0b8 + 0xc * (n))
#define CAMSS_CSID_TG_DT_n_CGG_2(n) (0x0bc + 0xc * (n))
static const struct csid_format csid_formats[] = {
{
MEDIA_BUS_FMT_UYVY8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_VYUY8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YUYV8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_YVYU8_2X8,
DATA_TYPE_YUV422_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
2,
},
{
MEDIA_BUS_FMT_SBGGR8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGBRG8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SGRBG8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SRGGB8_1X8,
DATA_TYPE_RAW_8BIT,
DECODE_FORMAT_UNCOMPRESSED_8_BIT,
8,
1,
},
{
MEDIA_BUS_FMT_SBGGR10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGBRG10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SGRBG10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SRGGB10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
{
MEDIA_BUS_FMT_SBGGR12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGBRG12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SGRBG12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SRGGB12_1X12,
DATA_TYPE_RAW_12BIT,
DECODE_FORMAT_UNCOMPRESSED_12_BIT,
12,
1,
},
{
MEDIA_BUS_FMT_SBGGR14_1X14,
DATA_TYPE_RAW_14BIT,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGBRG14_1X14,
DATA_TYPE_RAW_14BIT,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SGRBG14_1X14,
DATA_TYPE_RAW_14BIT,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_SRGGB14_1X14,
DATA_TYPE_RAW_14BIT,
DECODE_FORMAT_UNCOMPRESSED_14_BIT,
14,
1,
},
{
MEDIA_BUS_FMT_Y10_1X10,
DATA_TYPE_RAW_10BIT,
DECODE_FORMAT_UNCOMPRESSED_10_BIT,
10,
1,
},
};
static void csid_configure_stream(struct csid_device *csid, u8 enable)
{
struct csid_testgen_config *tg = &csid->testgen;
u32 sink_code = csid->fmt[MSM_CSID_PAD_SINK].code;
u32 src_code = csid->fmt[MSM_CSID_PAD_SRC].code;
u32 val;
if (enable) {
struct v4l2_mbus_framefmt *input_format;
const struct csid_format *format;
u8 vc = 0; /* Virtual Channel 0 */
u8 cid = vc * 4; /* id of Virtual Channel and Data Type set */
u8 dt_shift;
if (tg->enabled) {
/* Config Test Generator */
u32 num_bytes_per_line, num_lines;
input_format = &csid->fmt[MSM_CSID_PAD_SRC];
format = csid_get_fmt_entry(csid->formats, csid->nformats,
input_format->code);
num_bytes_per_line = input_format->width * format->bpp * format->spp / 8;
num_lines = input_format->height;
			/* 31:24 V blank, 23:13 H blank, 3:2 num of active DT, 1:0 VC */
val = ((CAMSS_CSID_TG_VC_CFG_V_BLANKING & 0xff) << 24) |
((CAMSS_CSID_TG_VC_CFG_H_BLANKING & 0x7ff) << 13);
writel_relaxed(val, csid->base + CAMSS_CSID_TG_VC_CFG);
/* 28:16 bytes per lines, 12:0 num of lines */
val = ((num_bytes_per_line & 0x1fff) << 16) |
(num_lines & 0x1fff);
writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_0(0));
/* 5:0 data type */
val = format->data_type;
writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_1(0));
/* 2:0 output test pattern */
val = tg->mode - 1;
writel_relaxed(val, csid->base + CAMSS_CSID_TG_DT_n_CGG_2(0));
} else {
struct csid_phy_config *phy = &csid->phy;
input_format = &csid->fmt[MSM_CSID_PAD_SINK];
format = csid_get_fmt_entry(csid->formats, csid->nformats,
input_format->code);
val = phy->lane_cnt - 1;
val |= phy->lane_assign << 4;
writel_relaxed(val, csid->base + CAMSS_CSID_CORE_CTRL_0);
val = phy->csiphy_id << 17;
val |= 0x9;
writel_relaxed(val, csid->base + CAMSS_CSID_CORE_CTRL_1);
}
/* Config LUT */
dt_shift = (cid % 4) * 8;
val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
val &= ~(0xff << dt_shift);
val |= format->data_type << dt_shift;
writel_relaxed(val, csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
val = CAMSS_CSID_CID_n_CFG_ISPIF_EN;
val |= CAMSS_CSID_CID_n_CFG_RDI_EN;
val |= format->decode_format << CAMSS_CSID_CID_n_CFG_DECODE_FORMAT_SHIFT;
val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_RAW_DUMP;
if ((sink_code == MEDIA_BUS_FMT_SBGGR10_1X10 &&
src_code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE) ||
(sink_code == MEDIA_BUS_FMT_Y10_1X10 &&
src_code == MEDIA_BUS_FMT_Y10_2X8_PADHI_LE)) {
val |= CAMSS_CSID_CID_n_CFG_RDI_MODE_PLAIN_PACKING;
val |= CAMSS_CSID_CID_n_CFG_PLAIN_FORMAT_16;
val |= CAMSS_CSID_CID_n_CFG_PLAIN_ALIGNMENT_LSB;
}
writel_relaxed(val, csid->base + CAMSS_CSID_CID_n_CFG(cid));
if (tg->enabled) {
val = CAMSS_CSID_TG_CTRL_ENABLE;
writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL);
}
} else {
if (tg->enabled) {
val = CAMSS_CSID_TG_CTRL_DISABLE;
writel_relaxed(val, csid->base + CAMSS_CSID_TG_CTRL);
}
}
}
static int csid_configure_testgen_pattern(struct csid_device *csid, s32 val)
{
if (val > 0 && val <= csid->testgen.nmodes)
csid->testgen.mode = val;
return 0;
}
static u32 csid_hw_version(struct csid_device *csid)
{
u32 hw_version = readl_relaxed(csid->base + CAMSS_CSID_HW_VERSION);
dev_dbg(csid->camss->dev, "CSID HW Version = 0x%08x\n", hw_version);
return hw_version;
}
/*
* isr - CSID module interrupt service routine
* @irq: Interrupt line
* @dev: CSID device
*
* Return IRQ_HANDLED on success
*/
static irqreturn_t csid_isr(int irq, void *dev)
{
struct csid_device *csid = dev;
u32 value;
value = readl_relaxed(csid->base + CAMSS_CSID_IRQ_STATUS);
writel_relaxed(value, csid->base + CAMSS_CSID_IRQ_CLEAR_CMD);
if ((value >> 11) & 0x1)
complete(&csid->reset_complete);
return IRQ_HANDLED;
}
/*
* csid_reset - Trigger reset on CSID module and wait to complete
* @csid: CSID device
*
* Return 0 on success or a negative error code otherwise
*/
static int csid_reset(struct csid_device *csid)
{
unsigned long time;
reinit_completion(&csid->reset_complete);
writel_relaxed(0x7fff, csid->base + CAMSS_CSID_RST_CMD);
time = wait_for_completion_timeout(&csid->reset_complete,
msecs_to_jiffies(CSID_RESET_TIMEOUT_MS));
if (!time) {
dev_err(csid->camss->dev, "CSID reset timeout\n");
return -EIO;
}
return 0;
}
static u32 csid_src_pad_code(struct csid_device *csid, u32 sink_code,
unsigned int match_format_idx, u32 match_code)
{
switch (sink_code) {
case MEDIA_BUS_FMT_SBGGR10_1X10:
{
u32 src_code[] = {
MEDIA_BUS_FMT_SBGGR10_1X10,
MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE,
};
return csid_find_code(src_code, ARRAY_SIZE(src_code),
match_format_idx, match_code);
}
case MEDIA_BUS_FMT_Y10_1X10:
{
u32 src_code[] = {
MEDIA_BUS_FMT_Y10_1X10,
MEDIA_BUS_FMT_Y10_2X8_PADHI_LE,
};
return csid_find_code(src_code, ARRAY_SIZE(src_code),
match_format_idx, match_code);
}
default:
if (match_format_idx > 0)
return 0;
return sink_code;
}
}
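/*
 * Example for csid_src_pad_code() above: with a MEDIA_BUS_FMT_SBGGR10_1X10
 * sink format, index 0 enumerates MEDIA_BUS_FMT_SBGGR10_1X10 (identity) and
 * index 1 enumerates MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE (samples unpacked to
 * 16 bits). Any other sink code supports only the identity conversion, so
 * only index 0 is valid for it.
 */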
static void csid_subdev_init(struct csid_device *csid)
{
csid->formats = csid_formats;
csid->nformats = ARRAY_SIZE(csid_formats);
csid->testgen.modes = csid_testgen_modes;
csid->testgen.nmodes = CSID_PAYLOAD_MODE_NUM_SUPPORTED_GEN1;
}
const struct csid_hw_ops csid_ops_4_7 = {
.configure_stream = csid_configure_stream,
.configure_testgen_pattern = csid_configure_testgen_pattern,
.hw_version = csid_hw_version,
.isr = csid_isr,
.reset = csid_reset,
.src_pad_code = csid_src_pad_code,
.subdev_init = csid_subdev_init,
};
| linux-master | drivers/media/platform/qcom/camss/camss-csid-4-7.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-csiphy-2ph-1-0.c
*
* Qualcomm MSM Camera Subsystem - CSIPHY Module 2phase v1.0
*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2016-2018 Linaro Ltd.
*/
#include "camss-csiphy.h"
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
#define CAMSS_CSI_PHY_LN_CLK 1
#define CAMSS_CSI_PHY_GLBL_RESET 0x140
#define CAMSS_CSI_PHY_GLBL_PWR_CFG 0x144
#define CAMSS_CSI_PHY_GLBL_IRQ_CMD 0x164
#define CAMSS_CSI_PHY_HW_VERSION 0x188
#define CAMSS_CSI_PHY_INTERRUPT_STATUSn(n) (0x18c + 0x4 * (n))
#define CAMSS_CSI_PHY_INTERRUPT_MASKn(n) (0x1ac + 0x4 * (n))
#define CAMSS_CSI_PHY_INTERRUPT_CLEARn(n) (0x1cc + 0x4 * (n))
#define CAMSS_CSI_PHY_GLBL_T_INIT_CFG0 0x1ec
#define CAMSS_CSI_PHY_T_WAKEUP_CFG0 0x1f4
static u8 csiphy_get_lane_mask(struct csiphy_lanes_cfg *lane_cfg)
{
u8 lane_mask;
int i;
lane_mask = 1 << CAMSS_CSI_PHY_LN_CLK;
for (i = 0; i < lane_cfg->num_data; i++)
lane_mask |= 1 << lane_cfg->data[i].pos;
return lane_mask;
}
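/*
 * Example for csiphy_get_lane_mask() above (illustrative): the clock lane is
 * fixed at position CAMSS_CSI_PHY_LN_CLK (1), so with four data lanes at
 * positions 0, 2, 3 and 4 the result is
 * BIT(1) | BIT(0) | BIT(2) | BIT(3) | BIT(4) = 0x1f.
 */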
static void csiphy_hw_version_read(struct csiphy_device *csiphy,
struct device *dev)
{
u8 hw_version = readl_relaxed(csiphy->base +
CAMSS_CSI_PHY_HW_VERSION);
dev_dbg(dev, "CSIPHY HW Version = 0x%02x\n", hw_version);
}
/*
* csiphy_reset - Perform software reset on CSIPHY module
* @csiphy: CSIPHY device
*/
static void csiphy_reset(struct csiphy_device *csiphy)
{
writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
usleep_range(5000, 8000);
writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
}
/*
* csiphy_settle_cnt_calc - Calculate settle count value
*
* Helper function to calculate settle count value. This is
* based on the CSI2 T_hs_settle parameter which in turn
* is calculated based on the CSI2 transmitter link frequency.
*
* Return settle count value or 0 if the CSI2 link frequency
* is not available
*/
static u8 csiphy_settle_cnt_calc(s64 link_freq, u32 timer_clk_rate)
{
u32 ui; /* ps */
u32 timer_period; /* ps */
u32 t_hs_prepare_max; /* ps */
u32 t_hs_prepare_zero_min; /* ps */
u32 t_hs_settle; /* ps */
u8 settle_cnt;
if (link_freq <= 0)
return 0;
ui = div_u64(1000000000000LL, link_freq);
ui /= 2;
t_hs_prepare_max = 85000 + 6 * ui;
t_hs_prepare_zero_min = 145000 + 10 * ui;
t_hs_settle = (t_hs_prepare_max + t_hs_prepare_zero_min) / 2;
timer_period = div_u64(1000000000000LL, timer_clk_rate);
settle_cnt = t_hs_settle / timer_period - 1;
return settle_cnt;
}
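/*
 * Worked example for csiphy_settle_cnt_calc() above (illustrative values):
 * link_freq = 480 MHz gives ui = 10^12 / (480 * 10^6) / 2 = 1041 ps, so
 * t_hs_prepare_max = 85000 + 6 * 1041 = 91246 ps and
 * t_hs_prepare_zero_min = 145000 + 10 * 1041 = 155410 ps, averaging to
 * t_hs_settle = 123328 ps. With timer_clk_rate = 200 MHz the timer period
 * is 5000 ps, yielding settle_cnt = 123328 / 5000 - 1 = 23.
 */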
static void csiphy_lanes_enable(struct csiphy_device *csiphy,
struct csiphy_config *cfg,
s64 link_freq, u8 lane_mask)
{
struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
u8 settle_cnt;
u8 val, l = 0;
int i = 0;
settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate);
writel_relaxed(0x1, csiphy->base +
CAMSS_CSI_PHY_GLBL_T_INIT_CFG0);
writel_relaxed(0x1, csiphy->base +
CAMSS_CSI_PHY_T_WAKEUP_CFG0);
val = 0x1;
val |= lane_mask << 1;
writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
val = cfg->combo_mode << 4;
writel_relaxed(val, csiphy->base + CAMSS_CSI_PHY_GLBL_RESET);
for (i = 0; i <= c->num_data; i++) {
if (i == c->num_data)
l = CAMSS_CSI_PHY_LN_CLK;
else
l = c->data[i].pos;
writel_relaxed(0x10, csiphy->base +
CAMSS_CSI_PHY_LNn_CFG2(l));
writel_relaxed(settle_cnt, csiphy->base +
CAMSS_CSI_PHY_LNn_CFG3(l));
writel_relaxed(0x3f, csiphy->base +
CAMSS_CSI_PHY_INTERRUPT_MASKn(l));
writel_relaxed(0x3f, csiphy->base +
CAMSS_CSI_PHY_INTERRUPT_CLEARn(l));
}
}
static void csiphy_lanes_disable(struct csiphy_device *csiphy,
struct csiphy_config *cfg)
{
struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
u8 l = 0;
int i = 0;
for (i = 0; i <= c->num_data; i++) {
if (i == c->num_data)
l = CAMSS_CSI_PHY_LN_CLK;
else
l = c->data[i].pos;
writel_relaxed(0x0, csiphy->base +
CAMSS_CSI_PHY_LNn_CFG2(l));
}
writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_PWR_CFG);
}
/*
* csiphy_isr - CSIPHY module interrupt handler
* @irq: Interrupt line
* @dev: CSIPHY device
*
* Return IRQ_HANDLED on success
*/
static irqreturn_t csiphy_isr(int irq, void *dev)
{
struct csiphy_device *csiphy = dev;
u8 i;
for (i = 0; i < 8; i++) {
u8 val = readl_relaxed(csiphy->base +
CAMSS_CSI_PHY_INTERRUPT_STATUSn(i));
writel_relaxed(val, csiphy->base +
CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
writel_relaxed(0x1, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
writel_relaxed(0x0, csiphy->base + CAMSS_CSI_PHY_GLBL_IRQ_CMD);
writel_relaxed(0x0, csiphy->base +
CAMSS_CSI_PHY_INTERRUPT_CLEARn(i));
}
return IRQ_HANDLED;
}
const struct csiphy_hw_ops csiphy_ops_2ph_1_0 = {
.get_lane_mask = csiphy_get_lane_mask,
.hw_version_read = csiphy_hw_version_read,
.reset = csiphy_reset,
.lanes_enable = csiphy_lanes_enable,
.lanes_disable = csiphy_lanes_disable,
.isr = csiphy_isr,
};
| linux-master | drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-csid.c
*
* Qualcomm MSM Camera Subsystem - CSID (CSI Decoder) Module
*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <media/media-entity.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>
#include "camss-csid.h"
#include "camss-csid-gen1.h"
#include "camss.h"
/* offset of CSID registers in VFE region for VFE 480 */
#define VFE_480_CSID_OFFSET 0x1200
#define VFE_480_LITE_CSID_OFFSET 0x200
#define MSM_CSID_NAME "msm_csid"
const char * const csid_testgen_modes[] = {
"Disabled",
"Incrementing",
"Alternating 0x55/0xAA",
"All Zeros 0x00",
"All Ones 0xFF",
"Pseudo-random Data",
"User Specified",
"Complex pattern",
"Color box",
"Color bars",
NULL
};
u32 csid_find_code(u32 *codes, unsigned int ncodes,
unsigned int match_format_idx, u32 match_code)
{
int i;
if (!match_code && (match_format_idx >= ncodes))
return 0;
for (i = 0; i < ncodes; i++)
if (match_code) {
if (codes[i] == match_code)
return match_code;
} else {
if (i == match_format_idx)
return codes[i];
}
return codes[0];
}
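/*
 * csid_find_code() above is used in two modes: with a non-zero match_code it
 * validates that the code is in the list (falling back to codes[0] if not);
 * with match_code == 0 it enumerates the list by match_format_idx. E.g. for
 * codes[] = { SBGGR10_1X10, SBGGR10_2X8_PADHI_LE }, match_format_idx = 1 and
 * match_code = 0 return SBGGR10_2X8_PADHI_LE.
 */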
const struct csid_format *csid_get_fmt_entry(const struct csid_format *formats,
unsigned int nformats,
u32 code)
{
unsigned int i;
for (i = 0; i < nformats; i++)
if (code == formats[i].code)
return &formats[i];
WARN(1, "Unknown format\n");
return &formats[0];
}
/*
* csid_set_clock_rates - Calculate and set clock rates on CSID module
* @csid: CSID device
*/
static int csid_set_clock_rates(struct csid_device *csid)
{
struct device *dev = csid->camss->dev;
const struct csid_format *fmt;
s64 link_freq;
int i, j;
int ret;
fmt = csid_get_fmt_entry(csid->formats, csid->nformats,
csid->fmt[MSM_CSIPHY_PAD_SINK].code);
link_freq = camss_get_link_freq(&csid->subdev.entity, fmt->bpp,
csid->phy.lane_cnt);
if (link_freq < 0)
link_freq = 0;
for (i = 0; i < csid->nclocks; i++) {
struct camss_clock *clock = &csid->clock[i];
if (!strcmp(clock->name, "csi0") ||
!strcmp(clock->name, "csi1") ||
!strcmp(clock->name, "csi2") ||
!strcmp(clock->name, "csi3")) {
u64 min_rate = link_freq / 4;
long rate;
camss_add_clock_margin(&min_rate);
for (j = 0; j < clock->nfreqs; j++)
if (min_rate < clock->freq[j])
break;
if (j == clock->nfreqs) {
dev_err(dev,
"Pixel clock is too high for CSID\n");
return -EINVAL;
}
/*
 * If the sensor pixel clock is not available,
 * set the highest possible CSID clock rate.
 */
if (min_rate == 0)
j = clock->nfreqs - 1;
rate = clk_round_rate(clock->clk, clock->freq[j]);
if (rate < 0) {
dev_err(dev, "clk round rate failed: %ld\n",
rate);
return -EINVAL;
}
ret = clk_set_rate(clock->clk, rate);
if (ret < 0) {
dev_err(dev, "clk set rate failed: %d\n", ret);
return ret;
}
} else if (clock->nfreqs) {
clk_set_rate(clock->clk, clock->freq[0]);
}
}
return 0;
}
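/*
 * Example for csid_set_clock_rates() above (illustrative): for
 * link_freq = 600 MHz the csi0..csi3 clocks need
 * min_rate = 600 MHz / 4 = 150 MHz plus the safety margin added by
 * camss_add_clock_margin(); the lowest table frequency satisfying this is
 * then rounded and set. A min_rate of 0 (no link frequency available)
 * selects the highest table frequency instead.
 */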
/*
* csid_set_power - Power on/off CSID module
* @sd: CSID V4L2 subdevice
* @on: Requested power state
*
* Return 0 on success or a negative error code otherwise
*/
static int csid_set_power(struct v4l2_subdev *sd, int on)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct camss *camss = csid->camss;
struct device *dev = camss->dev;
struct vfe_device *vfe = &camss->vfe[csid->id];
u32 version = camss->version;
int ret = 0;
if (on) {
if (version == CAMSS_8250 || version == CAMSS_845) {
ret = vfe_get(vfe);
if (ret < 0)
return ret;
}
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
ret = regulator_bulk_enable(csid->num_supplies,
csid->supplies);
if (ret < 0) {
pm_runtime_put_sync(dev);
return ret;
}
ret = csid_set_clock_rates(csid);
if (ret < 0) {
regulator_bulk_disable(csid->num_supplies,
csid->supplies);
pm_runtime_put_sync(dev);
return ret;
}
ret = camss_enable_clocks(csid->nclocks, csid->clock, dev);
if (ret < 0) {
regulator_bulk_disable(csid->num_supplies,
csid->supplies);
pm_runtime_put_sync(dev);
return ret;
}
csid->phy.need_vc_update = true;
enable_irq(csid->irq);
ret = csid->ops->reset(csid);
if (ret < 0) {
disable_irq(csid->irq);
camss_disable_clocks(csid->nclocks, csid->clock);
regulator_bulk_disable(csid->num_supplies,
csid->supplies);
pm_runtime_put_sync(dev);
return ret;
}
csid->ops->hw_version(csid);
} else {
disable_irq(csid->irq);
camss_disable_clocks(csid->nclocks, csid->clock);
regulator_bulk_disable(csid->num_supplies,
csid->supplies);
pm_runtime_put_sync(dev);
if (version == CAMSS_8250 || version == CAMSS_845)
vfe_put(vfe);
}
return ret;
}
/*
* csid_set_stream - Enable/disable streaming on CSID module
* @sd: CSID V4L2 subdevice
* @enable: Requested streaming state
*
* Main configuration of CSID module is also done here.
*
* Return 0 on success or a negative error code otherwise
*/
static int csid_set_stream(struct v4l2_subdev *sd, int enable)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
int ret;
if (enable) {
ret = v4l2_ctrl_handler_setup(&csid->ctrls);
if (ret < 0) {
dev_err(csid->camss->dev,
"could not sync v4l2 controls: %d\n", ret);
return ret;
}
if (!csid->testgen.enabled &&
!media_pad_remote_pad_first(&csid->pads[MSM_CSID_PAD_SINK]))
return -ENOLINK;
}
if (csid->phy.need_vc_update) {
csid->ops->configure_stream(csid, enable);
csid->phy.need_vc_update = false;
}
return 0;
}
/*
* __csid_get_format - Get pointer to format structure
* @csid: CSID device
* @sd_state: V4L2 subdev state
* @pad: pad from which format is requested
* @which: TRY or ACTIVE format
*
* Return pointer to TRY or ACTIVE format structure
*/
static struct v4l2_mbus_framefmt *
__csid_get_format(struct csid_device *csid,
struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&csid->subdev, sd_state,
pad);
return &csid->fmt[pad];
}
/*
* csid_try_format - Handle try format by pad subdev method
* @csid: CSID device
* @sd_state: V4L2 subdev state
* @pad: pad on which format is requested
* @fmt: pointer to v4l2 format structure
* @which: wanted subdev format
*/
static void csid_try_format(struct csid_device *csid,
struct v4l2_subdev_state *sd_state,
unsigned int pad,
struct v4l2_mbus_framefmt *fmt,
enum v4l2_subdev_format_whence which)
{
unsigned int i;
switch (pad) {
case MSM_CSID_PAD_SINK:
/* Set format on sink pad */
for (i = 0; i < csid->nformats; i++)
if (fmt->code == csid->formats[i].code)
break;
/* If not found, use UYVY as default */
if (i >= csid->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
fmt->field = V4L2_FIELD_NONE;
fmt->colorspace = V4L2_COLORSPACE_SRGB;
break;
case MSM_CSID_PAD_SRC:
if (csid->testgen_mode->cur.val == 0) {
/* Test generator is disabled, keep pad formats in sync */
u32 code = fmt->code;
*fmt = *__csid_get_format(csid, sd_state,
MSM_CSID_PAD_SINK, which);
fmt->code = csid->ops->src_pad_code(csid, fmt->code, 0, code);
} else {
/* Test generator is enabled, set format on source pad to allow test generator usage */
for (i = 0; i < csid->nformats; i++)
if (csid->formats[i].code == fmt->code)
break;
/* If not found, use UYVY as default */
if (i >= csid->nformats)
fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
fmt->width = clamp_t(u32, fmt->width, 1, 8191);
fmt->height = clamp_t(u32, fmt->height, 1, 8191);
fmt->field = V4L2_FIELD_NONE;
}
break;
}
fmt->colorspace = V4L2_COLORSPACE_SRGB;
}
/*
* csid_enum_mbus_code - Handle pixel format enumeration
* @sd: CSID V4L2 subdevice
* @sd_state: V4L2 subdev state
* @code: pointer to v4l2_subdev_mbus_code_enum structure
* Return -EINVAL or zero on success
*/
static int csid_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
if (code->pad == MSM_CSID_PAD_SINK) {
if (code->index >= csid->nformats)
return -EINVAL;
code->code = csid->formats[code->index].code;
} else {
if (csid->testgen_mode->cur.val == 0) {
struct v4l2_mbus_framefmt *sink_fmt;
sink_fmt = __csid_get_format(csid, sd_state,
MSM_CSID_PAD_SINK,
code->which);
code->code = csid->ops->src_pad_code(csid, sink_fmt->code,
code->index, 0);
if (!code->code)
return -EINVAL;
} else {
if (code->index >= csid->nformats)
return -EINVAL;
code->code = csid->formats[code->index].code;
}
}
return 0;
}
/*
* csid_enum_frame_size - Handle frame size enumeration
* @sd: CSID V4L2 subdevice
* @sd_state: V4L2 subdev state
* @fse: pointer to v4l2_subdev_frame_size_enum structure
* Return -EINVAL or zero on success
*/
static int csid_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt format;
if (fse->index != 0)
return -EINVAL;
format.code = fse->code;
format.width = 1;
format.height = 1;
csid_try_format(csid, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
if (format.code != fse->code)
return -EINVAL;
format.code = fse->code;
format.width = -1;
format.height = -1;
csid_try_format(csid, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
return 0;
}
/*
* csid_get_format - Handle get format by pads subdev method
* @sd: CSID V4L2 subdevice
* @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
*/
static int csid_get_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
format = __csid_get_format(csid, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
fmt->format = *format;
return 0;
}
/*
* csid_set_format - Handle set format by pads subdev method
* @sd: CSID V4L2 subdevice
* @sd_state: V4L2 subdev state
* @fmt: pointer to v4l2 subdev format structure
*
* Return -EINVAL or zero on success
*/
static int csid_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
int i;
format = __csid_get_format(csid, sd_state, fmt->pad, fmt->which);
if (format == NULL)
return -EINVAL;
csid_try_format(csid, sd_state, fmt->pad, &fmt->format, fmt->which);
*format = fmt->format;
/* Propagate the format from sink to source pads */
if (fmt->pad == MSM_CSID_PAD_SINK) {
for (i = MSM_CSID_PAD_FIRST_SRC; i < MSM_CSID_PADS_NUM; ++i) {
format = __csid_get_format(csid, sd_state, i, fmt->which);
*format = fmt->format;
csid_try_format(csid, sd_state, i, format, fmt->which);
}
}
return 0;
}
/*
* csid_init_formats - Initialize formats on all pads
* @sd: CSID V4L2 subdevice
* @fh: V4L2 subdev file handle
*
* Initialize all pad formats with default values.
*
* Return 0 on success or a negative error code otherwise
*/
static int csid_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_subdev_format format = {
.pad = MSM_CSID_PAD_SINK,
.which = fh ? V4L2_SUBDEV_FORMAT_TRY :
V4L2_SUBDEV_FORMAT_ACTIVE,
.format = {
.code = MEDIA_BUS_FMT_UYVY8_2X8,
.width = 1920,
.height = 1080
}
};
return csid_set_format(sd, fh ? fh->state : NULL, &format);
}
/*
* csid_set_test_pattern - Set test generator's pattern mode
* @csid: CSID device
* @value: desired test pattern mode
*
* Return 0 on success or a negative error code otherwise
*/
static int csid_set_test_pattern(struct csid_device *csid, s32 value)
{
struct csid_testgen_config *tg = &csid->testgen;
/* If CSID is linked to CSIPHY, do not allow enabling the test generator */
if (value && media_pad_remote_pad_first(&csid->pads[MSM_CSID_PAD_SINK]))
return -EBUSY;
tg->enabled = !!value;
return csid->ops->configure_testgen_pattern(csid, value);
}
/*
* csid_s_ctrl - Handle set control subdev method
* @ctrl: pointer to v4l2 control structure
*
* Return 0 on success or a negative error code otherwise
*/
static int csid_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct csid_device *csid = container_of(ctrl->handler,
struct csid_device, ctrls);
int ret = -EINVAL;
switch (ctrl->id) {
case V4L2_CID_TEST_PATTERN:
ret = csid_set_test_pattern(csid, ctrl->val);
break;
}
return ret;
}
static const struct v4l2_ctrl_ops csid_ctrl_ops = {
.s_ctrl = csid_s_ctrl,
};
/*
* msm_csid_subdev_init - Initialize CSID device structure and resources
* @camss: Camera sub-system structure
* @csid: CSID device
* @res: CSID module resources table
* @id: CSID module id
*
* Return 0 on success or a negative error code otherwise
*/
int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
const struct resources *res, u8 id)
{
struct device *dev = camss->dev;
struct platform_device *pdev = to_platform_device(dev);
int i, j;
int ret;
csid->camss = camss;
csid->id = id;
if (camss->version == CAMSS_8x16) {
csid->ops = &csid_ops_4_1;
} else if (camss->version == CAMSS_8x96 ||
camss->version == CAMSS_660) {
csid->ops = &csid_ops_4_7;
} else if (camss->version == CAMSS_845 ||
camss->version == CAMSS_8250) {
csid->ops = &csid_ops_gen2;
} else {
return -EINVAL;
}
csid->ops->subdev_init(csid);
/* Memory */
if (camss->version == CAMSS_8250) {
/* For Titan 480, CSID registers are inside the VFE region,
 * between the VFE "top" and "bus" registers. This requires
 * VFE to be initialized before CSID.
 */
if (id >= 2) /* VFE/CSID lite */
csid->base = camss->vfe[id].base + VFE_480_LITE_CSID_OFFSET;
else
csid->base = camss->vfe[id].base + VFE_480_CSID_OFFSET;
} else {
csid->base = devm_platform_ioremap_resource_byname(pdev, res->reg[0]);
if (IS_ERR(csid->base))
return PTR_ERR(csid->base);
}
/* Interrupt */
ret = platform_get_irq_byname(pdev, res->interrupt[0]);
if (ret < 0)
return ret;
csid->irq = ret;
snprintf(csid->irq_name, sizeof(csid->irq_name), "%s_%s%d",
dev_name(dev), MSM_CSID_NAME, csid->id);
ret = devm_request_irq(dev, csid->irq, csid->ops->isr,
IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN,
csid->irq_name, csid);
if (ret < 0) {
dev_err(dev, "request_irq failed: %d\n", ret);
return ret;
}
/* Clocks */
csid->nclocks = 0;
while (res->clock[csid->nclocks])
csid->nclocks++;
csid->clock = devm_kcalloc(dev, csid->nclocks, sizeof(*csid->clock),
GFP_KERNEL);
if (!csid->clock)
return -ENOMEM;
for (i = 0; i < csid->nclocks; i++) {
struct camss_clock *clock = &csid->clock[i];
clock->clk = devm_clk_get(dev, res->clock[i]);
if (IS_ERR(clock->clk))
return PTR_ERR(clock->clk);
clock->name = res->clock[i];
clock->nfreqs = 0;
while (res->clock_rate[i][clock->nfreqs])
clock->nfreqs++;
if (!clock->nfreqs) {
clock->freq = NULL;
continue;
}
clock->freq = devm_kcalloc(dev,
clock->nfreqs,
sizeof(*clock->freq),
GFP_KERNEL);
if (!clock->freq)
return -ENOMEM;
for (j = 0; j < clock->nfreqs; j++)
clock->freq[j] = res->clock_rate[i][j];
}
/* Regulator */
for (i = 0; i < ARRAY_SIZE(res->regulators); i++) {
if (res->regulators[i])
csid->num_supplies++;
}
if (csid->num_supplies) {
csid->supplies = devm_kmalloc_array(camss->dev,
csid->num_supplies,
sizeof(*csid->supplies),
GFP_KERNEL);
if (!csid->supplies)
return -ENOMEM;
}
for (i = 0; i < csid->num_supplies; i++)
csid->supplies[i].supply = res->regulators[i];
ret = devm_regulator_bulk_get(camss->dev, csid->num_supplies,
csid->supplies);
if (ret)
return ret;
init_completion(&csid->reset_complete);
return 0;
}
/*
* msm_csid_get_csid_id - Get CSID HW module id
* @entity: Pointer to CSID media entity structure
* @id: Return CSID HW module id here
*/
void msm_csid_get_csid_id(struct media_entity *entity, u8 *id)
{
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct csid_device *csid = v4l2_get_subdevdata(sd);
*id = csid->id;
}
/*
* csid_get_lane_assign - Calculate CSI2 lane assign configuration parameter
* @lane_cfg: CSI2 lane configuration
*
* Return lane assign
*/
static u32 csid_get_lane_assign(struct csiphy_lanes_cfg *lane_cfg)
{
u32 lane_assign = 0;
int i;
for (i = 0; i < lane_cfg->num_data; i++)
lane_assign |= lane_cfg->data[i].pos << (i * 4);
return lane_assign;
}
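/*
 * Worked example for csid_get_lane_assign() above: four data lanes at
 * physical positions 1, 2, 3 and 4 pack into one nibble each, giving
 * lane_assign = 1 | (2 << 4) | (3 << 8) | (4 << 12) = 0x4321.
 */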
/*
* csid_link_setup - Setup CSID connections
* @entity: Pointer to media entity structure
* @local: Pointer to local pad
* @remote: Pointer to remote pad
* @flags: Link flags
*
* Return 0 on success
*/
static int csid_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
{
if (flags & MEDIA_LNK_FL_ENABLED)
if (media_pad_remote_pad_first(local))
return -EBUSY;
if ((local->flags & MEDIA_PAD_FL_SINK) &&
(flags & MEDIA_LNK_FL_ENABLED)) {
struct v4l2_subdev *sd;
struct csid_device *csid;
struct csiphy_device *csiphy;
struct csiphy_lanes_cfg *lane_cfg;
sd = media_entity_to_v4l2_subdev(entity);
csid = v4l2_get_subdevdata(sd);
/*
 * If the test generator is enabled,
 * do not allow a link from CSIPHY to CSID
 */
if (csid->testgen_mode->cur.val != 0)
return -EBUSY;
sd = media_entity_to_v4l2_subdev(remote->entity);
csiphy = v4l2_get_subdevdata(sd);
/*
 * If a sensor is not linked to CSIPHY,
 * do not allow a link from CSIPHY to CSID
 */
if (!csiphy->cfg.csi2)
return -EPERM;
csid->phy.csiphy_id = csiphy->id;
lane_cfg = &csiphy->cfg.csi2->lane_cfg;
csid->phy.lane_cnt = lane_cfg->num_data;
csid->phy.lane_assign = csid_get_lane_assign(lane_cfg);
}
/* Decide which virtual channels to enable based on which source pads are enabled */
if (local->flags & MEDIA_PAD_FL_SOURCE) {
struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
struct csid_device *csid = v4l2_get_subdevdata(sd);
struct device *dev = csid->camss->dev;
if (flags & MEDIA_LNK_FL_ENABLED)
csid->phy.en_vc |= BIT(local->index - 1);
else
csid->phy.en_vc &= ~BIT(local->index - 1);
csid->phy.need_vc_update = true;
dev_dbg(dev, "%s: Enabled CSID virtual channels mask 0x%x\n",
__func__, csid->phy.en_vc);
}
return 0;
}
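/*
 * Note on the virtual channel mask in csid_link_setup() above: pad 0 is the
 * sink, so an enabled source pad with index n maps to virtual channel n - 1,
 * i.e. enabling the first source pad sets bit 0 (VC0) in csid->phy.en_vc.
 */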
static const struct v4l2_subdev_core_ops csid_core_ops = {
.s_power = csid_set_power,
.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_video_ops csid_video_ops = {
.s_stream = csid_set_stream,
};
static const struct v4l2_subdev_pad_ops csid_pad_ops = {
.enum_mbus_code = csid_enum_mbus_code,
.enum_frame_size = csid_enum_frame_size,
.get_fmt = csid_get_format,
.set_fmt = csid_set_format,
};
static const struct v4l2_subdev_ops csid_v4l2_ops = {
.core = &csid_core_ops,
.video = &csid_video_ops,
.pad = &csid_pad_ops,
};
static const struct v4l2_subdev_internal_ops csid_v4l2_internal_ops = {
.open = csid_init_formats,
};
static const struct media_entity_operations csid_media_ops = {
.link_setup = csid_link_setup,
.link_validate = v4l2_subdev_link_validate,
};
/*
* msm_csid_register_entity - Register subdev node for CSID module
* @csid: CSID device
* @v4l2_dev: V4L2 device
*
* Return 0 on success or a negative error code otherwise
*/
int msm_csid_register_entity(struct csid_device *csid,
struct v4l2_device *v4l2_dev)
{
struct v4l2_subdev *sd = &csid->subdev;
struct media_pad *pads = csid->pads;
struct device *dev = csid->camss->dev;
int i;
int ret;
v4l2_subdev_init(sd, &csid_v4l2_ops);
sd->internal_ops = &csid_v4l2_internal_ops;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
V4L2_SUBDEV_FL_HAS_EVENTS;
snprintf(sd->name, ARRAY_SIZE(sd->name), "%s%d",
MSM_CSID_NAME, csid->id);
v4l2_set_subdevdata(sd, csid);
ret = v4l2_ctrl_handler_init(&csid->ctrls, 1);
if (ret < 0) {
dev_err(dev, "Failed to init ctrl handler: %d\n", ret);
return ret;
}
csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls,
&csid_ctrl_ops, V4L2_CID_TEST_PATTERN,
csid->testgen.nmodes, 0, 0,
csid->testgen.modes);
if (csid->ctrls.error) {
dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error);
ret = csid->ctrls.error;
goto free_ctrl;
}
csid->subdev.ctrl_handler = &csid->ctrls;
ret = csid_init_formats(sd, NULL);
if (ret < 0) {
dev_err(dev, "Failed to init format: %d\n", ret);
goto free_ctrl;
}
pads[MSM_CSID_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
for (i = MSM_CSID_PAD_FIRST_SRC; i < MSM_CSID_PADS_NUM; ++i)
pads[i].flags = MEDIA_PAD_FL_SOURCE;
sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
sd->entity.ops = &csid_media_ops;
ret = media_entity_pads_init(&sd->entity, MSM_CSID_PADS_NUM, pads);
if (ret < 0) {
dev_err(dev, "Failed to init media entity: %d\n", ret);
goto free_ctrl;
}
ret = v4l2_device_register_subdev(v4l2_dev, sd);
if (ret < 0) {
dev_err(dev, "Failed to register subdev: %d\n", ret);
goto media_cleanup;
}
return 0;
media_cleanup:
media_entity_cleanup(&sd->entity);
free_ctrl:
v4l2_ctrl_handler_free(&csid->ctrls);
return ret;
}
/*
* msm_csid_unregister_entity - Unregister CSID module subdev node
* @csid: CSID device
*/
void msm_csid_unregister_entity(struct csid_device *csid)
{
v4l2_device_unregister_subdev(&csid->subdev);
media_entity_cleanup(&csid->subdev.entity);
v4l2_ctrl_handler_free(&csid->ctrls);
}
| linux-master | drivers/media/platform/qcom/camss/camss-csid.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss-vfe-4-7.c
*
* Qualcomm MSM Camera Subsystem - VFE (Video Front End) Module v4.7
*
* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss.h"
#include "camss-vfe.h"
#include "camss-vfe-gen1.h"
#define VFE_0_HW_VERSION 0x000
#define VFE_0_GLOBAL_RESET_CMD 0x018
#define VFE_0_GLOBAL_RESET_CMD_CORE BIT(0)
#define VFE_0_GLOBAL_RESET_CMD_CAMIF BIT(1)
#define VFE_0_GLOBAL_RESET_CMD_BUS BIT(2)
#define VFE_0_GLOBAL_RESET_CMD_BUS_BDG BIT(3)
#define VFE_0_GLOBAL_RESET_CMD_REGISTER BIT(4)
#define VFE_0_GLOBAL_RESET_CMD_PM BIT(5)
#define VFE_0_GLOBAL_RESET_CMD_BUS_MISR BIT(6)
#define VFE_0_GLOBAL_RESET_CMD_TESTGEN BIT(7)
#define VFE_0_GLOBAL_RESET_CMD_DSP BIT(8)
#define VFE_0_GLOBAL_RESET_CMD_IDLE_CGC BIT(9)
#define VFE_0_MODULE_LENS_EN 0x040
#define VFE_0_MODULE_LENS_EN_DEMUX BIT(2)
#define VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE BIT(3)
#define VFE_0_MODULE_ZOOM_EN 0x04c
#define VFE_0_MODULE_ZOOM_EN_SCALE_ENC BIT(1)
#define VFE_0_MODULE_ZOOM_EN_CROP_ENC BIT(2)
#define VFE_0_MODULE_ZOOM_EN_REALIGN_BUF BIT(9)
#define VFE_0_CORE_CFG 0x050
#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR 0x4
#define VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB 0x5
#define VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY 0x6
#define VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY 0x7
#define VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN BIT(4)
#define VFE_0_IRQ_CMD 0x058
#define VFE_0_IRQ_CMD_GLOBAL_CLEAR BIT(0)
#define VFE_0_IRQ_MASK_0 0x05c
#define VFE_0_IRQ_MASK_0_CAMIF_SOF BIT(0)
#define VFE_0_IRQ_MASK_0_CAMIF_EOF BIT(1)
#define VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
#define VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(n) \
((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_MASK_0_RDIn_REG_UPDATE(n))
#define VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
#define VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
#define VFE_0_IRQ_MASK_0_RESET_ACK BIT(31)
#define VFE_0_IRQ_MASK_1 0x060
#define VFE_0_IRQ_MASK_1_CAMIF_ERROR BIT(0)
#define VFE_0_IRQ_MASK_1_VIOLATION BIT(7)
#define VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK BIT(8)
#define VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(n) BIT((n) + 9)
#define VFE_0_IRQ_MASK_1_RDIn_SOF(n) BIT((n) + 29)
#define VFE_0_IRQ_CLEAR_0 0x064
#define VFE_0_IRQ_CLEAR_1 0x068
#define VFE_0_IRQ_STATUS_0 0x06c
#define VFE_0_IRQ_STATUS_0_CAMIF_SOF BIT(0)
#define VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n) BIT((n) + 5)
#define VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(n) \
((n) == VFE_LINE_PIX ? BIT(4) : VFE_0_IRQ_STATUS_0_RDIn_REG_UPDATE(n))
#define VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(n) BIT((n) + 8)
#define VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(n) BIT((n) + 25)
#define VFE_0_IRQ_STATUS_0_RESET_ACK BIT(31)
#define VFE_0_IRQ_STATUS_1 0x070
#define VFE_0_IRQ_STATUS_1_VIOLATION BIT(7)
#define VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK BIT(8)
#define VFE_0_IRQ_STATUS_1_RDIn_SOF(n) BIT((n) + 29)
#define VFE_0_IRQ_COMPOSITE_MASK_0 0x074
#define VFE_0_VIOLATION_STATUS 0x07c
#define VFE_0_BUS_CMD 0x80
#define VFE_0_BUS_CMD_Mx_RLD_CMD(x) BIT(x)
#define VFE_0_BUS_CFG 0x084
#define VFE_0_BUS_XBAR_CFG_x(x) (0x90 + 0x4 * ((x) / 2))
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN BIT(2)
#define VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN BIT(3)
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTRA (0x1 << 4)
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER (0x2 << 4)
#define VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA (0x3 << 4)
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT 8
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA 0x0
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 0xc
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 0xd
#define VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 0xe
#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(n) (0x0a0 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT 0
#define VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(n) (0x0a4 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(n) (0x0ac + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(n) (0x0b4 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT 1
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT 2
#define VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK (0x1f << 2)
#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(n) (0x0b8 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT 16
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(n) (0x0bc + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(n) (0x0c0 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(n) \
(0x0c4 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(n) \
(0x0c8 + 0x2c * (n))
#define VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF 0xffffffff
#define VFE_0_BUS_PING_PONG_STATUS 0x338
#define VFE_0_BUS_BDG_CMD 0x400
#define VFE_0_BUS_BDG_CMD_HALT_REQ 1
#define VFE_0_BUS_BDG_QOS_CFG_0 0x404
#define VFE_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa9aaa9
#define VFE_0_BUS_BDG_QOS_CFG_1 0x408
#define VFE_0_BUS_BDG_QOS_CFG_2 0x40c
#define VFE_0_BUS_BDG_QOS_CFG_3 0x410
#define VFE_0_BUS_BDG_QOS_CFG_4 0x414
#define VFE_0_BUS_BDG_QOS_CFG_5 0x418
#define VFE_0_BUS_BDG_QOS_CFG_6 0x41c
#define VFE_0_BUS_BDG_QOS_CFG_7 0x420
#define VFE_0_BUS_BDG_QOS_CFG_7_CFG 0x0001aaa9
#define VFE48_0_BUS_BDG_QOS_CFG_0_CFG 0xaaa5aaa5
#define VFE48_0_BUS_BDG_QOS_CFG_3_CFG 0xaa55aaa5
#define VFE48_0_BUS_BDG_QOS_CFG_4_CFG 0xaa55aa55
#define VFE48_0_BUS_BDG_QOS_CFG_7_CFG 0x0005aa55
#define VFE_0_BUS_BDG_DS_CFG_0 0x424
#define VFE_0_BUS_BDG_DS_CFG_0_CFG 0xcccc0011
#define VFE_0_BUS_BDG_DS_CFG_1 0x428
#define VFE_0_BUS_BDG_DS_CFG_2 0x42c
#define VFE_0_BUS_BDG_DS_CFG_3 0x430
#define VFE_0_BUS_BDG_DS_CFG_4 0x434
#define VFE_0_BUS_BDG_DS_CFG_5 0x438
#define VFE_0_BUS_BDG_DS_CFG_6 0x43c
#define VFE_0_BUS_BDG_DS_CFG_7 0x440
#define VFE_0_BUS_BDG_DS_CFG_8 0x444
#define VFE_0_BUS_BDG_DS_CFG_9 0x448
#define VFE_0_BUS_BDG_DS_CFG_10 0x44c
#define VFE_0_BUS_BDG_DS_CFG_11 0x450
#define VFE_0_BUS_BDG_DS_CFG_12 0x454
#define VFE_0_BUS_BDG_DS_CFG_13 0x458
#define VFE_0_BUS_BDG_DS_CFG_14 0x45c
#define VFE_0_BUS_BDG_DS_CFG_15 0x460
#define VFE_0_BUS_BDG_DS_CFG_16 0x464
#define VFE_0_BUS_BDG_DS_CFG_16_CFG 0x40000103
#define VFE48_0_BUS_BDG_DS_CFG_0_CFG 0xcccc1111
#define VFE48_0_BUS_BDG_DS_CFG_16_CFG 0x00000110
#define VFE_0_RDI_CFG_x(x) (0x46c + (0x4 * (x)))
#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT 28
#define VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK (0xf << 28)
#define VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT 4
#define VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK (0xf << 4)
#define VFE_0_RDI_CFG_x_RDI_EN_BIT BIT(2)
#define VFE_0_RDI_CFG_x_MIPI_EN_BITS 0x3
#define VFE_0_CAMIF_CMD 0x478
#define VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY 0
#define VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY 1
#define VFE_0_CAMIF_CMD_NO_CHANGE 3
#define VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS BIT(2)
#define VFE_0_CAMIF_CFG 0x47c
#define VFE_0_CAMIF_CFG_VFE_OUTPUT_EN BIT(6)
#define VFE_0_CAMIF_FRAME_CFG 0x484
#define VFE_0_CAMIF_WINDOW_WIDTH_CFG 0x488
#define VFE_0_CAMIF_WINDOW_HEIGHT_CFG 0x48c
#define VFE_0_CAMIF_SUBSAMPLE_CFG 0x490
#define VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN 0x498
#define VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN 0x49c
#define VFE_0_CAMIF_STATUS 0x4a4
#define VFE_0_CAMIF_STATUS_HALT BIT(31)
#define VFE_0_REG_UPDATE 0x4ac
#define VFE_0_REG_UPDATE_RDIn(n) BIT(1 + (n))
#define VFE_0_REG_UPDATE_line_n(n) \
((n) == VFE_LINE_PIX ? 1 : VFE_0_REG_UPDATE_RDIn(n))
#define VFE_0_DEMUX_CFG 0x560
#define VFE_0_DEMUX_CFG_PERIOD 0x3
#define VFE_0_DEMUX_GAIN_0 0x564
#define VFE_0_DEMUX_GAIN_0_CH0_EVEN (0x80 << 0)
#define VFE_0_DEMUX_GAIN_0_CH0_ODD (0x80 << 16)
#define VFE_0_DEMUX_GAIN_1 0x568
#define VFE_0_DEMUX_GAIN_1_CH1 (0x80 << 0)
#define VFE_0_DEMUX_GAIN_1_CH2 (0x80 << 16)
#define VFE_0_DEMUX_EVEN_CFG 0x574
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV 0x9cac
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU 0xac9c
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY 0xc9ca
#define VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY 0xcac9
#define VFE_0_DEMUX_ODD_CFG 0x578
#define VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV 0x9cac
#define VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU 0xac9c
#define VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY 0xc9ca
#define VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY 0xcac9
#define VFE_0_SCALE_ENC_Y_CFG 0x91c
#define VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE 0x920
#define VFE_0_SCALE_ENC_Y_H_PHASE 0x924
#define VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE 0x934
#define VFE_0_SCALE_ENC_Y_V_PHASE 0x938
#define VFE_0_SCALE_ENC_CBCR_CFG 0x948
#define VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE 0x94c
#define VFE_0_SCALE_ENC_CBCR_H_PHASE 0x950
#define VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE 0x960
#define VFE_0_SCALE_ENC_CBCR_V_PHASE 0x964
#define VFE_0_CROP_ENC_Y_WIDTH 0x974
#define VFE_0_CROP_ENC_Y_HEIGHT 0x978
#define VFE_0_CROP_ENC_CBCR_WIDTH 0x97c
#define VFE_0_CROP_ENC_CBCR_HEIGHT 0x980
#define VFE_0_CLAMP_ENC_MAX_CFG 0x984
#define VFE_0_CLAMP_ENC_MAX_CFG_CH0 (0xff << 0)
#define VFE_0_CLAMP_ENC_MAX_CFG_CH1 (0xff << 8)
#define VFE_0_CLAMP_ENC_MAX_CFG_CH2 (0xff << 16)
#define VFE_0_CLAMP_ENC_MIN_CFG 0x988
#define VFE_0_CLAMP_ENC_MIN_CFG_CH0 (0x0 << 0)
#define VFE_0_CLAMP_ENC_MIN_CFG_CH1 (0x0 << 8)
#define VFE_0_CLAMP_ENC_MIN_CFG_CH2 (0x0 << 16)
#define VFE_0_REALIGN_BUF_CFG 0xaac
#define VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL BIT(2)
#define VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL BIT(3)
#define VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE BIT(4)
#define VFE48_0_BUS_IMAGE_MASTER_CMD 0xcec
#define VFE48_0_BUS_IMAGE_MASTER_n_SHIFT(x) (2 * (x))
#define CAMIF_TIMEOUT_SLEEP_US 1000
#define CAMIF_TIMEOUT_ALL_US 1000000
#define MSM_VFE_VFE0_UB_SIZE 2047
#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
return hw_version;
}
static u16 vfe_get_ub_size(u8 vfe_id)
{
if (vfe_id == 0)
return MSM_VFE_VFE0_UB_SIZE_RDI;
else if (vfe_id == 1)
return MSM_VFE_VFE1_UB_SIZE_RDI;
return 0;
}
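/*
 * The per-RDI UB sizes returned above split the write master UB evenly
 * between the three RDI paths: VFE0 has 2047 words, so 2047 / 3 = 682 words
 * per RDI; VFE1 has 1535 words, so 1535 / 3 = 511 words per RDI.
 */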
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
writel_relaxed(bits & ~clr_bits, vfe->base + reg);
}
static inline void vfe_reg_set(struct vfe_device *vfe, u32 reg, u32 set_bits)
{
u32 bits = readl_relaxed(vfe->base + reg);
writel_relaxed(bits | set_bits, vfe->base + reg);
}
static void vfe_global_reset(struct vfe_device *vfe)
{
u32 reset_bits = VFE_0_GLOBAL_RESET_CMD_IDLE_CGC |
VFE_0_GLOBAL_RESET_CMD_DSP |
VFE_0_GLOBAL_RESET_CMD_TESTGEN |
VFE_0_GLOBAL_RESET_CMD_BUS_MISR |
VFE_0_GLOBAL_RESET_CMD_PM |
VFE_0_GLOBAL_RESET_CMD_REGISTER |
VFE_0_GLOBAL_RESET_CMD_BUS_BDG |
VFE_0_GLOBAL_RESET_CMD_BUS |
VFE_0_GLOBAL_RESET_CMD_CAMIF |
VFE_0_GLOBAL_RESET_CMD_CORE;
writel_relaxed(BIT(31), vfe->base + VFE_0_IRQ_MASK_0);
/* Enforce barrier between IRQ mask setup and global reset */
wmb();
writel_relaxed(reset_bits, vfe->base + VFE_0_GLOBAL_RESET_CMD);
}
static void vfe_halt_request(struct vfe_device *vfe)
{
writel_relaxed(VFE_0_BUS_BDG_CMD_HALT_REQ,
vfe->base + VFE_0_BUS_BDG_CMD);
}
static void vfe_halt_clear(struct vfe_device *vfe)
{
writel_relaxed(0x0, vfe->base + VFE_0_BUS_BDG_CMD);
}
static void vfe_wm_enable(struct vfe_device *vfe, u8 wm, u8 enable)
{
if (enable)
vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
else
vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_CFG(wm),
1 << VFE_0_BUS_IMAGE_MASTER_n_WR_CFG_WR_PATH_SHIFT);
}
static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
{
if (enable)
vfe_reg_set(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm),
1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT);
else
vfe_reg_clr(vfe, VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm),
1 << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_BASED_SHIFT);
}
#define CALC_WORD(width, M, N) (((width) * (M) + (N) - 1) / (N))
static int vfe_word_per_line_by_pixel(u32 format, u32 pixel_per_line)
{
int val = 0;
switch (format) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
val = CALC_WORD(pixel_per_line, 1, 8);
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_VYUY:
val = CALC_WORD(pixel_per_line, 2, 8);
break;
}
return val;
}
static int vfe_word_per_line_by_bytes(u32 bytes_per_line)
{
return CALC_WORD(bytes_per_line, 1, 8);
}
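/*
 * Worked examples for the word-per-line helpers above (one bus word is
 * 8 bytes, and CALC_WORD rounds up): an NV12/NV21 plane 1920 pixels wide
 * needs CALC_WORD(1920, 1, 8) = 240 words, a packed YUYV line of the same
 * width needs CALC_WORD(1920, 2, 8) = 480 words, and a 2400-byte line needs
 * CALC_WORD(2400, 1, 8) = 300 words.
 */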
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
*width = pix->width;
*height = pix->height;
switch (pix->pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
*bytesperline = pix->plane_fmt[0].bytesperline;
if (plane == 1)
*height /= 2;
break;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
*bytesperline = pix->plane_fmt[0].bytesperline;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_UYVY:
*bytesperline = pix->plane_fmt[plane].bytesperline;
break;
}
}
static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
struct v4l2_pix_format_mplane *pix,
u8 plane, u32 enable)
{
u32 reg;
if (enable) {
u16 width = 0, height = 0, bytesperline = 0, wpl;
vfe_get_wm_sizes(pix, plane, &width, &height, &bytesperline);
wpl = vfe_word_per_line_by_pixel(pix->pixelformat, width);
reg = height - 1;
reg |= ((wpl + 3) / 4 - 1) << 16;
writel_relaxed(reg, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
wpl = vfe_word_per_line_by_bytes(bytesperline);
reg = 0x3;
reg |= (height - 1) << 2;
reg |= ((wpl + 1) / 2) << 16;
writel_relaxed(reg, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
} else {
writel_relaxed(0, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_IMAGE_SIZE(wm));
writel_relaxed(0, vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_BUFFER_CFG(wm));
}
}
static void vfe_wm_set_framedrop_period(struct vfe_device *vfe, u8 wm, u8 per)
{
u32 reg;
reg = readl_relaxed(vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
reg &= ~(VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK);
reg |= (per << VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_SHIFT)
& VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG_FRM_DROP_PER_MASK;
writel_relaxed(reg,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_ADDR_CFG(wm));
}
static void vfe_wm_set_framedrop_pattern(struct vfe_device *vfe, u8 wm,
u32 pattern)
{
writel_relaxed(pattern,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_FRAMEDROP_PATTERN(wm));
}
static void vfe_wm_set_ub_cfg(struct vfe_device *vfe, u8 wm,
u16 offset, u16 depth)
{
u32 reg;
reg = (offset << VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG_OFFSET_SHIFT) |
depth;
writel_relaxed(reg, vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_UB_CFG(wm));
}
static void vfe_bus_reload_wm(struct vfe_device *vfe, u8 wm)
{
/* Enforce barrier between outstanding register writes and the bus reload */
wmb();
writel_relaxed(VFE_0_BUS_CMD_Mx_RLD_CMD(wm), vfe->base + VFE_0_BUS_CMD);
/* Use barrier to make sure bus reload is issued before anything else */
wmb();
}
static void vfe_wm_set_ping_addr(struct vfe_device *vfe, u8 wm, u32 addr)
{
writel_relaxed(addr,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PING_ADDR(wm));
}
static void vfe_wm_set_pong_addr(struct vfe_device *vfe, u8 wm, u32 addr)
{
writel_relaxed(addr,
vfe->base + VFE_0_BUS_IMAGE_MASTER_n_WR_PONG_ADDR(wm));
}
static int vfe_wm_get_ping_pong_status(struct vfe_device *vfe, u8 wm)
{
u32 reg;
reg = readl_relaxed(vfe->base + VFE_0_BUS_PING_PONG_STATUS);
return (reg >> wm) & 0x1;
}
static void vfe_bus_enable_wr_if(struct vfe_device *vfe, u8 enable)
{
if (enable)
writel_relaxed(0x101, vfe->base + VFE_0_BUS_CFG);
else
writel_relaxed(0, vfe->base + VFE_0_BUS_CFG);
}
static void vfe_bus_connect_wm_to_rdi(struct vfe_device *vfe, u8 wm,
enum vfe_line_id id)
{
u32 reg;
reg = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), reg);
reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
reg |= ((3 * id) << VFE_0_RDI_CFG_x_RDI_STREAM_SEL_SHIFT) &
VFE_0_RDI_CFG_x_RDI_STREAM_SEL_MASK;
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id), reg);
switch (id) {
case VFE_LINE_RDI0:
default:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI1:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI2:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
}
if (wm % 2 == 1)
reg <<= 16;
vfe_reg_set(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
}
static void vfe_wm_set_subsample(struct vfe_device *vfe, u8 wm)
{
writel_relaxed(VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN_DEF,
vfe->base +
VFE_0_BUS_IMAGE_MASTER_n_WR_IRQ_SUBSAMPLE_PATTERN(wm));
}
static void vfe_bus_disconnect_wm_from_rdi(struct vfe_device *vfe, u8 wm,
enum vfe_line_id id)
{
u32 reg;
reg = VFE_0_RDI_CFG_x_RDI_EN_BIT;
vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id), reg);
switch (id) {
case VFE_LINE_RDI0:
default:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI0 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI1:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI1 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
case VFE_LINE_RDI2:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_VAL_RDI2 <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
break;
}
if (wm % 2 == 1)
reg <<= 16;
vfe_reg_clr(vfe, VFE_0_BUS_XBAR_CFG_x(wm), reg);
}
static void vfe_set_xbar_cfg(struct vfe_device *vfe, struct vfe_output *output,
u8 enable)
{
struct vfe_line *line = container_of(output, struct vfe_line, output);
u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
u32 reg;
switch (p) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
reg = VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_LUMA <<
VFE_0_BUS_XBAR_CFG_x_M_SINGLE_STREAM_SEL_SHIFT;
if (output->wm_idx[0] % 2 == 1)
reg <<= 16;
if (enable)
vfe_reg_set(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
reg);
else
vfe_reg_clr(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
reg);
reg = VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV16)
reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
if (output->wm_idx[1] % 2 == 1)
reg <<= 16;
if (enable)
vfe_reg_set(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]),
reg);
else
vfe_reg_clr(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[1]),
reg);
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_UYVY:
reg = VFE_0_BUS_XBAR_CFG_x_M_REALIGN_BUF_EN;
reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_EN;
if (p == V4L2_PIX_FMT_YUYV || p == V4L2_PIX_FMT_YVYU)
reg |= VFE_0_BUS_XBAR_CFG_x_M_PAIR_STREAM_SWAP_INTER_INTRA;
if (output->wm_idx[0] % 2 == 1)
reg <<= 16;
if (enable)
vfe_reg_set(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
reg);
else
vfe_reg_clr(vfe,
VFE_0_BUS_XBAR_CFG_x(output->wm_idx[0]),
reg);
break;
default:
break;
}
}
static void vfe_set_realign_cfg(struct vfe_device *vfe, struct vfe_line *line,
u8 enable)
{
u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
u32 val = VFE_0_MODULE_ZOOM_EN_REALIGN_BUF;
if (p != V4L2_PIX_FMT_YUYV && p != V4L2_PIX_FMT_YVYU &&
p != V4L2_PIX_FMT_VYUY && p != V4L2_PIX_FMT_UYVY)
return;
if (enable) {
vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val);
} else {
vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val);
return;
}
val = VFE_0_REALIGN_BUF_CFG_HSUB_ENABLE;
if (p == V4L2_PIX_FMT_UYVY || p == V4L2_PIX_FMT_YUYV)
val |= VFE_0_REALIGN_BUF_CFG_CR_ODD_PIXEL;
else
val |= VFE_0_REALIGN_BUF_CFG_CB_ODD_PIXEL;
writel_relaxed(val, vfe->base + VFE_0_REALIGN_BUF_CFG);
}
static void vfe_set_rdi_cid(struct vfe_device *vfe, enum vfe_line_id id, u8 cid)
{
vfe_reg_clr(vfe, VFE_0_RDI_CFG_x(id),
VFE_0_RDI_CFG_x_RDI_M0_SEL_MASK);
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(id),
cid << VFE_0_RDI_CFG_x_RDI_M0_SEL_SHIFT);
}
static void vfe_reg_update(struct vfe_device *vfe, enum vfe_line_id line_id)
{
vfe->reg_update |= VFE_0_REG_UPDATE_line_n(line_id);
/* Enforce barrier between line update and commit */
wmb();
writel_relaxed(vfe->reg_update, vfe->base + VFE_0_REG_UPDATE);
/* Make sure register update is issued before further reg writes */
wmb();
}
static inline void vfe_reg_update_clear(struct vfe_device *vfe,
enum vfe_line_id line_id)
{
vfe->reg_update &= ~VFE_0_REG_UPDATE_line_n(line_id);
}
static void vfe_enable_irq_wm_line(struct vfe_device *vfe, u8 wm,
enum vfe_line_id line_id, u8 enable)
{
u32 irq_en0 = VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(wm) |
VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
u32 irq_en1 = VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(wm) |
VFE_0_IRQ_MASK_1_RDIn_SOF(line_id);
if (enable) {
vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
} else {
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
}
}
static void vfe_enable_irq_pix_line(struct vfe_device *vfe, u8 comp,
enum vfe_line_id line_id, u8 enable)
{
struct vfe_output *output = &vfe->line[line_id].output;
unsigned int i;
u32 irq_en0;
u32 irq_en1;
u32 comp_mask = 0;
irq_en0 = VFE_0_IRQ_MASK_0_CAMIF_SOF;
irq_en0 |= VFE_0_IRQ_MASK_0_CAMIF_EOF;
irq_en0 |= VFE_0_IRQ_MASK_0_IMAGE_COMPOSITE_DONE_n(comp);
irq_en0 |= VFE_0_IRQ_MASK_0_line_n_REG_UPDATE(line_id);
irq_en1 = VFE_0_IRQ_MASK_1_CAMIF_ERROR;
for (i = 0; i < output->wm_num; i++) {
irq_en1 |= VFE_0_IRQ_MASK_1_IMAGE_MASTER_n_BUS_OVERFLOW(
output->wm_idx[i]);
comp_mask |= (1 << output->wm_idx[i]) << comp * 8;
}
if (enable) {
vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
vfe_reg_set(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
} else {
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_clr(vfe, VFE_0_IRQ_MASK_1, irq_en1);
vfe_reg_clr(vfe, VFE_0_IRQ_COMPOSITE_MASK_0, comp_mask);
}
}
static void vfe_enable_irq_common(struct vfe_device *vfe)
{
u32 irq_en0 = VFE_0_IRQ_MASK_0_RESET_ACK;
u32 irq_en1 = VFE_0_IRQ_MASK_1_VIOLATION |
VFE_0_IRQ_MASK_1_BUS_BDG_HALT_ACK;
vfe_reg_set(vfe, VFE_0_IRQ_MASK_0, irq_en0);
vfe_reg_set(vfe, VFE_0_IRQ_MASK_1, irq_en1);
}
static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 val, even_cfg, odd_cfg;
writel_relaxed(VFE_0_DEMUX_CFG_PERIOD, vfe->base + VFE_0_DEMUX_CFG);
val = VFE_0_DEMUX_GAIN_0_CH0_EVEN | VFE_0_DEMUX_GAIN_0_CH0_ODD;
writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_0);
val = VFE_0_DEMUX_GAIN_1_CH1 | VFE_0_DEMUX_GAIN_1_CH2;
writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
case MEDIA_BUS_FMT_YUYV8_2X8:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
break;
case MEDIA_BUS_FMT_YVYU8_2X8:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
default:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
break;
case MEDIA_BUS_FMT_VYUY8_2X8:
even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
break;
}
writel_relaxed(even_cfg, vfe->base + VFE_0_DEMUX_EVEN_CFG);
writel_relaxed(odd_cfg, vfe->base + VFE_0_DEMUX_ODD_CFG);
}
static void vfe_set_scale_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
u32 reg;
u16 input, output;
u8 interp_reso;
u32 phase_mult;
writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_Y_CFG);
input = line->fmt[MSM_VFE_PAD_SINK].width - 1;
output = line->compose.width - 1;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (14 + interp_reso)) / output;
reg = (interp_reso << 28) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_H_PHASE);
input = line->fmt[MSM_VFE_PAD_SINK].height - 1;
output = line->compose.height - 1;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (14 + interp_reso)) / output;
reg = (interp_reso << 28) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_Y_V_PHASE);
writel_relaxed(0x3, vfe->base + VFE_0_SCALE_ENC_CBCR_CFG);
input = line->fmt[MSM_VFE_PAD_SINK].width - 1;
output = line->compose.width / 2 - 1;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (14 + interp_reso)) / output;
reg = (interp_reso << 28) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_H_PHASE);
input = line->fmt[MSM_VFE_PAD_SINK].height - 1;
output = line->compose.height - 1;
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21)
output = line->compose.height / 2 - 1;
reg = (output << 16) | input;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_IMAGE_SIZE);
interp_reso = vfe_calc_interp_reso(input, output);
phase_mult = input * (1 << (14 + interp_reso)) / output;
reg = (interp_reso << 28) | phase_mult;
writel_relaxed(reg, vfe->base + VFE_0_SCALE_ENC_CBCR_V_PHASE);
}
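/*
 * Note on the phase registers programmed above: phase_mult encodes the
 * downscale ratio (input / output, both as size - 1) as a fixed-point value
 * with 14 + interp_reso fractional bits. E.g. scaling 1920 to 960
 * horizontally and assuming vfe_calc_interp_reso() returns 2 gives
 * phase_mult = 1919 * (1 << 16) / 959 = 131140, i.e. roughly 2.0 in Q16.
 */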
static void vfe_set_crop_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 p = line->video_out.active_fmt.fmt.pix_mp.pixelformat;
u32 reg;
u16 first, last;
first = line->crop.left;
last = line->crop.left + line->crop.width - 1;
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_WIDTH);
first = line->crop.top;
last = line->crop.top + line->crop.height - 1;
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_Y_HEIGHT);
first = line->crop.left / 2;
last = line->crop.left / 2 + line->crop.width / 2 - 1;
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_WIDTH);
first = line->crop.top;
last = line->crop.top + line->crop.height - 1;
if (p == V4L2_PIX_FMT_NV12 || p == V4L2_PIX_FMT_NV21) {
first = line->crop.top / 2;
last = line->crop.top / 2 + line->crop.height / 2 - 1;
}
reg = (first << 16) | last;
writel_relaxed(reg, vfe->base + VFE_0_CROP_ENC_CBCR_HEIGHT);
}
static void vfe_set_clamp_cfg(struct vfe_device *vfe)
{
u32 val = VFE_0_CLAMP_ENC_MAX_CFG_CH0 |
VFE_0_CLAMP_ENC_MAX_CFG_CH1 |
VFE_0_CLAMP_ENC_MAX_CFG_CH2;
writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MAX_CFG);
val = VFE_0_CLAMP_ENC_MIN_CFG_CH0 |
VFE_0_CLAMP_ENC_MIN_CFG_CH1 |
VFE_0_CLAMP_ENC_MIN_CFG_CH2;
writel_relaxed(val, vfe->base + VFE_0_CLAMP_ENC_MIN_CFG);
}
static void vfe_set_qos(struct vfe_device *vfe)
{
u32 val = VFE_0_BUS_BDG_QOS_CFG_0_CFG;
u32 val7 = VFE_0_BUS_BDG_QOS_CFG_7_CFG;
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_0);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_1);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_2);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_3);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_4);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_5);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_QOS_CFG_6);
writel_relaxed(val7, vfe->base + VFE_0_BUS_BDG_QOS_CFG_7);
}
static void vfe_set_ds(struct vfe_device *vfe)
{
u32 val = VFE_0_BUS_BDG_DS_CFG_0_CFG;
u32 val16 = VFE_0_BUS_BDG_DS_CFG_16_CFG;
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_0);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_1);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_2);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_3);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_4);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_5);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_6);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_7);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_8);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_9);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_10);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_11);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_12);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_13);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_14);
writel_relaxed(val, vfe->base + VFE_0_BUS_BDG_DS_CFG_15);
writel_relaxed(val16, vfe->base + VFE_0_BUS_BDG_DS_CFG_16);
}
static void vfe_set_cgc_override(struct vfe_device *vfe, u8 wm, u8 enable)
{
/* empty */
}
static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
{
u32 val;
switch (line->fmt[MSM_VFE_PAD_SINK].code) {
case MEDIA_BUS_FMT_YUYV8_2X8:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
break;
case MEDIA_BUS_FMT_YVYU8_2X8:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
break;
case MEDIA_BUS_FMT_UYVY8_2X8:
default:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
break;
case MEDIA_BUS_FMT_VYUY8_2X8:
val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
break;
}
val |= VFE_0_CORE_CFG_COMPOSITE_REG_UPDATE_EN;
writel_relaxed(val, vfe->base + VFE_0_CORE_CFG);
val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
val |= (line->fmt[MSM_VFE_PAD_SINK].height - 1) << 16;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_FRAME_CFG);
val = line->fmt[MSM_VFE_PAD_SINK].width * 2 - 1;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_WIDTH_CFG);
val = line->fmt[MSM_VFE_PAD_SINK].height - 1;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_WINDOW_HEIGHT_CFG);
val = 0xffffffff;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_SUBSAMPLE_CFG);
val = 0xffffffff;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_FRAMEDROP_PATTERN);
val = 0xffffffff;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_IRQ_SUBSAMPLE_PATTERN);
val = VFE_0_RDI_CFG_x_MIPI_EN_BITS;
vfe_reg_set(vfe, VFE_0_RDI_CFG_x(0), val);
val = VFE_0_CAMIF_CFG_VFE_OUTPUT_EN;
writel_relaxed(val, vfe->base + VFE_0_CAMIF_CFG);
}
static void vfe_set_camif_cmd(struct vfe_device *vfe, u8 enable)
{
u32 cmd;
cmd = VFE_0_CAMIF_CMD_CLEAR_CAMIF_STATUS | VFE_0_CAMIF_CMD_NO_CHANGE;
writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
/* Make sure the camif command write completes before it is changed again */
wmb();
if (enable)
cmd = VFE_0_CAMIF_CMD_ENABLE_FRAME_BOUNDARY;
else
cmd = VFE_0_CAMIF_CMD_DISABLE_FRAME_BOUNDARY;
writel_relaxed(cmd, vfe->base + VFE_0_CAMIF_CMD);
}
static void vfe_set_module_cfg(struct vfe_device *vfe, u8 enable)
{
u32 val_lens = VFE_0_MODULE_LENS_EN_DEMUX |
VFE_0_MODULE_LENS_EN_CHROMA_UPSAMPLE;
u32 val_zoom = VFE_0_MODULE_ZOOM_EN_SCALE_ENC |
VFE_0_MODULE_ZOOM_EN_CROP_ENC;
if (enable) {
vfe_reg_set(vfe, VFE_0_MODULE_LENS_EN, val_lens);
vfe_reg_set(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom);
} else {
vfe_reg_clr(vfe, VFE_0_MODULE_LENS_EN, val_lens);
vfe_reg_clr(vfe, VFE_0_MODULE_ZOOM_EN, val_zoom);
}
}
static int vfe_camif_wait_for_stop(struct vfe_device *vfe, struct device *dev)
{
u32 val;
int ret;
ret = readl_poll_timeout(vfe->base + VFE_0_CAMIF_STATUS,
val,
(val & VFE_0_CAMIF_STATUS_HALT),
CAMIF_TIMEOUT_SLEEP_US,
CAMIF_TIMEOUT_ALL_US);
if (ret < 0)
dev_err(dev, "%s: camif stop timeout\n", __func__);
return ret;
}
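/*
 * Usage note (illustrative): readl_poll_timeout() re-reads CAMIF_STATUS
 * every CAMIF_TIMEOUT_SLEEP_US microseconds until the HALT bit is set,
 * and returns -ETIMEDOUT if that does not happen within
 * CAMIF_TIMEOUT_ALL_US microseconds.
 */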
/*
* vfe_isr - VFE module interrupt handler
* @irq: Interrupt line
* @dev: VFE device
*
* Return IRQ_HANDLED on success
*/
static irqreturn_t vfe_isr(int irq, void *dev)
{
struct vfe_device *vfe = dev;
u32 value0, value1;
int i, j;
vfe->ops->isr_read(vfe, &value0, &value1);
dev_dbg(vfe->camss->dev, "VFE: status0 = 0x%08x, status1 = 0x%08x\n",
value0, value1);
if (value0 & VFE_0_IRQ_STATUS_0_RESET_ACK)
vfe->isr_ops.reset_ack(vfe);
if (value1 & VFE_0_IRQ_STATUS_1_VIOLATION)
vfe->ops->violation_read(vfe);
if (value1 & VFE_0_IRQ_STATUS_1_BUS_BDG_HALT_ACK)
vfe->isr_ops.halt_ack(vfe);
for (i = VFE_LINE_RDI0; i < vfe->line_num; i++)
if (value0 & VFE_0_IRQ_STATUS_0_line_n_REG_UPDATE(i))
vfe->isr_ops.reg_update(vfe, i);
if (value0 & VFE_0_IRQ_STATUS_0_CAMIF_SOF)
vfe->isr_ops.sof(vfe, VFE_LINE_PIX);
for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
if (value1 & VFE_0_IRQ_STATUS_1_RDIn_SOF(i))
vfe->isr_ops.sof(vfe, i);
for (i = 0; i < MSM_VFE_COMPOSITE_IRQ_NUM; i++)
if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_COMPOSITE_DONE_n(i)) {
vfe->isr_ops.comp_done(vfe, i);
for (j = 0; j < ARRAY_SIZE(vfe->wm_output_map); j++)
if (vfe->wm_output_map[j] == VFE_LINE_PIX)
value0 &= ~VFE_0_IRQ_MASK_0_IMAGE_MASTER_n_PING_PONG(j);
}
for (i = 0; i < MSM_VFE_IMAGE_MASTERS_NUM; i++)
if (value0 & VFE_0_IRQ_STATUS_0_IMAGE_MASTER_n_PING_PONG(i))
vfe->isr_ops.wm_done(vfe, i);
return IRQ_HANDLED;
}
static void vfe_isr_read(struct vfe_device *vfe, u32 *value0, u32 *value1)
{
*value0 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_0);
*value1 = readl_relaxed(vfe->base + VFE_0_IRQ_STATUS_1);
writel_relaxed(*value0, vfe->base + VFE_0_IRQ_CLEAR_0);
writel_relaxed(*value1, vfe->base + VFE_0_IRQ_CLEAR_1);
/* Enforce barrier between local & global IRQ clear */
wmb();
writel_relaxed(VFE_0_IRQ_CMD_GLOBAL_CLEAR, vfe->base + VFE_0_IRQ_CMD);
}
/*
* vfe_pm_domain_off - Disable power domains specific to this VFE.
* @vfe: VFE Device
*/
static void vfe_pm_domain_off(struct vfe_device *vfe)
{
struct camss *camss;
if (!vfe)
return;
camss = vfe->camss;
device_link_del(camss->genpd_link[vfe->id]);
}
/*
* vfe_pm_domain_on - Enable power domains specific to this VFE.
* @vfe: VFE Device
*/
static int vfe_pm_domain_on(struct vfe_device *vfe)
{
struct camss *camss = vfe->camss;
enum vfe_line_id id = vfe->id;
camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id], DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
if (!camss->genpd_link[id]) {
dev_err(vfe->camss->dev, "Failed to add VFE#%d to power domain\n", id);
return -EINVAL;
}
return 0;
}
static void vfe_violation_read(struct vfe_device *vfe)
{
u32 violation = readl_relaxed(vfe->base + VFE_0_VIOLATION_STATUS);
pr_err_ratelimited("VFE: violation = 0x%08x\n", violation);
}
static const struct vfe_hw_ops_gen1 vfe_ops_gen1_4_7 = {
.bus_connect_wm_to_rdi = vfe_bus_connect_wm_to_rdi,
.bus_disconnect_wm_from_rdi = vfe_bus_disconnect_wm_from_rdi,
.bus_enable_wr_if = vfe_bus_enable_wr_if,
.bus_reload_wm = vfe_bus_reload_wm,
.camif_wait_for_stop = vfe_camif_wait_for_stop,
.enable_irq_common = vfe_enable_irq_common,
.enable_irq_pix_line = vfe_enable_irq_pix_line,
.enable_irq_wm_line = vfe_enable_irq_wm_line,
.get_ub_size = vfe_get_ub_size,
.halt_clear = vfe_halt_clear,
.halt_request = vfe_halt_request,
.set_camif_cfg = vfe_set_camif_cfg,
.set_camif_cmd = vfe_set_camif_cmd,
.set_cgc_override = vfe_set_cgc_override,
.set_clamp_cfg = vfe_set_clamp_cfg,
.set_crop_cfg = vfe_set_crop_cfg,
.set_demux_cfg = vfe_set_demux_cfg,
.set_ds = vfe_set_ds,
.set_module_cfg = vfe_set_module_cfg,
.set_qos = vfe_set_qos,
.set_rdi_cid = vfe_set_rdi_cid,
.set_realign_cfg = vfe_set_realign_cfg,
.set_scale_cfg = vfe_set_scale_cfg,
.set_xbar_cfg = vfe_set_xbar_cfg,
.wm_enable = vfe_wm_enable,
.wm_frame_based = vfe_wm_frame_based,
.wm_get_ping_pong_status = vfe_wm_get_ping_pong_status,
.wm_line_based = vfe_wm_line_based,
.wm_set_framedrop_pattern = vfe_wm_set_framedrop_pattern,
.wm_set_framedrop_period = vfe_wm_set_framedrop_period,
.wm_set_ping_addr = vfe_wm_set_ping_addr,
.wm_set_pong_addr = vfe_wm_set_pong_addr,
.wm_set_subsample = vfe_wm_set_subsample,
.wm_set_ub_cfg = vfe_wm_set_ub_cfg,
};
static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
{
vfe->isr_ops = vfe_isr_ops_gen1;
vfe->ops_gen1 = &vfe_ops_gen1_4_7;
vfe->video_ops = vfe_video_ops_gen1;
vfe->line_num = VFE_LINE_NUM_GEN1;
}
const struct vfe_hw_ops vfe_ops_4_7 = {
.global_reset = vfe_global_reset,
.hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
.pm_domain_on = vfe_pm_domain_on,
.reg_update_clear = vfe_reg_update_clear,
.reg_update = vfe_reg_update,
.subdev_init = vfe_subdev_init,
.vfe_disable = vfe_gen1_disable,
.vfe_enable = vfe_gen1_enable,
.vfe_halt = vfe_gen1_halt,
.violation_read = vfe_violation_read,
};
| linux-master | drivers/media/platform/qcom/camss/camss-vfe-4-7.c |
// SPDX-License-Identifier: GPL-2.0
/*
* camss.c
*
* Qualcomm MSM Camera Subsystem - Core
*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015-2018 Linaro Ltd.
*/
#include <linux/clk.h>
#include <linux/interconnect.h>
#include <linux/media-bus-format.h>
#include <linux/media.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/media-device.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-fwnode.h>
#include "camss.h"
#define CAMSS_CLOCK_MARGIN_NUMERATOR 105
#define CAMSS_CLOCK_MARGIN_DENOMINATOR 100
static const struct resources csiphy_res_8x16[] = {
/* CSIPHY0 */
{
.regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000 } },
.reg = { "csiphy0", "csiphy0_clk_mux" },
.interrupt = { "csiphy0" }
},
/* CSIPHY1 */
{
.regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000 } },
.reg = { "csiphy1", "csiphy1_clk_mux" },
.interrupt = { "csiphy1" }
}
};
static const struct resources csid_res_8x16[] = {
/* CSID0 */
{
.regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
"csi0", "csi0_phy", "csi0_pix", "csi0_rdi" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "csid0" },
.interrupt = { "csid0" }
},
/* CSID1 */
{
.regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
"csi1", "csi1_phy", "csi1_pix", "csi1_rdi" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "csid1" },
.interrupt = { "csid1" }
},
};
static const struct resources_ispif ispif_res_8x16 = {
/* ISPIF */
.clock = { "top_ahb", "ahb", "ispif_ahb",
"csi0", "csi0_pix", "csi0_rdi",
"csi1", "csi1_pix", "csi1_rdi" },
.clock_for_reset = { "vfe0", "csi_vfe0" },
.reg = { "ispif", "csi_clk_mux" },
.interrupt = "ispif"
};
static const struct resources vfe_res_8x16[] = {
/* VFE0 */
{
.regulators = {},
.clock = { "top_ahb", "vfe0", "csi_vfe0",
"vfe_ahb", "vfe_axi", "ahb" },
.clock_rate = { { 0 },
{ 50000000, 80000000, 100000000, 160000000,
177780000, 200000000, 266670000, 320000000,
400000000, 465000000 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "vfe0" },
.interrupt = { "vfe0" }
}
};
static const struct resources csiphy_res_8x96[] = {
/* CSIPHY0 */
{
.regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 266666667 } },
.reg = { "csiphy0", "csiphy0_clk_mux" },
.interrupt = { "csiphy0" }
},
/* CSIPHY1 */
{
.regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 266666667 } },
.reg = { "csiphy1", "csiphy1_clk_mux" },
.interrupt = { "csiphy1" }
},
/* CSIPHY2 */
{
.regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy2_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 266666667 } },
.reg = { "csiphy2", "csiphy2_clk_mux" },
.interrupt = { "csiphy2" }
}
};
static const struct resources csid_res_8x96[] = {
/* CSID0 */
{
.regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
"csi0", "csi0_phy", "csi0_pix", "csi0_rdi" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 266666667 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "csid0" },
.interrupt = { "csid0" }
},
/* CSID1 */
{
.regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
"csi1", "csi1_phy", "csi1_pix", "csi1_rdi" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 266666667 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "csid1" },
.interrupt = { "csid1" }
},
/* CSID2 */
{
.regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi2_ahb", "ahb",
"csi2", "csi2_phy", "csi2_pix", "csi2_rdi" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 266666667 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "csid2" },
.interrupt = { "csid2" }
},
/* CSID3 */
{
.regulators = { "vdda" },
.clock = { "top_ahb", "ispif_ahb", "csi3_ahb", "ahb",
"csi3", "csi3_phy", "csi3_pix", "csi3_rdi" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 266666667 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "csid3" },
.interrupt = { "csid3" }
}
};
static const struct resources_ispif ispif_res_8x96 = {
/* ISPIF */
.clock = { "top_ahb", "ahb", "ispif_ahb",
"csi0", "csi0_pix", "csi0_rdi",
"csi1", "csi1_pix", "csi1_rdi",
"csi2", "csi2_pix", "csi2_rdi",
"csi3", "csi3_pix", "csi3_rdi" },
.clock_for_reset = { "vfe0", "csi_vfe0", "vfe1", "csi_vfe1" },
.reg = { "ispif", "csi_clk_mux" },
.interrupt = "ispif"
};
static const struct resources vfe_res_8x96[] = {
/* VFE0 */
{
.regulators = {},
.clock = { "top_ahb", "ahb", "vfe0", "csi_vfe0", "vfe_ahb",
"vfe0_ahb", "vfe_axi", "vfe0_stream"},
.clock_rate = { { 0 },
{ 0 },
{ 75000000, 100000000, 300000000,
320000000, 480000000, 600000000 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "vfe0" },
.interrupt = { "vfe0" }
},
/* VFE1 */
{
.regulators = {},
.clock = { "top_ahb", "ahb", "vfe1", "csi_vfe1", "vfe_ahb",
"vfe1_ahb", "vfe_axi", "vfe1_stream"},
.clock_rate = { { 0 },
{ 0 },
{ 75000000, 100000000, 300000000,
320000000, 480000000, 600000000 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "vfe1" },
.interrupt = { "vfe1" }
}
};
static const struct resources csiphy_res_660[] = {
/* CSIPHY0 */
{
.regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy0_timer",
"csi0_phy", "csiphy_ahb2crif" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 269333333 },
{ 0 } },
.reg = { "csiphy0", "csiphy0_clk_mux" },
.interrupt = { "csiphy0" }
},
/* CSIPHY1 */
{
.regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy1_timer",
"csi1_phy", "csiphy_ahb2crif" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 269333333 },
{ 0 } },
.reg = { "csiphy1", "csiphy1_clk_mux" },
.interrupt = { "csiphy1" }
},
/* CSIPHY2 */
{
.regulators = {},
.clock = { "top_ahb", "ispif_ahb", "ahb", "csiphy2_timer",
"csi2_phy", "csiphy_ahb2crif" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 269333333 },
{ 0 } },
.reg = { "csiphy2", "csiphy2_clk_mux" },
.interrupt = { "csiphy2" }
}
};
static const struct resources csid_res_660[] = {
/* CSID0 */
{
.regulators = { "vdda", "vdd_sec" },
.clock = { "top_ahb", "ispif_ahb", "csi0_ahb", "ahb",
"csi0", "csi0_phy", "csi0_pix", "csi0_rdi",
"cphy_csid0" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 310000000,
404000000, 465000000 },
{ 0 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "csid0" },
.interrupt = { "csid0" }
},
/* CSID1 */
{
.regulators = { "vdda", "vdd_sec" },
.clock = { "top_ahb", "ispif_ahb", "csi1_ahb", "ahb",
"csi1", "csi1_phy", "csi1_pix", "csi1_rdi",
"cphy_csid1" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 310000000,
404000000, 465000000 },
{ 0 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "csid1" },
.interrupt = { "csid1" }
},
/* CSID2 */
{
.regulators = { "vdda", "vdd_sec" },
.clock = { "top_ahb", "ispif_ahb", "csi2_ahb", "ahb",
"csi2", "csi2_phy", "csi2_pix", "csi2_rdi",
"cphy_csid2" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 310000000,
404000000, 465000000 },
{ 0 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "csid2" },
.interrupt = { "csid2" }
},
/* CSID3 */
{
.regulators = { "vdda", "vdd_sec" },
.clock = { "top_ahb", "ispif_ahb", "csi3_ahb", "ahb",
"csi3", "csi3_phy", "csi3_pix", "csi3_rdi",
"cphy_csid3" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 310000000,
404000000, 465000000 },
{ 0 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "csid3" },
.interrupt = { "csid3" }
}
};
static const struct resources_ispif ispif_res_660 = {
/* ISPIF */
.clock = { "top_ahb", "ahb", "ispif_ahb",
"csi0", "csi0_pix", "csi0_rdi",
"csi1", "csi1_pix", "csi1_rdi",
"csi2", "csi2_pix", "csi2_rdi",
"csi3", "csi3_pix", "csi3_rdi" },
.clock_for_reset = { "vfe0", "csi_vfe0", "vfe1", "csi_vfe1" },
.reg = { "ispif", "csi_clk_mux" },
.interrupt = "ispif"
};
static const struct resources vfe_res_660[] = {
/* VFE0 */
{
.regulators = {},
.clock = { "throttle_axi", "top_ahb", "ahb", "vfe0",
"csi_vfe0", "vfe_ahb", "vfe0_ahb", "vfe_axi",
"vfe0_stream"},
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 120000000, 200000000, 256000000,
300000000, 404000000, 480000000,
540000000, 576000000 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "vfe0" },
.interrupt = { "vfe0" }
},
/* VFE1 */
{
.regulators = {},
.clock = { "throttle_axi", "top_ahb", "ahb", "vfe1",
"csi_vfe1", "vfe_ahb", "vfe1_ahb", "vfe_axi",
"vfe1_stream"},
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 120000000, 200000000, 256000000,
300000000, 404000000, 480000000,
540000000, 576000000 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 } },
.reg = { "vfe1" },
.interrupt = { "vfe1" }
}
};
static const struct resources csiphy_res_845[] = {
/* CSIPHY0 */
{
.regulators = {},
.clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src",
"cpas_ahb", "cphy_rx_src", "csiphy0",
"csiphy0_timer_src", "csiphy0_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 19200000, 240000000, 269333333 } },
.reg = { "csiphy0" },
.interrupt = { "csiphy0" }
},
/* CSIPHY1 */
{
.regulators = {},
.clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src",
"cpas_ahb", "cphy_rx_src", "csiphy1",
"csiphy1_timer_src", "csiphy1_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 19200000, 240000000, 269333333 } },
.reg = { "csiphy1" },
.interrupt = { "csiphy1" }
},
/* CSIPHY2 */
{
.regulators = {},
.clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src",
"cpas_ahb", "cphy_rx_src", "csiphy2",
"csiphy2_timer_src", "csiphy2_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 19200000, 240000000, 269333333 } },
.reg = { "csiphy2" },
.interrupt = { "csiphy2" }
},
/* CSIPHY3 */
{
.regulators = {},
.clock = { "camnoc_axi", "soc_ahb", "slow_ahb_src",
"cpas_ahb", "cphy_rx_src", "csiphy3",
"csiphy3_timer_src", "csiphy3_timer" },
.clock_rate = { { 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 0 },
{ 19200000, 240000000, 269333333 } },
.reg = { "csiphy3" },
.interrupt = { "csiphy3" }
}
};
static const struct resources csid_res_845[] = {
/* CSID0 */
{
.regulators = { "vdda-phy", "vdda-pll" },
.clock = { "cpas_ahb", "cphy_rx_src", "slow_ahb_src",
"soc_ahb", "vfe0", "vfe0_src",
"vfe0_cphy_rx", "csi0",
"csi0_src" },
.clock_rate = { { 0 },
{ 384000000 },
{ 80000000 },
{ 0 },
{ 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 },
{ 320000000 },
{ 0 },
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "csid0" },
.interrupt = { "csid0" }
},
/* CSID1 */
{
.regulators = { "vdda-phy", "vdda-pll" },
.clock = { "cpas_ahb", "cphy_rx_src", "slow_ahb_src",
"soc_ahb", "vfe1", "vfe1_src",
"vfe1_cphy_rx", "csi1",
"csi1_src" },
.clock_rate = { { 0 },
{ 384000000 },
{ 80000000 },
{ 0 },
{ 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 },
{ 320000000 },
{ 0 },
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "csid1" },
.interrupt = { "csid1" }
},
/* CSID2 */
{
.regulators = { "vdda-phy", "vdda-pll" },
.clock = { "cpas_ahb", "cphy_rx_src", "slow_ahb_src",
"soc_ahb", "vfe_lite", "vfe_lite_src",
"vfe_lite_cphy_rx", "csi2",
"csi2_src" },
.clock_rate = { { 0 },
{ 384000000 },
{ 80000000 },
{ 0 },
{ 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 },
{ 320000000 },
{ 0 },
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "csid2" },
.interrupt = { "csid2" }
}
};
static const struct resources vfe_res_845[] = {
/* VFE0 */
{
.regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "slow_ahb_src",
"soc_ahb", "vfe0", "vfe0_axi",
"vfe0_src", "csi0",
"csi0_src"},
.clock_rate = { { 0 },
{ 0 },
{ 80000000 },
{ 0 },
{ 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 },
{ 0 },
{ 320000000 },
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "vfe0" },
.interrupt = { "vfe0" }
},
/* VFE1 */
{
.regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "slow_ahb_src",
"soc_ahb", "vfe1", "vfe1_axi",
"vfe1_src", "csi1",
"csi1_src"},
.clock_rate = { { 0 },
{ 0 },
{ 80000000 },
{ 0 },
{ 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 },
{ 0 },
{ 320000000 },
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "vfe1" },
.interrupt = { "vfe1" }
},
/* VFE-lite */
{
.regulators = {},
.clock = { "camnoc_axi", "cpas_ahb", "slow_ahb_src",
"soc_ahb", "vfe_lite",
"vfe_lite_src", "csi2",
"csi2_src"},
.clock_rate = { { 0 },
{ 0 },
{ 80000000 },
{ 0 },
{ 19200000, 100000000, 320000000, 404000000, 480000000, 600000000 },
{ 320000000 },
{ 19200000, 75000000, 384000000, 538666667 },
{ 384000000 } },
.reg = { "vfe_lite" },
.interrupt = { "vfe_lite" }
}
};
static const struct resources csiphy_res_8250[] = {
/* CSIPHY0 */
{
.regulators = {},
.clock = { "csiphy0", "csiphy0_timer" },
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy0" },
.interrupt = { "csiphy0" }
},
/* CSIPHY1 */
{
.regulators = {},
.clock = { "csiphy1", "csiphy1_timer" },
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy1" },
.interrupt = { "csiphy1" }
},
/* CSIPHY2 */
{
.regulators = {},
.clock = { "csiphy2", "csiphy2_timer" },
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy2" },
.interrupt = { "csiphy2" }
},
/* CSIPHY3 */
{
.regulators = {},
.clock = { "csiphy3", "csiphy3_timer" },
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy3" },
.interrupt = { "csiphy3" }
},
/* CSIPHY4 */
{
.regulators = {},
.clock = { "csiphy4", "csiphy4_timer" },
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy4" },
.interrupt = { "csiphy4" }
},
/* CSIPHY5 */
{
.regulators = {},
.clock = { "csiphy5", "csiphy5_timer" },
.clock_rate = { { 400000000 },
{ 300000000 } },
.reg = { "csiphy5" },
.interrupt = { "csiphy5" }
}
};
static const struct resources csid_res_8250[] = {
/* CSID0 */
{
.regulators = { "vdda-phy", "vdda-pll" },
.clock = { "vfe0_csid", "vfe0_cphy_rx", "vfe0", "vfe0_areg", "vfe0_ahb" },
.clock_rate = { { 400000000 },
{ 400000000 },
{ 350000000, 475000000, 576000000, 720000000 },
{ 100000000, 200000000, 300000000, 400000000 },
{ 0 } },
.reg = { "csid0" },
.interrupt = { "csid0" }
},
/* CSID1 */
{
.regulators = { "vdda-phy", "vdda-pll" },
.clock = { "vfe1_csid", "vfe1_cphy_rx", "vfe1", "vfe1_areg", "vfe1_ahb" },
.clock_rate = { { 400000000 },
{ 400000000 },
{ 350000000, 475000000, 576000000, 720000000 },
{ 100000000, 200000000, 300000000, 400000000 },
{ 0 } },
.reg = { "csid1" },
.interrupt = { "csid1" }
},
/* CSID2 */
{
.regulators = { "vdda-phy", "vdda-pll" },
.clock = { "vfe_lite_csid", "vfe_lite_cphy_rx", "vfe_lite", "vfe_lite_ahb" },
.clock_rate = { { 400000000 },
{ 400000000 },
{ 400000000, 480000000 },
{ 0 } },
.reg = { "csid2" },
.interrupt = { "csid2" }
},
/* CSID3 */
{
.regulators = { "vdda-phy", "vdda-pll" },
.clock = { "vfe_lite_csid", "vfe_lite_cphy_rx", "vfe_lite", "vfe_lite_ahb" },
.clock_rate = { { 400000000 },
{ 400000000 },
{ 400000000, 480000000 },
{ 0 } },
.reg = { "csid3" },
.interrupt = { "csid3" }
}
};
static const struct resources vfe_res_8250[] = {
/* VFE0 */
{
.regulators = {},
.clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb",
"camnoc_axi", "vfe0_ahb", "vfe0_areg", "vfe0",
"vfe0_axi", "cam_hf_axi" },
.clock_rate = { { 19200000, 300000000, 400000000, 480000000 },
{ 19200000, 80000000 },
{ 19200000 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 300000000, 400000000 },
{ 350000000, 475000000, 576000000, 720000000 },
{ 0 },
{ 0 } },
.reg = { "vfe0" },
.interrupt = { "vfe0" }
},
/* VFE1 */
{
.regulators = {},
.clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb",
"camnoc_axi", "vfe1_ahb", "vfe1_areg", "vfe1",
"vfe1_axi", "cam_hf_axi" },
.clock_rate = { { 19200000, 300000000, 400000000, 480000000 },
{ 19200000, 80000000 },
{ 19200000 },
{ 0 },
{ 0 },
{ 100000000, 200000000, 300000000, 400000000 },
{ 350000000, 475000000, 576000000, 720000000 },
{ 0 },
{ 0 } },
.reg = { "vfe1" },
.interrupt = { "vfe1" }
},
/* VFE2 (lite) */
{
.regulators = {},
.clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb",
"camnoc_axi", "vfe_lite_ahb", "vfe_lite_axi",
"vfe_lite", "cam_hf_axi" },
.clock_rate = { { 19200000, 300000000, 400000000, 480000000 },
{ 19200000, 80000000 },
{ 19200000 },
{ 0 },
{ 0 },
{ 0 },
{ 400000000, 480000000 },
{ 0 } },
.reg = { "vfe_lite0" },
.interrupt = { "vfe_lite0" }
},
/* VFE3 (lite) */
{
.regulators = {},
.clock = { "camnoc_axi_src", "slow_ahb_src", "cpas_ahb",
"camnoc_axi", "vfe_lite_ahb", "vfe_lite_axi",
"vfe_lite", "cam_hf_axi" },
.clock_rate = { { 19200000, 300000000, 400000000, 480000000 },
{ 19200000, 80000000 },
{ 19200000 },
{ 0 },
{ 0 },
{ 0 },
{ 400000000, 480000000 },
{ 0 } },
.reg = { "vfe_lite1" },
.interrupt = { "vfe_lite1" }
},
};
static const struct resources_icc icc_res_sm8250[] = {
{
.name = "cam_ahb",
.icc_bw_tbl.avg = 38400,
.icc_bw_tbl.peak = 76800,
},
{
.name = "cam_hf_0_mnoc",
.icc_bw_tbl.avg = 2097152,
.icc_bw_tbl.peak = 2097152,
},
{
.name = "cam_sf_0_mnoc",
.icc_bw_tbl.avg = 0,
.icc_bw_tbl.peak = 2097152,
},
{
.name = "cam_sf_icp_mnoc",
.icc_bw_tbl.avg = 2097152,
.icc_bw_tbl.peak = 2097152,
},
};
/*
* camss_add_clock_margin - Add margin to clock frequency rate
* @rate: Clock frequency rate
*
* When making calculations with physical clock frequency values
* some safety margin must be added. Add it.
*/
inline void camss_add_clock_margin(u64 *rate)
{
*rate *= CAMSS_CLOCK_MARGIN_NUMERATOR;
*rate = div_u64(*rate, CAMSS_CLOCK_MARGIN_DENOMINATOR);
}
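/*
 * Worked example (illustrative): a requested rate of 320 MHz becomes
 * 320000000 * 105 / 100 = 336000000 Hz, i.e. a 5% safety margin, before
 * the rate is handed to the clock framework for rounding.
 */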
/*
* camss_enable_clocks - Enable multiple clocks
* @nclocks: Number of clocks in clock array
* @clock: Clock array
* @dev: Device
*
* Return 0 on success or a negative error code otherwise
*/
int camss_enable_clocks(int nclocks, struct camss_clock *clock,
struct device *dev)
{
int ret;
int i;
for (i = 0; i < nclocks; i++) {
ret = clk_prepare_enable(clock[i].clk);
if (ret) {
dev_err(dev, "clock enable failed: %d\n", ret);
goto error;
}
}
return 0;
error:
for (i--; i >= 0; i--)
clk_disable_unprepare(clock[i].clk);
return ret;
}
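/*
 * Minimal usage sketch (the camss_clock array and count come from the
 * subdev resources; the names below are illustrative):
 *
 *	ret = camss_enable_clocks(nclocks, clock, dev);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	camss_disable_clocks(nclocks, clock);
 *
 * On a mid-array failure the function unwinds the clocks it already
 * enabled, so callers never need to disable a partially-enabled set.
 */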
/*
* camss_disable_clocks - Disable multiple clocks
* @nclocks: Number of clocks in clock array
* @clock: Clock array
*/
void camss_disable_clocks(int nclocks, struct camss_clock *clock)
{
int i;
for (i = nclocks - 1; i >= 0; i--)
clk_disable_unprepare(clock[i].clk);
}
/*
* camss_find_sensor - Find a linked media entity which represents a sensor
* @entity: Media entity to start searching from
*
* Return a pointer to sensor media entity or NULL if not found
*/
struct media_entity *camss_find_sensor(struct media_entity *entity)
{
struct media_pad *pad;
while (1) {
pad = &entity->pads[0];
if (!(pad->flags & MEDIA_PAD_FL_SINK))
return NULL;
pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
return NULL;
entity = pad->entity;
if (entity->function == MEDIA_ENT_F_CAM_SENSOR)
return entity;
}
}
/**
* camss_get_link_freq - Get link frequency from sensor
* @entity: Media entity in the current pipeline
* @bpp: Number of bits per pixel for the current format
* @lanes: Number of lanes in the link to the sensor
*
* Return link frequency on success or a negative error code otherwise
*/
s64 camss_get_link_freq(struct media_entity *entity, unsigned int bpp,
unsigned int lanes)
{
struct media_entity *sensor;
struct v4l2_subdev *subdev;
sensor = camss_find_sensor(entity);
if (!sensor)
return -ENODEV;
subdev = media_entity_to_v4l2_subdev(sensor);
return v4l2_get_link_freq(subdev->ctrl_handler, bpp, 2 * lanes);
}
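/*
 * Worked example (illustrative): when the sensor exposes no
 * V4L2_CID_LINK_FREQ control, v4l2_get_link_freq() falls back to
 * V4L2_CID_PIXEL_RATE and computes pixel_rate * bpp / (2 * lanes); the
 * factor of 2 models double-data-rate CSI-2 lanes.  A 384 Mpix/s sensor
 * sending 10-bit Bayer over 4 lanes thus yields
 * 384000000 * 10 / 8 = 480 MHz.
 */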
/*
* camss_get_pixel_clock - Get pixel clock rate from sensor
* @entity: Media entity in the current pipeline
* @pixel_clock: Received pixel clock value
*
* Return 0 on success or a negative error code otherwise
*/
int camss_get_pixel_clock(struct media_entity *entity, u64 *pixel_clock)
{
struct media_entity *sensor;
struct v4l2_subdev *subdev;
struct v4l2_ctrl *ctrl;
sensor = camss_find_sensor(entity);
if (!sensor)
return -ENODEV;
subdev = media_entity_to_v4l2_subdev(sensor);
ctrl = v4l2_ctrl_find(subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
if (!ctrl)
return -EINVAL;
*pixel_clock = v4l2_ctrl_g_ctrl_int64(ctrl);
return 0;
}
int camss_pm_domain_on(struct camss *camss, int id)
{
int ret = 0;
if (id < camss->vfe_num) {
struct vfe_device *vfe = &camss->vfe[id];
ret = vfe->ops->pm_domain_on(vfe);
}
return ret;
}
void camss_pm_domain_off(struct camss *camss, int id)
{
if (id < camss->vfe_num) {
struct vfe_device *vfe = &camss->vfe[id];
vfe->ops->pm_domain_off(vfe);
}
}
/*
* camss_of_parse_endpoint_node - Parse port endpoint node
* @dev: Device
* @node: Device node to be parsed
* @csd: Parsed data from port endpoint node
*
* Return 0 on success or a negative error code on failure
*/
static int camss_of_parse_endpoint_node(struct device *dev,
struct device_node *node,
struct camss_async_subdev *csd)
{
struct csiphy_lanes_cfg *lncfg = &csd->interface.csi2.lane_cfg;
struct v4l2_mbus_config_mipi_csi2 *mipi_csi2;
struct v4l2_fwnode_endpoint vep = { { 0 } };
unsigned int i;
v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &vep);
csd->interface.csiphy_id = vep.base.port;
mipi_csi2 = &vep.bus.mipi_csi2;
lncfg->clk.pos = mipi_csi2->clock_lane;
lncfg->clk.pol = mipi_csi2->lane_polarities[0];
lncfg->num_data = mipi_csi2->num_data_lanes;
lncfg->data = devm_kcalloc(dev,
lncfg->num_data, sizeof(*lncfg->data),
GFP_KERNEL);
if (!lncfg->data)
return -ENOMEM;
for (i = 0; i < lncfg->num_data; i++) {
lncfg->data[i].pos = mipi_csi2->data_lanes[i];
lncfg->data[i].pol = mipi_csi2->lane_polarities[i + 1];
}
return 0;
}
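/*
 * Illustrative DT endpoint (board specific, not from this file): the
 * parser above would map the node below to csiphy_id = 1, clock lane
 * position 7 and a 4-lane data configuration:
 *
 *	port@1 {
 *		endpoint {
 *			clock-lanes = <7>;
 *			data-lanes = <0 1 2 3>;
 *			remote-endpoint = <&sensor_ep>;
 *		};
 *	};
 */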
/*
* camss_of_parse_ports - Parse ports node
 * @camss: CAMSS device
 *
 * Return number of "port" nodes found in "ports" node on success or a
 * negative error code on failure
*/
static int camss_of_parse_ports(struct camss *camss)
{
struct device *dev = camss->dev;
struct device_node *node = NULL;
struct device_node *remote = NULL;
int ret, num_subdevs = 0;
for_each_endpoint_of_node(dev->of_node, node) {
struct camss_async_subdev *csd;
if (!of_device_is_available(node))
continue;
remote = of_graph_get_remote_port_parent(node);
if (!remote) {
dev_err(dev, "Cannot get remote parent\n");
ret = -EINVAL;
goto err_cleanup;
}
csd = v4l2_async_nf_add_fwnode(&camss->notifier,
of_fwnode_handle(remote),
struct camss_async_subdev);
of_node_put(remote);
if (IS_ERR(csd)) {
ret = PTR_ERR(csd);
goto err_cleanup;
}
ret = camss_of_parse_endpoint_node(dev, node, csd);
if (ret < 0)
goto err_cleanup;
num_subdevs++;
}
return num_subdevs;
err_cleanup:
of_node_put(node);
return ret;
}
/*
* camss_init_subdevices - Initialize subdev structures and resources
* @camss: CAMSS device
*
* Return 0 on success or a negative error code on failure
*/
static int camss_init_subdevices(struct camss *camss)
{
const struct resources *csiphy_res;
const struct resources *csid_res;
const struct resources_ispif *ispif_res;
const struct resources *vfe_res;
unsigned int i;
int ret;
if (camss->version == CAMSS_8x16) {
csiphy_res = csiphy_res_8x16;
csid_res = csid_res_8x16;
ispif_res = &ispif_res_8x16;
vfe_res = vfe_res_8x16;
} else if (camss->version == CAMSS_8x96) {
csiphy_res = csiphy_res_8x96;
csid_res = csid_res_8x96;
ispif_res = &ispif_res_8x96;
vfe_res = vfe_res_8x96;
} else if (camss->version == CAMSS_660) {
csiphy_res = csiphy_res_660;
csid_res = csid_res_660;
ispif_res = &ispif_res_660;
vfe_res = vfe_res_660;
} else if (camss->version == CAMSS_845) {
csiphy_res = csiphy_res_845;
csid_res = csid_res_845;
/* Titan VFEs don't have an ISPIF */
ispif_res = NULL;
vfe_res = vfe_res_845;
} else if (camss->version == CAMSS_8250) {
csiphy_res = csiphy_res_8250;
csid_res = csid_res_8250;
/* Titan VFEs don't have an ISPIF */
ispif_res = NULL;
vfe_res = vfe_res_8250;
} else {
return -EINVAL;
}
for (i = 0; i < camss->csiphy_num; i++) {
ret = msm_csiphy_subdev_init(camss, &camss->csiphy[i],
&csiphy_res[i], i);
if (ret < 0) {
dev_err(camss->dev,
"Failed to init csiphy%d sub-device: %d\n",
i, ret);
return ret;
}
}
/* note: SM8250 requires VFE to be initialized before CSID */
for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++) {
ret = msm_vfe_subdev_init(camss, &camss->vfe[i],
&vfe_res[i], i);
if (ret < 0) {
dev_err(camss->dev,
"Fail to init vfe%d sub-device: %d\n", i, ret);
return ret;
}
}
for (i = 0; i < camss->csid_num; i++) {
ret = msm_csid_subdev_init(camss, &camss->csid[i],
&csid_res[i], i);
if (ret < 0) {
dev_err(camss->dev,
"Failed to init csid%d sub-device: %d\n",
i, ret);
return ret;
}
}
ret = msm_ispif_subdev_init(camss, ispif_res);
if (ret < 0) {
dev_err(camss->dev, "Failed to init ispif sub-device: %d\n",
ret);
return ret;
}
return 0;
}
/*
* camss_register_entities - Register subdev nodes and create links
* @camss: CAMSS device
*
* Return 0 on success or a negative error code on failure
*/
static int camss_register_entities(struct camss *camss)
{
int i, j, k;
int ret;
for (i = 0; i < camss->csiphy_num; i++) {
ret = msm_csiphy_register_entity(&camss->csiphy[i],
&camss->v4l2_dev);
if (ret < 0) {
dev_err(camss->dev,
"Failed to register csiphy%d entity: %d\n",
i, ret);
goto err_reg_csiphy;
}
}
for (i = 0; i < camss->csid_num; i++) {
ret = msm_csid_register_entity(&camss->csid[i],
&camss->v4l2_dev);
if (ret < 0) {
dev_err(camss->dev,
"Failed to register csid%d entity: %d\n",
i, ret);
goto err_reg_csid;
}
}
ret = msm_ispif_register_entities(camss->ispif,
&camss->v4l2_dev);
if (ret < 0) {
dev_err(camss->dev, "Failed to register ispif entities: %d\n",
ret);
goto err_reg_ispif;
}
for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++) {
ret = msm_vfe_register_entities(&camss->vfe[i],
&camss->v4l2_dev);
if (ret < 0) {
dev_err(camss->dev,
"Failed to register vfe%d entities: %d\n",
i, ret);
goto err_reg_vfe;
}
}
for (i = 0; i < camss->csiphy_num; i++) {
for (j = 0; j < camss->csid_num; j++) {
ret = media_create_pad_link(
&camss->csiphy[i].subdev.entity,
MSM_CSIPHY_PAD_SRC,
&camss->csid[j].subdev.entity,
MSM_CSID_PAD_SINK,
0);
if (ret < 0) {
dev_err(camss->dev,
"Failed to link %s->%s entities: %d\n",
camss->csiphy[i].subdev.entity.name,
camss->csid[j].subdev.entity.name,
ret);
goto err_link;
}
}
}
if (camss->ispif) {
for (i = 0; i < camss->csid_num; i++) {
for (j = 0; j < camss->ispif->line_num; j++) {
ret = media_create_pad_link(
&camss->csid[i].subdev.entity,
MSM_CSID_PAD_SRC,
&camss->ispif->line[j].subdev.entity,
MSM_ISPIF_PAD_SINK,
0);
if (ret < 0) {
dev_err(camss->dev,
"Failed to link %s->%s entities: %d\n",
camss->csid[i].subdev.entity.name,
camss->ispif->line[j].subdev.entity.name,
ret);
goto err_link;
}
}
}
for (i = 0; i < camss->ispif->line_num; i++)
for (k = 0; k < camss->vfe_num; k++)
for (j = 0; j < camss->vfe[k].line_num; j++) {
struct v4l2_subdev *ispif = &camss->ispif->line[i].subdev;
struct v4l2_subdev *vfe = &camss->vfe[k].line[j].subdev;
ret = media_create_pad_link(&ispif->entity,
MSM_ISPIF_PAD_SRC,
&vfe->entity,
MSM_VFE_PAD_SINK,
0);
if (ret < 0) {
dev_err(camss->dev,
"Failed to link %s->%s entities: %d\n",
ispif->entity.name,
vfe->entity.name,
ret);
goto err_link;
}
}
} else {
for (i = 0; i < camss->csid_num; i++)
for (k = 0; k < camss->vfe_num + camss->vfe_lite_num; k++)
for (j = 0; j < camss->vfe[k].line_num; j++) {
struct v4l2_subdev *csid = &camss->csid[i].subdev;
struct v4l2_subdev *vfe = &camss->vfe[k].line[j].subdev;
ret = media_create_pad_link(&csid->entity,
MSM_CSID_PAD_FIRST_SRC + j,
&vfe->entity,
MSM_VFE_PAD_SINK,
0);
if (ret < 0) {
dev_err(camss->dev,
"Failed to link %s->%s entities: %d\n",
csid->entity.name,
vfe->entity.name,
ret);
goto err_link;
}
}
}
return 0;
err_link:
i = camss->vfe_num + camss->vfe_lite_num;
err_reg_vfe:
for (i--; i >= 0; i--)
msm_vfe_unregister_entities(&camss->vfe[i]);
err_reg_ispif:
msm_ispif_unregister_entities(camss->ispif);
i = camss->csid_num;
err_reg_csid:
for (i--; i >= 0; i--)
msm_csid_unregister_entity(&camss->csid[i]);
i = camss->csiphy_num;
err_reg_csiphy:
for (i--; i >= 0; i--)
msm_csiphy_unregister_entity(&camss->csiphy[i]);
return ret;
}
/*
* camss_unregister_entities - Unregister subdev nodes
* @camss: CAMSS device
*/
static void camss_unregister_entities(struct camss *camss)
{
unsigned int i;
for (i = 0; i < camss->csiphy_num; i++)
msm_csiphy_unregister_entity(&camss->csiphy[i]);
for (i = 0; i < camss->csid_num; i++)
msm_csid_unregister_entity(&camss->csid[i]);
msm_ispif_unregister_entities(camss->ispif);
for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++)
msm_vfe_unregister_entities(&camss->vfe[i]);
}
static int camss_subdev_notifier_bound(struct v4l2_async_notifier *async,
struct v4l2_subdev *subdev,
struct v4l2_async_connection *asd)
{
struct camss *camss = container_of(async, struct camss, notifier);
struct camss_async_subdev *csd =
container_of(asd, struct camss_async_subdev, asd);
u8 id = csd->interface.csiphy_id;
struct csiphy_device *csiphy = &camss->csiphy[id];
csiphy->cfg.csi2 = &csd->interface.csi2;
subdev->host_priv = csiphy;
return 0;
}
static int camss_subdev_notifier_complete(struct v4l2_async_notifier *async)
{
struct camss *camss = container_of(async, struct camss, notifier);
struct v4l2_device *v4l2_dev = &camss->v4l2_dev;
struct v4l2_subdev *sd;
int ret;
list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
if (sd->host_priv) {
struct media_entity *sensor = &sd->entity;
struct csiphy_device *csiphy =
(struct csiphy_device *) sd->host_priv;
struct media_entity *input = &csiphy->subdev.entity;
unsigned int i;
for (i = 0; i < sensor->num_pads; i++) {
if (sensor->pads[i].flags & MEDIA_PAD_FL_SOURCE)
break;
}
if (i == sensor->num_pads) {
dev_err(camss->dev,
"No source pad in external entity\n");
return -EINVAL;
}
ret = media_create_pad_link(sensor, i,
input, MSM_CSIPHY_PAD_SINK,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret < 0) {
dev_err(camss->dev,
"Failed to link %s->%s entities: %d\n",
sensor->name, input->name, ret);
return ret;
}
}
}
ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev);
if (ret < 0)
return ret;
return media_device_register(&camss->media_dev);
}
static const struct v4l2_async_notifier_operations camss_subdev_notifier_ops = {
.bound = camss_subdev_notifier_bound,
.complete = camss_subdev_notifier_complete,
};
static const struct media_device_ops camss_media_ops = {
.link_notify = v4l2_pipeline_link_notify,
};
static int camss_configure_pd(struct camss *camss)
{
struct device *dev = camss->dev;
int i;
int ret;
camss->genpd_num = of_count_phandle_with_args(dev->of_node,
"power-domains",
"#power-domain-cells");
if (camss->genpd_num < 0) {
dev_err(dev, "Power domains are not defined for camss\n");
return camss->genpd_num;
}
/*
 * If a platform device has just one power domain it is attached at the
 * platform_probe() level, so there is no need (and no way) to attach it
 * again; this is the case for CAMSS on MSM8916.
*/
if (camss->genpd_num == 1)
return 0;
camss->genpd = devm_kmalloc_array(dev, camss->genpd_num,
sizeof(*camss->genpd), GFP_KERNEL);
if (!camss->genpd)
return -ENOMEM;
camss->genpd_link = devm_kmalloc_array(dev, camss->genpd_num,
sizeof(*camss->genpd_link),
GFP_KERNEL);
if (!camss->genpd_link)
return -ENOMEM;
/*
 * VFE power domains are at the beginning of the list. All power domains
 * are attached, but only the TITAN_TOP power domain (the last entry, when
 * present) is linked here.
*/
for (i = 0; i < camss->genpd_num; i++) {
camss->genpd[i] = dev_pm_domain_attach_by_id(camss->dev, i);
if (IS_ERR(camss->genpd[i])) {
ret = PTR_ERR(camss->genpd[i]);
goto fail_pm;
}
}
if (i > camss->vfe_num) {
camss->genpd_link[i - 1] = device_link_add(camss->dev, camss->genpd[i - 1],
DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE);
if (!camss->genpd_link[i - 1]) {
ret = -EINVAL;
goto fail_pm;
}
}
return 0;
fail_pm:
for (--i ; i >= 0; i--)
dev_pm_domain_detach(camss->genpd[i], true);
return ret;
}
static int camss_icc_get(struct camss *camss)
{
const struct resources_icc *icc_res;
int nbr_icc_paths = 0;
int i;
if (camss->version == CAMSS_8250) {
icc_res = &icc_res_sm8250[0];
nbr_icc_paths = ICC_SM8250_COUNT;
}
for (i = 0; i < nbr_icc_paths; i++) {
camss->icc_path[i] = devm_of_icc_get(camss->dev,
icc_res[i].name);
if (IS_ERR(camss->icc_path[i]))
return PTR_ERR(camss->icc_path[i]);
camss->icc_bw_tbl[i] = icc_res[i].icc_bw_tbl;
}
return 0;
}
/*
* camss_probe - Probe CAMSS platform device
* @pdev: Pointer to CAMSS platform device
*
* Return 0 on success or a negative error code on failure
*/
static int camss_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct camss *camss;
int num_subdevs, ret;
camss = devm_kzalloc(dev, sizeof(*camss), GFP_KERNEL);
if (!camss)
return -ENOMEM;
atomic_set(&camss->ref_count, 0);
camss->dev = dev;
platform_set_drvdata(pdev, camss);
if (of_device_is_compatible(dev->of_node, "qcom,msm8916-camss")) {
camss->version = CAMSS_8x16;
camss->csiphy_num = 2;
camss->csid_num = 2;
camss->vfe_num = 1;
} else if (of_device_is_compatible(dev->of_node,
"qcom,msm8996-camss")) {
camss->version = CAMSS_8x96;
camss->csiphy_num = 3;
camss->csid_num = 4;
camss->vfe_num = 2;
} else if (of_device_is_compatible(dev->of_node,
"qcom,sdm660-camss")) {
camss->version = CAMSS_660;
camss->csiphy_num = 3;
camss->csid_num = 4;
camss->vfe_num = 2;
} else if (of_device_is_compatible(dev->of_node,
"qcom,sdm845-camss")) {
camss->version = CAMSS_845;
camss->csiphy_num = 4;
camss->csid_num = 3;
camss->vfe_num = 2;
camss->vfe_lite_num = 1;
} else if (of_device_is_compatible(dev->of_node,
"qcom,sm8250-camss")) {
camss->version = CAMSS_8250;
camss->csiphy_num = 6;
camss->csid_num = 4;
camss->vfe_num = 2;
camss->vfe_lite_num = 2;
} else {
return -EINVAL;
}
camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
sizeof(*camss->csiphy), GFP_KERNEL);
if (!camss->csiphy)
return -ENOMEM;
camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
GFP_KERNEL);
if (!camss->csid)
return -ENOMEM;
if (camss->version == CAMSS_8x16 ||
camss->version == CAMSS_8x96) {
camss->ispif = devm_kcalloc(dev, 1, sizeof(*camss->ispif), GFP_KERNEL);
if (!camss->ispif)
return -ENOMEM;
}
camss->vfe = devm_kcalloc(dev, camss->vfe_num + camss->vfe_lite_num,
sizeof(*camss->vfe), GFP_KERNEL);
if (!camss->vfe)
return -ENOMEM;
ret = camss_icc_get(camss);
if (ret < 0)
goto err_cleanup;
ret = camss_init_subdevices(camss);
if (ret < 0)
goto err_cleanup;
ret = dma_set_mask_and_coherent(dev, 0xffffffff);
if (ret)
goto err_cleanup;
camss->media_dev.dev = camss->dev;
strscpy(camss->media_dev.model, "Qualcomm Camera Subsystem",
sizeof(camss->media_dev.model));
camss->media_dev.ops = &camss_media_ops;
media_device_init(&camss->media_dev);
camss->v4l2_dev.mdev = &camss->media_dev;
ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
if (ret < 0) {
dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
goto err_cleanup;
}
v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev);
num_subdevs = camss_of_parse_ports(camss);
if (num_subdevs < 0) {
ret = num_subdevs;
goto err_cleanup;
}
ret = camss_register_entities(camss);
if (ret < 0)
goto err_cleanup;
if (num_subdevs) {
camss->notifier.ops = &camss_subdev_notifier_ops;
ret = v4l2_async_nf_register(&camss->notifier);
if (ret) {
dev_err(dev,
"Failed to register async subdev nodes: %d\n",
ret);
goto err_register_subdevs;
}
} else {
ret = v4l2_device_register_subdev_nodes(&camss->v4l2_dev);
if (ret < 0) {
dev_err(dev, "Failed to register subdev nodes: %d\n",
ret);
goto err_register_subdevs;
}
ret = media_device_register(&camss->media_dev);
if (ret < 0) {
dev_err(dev, "Failed to register media device: %d\n",
ret);
goto err_register_subdevs;
}
}
ret = camss_configure_pd(camss);
if (ret < 0) {
dev_err(dev, "Failed to configure power domains: %d\n", ret);
return ret;
}
pm_runtime_enable(dev);
return 0;
err_register_subdevs:
camss_unregister_entities(camss);
err_cleanup:
v4l2_device_unregister(&camss->v4l2_dev);
v4l2_async_nf_cleanup(&camss->notifier);
return ret;
}
void camss_delete(struct camss *camss)
{
int i;
v4l2_device_unregister(&camss->v4l2_dev);
media_device_unregister(&camss->media_dev);
media_device_cleanup(&camss->media_dev);
pm_runtime_disable(camss->dev);
if (camss->genpd_num == 1)
return;
if (camss->genpd_num > camss->vfe_num)
device_link_del(camss->genpd_link[camss->genpd_num - 1]);
for (i = 0; i < camss->genpd_num; i++)
dev_pm_domain_detach(camss->genpd[i], true);
}
/*
* camss_remove - Remove CAMSS platform device
* @pdev: Pointer to CAMSS platform device
*/
static void camss_remove(struct platform_device *pdev)
{
struct camss *camss = platform_get_drvdata(pdev);
v4l2_async_nf_unregister(&camss->notifier);
v4l2_async_nf_cleanup(&camss->notifier);
camss_unregister_entities(camss);
if (atomic_read(&camss->ref_count) == 0)
camss_delete(camss);
}
static const struct of_device_id camss_dt_match[] = {
{ .compatible = "qcom,msm8916-camss" },
{ .compatible = "qcom,msm8996-camss" },
{ .compatible = "qcom,sdm660-camss" },
{ .compatible = "qcom,sdm845-camss" },
{ .compatible = "qcom,sm8250-camss" },
{ }
};
MODULE_DEVICE_TABLE(of, camss_dt_match);
static int __maybe_unused camss_runtime_suspend(struct device *dev)
{
struct camss *camss = dev_get_drvdata(dev);
int nbr_icc_paths = 0;
int i;
int ret;
if (camss->version == CAMSS_8250)
nbr_icc_paths = ICC_SM8250_COUNT;
for (i = 0; i < nbr_icc_paths; i++) {
ret = icc_set_bw(camss->icc_path[i], 0, 0);
if (ret)
return ret;
}
return 0;
}
static int __maybe_unused camss_runtime_resume(struct device *dev)
{
struct camss *camss = dev_get_drvdata(dev);
int nbr_icc_paths = 0;
int i;
int ret;
if (camss->version == CAMSS_8250)
nbr_icc_paths = ICC_SM8250_COUNT;
for (i = 0; i < nbr_icc_paths; i++) {
ret = icc_set_bw(camss->icc_path[i],
camss->icc_bw_tbl[i].avg,
camss->icc_bw_tbl[i].peak);
if (ret)
return ret;
}
return 0;
}
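/*
 * Worked example (illustrative): for the "cam_ahb" path in
 * icc_res_sm8250[] above, resume requests icc_set_bw(path, 38400, 76800)
 * (average/peak bandwidth in kB/s per the interconnect API) and suspend
 * drops both values back to zero.
 */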
static const struct dev_pm_ops camss_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(camss_runtime_suspend, camss_runtime_resume, NULL)
};
static struct platform_driver qcom_camss_driver = {
.probe = camss_probe,
.remove_new = camss_remove,
.driver = {
.name = "qcom-camss",
.of_match_table = camss_dt_match,
.pm = &camss_pm_ops,
},
};
module_platform_driver(qcom_camss_driver);
MODULE_ALIAS("platform:qcom-camss");
MODULE_DESCRIPTION("Qualcomm Camera Subsystem driver");
MODULE_AUTHOR("Todor Tomov <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/media/platform/qcom/camss/camss.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Rockchip ISP1 Driver - Stats subdevice
*
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h> /* for ISP statistics */
#include "rkisp1-common.h"
#define RKISP1_STATS_DEV_NAME RKISP1_DRIVER_NAME "_stats"
#define RKISP1_ISP_STATS_REQ_BUFS_MIN 2
#define RKISP1_ISP_STATS_REQ_BUFS_MAX 8
static int rkisp1_stats_enum_fmt_meta_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct video_device *video = video_devdata(file);
struct rkisp1_stats *stats = video_get_drvdata(video);
if (f->index > 0 || f->type != video->queue->type)
return -EINVAL;
f->pixelformat = stats->vdev_fmt.fmt.meta.dataformat;
return 0;
}
static int rkisp1_stats_g_fmt_meta_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct video_device *video = video_devdata(file);
struct rkisp1_stats *stats = video_get_drvdata(video);
struct v4l2_meta_format *meta = &f->fmt.meta;
if (f->type != video->queue->type)
return -EINVAL;
memset(meta, 0, sizeof(*meta));
meta->dataformat = stats->vdev_fmt.fmt.meta.dataformat;
meta->buffersize = stats->vdev_fmt.fmt.meta.buffersize;
return 0;
}
static int rkisp1_stats_querycap(struct file *file,
void *priv, struct v4l2_capability *cap)
{
struct video_device *vdev = video_devdata(file);
strscpy(cap->driver, RKISP1_DRIVER_NAME, sizeof(cap->driver));
strscpy(cap->card, vdev->name, sizeof(cap->card));
strscpy(cap->bus_info, RKISP1_BUS_INFO, sizeof(cap->bus_info));
return 0;
}
/* ISP video device IOCTLs */
static const struct v4l2_ioctl_ops rkisp1_stats_ioctl = {
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_enum_fmt_meta_cap = rkisp1_stats_enum_fmt_meta_cap,
.vidioc_g_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
.vidioc_s_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
.vidioc_try_fmt_meta_cap = rkisp1_stats_g_fmt_meta_cap,
.vidioc_querycap = rkisp1_stats_querycap,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static const struct v4l2_file_operations rkisp1_stats_fops = {
.mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
.poll = vb2_fop_poll,
.open = v4l2_fh_open,
.release = vb2_fop_release
};
static int rkisp1_stats_vb2_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers,
unsigned int *num_planes,
unsigned int sizes[],
struct device *alloc_devs[])
{
*num_planes = 1;
*num_buffers = clamp_t(u32, *num_buffers, RKISP1_ISP_STATS_REQ_BUFS_MIN,
RKISP1_ISP_STATS_REQ_BUFS_MAX);
sizes[0] = sizeof(struct rkisp1_stat_buffer);
return 0;
}
static void rkisp1_stats_vb2_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct rkisp1_buffer *stats_buf =
container_of(vbuf, struct rkisp1_buffer, vb);
struct vb2_queue *vq = vb->vb2_queue;
struct rkisp1_stats *stats_dev = vq->drv_priv;
spin_lock_irq(&stats_dev->lock);
list_add_tail(&stats_buf->queue, &stats_dev->stat);
spin_unlock_irq(&stats_dev->lock);
}
static int rkisp1_stats_vb2_buf_prepare(struct vb2_buffer *vb)
{
if (vb2_plane_size(vb, 0) < sizeof(struct rkisp1_stat_buffer))
return -EINVAL;
vb2_set_plane_payload(vb, 0, sizeof(struct rkisp1_stat_buffer));
return 0;
}
static void rkisp1_stats_vb2_stop_streaming(struct vb2_queue *vq)
{
struct rkisp1_stats *stats = vq->drv_priv;
struct rkisp1_buffer *buf;
unsigned int i;
spin_lock_irq(&stats->lock);
for (i = 0; i < RKISP1_ISP_STATS_REQ_BUFS_MAX; i++) {
if (list_empty(&stats->stat))
break;
buf = list_first_entry(&stats->stat,
struct rkisp1_buffer, queue);
list_del(&buf->queue);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irq(&stats->lock);
}
static const struct vb2_ops rkisp1_stats_vb2_ops = {
.queue_setup = rkisp1_stats_vb2_queue_setup,
.buf_queue = rkisp1_stats_vb2_buf_queue,
.buf_prepare = rkisp1_stats_vb2_buf_prepare,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.stop_streaming = rkisp1_stats_vb2_stop_streaming,
};
static int
rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats)
{
struct rkisp1_vdev_node *node;
node = container_of(q, struct rkisp1_vdev_node, buf_queue);
q->type = V4L2_BUF_TYPE_META_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q->drv_priv = stats;
q->ops = &rkisp1_stats_vb2_ops;
q->mem_ops = &vb2_vmalloc_memops;
q->buf_struct_size = sizeof(struct rkisp1_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &node->vlock;
return vb2_queue_init(q);
}
static void rkisp1_stats_get_awb_meas_v10(struct rkisp1_stats *stats,
struct rkisp1_stat_buffer *pbuf)
{
	/* Called from the ISR path with stats->lock held. */
struct rkisp1_device *rkisp1 = stats->rkisp1;
u32 reg_val;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT_V10);
pbuf->params.awb.awb_mean[0].cnt =
RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN_V10);
pbuf->params.awb.awb_mean[0].mean_cr_or_r =
RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
pbuf->params.awb.awb_mean[0].mean_cb_or_b =
RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(reg_val);
pbuf->params.awb.awb_mean[0].mean_y_or_g =
RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
}
static void rkisp1_stats_get_awb_meas_v12(struct rkisp1_stats *stats,
struct rkisp1_stat_buffer *pbuf)
{
	/* Called from the ISR path with stats->lock held. */
struct rkisp1_device *rkisp1 = stats->rkisp1;
u32 reg_val;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT_V12);
pbuf->params.awb.awb_mean[0].cnt =
RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN_V12);
pbuf->params.awb.awb_mean[0].mean_cr_or_r =
RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
pbuf->params.awb.awb_mean[0].mean_cb_or_b =
RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(reg_val);
pbuf->params.awb.awb_mean[0].mean_y_or_g =
RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
}
static void rkisp1_stats_get_aec_meas_v10(struct rkisp1_stats *stats,
struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V10; i++)
pbuf->params.ae.exp_mean[i] =
(u8)rkisp1_read(rkisp1,
RKISP1_CIF_ISP_EXP_MEAN_00_V10 + i * 4);
}
static void rkisp1_stats_get_aec_meas_v12(struct rkisp1_stats *stats,
struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
u32 value;
int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V12 / 4; i++) {
value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_EXP_MEAN_V12 + i * 4);
pbuf->params.ae.exp_mean[4 * i + 0] =
RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(value);
pbuf->params.ae.exp_mean[4 * i + 1] =
RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(value);
pbuf->params.ae.exp_mean[4 * i + 2] =
RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(value);
pbuf->params.ae.exp_mean[4 * i + 3] =
RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(value);
}
value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_EXP_MEAN_V12 + i * 4);
pbuf->params.ae.exp_mean[4 * i + 0] = RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(value);
}
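/*
 * Layout sketch (illustrative, assuming the 9x9 measurement grid of V12
 * with 81 means): each 32-bit EXP_MEAN register packs four 8-bit
 * luminance means, so the loop above unpacks 20 full registers and the
 * tail read picks up the single remaining mean (index 80) from the 21st
 * register.
 */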
static void rkisp1_stats_get_afc_meas(struct rkisp1_stats *stats,
struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
struct rkisp1_cif_isp_af_stat *af;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AFM;
af = &pbuf->params.af;
af->window[0].sum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_SUM_A);
af->window[0].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_A);
af->window[1].sum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_SUM_B);
af->window[1].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_B);
af->window[2].sum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_SUM_C);
af->window[2].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_C);
}
static void rkisp1_stats_get_hst_meas_v10(struct rkisp1_stats *stats,
struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10; i++) {
u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0_V10 + i * 4);
pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN_V10(reg_val);
}
}
static void rkisp1_stats_get_hst_meas_v12(struct rkisp1_stats *stats,
struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
u32 value;
int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 / 2; i++) {
value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_V12 + i * 4);
pbuf->params.hist.hist_bins[2 * i] =
RKISP1_CIF_ISP_HIST_GET_BIN0_V12(value);
pbuf->params.hist.hist_bins[2 * i + 1] =
RKISP1_CIF_ISP_HIST_GET_BIN1_V12(value);
}
}
static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
const struct rkisp1_mbus_info *in_fmt = rkisp1->isp.sink_fmt;
struct rkisp1_cif_isp_bls_meas_val *bls_val;
bls_val = &pbuf->params.ae.bls_val;
if (in_fmt->bayer_pat == RKISP1_RAW_BGGR) {
bls_val->meas_b =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
bls_val->meas_gb =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
bls_val->meas_gr =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
bls_val->meas_r =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
} else if (in_fmt->bayer_pat == RKISP1_RAW_GBRG) {
bls_val->meas_gb =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
bls_val->meas_b =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
bls_val->meas_r =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
bls_val->meas_gr =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
} else if (in_fmt->bayer_pat == RKISP1_RAW_GRBG) {
bls_val->meas_gr =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
bls_val->meas_r =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
bls_val->meas_b =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
bls_val->meas_gb =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
} else if (in_fmt->bayer_pat == RKISP1_RAW_RGGB) {
bls_val->meas_r =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_A_MEASURED);
bls_val->meas_gr =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_B_MEASURED);
bls_val->meas_gb =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_C_MEASURED);
bls_val->meas_b =
rkisp1_read(rkisp1, RKISP1_CIF_ISP_BLS_D_MEASURED);
}
}
static const struct rkisp1_stats_ops rkisp1_v10_stats_ops = {
.get_awb_meas = rkisp1_stats_get_awb_meas_v10,
.get_aec_meas = rkisp1_stats_get_aec_meas_v10,
.get_hst_meas = rkisp1_stats_get_hst_meas_v10,
};
static const struct rkisp1_stats_ops rkisp1_v12_stats_ops = {
.get_awb_meas = rkisp1_stats_get_awb_meas_v12,
.get_aec_meas = rkisp1_stats_get_aec_meas_v12,
.get_hst_meas = rkisp1_stats_get_hst_meas_v12,
};
static void
rkisp1_stats_send_measurement(struct rkisp1_stats *stats, u32 isp_ris)
{
struct rkisp1_stat_buffer *cur_stat_buf;
struct rkisp1_buffer *cur_buf = NULL;
unsigned int frame_sequence = stats->rkisp1->isp.frame_sequence;
u64 timestamp = ktime_get_ns();
/* get one empty buffer */
if (!list_empty(&stats->stat)) {
cur_buf = list_first_entry(&stats->stat,
struct rkisp1_buffer, queue);
list_del(&cur_buf->queue);
}
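/* Without a queued buffer the measurement is silently dropped. */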
if (!cur_buf)
return;
cur_stat_buf = (struct rkisp1_stat_buffer *)
vb2_plane_vaddr(&cur_buf->vb.vb2_buf, 0);
if (isp_ris & RKISP1_CIF_ISP_AWB_DONE)
stats->ops->get_awb_meas(stats, cur_stat_buf);
if (isp_ris & RKISP1_CIF_ISP_AFM_FIN)
rkisp1_stats_get_afc_meas(stats, cur_stat_buf);
if (isp_ris & RKISP1_CIF_ISP_EXP_END) {
stats->ops->get_aec_meas(stats, cur_stat_buf);
rkisp1_stats_get_bls_meas(stats, cur_stat_buf);
}
if (isp_ris & RKISP1_CIF_ISP_HIST_MEASURE_RDY)
stats->ops->get_hst_meas(stats, cur_stat_buf);
vb2_set_plane_payload(&cur_buf->vb.vb2_buf, 0,
sizeof(struct rkisp1_stat_buffer));
cur_buf->vb.sequence = frame_sequence;
cur_buf->vb.vb2_buf.timestamp = timestamp;
vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
unsigned int isp_mis_tmp = 0;
spin_lock(&stats->lock);
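/*
* Clear the measurement interrupts first; anything still pending in MIS
* right after the clear raced with this handler and is counted as an
* error below.
*/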
rkisp1_write(rkisp1, RKISP1_CIF_ISP_ICR, RKISP1_STATS_MEAS_MASK);
isp_mis_tmp = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
if (isp_mis_tmp & RKISP1_STATS_MEAS_MASK)
rkisp1->debug.stats_error++;
if (isp_ris & RKISP1_STATS_MEAS_MASK)
rkisp1_stats_send_measurement(stats, isp_ris);
spin_unlock(&stats->lock);
}
static void rkisp1_init_stats(struct rkisp1_stats *stats)
{
stats->vdev_fmt.fmt.meta.dataformat =
V4L2_META_FMT_RK_ISP1_STAT_3A;
stats->vdev_fmt.fmt.meta.buffersize =
sizeof(struct rkisp1_stat_buffer);
if (stats->rkisp1->info->isp_ver == RKISP1_V12)
stats->ops = &rkisp1_v12_stats_ops;
else
stats->ops = &rkisp1_v10_stats_ops;
}
int rkisp1_stats_register(struct rkisp1_device *rkisp1)
{
struct rkisp1_stats *stats = &rkisp1->stats;
struct rkisp1_vdev_node *node = &stats->vnode;
struct video_device *vdev = &node->vdev;
int ret;
stats->rkisp1 = rkisp1;
mutex_init(&node->vlock);
INIT_LIST_HEAD(&stats->stat);
spin_lock_init(&stats->lock);
strscpy(vdev->name, RKISP1_STATS_DEV_NAME, sizeof(vdev->name));
video_set_drvdata(vdev, stats);
vdev->ioctl_ops = &rkisp1_stats_ioctl;
vdev->fops = &rkisp1_stats_fops;
vdev->release = video_device_release_empty;
vdev->lock = &node->vlock;
vdev->v4l2_dev = &rkisp1->v4l2_dev;
vdev->queue = &node->buf_queue;
vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
vdev->vfl_dir = VFL_DIR_RX;
rkisp1_stats_init_vb2_queue(vdev->queue, stats);
rkisp1_init_stats(stats);
node->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
if (ret)
goto error;
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(&vdev->dev,
"failed to register %s, ret=%d\n", vdev->name, ret);
goto error;
}
return 0;
error:
media_entity_cleanup(&vdev->entity);
mutex_destroy(&node->vlock);
stats->rkisp1 = NULL;
return ret;
}
void rkisp1_stats_unregister(struct rkisp1_device *rkisp1)
{
struct rkisp1_stats *stats = &rkisp1->stats;
struct rkisp1_vdev_node *node = &stats->vnode;
struct video_device *vdev = &node->vdev;
if (!stats->rkisp1)
return;
vb2_video_unregister_device(vdev);
media_entity_cleanup(&vdev->entity);
mutex_destroy(&node->vlock);
}
| linux-master | drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Rockchip ISP1 Driver - CSI-2 Receiver
*
* Copyright (C) 2019 Collabora, Ltd.
* Copyright (C) 2022 Ideas on Board
*
* Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/lockdep.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include "rkisp1-common.h"
#include "rkisp1-csi.h"
#define RKISP1_CSI_DEV_NAME RKISP1_DRIVER_NAME "_csi"
#define RKISP1_CSI_DEF_FMT MEDIA_BUS_FMT_SRGGB10_1X10
static inline struct rkisp1_csi *to_rkisp1_csi(struct v4l2_subdev *sd)
{
return container_of(sd, struct rkisp1_csi, sd);
}
static struct v4l2_mbus_framefmt *
rkisp1_csi_get_pad_fmt(struct rkisp1_csi *csi,
struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
struct v4l2_subdev_state state = {
.pads = csi->pad_cfg
};
lockdep_assert_held(&csi->lock);
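/*
* ACTIVE formats are stored in csi->pad_cfg and wrapped in a local
* subdev state above, so both cases go through the TRY accessor.
*/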
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad);
else
return v4l2_subdev_get_try_format(&csi->sd, &state, pad);
}
int rkisp1_csi_link_sensor(struct rkisp1_device *rkisp1, struct v4l2_subdev *sd,
struct rkisp1_sensor_async *s_asd,
unsigned int source_pad)
{
struct rkisp1_csi *csi = &rkisp1->csi;
int ret;
s_asd->pixel_rate_ctrl = v4l2_ctrl_find(sd->ctrl_handler,
V4L2_CID_PIXEL_RATE);
if (!s_asd->pixel_rate_ctrl) {
dev_err(rkisp1->dev, "No pixel rate control in subdev %s\n",
sd->name);
return -EINVAL;
}
/* Create the link from the sensor to the CSI receiver. */
ret = media_create_pad_link(&sd->entity, source_pad,
&csi->sd.entity, RKISP1_CSI_PAD_SINK,
!s_asd->index ? MEDIA_LNK_FL_ENABLED : 0);
if (ret) {
dev_err(csi->rkisp1->dev, "failed to link src pad of %s\n",
sd->name);
return ret;
}
return 0;
}
static int rkisp1_csi_config(struct rkisp1_csi *csi,
const struct rkisp1_sensor_async *sensor)
{
struct rkisp1_device *rkisp1 = csi->rkisp1;
unsigned int lanes = sensor->lanes;
u32 mipi_ctrl;
if (lanes < 1 || lanes > 4)
return -EINVAL;
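/*
* NUM_LANES is 0-based; take all lanes out of shutdown and enable the
* clock lane. ERR_SOT_SYNC_HS_SKIP presumably makes the receiver skip
* to the next sync point on start-of-transmission sync errors.
*/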
mipi_ctrl = RKISP1_CIF_MIPI_CTRL_NUM_LANES(lanes - 1) |
RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(0xf) |
RKISP1_CIF_MIPI_CTRL_ERR_SOT_SYNC_HS_SKIP |
RKISP1_CIF_MIPI_CTRL_CLOCKLANE_ENA;
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_CTRL, mipi_ctrl);
/* V12 could also use a newer csi2-host, but we don't want that yet */
if (rkisp1->info->isp_ver == RKISP1_V12)
rkisp1_write(rkisp1, RKISP1_CIF_ISP_CSI0_CTRL0, 0);
/* Configure Data Type and Virtual Channel */
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMG_DATA_SEL,
RKISP1_CIF_MIPI_DATA_SEL_DT(csi->sink_fmt->mipi_dt) |
RKISP1_CIF_MIPI_DATA_SEL_VC(0));
/* Clear MIPI interrupts */
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_ICR, ~0);
/*
* Disable the RKISP1_CIF_MIPI_ERR_DPHY interrupt here temporarily, as
* the ISP bus may be dead when switching the ISP.
*/
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC,
RKISP1_CIF_MIPI_FRAME_END | RKISP1_CIF_MIPI_ERR_CSI |
RKISP1_CIF_MIPI_ERR_DPHY |
RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(0x03) |
RKISP1_CIF_MIPI_ADD_DATA_OVFLW);
dev_dbg(rkisp1->dev, "\n MIPI_CTRL 0x%08x\n"
" MIPI_IMG_DATA_SEL 0x%08x\n"
" MIPI_STATUS 0x%08x\n"
" MIPI_IMSC 0x%08x\n",
rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL),
rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMG_DATA_SEL),
rkisp1_read(rkisp1, RKISP1_CIF_MIPI_STATUS),
rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC));
return 0;
}
static void rkisp1_csi_enable(struct rkisp1_csi *csi)
{
struct rkisp1_device *rkisp1 = csi->rkisp1;
u32 val;
val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_CTRL,
val | RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA);
}
static void rkisp1_csi_disable(struct rkisp1_csi *csi)
{
struct rkisp1_device *rkisp1 = csi->rkisp1;
u32 val;
/* Mask and clear interrupts. */
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC, 0);
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_ICR, ~0);
val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_CTRL,
val & (~RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA));
}
static int rkisp1_csi_start(struct rkisp1_csi *csi,
const struct rkisp1_sensor_async *sensor)
{
struct rkisp1_device *rkisp1 = csi->rkisp1;
union phy_configure_opts opts;
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
s64 pixel_clock;
int ret;
ret = rkisp1_csi_config(csi, sensor);
if (ret)
return ret;
pixel_clock = v4l2_ctrl_g_ctrl_int64(sensor->pixel_rate_ctrl);
if (!pixel_clock) {
dev_err(rkisp1->dev, "Invalid pixel rate value\n");
return -EINVAL;
}
phy_mipi_dphy_get_default_config(pixel_clock, csi->sink_fmt->bus_width,
sensor->lanes, cfg);
phy_set_mode(csi->dphy, PHY_MODE_MIPI_DPHY);
phy_configure(csi->dphy, &opts);
phy_power_on(csi->dphy);
rkisp1_csi_enable(csi);
/*
* CIF spec says to wait for sufficient time after enabling
* the MIPI interface and before starting the sensor output.
*/
usleep_range(1000, 1200);
return 0;
}
static void rkisp1_csi_stop(struct rkisp1_csi *csi)
{
rkisp1_csi_disable(csi);
phy_power_off(csi->dphy);
}
irqreturn_t rkisp1_csi_isr(int irq, void *ctx)
{
struct device *dev = ctx;
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 val, status;
status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
if (!status)
return IRQ_NONE;
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_ICR, status);
/*
* Disable the DPHY errctrl interrupt, because the errctrl signal is
* asserted until the next change of line state. This may take too
* long and keep the CPU stuck in this interrupt.
*/
if (status & RKISP1_CIF_MIPI_ERR_CTRL(0x0f)) {
val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC,
val & ~RKISP1_CIF_MIPI_ERR_CTRL(0x0f));
rkisp1->csi.is_dphy_errctrl_disabled = true;
}
/*
* Re-enable the DPHY errctrl interrupt once a whole frame has been
* received without any error.
*/
if (status == RKISP1_CIF_MIPI_FRAME_END) {
if (rkisp1->csi.is_dphy_errctrl_disabled) {
val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
val |= RKISP1_CIF_MIPI_ERR_CTRL(0x0f);
rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC, val);
rkisp1->csi.is_dphy_errctrl_disabled = false;
}
} else {
rkisp1->debug.mipi_error++;
}
return IRQ_HANDLED;
}
/* ----------------------------------------------------------------------------
* Subdev pad operations
*/
static int rkisp1_csi_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct rkisp1_csi *csi = to_rkisp1_csi(sd);
unsigned int i;
int pos = 0;
if (code->pad == RKISP1_CSI_PAD_SRC) {
const struct v4l2_mbus_framefmt *sink_fmt;
if (code->index)
return -EINVAL;
mutex_lock(&csi->lock);
sink_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state,
RKISP1_CSI_PAD_SINK,
code->which);
code->code = sink_fmt->code;
mutex_unlock(&csi->lock);
return 0;
}
for (i = 0; ; i++) {
const struct rkisp1_mbus_info *fmt =
rkisp1_mbus_info_get_by_index(i);
if (!fmt)
return -EINVAL;
if (!(fmt->direction & RKISP1_ISP_SD_SINK))
continue;
if (code->index == pos) {
code->code = fmt->mbus_code;
return 0;
}
pos++;
}
return -EINVAL;
}
static int rkisp1_csi_init_config(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_CSI_PAD_SINK);
src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_CSI_PAD_SRC);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
sink_fmt->field = V4L2_FIELD_NONE;
sink_fmt->code = RKISP1_CSI_DEF_FMT;
*src_fmt = *sink_fmt;
return 0;
}
static int rkisp1_csi_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_csi *csi = to_rkisp1_csi(sd);
mutex_lock(&csi->lock);
fmt->format = *rkisp1_csi_get_pad_fmt(csi, sd_state, fmt->pad,
fmt->which);
mutex_unlock(&csi->lock);
return 0;
}
static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_csi *csi = to_rkisp1_csi(sd);
const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
/* The format on the source pad always matches the sink pad. */
if (fmt->pad == RKISP1_CSI_PAD_SRC)
return rkisp1_csi_get_fmt(sd, sd_state, fmt);
mutex_lock(&csi->lock);
sink_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state, RKISP1_CSI_PAD_SINK,
fmt->which);
sink_fmt->code = fmt->format.code;
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SINK)) {
sink_fmt->code = RKISP1_CSI_DEF_FMT;
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
}
sink_fmt->width = clamp_t(u32, fmt->format.width,
RKISP1_ISP_MIN_WIDTH,
RKISP1_ISP_MAX_WIDTH);
sink_fmt->height = clamp_t(u32, fmt->format.height,
RKISP1_ISP_MIN_HEIGHT,
RKISP1_ISP_MAX_HEIGHT);
fmt->format = *sink_fmt;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
csi->sink_fmt = mbus_info;
/* Propagate the format to the source pad. */
src_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state, RKISP1_CSI_PAD_SRC,
fmt->which);
*src_fmt = *sink_fmt;
mutex_unlock(&csi->lock);
return 0;
}
/* ----------------------------------------------------------------------------
* Subdev video operations
*/
static int rkisp1_csi_s_stream(struct v4l2_subdev *sd, int enable)
{
struct rkisp1_csi *csi = to_rkisp1_csi(sd);
struct rkisp1_device *rkisp1 = csi->rkisp1;
struct rkisp1_sensor_async *source_asd;
struct v4l2_async_connection *asc;
struct media_pad *source_pad;
struct v4l2_subdev *source;
int ret;
if (!enable) {
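/* Stop the sensor first so no data arrives at a disabled receiver. */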
v4l2_subdev_call(csi->source, video, s_stream, false);
rkisp1_csi_stop(csi);
return 0;
}
source_pad = media_entity_remote_source_pad_unique(&sd->entity);
if (IS_ERR(source_pad)) {
dev_dbg(rkisp1->dev, "Failed to get source for CSI: %ld\n",
PTR_ERR(source_pad));
return -EPIPE;
}
source = media_entity_to_v4l2_subdev(source_pad->entity);
if (!source) {
/* This should really not happen, so is not worth a message. */
return -EPIPE;
}
asc = v4l2_async_connection_unique(source);
if (!asc)
return -EPIPE;
source_asd = container_of(asc, struct rkisp1_sensor_async, asd);
if (source_asd->mbus_type != V4L2_MBUS_CSI2_DPHY)
return -EINVAL;
mutex_lock(&csi->lock);
ret = rkisp1_csi_start(csi, source_asd);
mutex_unlock(&csi->lock);
if (ret)
return ret;
ret = v4l2_subdev_call(source, video, s_stream, true);
if (ret) {
rkisp1_csi_stop(csi);
return ret;
}
csi->source = source;
return 0;
}
/* ----------------------------------------------------------------------------
* Registration
*/
static const struct media_entity_operations rkisp1_csi_media_ops = {
.link_validate = v4l2_subdev_link_validate,
};
static const struct v4l2_subdev_video_ops rkisp1_csi_video_ops = {
.s_stream = rkisp1_csi_s_stream,
};
static const struct v4l2_subdev_pad_ops rkisp1_csi_pad_ops = {
.enum_mbus_code = rkisp1_csi_enum_mbus_code,
.init_cfg = rkisp1_csi_init_config,
.get_fmt = rkisp1_csi_get_fmt,
.set_fmt = rkisp1_csi_set_fmt,
};
static const struct v4l2_subdev_ops rkisp1_csi_ops = {
.video = &rkisp1_csi_video_ops,
.pad = &rkisp1_csi_pad_ops,
};
int rkisp1_csi_register(struct rkisp1_device *rkisp1)
{
struct rkisp1_csi *csi = &rkisp1->csi;
struct v4l2_subdev_state state = {};
struct media_pad *pads;
struct v4l2_subdev *sd;
int ret;
csi->rkisp1 = rkisp1;
mutex_init(&csi->lock);
sd = &csi->sd;
v4l2_subdev_init(sd, &rkisp1_csi_ops);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sd->entity.ops = &rkisp1_csi_media_ops;
sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
sd->owner = THIS_MODULE;
strscpy(sd->name, RKISP1_CSI_DEV_NAME, sizeof(sd->name));
pads = csi->pads;
pads[RKISP1_CSI_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
MEDIA_PAD_FL_MUST_CONNECT;
pads[RKISP1_CSI_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE |
MEDIA_PAD_FL_MUST_CONNECT;
csi->sink_fmt = rkisp1_mbus_info_get_by_code(RKISP1_CSI_DEF_FMT);
ret = media_entity_pads_init(&sd->entity, RKISP1_CSI_PAD_NUM, pads);
if (ret)
goto error;
state.pads = csi->pad_cfg;
rkisp1_csi_init_config(sd, &state);
ret = v4l2_device_register_subdev(&csi->rkisp1->v4l2_dev, sd);
if (ret) {
dev_err(sd->dev, "Failed to register csi receiver subdev\n");
goto error;
}
return 0;
error:
media_entity_cleanup(&sd->entity);
mutex_destroy(&csi->lock);
csi->rkisp1 = NULL;
return ret;
}
void rkisp1_csi_unregister(struct rkisp1_device *rkisp1)
{
struct rkisp1_csi *csi = &rkisp1->csi;
if (!csi->rkisp1)
return;
v4l2_device_unregister_subdev(&csi->sd);
media_entity_cleanup(&csi->sd.entity);
mutex_destroy(&csi->lock);
}
int rkisp1_csi_init(struct rkisp1_device *rkisp1)
{
struct rkisp1_csi *csi = &rkisp1->csi;
csi->rkisp1 = rkisp1;
csi->dphy = devm_phy_get(rkisp1->dev, "dphy");
if (IS_ERR(csi->dphy))
return dev_err_probe(rkisp1->dev, PTR_ERR(csi->dphy),
"Couldn't get the MIPI D-PHY\n");
phy_init(csi->dphy);
return 0;
}
void rkisp1_csi_cleanup(struct rkisp1_device *rkisp1)
{
struct rkisp1_csi *csi = &rkisp1->csi;
phy_exit(csi->dphy);
}
| linux-master | drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Rockchip ISP1 Driver - Base driver
*
* Copyright (C) 2019 Collabora, Ltd.
*
* Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
#include "rkisp1-common.h"
#include "rkisp1-csi.h"
/*
* ISP Details
* -----------
*
* The ISP comprises:
* MIPI serial camera interface
* Image Signal Processing
* Many Image Enhancement Blocks
* Crop
* Resizer
* RGB display-ready image
* Image Rotation
*
* ISP Block Diagram
* -----------------
* rkisp1-resizer.c rkisp1-capture.c
* |====================| |=======================|
* rkisp1-isp.c Main Picture Path
* |==========================| |===============================================|
* +-----------+ +--+--+--+--+ +--------+ +--------+ +-----------+
* | | | | | | | | | | | | |
* +--------+ |\ | | | | | | | -->| Crop |->| RSZ |------------->| |
* | MIPI |--->| \ | | | | | | | | | | | | | |
* +--------+ | | | | |IE|IE|IE|IE| | +--------+ +--------+ | Memory |
* |MUX|--->| ISP |->|0 |1 |2 |3 |---+ | Interface |
* +--------+ | | | | | | | | | | +--------+ +--------+ +--------+ | |
* |Parallel|--->| / | | | | | | | | | | | | | | | |
* +--------+ |/ | | | | | | | -->| Crop |->| RSZ |->| RGB |->| |
* | | | | | | | | | | | | Rotate | | |
* +-----------+ +--+--+--+--+ +--------+ +--------+ +--------+ +-----------+
* ^
* +--------+ | |===============================================|
* | DMA |------------------------------------+ Self Picture Path
* +--------+
*
* rkisp1-stats.c rkisp1-params.c
* |===============| |===============|
* +---------------+ +---------------+
* | | | |
* | ISP | | ISP |
* | | | |
* +---------------+ +---------------+
*
*
* Media Topology
* --------------
*
* +----------+ +----------+
* | Sensor 1 | | Sensor X |
* ------------ ... ------------
* | 0 | | 0 |
* +----------+ +----------+
* | |
* \----\ /----/
* | |
* v v
* +-------------+
* | 0 |
* ---------------
* | CSI-2 RX |
* --------------- +-----------+
* | 1 | | params |
* +-------------+ | (output) |
* | +-----------+
* v |
* +------+------+ |
* | 0 | 1 |<---------+
* |------+------|
* | ISP |
* |------+------|
* +-------------| 2 | 3 |----------+
* | +------+------+ |
* | | |
* v v v
* +-----------+ +-----------+ +-----------+
* | 0 | | 0 | | stats |
* ------------- ------------- | (capture) |
* | Resizer | | Resizer | +-----------+
* ------------| ------------|
* | 1 | | 1 |
* +-----------+ +-----------+
* | |
* v v
* +-----------+ +-----------+
* | selfpath | | mainpath |
* | (capture) | | (capture) |
* +-----------+ +-----------+
*/
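/*
* For illustration only: a pipeline like the above is typically wired up
* from userspace with media-ctl. The sensor entity name below is a made-up
* example, and device numbers, links and formats depend on the board:
*
*   media-ctl -d /dev/media0 -l '"imx219 4-0010":0->"rkisp1_csi":0[1]'
*   media-ctl -d /dev/media0 -V '"rkisp1_isp":0 [fmt:SRGGB10_1X10/1920x1080]'
*/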
struct rkisp1_isr_data {
const char *name;
irqreturn_t (*isr)(int irq, void *ctx);
};
/* ----------------------------------------------------------------------------
* Sensor DT bindings
*/
static int rkisp1_subdev_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_connection *asc)
{
struct rkisp1_device *rkisp1 =
container_of(notifier, struct rkisp1_device, notifier);
struct rkisp1_sensor_async *s_asd =
container_of(asc, struct rkisp1_sensor_async, asd);
int source_pad;
int ret;
s_asd->sd = sd;
source_pad = media_entity_get_fwnode_pad(&sd->entity, s_asd->source_ep,
MEDIA_PAD_FL_SOURCE);
if (source_pad < 0) {
dev_err(rkisp1->dev, "failed to find source pad for %s\n",
sd->name);
return source_pad;
}
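/*
* Port 0 is routed through the internal CSI-2 receiver; port 1
* (parallel/BT.656) is linked directly to the ISP video sink pad.
*/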
if (s_asd->port == 0)
return rkisp1_csi_link_sensor(rkisp1, sd, s_asd, source_pad);
ret = media_create_pad_link(&sd->entity, source_pad,
&rkisp1->isp.sd.entity,
RKISP1_ISP_PAD_SINK_VIDEO,
!s_asd->index ? MEDIA_LNK_FL_ENABLED : 0);
if (ret) {
dev_err(rkisp1->dev, "failed to link source pad of %s\n",
sd->name);
return ret;
}
return 0;
}
static int rkisp1_subdev_notifier_complete(struct v4l2_async_notifier *notifier)
{
struct rkisp1_device *rkisp1 =
container_of(notifier, struct rkisp1_device, notifier);
return v4l2_device_register_subdev_nodes(&rkisp1->v4l2_dev);
}
static void rkisp1_subdev_notifier_destroy(struct v4l2_async_connection *asc)
{
struct rkisp1_sensor_async *rk_asd =
container_of(asc, struct rkisp1_sensor_async, asd);
fwnode_handle_put(rk_asd->source_ep);
}
static const struct v4l2_async_notifier_operations rkisp1_subdev_notifier_ops = {
.bound = rkisp1_subdev_notifier_bound,
.complete = rkisp1_subdev_notifier_complete,
.destroy = rkisp1_subdev_notifier_destroy,
};
static int rkisp1_subdev_notifier_register(struct rkisp1_device *rkisp1)
{
struct v4l2_async_notifier *ntf = &rkisp1->notifier;
struct fwnode_handle *fwnode = dev_fwnode(rkisp1->dev);
struct fwnode_handle *ep;
unsigned int index = 0;
int ret = 0;
v4l2_async_nf_init(ntf, &rkisp1->v4l2_dev);
ntf->ops = &rkisp1_subdev_notifier_ops;
fwnode_graph_for_each_endpoint(fwnode, ep) {
struct fwnode_handle *port;
struct v4l2_fwnode_endpoint vep = { };
struct rkisp1_sensor_async *rk_asd;
struct fwnode_handle *source;
u32 reg = 0;
/* Select the bus type based on the port. */
port = fwnode_get_parent(ep);
fwnode_property_read_u32(port, "reg", &reg);
fwnode_handle_put(port);
switch (reg) {
case 0:
/* MIPI CSI-2 port */
if (!(rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)) {
dev_err(rkisp1->dev,
"internal CSI must be available for port 0\n");
ret = -EINVAL;
break;
}
vep.bus_type = V4L2_MBUS_CSI2_DPHY;
break;
case 1:
/*
* Parallel port. The bus-type property in DT is
* mandatory for port 1, it will be used to determine if
* it's PARALLEL or BT656.
*/
vep.bus_type = V4L2_MBUS_UNKNOWN;
break;
}
/* Parse the endpoint and validate the bus type. */
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
if (ret) {
dev_err(rkisp1->dev, "failed to parse endpoint %pfw\n",
ep);
break;
}
if (vep.base.port == 1) {
if (vep.bus_type != V4L2_MBUS_PARALLEL &&
vep.bus_type != V4L2_MBUS_BT656) {
dev_err(rkisp1->dev,
"port 1 must be parallel or BT656\n");
ret = -EINVAL;
break;
}
}
/* Add the async subdev to the notifier. */
source = fwnode_graph_get_remote_endpoint(ep);
if (!source) {
dev_err(rkisp1->dev,
"endpoint %pfw has no remote endpoint\n",
ep);
ret = -ENODEV;
break;
}
rk_asd = v4l2_async_nf_add_fwnode(ntf, source,
struct rkisp1_sensor_async);
if (IS_ERR(rk_asd)) {
fwnode_handle_put(source);
ret = PTR_ERR(rk_asd);
break;
}
rk_asd->index = index++;
rk_asd->source_ep = source;
rk_asd->mbus_type = vep.bus_type;
rk_asd->port = vep.base.port;
if (vep.bus_type == V4L2_MBUS_CSI2_DPHY) {
rk_asd->mbus_flags = vep.bus.mipi_csi2.flags;
rk_asd->lanes = vep.bus.mipi_csi2.num_data_lanes;
} else {
rk_asd->mbus_flags = vep.bus.parallel.flags;
}
dev_dbg(rkisp1->dev, "registered ep id %d, bus type %u, %u lanes\n",
vep.base.id, rk_asd->mbus_type, rk_asd->lanes);
}
if (ret) {
fwnode_handle_put(ep);
v4l2_async_nf_cleanup(ntf);
return ret;
}
if (!index)
dev_dbg(rkisp1->dev, "no remote subdevice found\n");
ret = v4l2_async_nf_register(ntf);
if (ret) {
v4l2_async_nf_cleanup(ntf);
return ret;
}
return 0;
}
/* ----------------------------------------------------------------------------
* Power
*/
static int __maybe_unused rkisp1_runtime_suspend(struct device *dev)
{
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
clk_bulk_disable_unprepare(rkisp1->clk_size, rkisp1->clks);
return pinctrl_pm_select_sleep_state(dev);
}
static int __maybe_unused rkisp1_runtime_resume(struct device *dev)
{
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
int ret;
ret = pinctrl_pm_select_default_state(dev);
if (ret)
return ret;
ret = clk_bulk_prepare_enable(rkisp1->clk_size, rkisp1->clks);
if (ret)
return ret;
return 0;
}
static const struct dev_pm_ops rkisp1_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(rkisp1_runtime_suspend, rkisp1_runtime_resume, NULL)
};
/* ----------------------------------------------------------------------------
* Core
*/
static int rkisp1_create_links(struct rkisp1_device *rkisp1)
{
unsigned int i;
int ret;
if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2) {
/* Link the CSI receiver to the ISP. */
ret = media_create_pad_link(&rkisp1->csi.sd.entity,
RKISP1_CSI_PAD_SRC,
&rkisp1->isp.sd.entity,
RKISP1_ISP_PAD_SINK_VIDEO,
MEDIA_LNK_FL_ENABLED);
if (ret)
return ret;
}
/* create ISP->RSZ->CAP links */
for (i = 0; i < 2; i++) {
struct media_entity *resizer =
&rkisp1->resizer_devs[i].sd.entity;
struct media_entity *capture =
&rkisp1->capture_devs[i].vnode.vdev.entity;
ret = media_create_pad_link(&rkisp1->isp.sd.entity,
RKISP1_ISP_PAD_SOURCE_VIDEO,
resizer, RKISP1_RSZ_PAD_SINK,
MEDIA_LNK_FL_ENABLED);
if (ret)
return ret;
ret = media_create_pad_link(resizer, RKISP1_RSZ_PAD_SRC,
capture, 0,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
if (ret)
return ret;
}
/* params links */
ret = media_create_pad_link(&rkisp1->params.vnode.vdev.entity, 0,
&rkisp1->isp.sd.entity,
RKISP1_ISP_PAD_SINK_PARAMS,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
if (ret)
return ret;
/* 3A stats links */
return media_create_pad_link(&rkisp1->isp.sd.entity,
RKISP1_ISP_PAD_SOURCE_STATS,
&rkisp1->stats.vnode.vdev.entity, 0,
MEDIA_LNK_FL_ENABLED |
MEDIA_LNK_FL_IMMUTABLE);
}
static void rkisp1_entities_unregister(struct rkisp1_device *rkisp1)
{
if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)
rkisp1_csi_unregister(rkisp1);
rkisp1_params_unregister(rkisp1);
rkisp1_stats_unregister(rkisp1);
rkisp1_capture_devs_unregister(rkisp1);
rkisp1_resizer_devs_unregister(rkisp1);
rkisp1_isp_unregister(rkisp1);
}
static int rkisp1_entities_register(struct rkisp1_device *rkisp1)
{
int ret;
ret = rkisp1_isp_register(rkisp1);
if (ret)
goto error;
ret = rkisp1_resizer_devs_register(rkisp1);
if (ret)
goto error;
ret = rkisp1_capture_devs_register(rkisp1);
if (ret)
goto error;
ret = rkisp1_stats_register(rkisp1);
if (ret)
goto error;
ret = rkisp1_params_register(rkisp1);
if (ret)
goto error;
if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2) {
ret = rkisp1_csi_register(rkisp1);
if (ret)
goto error;
}
ret = rkisp1_create_links(rkisp1);
if (ret)
goto error;
return 0;
error:
rkisp1_entities_unregister(rkisp1);
return ret;
}
static irqreturn_t rkisp1_isr(int irq, void *ctx)
{
/*
* Call rkisp1_capture_isr() first to handle the frame that
* potentially completed using the current frame_sequence number before
* it is potentially incremented by rkisp1_isp_isr() in the vertical
* sync.
*/
rkisp1_capture_isr(irq, ctx);
rkisp1_isp_isr(irq, ctx);
rkisp1_csi_isr(irq, ctx);
return IRQ_HANDLED;
}
static const char * const px30_isp_clks[] = {
"isp",
"aclk",
"hclk",
"pclk",
};
static const struct rkisp1_isr_data px30_isp_isrs[] = {
{ "isp", rkisp1_isp_isr },
{ "mi", rkisp1_capture_isr },
{ "mipi", rkisp1_csi_isr },
};
static const struct rkisp1_info px30_isp_info = {
.clks = px30_isp_clks,
.clk_size = ARRAY_SIZE(px30_isp_clks),
.isrs = px30_isp_isrs,
.isr_size = ARRAY_SIZE(px30_isp_isrs),
.isp_ver = RKISP1_V12,
.features = RKISP1_FEATURE_MIPI_CSI2,
};
static const char * const rk3399_isp_clks[] = {
"isp",
"aclk",
"hclk",
};
static const struct rkisp1_isr_data rk3399_isp_isrs[] = {
{ NULL, rkisp1_isr },
};
static const struct rkisp1_info rk3399_isp_info = {
.clks = rk3399_isp_clks,
.clk_size = ARRAY_SIZE(rk3399_isp_clks),
.isrs = rk3399_isp_isrs,
.isr_size = ARRAY_SIZE(rk3399_isp_isrs),
.isp_ver = RKISP1_V10,
.features = RKISP1_FEATURE_MIPI_CSI2,
};
static const struct of_device_id rkisp1_of_match[] = {
{
.compatible = "rockchip,px30-cif-isp",
.data = &px30_isp_info,
},
{
.compatible = "rockchip,rk3399-cif-isp",
.data = &rk3399_isp_info,
},
{},
};
MODULE_DEVICE_TABLE(of, rkisp1_of_match);
static int rkisp1_probe(struct platform_device *pdev)
{
const struct rkisp1_info *info;
struct device *dev = &pdev->dev;
struct rkisp1_device *rkisp1;
struct v4l2_device *v4l2_dev;
unsigned int i;
int ret, irq;
u32 cif_id;
rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), GFP_KERNEL);
if (!rkisp1)
return -ENOMEM;
info = of_device_get_match_data(dev);
rkisp1->info = info;
dev_set_drvdata(dev, rkisp1);
rkisp1->dev = dev;
mutex_init(&rkisp1->stream_lock);
rkisp1->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rkisp1->base_addr))
return PTR_ERR(rkisp1->base_addr);
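/*
* SoCs like the PX30 provide one named IRQ per block, while the RK3399
* exposes a single combined interrupt handled by rkisp1_isr().
*/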
for (i = 0; i < info->isr_size; i++) {
irq = info->isrs[i].name
? platform_get_irq_byname(pdev, info->isrs[i].name)
: platform_get_irq(pdev, i);
if (irq < 0)
return irq;
ret = devm_request_irq(dev, irq, info->isrs[i].isr, IRQF_SHARED,
dev_driver_string(dev), dev);
if (ret) {
dev_err(dev, "request irq failed: %d\n", ret);
return ret;
}
}
for (i = 0; i < info->clk_size; i++)
rkisp1->clks[i].id = info->clks[i];
ret = devm_clk_bulk_get(dev, info->clk_size, rkisp1->clks);
if (ret)
return ret;
rkisp1->clk_size = info->clk_size;
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret)
goto err_pm_runtime_disable;
cif_id = rkisp1_read(rkisp1, RKISP1_CIF_VI_ID);
dev_dbg(rkisp1->dev, "CIF_ID 0x%08x\n", cif_id);
pm_runtime_put(&pdev->dev);
rkisp1->media_dev.hw_revision = info->isp_ver;
strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME,
sizeof(rkisp1->media_dev.model));
rkisp1->media_dev.dev = &pdev->dev;
strscpy(rkisp1->media_dev.bus_info, RKISP1_BUS_INFO,
sizeof(rkisp1->media_dev.bus_info));
media_device_init(&rkisp1->media_dev);
v4l2_dev = &rkisp1->v4l2_dev;
v4l2_dev->mdev = &rkisp1->media_dev;
strscpy(v4l2_dev->name, RKISP1_DRIVER_NAME, sizeof(v4l2_dev->name));
ret = v4l2_device_register(rkisp1->dev, &rkisp1->v4l2_dev);
if (ret)
goto err_pm_runtime_disable;
ret = media_device_register(&rkisp1->media_dev);
if (ret) {
dev_err(dev, "Failed to register media device: %d\n", ret);
goto err_unreg_v4l2_dev;
}
if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2) {
ret = rkisp1_csi_init(rkisp1);
if (ret)
goto err_unreg_media_dev;
}
ret = rkisp1_entities_register(rkisp1);
if (ret)
goto err_cleanup_csi;
ret = rkisp1_subdev_notifier_register(rkisp1);
if (ret)
goto err_unreg_entities;
rkisp1_debug_init(rkisp1);
return 0;
err_unreg_entities:
rkisp1_entities_unregister(rkisp1);
err_cleanup_csi:
if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)
rkisp1_csi_cleanup(rkisp1);
err_unreg_media_dev:
media_device_unregister(&rkisp1->media_dev);
err_unreg_v4l2_dev:
v4l2_device_unregister(&rkisp1->v4l2_dev);
err_pm_runtime_disable:
pm_runtime_disable(&pdev->dev);
return ret;
}
static void rkisp1_remove(struct platform_device *pdev)
{
struct rkisp1_device *rkisp1 = platform_get_drvdata(pdev);
v4l2_async_nf_unregister(&rkisp1->notifier);
v4l2_async_nf_cleanup(&rkisp1->notifier);
rkisp1_entities_unregister(rkisp1);
if (rkisp1->info->features & RKISP1_FEATURE_MIPI_CSI2)
rkisp1_csi_cleanup(rkisp1);
rkisp1_debug_cleanup(rkisp1);
media_device_unregister(&rkisp1->media_dev);
v4l2_device_unregister(&rkisp1->v4l2_dev);
pm_runtime_disable(&pdev->dev);
}
static struct platform_driver rkisp1_drv = {
.driver = {
.name = RKISP1_DRIVER_NAME,
.of_match_table = of_match_ptr(rkisp1_of_match),
.pm = &rkisp1_pm_ops,
},
.probe = rkisp1_probe,
.remove_new = rkisp1_remove,
};
module_platform_driver(rkisp1_drv);
MODULE_DESCRIPTION("Rockchip ISP1 platform driver");
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Rockchip ISP1 Driver - Params subdevice
*
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h> /* for ISP params */
#include "rkisp1-common.h"
#define RKISP1_PARAMS_DEV_NAME RKISP1_DRIVER_NAME "_params"
#define RKISP1_ISP_PARAMS_REQ_BUFS_MIN 2
#define RKISP1_ISP_PARAMS_REQ_BUFS_MAX 8
#define RKISP1_ISP_DPCC_METHODS_SET(n) \
(RKISP1_CIF_ISP_DPCC_METHODS_SET_1 + 0x4 * (n))
#define RKISP1_ISP_DPCC_LINE_THRESH(n) \
(RKISP1_CIF_ISP_DPCC_LINE_THRESH_1 + 0x14 * (n))
#define RKISP1_ISP_DPCC_LINE_MAD_FAC(n) \
(RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_1 + 0x14 * (n))
#define RKISP1_ISP_DPCC_PG_FAC(n) \
(RKISP1_CIF_ISP_DPCC_PG_FAC_1 + 0x14 * (n))
#define RKISP1_ISP_DPCC_RND_THRESH(n) \
(RKISP1_CIF_ISP_DPCC_RND_THRESH_1 + 0x14 * (n))
#define RKISP1_ISP_DPCC_RG_FAC(n) \
(RKISP1_CIF_ISP_DPCC_RG_FAC_1 + 0x14 * (n))
#define RKISP1_ISP_CC_COEFF(n) \
(RKISP1_CIF_ISP_CC_COEFF_0 + (n) * 4)
static inline void
rkisp1_param_set_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask)
{
u32 val;
val = rkisp1_read(params->rkisp1, reg);
rkisp1_write(params->rkisp1, reg, val | bit_mask);
}
static inline void
rkisp1_param_clear_bits(struct rkisp1_params *params, u32 reg, u32 bit_mask)
{
u32 val;
val = rkisp1_read(params->rkisp1, reg);
rkisp1_write(params->rkisp1, reg, val & ~bit_mask);
}
/* ISP BP interface function */
static void rkisp1_dpcc_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_dpcc_config *arg)
{
unsigned int i;
u32 mode;
/*
* The enable bit is controlled in rkisp1_isp_isr_other_config() and
* must be preserved. The grayscale mode should be configured
* automatically based on the media bus code on the ISP sink pad, so
* only the STAGE1_ENABLE bit can be set by userspace.
*/
mode = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE);
mode &= RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE;
mode |= arg->mode & RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE;
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE, mode);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_OUTPUT_MODE,
arg->output_mode & RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_MASK);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_SET_USE,
arg->set_use & RKISP1_CIF_ISP_DPCC_SET_USE_MASK);
for (i = 0; i < RKISP1_CIF_ISP_DPCC_METHODS_MAX; i++) {
rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_METHODS_SET(i),
arg->methods[i].method &
RKISP1_CIF_ISP_DPCC_METHODS_SET_MASK);
rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_LINE_THRESH(i),
arg->methods[i].line_thresh &
RKISP1_CIF_ISP_DPCC_LINE_THRESH_MASK);
rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_LINE_MAD_FAC(i),
arg->methods[i].line_mad_fac &
RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_MASK);
rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_PG_FAC(i),
arg->methods[i].pg_fac &
RKISP1_CIF_ISP_DPCC_PG_FAC_MASK);
rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_RND_THRESH(i),
arg->methods[i].rnd_thresh &
RKISP1_CIF_ISP_DPCC_RND_THRESH_MASK);
rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_RG_FAC(i),
arg->methods[i].rg_fac &
RKISP1_CIF_ISP_DPCC_RG_FAC_MASK);
}
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_RND_OFFS,
arg->rnd_offs & RKISP1_CIF_ISP_DPCC_RND_OFFS_MASK);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_RO_LIMITS,
arg->ro_limits & RKISP1_CIF_ISP_DPCC_RO_LIMIT_MASK);
}
/* ISP black level subtraction interface function */
static void rkisp1_bls_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_bls_config *arg)
{
/* avoid overriding the old enable value */
u32 new_control;
new_control = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_BLS_CTRL);
new_control &= RKISP1_CIF_ISP_BLS_ENA;
/* fixed subtraction values */
if (!arg->enable_auto) {
const struct rkisp1_cif_isp_bls_fixed_val *pval =
&arg->fixed_val;
switch (params->raw_type) {
case RKISP1_RAW_BGGR:
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
pval->r);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
pval->gr);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
pval->gb);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
pval->b);
break;
case RKISP1_RAW_GBRG:
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
pval->r);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
pval->gr);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
pval->gb);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
pval->b);
break;
case RKISP1_RAW_GRBG:
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
pval->r);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
pval->gr);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
pval->gb);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
pval->b);
break;
case RKISP1_RAW_RGGB:
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_A_FIXED,
pval->r);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_B_FIXED,
pval->gr);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_C_FIXED,
pval->gb);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_D_FIXED,
pval->b);
break;
default:
break;
}
} else {
if (arg->en_windows & BIT(1)) {
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_H2_START,
arg->bls_window2.h_offs);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_H2_STOP,
arg->bls_window2.h_size);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_V2_START,
arg->bls_window2.v_offs);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_V2_STOP,
arg->bls_window2.v_size);
new_control |= RKISP1_CIF_ISP_BLS_WINDOW_2;
}
if (arg->en_windows & BIT(0)) {
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_H1_START,
arg->bls_window1.h_offs);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_H1_STOP,
arg->bls_window1.h_size);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_V1_START,
arg->bls_window1.v_offs);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_V1_STOP,
arg->bls_window1.v_size);
new_control |= RKISP1_CIF_ISP_BLS_WINDOW_1;
}
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_SAMPLES,
arg->bls_samples);
new_control |= RKISP1_CIF_ISP_BLS_MODE_MEASURED;
}
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_BLS_CTRL, new_control);
}
/* ISP LS correction interface function */
static void
rkisp1_lsc_matrix_config_v10(struct rkisp1_params *params,
const struct rkisp1_cif_isp_lsc_config *pconfig)
{
struct rkisp1_device *rkisp1 = params->rkisp1;
u32 lsc_status, sram_addr, lsc_table_sel;
unsigned int i, j;
lsc_status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_STATUS);
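/*
* The LSC SRAM is double buffered: program the half that is currently
* inactive, then make it the active table at the end.
*/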
/* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = ( 17 * 18 ) >> 1 */
sram_addr = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ?
RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr);
/* program data tables (table size is 9 * 17 = 153) */
for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) {
const __u16 *r_tbl = pconfig->r_data_tbl[i];
const __u16 *gr_tbl = pconfig->gr_data_tbl[i];
const __u16 *gb_tbl = pconfig->gb_data_tbl[i];
const __u16 *b_tbl = pconfig->b_data_tbl[i];
/*
* 17 sectors with 2 values in one DWORD = 9
* DWORDs (2nd value of last DWORD unused)
*/
for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(
r_tbl[j], r_tbl[j + 1]));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(
gr_tbl[j], gr_tbl[j + 1]));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(
gb_tbl[j], gb_tbl[j + 1]));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(
b_tbl[j], b_tbl[j + 1]));
}
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(r_tbl[j], 0));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(gr_tbl[j], 0));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(gb_tbl[j], 0));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(b_tbl[j], 0));
}
lsc_table_sel = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ?
RKISP1_CIF_ISP_LSC_TABLE_0 : RKISP1_CIF_ISP_LSC_TABLE_1;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, lsc_table_sel);
}
static void
rkisp1_lsc_matrix_config_v12(struct rkisp1_params *params,
const struct rkisp1_cif_isp_lsc_config *pconfig)
{
struct rkisp1_device *rkisp1 = params->rkisp1;
u32 lsc_status, sram_addr, lsc_table_sel;
unsigned int i, j;
lsc_status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_STATUS);
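/* As in the V10 variant, fill the inactive half of the double-buffered SRAM and switch to it at the end. */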
/* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = ( 17 * 18 ) >> 1 */
sram_addr = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ?
RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr);
/* program data tables (table size is 9 * 17 = 153) */
for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) {
const __u16 *r_tbl = pconfig->r_data_tbl[i];
const __u16 *gr_tbl = pconfig->gr_data_tbl[i];
const __u16 *gb_tbl = pconfig->gb_data_tbl[i];
const __u16 *b_tbl = pconfig->b_data_tbl[i];
/*
* 17 sectors with 2 values in one DWORD = 9
* DWORDs (2nd value of last DWORD unused)
*/
for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
r_tbl[j], r_tbl[j + 1]));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
gr_tbl[j], gr_tbl[j + 1]));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
gb_tbl[j], gb_tbl[j + 1]));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
b_tbl[j], b_tbl[j + 1]));
}
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(r_tbl[j], 0));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(gr_tbl[j], 0));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(gb_tbl[j], 0));
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA,
RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(b_tbl[j], 0));
}
lsc_table_sel = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ?
RKISP1_CIF_ISP_LSC_TABLE_0 : RKISP1_CIF_ISP_LSC_TABLE_1;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, lsc_table_sel);
}
static void rkisp1_lsc_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_lsc_config *arg)
{
struct rkisp1_device *rkisp1 = params->rkisp1;
u32 lsc_ctrl, data;
unsigned int i;
/* The LSC must be disabled while configuring it, so save the current enable state and restore it afterwards. */
lsc_ctrl = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_CTRL);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
RKISP1_CIF_ISP_LSC_CTRL_ENA);
params->ops->lsc_matrix_config(params, arg);
for (i = 0; i < RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE / 2; i++) {
/* program x size tables */
data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_size_tbl[i * 2],
arg->x_size_tbl[i * 2 + 1]);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_XSIZE(i), data);
/* program x grad tables */
data = RKISP1_CIF_ISP_LSC_SECT_GRAD(arg->x_grad_tbl[i * 2],
arg->x_grad_tbl[i * 2 + 1]);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_XGRAD(i), data);
/* program y size tables */
data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_size_tbl[i * 2],
arg->y_size_tbl[i * 2 + 1]);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_YSIZE(i), data);
/* program y grad tables */
data = RKISP1_CIF_ISP_LSC_SECT_GRAD(arg->y_grad_tbl[i * 2],
arg->y_grad_tbl[i * 2 + 1]);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_YGRAD(i), data);
}
/* restore the lsc ctrl status */
if (lsc_ctrl & RKISP1_CIF_ISP_LSC_CTRL_ENA)
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
RKISP1_CIF_ISP_LSC_CTRL_ENA);
else
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
RKISP1_CIF_ISP_LSC_CTRL_ENA);
}
/* ISP Filtering function */
static void rkisp1_flt_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_flt_config *arg)
{
u32 filt_mode;
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_THRESH_BL0,
arg->thresh_bl0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_THRESH_BL1,
arg->thresh_bl1);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_THRESH_SH0,
arg->thresh_sh0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_THRESH_SH1,
arg->thresh_sh1);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_FAC_BL0,
arg->fac_bl0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_FAC_BL1,
arg->fac_bl1);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_FAC_MID,
arg->fac_mid);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_FAC_SH0,
arg->fac_sh0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_FAC_SH1,
arg->fac_sh1);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_LUM_WEIGHT,
arg->lum_weight);
/* avoid overriding the old enable value */
filt_mode = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_FILT_MODE);
filt_mode &= RKISP1_CIF_ISP_FLT_ENA;
if (arg->mode)
filt_mode |= RKISP1_CIF_ISP_FLT_MODE_DNR;
filt_mode |= RKISP1_CIF_ISP_FLT_CHROMA_V_MODE(arg->chr_v_mode) |
RKISP1_CIF_ISP_FLT_CHROMA_H_MODE(arg->chr_h_mode) |
RKISP1_CIF_ISP_FLT_GREEN_STAGE1(arg->grn_stage1);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_FILT_MODE, filt_mode);
}
/* ISP demosaic interface function */
static int rkisp1_bdm_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_bdm_config *arg)
{
u32 bdm_th;
/* avoid overriding the old enable value */
bdm_th = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_DEMOSAIC);
bdm_th &= RKISP1_CIF_ISP_DEMOSAIC_BYPASS;
bdm_th |= arg->demosaic_th & ~RKISP1_CIF_ISP_DEMOSAIC_BYPASS;
/* set demosaic threshold */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DEMOSAIC, bdm_th);
return 0;
}
/* ISP GAMMA correction interface function */
static void rkisp1_sdg_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_sdg_config *arg)
{
unsigned int i;
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_GAMMA_DX_LO,
arg->xa_pnts.gamma_dx0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_GAMMA_DX_HI,
arg->xa_pnts.gamma_dx1);
for (i = 0; i < RKISP1_CIF_ISP_DEGAMMA_CURVE_SIZE; i++) {
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_GAMMA_R_Y0 + i * 4,
arg->curve_r.gamma_y[i]);
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_GAMMA_G_Y0 + i * 4,
arg->curve_g.gamma_y[i]);
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_GAMMA_B_Y0 + i * 4,
arg->curve_b.gamma_y[i]);
}
}
/* ISP GAMMA correction interface function */
static void rkisp1_goc_config_v10(struct rkisp1_params *params,
const struct rkisp1_cif_isp_goc_config *arg)
{
unsigned int i;
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10,
arg->mode);
for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10; i++)
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10 + i * 4,
arg->gamma_y[i]);
}
static void rkisp1_goc_config_v12(struct rkisp1_params *params,
const struct rkisp1_cif_isp_goc_config *arg)
{
unsigned int i;
u32 value;
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V12,
arg->mode);
for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 / 2; i++) {
value = RKISP1_CIF_ISP_GAMMA_VALUE_V12(
arg->gamma_y[2 * i + 1],
arg->gamma_y[2 * i]);
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V12 + i * 4, value);
}
}
/* ISP Cross Talk */
static void rkisp1_ctk_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_ctk_config *arg)
{
unsigned int i, j, k = 0;
for (i = 0; i < 3; i++)
for (j = 0; j < 3; j++)
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_CT_COEFF_0 + 4 * k++,
arg->coeff[i][j]);
for (i = 0; i < 3; i++)
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_CT_OFFSET_R + i * 4,
arg->ct_offset[i]);
}
static void rkisp1_ctk_enable(struct rkisp1_params *params, bool en)
{
if (en)
return;
/* Write back the default values: 0x80 on the matrix diagonal, zero elsewhere. */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_0, 0x80);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_1, 0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_2, 0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_3, 0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_4, 0x80);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_5, 0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_6, 0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_7, 0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_COEFF_8, 0x80);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_OFFSET_R, 0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_OFFSET_G, 0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CT_OFFSET_B, 0);
}
/* ISP White Balance Mode */
static void rkisp1_awb_meas_config_v10(struct rkisp1_params *params,
const struct rkisp1_cif_isp_awb_meas_config *arg)
{
u32 reg_val = 0;
/* based on the mode, configure the AWB module */
if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_YCBCR) {
/* Reference Cb and Cr */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_REF_V10,
RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
arg->awb_ref_cb);
/* Yc Threshold */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_THRESH_V10,
RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
arg->min_c);
}
reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10);
if (arg->enable_ymax_cmp)
reg_val |= RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
else
reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10, reg_val);
/* window offset */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_WND_V_OFFS_V10,
arg->awb_wnd.v_offs);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_WND_H_OFFS_V10,
arg->awb_wnd.h_offs);
/* AWB window size */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_WND_V_SIZE_V10,
arg->awb_wnd.v_size);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10,
arg->awb_wnd.h_size);
/* Number of frames */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_FRAMES_V10,
arg->frames);
}
static void rkisp1_awb_meas_config_v12(struct rkisp1_params *params,
const struct rkisp1_cif_isp_awb_meas_config *arg)
{
u32 reg_val = 0;
/* based on the mode, configure the AWB module */
if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_YCBCR) {
/* Reference Cb and Cr */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_REF_V12,
RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
arg->awb_ref_cb);
/* Yc Threshold */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_THRESH_V12,
RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
arg->min_c);
}
reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12);
if (arg->enable_ymax_cmp)
reg_val |= RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
else
reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
reg_val &= ~RKISP1_CIF_ISP_AWB_SET_FRAMES_MASK_V12;
reg_val |= RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(arg->frames);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12, reg_val);
/* window offset */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_OFFS_V12,
arg->awb_wnd.v_offs << 16 | arg->awb_wnd.h_offs);
/* AWB window size */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_SIZE_V12,
arg->awb_wnd.v_size << 16 | arg->awb_wnd.h_size);
}
static void
rkisp1_awb_meas_enable_v10(struct rkisp1_params *params,
const struct rkisp1_cif_isp_awb_meas_config *arg,
bool en)
{
u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10);
/* switch off */
reg_val &= RKISP1_CIF_ISP_AWB_MODE_MASK_NONE;
if (en) {
if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_RGB)
reg_val |= RKISP1_CIF_ISP_AWB_MODE_RGB_EN;
else
reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10,
reg_val);
/* Measurements require the AWB block to be active. */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
} else {
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10,
reg_val);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
}
}
static void
rkisp1_awb_meas_enable_v12(struct rkisp1_params *params,
const struct rkisp1_cif_isp_awb_meas_config *arg,
bool en)
{
u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12);
/* switch off */
reg_val &= RKISP1_CIF_ISP_AWB_MODE_MASK_NONE;
if (en) {
if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_RGB)
reg_val |= RKISP1_CIF_ISP_AWB_MODE_RGB_EN;
else
reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12,
reg_val);
/* Measurements require the AWB block to be active. */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
} else {
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12,
reg_val);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
}
}
static void
rkisp1_awb_gain_config_v10(struct rkisp1_params *params,
const struct rkisp1_cif_isp_awb_gain_config *arg)
{
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_GAIN_G_V10,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
arg->gain_green_b);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_GAIN_RB_V10,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
arg->gain_blue);
}
static void
rkisp1_awb_gain_config_v12(struct rkisp1_params *params,
const struct rkisp1_cif_isp_awb_gain_config *arg)
{
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_GAIN_G_V12,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
arg->gain_green_b);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AWB_GAIN_RB_V12,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
arg->gain_blue);
}
static void rkisp1_aec_config_v10(struct rkisp1_params *params,
const struct rkisp1_cif_isp_aec_config *arg)
{
unsigned int block_hsize, block_vsize;
u32 exp_ctrl;
/* avoid overriding the old enable value */
exp_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_EXP_CTRL);
exp_ctrl &= RKISP1_CIF_ISP_EXP_ENA;
if (arg->autostop)
exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP;
if (arg->mode == RKISP1_CIF_ISP_EXP_MEASURING_MODE_1)
exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1;
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_CTRL, exp_ctrl);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_H_OFFSET_V10,
arg->meas_window.h_offs);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_V_OFFSET_V10,
arg->meas_window.v_offs);
block_hsize = arg->meas_window.h_size /
RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 - 1;
block_vsize = arg->meas_window.v_size /
RKISP1_CIF_ISP_EXP_ROW_NUM_V10 - 1;
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_H_SIZE_V10,
RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(block_hsize));
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_V_SIZE_V10,
RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(block_vsize));
}
static void rkisp1_aec_config_v12(struct rkisp1_params *params,
const struct rkisp1_cif_isp_aec_config *arg)
{
u32 exp_ctrl;
u32 block_hsize, block_vsize;
u32 wnd_num_idx = 1;
static const u32 ae_wnd_num[] = { 5, 9, 15, 15 };
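/* Only the 9x9 grid (index 1) is used; values are per-axis window counts. */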
/* avoid overriding the old enable value */
exp_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_EXP_CTRL);
exp_ctrl &= RKISP1_CIF_ISP_EXP_ENA;
if (arg->autostop)
exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP;
if (arg->mode == RKISP1_CIF_ISP_EXP_MEASURING_MODE_1)
exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1;
exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_WNDNUM_SET_V12(wnd_num_idx);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_CTRL, exp_ctrl);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_OFFS_V12,
RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(arg->meas_window.v_offs) |
RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(arg->meas_window.h_offs));
block_hsize = arg->meas_window.h_size / ae_wnd_num[wnd_num_idx] - 1;
block_vsize = arg->meas_window.v_size / ae_wnd_num[wnd_num_idx] - 1;
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_EXP_SIZE_V12,
RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(block_vsize) |
RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(block_hsize));
}
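/*
* Configure color processing (contrast, hue, saturation, brightness).
* Full-range Y/C processing is selected only when the pipeline uses
* full-range quantization and no color effect is active; otherwise the
* limited-range paths are used.
*/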
static void rkisp1_cproc_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_cproc_config *arg)
{
struct rkisp1_cif_isp_isp_other_cfg *cur_other_cfg =
container_of(arg, struct rkisp1_cif_isp_isp_other_cfg, cproc_config);
struct rkisp1_cif_isp_ie_config *cur_ie_config =
&cur_other_cfg->ie_config;
u32 effect = cur_ie_config->effect;
u32 quantization = params->quantization;
rkisp1_write(params->rkisp1, RKISP1_CIF_C_PROC_CONTRAST,
arg->contrast);
rkisp1_write(params->rkisp1, RKISP1_CIF_C_PROC_HUE, arg->hue);
rkisp1_write(params->rkisp1, RKISP1_CIF_C_PROC_SATURATION, arg->sat);
rkisp1_write(params->rkisp1, RKISP1_CIF_C_PROC_BRIGHTNESS,
arg->brightness);
if (quantization != V4L2_QUANTIZATION_FULL_RANGE ||
effect != V4L2_COLORFX_NONE) {
rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL,
RKISP1_CIF_C_PROC_YOUT_FULL |
RKISP1_CIF_C_PROC_YIN_FULL |
RKISP1_CIF_C_PROC_COUT_FULL);
} else {
rkisp1_param_set_bits(params, RKISP1_CIF_C_PROC_CTRL,
RKISP1_CIF_C_PROC_YOUT_FULL |
RKISP1_CIF_C_PROC_YIN_FULL |
RKISP1_CIF_C_PROC_COUT_FULL);
}
}
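/*
* Configure the V10 histogram: predivider, measurement window and the
* 5x5 weight matrix. Weights are packed four per register, with the
* 25th weight written to a dedicated register.
*/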
static void rkisp1_hst_config_v10(struct rkisp1_params *params,
const struct rkisp1_cif_isp_hst_config *arg)
{
unsigned int block_hsize, block_vsize;
static const u32 hist_weight_regs[] = {
RKISP1_CIF_ISP_HIST_WEIGHT_00TO30_V10,
RKISP1_CIF_ISP_HIST_WEIGHT_40TO21_V10,
RKISP1_CIF_ISP_HIST_WEIGHT_31TO12_V10,
RKISP1_CIF_ISP_HIST_WEIGHT_22TO03_V10,
RKISP1_CIF_ISP_HIST_WEIGHT_13TO43_V10,
RKISP1_CIF_ISP_HIST_WEIGHT_04TO34_V10,
};
const u8 *weight;
unsigned int i;
u32 hist_prop;
/* Avoid overriding the old enable value. */
hist_prop = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP_V10);
hist_prop &= RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10;
hist_prop |= RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(arg->histogram_predivider);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP_V10, hist_prop);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_H_OFFS_V10,
arg->meas_window.h_offs);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_V_OFFS_V10,
arg->meas_window.v_offs);
block_hsize = arg->meas_window.h_size /
RKISP1_CIF_ISP_HIST_COLUMN_NUM_V10 - 1;
block_vsize = arg->meas_window.v_size / RKISP1_CIF_ISP_HIST_ROW_NUM_V10 - 1;
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_H_SIZE_V10,
block_hsize);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_V_SIZE_V10,
block_vsize);
weight = arg->hist_weight;
for (i = 0; i < ARRAY_SIZE(hist_weight_regs); ++i, weight += 4)
rkisp1_write(params->rkisp1, hist_weight_regs[i],
RKISP1_CIF_ISP_HIST_WEIGHT_SET_V10(weight[0], weight[1],
weight[2], weight[3]));
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_WEIGHT_44_V10,
weight[0] & 0x1F);
}
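/*
* Configure the V12 histogram. Control fields are packed into a single
* HIST_CTRL register, and the 9x9 user weight table is copied into a
* 15x15 register layout before being written four weights per register.
*/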
static void rkisp1_hst_config_v12(struct rkisp1_params *params,
const struct rkisp1_cif_isp_hst_config *arg)
{
unsigned int i, j;
u32 block_hsize, block_vsize;
u32 wnd_num_idx, hist_weight_num, hist_ctrl, value;
u8 weight15x15[RKISP1_CIF_ISP_HIST_WEIGHT_REG_SIZE_V12];
static const u32 hist_wnd_num[] = { 5, 9, 15, 15 };
/* Only the 9x9 window is supported for now. */
wnd_num_idx = 1;
memset(weight15x15, 0x00, sizeof(weight15x15));
/* Avoid overriding the old enable value. */
hist_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_CTRL_V12);
hist_ctrl &= RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 |
RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12;
hist_ctrl = hist_ctrl |
RKISP1_CIF_ISP_HIST_CTRL_INTRSEL_SET_V12(1) |
RKISP1_CIF_ISP_HIST_CTRL_DATASEL_SET_V12(0) |
RKISP1_CIF_ISP_HIST_CTRL_WATERLINE_SET_V12(0) |
RKISP1_CIF_ISP_HIST_CTRL_AUTOSTOP_SET_V12(0) |
RKISP1_CIF_ISP_HIST_CTRL_WNDNUM_SET_V12(1) |
RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(arg->histogram_predivider);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_CTRL_V12, hist_ctrl);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_OFFS_V12,
RKISP1_CIF_ISP_HIST_OFFS_SET_V12(arg->meas_window.h_offs,
arg->meas_window.v_offs));
block_hsize = arg->meas_window.h_size / hist_wnd_num[wnd_num_idx] - 1;
block_vsize = arg->meas_window.v_size / hist_wnd_num[wnd_num_idx] - 1;
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_SIZE_V12,
RKISP1_CIF_ISP_HIST_SIZE_SET_V12(block_hsize, block_vsize));
for (i = 0; i < hist_wnd_num[wnd_num_idx]; i++) {
for (j = 0; j < hist_wnd_num[wnd_num_idx]; j++) {
weight15x15[i * RKISP1_CIF_ISP_HIST_ROW_NUM_V12 + j] =
arg->hist_weight[i * hist_wnd_num[wnd_num_idx] + j];
}
}
hist_weight_num = RKISP1_CIF_ISP_HIST_WEIGHT_REG_SIZE_V12;
for (i = 0; i < (hist_weight_num / 4); i++) {
value = RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(
weight15x15[4 * i + 0],
weight15x15[4 * i + 1],
weight15x15[4 * i + 2],
weight15x15[4 * i + 3]);
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i, value);
}
value = RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(weight15x15[4 * i + 0], 0, 0, 0);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i,
value);
}
static void
rkisp1_hst_enable_v10(struct rkisp1_params *params,
const struct rkisp1_cif_isp_hst_config *arg, bool en)
{
if (en) {
u32 hist_prop = rkisp1_read(params->rkisp1,
RKISP1_CIF_ISP_HIST_PROP_V10);
hist_prop &= ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10;
hist_prop |= arg->mode;
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
hist_prop);
} else {
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10);
}
}
static void
rkisp1_hst_enable_v12(struct rkisp1_params *params,
const struct rkisp1_cif_isp_hst_config *arg, bool en)
{
if (en) {
u32 hist_ctrl = rkisp1_read(params->rkisp1,
RKISP1_CIF_ISP_HIST_CTRL_V12);
hist_ctrl &= ~RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12;
hist_ctrl |= RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(arg->mode);
hist_ctrl |= RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(1);
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_CTRL_V12,
hist_ctrl);
} else {
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_CTRL_V12,
RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 |
RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12);
}
}
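/*
* Configure the auto-focus measurement windows. Each window is defined
* by its top-left (LT) and bottom-right (RB) corners in register pairs
* spaced 8 bytes apart. The AFM block is disabled while reprogramming
* and its previous enable state is restored afterwards.
*/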
static void rkisp1_afm_config_v10(struct rkisp1_params *params,
const struct rkisp1_cif_isp_afc_config *arg)
{
size_t num_of_win = min_t(size_t, ARRAY_SIZE(arg->afm_win),
arg->num_afm_win);
u32 afm_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AFM_CTRL);
unsigned int i;
/* Switch off to configure. */
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
for (i = 0; i < num_of_win; i++) {
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_LT_A + i * 8,
RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_offs) |
RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_offs));
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_RB_A + i * 8,
RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_size +
arg->afm_win[i].h_offs) |
RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_size +
arg->afm_win[i].v_offs));
}
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_THRES, arg->thres);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_VAR_SHIFT,
arg->var_shift);
/* restore afm status */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_CTRL, afm_ctrl);
}
static void rkisp1_afm_config_v12(struct rkisp1_params *params,
const struct rkisp1_cif_isp_afc_config *arg)
{
size_t num_of_win = min_t(size_t, ARRAY_SIZE(arg->afm_win),
arg->num_afm_win);
u32 afm_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AFM_CTRL);
u32 lum_var_shift, afm_var_shift;
unsigned int i;
/* Switch off to configure. */
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
for (i = 0; i < num_of_win; i++) {
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_LT_A + i * 8,
RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_offs) |
RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_offs));
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_RB_A + i * 8,
RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_size +
arg->afm_win[i].h_offs) |
RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_size +
arg->afm_win[i].v_offs));
}
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_THRES, arg->thres);
lum_var_shift = RKISP1_CIF_ISP_AFM_GET_LUM_SHIFT_a_V12(arg->var_shift);
afm_var_shift = RKISP1_CIF_ISP_AFM_GET_AFM_SHIFT_a_V12(arg->var_shift);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_VAR_SHIFT,
RKISP1_CIF_ISP_AFM_SET_SHIFT_a_V12(lum_var_shift, afm_var_shift) |
RKISP1_CIF_ISP_AFM_SET_SHIFT_b_V12(lum_var_shift, afm_var_shift) |
RKISP1_CIF_ISP_AFM_SET_SHIFT_c_V12(lum_var_shift, afm_var_shift));
/* restore afm status */
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_AFM_CTRL, afm_ctrl);
}
static void rkisp1_ie_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_ie_config *arg)
{
u32 eff_ctrl;
eff_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_IMG_EFF_CTRL);
eff_ctrl &= ~RKISP1_CIF_IMG_EFF_CTRL_MODE_MASK;
if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE)
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_YCBCR_FULL;
switch (arg->effect) {
case V4L2_COLORFX_SEPIA:
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA;
break;
case V4L2_COLORFX_SET_CBCR:
rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_TINT,
arg->eff_tint);
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SEPIA;
break;
/*
* Color selection is similar to watercolor (AQUA):
* grayscale plus the selected color, with a threshold.
*/
case V4L2_COLORFX_AQUA:
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_COLOR_SEL;
rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_COLOR_SEL,
arg->color_sel);
break;
case V4L2_COLORFX_EMBOSS:
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS;
rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_1,
arg->eff_mat_1);
rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_2,
arg->eff_mat_2);
rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_3,
arg->eff_mat_3);
break;
case V4L2_COLORFX_SKETCH:
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH;
rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_3,
arg->eff_mat_3);
rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_4,
arg->eff_mat_4);
rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_MAT_5,
arg->eff_mat_5);
break;
case V4L2_COLORFX_BW:
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_BLACKWHITE;
break;
case V4L2_COLORFX_NEGATIVE:
eff_ctrl |= RKISP1_CIF_IMG_EFF_CTRL_MODE_NEGATIVE;
break;
default:
break;
}
rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_CTRL, eff_ctrl);
}
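/*
* Enabling the image effect block also gates its clock on through
* RKISP1_CIF_VI_ICCL; disabling clears the enable bit and removes the
* clock again.
*/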
static void rkisp1_ie_enable(struct rkisp1_params *params, bool en)
{
if (en) {
rkisp1_param_set_bits(params, RKISP1_CIF_VI_ICCL,
RKISP1_CIF_VI_ICCL_IE_CLK);
rkisp1_write(params->rkisp1, RKISP1_CIF_IMG_EFF_CTRL,
RKISP1_CIF_IMG_EFF_CTRL_ENABLE);
rkisp1_param_set_bits(params, RKISP1_CIF_IMG_EFF_CTRL,
RKISP1_CIF_IMG_EFF_CTRL_CFG_UPD);
} else {
rkisp1_param_clear_bits(params, RKISP1_CIF_IMG_EFF_CTRL,
RKISP1_CIF_IMG_EFF_CTRL_ENABLE);
rkisp1_param_clear_bits(params, RKISP1_CIF_VI_ICCL,
RKISP1_CIF_VI_ICCL_IE_CLK);
}
}
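/*
* Configure the color space matrix. A 3x3 coefficient set is chosen
* based on the YCbCr encoding, with separate limited- and full-range
* variants selected by the quantization, and written to the nine
* CC_COEFF registers.
*/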
static void rkisp1_csm_config(struct rkisp1_params *params)
{
struct csm_coeffs {
u16 limited[9];
u16 full[9];
};
static const struct csm_coeffs rec601_coeffs = {
.limited = {
0x0021, 0x0042, 0x000d,
0x01ed, 0x01db, 0x0038,
0x0038, 0x01d1, 0x01f7,
},
.full = {
0x0026, 0x004b, 0x000f,
0x01ea, 0x01d6, 0x0040,
0x0040, 0x01ca, 0x01f6,
},
};
static const struct csm_coeffs rec709_coeffs = {
.limited = {
0x0018, 0x0050, 0x0008,
0x01f3, 0x01d5, 0x0038,
0x0038, 0x01cd, 0x01fb,
},
.full = {
0x001b, 0x005c, 0x0009,
0x01f1, 0x01cf, 0x0040,
0x0040, 0x01c6, 0x01fa,
},
};
static const struct csm_coeffs rec2020_coeffs = {
.limited = {
0x001d, 0x004c, 0x0007,
0x01f0, 0x01d8, 0x0038,
0x0038, 0x01cd, 0x01fb,
},
.full = {
0x0022, 0x0057, 0x0008,
0x01ee, 0x01d2, 0x0040,
0x0040, 0x01c5, 0x01fb,
},
};
static const struct csm_coeffs smpte240m_coeffs = {
.limited = {
0x0018, 0x004f, 0x000a,
0x01f3, 0x01d5, 0x0038,
0x0038, 0x01ce, 0x01fa,
},
.full = {
0x001b, 0x005a, 0x000b,
0x01f1, 0x01cf, 0x0040,
0x0040, 0x01c7, 0x01f9,
},
};
const struct csm_coeffs *coeffs;
const u16 *csm;
unsigned int i;
switch (params->ycbcr_encoding) {
case V4L2_YCBCR_ENC_601:
default:
coeffs = &rec601_coeffs;
break;
case V4L2_YCBCR_ENC_709:
coeffs = &rec709_coeffs;
break;
case V4L2_YCBCR_ENC_BT2020:
coeffs = &rec2020_coeffs;
break;
case V4L2_YCBCR_ENC_SMPTE240M:
coeffs = &smpte240m_coeffs;
break;
}
if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE) {
csm = coeffs->full;
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA |
RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA);
} else {
csm = coeffs->limited;
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA |
RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA);
}
for (i = 0; i < 9; i++)
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CC_COEFF_0 + i * 4,
csm[i]);
}
/* ISP De-noise Pre-Filter (DPF) function */
static void rkisp1_dpf_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_dpf_config *arg)
{
unsigned int isp_dpf_mode, spatial_coeff, i;
switch (arg->gain.mode) {
case RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_GAINS:
isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_USE_NF_GAIN |
RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP;
break;
case RKISP1_CIF_ISP_DPF_GAIN_USAGE_LSC_GAINS:
isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP;
break;
case RKISP1_CIF_ISP_DPF_GAIN_USAGE_NF_LSC_GAINS:
isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_USE_NF_GAIN |
RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP |
RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP;
break;
case RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_GAINS:
isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP;
break;
case RKISP1_CIF_ISP_DPF_GAIN_USAGE_AWB_LSC_GAINS:
isp_dpf_mode = RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP |
RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP;
break;
case RKISP1_CIF_ISP_DPF_GAIN_USAGE_DISABLED:
default:
isp_dpf_mode = 0;
break;
}
if (arg->nll.scale_mode == RKISP1_CIF_ISP_NLL_SCALE_LOGARITHMIC)
isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_NLL_SEGMENTATION;
if (arg->rb_flt.fltsize == RKISP1_CIF_ISP_DPF_RB_FILTERSIZE_9x9)
isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_RB_FLTSIZE_9x9;
if (!arg->rb_flt.r_enable)
isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_R_FLT_DIS;
if (!arg->rb_flt.b_enable)
isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_B_FLT_DIS;
if (!arg->g_flt.gb_enable)
isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_GB_FLT_DIS;
if (!arg->g_flt.gr_enable)
isp_dpf_mode |= RKISP1_CIF_ISP_DPF_MODE_GR_FLT_DIS;
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPF_MODE,
isp_dpf_mode);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_NF_GAIN_B,
arg->gain.nf_b_gain);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_NF_GAIN_R,
arg->gain.nf_r_gain);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_NF_GAIN_GB,
arg->gain.nf_gb_gain);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_NF_GAIN_GR,
arg->gain.nf_gr_gain);
for (i = 0; i < RKISP1_CIF_ISP_DPF_MAX_NLF_COEFFS; i++) {
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_DPF_NULL_COEFF_0 + i * 4,
arg->nll.coeff[i]);
}
spatial_coeff = arg->g_flt.spatial_coeff[0] |
(arg->g_flt.spatial_coeff[1] << 8) |
(arg->g_flt.spatial_coeff[2] << 16) |
(arg->g_flt.spatial_coeff[3] << 24);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_S_WEIGHT_G_1_4,
spatial_coeff);
spatial_coeff = arg->g_flt.spatial_coeff[4] |
(arg->g_flt.spatial_coeff[5] << 8);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_S_WEIGHT_G_5_6,
spatial_coeff);
spatial_coeff = arg->rb_flt.spatial_coeff[0] |
(arg->rb_flt.spatial_coeff[1] << 8) |
(arg->rb_flt.spatial_coeff[2] << 16) |
(arg->rb_flt.spatial_coeff[3] << 24);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_1_4,
spatial_coeff);
spatial_coeff = arg->rb_flt.spatial_coeff[4] |
(arg->rb_flt.spatial_coeff[5] << 8);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_5_6,
spatial_coeff);
}
static void
rkisp1_dpf_strength_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_dpf_strength_config *arg)
{
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_STRENGTH_B, arg->b);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_STRENGTH_G, arg->g);
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPF_STRENGTH_R, arg->r);
}
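/*
* Apply a parameters buffer to the "other" (non-measurement, non-LSC)
* ISP blocks. For each module, a set module_cfg_update bit causes the
* block to be reprogrammed, and a set module_en_update bit causes its
* enable state to be updated from module_ens.
*/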
static void
rkisp1_isp_isr_other_config(struct rkisp1_params *params,
const struct rkisp1_params_cfg *new_params)
{
unsigned int module_en_update, module_cfg_update, module_ens;
module_en_update = new_params->module_en_update;
module_cfg_update = new_params->module_cfg_update;
module_ens = new_params->module_ens;
/* update dpcc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_DPCC)
rkisp1_dpcc_config(params,
&new_params->others.dpcc_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_DPCC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_DPCC)
rkisp1_param_set_bits(params,
RKISP1_CIF_ISP_DPCC_MODE,
RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_ISP_DPCC_MODE,
RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE);
}
/* update bls config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_BLS)
rkisp1_bls_config(params,
&new_params->others.bls_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_BLS) {
if (module_ens & RKISP1_CIF_ISP_MODULE_BLS)
rkisp1_param_set_bits(params,
RKISP1_CIF_ISP_BLS_CTRL,
RKISP1_CIF_ISP_BLS_ENA);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_ISP_BLS_CTRL,
RKISP1_CIF_ISP_BLS_ENA);
}
/* update sdg config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_SDG)
rkisp1_sdg_config(params,
&new_params->others.sdg_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_SDG) {
if (module_ens & RKISP1_CIF_ISP_MODULE_SDG)
rkisp1_param_set_bits(params,
RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
}
/* update awb gains */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN)
params->ops->awb_gain_config(params, &new_params->others.awb_gain_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AWB_GAIN)
rkisp1_param_set_bits(params,
RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
}
/* update bdm config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_BDM)
rkisp1_bdm_config(params,
&new_params->others.bdm_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_BDM) {
if (module_ens & RKISP1_CIF_ISP_MODULE_BDM)
rkisp1_param_set_bits(params,
RKISP1_CIF_ISP_DEMOSAIC,
RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_ISP_DEMOSAIC,
RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
}
/* update filter config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_FLT)
rkisp1_flt_config(params,
&new_params->others.flt_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_FLT) {
if (module_ens & RKISP1_CIF_ISP_MODULE_FLT)
rkisp1_param_set_bits(params,
RKISP1_CIF_ISP_FILT_MODE,
RKISP1_CIF_ISP_FLT_ENA);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_ISP_FILT_MODE,
RKISP1_CIF_ISP_FLT_ENA);
}
/* update ctk config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_CTK)
rkisp1_ctk_config(params,
&new_params->others.ctk_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_CTK)
rkisp1_ctk_enable(params, !!(module_ens & RKISP1_CIF_ISP_MODULE_CTK));
/* update goc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_GOC)
params->ops->goc_config(params, &new_params->others.goc_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_GOC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_GOC)
rkisp1_param_set_bits(params,
RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
}
/* update cproc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_CPROC)
rkisp1_cproc_config(params,
&new_params->others.cproc_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_CPROC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_CPROC)
rkisp1_param_set_bits(params,
RKISP1_CIF_C_PROC_CTRL,
RKISP1_CIF_C_PROC_CTR_ENABLE);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_C_PROC_CTRL,
RKISP1_CIF_C_PROC_CTR_ENABLE);
}
/* update ie config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_IE)
rkisp1_ie_config(params, &new_params->others.ie_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_IE)
rkisp1_ie_enable(params, !!(module_ens & RKISP1_CIF_ISP_MODULE_IE));
/* update dpf config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_DPF)
rkisp1_dpf_config(params, &new_params->others.dpf_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_DPF) {
if (module_ens & RKISP1_CIF_ISP_MODULE_DPF)
rkisp1_param_set_bits(params,
RKISP1_CIF_ISP_DPF_MODE,
RKISP1_CIF_ISP_DPF_MODE_EN);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_ISP_DPF_MODE,
RKISP1_CIF_ISP_DPF_MODE_EN);
}
if ((module_en_update & RKISP1_CIF_ISP_MODULE_DPF_STRENGTH) ||
(module_cfg_update & RKISP1_CIF_ISP_MODULE_DPF_STRENGTH)) {
/* update dpf strength config */
rkisp1_dpf_strength_config(params,
&new_params->others.dpf_strength_config);
}
}
static void
rkisp1_isp_isr_lsc_config(struct rkisp1_params *params,
const struct rkisp1_params_cfg *new_params)
{
unsigned int module_en_update, module_cfg_update, module_ens;
module_en_update = new_params->module_en_update;
module_cfg_update = new_params->module_cfg_update;
module_ens = new_params->module_ens;
/* update lsc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_LSC)
rkisp1_lsc_config(params,
&new_params->others.lsc_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_LSC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_LSC)
rkisp1_param_set_bits(params,
RKISP1_CIF_ISP_LSC_CTRL,
RKISP1_CIF_ISP_LSC_CTRL_ENA);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_ISP_LSC_CTRL,
RKISP1_CIF_ISP_LSC_CTRL_ENA);
}
}
static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
struct rkisp1_params_cfg *new_params)
{
unsigned int module_en_update, module_cfg_update, module_ens;
module_en_update = new_params->module_en_update;
module_cfg_update = new_params->module_cfg_update;
module_ens = new_params->module_ens;
/* update awb config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB)
params->ops->awb_meas_config(params, &new_params->meas.awb_meas_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB)
params->ops->awb_meas_enable(params,
&new_params->meas.awb_meas_config,
!!(module_ens & RKISP1_CIF_ISP_MODULE_AWB));
/* update afc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AFC)
params->ops->afm_config(params,
&new_params->meas.afc_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AFC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AFC)
rkisp1_param_set_bits(params,
RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
}
/* update hst config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_HST)
params->ops->hst_config(params,
&new_params->meas.hst_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_HST)
params->ops->hst_enable(params,
&new_params->meas.hst_config,
!!(module_ens & RKISP1_CIF_ISP_MODULE_HST));
/* update aec config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AEC)
params->ops->aec_config(params,
&new_params->meas.aec_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AEC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AEC)
rkisp1_param_set_bits(params,
RKISP1_CIF_ISP_EXP_CTRL,
RKISP1_CIF_ISP_EXP_ENA);
else
rkisp1_param_clear_bits(params,
RKISP1_CIF_ISP_EXP_CTRL,
RKISP1_CIF_ISP_EXP_ENA);
}
}
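/*
* Peek at the oldest queued parameters buffer, if any. Must be called
* with config_lock held; the buffer stays on the list until it is
* completed with rkisp1_params_complete_buffer().
*/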
static bool rkisp1_params_get_buffer(struct rkisp1_params *params,
struct rkisp1_buffer **buf,
struct rkisp1_params_cfg **cfg)
{
if (list_empty(&params->params))
return false;
*buf = list_first_entry(&params->params, struct rkisp1_buffer, queue);
*cfg = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0);
return true;
}
static void rkisp1_params_complete_buffer(struct rkisp1_params *params,
struct rkisp1_buffer *buf,
unsigned int frame_sequence)
{
list_del(&buf->queue);
buf->vb.sequence = frame_sequence;
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
void rkisp1_params_isr(struct rkisp1_device *rkisp1)
{
struct rkisp1_params *params = &rkisp1->params;
struct rkisp1_params_cfg *new_params;
struct rkisp1_buffer *cur_buf;
spin_lock(&params->config_lock);
if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
goto unlock;
rkisp1_isp_isr_other_config(params, new_params);
rkisp1_isp_isr_lsc_config(params, new_params);
rkisp1_isp_isr_meas_config(params, new_params);
/* update shadow register immediately */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD);
/*
* This ISR is called when the ISP finishes processing a frame
* (RKISP1_CIF_ISP_FRAME). Configurations performed here will be
* applied on the next frame. Since frame_sequence is updated on the
* vertical sync signal, we should use frame_sequence + 1 here to
* indicate to userspace on which frame these parameters are being
* applied.
*/
rkisp1_params_complete_buffer(params, cur_buf,
rkisp1->isp.frame_sequence + 1);
unlock:
spin_unlock(&params->config_lock);
}
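/*
* Default measurement configurations, applied from
* rkisp1_params_pre_configure(). The initializers are positional and
* follow the layout of the corresponding UAPI structures.
*/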
static const struct rkisp1_cif_isp_awb_meas_config rkisp1_awb_params_default_config = {
{
0, 0, RKISP1_DEFAULT_WIDTH, RKISP1_DEFAULT_HEIGHT
},
RKISP1_CIF_ISP_AWB_MODE_YCBCR, 200, 30, 20, 20, 0, 128, 128
};
static const struct rkisp1_cif_isp_aec_config rkisp1_aec_params_default_config = {
RKISP1_CIF_ISP_EXP_MEASURING_MODE_0,
RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP_0,
{
RKISP1_DEFAULT_WIDTH >> 2, RKISP1_DEFAULT_HEIGHT >> 2,
RKISP1_DEFAULT_WIDTH >> 1, RKISP1_DEFAULT_HEIGHT >> 1
}
};
static const struct rkisp1_cif_isp_hst_config rkisp1_hst_params_default_config = {
RKISP1_CIF_ISP_HISTOGRAM_MODE_RGB_COMBINED,
3,
{
RKISP1_DEFAULT_WIDTH >> 2, RKISP1_DEFAULT_HEIGHT >> 2,
RKISP1_DEFAULT_WIDTH >> 1, RKISP1_DEFAULT_HEIGHT >> 1
},
{
0, /* To be filled in with 0x01 at runtime. */
}
};
static const struct rkisp1_cif_isp_afc_config rkisp1_afc_params_default_config = {
1,
{
{
300, 225, 200, 150
}
},
4,
14
};
void rkisp1_params_pre_configure(struct rkisp1_params *params,
enum rkisp1_fmt_raw_pat_type bayer_pat,
enum v4l2_quantization quantization,
enum v4l2_ycbcr_encoding ycbcr_encoding)
{
struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config;
struct rkisp1_params_cfg *new_params;
struct rkisp1_buffer *cur_buf;
params->quantization = quantization;
params->ycbcr_encoding = ycbcr_encoding;
params->raw_type = bayer_pat;
params->ops->awb_meas_config(params, &rkisp1_awb_params_default_config);
params->ops->awb_meas_enable(params, &rkisp1_awb_params_default_config,
true);
params->ops->aec_config(params, &rkisp1_aec_params_default_config);
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
RKISP1_CIF_ISP_EXP_ENA);
params->ops->afm_config(params, &rkisp1_afc_params_default_config);
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
memset(hst.hist_weight, 0x01, sizeof(hst.hist_weight));
params->ops->hst_config(params, &hst);
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
rkisp1_hst_params_default_config.mode);
rkisp1_csm_config(params);
spin_lock_irq(&params->config_lock);
/* apply the first buffer if there is one already */
if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
goto unlock;
rkisp1_isp_isr_other_config(params, new_params);
rkisp1_isp_isr_meas_config(params, new_params);
/* update shadow register immediately */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD);
unlock:
spin_unlock_irq(&params->config_lock);
}
void rkisp1_params_post_configure(struct rkisp1_params *params)
{
struct rkisp1_params_cfg *new_params;
struct rkisp1_buffer *cur_buf;
spin_lock_irq(&params->config_lock);
/*
* Apply LSC parameters from the first buffer (if one is already
* available). This must be done after the ISP gets started in the
* ISP8000Nano v18.02 (found in the i.MX8MP) as access to the LSC RAM
* is gated by the ISP_CTRL.ISP_ENABLE bit. As this initialization
* ordering doesn't affect other ISP versions negatively, do so
* unconditionally.
*/
if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params))
goto unlock;
rkisp1_isp_isr_lsc_config(params, new_params);
/* update shadow register immediately */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD);
rkisp1_params_complete_buffer(params, cur_buf, 0);
unlock:
spin_unlock_irq(&params->config_lock);
}
/*
* Not called when the camera is active, therefore there is no need to acquire
* a lock.
*/
void rkisp1_params_disable(struct rkisp1_params *params)
{
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE,
RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
RKISP1_CIF_ISP_LSC_CTRL_ENA);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_BLS_CTRL,
RKISP1_CIF_ISP_BLS_ENA);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DEMOSAIC,
RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_FILT_MODE,
RKISP1_CIF_ISP_FLT_ENA);
params->ops->awb_meas_enable(params, NULL, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
RKISP1_CIF_ISP_EXP_ENA);
rkisp1_ctk_enable(params, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL,
RKISP1_CIF_C_PROC_CTR_ENABLE);
params->ops->hst_enable(params, NULL, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
rkisp1_ie_enable(params, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPF_MODE,
RKISP1_CIF_ISP_DPF_MODE_EN);
}
static const struct rkisp1_params_ops rkisp1_v10_params_ops = {
.lsc_matrix_config = rkisp1_lsc_matrix_config_v10,
.goc_config = rkisp1_goc_config_v10,
.awb_meas_config = rkisp1_awb_meas_config_v10,
.awb_meas_enable = rkisp1_awb_meas_enable_v10,
.awb_gain_config = rkisp1_awb_gain_config_v10,
.aec_config = rkisp1_aec_config_v10,
.hst_config = rkisp1_hst_config_v10,
.hst_enable = rkisp1_hst_enable_v10,
.afm_config = rkisp1_afm_config_v10,
};
static struct rkisp1_params_ops rkisp1_v12_params_ops = {
.lsc_matrix_config = rkisp1_lsc_matrix_config_v12,
.goc_config = rkisp1_goc_config_v12,
.awb_meas_config = rkisp1_awb_meas_config_v12,
.awb_meas_enable = rkisp1_awb_meas_enable_v12,
.awb_gain_config = rkisp1_awb_gain_config_v12,
.aec_config = rkisp1_aec_config_v12,
.hst_config = rkisp1_hst_config_v12,
.hst_enable = rkisp1_hst_enable_v12,
.afm_config = rkisp1_afm_config_v12,
};
static int rkisp1_params_enum_fmt_meta_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct video_device *video = video_devdata(file);
struct rkisp1_params *params = video_get_drvdata(video);
if (f->index > 0 || f->type != video->queue->type)
return -EINVAL;
f->pixelformat = params->vdev_fmt.fmt.meta.dataformat;
return 0;
}
static int rkisp1_params_g_fmt_meta_out(struct file *file, void *fh,
struct v4l2_format *f)
{
struct video_device *video = video_devdata(file);
struct rkisp1_params *params = video_get_drvdata(video);
struct v4l2_meta_format *meta = &f->fmt.meta;
if (f->type != video->queue->type)
return -EINVAL;
memset(meta, 0, sizeof(*meta));
meta->dataformat = params->vdev_fmt.fmt.meta.dataformat;
meta->buffersize = params->vdev_fmt.fmt.meta.buffersize;
return 0;
}
static int rkisp1_params_querycap(struct file *file,
void *priv, struct v4l2_capability *cap)
{
struct video_device *vdev = video_devdata(file);
strscpy(cap->driver, RKISP1_DRIVER_NAME, sizeof(cap->driver));
strscpy(cap->card, vdev->name, sizeof(cap->card));
strscpy(cap->bus_info, RKISP1_BUS_INFO, sizeof(cap->bus_info));
return 0;
}
/* ISP params video device IOCTLs */
static const struct v4l2_ioctl_ops rkisp1_params_ioctl = {
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_enum_fmt_meta_out = rkisp1_params_enum_fmt_meta_out,
.vidioc_g_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
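/* The metadata format is fixed, so set/try simply report it back. */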
.vidioc_s_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
.vidioc_try_fmt_meta_out = rkisp1_params_g_fmt_meta_out,
.vidioc_querycap = rkisp1_params_querycap,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static int rkisp1_params_vb2_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers,
unsigned int *num_planes,
unsigned int sizes[],
struct device *alloc_devs[])
{
*num_buffers = clamp_t(u32, *num_buffers,
RKISP1_ISP_PARAMS_REQ_BUFS_MIN,
RKISP1_ISP_PARAMS_REQ_BUFS_MAX);
*num_planes = 1;
sizes[0] = sizeof(struct rkisp1_params_cfg);
return 0;
}
static void rkisp1_params_vb2_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct rkisp1_buffer *params_buf =
container_of(vbuf, struct rkisp1_buffer, vb);
struct vb2_queue *vq = vb->vb2_queue;
struct rkisp1_params *params = vq->drv_priv;
spin_lock_irq(&params->config_lock);
list_add_tail(&params_buf->queue, &params->params);
spin_unlock_irq(&params->config_lock);
}
static int rkisp1_params_vb2_buf_prepare(struct vb2_buffer *vb)
{
if (vb2_plane_size(vb, 0) < sizeof(struct rkisp1_params_cfg))
return -EINVAL;
vb2_set_plane_payload(vb, 0, sizeof(struct rkisp1_params_cfg));
return 0;
}
static void rkisp1_params_vb2_stop_streaming(struct vb2_queue *vq)
{
struct rkisp1_params *params = vq->drv_priv;
struct rkisp1_buffer *buf;
LIST_HEAD(tmp_list);
/*
* First move the buffers into a local list 'tmp_list', then iterate
* over it and call vb2_buffer_done() without holding the lock.
*/
spin_lock_irq(&params->config_lock);
list_splice_init(&params->params, &tmp_list);
spin_unlock_irq(&params->config_lock);
list_for_each_entry(buf, &tmp_list, queue)
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
static const struct vb2_ops rkisp1_params_vb2_ops = {
.queue_setup = rkisp1_params_vb2_queue_setup,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.buf_queue = rkisp1_params_vb2_buf_queue,
.buf_prepare = rkisp1_params_vb2_buf_prepare,
.stop_streaming = rkisp1_params_vb2_stop_streaming,
};
static const struct v4l2_file_operations rkisp1_params_fops = {
.mmap = vb2_fop_mmap,
.unlocked_ioctl = video_ioctl2,
.poll = vb2_fop_poll,
.open = v4l2_fh_open,
.release = vb2_fop_release
};
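/*
* Parameters buffers are only accessed by the CPU, so the queue uses
* vmalloc-backed memory rather than DMA-contiguous buffers.
*/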
static int rkisp1_params_init_vb2_queue(struct vb2_queue *q,
struct rkisp1_params *params)
{
struct rkisp1_vdev_node *node;
node = container_of(q, struct rkisp1_vdev_node, buf_queue);
q->type = V4L2_BUF_TYPE_META_OUTPUT;
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q->drv_priv = params;
q->ops = &rkisp1_params_vb2_ops;
q->mem_ops = &vb2_vmalloc_memops;
q->buf_struct_size = sizeof(struct rkisp1_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &node->vlock;
return vb2_queue_init(q);
}
static void rkisp1_init_params(struct rkisp1_params *params)
{
params->vdev_fmt.fmt.meta.dataformat =
V4L2_META_FMT_RK_ISP1_PARAMS;
params->vdev_fmt.fmt.meta.buffersize =
sizeof(struct rkisp1_params_cfg);
if (params->rkisp1->info->isp_ver == RKISP1_V12)
params->ops = &rkisp1_v12_params_ops;
else
params->ops = &rkisp1_v10_params_ops;
}
int rkisp1_params_register(struct rkisp1_device *rkisp1)
{
struct rkisp1_params *params = &rkisp1->params;
struct rkisp1_vdev_node *node = &params->vnode;
struct video_device *vdev = &node->vdev;
int ret;
params->rkisp1 = rkisp1;
mutex_init(&node->vlock);
INIT_LIST_HEAD(&params->params);
spin_lock_init(&params->config_lock);
strscpy(vdev->name, RKISP1_PARAMS_DEV_NAME, sizeof(vdev->name));
video_set_drvdata(vdev, params);
vdev->ioctl_ops = &rkisp1_params_ioctl;
vdev->fops = &rkisp1_params_fops;
vdev->release = video_device_release_empty;
/*
* Provide a mutex to the V4L2 core. It will be used
* to protect all fops and V4L2 ioctls.
*/
vdev->lock = &node->vlock;
vdev->v4l2_dev = &rkisp1->v4l2_dev;
vdev->queue = &node->buf_queue;
vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_META_OUTPUT;
vdev->vfl_dir = VFL_DIR_TX;
rkisp1_params_init_vb2_queue(vdev->queue, params);
rkisp1_init_params(params);
node->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
if (ret)
goto error;
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(rkisp1->dev,
"failed to register %s, ret=%d\n", vdev->name, ret);
goto error;
}
return 0;
error:
media_entity_cleanup(&vdev->entity);
mutex_destroy(&node->vlock);
return ret;
}
void rkisp1_params_unregister(struct rkisp1_device *rkisp1)
{
struct rkisp1_params *params = &rkisp1->params;
struct rkisp1_vdev_node *node = &params->vnode;
struct video_device *vdev = &node->vdev;
if (!video_is_registered(vdev))
return;
vb2_video_unregister_device(vdev);
media_entity_cleanup(&vdev->entity);
mutex_destroy(&node->vlock);
}
| linux-master | drivers/media/platform/rockchip/rkisp1/rkisp1-params.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Rockchip ISP1 Driver - ISP Subdevice
*
* Copyright (C) 2019 Collabora, Ltd.
*
* Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <media/v4l2-event.h>
#include "rkisp1-common.h"
#define RKISP1_DEF_SINK_PAD_FMT MEDIA_BUS_FMT_SRGGB10_1X10
#define RKISP1_DEF_SRC_PAD_FMT MEDIA_BUS_FMT_YUYV8_2X8
#define RKISP1_ISP_DEV_NAME RKISP1_DRIVER_NAME "_isp"
/*
* NOTE: The MIPI controller and the input MUX are also configured in this
* file, because the ISP subdev describes not only the ISP submodule (input
* size, format, output size, format) but also a virtual route device.
*/
/*
* Many variables in the code below are named with format/frame; the
* diagram below shows their meaning.
* Cropping in the sink pad defines the image region from the sensor.
* Cropping in the source pad defines the region for the Image
* Stabilizer (IS).
*
* Cropping regions of ISP
*
* +---------------------------------------------------------+
* | Sensor image |
* | +---------------------------------------------------+ |
* | | CIF_ISP_ACQ (for black level) | |
* | | sink pad format | |
* | | +--------------------------------------------+ | |
* | | | CIF_ISP_OUT | | |
* | | | sink pad crop | | |
* | | | +---------------------------------+ | | |
* | | | | CIF_ISP_IS | | | |
* | | | | source pad crop and format | | | |
* | | | +---------------------------------+ | | |
* | | +--------------------------------------------+ | |
* | +---------------------------------------------------+ |
* +---------------------------------------------------------+
*/
/* ----------------------------------------------------------------------------
* Helpers
*/
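/*
* Return the TRY or ACTIVE format of a pad. The ACTIVE formats are
* stored in isp->pad_cfg, wrapped in a local subdev state so that the
* same v4l2 helper serves both cases.
*/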
static struct v4l2_mbus_framefmt *
rkisp1_isp_get_pad_fmt(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
struct v4l2_subdev_state state = {
.pads = isp->pad_cfg
};
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&isp->sd, sd_state, pad);
else
return v4l2_subdev_get_try_format(&isp->sd, &state, pad);
}
static struct v4l2_rect *
rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
struct v4l2_subdev_state state = {
.pads = isp->pad_cfg
};
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_crop(&isp->sd, sd_state, pad);
else
return v4l2_subdev_get_try_crop(&isp->sd, &state, pad);
}
/* ----------------------------------------------------------------------------
* Camera Interface registers configurations
*/
/*
* Image Stabilization.
* This should only be called when configuring the CIF
* or at the frame-end interrupt.
*/
static void rkisp1_config_ism(struct rkisp1_isp *isp)
{
const struct v4l2_rect *src_crop =
rkisp1_isp_get_pad_crop(isp, NULL,
RKISP1_ISP_PAD_SOURCE_VIDEO,
V4L2_SUBDEV_FORMAT_ACTIVE);
struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 val;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_RECENTER, 0);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_MAX_DX, 0);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_MAX_DY, 0);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_DISPLACE, 0);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_H_OFFS, src_crop->left);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_V_OFFS, src_crop->top);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_H_SIZE, src_crop->width);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_V_SIZE, src_crop->height);
/* IS (Image Stabilization) is always on, working as an output crop */
rkisp1_write(rkisp1, RKISP1_CIF_ISP_IS_CTRL, 1);
val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
val |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val);
}
/*
* Configure the ISP blocks with the input format, size, etc.
*/
static int rkisp1_config_isp(struct rkisp1_isp *isp,
enum v4l2_mbus_type mbus_type, u32 mbus_flags)
{
struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 isp_ctrl = 0, irq_mask = 0, acq_mult = 0, acq_prop = 0;
const struct rkisp1_mbus_info *sink_fmt = isp->sink_fmt;
const struct rkisp1_mbus_info *src_fmt = isp->src_fmt;
const struct v4l2_mbus_framefmt *sink_frm;
const struct v4l2_rect *sink_crop;
sink_frm = rkisp1_isp_get_pad_fmt(isp, NULL,
RKISP1_ISP_PAD_SINK_VIDEO,
V4L2_SUBDEV_FORMAT_ACTIVE);
sink_crop = rkisp1_isp_get_pad_crop(isp, NULL,
RKISP1_ISP_PAD_SINK_VIDEO,
V4L2_SUBDEV_FORMAT_ACTIVE);
if (sink_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
acq_mult = 1;
if (src_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
if (mbus_type == V4L2_MBUS_BT656)
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT_ITU656;
else
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_RAW_PICT;
} else {
rkisp1_write(rkisp1, RKISP1_CIF_ISP_DEMOSAIC,
RKISP1_CIF_ISP_DEMOSAIC_TH(0xc));
if (mbus_type == V4L2_MBUS_BT656)
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU656;
else
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_BAYER_ITU601;
}
} else if (sink_fmt->pixel_enc == V4L2_PIXEL_ENC_YUV) {
acq_mult = 2;
if (mbus_type == V4L2_MBUS_CSI2_DPHY) {
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601;
} else {
if (mbus_type == V4L2_MBUS_BT656)
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU656;
else
isp_ctrl = RKISP1_CIF_ISP_CTRL_ISP_MODE_ITU601;
}
irq_mask |= RKISP1_CIF_ISP_DATA_LOSS;
}
/* Set up input acquisition properties */
if (mbus_type == V4L2_MBUS_BT656 || mbus_type == V4L2_MBUS_PARALLEL) {
if (mbus_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_POS_EDGE;
switch (sink_fmt->bus_width) {
case 8:
acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_8B_ZERO;
break;
case 10:
acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_10B_ZERO;
break;
case 12:
acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_IN_SEL_12B;
break;
default:
dev_err(rkisp1->dev, "Invalid bus width %u\n",
sink_fmt->bus_width);
return -EINVAL;
}
}
if (mbus_type == V4L2_MBUS_PARALLEL) {
if (mbus_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_VSYNC_LOW;
if (mbus_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
acq_prop |= RKISP1_CIF_ISP_ACQ_PROP_HSYNC_LOW;
}
rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, isp_ctrl);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_PROP,
acq_prop | sink_fmt->yuv_seq |
RKISP1_CIF_ISP_ACQ_PROP_BAYER_PAT(sink_fmt->bayer_pat) |
RKISP1_CIF_ISP_ACQ_PROP_FIELD_SEL_ALL);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_NR_FRAMES, 0);
/* Acquisition Size */
rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_H_OFFS, 0);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_V_OFFS, 0);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_H_SIZE,
acq_mult * sink_frm->width);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_ACQ_V_SIZE, sink_frm->height);
/* ISP Out Area */
rkisp1_write(rkisp1, RKISP1_CIF_ISP_OUT_H_OFFS, sink_crop->left);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_OUT_V_OFFS, sink_crop->top);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_OUT_H_SIZE, sink_crop->width);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_OUT_V_SIZE, sink_crop->height);
irq_mask |= RKISP1_CIF_ISP_FRAME | RKISP1_CIF_ISP_V_START |
RKISP1_CIF_ISP_PIC_SIZE_ERROR;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_IMSC, irq_mask);
if (src_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
rkisp1_params_disable(&rkisp1->params);
} else {
struct v4l2_mbus_framefmt *src_frm;
src_frm = rkisp1_isp_get_pad_fmt(isp, NULL,
RKISP1_ISP_PAD_SOURCE_VIDEO,
V4L2_SUBDEV_FORMAT_ACTIVE);
rkisp1_params_pre_configure(&rkisp1->params, sink_fmt->bayer_pat,
src_frm->quantization,
src_frm->ycbcr_enc);
}
return 0;
}
/* Configure MUX */
static void rkisp1_config_path(struct rkisp1_isp *isp,
enum v4l2_mbus_type mbus_type)
{
struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 dpcl = rkisp1_read(rkisp1, RKISP1_CIF_VI_DPCL);
if (mbus_type == V4L2_MBUS_BT656 || mbus_type == V4L2_MBUS_PARALLEL)
dpcl |= RKISP1_CIF_VI_DPCL_IF_SEL_PARALLEL;
else if (mbus_type == V4L2_MBUS_CSI2_DPHY)
dpcl |= RKISP1_CIF_VI_DPCL_IF_SEL_MIPI;
rkisp1_write(rkisp1, RKISP1_CIF_VI_DPCL, dpcl);
}
/* Hardware configuration entry point */
static int rkisp1_config_cif(struct rkisp1_isp *isp,
enum v4l2_mbus_type mbus_type, u32 mbus_flags)
{
int ret;
ret = rkisp1_config_isp(isp, mbus_type, mbus_flags);
if (ret)
return ret;
rkisp1_config_path(isp, mbus_type);
rkisp1_config_ism(isp);
return 0;
}
static void rkisp1_isp_stop(struct rkisp1_isp *isp)
{
struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 val;
/*
* Stop ISP(mi) at the MI frame end -> stop ISP(mipi) ->
* stop ISP(isp) -> wait for ISP(isp) to be off
*/
/* stop and clear MI and ISP interrupts */
rkisp1_write(rkisp1, RKISP1_CIF_ISP_IMSC, 0);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_ICR, ~0);
rkisp1_write(rkisp1, RKISP1_CIF_MI_IMSC, 0);
rkisp1_write(rkisp1, RKISP1_CIF_MI_ICR, ~0);
/* stop ISP */
val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
val &= ~(RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE |
RKISP1_CIF_ISP_CTRL_ISP_ENABLE);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val);
val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL,
val | RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD);
readx_poll_timeout(readl, rkisp1->base_addr + RKISP1_CIF_ISP_RIS,
val, val & RKISP1_CIF_ISP_OFF, 20, 100);
rkisp1_write(rkisp1, RKISP1_CIF_VI_IRCL,
RKISP1_CIF_VI_IRCL_MIPI_SW_RST |
RKISP1_CIF_VI_IRCL_ISP_SW_RST);
rkisp1_write(rkisp1, RKISP1_CIF_VI_IRCL, 0x0);
}
static void rkisp1_config_clk(struct rkisp1_isp *isp)
{
struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 val = RKISP1_CIF_VI_ICCL_ISP_CLK | RKISP1_CIF_VI_ICCL_CP_CLK |
RKISP1_CIF_VI_ICCL_MRSZ_CLK | RKISP1_CIF_VI_ICCL_SRSZ_CLK |
RKISP1_CIF_VI_ICCL_JPEG_CLK | RKISP1_CIF_VI_ICCL_MI_CLK |
RKISP1_CIF_VI_ICCL_IE_CLK | RKISP1_CIF_VI_ICCL_MIPI_CLK |
RKISP1_CIF_VI_ICCL_DCROP_CLK;
rkisp1_write(rkisp1, RKISP1_CIF_VI_ICCL, val);
/* Ensure SP and MP can run at the same time on V12. */
if (rkisp1->info->isp_ver == RKISP1_V12) {
val = RKISP1_CIF_CLK_CTRL_MI_Y12 | RKISP1_CIF_CLK_CTRL_MI_SP |
RKISP1_CIF_CLK_CTRL_MI_RAW0 | RKISP1_CIF_CLK_CTRL_MI_RAW1 |
RKISP1_CIF_CLK_CTRL_MI_READ | RKISP1_CIF_CLK_CTRL_MI_RAWRD |
RKISP1_CIF_CLK_CTRL_CP | RKISP1_CIF_CLK_CTRL_IE;
rkisp1_write(rkisp1, RKISP1_CIF_VI_ISP_CLK_CTRL_V12, val);
}
}
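/*
* Start the ISP: enable the required module clocks, then set the
* enable, inform-enable and config-update bits in a single write.
* Parameters that need a running ISP (LSC RAM access) are applied
* afterwards via rkisp1_params_post_configure().
*/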
static void rkisp1_isp_start(struct rkisp1_isp *isp)
{
struct rkisp1_device *rkisp1 = isp->rkisp1;
u32 val;
rkisp1_config_clk(isp);
/* Activate ISP */
val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_CTRL);
val |= RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD |
RKISP1_CIF_ISP_CTRL_ISP_ENABLE |
RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val);
if (isp->src_fmt->pixel_enc != V4L2_PIXEL_ENC_BAYER)
rkisp1_params_post_configure(&rkisp1->params);
}
/* ----------------------------------------------------------------------------
* Subdev pad operations
*/
static inline struct rkisp1_isp *to_rkisp1_isp(struct v4l2_subdev *sd)
{
return container_of(sd, struct rkisp1_isp, sd);
}
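/*
* Enumerate media bus codes by walking the global mbus info table,
* counting only entries that match the pad direction. The parameters
* and statistics pads expose a single fixed metadata format.
*/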
static int rkisp1_isp_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
unsigned int i, dir;
int pos = 0;
if (code->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
dir = RKISP1_ISP_SD_SINK;
} else if (code->pad == RKISP1_ISP_PAD_SOURCE_VIDEO) {
dir = RKISP1_ISP_SD_SRC;
} else {
if (code->index > 0)
return -EINVAL;
code->code = MEDIA_BUS_FMT_METADATA_FIXED;
return 0;
}
for (i = 0; ; i++) {
const struct rkisp1_mbus_info *fmt =
rkisp1_mbus_info_get_by_index(i);
if (!fmt)
return -EINVAL;
if (fmt->direction & dir)
pos++;
if (code->index == pos - 1) {
code->code = fmt->mbus_code;
if (fmt->pixel_enc == V4L2_PIXEL_ENC_YUV &&
dir == RKISP1_ISP_SD_SRC)
code->flags =
V4L2_SUBDEV_MBUS_CODE_CSC_QUANTIZATION;
return 0;
}
}
return -EINVAL;
}
static int rkisp1_isp_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
const struct rkisp1_mbus_info *mbus_info;
if (fse->pad == RKISP1_ISP_PAD_SINK_PARAMS ||
fse->pad == RKISP1_ISP_PAD_SOURCE_STATS)
return -ENOTTY;
if (fse->index > 0)
return -EINVAL;
mbus_info = rkisp1_mbus_info_get_by_code(fse->code);
if (!mbus_info)
return -EINVAL;
if (!(mbus_info->direction & RKISP1_ISP_SD_SINK) &&
fse->pad == RKISP1_ISP_PAD_SINK_VIDEO)
return -EINVAL;
if (!(mbus_info->direction & RKISP1_ISP_SD_SRC) &&
fse->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
return -EINVAL;
fse->min_width = RKISP1_ISP_MIN_WIDTH;
fse->max_width = RKISP1_ISP_MAX_WIDTH;
fse->min_height = RKISP1_ISP_MIN_HEIGHT;
fse->max_height = RKISP1_ISP_MAX_HEIGHT;
return 0;
}
static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop, *src_crop;
/* Video. */
sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
sink_fmt->field = V4L2_FIELD_NONE;
sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
sink_fmt->colorspace = V4L2_COLORSPACE_RAW;
sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE;
sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO);
sink_crop->width = RKISP1_DEFAULT_WIDTH;
sink_crop->height = RKISP1_DEFAULT_HEIGHT;
sink_crop->left = 0;
sink_crop->top = 0;
src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO);
*src_fmt = *sink_fmt;
src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
src_fmt->colorspace = V4L2_COLORSPACE_SRGB;
src_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
src_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
src_crop = v4l2_subdev_get_try_crop(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO);
*src_crop = *sink_crop;
/* Parameters and statistics. */
sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SINK_PARAMS);
src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_ISP_PAD_SOURCE_STATS);
sink_fmt->width = 0;
sink_fmt->height = 0;
sink_fmt->field = V4L2_FIELD_NONE;
sink_fmt->code = MEDIA_BUS_FMT_METADATA_FIXED;
*src_fmt = *sink_fmt;
return 0;
}
static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
const struct rkisp1_mbus_info *sink_info;
const struct rkisp1_mbus_info *src_info;
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_mbus_framefmt *src_fmt;
const struct v4l2_rect *src_crop;
bool set_csc;
sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO, which);
src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
/*
* Media bus code. The ISP can operate in pass-through mode (Bayer in,
* Bayer out or YUV in, YUV out) or process Bayer data to YUV, but
* can't convert from YUV to Bayer.
*/
sink_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
src_fmt->code = format->code;
src_info = rkisp1_mbus_info_get_by_code(src_fmt->code);
if (!src_info || !(src_info->direction & RKISP1_ISP_SD_SRC)) {
src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
src_info = rkisp1_mbus_info_get_by_code(src_fmt->code);
}
if (sink_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
src_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
src_fmt->code = sink_fmt->code;
src_info = sink_info;
}
/*
* The source width and height must be identical to the source crop
* size.
*/
src_fmt->width = src_crop->width;
src_fmt->height = src_crop->height;
/*
* Copy the color space for the sink pad. When converting from Bayer to
* YUV, default to a limited quantization range.
*/
src_fmt->colorspace = sink_fmt->colorspace;
src_fmt->xfer_func = sink_fmt->xfer_func;
src_fmt->ycbcr_enc = sink_fmt->ycbcr_enc;
if (sink_info->pixel_enc == V4L2_PIXEL_ENC_BAYER &&
src_info->pixel_enc == V4L2_PIXEL_ENC_YUV)
src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
else
src_fmt->quantization = sink_fmt->quantization;
/*
* Allow setting the source color space fields when the SET_CSC flag is
* set and the source format is YUV. If the sink format is YUV, don't
* set the color primaries, transfer function or YCbCr encoding as the
* ISP is bypassed in that case and passes YUV data through without
* modifications.
*
* The color primaries and transfer function are configured through the
* cross-talk matrix and tone curve respectively. Settings for those
* hardware blocks are conveyed through the ISP parameters buffer, as
* they need to combine color space information with other image tuning
* characteristics and can't thus be computed by the kernel based on the
* color space. The source pad colorspace and xfer_func fields are thus
* ignored by the driver, but can be set by userspace to propagate
* accurate color space information down the pipeline.
*/
set_csc = format->flags & V4L2_MBUS_FRAMEFMT_SET_CSC;
if (set_csc && src_info->pixel_enc == V4L2_PIXEL_ENC_YUV) {
if (sink_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
if (format->colorspace != V4L2_COLORSPACE_DEFAULT)
src_fmt->colorspace = format->colorspace;
if (format->xfer_func != V4L2_XFER_FUNC_DEFAULT)
src_fmt->xfer_func = format->xfer_func;
if (format->ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT)
src_fmt->ycbcr_enc = format->ycbcr_enc;
}
if (format->quantization != V4L2_QUANTIZATION_DEFAULT)
src_fmt->quantization = format->quantization;
}
*format = *src_fmt;
/*
* Restore the SET_CSC flag if it was set to indicate support for the
* CSC setting API.
*/
if (set_csc)
format->flags |= V4L2_MBUS_FRAMEFMT_SET_CSC;
/* Store the source format info when setting the active format. */
if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
isp->src_fmt = src_info;
}
static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state,
struct v4l2_rect *r, unsigned int which)
{
struct v4l2_mbus_framefmt *src_fmt;
const struct v4l2_rect *sink_crop;
struct v4l2_rect *src_crop;
src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO,
which);
sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO,
which);
src_crop->left = ALIGN(r->left, 2);
src_crop->width = ALIGN(r->width, 2);
src_crop->top = r->top;
src_crop->height = r->height;
rkisp1_sd_adjust_crop_rect(src_crop, sink_crop);
*r = *src_crop;
/* Propagate to out format */
src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
rkisp1_isp_set_src_fmt(isp, sd_state, src_fmt, which);
}
static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state,
struct v4l2_rect *r, unsigned int which)
{
struct v4l2_rect *sink_crop, *src_crop;
const struct v4l2_mbus_framefmt *sink_fmt;
sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO,
which);
sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO,
which);
sink_crop->left = ALIGN(r->left, 2);
sink_crop->width = ALIGN(r->width, 2);
sink_crop->top = r->top;
sink_crop->height = r->height;
rkisp1_sd_adjust_crop(sink_crop, sink_fmt);
*r = *sink_crop;
/* Propagate to out crop */
src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SOURCE_VIDEO, which);
rkisp1_isp_set_src_crop(isp, sd_state, src_crop, which);
}
static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
bool is_yuv;
sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO,
which);
sink_fmt->code = format->code;
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SINK)) {
sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
}
if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
isp->sink_fmt = mbus_info;
sink_fmt->width = clamp_t(u32, format->width,
RKISP1_ISP_MIN_WIDTH,
RKISP1_ISP_MAX_WIDTH);
sink_fmt->height = clamp_t(u32, format->height,
RKISP1_ISP_MIN_HEIGHT,
RKISP1_ISP_MAX_HEIGHT);
/*
* Adjust the color space fields. Accept any color primaries and
* transfer function for both YUV and Bayer. For YUV any YCbCr encoding
* and quantization range is also accepted. For Bayer formats, the YCbCr
* encoding isn't applicable, and the quantization range can only be
* full.
*/
is_yuv = mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV;
sink_fmt->colorspace = format->colorspace ? :
(is_yuv ? V4L2_COLORSPACE_SRGB :
V4L2_COLORSPACE_RAW);
sink_fmt->xfer_func = format->xfer_func ? :
V4L2_MAP_XFER_FUNC_DEFAULT(sink_fmt->colorspace);
if (is_yuv) {
sink_fmt->ycbcr_enc = format->ycbcr_enc ? :
V4L2_MAP_YCBCR_ENC_DEFAULT(sink_fmt->colorspace);
sink_fmt->quantization = format->quantization ? :
V4L2_MAP_QUANTIZATION_DEFAULT(false, sink_fmt->colorspace,
sink_fmt->ycbcr_enc);
} else {
/*
* The YCbCr encoding isn't applicable for non-YUV formats, but
* V4L2 has no "no encoding" value. Hardcode it to Rec. 601, it
* should be ignored by userspace.
*/
sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
}
*format = *sink_fmt;
/* Propagate to in crop */
sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO,
which);
rkisp1_isp_set_sink_crop(isp, sd_state, sink_crop, which);
}
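/*
 * The setters above form a propagation chain: setting the sink format
 * resets the sink crop, which resets the source crop, which in turn
 * resets the source format:
 *
 *	rkisp1_isp_set_sink_fmt()
 *	  -> rkisp1_isp_set_sink_crop()
 *	       -> rkisp1_isp_set_src_crop()
 *	            -> rkisp1_isp_set_src_fmt()
 *
 * Userspace therefore only needs to configure the pads top-down to end up
 * with a consistent state.
 */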
static int rkisp1_isp_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_isp *isp = to_rkisp1_isp(sd);
mutex_lock(&isp->ops_lock);
fmt->format = *rkisp1_isp_get_pad_fmt(isp, sd_state, fmt->pad,
fmt->which);
mutex_unlock(&isp->ops_lock);
return 0;
}
static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_isp *isp = to_rkisp1_isp(sd);
mutex_lock(&isp->ops_lock);
if (fmt->pad == RKISP1_ISP_PAD_SINK_VIDEO)
rkisp1_isp_set_sink_fmt(isp, sd_state, &fmt->format,
fmt->which);
else if (fmt->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
rkisp1_isp_set_src_fmt(isp, sd_state, &fmt->format,
fmt->which);
else
fmt->format = *rkisp1_isp_get_pad_fmt(isp, sd_state, fmt->pad,
fmt->which);
mutex_unlock(&isp->ops_lock);
return 0;
}
static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_isp *isp = to_rkisp1_isp(sd);
int ret = 0;
if (sel->pad != RKISP1_ISP_PAD_SOURCE_VIDEO &&
sel->pad != RKISP1_ISP_PAD_SINK_VIDEO)
return -EINVAL;
mutex_lock(&isp->ops_lock);
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
struct v4l2_mbus_framefmt *fmt;
fmt = rkisp1_isp_get_pad_fmt(isp, sd_state, sel->pad,
sel->which);
sel->r.height = fmt->height;
sel->r.width = fmt->width;
sel->r.left = 0;
sel->r.top = 0;
} else {
sel->r = *rkisp1_isp_get_pad_crop(isp, sd_state,
RKISP1_ISP_PAD_SINK_VIDEO,
sel->which);
}
break;
case V4L2_SEL_TGT_CROP:
sel->r = *rkisp1_isp_get_pad_crop(isp, sd_state, sel->pad,
sel->which);
break;
default:
ret = -EINVAL;
}
mutex_unlock(&isp->ops_lock);
return ret;
}
static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_isp *isp = to_rkisp1_isp(sd);
int ret = 0;
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
dev_dbg(isp->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
mutex_lock(&isp->ops_lock);
if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO)
rkisp1_isp_set_sink_crop(isp, sd_state, &sel->r, sel->which);
else if (sel->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
rkisp1_isp_set_src_crop(isp, sd_state, &sel->r, sel->which);
else
ret = -EINVAL;
mutex_unlock(&isp->ops_lock);
return ret;
}
static int rkisp1_subdev_link_validate(struct media_link *link)
{
if (link->sink->index == RKISP1_ISP_PAD_SINK_PARAMS)
return 0;
return v4l2_subdev_link_validate(link);
}
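/*
 * Links to the params sink pad carry parameter buffers rather than video
 * frames, so the standard v4l2_subdev_link_validate() format checks do not
 * apply to that pad and are skipped above.
 */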
static const struct v4l2_subdev_pad_ops rkisp1_isp_pad_ops = {
.enum_mbus_code = rkisp1_isp_enum_mbus_code,
.enum_frame_size = rkisp1_isp_enum_frame_size,
.get_selection = rkisp1_isp_get_selection,
.set_selection = rkisp1_isp_set_selection,
.init_cfg = rkisp1_isp_init_config,
.get_fmt = rkisp1_isp_get_fmt,
.set_fmt = rkisp1_isp_set_fmt,
.link_validate = v4l2_subdev_link_validate_default,
};
/* ----------------------------------------------------------------------------
* Stream operations
*/
static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
{
struct rkisp1_isp *isp = to_rkisp1_isp(sd);
struct rkisp1_device *rkisp1 = isp->rkisp1;
struct media_pad *source_pad;
struct media_pad *sink_pad;
enum v4l2_mbus_type mbus_type;
u32 mbus_flags;
int ret;
if (!enable) {
v4l2_subdev_call(rkisp1->source, video, s_stream, false);
rkisp1_isp_stop(isp);
return 0;
}
sink_pad = &isp->pads[RKISP1_ISP_PAD_SINK_VIDEO];
source_pad = media_pad_remote_pad_unique(sink_pad);
if (IS_ERR(source_pad)) {
dev_dbg(rkisp1->dev, "Failed to get source for ISP: %ld\n",
PTR_ERR(source_pad));
return -EPIPE;
}
rkisp1->source = media_entity_to_v4l2_subdev(source_pad->entity);
if (!rkisp1->source) {
/* This should really not happen, so it is not worth a message. */
return -EPIPE;
}
if (rkisp1->source == &rkisp1->csi.sd) {
mbus_type = V4L2_MBUS_CSI2_DPHY;
mbus_flags = 0;
} else {
const struct rkisp1_sensor_async *asd;
struct v4l2_async_connection *asc;
asc = v4l2_async_connection_unique(rkisp1->source);
if (!asc)
return -EPIPE;
asd = container_of(asc, struct rkisp1_sensor_async, asd);
mbus_type = asd->mbus_type;
mbus_flags = asd->mbus_flags;
}
isp->frame_sequence = -1;
mutex_lock(&isp->ops_lock);
ret = rkisp1_config_cif(isp, mbus_type, mbus_flags);
if (ret)
goto mutex_unlock;
rkisp1_isp_start(isp);
ret = v4l2_subdev_call(rkisp1->source, video, s_stream, true);
if (ret) {
rkisp1_isp_stop(isp);
goto mutex_unlock;
}
mutex_unlock:
mutex_unlock(&isp->ops_lock);
return ret;
}
static int rkisp1_isp_subs_evt(struct v4l2_subdev *sd, struct v4l2_fh *fh,
struct v4l2_event_subscription *sub)
{
if (sub->type != V4L2_EVENT_FRAME_SYNC)
return -EINVAL;
/* V4L2_EVENT_FRAME_SYNC doesn't require an id, so zero should be set */
if (sub->id != 0)
return -EINVAL;
return v4l2_event_subscribe(fh, sub, 0, NULL);
}
static const struct media_entity_operations rkisp1_isp_media_ops = {
.link_validate = rkisp1_subdev_link_validate,
};
static const struct v4l2_subdev_video_ops rkisp1_isp_video_ops = {
.s_stream = rkisp1_isp_s_stream,
};
static const struct v4l2_subdev_core_ops rkisp1_isp_core_ops = {
.subscribe_event = rkisp1_isp_subs_evt,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
static const struct v4l2_subdev_ops rkisp1_isp_ops = {
.core = &rkisp1_isp_core_ops,
.video = &rkisp1_isp_video_ops,
.pad = &rkisp1_isp_pad_ops,
};
int rkisp1_isp_register(struct rkisp1_device *rkisp1)
{
struct v4l2_subdev_state state = {
.pads = rkisp1->isp.pad_cfg
};
struct rkisp1_isp *isp = &rkisp1->isp;
struct media_pad *pads = isp->pads;
struct v4l2_subdev *sd = &isp->sd;
int ret;
isp->rkisp1 = rkisp1;
v4l2_subdev_init(sd, &rkisp1_isp_ops);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
sd->entity.ops = &rkisp1_isp_media_ops;
sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
sd->owner = THIS_MODULE;
strscpy(sd->name, RKISP1_ISP_DEV_NAME, sizeof(sd->name));
pads[RKISP1_ISP_PAD_SINK_VIDEO].flags = MEDIA_PAD_FL_SINK |
MEDIA_PAD_FL_MUST_CONNECT;
pads[RKISP1_ISP_PAD_SINK_PARAMS].flags = MEDIA_PAD_FL_SINK;
pads[RKISP1_ISP_PAD_SOURCE_VIDEO].flags = MEDIA_PAD_FL_SOURCE;
pads[RKISP1_ISP_PAD_SOURCE_STATS].flags = MEDIA_PAD_FL_SOURCE;
isp->sink_fmt = rkisp1_mbus_info_get_by_code(RKISP1_DEF_SINK_PAD_FMT);
isp->src_fmt = rkisp1_mbus_info_get_by_code(RKISP1_DEF_SRC_PAD_FMT);
mutex_init(&isp->ops_lock);
ret = media_entity_pads_init(&sd->entity, RKISP1_ISP_PAD_MAX, pads);
if (ret)
goto error;
ret = v4l2_device_register_subdev(&rkisp1->v4l2_dev, sd);
if (ret) {
dev_err(rkisp1->dev, "Failed to register isp subdev\n");
goto error;
}
rkisp1_isp_init_config(sd, &state);
return 0;
error:
media_entity_cleanup(&sd->entity);
mutex_destroy(&isp->ops_lock);
isp->sd.v4l2_dev = NULL;
return ret;
}
void rkisp1_isp_unregister(struct rkisp1_device *rkisp1)
{
struct rkisp1_isp *isp = &rkisp1->isp;
if (!isp->sd.v4l2_dev)
return;
v4l2_device_unregister_subdev(&isp->sd);
media_entity_cleanup(&isp->sd.entity);
mutex_destroy(&isp->ops_lock);
}
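/*
 * isp->sd.v4l2_dev doubles as a "registered" flag: the error path of
 * rkisp1_isp_register() clears it, turning a later rkisp1_isp_unregister()
 * call into a no-op.
 */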
/* ----------------------------------------------------------------------------
* Interrupt handlers
*/
static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp)
{
struct v4l2_event event = {
.type = V4L2_EVENT_FRAME_SYNC,
};
event.u.frame_sync.frame_sequence = isp->frame_sequence;
v4l2_event_queue(isp->sd.devnode, &event);
}
irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
{
struct device *dev = ctx;
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 status, isp_err;
status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
if (!status)
return IRQ_NONE;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_ICR, status);
/* Vertical sync signal, starting the generation of a new frame */
if (status & RKISP1_CIF_ISP_V_START) {
rkisp1->isp.frame_sequence++;
rkisp1_isp_queue_event_sof(&rkisp1->isp);
if (status & RKISP1_CIF_ISP_FRAME) {
WARN_ONCE(1, "irq delay is too long, buffers might not be in sync\n");
rkisp1->debug.irq_delay++;
}
}
if (status & RKISP1_CIF_ISP_PIC_SIZE_ERROR) {
/* Clear pic_size_error */
isp_err = rkisp1_read(rkisp1, RKISP1_CIF_ISP_ERR);
if (isp_err & RKISP1_CIF_ISP_ERR_INFORM_SIZE)
rkisp1->debug.inform_size_error++;
if (isp_err & RKISP1_CIF_ISP_ERR_IS_SIZE)
rkisp1->debug.img_stabilization_size_error++;
if (isp_err & RKISP1_CIF_ISP_ERR_OUTFORM_SIZE)
rkisp1->debug.outform_size_error++;
rkisp1_write(rkisp1, RKISP1_CIF_ISP_ERR_CLR, isp_err);
} else if (status & RKISP1_CIF_ISP_DATA_LOSS) {
/* keep track of data_loss in debugfs */
rkisp1->debug.data_loss++;
}
if (status & RKISP1_CIF_ISP_FRAME) {
u32 isp_ris;
/* New frame from the sensor received */
isp_ris = rkisp1_read(rkisp1, RKISP1_CIF_ISP_RIS);
if (isp_ris & RKISP1_STATS_MEAS_MASK)
rkisp1_stats_isr(&rkisp1->stats, isp_ris);
/*
* Then update changed configs. Some of them involve
* a lot of register writes. Do those only once per frame.
* Do the updates in the order of the processing flow.
*/
rkisp1_params_isr(rkisp1);
}
return IRQ_HANDLED;
}
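/*
 * A note on the ISR above: frame_sequence is initialized to -1 in
 * rkisp1_isp_s_stream(), so the increment on the first RKISP1_CIF_ISP_V_START
 * makes the first V4L2_EVENT_FRAME_SYNC event carry sequence number 0.
 * Reading RKISP1_CIF_ISP_MIS and writing the value straight back to
 * RKISP1_CIF_ISP_ICR acknowledges exactly the interrupts that were observed.
 */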
| linux-master | drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Rockchip ISP1 Driver - Base driver
*
* Copyright (C) 2019 Collabora, Ltd.
*
* Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/minmax.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include "rkisp1-common.h"
#include "rkisp1-regs.h"
struct rkisp1_debug_register {
u32 reg;
u32 shd;
const char * const name;
};
#define RKISP1_DEBUG_REG(name) { RKISP1_CIF_##name, 0, #name }
#define RKISP1_DEBUG_SHD_REG(name) { \
RKISP1_CIF_##name, RKISP1_CIF_##name##_SHD, #name \
}
/* Keep this up-to-date when adding new registers. */
#define RKISP1_MAX_REG_LENGTH 21
static int rkisp1_debug_dump_regs(struct rkisp1_device *rkisp1,
struct seq_file *m, unsigned int offset,
const struct rkisp1_debug_register *regs)
{
const int width = RKISP1_MAX_REG_LENGTH;
u32 val, shd;
int ret;
ret = pm_runtime_get_if_in_use(rkisp1->dev);
if (ret <= 0)
return ret ? : -ENODATA;
for (; regs->name; ++regs) {
val = rkisp1_read(rkisp1, offset + regs->reg);
if (regs->shd) {
shd = rkisp1_read(rkisp1, offset + regs->shd);
seq_printf(m, "%*s: 0x%08x/0x%08x\n", width, regs->name,
val, shd);
} else {
seq_printf(m, "%*s: 0x%08x\n", width, regs->name, val);
}
}
pm_runtime_put(rkisp1->dev);
return 0;
}
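/*
 * pm_runtime_get_if_in_use() returns a negative error code, 0 when the
 * device is not in use, or 1 when a usage count was acquired. The GNU "?:"
 * expression above is thus equivalent to this sketch:
 *
 *	if (ret < 0)
 *		return ret;		(runtime-PM error)
 *	if (ret == 0)
 *		return -ENODATA;	(device powered down, nothing to read)
 */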
static int rkisp1_debug_dump_core_regs_show(struct seq_file *m, void *p)
{
static const struct rkisp1_debug_register registers[] = {
RKISP1_DEBUG_REG(VI_CCL),
RKISP1_DEBUG_REG(VI_ICCL),
RKISP1_DEBUG_REG(VI_IRCL),
RKISP1_DEBUG_REG(VI_DPCL),
RKISP1_DEBUG_REG(MI_CTRL),
RKISP1_DEBUG_REG(MI_BYTE_CNT),
RKISP1_DEBUG_REG(MI_CTRL_SHD),
RKISP1_DEBUG_REG(MI_RIS),
RKISP1_DEBUG_REG(MI_STATUS),
RKISP1_DEBUG_REG(MI_DMA_CTRL),
RKISP1_DEBUG_REG(MI_DMA_STATUS),
{ /* Sentinel */ },
};
struct rkisp1_device *rkisp1 = m->private;
return rkisp1_debug_dump_regs(rkisp1, m, 0, registers);
}
DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_core_regs);
static int rkisp1_debug_dump_isp_regs_show(struct seq_file *m, void *p)
{
static const struct rkisp1_debug_register registers[] = {
RKISP1_DEBUG_REG(ISP_CTRL),
RKISP1_DEBUG_REG(ISP_ACQ_PROP),
RKISP1_DEBUG_REG(ISP_FLAGS_SHD),
RKISP1_DEBUG_REG(ISP_RIS),
RKISP1_DEBUG_REG(ISP_ERR),
{ /* Sentinel */ },
};
struct rkisp1_device *rkisp1 = m->private;
return rkisp1_debug_dump_regs(rkisp1, m, 0, registers);
}
DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_isp_regs);
static int rkisp1_debug_dump_rsz_regs_show(struct seq_file *m, void *p)
{
static const struct rkisp1_debug_register registers[] = {
RKISP1_DEBUG_SHD_REG(RSZ_CTRL),
RKISP1_DEBUG_SHD_REG(RSZ_SCALE_HY),
RKISP1_DEBUG_SHD_REG(RSZ_SCALE_HCB),
RKISP1_DEBUG_SHD_REG(RSZ_SCALE_HCR),
RKISP1_DEBUG_SHD_REG(RSZ_SCALE_VY),
RKISP1_DEBUG_SHD_REG(RSZ_SCALE_VC),
RKISP1_DEBUG_SHD_REG(RSZ_PHASE_HY),
RKISP1_DEBUG_SHD_REG(RSZ_PHASE_HC),
RKISP1_DEBUG_SHD_REG(RSZ_PHASE_VY),
RKISP1_DEBUG_SHD_REG(RSZ_PHASE_VC),
{ /* Sentinel */ },
};
struct rkisp1_resizer *rsz = m->private;
return rkisp1_debug_dump_regs(rsz->rkisp1, m, rsz->regs_base, registers);
}
DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_rsz_regs);
static int rkisp1_debug_dump_mi_mp_show(struct seq_file *m, void *p)
{
static const struct rkisp1_debug_register registers[] = {
RKISP1_DEBUG_REG(MI_MP_Y_BASE_AD_INIT),
RKISP1_DEBUG_REG(MI_MP_Y_BASE_AD_INIT2),
RKISP1_DEBUG_REG(MI_MP_Y_BASE_AD_SHD),
RKISP1_DEBUG_REG(MI_MP_Y_SIZE_INIT),
RKISP1_DEBUG_REG(MI_MP_Y_SIZE_SHD),
RKISP1_DEBUG_REG(MI_MP_Y_OFFS_CNT_SHD),
{ /* Sentinel */ },
};
struct rkisp1_device *rkisp1 = m->private;
return rkisp1_debug_dump_regs(rkisp1, m, 0, registers);
}
DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_mi_mp);
#define RKISP1_DEBUG_DATA_COUNT_BINS 32
#define RKISP1_DEBUG_DATA_COUNT_STEP (4096 / RKISP1_DEBUG_DATA_COUNT_BINS)
static int rkisp1_debug_input_status_show(struct seq_file *m, void *p)
{
struct rkisp1_device *rkisp1 = m->private;
u16 data_count[RKISP1_DEBUG_DATA_COUNT_BINS] = { };
unsigned int hsync_count = 0;
unsigned int vsync_count = 0;
unsigned int i;
u32 data;
u32 val;
int ret;
ret = pm_runtime_get_if_in_use(rkisp1->dev);
if (ret <= 0)
return ret ? : -ENODATA;
/* Sample the ISP input port status 10000 times with a 1µs interval. */
for (i = 0; i < 10000; ++i) {
val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_FLAGS_SHD);
data = (val & RKISP1_CIF_ISP_FLAGS_SHD_S_DATA_MASK)
>> RKISP1_CIF_ISP_FLAGS_SHD_S_DATA_SHIFT;
data_count[data / RKISP1_DEBUG_DATA_COUNT_STEP]++;
if (val & RKISP1_CIF_ISP_FLAGS_SHD_S_HSYNC)
hsync_count++;
if (val & RKISP1_CIF_ISP_FLAGS_SHD_S_VSYNC)
vsync_count++;
udelay(1);
}
pm_runtime_put(rkisp1->dev);
seq_printf(m, "vsync: %u, hsync: %u\n", vsync_count, hsync_count);
seq_puts(m, "data:\n");
for (i = 0; i < ARRAY_SIZE(data_count); ++i)
seq_printf(m, "- [%04u:%04u]: %u\n",
i * RKISP1_DEBUG_DATA_COUNT_STEP,
(i + 1) * RKISP1_DEBUG_DATA_COUNT_STEP - 1,
data_count[i]);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_input_status);
void rkisp1_debug_init(struct rkisp1_device *rkisp1)
{
struct rkisp1_debug *debug = &rkisp1->debug;
struct dentry *regs_dir;
debug->debugfs_dir = debugfs_create_dir(dev_name(rkisp1->dev), NULL);
debugfs_create_ulong("data_loss", 0444, debug->debugfs_dir,
&debug->data_loss);
debugfs_create_ulong("outform_size_err", 0444, debug->debugfs_dir,
&debug->outform_size_error);
debugfs_create_ulong("img_stabilization_size_error", 0444,
debug->debugfs_dir,
&debug->img_stabilization_size_error);
debugfs_create_ulong("inform_size_error", 0444, debug->debugfs_dir,
&debug->inform_size_error);
debugfs_create_ulong("irq_delay", 0444, debug->debugfs_dir,
&debug->irq_delay);
debugfs_create_ulong("mipi_error", 0444, debug->debugfs_dir,
&debug->mipi_error);
debugfs_create_ulong("stats_error", 0444, debug->debugfs_dir,
&debug->stats_error);
debugfs_create_ulong("mp_stop_timeout", 0444, debug->debugfs_dir,
&debug->stop_timeout[RKISP1_MAINPATH]);
debugfs_create_ulong("sp_stop_timeout", 0444, debug->debugfs_dir,
&debug->stop_timeout[RKISP1_SELFPATH]);
debugfs_create_ulong("mp_frame_drop", 0444, debug->debugfs_dir,
&debug->frame_drop[RKISP1_MAINPATH]);
debugfs_create_ulong("sp_frame_drop", 0444, debug->debugfs_dir,
&debug->frame_drop[RKISP1_SELFPATH]);
debugfs_create_file("input_status", 0444, debug->debugfs_dir, rkisp1,
&rkisp1_debug_input_status_fops);
regs_dir = debugfs_create_dir("regs", debug->debugfs_dir);
debugfs_create_file("core", 0444, regs_dir, rkisp1,
&rkisp1_debug_dump_core_regs_fops);
debugfs_create_file("isp", 0444, regs_dir, rkisp1,
&rkisp1_debug_dump_isp_regs_fops);
debugfs_create_file("mrsz", 0444, regs_dir,
&rkisp1->resizer_devs[RKISP1_MAINPATH],
&rkisp1_debug_dump_rsz_regs_fops);
debugfs_create_file("srsz", 0444, regs_dir,
&rkisp1->resizer_devs[RKISP1_SELFPATH],
&rkisp1_debug_dump_rsz_regs_fops);
debugfs_create_file("mi_mp", 0444, regs_dir, rkisp1,
&rkisp1_debug_dump_mi_mp_fops);
}
void rkisp1_debug_cleanup(struct rkisp1_device *rkisp1)
{
debugfs_remove_recursive(rkisp1->debug.debugfs_dir);
}
| linux-master | drivers/media/platform/rockchip/rkisp1/rkisp1-debug.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Rockchip ISP1 Driver - Common definitions
*
* Copyright (C) 2019 Collabora, Ltd.
*/
#include <media/mipi-csi2.h>
#include <media/v4l2-rect.h>
#include "rkisp1-common.h"
static const struct rkisp1_mbus_info rkisp1_formats[] = {
{
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.pixel_enc = V4L2_PIXEL_ENC_YUV,
.direction = RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW10,
.bayer_pat = RKISP1_RAW_RGGB,
.bus_width = 10,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW10,
.bayer_pat = RKISP1_RAW_BGGR,
.bus_width = 10,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW10,
.bayer_pat = RKISP1_RAW_GBRG,
.bus_width = 10,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW10,
.bayer_pat = RKISP1_RAW_GRBG,
.bus_width = 10,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW12,
.bayer_pat = RKISP1_RAW_RGGB,
.bus_width = 12,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW12,
.bayer_pat = RKISP1_RAW_BGGR,
.bus_width = 12,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW12,
.bayer_pat = RKISP1_RAW_GBRG,
.bus_width = 12,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW12,
.bayer_pat = RKISP1_RAW_GRBG,
.bus_width = 12,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW8,
.bayer_pat = RKISP1_RAW_RGGB,
.bus_width = 8,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW8,
.bayer_pat = RKISP1_RAW_BGGR,
.bus_width = 8,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW8,
.bayer_pat = RKISP1_RAW_GBRG,
.bus_width = 8,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
.pixel_enc = V4L2_PIXEL_ENC_BAYER,
.mipi_dt = MIPI_CSI2_DT_RAW8,
.bayer_pat = RKISP1_RAW_GRBG,
.bus_width = 8,
.direction = RKISP1_ISP_SD_SINK | RKISP1_ISP_SD_SRC,
}, {
.mbus_code = MEDIA_BUS_FMT_YUYV8_1X16,
.pixel_enc = V4L2_PIXEL_ENC_YUV,
.mipi_dt = MIPI_CSI2_DT_YUV422_8B,
.yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_YCBYCR,
.bus_width = 16,
.direction = RKISP1_ISP_SD_SINK,
}, {
.mbus_code = MEDIA_BUS_FMT_YVYU8_1X16,
.pixel_enc = V4L2_PIXEL_ENC_YUV,
.mipi_dt = MIPI_CSI2_DT_YUV422_8B,
.yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_YCRYCB,
.bus_width = 16,
.direction = RKISP1_ISP_SD_SINK,
}, {
.mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
.pixel_enc = V4L2_PIXEL_ENC_YUV,
.mipi_dt = MIPI_CSI2_DT_YUV422_8B,
.yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_CBYCRY,
.bus_width = 16,
.direction = RKISP1_ISP_SD_SINK,
}, {
.mbus_code = MEDIA_BUS_FMT_VYUY8_1X16,
.pixel_enc = V4L2_PIXEL_ENC_YUV,
.mipi_dt = MIPI_CSI2_DT_YUV422_8B,
.yuv_seq = RKISP1_CIF_ISP_ACQ_PROP_CRYCBY,
.bus_width = 16,
.direction = RKISP1_ISP_SD_SINK,
},
};
const struct rkisp1_mbus_info *rkisp1_mbus_info_get_by_index(unsigned int index)
{
if (index >= ARRAY_SIZE(rkisp1_formats))
return NULL;
return &rkisp1_formats[index];
}
const struct rkisp1_mbus_info *rkisp1_mbus_info_get_by_code(u32 mbus_code)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(rkisp1_formats); i++) {
const struct rkisp1_mbus_info *fmt = &rkisp1_formats[i];
if (fmt->mbus_code == mbus_code)
return fmt;
}
return NULL;
}
static const struct v4l2_rect rkisp1_sd_min_crop = {
.width = RKISP1_ISP_MIN_WIDTH,
.height = RKISP1_ISP_MIN_HEIGHT,
.top = 0,
.left = 0,
};
void rkisp1_sd_adjust_crop_rect(struct v4l2_rect *crop,
const struct v4l2_rect *bounds)
{
v4l2_rect_set_min_size(crop, &rkisp1_sd_min_crop);
v4l2_rect_map_inside(crop, bounds);
}
void rkisp1_sd_adjust_crop(struct v4l2_rect *crop,
const struct v4l2_mbus_framefmt *bounds)
{
struct v4l2_rect crop_bounds = {
.left = 0,
.top = 0,
.width = bounds->width,
.height = bounds->height,
};
rkisp1_sd_adjust_crop_rect(crop, &crop_bounds);
}
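/*
 * Illustrative example (assuming RKISP1_ISP_MIN_WIDTH/HEIGHT give the
 * minimum crop size): with a 1920x1080 bounds format, a requested crop of
 * 2000x1000 at (100, 0) is first grown to at least the minimum size by
 * v4l2_rect_set_min_size(), then v4l2_rect_map_inside() clamps the width
 * to 1920 and moves left to 0 so the rectangle lies fully inside bounds.
 */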
| linux-master | drivers/media/platform/rockchip/rkisp1/rkisp1-common.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Rockchip ISP1 Driver - V4l resizer device
*
* Copyright (C) 2019 Collabora, Ltd.
*
* Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#include "rkisp1-common.h"
#define RKISP1_RSZ_SP_DEV_NAME RKISP1_DRIVER_NAME "_resizer_selfpath"
#define RKISP1_RSZ_MP_DEV_NAME RKISP1_DRIVER_NAME "_resizer_mainpath"
#define RKISP1_DEF_FMT MEDIA_BUS_FMT_YUYV8_2X8
#define RKISP1_DEF_PIXEL_ENC V4L2_PIXEL_ENC_YUV
struct rkisp1_rsz_yuv_mbus_info {
u32 mbus_code;
u32 hdiv;
u32 vdiv;
};
static const struct rkisp1_rsz_yuv_mbus_info rkisp1_rsz_yuv_src_formats[] = {
{
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, /* YUV422 */
.hdiv = 2,
.vdiv = 1,
},
{
.mbus_code = MEDIA_BUS_FMT_YUYV8_1_5X8, /* YUV420 */
.hdiv = 2,
.vdiv = 2,
},
};
static const struct rkisp1_rsz_yuv_mbus_info *rkisp1_rsz_get_yuv_mbus_info(u32 mbus_code)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(rkisp1_rsz_yuv_src_formats); i++) {
if (rkisp1_rsz_yuv_src_formats[i].mbus_code == mbus_code)
return &rkisp1_rsz_yuv_src_formats[i];
}
return NULL;
}
enum rkisp1_shadow_regs_when {
RKISP1_SHADOW_REGS_SYNC,
RKISP1_SHADOW_REGS_ASYNC,
};
struct rkisp1_rsz_config {
/* constraints */
const int max_rsz_width;
const int max_rsz_height;
const int min_rsz_width;
const int min_rsz_height;
/* registers */
struct {
u32 ctrl;
u32 yuvmode_mask;
u32 rawmode_mask;
u32 h_offset;
u32 v_offset;
u32 h_size;
u32 v_size;
} dual_crop;
};
static const struct rkisp1_rsz_config rkisp1_rsz_config_mp = {
/* constraints */
.max_rsz_width = RKISP1_RSZ_MP_SRC_MAX_WIDTH,
.max_rsz_height = RKISP1_RSZ_MP_SRC_MAX_HEIGHT,
.min_rsz_width = RKISP1_RSZ_SRC_MIN_WIDTH,
.min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
/* registers */
.dual_crop = {
.ctrl = RKISP1_CIF_DUAL_CROP_CTRL,
.yuvmode_mask = RKISP1_CIF_DUAL_CROP_MP_MODE_YUV,
.rawmode_mask = RKISP1_CIF_DUAL_CROP_MP_MODE_RAW,
.h_offset = RKISP1_CIF_DUAL_CROP_M_H_OFFS,
.v_offset = RKISP1_CIF_DUAL_CROP_M_V_OFFS,
.h_size = RKISP1_CIF_DUAL_CROP_M_H_SIZE,
.v_size = RKISP1_CIF_DUAL_CROP_M_V_SIZE,
},
};
static const struct rkisp1_rsz_config rkisp1_rsz_config_sp = {
/* constraints */
.max_rsz_width = RKISP1_RSZ_SP_SRC_MAX_WIDTH,
.max_rsz_height = RKISP1_RSZ_SP_SRC_MAX_HEIGHT,
.min_rsz_width = RKISP1_RSZ_SRC_MIN_WIDTH,
.min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
/* registers */
.dual_crop = {
.ctrl = RKISP1_CIF_DUAL_CROP_CTRL,
.yuvmode_mask = RKISP1_CIF_DUAL_CROP_SP_MODE_YUV,
.rawmode_mask = RKISP1_CIF_DUAL_CROP_SP_MODE_RAW,
.h_offset = RKISP1_CIF_DUAL_CROP_S_H_OFFS,
.v_offset = RKISP1_CIF_DUAL_CROP_S_V_OFFS,
.h_size = RKISP1_CIF_DUAL_CROP_S_H_SIZE,
.v_size = RKISP1_CIF_DUAL_CROP_S_V_SIZE,
},
};
static inline u32 rkisp1_rsz_read(struct rkisp1_resizer *rsz, u32 offset)
{
return rkisp1_read(rsz->rkisp1, rsz->regs_base + offset);
}
static inline void rkisp1_rsz_write(struct rkisp1_resizer *rsz, u32 offset,
u32 value)
{
rkisp1_write(rsz->rkisp1, rsz->regs_base + offset, value);
}
static struct v4l2_mbus_framefmt *
rkisp1_rsz_get_pad_fmt(struct rkisp1_resizer *rsz,
struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
struct v4l2_subdev_state state = {
.pads = rsz->pad_cfg,
};
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&rsz->sd, sd_state, pad);
else
return v4l2_subdev_get_try_format(&rsz->sd, &state, pad);
}
static struct v4l2_rect *
rkisp1_rsz_get_pad_crop(struct rkisp1_resizer *rsz,
struct v4l2_subdev_state *sd_state,
unsigned int pad, u32 which)
{
struct v4l2_subdev_state state = {
.pads = rsz->pad_cfg,
};
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_crop(&rsz->sd, sd_state, pad);
else
return v4l2_subdev_get_try_crop(&rsz->sd, &state, pad);
}
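/*
 * Both helpers above reuse the v4l2_subdev_get_try_*() accessors for the
 * ACTIVE case by wrapping the resizer's own pad_cfg array in a temporary
 * v4l2_subdev_state. This keeps a single lookup path for both TRY and
 * ACTIVE formats, at the cost of the slightly confusing "try" naming in
 * the ACTIVE branch.
 */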
/* ----------------------------------------------------------------------------
* Dual crop hw configs
*/
static void rkisp1_dcrop_disable(struct rkisp1_resizer *rsz,
enum rkisp1_shadow_regs_when when)
{
u32 dc_ctrl = rkisp1_read(rsz->rkisp1, rsz->config->dual_crop.ctrl);
u32 mask = ~(rsz->config->dual_crop.yuvmode_mask |
rsz->config->dual_crop.rawmode_mask);
dc_ctrl &= mask;
if (when == RKISP1_SHADOW_REGS_ASYNC)
dc_ctrl |= RKISP1_CIF_DUAL_CROP_GEN_CFG_UPD;
else
dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
rkisp1_write(rsz->rkisp1, rsz->config->dual_crop.ctrl, dc_ctrl);
}
/* configure dual-crop unit */
static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz)
{
struct rkisp1_device *rkisp1 = rsz->rkisp1;
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
u32 dc_ctrl;
sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
V4L2_SUBDEV_FORMAT_ACTIVE);
sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SINK,
V4L2_SUBDEV_FORMAT_ACTIVE);
if (sink_crop->width == sink_fmt->width &&
sink_crop->height == sink_fmt->height &&
sink_crop->left == 0 && sink_crop->top == 0) {
rkisp1_dcrop_disable(rsz, RKISP1_SHADOW_REGS_SYNC);
dev_dbg(rkisp1->dev, "capture %d crop disabled\n", rsz->id);
return;
}
dc_ctrl = rkisp1_read(rkisp1, rsz->config->dual_crop.ctrl);
rkisp1_write(rkisp1, rsz->config->dual_crop.h_offset, sink_crop->left);
rkisp1_write(rkisp1, rsz->config->dual_crop.v_offset, sink_crop->top);
rkisp1_write(rkisp1, rsz->config->dual_crop.h_size, sink_crop->width);
rkisp1_write(rkisp1, rsz->config->dual_crop.v_size, sink_crop->height);
dc_ctrl |= rsz->config->dual_crop.yuvmode_mask;
dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
rkisp1_write(rkisp1, rsz->config->dual_crop.ctrl, dc_ctrl);
dev_dbg(rkisp1->dev, "stream %d crop: %dx%d -> %dx%d\n", rsz->id,
sink_fmt->width, sink_fmt->height,
sink_crop->width, sink_crop->height);
}
/* ----------------------------------------------------------------------------
* Resizer hw configs
*/
static void rkisp1_rsz_update_shadow(struct rkisp1_resizer *rsz,
enum rkisp1_shadow_regs_when when)
{
u32 ctrl_cfg = rkisp1_rsz_read(rsz, RKISP1_CIF_RSZ_CTRL);
if (when == RKISP1_SHADOW_REGS_ASYNC)
ctrl_cfg |= RKISP1_CIF_RSZ_CTRL_CFG_UPD_AUTO;
else
ctrl_cfg |= RKISP1_CIF_RSZ_CTRL_CFG_UPD;
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_CTRL, ctrl_cfg);
}
static u32 rkisp1_rsz_calc_ratio(u32 len_sink, u32 len_src)
{
if (len_sink < len_src)
return ((len_sink - 1) * RKISP1_CIF_RSZ_SCALER_FACTOR) /
(len_src - 1);
return ((len_src - 1) * RKISP1_CIF_RSZ_SCALER_FACTOR) /
(len_sink - 1) + 1;
}
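/*
 * Worked example (a sketch assuming RKISP1_CIF_RSZ_SCALER_FACTOR is 65536,
 * i.e. a 16-bit fixed-point fraction): downscaling 640 -> 320 pixels takes
 * the second branch above, giving
 *
 *	ratio = ((320 - 1) * 65536) / (640 - 1) + 1 = 32717
 *
 * which is roughly FACTOR / 2, as expected for a 2:1 downscale.
 */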
static void rkisp1_rsz_disable(struct rkisp1_resizer *rsz,
enum rkisp1_shadow_regs_when when)
{
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_CTRL, 0);
if (when == RKISP1_SHADOW_REGS_SYNC)
rkisp1_rsz_update_shadow(rsz, when);
}
static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
struct v4l2_rect *sink_y,
struct v4l2_rect *sink_c,
struct v4l2_rect *src_y,
struct v4l2_rect *src_c,
enum rkisp1_shadow_regs_when when)
{
u32 ratio, rsz_ctrl = 0;
unsigned int i;
/* No phase offset */
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_PHASE_HY, 0);
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_PHASE_HC, 0);
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_PHASE_VY, 0);
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_PHASE_VC, 0);
/* Linear interpolation */
for (i = 0; i < 64; i++) {
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_LUT_ADDR, i);
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_LUT, i);
}
if (sink_y->width != src_y->width) {
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HY_ENABLE;
if (sink_y->width < src_y->width)
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HY_UP;
ratio = rkisp1_rsz_calc_ratio(sink_y->width, src_y->width);
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_HY, ratio);
}
if (sink_c->width != src_c->width) {
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HC_ENABLE;
if (sink_c->width < src_c->width)
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_HC_UP;
ratio = rkisp1_rsz_calc_ratio(sink_c->width, src_c->width);
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_HCB, ratio);
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_HCR, ratio);
}
if (sink_y->height != src_y->height) {
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VY_ENABLE;
if (sink_y->height < src_y->height)
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VY_UP;
ratio = rkisp1_rsz_calc_ratio(sink_y->height, src_y->height);
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_VY, ratio);
}
if (sink_c->height != src_c->height) {
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VC_ENABLE;
if (sink_c->height < src_c->height)
rsz_ctrl |= RKISP1_CIF_RSZ_CTRL_SCALE_VC_UP;
ratio = rkisp1_rsz_calc_ratio(sink_c->height, src_c->height);
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_SCALE_VC, ratio);
}
rkisp1_rsz_write(rsz, RKISP1_CIF_RSZ_CTRL, rsz_ctrl);
rkisp1_rsz_update_shadow(rsz, when);
}
static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
enum rkisp1_shadow_regs_when when)
{
const struct rkisp1_rsz_yuv_mbus_info *sink_yuv_info, *src_yuv_info;
struct v4l2_rect sink_y, sink_c, src_y, src_c;
struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
struct v4l2_rect *sink_crop;
sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
V4L2_SUBDEV_FORMAT_ACTIVE);
src_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SRC,
V4L2_SUBDEV_FORMAT_ACTIVE);
src_yuv_info = rkisp1_rsz_get_yuv_mbus_info(src_fmt->code);
sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SINK,
V4L2_SUBDEV_FORMAT_ACTIVE);
sink_yuv_info = rkisp1_rsz_get_yuv_mbus_info(sink_fmt->code);
/*
* The resizer only works on YUV formats, so disable it and return if
* the format is Bayer.
*/
if (rsz->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
rkisp1_rsz_disable(rsz, when);
return;
}
sink_y.width = sink_crop->width;
sink_y.height = sink_crop->height;
src_y.width = src_fmt->width;
src_y.height = src_fmt->height;
sink_c.width = sink_y.width / sink_yuv_info->hdiv;
sink_c.height = sink_y.height / sink_yuv_info->vdiv;
/*
* The resizer is used not only to change the dimensions of the frame but
* also to convert between YUV subsampling schemes (4:2:2 -> 4:2:0 for
* example), so the width/height of the CbCr streams must be derived from
* the media bus format on the src pad.
*/
src_c.width = src_y.width / src_yuv_info->hdiv;
src_c.height = src_y.height / src_yuv_info->vdiv;
if (sink_c.width == src_c.width && sink_c.height == src_c.height) {
rkisp1_rsz_disable(rsz, when);
return;
}
dev_dbg(rsz->rkisp1->dev, "stream %d rsz/scale: %dx%d -> %dx%d\n",
rsz->id, sink_crop->width, sink_crop->height,
src_fmt->width, src_fmt->height);
dev_dbg(rsz->rkisp1->dev, "chroma scaling %dx%d -> %dx%d\n",
sink_c.width, sink_c.height, src_c.width, src_c.height);
/* set values in the hw */
rkisp1_rsz_config_regs(rsz, &sink_y, &sink_c, &src_y, &src_c, when);
}
/* ----------------------------------------------------------------------------
* Subdev pad operations
*/
static int rkisp1_rsz_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
struct v4l2_subdev_pad_config dummy_cfg;
struct v4l2_subdev_state pad_state = {
.pads = &dummy_cfg
};
u32 pad = code->pad;
int ret;
if (code->pad == RKISP1_RSZ_PAD_SRC) {
/* supported mbus codes on the src are the same as in the capture */
struct rkisp1_capture *cap = &rsz->rkisp1->capture_devs[rsz->id];
return rkisp1_cap_enum_mbus_codes(cap, code);
}
/*
* The selfpath capture doesn't support Bayer formats, so the selfpath
* resizer supports only YUV422 on the sink pad.
*/
if (rsz->id == RKISP1_SELFPATH) {
if (code->index > 0)
return -EINVAL;
code->code = MEDIA_BUS_FMT_YUYV8_2X8;
return 0;
}
/* supported mbus codes on the sink pad are the same as isp src pad */
code->pad = RKISP1_ISP_PAD_SOURCE_VIDEO;
ret = v4l2_subdev_call(&rsz->rkisp1->isp.sd, pad, enum_mbus_code,
&pad_state, code);
/* restore pad */
code->pad = pad;
code->flags = 0;
return ret;
}
static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop;
sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_RSZ_PAD_SRC);
sink_fmt->width = RKISP1_DEFAULT_WIDTH;
sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
sink_fmt->field = V4L2_FIELD_NONE;
sink_fmt->code = RKISP1_DEF_FMT;
sink_fmt->colorspace = V4L2_COLORSPACE_SRGB;
sink_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
RKISP1_RSZ_PAD_SINK);
sink_crop->width = RKISP1_DEFAULT_WIDTH;
sink_crop->height = RKISP1_DEFAULT_HEIGHT;
sink_crop->left = 0;
sink_crop->top = 0;
src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
RKISP1_RSZ_PAD_SINK);
*src_fmt = *sink_fmt;
/* NOTE: there is no crop on the source pad, only on the sink */
return 0;
}
static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
const struct rkisp1_mbus_info *sink_mbus_info;
struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
which);
src_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SRC,
which);
sink_mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
/* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
if (sink_mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
rkisp1_rsz_get_yuv_mbus_info(format->code))
src_fmt->code = format->code;
src_fmt->width = clamp_t(u32, format->width,
rsz->config->min_rsz_width,
rsz->config->max_rsz_width);
src_fmt->height = clamp_t(u32, format->height,
rsz->config->min_rsz_height,
rsz->config->max_rsz_height);
*format = *src_fmt;
}
static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
struct v4l2_subdev_state *sd_state,
struct v4l2_rect *r,
unsigned int which)
{
const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *sink_crop;
sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
which);
sink_crop = rkisp1_rsz_get_pad_crop(rsz, sd_state,
RKISP1_RSZ_PAD_SINK,
which);
/* No cropping for MP Bayer raw data */
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
if (rsz->id == RKISP1_MAINPATH &&
mbus_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
sink_crop->left = 0;
sink_crop->top = 0;
sink_crop->width = sink_fmt->width;
sink_crop->height = sink_fmt->height;
*r = *sink_crop;
return;
}
sink_crop->left = ALIGN(r->left, 2);
sink_crop->width = ALIGN(r->width, 2);
sink_crop->top = r->top;
sink_crop->height = r->height;
rkisp1_sd_adjust_crop(sink_crop, sink_fmt);
*r = *sink_crop;
}
static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
struct v4l2_subdev_state *sd_state,
struct v4l2_mbus_framefmt *format,
unsigned int which)
{
const struct rkisp1_mbus_info *mbus_info;
struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
struct v4l2_rect *sink_crop;
bool is_yuv;
sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
which);
src_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SRC,
which);
sink_crop = rkisp1_rsz_get_pad_crop(rsz, sd_state,
RKISP1_RSZ_PAD_SINK,
which);
if (rsz->id == RKISP1_SELFPATH)
sink_fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
else
sink_fmt->code = format->code;
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SRC)) {
sink_fmt->code = RKISP1_DEF_FMT;
mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
}
if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
rsz->pixel_enc = mbus_info->pixel_enc;
sink_fmt->width = clamp_t(u32, format->width,
RKISP1_ISP_MIN_WIDTH,
RKISP1_ISP_MAX_WIDTH);
sink_fmt->height = clamp_t(u32, format->height,
RKISP1_ISP_MIN_HEIGHT,
RKISP1_ISP_MAX_HEIGHT);
/*
* Adjust the color space fields. Accept any color primaries and
* transfer function for both YUV and Bayer. For YUV any YCbCr encoding
* and quantization range is also accepted. For Bayer formats, the YCbCr
* encoding isn't applicable, and the quantization range can only be
* full.
*/
is_yuv = mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV;
sink_fmt->colorspace = format->colorspace ? :
(is_yuv ? V4L2_COLORSPACE_SRGB :
V4L2_COLORSPACE_RAW);
sink_fmt->xfer_func = format->xfer_func ? :
V4L2_MAP_XFER_FUNC_DEFAULT(sink_fmt->colorspace);
if (is_yuv) {
sink_fmt->ycbcr_enc = format->ycbcr_enc ? :
V4L2_MAP_YCBCR_ENC_DEFAULT(sink_fmt->colorspace);
sink_fmt->quantization = format->quantization ? :
V4L2_MAP_QUANTIZATION_DEFAULT(false, sink_fmt->colorspace,
sink_fmt->ycbcr_enc);
} else {
/*
* The YCbCr encoding isn't applicable for non-YUV formats, but
* V4L2 has no "no encoding" value. Hardcode it to Rec. 601, it
* should be ignored by userspace.
*/
sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
}
*format = *sink_fmt;
/* Propagate the media bus code and color space to the source pad. */
src_fmt->code = sink_fmt->code;
src_fmt->colorspace = sink_fmt->colorspace;
src_fmt->xfer_func = sink_fmt->xfer_func;
src_fmt->ycbcr_enc = sink_fmt->ycbcr_enc;
src_fmt->quantization = sink_fmt->quantization;
/* Update sink crop */
rkisp1_rsz_set_sink_crop(rsz, sd_state, sink_crop, which);
}
static int rkisp1_rsz_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
mutex_lock(&rsz->ops_lock);
fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, sd_state, fmt->pad,
fmt->which);
mutex_unlock(&rsz->ops_lock);
return 0;
}
static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
mutex_lock(&rsz->ops_lock);
if (fmt->pad == RKISP1_RSZ_PAD_SINK)
rkisp1_rsz_set_sink_fmt(rsz, sd_state, &fmt->format,
fmt->which);
else
rkisp1_rsz_set_src_fmt(rsz, sd_state, &fmt->format,
fmt->which);
mutex_unlock(&rsz->ops_lock);
return 0;
}
static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
struct v4l2_mbus_framefmt *mf_sink;
int ret = 0;
if (sel->pad == RKISP1_RSZ_PAD_SRC)
return -EINVAL;
mutex_lock(&rsz->ops_lock);
switch (sel->target) {
case V4L2_SEL_TGT_CROP_BOUNDS:
mf_sink = rkisp1_rsz_get_pad_fmt(rsz, sd_state,
RKISP1_RSZ_PAD_SINK,
sel->which);
sel->r.height = mf_sink->height;
sel->r.width = mf_sink->width;
sel->r.left = 0;
sel->r.top = 0;
break;
case V4L2_SEL_TGT_CROP:
sel->r = *rkisp1_rsz_get_pad_crop(rsz, sd_state,
RKISP1_RSZ_PAD_SINK,
sel->which);
break;
default:
ret = -EINVAL;
}
mutex_unlock(&rsz->ops_lock);
return ret;
}
static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
if (sel->target != V4L2_SEL_TGT_CROP || sel->pad == RKISP1_RSZ_PAD_SRC)
return -EINVAL;
dev_dbg(rsz->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
mutex_lock(&rsz->ops_lock);
rkisp1_rsz_set_sink_crop(rsz, sd_state, &sel->r, sel->which);
mutex_unlock(&rsz->ops_lock);
return 0;
}
static const struct media_entity_operations rkisp1_rsz_media_ops = {
.link_validate = v4l2_subdev_link_validate,
};
static const struct v4l2_subdev_pad_ops rkisp1_rsz_pad_ops = {
.enum_mbus_code = rkisp1_rsz_enum_mbus_code,
.get_selection = rkisp1_rsz_get_selection,
.set_selection = rkisp1_rsz_set_selection,
.init_cfg = rkisp1_rsz_init_config,
.get_fmt = rkisp1_rsz_get_fmt,
.set_fmt = rkisp1_rsz_set_fmt,
.link_validate = v4l2_subdev_link_validate_default,
};
/* ----------------------------------------------------------------------------
* Stream operations
*/
static int rkisp1_rsz_s_stream(struct v4l2_subdev *sd, int enable)
{
struct rkisp1_resizer *rsz =
container_of(sd, struct rkisp1_resizer, sd);
struct rkisp1_device *rkisp1 = rsz->rkisp1;
struct rkisp1_capture *other = &rkisp1->capture_devs[rsz->id ^ 1];
enum rkisp1_shadow_regs_when when = RKISP1_SHADOW_REGS_SYNC;
if (!enable) {
rkisp1_dcrop_disable(rsz, RKISP1_SHADOW_REGS_ASYNC);
rkisp1_rsz_disable(rsz, RKISP1_SHADOW_REGS_ASYNC);
return 0;
}
if (other->is_streaming)
when = RKISP1_SHADOW_REGS_ASYNC;
mutex_lock(&rsz->ops_lock);
rkisp1_rsz_config(rsz, when);
rkisp1_dcrop_config(rsz);
mutex_unlock(&rsz->ops_lock);
return 0;
}
static const struct v4l2_subdev_video_ops rkisp1_rsz_video_ops = {
.s_stream = rkisp1_rsz_s_stream,
};
static const struct v4l2_subdev_ops rkisp1_rsz_ops = {
.video = &rkisp1_rsz_video_ops,
.pad = &rkisp1_rsz_pad_ops,
};
static void rkisp1_rsz_unregister(struct rkisp1_resizer *rsz)
{
if (!rsz->rkisp1)
return;
v4l2_device_unregister_subdev(&rsz->sd);
media_entity_cleanup(&rsz->sd.entity);
mutex_destroy(&rsz->ops_lock);
}
static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
{
struct v4l2_subdev_state state = {
.pads = rsz->pad_cfg,
};
static const char * const dev_names[] = {
RKISP1_RSZ_MP_DEV_NAME,
RKISP1_RSZ_SP_DEV_NAME
};
struct media_pad *pads = rsz->pads;
struct v4l2_subdev *sd = &rsz->sd;
int ret;
if (rsz->id == RKISP1_SELFPATH) {
rsz->regs_base = RKISP1_CIF_SRSZ_BASE;
rsz->config = &rkisp1_rsz_config_sp;
} else {
rsz->regs_base = RKISP1_CIF_MRSZ_BASE;
rsz->config = &rkisp1_rsz_config_mp;
}
v4l2_subdev_init(sd, &rkisp1_rsz_ops);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
sd->entity.ops = &rkisp1_rsz_media_ops;
sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
sd->owner = THIS_MODULE;
strscpy(sd->name, dev_names[rsz->id], sizeof(sd->name));
pads[RKISP1_RSZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
MEDIA_PAD_FL_MUST_CONNECT;
pads[RKISP1_RSZ_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE |
MEDIA_PAD_FL_MUST_CONNECT;
rsz->pixel_enc = RKISP1_DEF_PIXEL_ENC;
mutex_init(&rsz->ops_lock);
ret = media_entity_pads_init(&sd->entity, RKISP1_RSZ_PAD_MAX, pads);
if (ret)
goto error;
ret = v4l2_device_register_subdev(&rsz->rkisp1->v4l2_dev, sd);
if (ret) {
dev_err(sd->dev, "Failed to register resizer subdev\n");
goto error;
}
rkisp1_rsz_init_config(sd, &state);
return 0;
error:
media_entity_cleanup(&sd->entity);
mutex_destroy(&rsz->ops_lock);
return ret;
}
int rkisp1_resizer_devs_register(struct rkisp1_device *rkisp1)
{
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(rkisp1->resizer_devs); i++) {
struct rkisp1_resizer *rsz = &rkisp1->resizer_devs[i];
rsz->rkisp1 = rkisp1;
rsz->id = i;
ret = rkisp1_rsz_register(rsz);
if (ret) {
rsz->rkisp1 = NULL;
rkisp1_resizer_devs_unregister(rkisp1);
return ret;
}
}
return 0;
}
void rkisp1_resizer_devs_unregister(struct rkisp1_device *rkisp1)
{
struct rkisp1_resizer *mp = &rkisp1->resizer_devs[RKISP1_MAINPATH];
struct rkisp1_resizer *sp = &rkisp1->resizer_devs[RKISP1_SELFPATH];
rkisp1_rsz_unregister(mp);
rkisp1_rsz_unregister(sp);
}
| linux-master | drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c |
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Rockchip ISP1 Driver - V4l capture device
*
* Copyright (C) 2019 Collabora, Ltd.
*
* Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
#include "rkisp1-common.h"
/*
* NOTE: There are two capture video devices in rkisp1, selfpath and mainpath.
*
* Differences between selfpath and mainpath:
* available mp sink input: isp
* available sp sink input: isp, dma (TODO)
* available mp sink pad fmts: yuv422, raw
* available sp sink pad fmts: yuv422, yuv420, ...
* available mp source fmts: yuv, raw, jpeg (TODO)
* available sp source fmts: yuv, rgb
*/
#define RKISP1_SP_DEV_NAME RKISP1_DRIVER_NAME "_selfpath"
#define RKISP1_MP_DEV_NAME RKISP1_DRIVER_NAME "_mainpath"
#define RKISP1_MIN_BUFFERS_NEEDED 3
enum rkisp1_plane {
RKISP1_PLANE_Y = 0,
RKISP1_PLANE_CB = 1,
RKISP1_PLANE_CR = 2
};
/*
* @fourcc: pixel format
* @uv_swap: set if the Cb and Cr components are swapped, for YUV formats
* @write_format: defines how YCbCr picture data is written to memory
* @output_format: defines the selfpath output format
* @mbus: the mbus code on the src resizer pad that matches the pixel format
*/
struct rkisp1_capture_fmt_cfg {
u32 fourcc;
u8 uv_swap;
u32 write_format;
u32 output_format;
u32 mbus;
};
struct rkisp1_capture_ops {
void (*config)(struct rkisp1_capture *cap);
void (*stop)(struct rkisp1_capture *cap);
void (*enable)(struct rkisp1_capture *cap);
void (*disable)(struct rkisp1_capture *cap);
void (*set_data_path)(struct rkisp1_capture *cap);
bool (*is_stopped)(struct rkisp1_capture *cap);
};
struct rkisp1_capture_config {
const struct rkisp1_capture_fmt_cfg *fmts;
int fmt_size;
struct {
u32 y_size_init;
u32 cb_size_init;
u32 cr_size_init;
u32 y_base_ad_init;
u32 cb_base_ad_init;
u32 cr_base_ad_init;
u32 y_offs_cnt_init;
u32 cb_offs_cnt_init;
u32 cr_offs_cnt_init;
} mi;
};
/*
* The supported pixel formats for mainpath. NOTE: pixel formats with identical
* 'mbus' are grouped together, as assumed and used by rkisp1_cap_enum_mbus_codes().
*/
static const struct rkisp1_capture_fmt_cfg rkisp1_mp_fmts[] = {
/* yuv422 */
{
.fourcc = V4L2_PIX_FMT_YUYV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUVINT,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_YUV422P,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV61,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV16M,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV61M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_YVU422M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
},
/* yuv400 */
{
.fourcc = V4L2_PIX_FMT_GREY,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
},
/* yuv420 */
{
.fourcc = V4L2_PIX_FMT_NV21,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV12,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV21M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV12M,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_SPLA,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_YVU420,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
},
/* raw */
{
.fourcc = V4L2_PIX_FMT_SRGGB8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
.mbus = MEDIA_BUS_FMT_SRGGB8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
.mbus = MEDIA_BUS_FMT_SGRBG8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
.mbus = MEDIA_BUS_FMT_SGBRG8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR8,
.write_format = RKISP1_MI_CTRL_MP_WRITE_YUV_PLA_OR_RAW8,
.mbus = MEDIA_BUS_FMT_SBGGR8_1X8,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB10,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
.mbus = MEDIA_BUS_FMT_SRGGB10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG10,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
.mbus = MEDIA_BUS_FMT_SGRBG10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG10,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
.mbus = MEDIA_BUS_FMT_SGBRG10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR10,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
.mbus = MEDIA_BUS_FMT_SBGGR10_1X10,
}, {
.fourcc = V4L2_PIX_FMT_SRGGB12,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
.mbus = MEDIA_BUS_FMT_SRGGB12_1X12,
}, {
.fourcc = V4L2_PIX_FMT_SGRBG12,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
.mbus = MEDIA_BUS_FMT_SGRBG12_1X12,
}, {
.fourcc = V4L2_PIX_FMT_SGBRG12,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
.mbus = MEDIA_BUS_FMT_SGBRG12_1X12,
}, {
.fourcc = V4L2_PIX_FMT_SBGGR12,
.write_format = RKISP1_MI_CTRL_MP_WRITE_RAW12,
.mbus = MEDIA_BUS_FMT_SBGGR12_1X12,
},
};
/*
* The supported pixel formats for selfpath. NOTE: pixel formats with identical
* 'mbus' are grouped together, as assumed and used by rkisp1_cap_enum_mbus_codes().
*/
static const struct rkisp1_capture_fmt_cfg rkisp1_sp_fmts[] = {
/* yuv422 */
{
.fourcc = V4L2_PIX_FMT_YUYV,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_INT,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_YUV422P,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV16,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV61,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV16M,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_NV61M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_YVU422M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
},
/* yuv400 */
{
.fourcc = V4L2_PIX_FMT_GREY,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV422,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
},
/* rgb */
{
.fourcc = V4L2_PIX_FMT_XBGR32,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB888,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.fourcc = V4L2_PIX_FMT_RGB565,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_RGB565,
.mbus = MEDIA_BUS_FMT_YUYV8_2X8,
},
/* yuv420 */
{
.fourcc = V4L2_PIX_FMT_NV21,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV12,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV21M,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_NV12M,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_SPLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
.uv_swap = 0,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
}, {
.fourcc = V4L2_PIX_FMT_YVU420,
.uv_swap = 1,
.write_format = RKISP1_MI_CTRL_SP_WRITE_PLA,
.output_format = RKISP1_MI_CTRL_SP_OUTPUT_YUV420,
.mbus = MEDIA_BUS_FMT_YUYV8_1_5X8,
},
};
static const struct rkisp1_capture_config rkisp1_capture_config_mp = {
.fmts = rkisp1_mp_fmts,
.fmt_size = ARRAY_SIZE(rkisp1_mp_fmts),
.mi = {
.y_size_init = RKISP1_CIF_MI_MP_Y_SIZE_INIT,
.cb_size_init = RKISP1_CIF_MI_MP_CB_SIZE_INIT,
.cr_size_init = RKISP1_CIF_MI_MP_CR_SIZE_INIT,
.y_base_ad_init = RKISP1_CIF_MI_MP_Y_BASE_AD_INIT,
.cb_base_ad_init = RKISP1_CIF_MI_MP_CB_BASE_AD_INIT,
.cr_base_ad_init = RKISP1_CIF_MI_MP_CR_BASE_AD_INIT,
.y_offs_cnt_init = RKISP1_CIF_MI_MP_Y_OFFS_CNT_INIT,
.cb_offs_cnt_init = RKISP1_CIF_MI_MP_CB_OFFS_CNT_INIT,
.cr_offs_cnt_init = RKISP1_CIF_MI_MP_CR_OFFS_CNT_INIT,
},
};
static const struct rkisp1_capture_config rkisp1_capture_config_sp = {
.fmts = rkisp1_sp_fmts,
.fmt_size = ARRAY_SIZE(rkisp1_sp_fmts),
.mi = {
.y_size_init = RKISP1_CIF_MI_SP_Y_SIZE_INIT,
.cb_size_init = RKISP1_CIF_MI_SP_CB_SIZE_INIT,
.cr_size_init = RKISP1_CIF_MI_SP_CR_SIZE_INIT,
.y_base_ad_init = RKISP1_CIF_MI_SP_Y_BASE_AD_INIT,
.cb_base_ad_init = RKISP1_CIF_MI_SP_CB_BASE_AD_INIT,
.cr_base_ad_init = RKISP1_CIF_MI_SP_CR_BASE_AD_INIT,
.y_offs_cnt_init = RKISP1_CIF_MI_SP_Y_OFFS_CNT_INIT,
.cb_offs_cnt_init = RKISP1_CIF_MI_SP_CB_OFFS_CNT_INIT,
.cr_offs_cnt_init = RKISP1_CIF_MI_SP_CR_OFFS_CNT_INIT,
},
};
static inline struct rkisp1_vdev_node *
rkisp1_vdev_to_node(struct video_device *vdev)
{
return container_of(vdev, struct rkisp1_vdev_node, vdev);
}
int rkisp1_cap_enum_mbus_codes(struct rkisp1_capture *cap,
struct v4l2_subdev_mbus_code_enum *code)
{
const struct rkisp1_capture_fmt_cfg *fmts = cap->config->fmts;
/*
* Initialize curr_mbus to the non-existent mbus code 0 to ensure it
* differs from fmts[0].mbus.
*/
u32 curr_mbus = 0;
int i, n = 0;
for (i = 0; i < cap->config->fmt_size; i++) {
if (fmts[i].mbus == curr_mbus)
continue;
curr_mbus = fmts[i].mbus;
if (n++ == code->index) {
code->code = curr_mbus;
return 0;
}
}
return -EINVAL;
}
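/*
 * Example: because entries in the format tables are grouped by identical
 * 'mbus' codes, the loop above reports each group exactly once. For the
 * mainpath table this enumerates MEDIA_BUS_FMT_YUYV8_2X8 (index 0, covering
 * the YUV422 and YUV400 pixel formats), MEDIA_BUS_FMT_YUYV8_1_5X8 (index 1,
 * YUV420), followed by one code per Bayer raw format.
 */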
/* ----------------------------------------------------------------------------
* Stream operations for self-picture path (sp) and main-picture path (mp)
*/
static void rkisp1_mi_config_ctrl(struct rkisp1_capture *cap)
{
u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
mi_ctrl &= ~GENMASK(17, 16);
mi_ctrl |= RKISP1_CIF_MI_CTRL_BURST_LEN_LUM_64;
mi_ctrl &= ~GENMASK(19, 18);
mi_ctrl |= RKISP1_CIF_MI_CTRL_BURST_LEN_CHROM_64;
mi_ctrl |= RKISP1_CIF_MI_CTRL_INIT_BASE_EN |
RKISP1_CIF_MI_CTRL_INIT_OFFSET_EN;
rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
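/*
 * The two GENMASK() clears above zero the luma (bits 17:16) and chroma
 * (bits 19:18) burst-length fields before the BURST_LEN_*_64 values are
 * ORed in, so stale values from a previous configuration cannot leak
 * through.
 */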
static u32 rkisp1_pixfmt_comp_size(const struct v4l2_pix_format_mplane *pixm,
unsigned int component)
{
/*
* For packed formats, plane_fmt[0].sizeimage is the sum of all
* components, so we need to calculate just the size of the Y component.
* See rkisp1_fill_pixfmt().
*/
if (!component && pixm->num_planes == 1)
return pixm->plane_fmt[0].bytesperline * pixm->height;
return pixm->plane_fmt[component].sizeimage;
}
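/*
 * Worked example: for single-planar V4L2_PIX_FMT_YUV420 at 640x480,
 * plane_fmt[0].sizeimage covers all three components (460800 bytes), while
 * the Y component alone is bytesperline * height = 640 * 480 = 307200
 * bytes (assuming bytesperline equals the width for an 8-bit luma plane).
 */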
static void rkisp1_irq_frame_end_enable(struct rkisp1_capture *cap)
{
u32 mi_imsc = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_IMSC);
mi_imsc |= RKISP1_CIF_MI_FRAME(cap);
rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_IMSC, mi_imsc);
}
static void rkisp1_mp_config(struct rkisp1_capture *cap)
{
const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
struct rkisp1_device *rkisp1 = cap->rkisp1;
u32 reg;
rkisp1_write(rkisp1, cap->config->mi.y_size_init,
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y));
rkisp1_write(rkisp1, cap->config->mi.cb_size_init,
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB));
rkisp1_write(rkisp1, cap->config->mi.cr_size_init,
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR));
rkisp1_irq_frame_end_enable(cap);
/* set uv swapping for semiplanar formats */
if (cap->pix.info->comp_planes == 2) {
reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
if (cap->pix.cfg->uv_swap)
reg |= RKISP1_CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP;
else
reg &= ~RKISP1_CIF_MI_XTD_FMT_CTRL_MP_CB_CR_SWAP;
rkisp1_write(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL, reg);
}
rkisp1_mi_config_ctrl(cap);
reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
reg &= ~RKISP1_MI_CTRL_MP_FMT_MASK;
reg |= cap->pix.cfg->write_format;
rkisp1_write(rkisp1, RKISP1_CIF_MI_CTRL, reg);
reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
reg |= RKISP1_CIF_MI_MP_AUTOUPDATE_ENABLE;
rkisp1_write(rkisp1, RKISP1_CIF_MI_CTRL, reg);
}
static void rkisp1_sp_config(struct rkisp1_capture *cap)
{
const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
struct rkisp1_device *rkisp1 = cap->rkisp1;
u32 mi_ctrl, reg;
rkisp1_write(rkisp1, cap->config->mi.y_size_init,
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y));
rkisp1_write(rkisp1, cap->config->mi.cb_size_init,
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB));
rkisp1_write(rkisp1, cap->config->mi.cr_size_init,
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR));
rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_WIDTH, pixm->width);
rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_HEIGHT, pixm->height);
rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_LLENGTH, cap->sp_y_stride);
rkisp1_irq_frame_end_enable(cap);
/* set uv swapping for semiplanar formats */
if (cap->pix.info->comp_planes == 2) {
reg = rkisp1_read(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL);
if (cap->pix.cfg->uv_swap)
reg |= RKISP1_CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP;
else
reg &= ~RKISP1_CIF_MI_XTD_FMT_CTRL_SP_CB_CR_SWAP;
rkisp1_write(rkisp1, RKISP1_CIF_MI_XTD_FORMAT_CTRL, reg);
}
rkisp1_mi_config_ctrl(cap);
mi_ctrl = rkisp1_read(rkisp1, RKISP1_CIF_MI_CTRL);
mi_ctrl &= ~RKISP1_MI_CTRL_SP_FMT_MASK;
mi_ctrl |= cap->pix.cfg->write_format |
RKISP1_MI_CTRL_SP_INPUT_YUV422 |
cap->pix.cfg->output_format |
RKISP1_CIF_MI_SP_AUTOUPDATE_ENABLE;
rkisp1_write(rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
static void rkisp1_mp_disable(struct rkisp1_capture *cap)
{
u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
mi_ctrl &= ~(RKISP1_CIF_MI_CTRL_MP_ENABLE |
RKISP1_CIF_MI_CTRL_RAW_ENABLE);
rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
static void rkisp1_sp_disable(struct rkisp1_capture *cap)
{
u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
mi_ctrl &= ~RKISP1_CIF_MI_CTRL_SP_ENABLE;
rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
static void rkisp1_mp_enable(struct rkisp1_capture *cap)
{
u32 mi_ctrl;
rkisp1_mp_disable(cap);
mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
if (v4l2_is_format_bayer(cap->pix.info))
mi_ctrl |= RKISP1_CIF_MI_CTRL_RAW_ENABLE;
/* YUV */
else
mi_ctrl |= RKISP1_CIF_MI_CTRL_MP_ENABLE;
rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
static void rkisp1_sp_enable(struct rkisp1_capture *cap)
{
u32 mi_ctrl = rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL);
mi_ctrl |= RKISP1_CIF_MI_CTRL_SP_ENABLE;
rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_CTRL, mi_ctrl);
}
static void rkisp1_mp_sp_stop(struct rkisp1_capture *cap)
{
if (!cap->is_streaming)
return;
rkisp1_write(cap->rkisp1, RKISP1_CIF_MI_ICR, RKISP1_CIF_MI_FRAME(cap));
cap->ops->disable(cap);
}
static bool rkisp1_mp_is_stopped(struct rkisp1_capture *cap)
{
u32 en = RKISP1_CIF_MI_CTRL_SHD_MP_IN_ENABLED |
RKISP1_CIF_MI_CTRL_SHD_RAW_OUT_ENABLED;
return !(rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL_SHD) & en);
}
static bool rkisp1_sp_is_stopped(struct rkisp1_capture *cap)
{
return !(rkisp1_read(cap->rkisp1, RKISP1_CIF_MI_CTRL_SHD) &
RKISP1_CIF_MI_CTRL_SHD_SP_IN_ENABLED);
}
static void rkisp1_mp_set_data_path(struct rkisp1_capture *cap)
{
u32 dpcl = rkisp1_read(cap->rkisp1, RKISP1_CIF_VI_DPCL);
dpcl = dpcl | RKISP1_CIF_VI_DPCL_CHAN_MODE_MP |
RKISP1_CIF_VI_DPCL_MP_MUX_MRSZ_MI;
rkisp1_write(cap->rkisp1, RKISP1_CIF_VI_DPCL, dpcl);
}
static void rkisp1_sp_set_data_path(struct rkisp1_capture *cap)
{
u32 dpcl = rkisp1_read(cap->rkisp1, RKISP1_CIF_VI_DPCL);
dpcl |= RKISP1_CIF_VI_DPCL_CHAN_MODE_SP;
rkisp1_write(cap->rkisp1, RKISP1_CIF_VI_DPCL, dpcl);
}
static const struct rkisp1_capture_ops rkisp1_capture_ops_mp = {
.config = rkisp1_mp_config,
.enable = rkisp1_mp_enable,
.disable = rkisp1_mp_disable,
.stop = rkisp1_mp_sp_stop,
.set_data_path = rkisp1_mp_set_data_path,
.is_stopped = rkisp1_mp_is_stopped,
};
static const struct rkisp1_capture_ops rkisp1_capture_ops_sp = {
.config = rkisp1_sp_config,
.enable = rkisp1_sp_enable,
.disable = rkisp1_sp_disable,
.stop = rkisp1_mp_sp_stop,
.set_data_path = rkisp1_sp_set_data_path,
.is_stopped = rkisp1_sp_is_stopped,
};
/* ----------------------------------------------------------------------------
* Frame buffer operations
*/
static int rkisp1_dummy_buf_create(struct rkisp1_capture *cap)
{
const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
struct rkisp1_dummy_buffer *dummy_buf = &cap->buf.dummy;
dummy_buf->size = max3(rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y),
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB),
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR));
/* The driver never accesses vaddr, so no kernel mapping is required. */
dummy_buf->vaddr = dma_alloc_attrs(cap->rkisp1->dev,
dummy_buf->size,
&dummy_buf->dma_addr,
GFP_KERNEL,
DMA_ATTR_NO_KERNEL_MAPPING);
if (!dummy_buf->vaddr)
return -ENOMEM;
return 0;
}
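/*
 * A worked sizing example (format and dimensions are illustrative):
 * for 640x480 YUV422P the component sizes are Y = 640 * 480 = 307200
 * and CB = CR = 320 * 480 = 153600, so the dummy buffer is 307200
 * bytes. It only needs to fit the largest single component, since
 * each of the Y/CB/CR base address registers may point at it
 * independently (see rkisp1_set_next_buf()).
 */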
static void rkisp1_dummy_buf_destroy(struct rkisp1_capture *cap)
{
dma_free_attrs(cap->rkisp1->dev,
cap->buf.dummy.size, cap->buf.dummy.vaddr,
cap->buf.dummy.dma_addr, DMA_ATTR_NO_KERNEL_MAPPING);
}
static void rkisp1_set_next_buf(struct rkisp1_capture *cap)
{
cap->buf.curr = cap->buf.next;
cap->buf.next = NULL;
if (!list_empty(&cap->buf.queue)) {
u32 *buff_addr;
cap->buf.next = list_first_entry(&cap->buf.queue, struct rkisp1_buffer, queue);
list_del(&cap->buf.next->queue);
buff_addr = cap->buf.next->buff_addr;
rkisp1_write(cap->rkisp1, cap->config->mi.y_base_ad_init,
buff_addr[RKISP1_PLANE_Y]);
/*
 * To support the grey format we capture the YUV422 planar
 * format from the camera and point the U and V planes at the
 * dummy buffer.
 */
if (cap->pix.cfg->fourcc == V4L2_PIX_FMT_GREY) {
rkisp1_write(cap->rkisp1,
cap->config->mi.cb_base_ad_init,
cap->buf.dummy.dma_addr);
rkisp1_write(cap->rkisp1,
cap->config->mi.cr_base_ad_init,
cap->buf.dummy.dma_addr);
} else {
rkisp1_write(cap->rkisp1,
cap->config->mi.cb_base_ad_init,
buff_addr[RKISP1_PLANE_CB]);
rkisp1_write(cap->rkisp1,
cap->config->mi.cr_base_ad_init,
buff_addr[RKISP1_PLANE_CR]);
}
} else {
/*
 * Use the dummy buffer allocated in rkisp1_dummy_buf_create()
 * to discard the data when no buffer is available.
 */
rkisp1_write(cap->rkisp1, cap->config->mi.y_base_ad_init,
cap->buf.dummy.dma_addr);
rkisp1_write(cap->rkisp1, cap->config->mi.cb_base_ad_init,
cap->buf.dummy.dma_addr);
rkisp1_write(cap->rkisp1, cap->config->mi.cr_base_ad_init,
cap->buf.dummy.dma_addr);
}
/* Set plane offsets */
rkisp1_write(cap->rkisp1, cap->config->mi.y_offs_cnt_init, 0);
rkisp1_write(cap->rkisp1, cap->config->mi.cb_offs_cnt_init, 0);
rkisp1_write(cap->rkisp1, cap->config->mi.cr_offs_cnt_init, 0);
}
/*
 * This function is called at frame end. The next frame is already
 * being processed, so we must set up the buffer for the frame after
 * that one, otherwise the hardware will overflow.
 */
static void rkisp1_handle_buffer(struct rkisp1_capture *cap)
{
struct rkisp1_isp *isp = &cap->rkisp1->isp;
struct rkisp1_buffer *curr_buf;
spin_lock(&cap->buf.lock);
curr_buf = cap->buf.curr;
if (curr_buf) {
curr_buf->vb.sequence = isp->frame_sequence;
curr_buf->vb.vb2_buf.timestamp = ktime_get_boottime_ns();
curr_buf->vb.field = V4L2_FIELD_NONE;
vb2_buffer_done(&curr_buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
} else {
cap->rkisp1->debug.frame_drop[cap->id]++;
}
rkisp1_set_next_buf(cap);
spin_unlock(&cap->buf.lock);
}
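/*
 * An illustrative timeline of the double-buffering scheme above, as
 * seen from the frame N end interrupt:
 *
 *   buf.curr - received frame N; completed here with vb2_buffer_done()
 *   buf.next - already latched into the shadow registers and receiving
 *              frame N + 1; it becomes buf.curr
 *   queue    - the head (or the dummy buffer when the queue is empty)
 *              is programmed into the *_BASE_AD_INIT registers by
 *              rkisp1_set_next_buf() for frame N + 2
 */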
irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
{
struct device *dev = ctx;
struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
unsigned int i;
u32 status;
status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
if (!status)
return IRQ_NONE;
rkisp1_write(rkisp1, RKISP1_CIF_MI_ICR, status);
for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); ++i) {
struct rkisp1_capture *cap = &rkisp1->capture_devs[i];
if (!(status & RKISP1_CIF_MI_FRAME(cap)))
continue;
if (!cap->is_stopping) {
rkisp1_handle_buffer(cap);
continue;
}
/*
 * Make sure the stream has actually stopped (its state can be
 * read from the shadow register) before waking up the thread,
 * which would immediately free all frame buffers. stop() takes
 * effect at the next frame end, when the configuration is
 * synced to the shadow registers.
 */
if (!cap->ops->is_stopped(cap)) {
cap->ops->stop(cap);
continue;
}
cap->is_stopping = false;
cap->is_streaming = false;
wake_up(&cap->done);
}
return IRQ_HANDLED;
}
/* ----------------------------------------------------------------------------
* Vb2 operations
*/
static int rkisp1_vb2_queue_setup(struct vb2_queue *queue,
unsigned int *num_buffers,
unsigned int *num_planes,
unsigned int sizes[],
struct device *alloc_devs[])
{
struct rkisp1_capture *cap = queue->drv_priv;
const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
unsigned int i;
if (*num_planes) {
if (*num_planes != pixm->num_planes)
return -EINVAL;
for (i = 0; i < pixm->num_planes; i++)
if (sizes[i] < pixm->plane_fmt[i].sizeimage)
return -EINVAL;
} else {
*num_planes = pixm->num_planes;
for (i = 0; i < pixm->num_planes; i++)
sizes[i] = pixm->plane_fmt[i].sizeimage;
}
return 0;
}
static int rkisp1_vb2_buf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct rkisp1_buffer *ispbuf =
container_of(vbuf, struct rkisp1_buffer, vb);
struct rkisp1_capture *cap = vb->vb2_queue->drv_priv;
const struct v4l2_pix_format_mplane *pixm = &cap->pix.fmt;
unsigned int i;
memset(ispbuf->buff_addr, 0, sizeof(ispbuf->buff_addr));
for (i = 0; i < pixm->num_planes; i++)
ispbuf->buff_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
/* Convert to non-MPLANE */
if (pixm->num_planes == 1) {
ispbuf->buff_addr[RKISP1_PLANE_CB] =
ispbuf->buff_addr[RKISP1_PLANE_Y] +
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_Y);
ispbuf->buff_addr[RKISP1_PLANE_CR] =
ispbuf->buff_addr[RKISP1_PLANE_CB] +
rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CB);
}
/*
 * UV swap can be supported for planar formats by swapping the
 * CB and CR plane addresses.
 */
if (cap->pix.info->comp_planes == 3 && cap->pix.cfg->uv_swap)
swap(ispbuf->buff_addr[RKISP1_PLANE_CR],
ispbuf->buff_addr[RKISP1_PLANE_CB]);
return 0;
}
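/*
 * A worked example of the single-plane fix-up above (numbers are
 * illustrative): for 640x480 V4L2_PIX_FMT_YUV420 with the buffer at
 * DMA address D:
 *
 *   buff_addr[RKISP1_PLANE_Y]  = D
 *   buff_addr[RKISP1_PLANE_CB] = D + 307200 (the Y size)
 *   buff_addr[RKISP1_PLANE_CR] = D + 307200 + 76800 (Y + CB sizes)
 *
 * For a planar format with uv_swap set, the CB and CR addresses are
 * then exchanged.
 */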
static void rkisp1_vb2_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct rkisp1_buffer *ispbuf =
container_of(vbuf, struct rkisp1_buffer, vb);
struct rkisp1_capture *cap = vb->vb2_queue->drv_priv;
spin_lock_irq(&cap->buf.lock);
list_add_tail(&ispbuf->queue, &cap->buf.queue);
spin_unlock_irq(&cap->buf.lock);
}
static int rkisp1_vb2_buf_prepare(struct vb2_buffer *vb)
{
struct rkisp1_capture *cap = vb->vb2_queue->drv_priv;
unsigned int i;
for (i = 0; i < cap->pix.fmt.num_planes; i++) {
unsigned long size = cap->pix.fmt.plane_fmt[i].sizeimage;
if (vb2_plane_size(vb, i) < size) {
dev_err(cap->rkisp1->dev,
"User buffer too small (%ld < %ld)\n",
vb2_plane_size(vb, i), size);
return -EINVAL;
}
vb2_set_plane_payload(vb, i, size);
}
return 0;
}
static void rkisp1_return_all_buffers(struct rkisp1_capture *cap,
enum vb2_buffer_state state)
{
struct rkisp1_buffer *buf;
spin_lock_irq(&cap->buf.lock);
if (cap->buf.curr) {
vb2_buffer_done(&cap->buf.curr->vb.vb2_buf, state);
cap->buf.curr = NULL;
}
if (cap->buf.next) {
vb2_buffer_done(&cap->buf.next->vb.vb2_buf, state);
cap->buf.next = NULL;
}
while (!list_empty(&cap->buf.queue)) {
buf = list_first_entry(&cap->buf.queue,
struct rkisp1_buffer, queue);
list_del(&buf->queue);
vb2_buffer_done(&buf->vb.vb2_buf, state);
}
spin_unlock_irq(&cap->buf.lock);
}
/*
 * Most registers inside the Rockchip ISP1 have a shadow register,
 * since they must not be changed while a frame is being processed.
 * Usually, each sub-module updates its shadow register after
 * processing the last pixel of a frame.
 */
static void rkisp1_cap_stream_enable(struct rkisp1_capture *cap)
{
struct rkisp1_device *rkisp1 = cap->rkisp1;
struct rkisp1_capture *other = &rkisp1->capture_devs[cap->id ^ 1];
cap->ops->set_data_path(cap);
cap->ops->config(cap);
/* Set up a buffer for the next frame */
spin_lock_irq(&cap->buf.lock);
rkisp1_set_next_buf(cap);
cap->ops->enable(cap);
/* It's safe to configure the ACTIVE and SHADOW registers for the
 * first stream. When the second stream is starting, however, do NOT
 * force an update, because that would also update the first stream.
 *
 * Forcing an update in the latter case would drop one more buffer
 * (two in total), since no buffer is held in a shadow register when
 * the second frame-end interrupt is received. This is also required
 * because the second frame may otherwise be corrupted, especially
 * when running at 120 fps.
 */
if (!other->is_streaming) {
/* force cfg update */
rkisp1_write(rkisp1, RKISP1_CIF_MI_INIT,
RKISP1_CIF_MI_INIT_SOFT_UPD);
rkisp1_set_next_buf(cap);
}
spin_unlock_irq(&cap->buf.lock);
cap->is_streaming = true;
}
static void rkisp1_cap_stream_disable(struct rkisp1_capture *cap)
{
int ret;
/* The stream should stop from the interrupt handler. If it doesn't, stop it by force. */
cap->is_stopping = true;
ret = wait_event_timeout(cap->done,
!cap->is_streaming,
msecs_to_jiffies(1000));
if (!ret) {
cap->rkisp1->debug.stop_timeout[cap->id]++;
cap->ops->stop(cap);
cap->is_stopping = false;
cap->is_streaming = false;
}
}
/*
* rkisp1_pipeline_stream_disable - disable nodes in the pipeline
*
* Call s_stream(false) in the reverse order from
* rkisp1_pipeline_stream_enable() and disable the DMA engine.
* Should be called before video_device_pipeline_stop()
*/
static void rkisp1_pipeline_stream_disable(struct rkisp1_capture *cap)
__must_hold(&cap->rkisp1->stream_lock)
{
struct rkisp1_device *rkisp1 = cap->rkisp1;
rkisp1_cap_stream_disable(cap);
/*
 * If the other capture is streaming, the isp and sensor nodes must
 * not be disabled, so skip them.
 */
if (rkisp1->pipe.start_count < 2)
v4l2_subdev_call(&rkisp1->isp.sd, video, s_stream, false);
v4l2_subdev_call(&rkisp1->resizer_devs[cap->id].sd, video, s_stream,
false);
}
/*
* rkisp1_pipeline_stream_enable - enable nodes in the pipeline
*
* Enable the DMA Engine and call s_stream(true) through the pipeline.
* Should be called after video_device_pipeline_start()
*/
static int rkisp1_pipeline_stream_enable(struct rkisp1_capture *cap)
__must_hold(&cap->rkisp1->stream_lock)
{
struct rkisp1_device *rkisp1 = cap->rkisp1;
int ret;
rkisp1_cap_stream_enable(cap);
ret = v4l2_subdev_call(&rkisp1->resizer_devs[cap->id].sd, video,
s_stream, true);
if (ret)
goto err_disable_cap;
/*
 * If the other capture is streaming, the isp and sensor nodes are
 * already enabled, so skip them.
 */
if (rkisp1->pipe.start_count > 1)
return 0;
ret = v4l2_subdev_call(&rkisp1->isp.sd, video, s_stream, true);
if (ret)
goto err_disable_rsz;
return 0;
err_disable_rsz:
v4l2_subdev_call(&rkisp1->resizer_devs[cap->id].sd, video, s_stream,
false);
err_disable_cap:
rkisp1_cap_stream_disable(cap);
return ret;
}
static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue)
{
struct rkisp1_capture *cap = queue->drv_priv;
struct rkisp1_vdev_node *node = &cap->vnode;
struct rkisp1_device *rkisp1 = cap->rkisp1;
int ret;
mutex_lock(&cap->rkisp1->stream_lock);
rkisp1_pipeline_stream_disable(cap);
rkisp1_return_all_buffers(cap, VB2_BUF_STATE_ERROR);
v4l2_pipeline_pm_put(&node->vdev.entity);
ret = pm_runtime_put(rkisp1->dev);
if (ret < 0)
dev_err(rkisp1->dev, "power down failed error:%d\n", ret);
rkisp1_dummy_buf_destroy(cap);
video_device_pipeline_stop(&node->vdev);
mutex_unlock(&cap->rkisp1->stream_lock);
}
static int
rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count)
{
struct rkisp1_capture *cap = queue->drv_priv;
struct media_entity *entity = &cap->vnode.vdev.entity;
int ret;
mutex_lock(&cap->rkisp1->stream_lock);
ret = video_device_pipeline_start(&cap->vnode.vdev, &cap->rkisp1->pipe);
if (ret) {
dev_err(cap->rkisp1->dev, "start pipeline failed %d\n", ret);
goto err_ret_buffers;
}
ret = rkisp1_dummy_buf_create(cap);
if (ret)
goto err_pipeline_stop;
ret = pm_runtime_resume_and_get(cap->rkisp1->dev);
if (ret < 0) {
dev_err(cap->rkisp1->dev, "power up failed %d\n", ret);
goto err_destroy_dummy;
}
ret = v4l2_pipeline_pm_get(entity);
if (ret) {
dev_err(cap->rkisp1->dev, "open cif pipeline failed %d\n", ret);
goto err_pipe_pm_put;
}
ret = rkisp1_pipeline_stream_enable(cap);
if (ret)
goto err_v4l2_pm_put;
mutex_unlock(&cap->rkisp1->stream_lock);
return 0;
err_v4l2_pm_put:
v4l2_pipeline_pm_put(entity);
err_pipe_pm_put:
pm_runtime_put(cap->rkisp1->dev);
err_destroy_dummy:
rkisp1_dummy_buf_destroy(cap);
err_pipeline_stop:
video_device_pipeline_stop(&cap->vnode.vdev);
err_ret_buffers:
rkisp1_return_all_buffers(cap, VB2_BUF_STATE_QUEUED);
mutex_unlock(&cap->rkisp1->stream_lock);
return ret;
}
static const struct vb2_ops rkisp1_vb2_ops = {
.queue_setup = rkisp1_vb2_queue_setup,
.buf_init = rkisp1_vb2_buf_init,
.buf_queue = rkisp1_vb2_buf_queue,
.buf_prepare = rkisp1_vb2_buf_prepare,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.stop_streaming = rkisp1_vb2_stop_streaming,
.start_streaming = rkisp1_vb2_start_streaming,
};
/* ----------------------------------------------------------------------------
* IOCTLs operations
*/
static const struct v4l2_format_info *
rkisp1_fill_pixfmt(struct v4l2_pix_format_mplane *pixm,
enum rkisp1_stream_id id)
{
struct v4l2_plane_pix_format *plane_y = &pixm->plane_fmt[0];
const struct v4l2_format_info *info;
unsigned int i;
u32 stride;
memset(pixm->plane_fmt, 0, sizeof(pixm->plane_fmt));
info = v4l2_format_info(pixm->pixelformat);
pixm->num_planes = info->mem_planes;
stride = info->bpp[0] * pixm->width;
/* Self path supports custom stride but Main path doesn't */
if (id == RKISP1_MAINPATH || plane_y->bytesperline < stride)
plane_y->bytesperline = stride;
plane_y->sizeimage = plane_y->bytesperline * pixm->height;
/* normalize stride to pixels per line */
stride = DIV_ROUND_UP(plane_y->bytesperline, info->bpp[0]);
for (i = 1; i < info->comp_planes; i++) {
struct v4l2_plane_pix_format *plane = &pixm->plane_fmt[i];
/* bytesperline for the other components derives from the Y component */
plane->bytesperline = DIV_ROUND_UP(stride, info->hdiv) *
info->bpp[i];
plane->sizeimage = plane->bytesperline *
DIV_ROUND_UP(pixm->height, info->vdiv);
}
/*
* If pixfmt is packed, then plane_fmt[0] should contain the total size
* considering all components. plane_fmt[i] for i > 0 should be ignored
* by userspace as mem_planes == 1, but we are keeping information there
* for convenience.
*/
if (info->mem_planes == 1)
for (i = 1; i < info->comp_planes; i++)
plane_y->sizeimage += pixm->plane_fmt[i].sizeimage;
return info;
}
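/*
 * A worked example of the computation above (dimensions are
 * illustrative): for 640x480 V4L2_PIX_FMT_NV12 (mem_planes = 1,
 * comp_planes = 2, bpp = {1, 2}, hdiv = vdiv = 2):
 *
 *   plane_fmt[0].bytesperline = 1 * 640 = 640
 *   plane_fmt[0].sizeimage    = 640 * 480 = 307200
 *   plane_fmt[1].bytesperline = DIV_ROUND_UP(640, 2) * 2 = 640
 *   plane_fmt[1].sizeimage    = 640 * DIV_ROUND_UP(480, 2) = 153600
 *
 * Since mem_planes == 1, the CbCr size is then folded into
 * plane_fmt[0].sizeimage: 307200 + 153600 = 460800, i.e. 12 bits
 * per pixel.
 */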
static const struct rkisp1_capture_fmt_cfg *
rkisp1_find_fmt_cfg(const struct rkisp1_capture *cap, const u32 pixelfmt)
{
unsigned int i;
for (i = 0; i < cap->config->fmt_size; i++) {
if (cap->config->fmts[i].fourcc == pixelfmt)
return &cap->config->fmts[i];
}
return NULL;
}
static void rkisp1_try_fmt(const struct rkisp1_capture *cap,
struct v4l2_pix_format_mplane *pixm,
const struct rkisp1_capture_fmt_cfg **fmt_cfg,
const struct v4l2_format_info **fmt_info)
{
const struct rkisp1_capture_config *config = cap->config;
const struct rkisp1_capture_fmt_cfg *fmt;
const struct v4l2_format_info *info;
static const unsigned int max_widths[] = {
RKISP1_RSZ_MP_SRC_MAX_WIDTH, RKISP1_RSZ_SP_SRC_MAX_WIDTH
};
static const unsigned int max_heights[] = {
RKISP1_RSZ_MP_SRC_MAX_HEIGHT, RKISP1_RSZ_SP_SRC_MAX_HEIGHT
};
fmt = rkisp1_find_fmt_cfg(cap, pixm->pixelformat);
if (!fmt) {
fmt = config->fmts;
pixm->pixelformat = fmt->fourcc;
}
pixm->width = clamp_t(u32, pixm->width,
RKISP1_RSZ_SRC_MIN_WIDTH, max_widths[cap->id]);
pixm->height = clamp_t(u32, pixm->height,
RKISP1_RSZ_SRC_MIN_HEIGHT, max_heights[cap->id]);
pixm->field = V4L2_FIELD_NONE;
pixm->colorspace = V4L2_COLORSPACE_DEFAULT;
pixm->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
pixm->quantization = V4L2_QUANTIZATION_DEFAULT;
info = rkisp1_fill_pixfmt(pixm, cap->id);
if (fmt_cfg)
*fmt_cfg = fmt;
if (fmt_info)
*fmt_info = info;
}
static void rkisp1_set_fmt(struct rkisp1_capture *cap,
struct v4l2_pix_format_mplane *pixm)
{
rkisp1_try_fmt(cap, pixm, &cap->pix.cfg, &cap->pix.info);
cap->pix.fmt = *pixm;
/* The SP supports a custom stride, expressed in pixels of the Y plane */
if (cap->id == RKISP1_SELFPATH)
cap->sp_y_stride = pixm->plane_fmt[0].bytesperline /
cap->pix.info->bpp[0];
}
static int rkisp1_try_fmt_vid_cap_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct rkisp1_capture *cap = video_drvdata(file);
rkisp1_try_fmt(cap, &f->fmt.pix_mp, NULL, NULL);
return 0;
}
static int rkisp1_enum_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct rkisp1_capture *cap = video_drvdata(file);
const struct rkisp1_capture_fmt_cfg *fmt = NULL;
unsigned int i, n = 0;
if (!f->mbus_code) {
if (f->index >= cap->config->fmt_size)
return -EINVAL;
fmt = &cap->config->fmts[f->index];
f->pixelformat = fmt->fourcc;
return 0;
}
for (i = 0; i < cap->config->fmt_size; i++) {
if (cap->config->fmts[i].mbus != f->mbus_code)
continue;
if (n++ == f->index) {
f->pixelformat = cap->config->fmts[i].fourcc;
return 0;
}
}
return -EINVAL;
}
static int rkisp1_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
static const unsigned int max_widths[] = {
RKISP1_RSZ_MP_SRC_MAX_WIDTH,
RKISP1_RSZ_SP_SRC_MAX_WIDTH,
};
static const unsigned int max_heights[] = {
RKISP1_RSZ_MP_SRC_MAX_HEIGHT,
RKISP1_RSZ_SP_SRC_MAX_HEIGHT,
};
struct rkisp1_capture *cap = video_drvdata(file);
if (fsize->index != 0)
return -EINVAL;
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
fsize->stepwise.min_width = RKISP1_RSZ_SRC_MIN_WIDTH;
fsize->stepwise.max_width = max_widths[cap->id];
fsize->stepwise.step_width = 2;
fsize->stepwise.min_height = RKISP1_RSZ_SRC_MIN_HEIGHT;
fsize->stepwise.max_height = max_heights[cap->id];
fsize->stepwise.step_height = 2;
return 0;
}
static int rkisp1_s_fmt_vid_cap_mplane(struct file *file,
void *priv, struct v4l2_format *f)
{
struct rkisp1_capture *cap = video_drvdata(file);
struct rkisp1_vdev_node *node =
rkisp1_vdev_to_node(&cap->vnode.vdev);
if (vb2_is_busy(&node->buf_queue))
return -EBUSY;
rkisp1_set_fmt(cap, &f->fmt.pix_mp);
return 0;
}
static int rkisp1_g_fmt_vid_cap_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct rkisp1_capture *cap = video_drvdata(file);
f->fmt.pix_mp = cap->pix.fmt;
return 0;
}
static int
rkisp1_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
{
strscpy(cap->driver, RKISP1_DRIVER_NAME, sizeof(cap->driver));
strscpy(cap->card, RKISP1_DRIVER_NAME, sizeof(cap->card));
strscpy(cap->bus_info, RKISP1_BUS_INFO, sizeof(cap->bus_info));
return 0;
}
static const struct v4l2_ioctl_ops rkisp1_v4l2_ioctl_ops = {
.vidioc_reqbufs = vb2_ioctl_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_expbuf = vb2_ioctl_expbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_streamon = vb2_ioctl_streamon,
.vidioc_streamoff = vb2_ioctl_streamoff,
.vidioc_try_fmt_vid_cap_mplane = rkisp1_try_fmt_vid_cap_mplane,
.vidioc_s_fmt_vid_cap_mplane = rkisp1_s_fmt_vid_cap_mplane,
.vidioc_g_fmt_vid_cap_mplane = rkisp1_g_fmt_vid_cap_mplane,
.vidioc_enum_fmt_vid_cap = rkisp1_enum_fmt_vid_cap_mplane,
.vidioc_enum_framesizes = rkisp1_enum_framesizes,
.vidioc_querycap = rkisp1_querycap,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
static int rkisp1_capture_link_validate(struct media_link *link)
{
struct video_device *vdev =
media_entity_to_video_device(link->sink->entity);
struct v4l2_subdev *sd =
media_entity_to_v4l2_subdev(link->source->entity);
struct rkisp1_capture *cap = video_get_drvdata(vdev);
const struct rkisp1_capture_fmt_cfg *fmt =
rkisp1_find_fmt_cfg(cap, cap->pix.fmt.pixelformat);
struct v4l2_subdev_format sd_fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.pad = link->source->index,
};
int ret;
ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
if (ret)
return ret;
if (sd_fmt.format.height != cap->pix.fmt.height ||
sd_fmt.format.width != cap->pix.fmt.width ||
sd_fmt.format.code != fmt->mbus) {
dev_dbg(cap->rkisp1->dev,
"link '%s':%u -> '%s':%u not valid: 0x%04x/%ux%u != 0x%04x/%ux%u\n",
link->source->entity->name, link->source->index,
link->sink->entity->name, link->sink->index,
sd_fmt.format.code, sd_fmt.format.width,
sd_fmt.format.height, fmt->mbus, cap->pix.fmt.width,
cap->pix.fmt.height);
return -EPIPE;
}
return 0;
}
/* ----------------------------------------------------------------------------
* core functions
*/
static const struct media_entity_operations rkisp1_media_ops = {
.link_validate = rkisp1_capture_link_validate,
};
static const struct v4l2_file_operations rkisp1_fops = {
.open = v4l2_fh_open,
.release = vb2_fop_release,
.unlocked_ioctl = video_ioctl2,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
};
static void rkisp1_unregister_capture(struct rkisp1_capture *cap)
{
if (!video_is_registered(&cap->vnode.vdev))
return;
media_entity_cleanup(&cap->vnode.vdev.entity);
vb2_video_unregister_device(&cap->vnode.vdev);
mutex_destroy(&cap->vnode.vlock);
}
void rkisp1_capture_devs_unregister(struct rkisp1_device *rkisp1)
{
struct rkisp1_capture *mp = &rkisp1->capture_devs[RKISP1_MAINPATH];
struct rkisp1_capture *sp = &rkisp1->capture_devs[RKISP1_SELFPATH];
rkisp1_unregister_capture(mp);
rkisp1_unregister_capture(sp);
}
static int rkisp1_register_capture(struct rkisp1_capture *cap)
{
static const char * const dev_names[] = {
RKISP1_MP_DEV_NAME, RKISP1_SP_DEV_NAME
};
struct v4l2_device *v4l2_dev = &cap->rkisp1->v4l2_dev;
struct video_device *vdev = &cap->vnode.vdev;
struct rkisp1_vdev_node *node;
struct vb2_queue *q;
int ret;
strscpy(vdev->name, dev_names[cap->id], sizeof(vdev->name));
node = rkisp1_vdev_to_node(vdev);
mutex_init(&node->vlock);
vdev->ioctl_ops = &rkisp1_v4l2_ioctl_ops;
vdev->release = video_device_release_empty;
vdev->fops = &rkisp1_fops;
vdev->minor = -1;
vdev->v4l2_dev = v4l2_dev;
vdev->lock = &node->vlock;
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
vdev->entity.ops = &rkisp1_media_ops;
video_set_drvdata(vdev, cap);
vdev->vfl_dir = VFL_DIR_RX;
node->pad.flags = MEDIA_PAD_FL_SINK;
q = &node->buf_queue;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
q->io_modes = VB2_MMAP | VB2_DMABUF;
q->drv_priv = cap;
q->ops = &rkisp1_vb2_ops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct rkisp1_buffer);
q->min_buffers_needed = RKISP1_MIN_BUFFERS_NEEDED;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &node->vlock;
q->dev = cap->rkisp1->dev;
ret = vb2_queue_init(q);
if (ret) {
dev_err(cap->rkisp1->dev,
"vb2 queue init failed (err=%d)\n", ret);
goto error;
}
vdev->queue = q;
ret = media_entity_pads_init(&vdev->entity, 1, &node->pad);
if (ret)
goto error;
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(cap->rkisp1->dev,
"failed to register %s, ret=%d\n", vdev->name, ret);
goto error;
}
v4l2_info(v4l2_dev, "registered %s as /dev/video%d\n", vdev->name,
vdev->num);
return 0;
error:
media_entity_cleanup(&vdev->entity);
mutex_destroy(&node->vlock);
return ret;
}
static void
rkisp1_capture_init(struct rkisp1_device *rkisp1, enum rkisp1_stream_id id)
{
struct rkisp1_capture *cap = &rkisp1->capture_devs[id];
struct v4l2_pix_format_mplane pixm;
memset(cap, 0, sizeof(*cap));
cap->id = id;
cap->rkisp1 = rkisp1;
INIT_LIST_HEAD(&cap->buf.queue);
init_waitqueue_head(&cap->done);
spin_lock_init(&cap->buf.lock);
if (cap->id == RKISP1_SELFPATH) {
cap->ops = &rkisp1_capture_ops_sp;
cap->config = &rkisp1_capture_config_sp;
} else {
cap->ops = &rkisp1_capture_ops_mp;
cap->config = &rkisp1_capture_config_mp;
}
cap->is_streaming = false;
memset(&pixm, 0, sizeof(pixm));
pixm.pixelformat = V4L2_PIX_FMT_YUYV;
pixm.width = RKISP1_DEFAULT_WIDTH;
pixm.height = RKISP1_DEFAULT_HEIGHT;
rkisp1_set_fmt(cap, &pixm);
}
int rkisp1_capture_devs_register(struct rkisp1_device *rkisp1)
{
unsigned int i;
int ret;
for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); i++) {
struct rkisp1_capture *cap = &rkisp1->capture_devs[i];
rkisp1_capture_init(rkisp1, i);
ret = rkisp1_register_capture(cap);
if (ret) {
rkisp1_capture_devs_unregister(rkisp1);
return ret;
}
}
return 0;
}
| linux-master | drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author: Jacob Chen <[email protected]>
*/
#include <linux/pm_runtime.h>
#include "rga-hw.h"
#include "rga.h"
enum e_rga_start_pos {
LT = 0,
LB = 1,
RT = 2,
RB = 3,
};
struct rga_addr_offset {
unsigned int y_off;
unsigned int u_off;
unsigned int v_off;
};
struct rga_corners_addr_offset {
struct rga_addr_offset left_top;
struct rga_addr_offset right_top;
struct rga_addr_offset left_bottom;
struct rga_addr_offset right_bottom;
};
static unsigned int rga_get_scaling(unsigned int src, unsigned int dst)
{
/*
* The rga hw scaling factor is a normalized inverse of the
* scaling factor.
* For example: When source width is 100 and destination width is 200
* (scaling of 2x), then the hw factor is NC * 100 / 200.
* The normalization factor (NC) is 2^16 = 0x10000.
*/
return (src > dst) ? ((dst << 16) / src) : ((src << 16) / dst);
}
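/*
 * Worked examples of the formula above (sizes are illustrative):
 *
 *   2x upscale,   src = 100, dst = 200: (100 << 16) / 200 = 0x8000
 *   2x downscale, src = 200, dst = 100: (100 << 16) / 200 = 0x8000
 *
 * The returned factor therefore never exceeds NC (0x10000) and always
 * encodes the smaller-over-larger ratio of the two sizes.
 */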
static struct rga_corners_addr_offset
rga_get_addr_offset(struct rga_frame *frm, unsigned int x, unsigned int y,
unsigned int w, unsigned int h)
{
struct rga_corners_addr_offset offsets;
struct rga_addr_offset *lt, *lb, *rt, *rb;
unsigned int x_div = 0,
y_div = 0, uv_stride = 0, pixel_width = 0, uv_factor = 0;
lt = &offsets.left_top;
lb = &offsets.left_bottom;
rt = &offsets.right_top;
rb = &offsets.right_bottom;
x_div = frm->fmt->x_div;
y_div = frm->fmt->y_div;
uv_factor = frm->fmt->uv_factor;
uv_stride = frm->stride / x_div;
pixel_width = frm->stride / frm->width;
lt->y_off = y * frm->stride + x * pixel_width;
lt->u_off =
frm->width * frm->height + (y / y_div) * uv_stride + x / x_div;
lt->v_off = lt->u_off + frm->width * frm->height / uv_factor;
lb->y_off = lt->y_off + (h - 1) * frm->stride;
lb->u_off = lt->u_off + (h / y_div - 1) * uv_stride;
lb->v_off = lt->v_off + (h / y_div - 1) * uv_stride;
rt->y_off = lt->y_off + (w - 1) * pixel_width;
rt->u_off = lt->u_off + w / x_div - 1;
rt->v_off = lt->v_off + w / x_div - 1;
rb->y_off = lb->y_off + (w - 1) * pixel_width;
rb->u_off = lb->u_off + w / x_div - 1;
rb->v_off = lb->v_off + w / x_div - 1;
return offsets;
}
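/*
 * A worked example (geometry is illustrative): for a 640x480 NV12
 * frame (stride = 640, x_div = 1, y_div = 2, uv_factor = 4) cropped
 * at x = 16, y = 32:
 *
 *   pixel_width = 640 / 640 = 1
 *   lt->y_off   = 32 * 640 + 16 * 1 = 20496
 *   lt->u_off   = 640 * 480 + (32 / 2) * 640 + 16 = 317456
 *
 * The remaining corners are derived from left_top by stepping
 * (w - 1) pixels to the right and/or (h - 1) lines down.
 */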
static struct rga_addr_offset *rga_lookup_draw_pos(struct
rga_corners_addr_offset
* offsets, u32 rotate_mode,
u32 mirr_mode)
{
static enum e_rga_start_pos rot_mir_point_matrix[4][4] = {
{
LT, RT, LB, RB,
},
{
RT, LT, RB, LB,
},
{
RB, LB, RT, LT,
},
{
LB, RB, LT, RT,
},
};
if (!offsets)
return NULL;
switch (rot_mir_point_matrix[rotate_mode][mirr_mode]) {
case LT:
return &offsets->left_top;
case LB:
return &offsets->left_bottom;
case RT:
return &offsets->right_top;
case RB:
return &offsets->right_bottom;
}
return NULL;
}
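/*
 * An illustrative lookup, assuming rot_mode values 0..3 correspond to
 * 0/90/180/270 degrees and mirr_mode values 0..3 to none/X/Y/X+Y
 * mirroring:
 *
 *   rotate 90, no mirror: rot_mir_point_matrix[1][0] = RT
 *   rotate 0,  X mirror:  rot_mir_point_matrix[0][1] = RT
 *
 * i.e. the destination is written starting from the corner that the
 * rotation/mirroring maps the source left-top pixel onto.
 */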
static void rga_cmd_set_src_addr(struct rga_ctx *ctx, void *mmu_pages)
{
struct rockchip_rga *rga = ctx->rga;
u32 *dest = rga->cmdbuf_virt;
unsigned int reg;
reg = RGA_MMU_SRC_BASE - RGA_MODE_BASE_REG;
dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
dest[reg >> 2] |= 0x7;
}
static void rga_cmd_set_src1_addr(struct rga_ctx *ctx, void *mmu_pages)
{
struct rockchip_rga *rga = ctx->rga;
u32 *dest = rga->cmdbuf_virt;
unsigned int reg;
reg = RGA_MMU_SRC1_BASE - RGA_MODE_BASE_REG;
dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
dest[reg >> 2] |= 0x7 << 4;
}
static void rga_cmd_set_dst_addr(struct rga_ctx *ctx, void *mmu_pages)
{
struct rockchip_rga *rga = ctx->rga;
u32 *dest = rga->cmdbuf_virt;
unsigned int reg;
reg = RGA_MMU_DST_BASE - RGA_MODE_BASE_REG;
dest[reg >> 2] = virt_to_phys(mmu_pages) >> 4;
reg = RGA_MMU_CTRL1 - RGA_MODE_BASE_REG;
dest[reg >> 2] |= 0x7 << 8;
}
static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
{
struct rockchip_rga *rga = ctx->rga;
u32 *dest = rga->cmdbuf_virt;
unsigned int scale_dst_w, scale_dst_h;
unsigned int src_h, src_w, src_x, src_y, dst_h, dst_w, dst_x, dst_y;
union rga_src_info src_info;
union rga_dst_info dst_info;
union rga_src_x_factor x_factor;
union rga_src_y_factor y_factor;
union rga_src_vir_info src_vir_info;
union rga_src_act_info src_act_info;
union rga_dst_vir_info dst_vir_info;
union rga_dst_act_info dst_act_info;
struct rga_addr_offset *dst_offset;
struct rga_corners_addr_offset offsets;
struct rga_corners_addr_offset src_offsets;
src_h = ctx->in.crop.height;
src_w = ctx->in.crop.width;
src_x = ctx->in.crop.left;
src_y = ctx->in.crop.top;
dst_h = ctx->out.crop.height;
dst_w = ctx->out.crop.width;
dst_x = ctx->out.crop.left;
dst_y = ctx->out.crop.top;
src_info.val = dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2];
dst_info.val = dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2];
x_factor.val = dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2];
y_factor.val = dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2];
src_vir_info.val = dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2];
src_act_info.val = dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2];
dst_vir_info.val = dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2];
dst_act_info.val = dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2];
src_info.data.format = ctx->in.fmt->hw_format;
src_info.data.swap = ctx->in.fmt->color_swap;
dst_info.data.format = ctx->out.fmt->hw_format;
dst_info.data.swap = ctx->out.fmt->color_swap;
/*
* CSC mode must only be set when the colorspace families differ between
* input and output. It must remain unset (zeroed) if both are the same.
*/
if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) &&
RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) {
switch (ctx->in.colorspace) {
case V4L2_COLORSPACE_REC709:
src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
break;
default:
src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT601_R0;
break;
}
}
if (RGA_COLOR_FMT_IS_RGB(ctx->in.fmt->hw_format) &&
RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) {
switch (ctx->out.colorspace) {
case V4L2_COLORSPACE_REC709:
dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
break;
default:
dst_info.data.csc_mode = RGA_DST_CSC_MODE_BT601_R0;
break;
}
}
if (ctx->vflip)
src_info.data.mir_mode |= RGA_SRC_MIRR_MODE_X;
if (ctx->hflip)
src_info.data.mir_mode |= RGA_SRC_MIRR_MODE_Y;
switch (ctx->rotate) {
case 90:
src_info.data.rot_mode = RGA_SRC_ROT_MODE_90_DEGREE;
break;
case 180:
src_info.data.rot_mode = RGA_SRC_ROT_MODE_180_DEGREE;
break;
case 270:
src_info.data.rot_mode = RGA_SRC_ROT_MODE_270_DEGREE;
break;
default:
src_info.data.rot_mode = RGA_SRC_ROT_MODE_0_DEGREE;
break;
}
/*
 * Calculate the up/down scaling mode/factor.
 *
 * The RGA scales the picture first and rotates it second, so we
 * need to swap the width/height when the rotation is 90 or 270
 * degrees.
 */
if (src_info.data.rot_mode == RGA_SRC_ROT_MODE_90_DEGREE ||
src_info.data.rot_mode == RGA_SRC_ROT_MODE_270_DEGREE) {
if (rga->version.major == 0 || rga->version.minor == 0) {
if (dst_w == src_h)
src_h -= 8;
if (abs(src_w - dst_h) < 16)
src_w -= 16;
}
scale_dst_h = dst_w;
scale_dst_w = dst_h;
} else {
scale_dst_w = dst_w;
scale_dst_h = dst_h;
}
if (src_w == scale_dst_w) {
src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_NO;
x_factor.val = 0;
} else if (src_w > scale_dst_w) {
src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_DOWN;
x_factor.data.down_scale_factor =
rga_get_scaling(src_w, scale_dst_w) + 1;
} else {
src_info.data.hscl_mode = RGA_SRC_HSCL_MODE_UP;
x_factor.data.up_scale_factor =
rga_get_scaling(src_w - 1, scale_dst_w - 1);
}
if (src_h == scale_dst_h) {
src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_NO;
y_factor.val = 0;
} else if (src_h > scale_dst_h) {
src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_DOWN;
y_factor.data.down_scale_factor =
rga_get_scaling(src_h, scale_dst_h) + 1;
} else {
src_info.data.vscl_mode = RGA_SRC_VSCL_MODE_UP;
y_factor.data.up_scale_factor =
rga_get_scaling(src_h - 1, scale_dst_h - 1);
}
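/*
 * Worked factor examples for the branches above (sizes illustrative):
 *
 *   downscale 200 -> 100:
 *       down_scale_factor = rga_get_scaling(200, 100) + 1
 *                         = 0x8000 + 1 = 0x8001
 *   upscale 100 -> 200:
 *       up_scale_factor = rga_get_scaling(99, 199)
 *                       = (99 << 16) / 199 = 0x7f5b
 *
 * The (size - 1) terms in the upscale case are consistent with
 * edge-aligned interpolation between the first and last pixels.
 */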
/*
 * Calculate the framebuffer virtual strides and active size.
 * Note that vir_stride and vir_width are expressed in 4-byte words.
 */
src_vir_info.data.vir_stride = ctx->in.stride >> 2;
src_vir_info.data.vir_width = ctx->in.stride >> 2;
src_act_info.data.act_height = src_h - 1;
src_act_info.data.act_width = src_w - 1;
dst_vir_info.data.vir_stride = ctx->out.stride >> 2;
dst_act_info.data.act_height = dst_h - 1;
dst_act_info.data.act_width = dst_w - 1;
/*
 * Calculate the source framebuffer base addresses with the pixel
 * offset applied.
 */
src_offsets = rga_get_addr_offset(&ctx->in, src_x, src_y,
src_w, src_h);
/*
 * Configure the destination framebuffer base address with the pixel
 * offset applied.
 */
offsets = rga_get_addr_offset(&ctx->out, dst_x, dst_y, dst_w, dst_h);
dst_offset = rga_lookup_draw_pos(&offsets, src_info.data.rot_mode,
src_info.data.mir_mode);
dest[(RGA_SRC_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
src_offsets.left_top.y_off;
dest[(RGA_SRC_CB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
src_offsets.left_top.u_off;
dest[(RGA_SRC_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
src_offsets.left_top.v_off;
dest[(RGA_SRC_X_FACTOR - RGA_MODE_BASE_REG) >> 2] = x_factor.val;
dest[(RGA_SRC_Y_FACTOR - RGA_MODE_BASE_REG) >> 2] = y_factor.val;
dest[(RGA_SRC_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = src_vir_info.val;
dest[(RGA_SRC_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = src_act_info.val;
dest[(RGA_SRC_INFO - RGA_MODE_BASE_REG) >> 2] = src_info.val;
dest[(RGA_DST_Y_RGB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
dst_offset->y_off;
dest[(RGA_DST_CB_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
dst_offset->u_off;
dest[(RGA_DST_CR_BASE_ADDR - RGA_MODE_BASE_REG) >> 2] =
dst_offset->v_off;
dest[(RGA_DST_VIR_INFO - RGA_MODE_BASE_REG) >> 2] = dst_vir_info.val;
dest[(RGA_DST_ACT_INFO - RGA_MODE_BASE_REG) >> 2] = dst_act_info.val;
dest[(RGA_DST_INFO - RGA_MODE_BASE_REG) >> 2] = dst_info.val;
}
static void rga_cmd_set_mode(struct rga_ctx *ctx)
{
struct rockchip_rga *rga = ctx->rga;
u32 *dest = rga->cmdbuf_virt;
union rga_mode_ctrl mode;
union rga_alpha_ctrl0 alpha_ctrl0;
union rga_alpha_ctrl1 alpha_ctrl1;
mode.val = 0;
alpha_ctrl0.val = 0;
alpha_ctrl1.val = 0;
mode.data.gradient_sat = 1;
mode.data.render = RGA_MODE_RENDER_BITBLT;
mode.data.bitblt = RGA_MODE_BITBLT_MODE_SRC_TO_DST;
/* disable alpha blending */
dest[(RGA_ALPHA_CTRL0 - RGA_MODE_BASE_REG) >> 2] = alpha_ctrl0.val;
dest[(RGA_ALPHA_CTRL1 - RGA_MODE_BASE_REG) >> 2] = alpha_ctrl1.val;
dest[(RGA_MODE_CTRL - RGA_MODE_BASE_REG) >> 2] = mode.val;
}
static void rga_cmd_set(struct rga_ctx *ctx)
{
struct rockchip_rga *rga = ctx->rga;
memset(rga->cmdbuf_virt, 0, RGA_CMDBUF_SIZE * 4);
rga_cmd_set_src_addr(ctx, rga->src_mmu_pages);
/*
 * Due to a hardware bug, the src1 MMU must also be configured
 * when alpha blending is used.
 */
rga_cmd_set_src1_addr(ctx, rga->dst_mmu_pages);
rga_cmd_set_dst_addr(ctx, rga->dst_mmu_pages);
rga_cmd_set_mode(ctx);
rga_cmd_set_trans_info(ctx);
rga_write(rga, RGA_CMD_BASE, rga->cmdbuf_phy);
/* sync CMD buf for RGA */
dma_sync_single_for_device(rga->dev, rga->cmdbuf_phy,
PAGE_SIZE, DMA_BIDIRECTIONAL);
}
void rga_hw_start(struct rockchip_rga *rga)
{
struct rga_ctx *ctx = rga->curr;
rga_cmd_set(ctx);
rga_write(rga, RGA_SYS_CTRL, 0x00);
rga_write(rga, RGA_SYS_CTRL, 0x22);
rga_write(rga, RGA_INT, 0x600);
rga_write(rga, RGA_CMD_CTRL, 0x1);
}
| linux-master | drivers/media/platform/rockchip/rga/rga-hw.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Fuzhou Rockchip Electronics Co.Ltd
* Author: Jacob Chen <[email protected]>
*/
#include <linux/pm_runtime.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-sg.h>
#include <media/videobuf2-v4l2.h>
#include "rga-hw.h"
#include "rga.h"
static int
rga_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct rga_ctx *ctx = vb2_get_drv_priv(vq);
struct rga_frame *f = rga_get_frame(ctx, vq->type);
if (IS_ERR(f))
return PTR_ERR(f);
if (*nplanes)
return sizes[0] < f->size ? -EINVAL : 0;
sizes[0] = f->size;
*nplanes = 1;
return 0;
}
static int rga_buf_prepare(struct vb2_buffer *vb)
{
struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct rga_frame *f = rga_get_frame(ctx, vb->vb2_queue->type);
if (IS_ERR(f))
return PTR_ERR(f);
vb2_set_plane_payload(vb, 0, f->size);
return 0;
}
static void rga_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static void rga_buf_return_buffers(struct vb2_queue *q,
enum vb2_buffer_state state)
{
struct rga_ctx *ctx = vb2_get_drv_priv(q);
struct vb2_v4l2_buffer *vbuf;
for (;;) {
if (V4L2_TYPE_IS_OUTPUT(q->type))
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
else
vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (!vbuf)
break;
v4l2_m2m_buf_done(vbuf, state);
}
}
static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct rga_ctx *ctx = vb2_get_drv_priv(q);
struct rockchip_rga *rga = ctx->rga;
int ret;
ret = pm_runtime_resume_and_get(rga->dev);
if (ret < 0) {
rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
return ret;
}
return 0;
}
static void rga_buf_stop_streaming(struct vb2_queue *q)
{
struct rga_ctx *ctx = vb2_get_drv_priv(q);
struct rockchip_rga *rga = ctx->rga;
rga_buf_return_buffers(q, VB2_BUF_STATE_ERROR);
pm_runtime_put(rga->dev);
}
const struct vb2_ops rga_qops = {
.queue_setup = rga_queue_setup,
.buf_prepare = rga_buf_prepare,
.buf_queue = rga_buf_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.start_streaming = rga_buf_start_streaming,
.stop_streaming = rga_buf_stop_streaming,
};
/* The RGA MMU is a single-level MMU, so it can't be used through the
 * IOMMU API. We use it more like a scatter-gather list.
 */
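/*
 * An illustrative view of the table built below: each 32-bit entry
 * holds the physical address of one page, indexed by the buffer's
 * virtual page number. Assuming PAGE_SIZE == 4096 and page-aligned,
 * page-multiple scatterlist segments (sg_dma_len() is shifted by
 * PAGE_SHIFT), a 16 KiB contiguous segment at physical address P
 * contributes four consecutive entries:
 *
 *   pages[n + 0] = P
 *   pages[n + 1] = P + 0x1000
 *   pages[n + 2] = P + 0x2000
 *   pages[n + 3] = P + 0x3000
 */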
void rga_buf_map(struct vb2_buffer *vb)
{
struct rga_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct rockchip_rga *rga = ctx->rga;
struct sg_table *sgt;
struct scatterlist *sgl;
unsigned int *pages;
unsigned int address, len, i, p;
unsigned int mapped_size = 0;
if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
pages = rga->src_mmu_pages;
else
pages = rga->dst_mmu_pages;
/* Create local MMU table for RGA */
sgt = vb2_plane_cookie(vb, 0);
for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
len = sg_dma_len(sgl) >> PAGE_SHIFT;
address = sg_phys(sgl);
for (p = 0; p < len; p++) {
dma_addr_t phys = address +
((dma_addr_t)p << PAGE_SHIFT);
pages[mapped_size + p] = phys;
}
mapped_size += len;
}
/* sync local MMU table for RGA */
dma_sync_single_for_device(rga->dev, virt_to_phys(pages),
8 * PAGE_SIZE, DMA_BIDIRECTIONAL);
}
| linux-master | drivers/media/platform/rockchip/rga/rga-buf.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
* Author: Jacob Chen <[email protected]>
*/
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/platform_device.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-sg.h>
#include <media/videobuf2-v4l2.h>
#include "rga-hw.h"
#include "rga.h"
static int debug;
module_param(debug, int, 0644);
static void device_run(void *prv)
{
struct rga_ctx *ctx = prv;
struct rockchip_rga *rga = ctx->rga;
struct vb2_v4l2_buffer *src, *dst;
unsigned long flags;
spin_lock_irqsave(&rga->ctrl_lock, flags);
rga->curr = ctx;
src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
rga_buf_map(&src->vb2_buf);
rga_buf_map(&dst->vb2_buf);
rga_hw_start(rga);
spin_unlock_irqrestore(&rga->ctrl_lock, flags);
}
static irqreturn_t rga_isr(int irq, void *prv)
{
struct rockchip_rga *rga = prv;
int intr;
intr = rga_read(rga, RGA_INT) & 0xf;
rga_mod(rga, RGA_INT, intr << 4, 0xf << 4);
if (intr & 0x04) {
struct vb2_v4l2_buffer *src, *dst;
struct rga_ctx *ctx = rga->curr;
WARN_ON(!ctx);
rga->curr = NULL;
src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
WARN_ON(!src);
WARN_ON(!dst);
v4l2_m2m_buf_copy_metadata(src, dst, true);
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
v4l2_m2m_job_finish(rga->m2m_dev, ctx->fh.m2m_ctx);
}
return IRQ_HANDLED;
}
static const struct v4l2_m2m_ops rga_m2m_ops = {
.device_run = device_run,
};
static int
queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
struct rga_ctx *ctx = priv;
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->ops = &rga_qops;
src_vq->mem_ops = &vb2_dma_sg_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->rga->mutex;
src_vq->dev = ctx->rga->v4l2_dev.dev;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->ops = &rga_qops;
dst_vq->mem_ops = &vb2_dma_sg_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->rga->mutex;
dst_vq->dev = ctx->rga->v4l2_dev.dev;
return vb2_queue_init(dst_vq);
}
static int rga_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct rga_ctx *ctx = container_of(ctrl->handler, struct rga_ctx,
ctrl_handler);
unsigned long flags;
spin_lock_irqsave(&ctx->rga->ctrl_lock, flags);
switch (ctrl->id) {
case V4L2_CID_HFLIP:
ctx->hflip = ctrl->val;
break;
case V4L2_CID_VFLIP:
ctx->vflip = ctrl->val;
break;
case V4L2_CID_ROTATE:
ctx->rotate = ctrl->val;
break;
case V4L2_CID_BG_COLOR:
ctx->fill_color = ctrl->val;
break;
}
spin_unlock_irqrestore(&ctx->rga->ctrl_lock, flags);
return 0;
}
static const struct v4l2_ctrl_ops rga_ctrl_ops = {
.s_ctrl = rga_s_ctrl,
};
static int rga_setup_ctrls(struct rga_ctx *ctx)
{
struct rockchip_rga *rga = ctx->rga;
v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
V4L2_CID_ROTATE, 0, 270, 90, 0);
v4l2_ctrl_new_std(&ctx->ctrl_handler, &rga_ctrl_ops,
V4L2_CID_BG_COLOR, 0, 0xffffffff, 1, 0);
if (ctx->ctrl_handler.error) {
int err = ctx->ctrl_handler.error;
v4l2_err(&rga->v4l2_dev, "%s failed\n", __func__);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
return err;
}
return 0;
}
static struct rga_fmt formats[] = {
{
.fourcc = V4L2_PIX_FMT_ARGB32,
.color_swap = RGA_COLOR_RB_SWAP,
.hw_format = RGA_COLOR_FMT_ABGR8888,
.depth = 32,
.uv_factor = 1,
.y_div = 1,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_XRGB32,
.color_swap = RGA_COLOR_RB_SWAP,
.hw_format = RGA_COLOR_FMT_XBGR8888,
.depth = 32,
.uv_factor = 1,
.y_div = 1,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_ABGR32,
.color_swap = RGA_COLOR_ALPHA_SWAP,
.hw_format = RGA_COLOR_FMT_ABGR8888,
.depth = 32,
.uv_factor = 1,
.y_div = 1,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_XBGR32,
.color_swap = RGA_COLOR_ALPHA_SWAP,
.hw_format = RGA_COLOR_FMT_XBGR8888,
.depth = 32,
.uv_factor = 1,
.y_div = 1,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_RGB24,
.color_swap = RGA_COLOR_NONE_SWAP,
.hw_format = RGA_COLOR_FMT_RGB888,
.depth = 24,
.uv_factor = 1,
.y_div = 1,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_BGR24,
.color_swap = RGA_COLOR_RB_SWAP,
.hw_format = RGA_COLOR_FMT_RGB888,
.depth = 24,
.uv_factor = 1,
.y_div = 1,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_ARGB444,
.color_swap = RGA_COLOR_RB_SWAP,
.hw_format = RGA_COLOR_FMT_ABGR4444,
.depth = 16,
.uv_factor = 1,
.y_div = 1,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_ARGB555,
.color_swap = RGA_COLOR_RB_SWAP,
.hw_format = RGA_COLOR_FMT_ABGR1555,
.depth = 16,
.uv_factor = 1,
.y_div = 1,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_RGB565,
.color_swap = RGA_COLOR_RB_SWAP,
.hw_format = RGA_COLOR_FMT_BGR565,
.depth = 16,
.uv_factor = 1,
.y_div = 1,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_NV21,
.color_swap = RGA_COLOR_UV_SWAP,
.hw_format = RGA_COLOR_FMT_YUV420SP,
.depth = 12,
.uv_factor = 4,
.y_div = 2,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_NV61,
.color_swap = RGA_COLOR_UV_SWAP,
.hw_format = RGA_COLOR_FMT_YUV422SP,
.depth = 16,
.uv_factor = 2,
.y_div = 1,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_NV12,
.color_swap = RGA_COLOR_NONE_SWAP,
.hw_format = RGA_COLOR_FMT_YUV420SP,
.depth = 12,
.uv_factor = 4,
.y_div = 2,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_NV16,
.color_swap = RGA_COLOR_NONE_SWAP,
.hw_format = RGA_COLOR_FMT_YUV422SP,
.depth = 16,
.uv_factor = 2,
.y_div = 1,
.x_div = 1,
},
{
.fourcc = V4L2_PIX_FMT_YUV420,
.color_swap = RGA_COLOR_NONE_SWAP,
.hw_format = RGA_COLOR_FMT_YUV420P,
.depth = 12,
.uv_factor = 4,
.y_div = 2,
.x_div = 2,
},
{
.fourcc = V4L2_PIX_FMT_YUV422P,
.color_swap = RGA_COLOR_NONE_SWAP,
.hw_format = RGA_COLOR_FMT_YUV422P,
.depth = 16,
.uv_factor = 2,
.y_div = 1,
.x_div = 2,
},
{
.fourcc = V4L2_PIX_FMT_YVU420,
.color_swap = RGA_COLOR_UV_SWAP,
.hw_format = RGA_COLOR_FMT_YUV420P,
.depth = 12,
.uv_factor = 4,
.y_div = 2,
.x_div = 2,
},
};
#define NUM_FORMATS ARRAY_SIZE(formats)
static struct rga_fmt *rga_fmt_find(struct v4l2_format *f)
{
unsigned int i;
for (i = 0; i < NUM_FORMATS; i++) {
if (formats[i].fourcc == f->fmt.pix.pixelformat)
return &formats[i];
}
return NULL;
}
static struct rga_frame def_frame = {
.width = DEFAULT_WIDTH,
.height = DEFAULT_HEIGHT,
.colorspace = V4L2_COLORSPACE_DEFAULT,
.crop.left = 0,
.crop.top = 0,
.crop.width = DEFAULT_WIDTH,
.crop.height = DEFAULT_HEIGHT,
.fmt = &formats[0],
};
struct rga_frame *rga_get_frame(struct rga_ctx *ctx, enum v4l2_buf_type type)
{
switch (type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return &ctx->in;
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
return &ctx->out;
default:
return ERR_PTR(-EINVAL);
}
}
static int rga_open(struct file *file)
{
struct rockchip_rga *rga = video_drvdata(file);
struct rga_ctx *ctx = NULL;
int ret = 0;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->rga = rga;
/* Set default formats */
ctx->in = def_frame;
ctx->out = def_frame;
if (mutex_lock_interruptible(&rga->mutex)) {
kfree(ctx);
return -ERESTARTSYS;
}
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rga->m2m_dev, ctx, &queue_init);
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
mutex_unlock(&rga->mutex);
kfree(ctx);
return ret;
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
rga_setup_ctrls(ctx);
/* Write the default values to the ctx struct */
v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
mutex_unlock(&rga->mutex);
return 0;
}
static int rga_release(struct file *file)
{
struct rga_ctx *ctx =
container_of(file->private_data, struct rga_ctx, fh);
struct rockchip_rga *rga = ctx->rga;
mutex_lock(&rga->mutex);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&rga->mutex);
return 0;
}
static const struct v4l2_file_operations rga_fops = {
.owner = THIS_MODULE,
.open = rga_open,
.release = rga_release,
.poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = v4l2_m2m_fop_mmap,
};
static int
vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap)
{
strscpy(cap->driver, RGA_NAME, sizeof(cap->driver));
strscpy(cap->card, "rockchip-rga", sizeof(cap->card));
strscpy(cap->bus_info, "platform:rga", sizeof(cap->bus_info));
return 0;
}
static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
{
struct rga_fmt *fmt;
if (f->index >= NUM_FORMATS)
return -EINVAL;
fmt = &formats[f->index];
f->pixelformat = fmt->fourcc;
return 0;
}
static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
{
struct rga_ctx *ctx = prv;
struct vb2_queue *vq;
struct rga_frame *frm;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
frm = rga_get_frame(ctx, f->type);
if (IS_ERR(frm))
return PTR_ERR(frm);
f->fmt.pix.width = frm->width;
f->fmt.pix.height = frm->height;
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.pixelformat = frm->fmt->fourcc;
f->fmt.pix.bytesperline = frm->stride;
f->fmt.pix.sizeimage = frm->size;
f->fmt.pix.colorspace = frm->colorspace;
return 0;
}
static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
{
struct rga_fmt *fmt;
fmt = rga_fmt_find(f);
if (!fmt) {
fmt = &formats[0];
f->fmt.pix.pixelformat = fmt->fourcc;
}
f->fmt.pix.field = V4L2_FIELD_NONE;
if (f->fmt.pix.width > MAX_WIDTH)
f->fmt.pix.width = MAX_WIDTH;
if (f->fmt.pix.height > MAX_HEIGHT)
f->fmt.pix.height = MAX_HEIGHT;
if (f->fmt.pix.width < MIN_WIDTH)
f->fmt.pix.width = MIN_WIDTH;
if (f->fmt.pix.height < MIN_HEIGHT)
f->fmt.pix.height = MIN_HEIGHT;
if (fmt->hw_format >= RGA_COLOR_FMT_YUV422SP)
f->fmt.pix.bytesperline = f->fmt.pix.width;
else
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * (f->fmt.pix.width * fmt->depth) >> 3;
return 0;
}
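/*
 * Worked size examples for the logic above (dimensions illustrative):
 *
 *   NV12 (YUV hw format, depth = 12), 640x480:
 *       bytesperline = 640 (the Y stride)
 *       sizeimage    = 480 * (640 * 12 / 8) = 460800
 *   RGB565 (depth = 16), 640x480:
 *       bytesperline = 640 * 16 / 8 = 1280
 *       sizeimage    = 480 * 1280 = 614400
 */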
static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
{
struct rga_ctx *ctx = prv;
struct rockchip_rga *rga = ctx->rga;
struct vb2_queue *vq;
struct rga_frame *frm;
struct rga_fmt *fmt;
int ret = 0;
/* Adjust all values according to the hardware capabilities and the
 * chosen format.
 */
ret = vidioc_try_fmt(file, prv, f);
if (ret)
return ret;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (vb2_is_busy(vq)) {
v4l2_err(&rga->v4l2_dev, "queue (%d) bust\n", f->type);
return -EBUSY;
}
frm = rga_get_frame(ctx, f->type);
if (IS_ERR(frm))
return PTR_ERR(frm);
fmt = rga_fmt_find(f);
if (!fmt)
return -EINVAL;
frm->width = f->fmt.pix.width;
frm->height = f->fmt.pix.height;
frm->size = f->fmt.pix.sizeimage;
frm->fmt = fmt;
frm->stride = f->fmt.pix.bytesperline;
frm->colorspace = f->fmt.pix.colorspace;
/* Reset crop settings */
frm->crop.left = 0;
frm->crop.top = 0;
frm->crop.width = frm->width;
frm->crop.height = frm->height;
return 0;
}
static int vidioc_g_selection(struct file *file, void *prv,
struct v4l2_selection *s)
{
struct rga_ctx *ctx = prv;
struct rga_frame *f;
bool use_frame = false;
f = rga_get_frame(ctx, s->type);
if (IS_ERR(f))
return PTR_ERR(f);
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
break;
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
break;
case V4L2_SEL_TGT_COMPOSE:
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
use_frame = true;
break;
case V4L2_SEL_TGT_CROP:
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
use_frame = true;
break;
default:
return -EINVAL;
}
if (use_frame) {
s->r = f->crop;
} else {
s->r.left = 0;
s->r.top = 0;
s->r.width = f->width;
s->r.height = f->height;
}
return 0;
}
static int vidioc_s_selection(struct file *file, void *prv,
struct v4l2_selection *s)
{
struct rga_ctx *ctx = prv;
struct rockchip_rga *rga = ctx->rga;
struct rga_frame *f;
int ret = 0;
f = rga_get_frame(ctx, s->type);
if (IS_ERR(f))
return PTR_ERR(f);
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE:
/*
* COMPOSE target is only valid for capture buffer type, return
* error for output buffer type
*/
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
break;
case V4L2_SEL_TGT_CROP:
/*
* CROP target is only valid for output buffer type, return
* error for capture buffer type
*/
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
break;
/*
 * The bounds and default crop/compose targets are invalid targets to
 * try/set.
 */
default:
return -EINVAL;
}
if (s->r.top < 0 || s->r.left < 0) {
v4l2_dbg(debug, 1, &rga->v4l2_dev,
"doesn't support negative values for top & left.\n");
return -EINVAL;
}
if (s->r.left + s->r.width > f->width ||
s->r.top + s->r.height > f->height ||
s->r.width < MIN_WIDTH || s->r.height < MIN_HEIGHT) {
v4l2_dbg(debug, 1, &rga->v4l2_dev, "unsupported crop value.\n");
return -EINVAL;
}
f->crop = s->r;
return ret;
}
static const struct v4l2_ioctl_ops rga_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt,
.vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
.vidioc_g_fmt_vid_out = vidioc_g_fmt,
.vidioc_try_fmt_vid_out = vidioc_try_fmt,
.vidioc_s_fmt_vid_out = vidioc_s_fmt,
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_selection = vidioc_g_selection,
.vidioc_s_selection = vidioc_s_selection,
};
static const struct video_device rga_videodev = {
.name = "rockchip-rga",
.fops = &rga_fops,
.ioctl_ops = &rga_ioctl_ops,
.minor = -1,
.release = video_device_release,
.vfl_dir = VFL_DIR_M2M,
.device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
};
static int rga_enable_clocks(struct rockchip_rga *rga)
{
int ret;
ret = clk_prepare_enable(rga->sclk);
if (ret) {
dev_err(rga->dev, "Cannot enable rga sclk: %d\n", ret);
return ret;
}
ret = clk_prepare_enable(rga->aclk);
if (ret) {
dev_err(rga->dev, "Cannot enable rga aclk: %d\n", ret);
goto err_disable_sclk;
}
ret = clk_prepare_enable(rga->hclk);
if (ret) {
dev_err(rga->dev, "Cannot enable rga hclk: %d\n", ret);
goto err_disable_aclk;
}
return 0;
err_disable_aclk:
clk_disable_unprepare(rga->aclk);
err_disable_sclk:
clk_disable_unprepare(rga->sclk);
return ret;
}
static void rga_disable_clocks(struct rockchip_rga *rga)
{
	/* disable in the reverse order of rga_enable_clocks() */
	clk_disable_unprepare(rga->hclk);
	clk_disable_unprepare(rga->aclk);
	clk_disable_unprepare(rga->sclk);
}
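/*
 * Illustrative sketch (not part of the driver): the same three clocks could
 * be handled with the clk_bulk API, which enables them in array order and
 * unwinds automatically on failure. A probe-time devm_clk_bulk_get() call
 * would be needed to fill in the clk pointers; this is an assumption, not
 * how the driver is currently wired.
 */
#if 0
static struct clk_bulk_data rga_clks[] = {
	{ .id = "sclk" },
	{ .id = "aclk" },
	{ .id = "hclk" },
};

static int rga_enable_clocks_bulk(struct rockchip_rga *rga)
{
	return clk_bulk_prepare_enable(ARRAY_SIZE(rga_clks), rga_clks);
}

static void rga_disable_clocks_bulk(struct rockchip_rga *rga)
{
	clk_bulk_disable_unprepare(ARRAY_SIZE(rga_clks), rga_clks);
}
#endif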
static int rga_parse_dt(struct rockchip_rga *rga)
{
struct reset_control *core_rst, *axi_rst, *ahb_rst;
core_rst = devm_reset_control_get(rga->dev, "core");
if (IS_ERR(core_rst)) {
dev_err(rga->dev, "failed to get core reset controller\n");
return PTR_ERR(core_rst);
}
axi_rst = devm_reset_control_get(rga->dev, "axi");
if (IS_ERR(axi_rst)) {
dev_err(rga->dev, "failed to get axi reset controller\n");
return PTR_ERR(axi_rst);
}
ahb_rst = devm_reset_control_get(rga->dev, "ahb");
if (IS_ERR(ahb_rst)) {
dev_err(rga->dev, "failed to get ahb reset controller\n");
return PTR_ERR(ahb_rst);
}
reset_control_assert(core_rst);
udelay(1);
reset_control_deassert(core_rst);
reset_control_assert(axi_rst);
udelay(1);
reset_control_deassert(axi_rst);
reset_control_assert(ahb_rst);
udelay(1);
reset_control_deassert(ahb_rst);
rga->sclk = devm_clk_get(rga->dev, "sclk");
if (IS_ERR(rga->sclk)) {
dev_err(rga->dev, "failed to get sclk clock\n");
return PTR_ERR(rga->sclk);
}
rga->aclk = devm_clk_get(rga->dev, "aclk");
if (IS_ERR(rga->aclk)) {
dev_err(rga->dev, "failed to get aclk clock\n");
return PTR_ERR(rga->aclk);
}
rga->hclk = devm_clk_get(rga->dev, "hclk");
if (IS_ERR(rga->hclk)) {
dev_err(rga->dev, "failed to get hclk clock\n");
return PTR_ERR(rga->hclk);
}
return 0;
}
static int rga_probe(struct platform_device *pdev)
{
struct rockchip_rga *rga;
struct video_device *vfd;
int ret = 0;
int irq;
if (!pdev->dev.of_node)
return -ENODEV;
rga = devm_kzalloc(&pdev->dev, sizeof(*rga), GFP_KERNEL);
if (!rga)
return -ENOMEM;
rga->dev = &pdev->dev;
spin_lock_init(&rga->ctrl_lock);
mutex_init(&rga->mutex);
ret = rga_parse_dt(rga);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Unable to parse OF data\n");
pm_runtime_enable(rga->dev);
rga->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rga->regs)) {
ret = PTR_ERR(rga->regs);
		goto err_disable_pm;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
		goto err_disable_pm;
}
ret = devm_request_irq(rga->dev, irq, rga_isr, 0,
dev_name(rga->dev), rga);
if (ret < 0) {
dev_err(rga->dev, "failed to request irq\n");
		goto err_disable_pm;
}
ret = v4l2_device_register(&pdev->dev, &rga->v4l2_dev);
if (ret)
		goto err_disable_pm;
vfd = video_device_alloc();
if (!vfd) {
v4l2_err(&rga->v4l2_dev, "Failed to allocate video device\n");
ret = -ENOMEM;
goto unreg_v4l2_dev;
}
*vfd = rga_videodev;
vfd->lock = &rga->mutex;
vfd->v4l2_dev = &rga->v4l2_dev;
video_set_drvdata(vfd, rga);
rga->vfd = vfd;
platform_set_drvdata(pdev, rga);
rga->m2m_dev = v4l2_m2m_init(&rga_m2m_ops);
if (IS_ERR(rga->m2m_dev)) {
v4l2_err(&rga->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(rga->m2m_dev);
goto rel_vdev;
}
ret = pm_runtime_resume_and_get(rga->dev);
if (ret < 0)
goto rel_m2m;
rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
v4l2_info(&rga->v4l2_dev, "HW Version: 0x%02x.%02x\n",
rga->version.major, rga->version.minor);
pm_runtime_put(rga->dev);
/* Create CMD buffer */
rga->cmdbuf_virt = dma_alloc_attrs(rga->dev, RGA_CMDBUF_SIZE,
&rga->cmdbuf_phy, GFP_KERNEL,
DMA_ATTR_WRITE_COMBINE);
if (!rga->cmdbuf_virt) {
ret = -ENOMEM;
goto rel_m2m;
}
rga->src_mmu_pages =
(unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
if (!rga->src_mmu_pages) {
ret = -ENOMEM;
goto free_dma;
}
rga->dst_mmu_pages =
(unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
if (!rga->dst_mmu_pages) {
ret = -ENOMEM;
goto free_src_pages;
}
def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
def_frame.size = def_frame.stride * def_frame.height;
ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&rga->v4l2_dev, "Failed to register video device\n");
goto free_dst_pages;
}
v4l2_info(&rga->v4l2_dev, "Registered %s as /dev/%s\n",
vfd->name, video_device_node_name(vfd));
return 0;
free_dst_pages:
free_pages((unsigned long)rga->dst_mmu_pages, 3);
free_src_pages:
free_pages((unsigned long)rga->src_mmu_pages, 3);
free_dma:
dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
rel_m2m:
v4l2_m2m_release(rga->m2m_dev);
rel_vdev:
video_device_release(vfd);
unreg_v4l2_dev:
v4l2_device_unregister(&rga->v4l2_dev);
err_disable_pm:
pm_runtime_disable(rga->dev);
return ret;
}
static void rga_remove(struct platform_device *pdev)
{
struct rockchip_rga *rga = platform_get_drvdata(pdev);
dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
free_pages((unsigned long)rga->src_mmu_pages, 3);
free_pages((unsigned long)rga->dst_mmu_pages, 3);
v4l2_info(&rga->v4l2_dev, "Removing\n");
v4l2_m2m_release(rga->m2m_dev);
video_unregister_device(rga->vfd);
v4l2_device_unregister(&rga->v4l2_dev);
pm_runtime_disable(rga->dev);
}
static int __maybe_unused rga_runtime_suspend(struct device *dev)
{
struct rockchip_rga *rga = dev_get_drvdata(dev);
rga_disable_clocks(rga);
return 0;
}
static int __maybe_unused rga_runtime_resume(struct device *dev)
{
struct rockchip_rga *rga = dev_get_drvdata(dev);
return rga_enable_clocks(rga);
}
static const struct dev_pm_ops rga_pm = {
SET_RUNTIME_PM_OPS(rga_runtime_suspend,
rga_runtime_resume, NULL)
};
static const struct of_device_id rockchip_rga_match[] = {
{
.compatible = "rockchip,rk3288-rga",
},
{
.compatible = "rockchip,rk3399-rga",
},
{},
};
MODULE_DEVICE_TABLE(of, rockchip_rga_match);
static struct platform_driver rga_pdrv = {
.probe = rga_probe,
.remove_new = rga_remove,
.driver = {
.name = RGA_NAME,
.pm = &rga_pm,
.of_match_table = rockchip_rga_match,
},
};
module_platform_driver(rga_pdrv);
MODULE_AUTHOR("Jacob Chen <[email protected]>");
MODULE_DESCRIPTION("Rockchip Raster 2d Graphic Acceleration Unit");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/platform/rockchip/rga/rga.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Samsung S5P G2D - 2D Graphics Accelerator Driver
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Kamil Debski, <[email protected]>
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "g2d.h"
#include "g2d-regs.h"
#define fh2ctx(__fh) container_of(__fh, struct g2d_ctx, fh)
static struct g2d_fmt formats[] = {
{
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_8888),
},
{
.fourcc = V4L2_PIX_FMT_RGB565X,
.depth = 16,
.hw = COLOR_MODE(ORDER_XRGB, MODE_RGB_565),
},
{
.fourcc = V4L2_PIX_FMT_RGB555X,
.depth = 16,
.hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_1555),
},
{
.fourcc = V4L2_PIX_FMT_RGB444,
.depth = 16,
.hw = COLOR_MODE(ORDER_XRGB, MODE_XRGB_4444),
},
{
.fourcc = V4L2_PIX_FMT_RGB24,
.depth = 24,
.hw = COLOR_MODE(ORDER_XRGB, MODE_PACKED_RGB_888),
},
};
#define NUM_FORMATS ARRAY_SIZE(formats)
static struct g2d_frame def_frame = {
.width = DEFAULT_WIDTH,
.height = DEFAULT_HEIGHT,
.c_width = DEFAULT_WIDTH,
.c_height = DEFAULT_HEIGHT,
.o_width = 0,
.o_height = 0,
.fmt = &formats[0],
.right = DEFAULT_WIDTH,
.bottom = DEFAULT_HEIGHT,
};
static struct g2d_fmt *find_fmt(struct v4l2_format *f)
{
unsigned int i;
for (i = 0; i < NUM_FORMATS; i++) {
if (formats[i].fourcc == f->fmt.pix.pixelformat)
return &formats[i];
}
return NULL;
}
static struct g2d_frame *get_frame(struct g2d_ctx *ctx,
enum v4l2_buf_type type)
{
switch (type) {
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
return &ctx->in;
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
return &ctx->out;
default:
return ERR_PTR(-EINVAL);
}
}
static int g2d_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct g2d_ctx *ctx = vb2_get_drv_priv(vq);
struct g2d_frame *f = get_frame(ctx, vq->type);
if (IS_ERR(f))
return PTR_ERR(f);
sizes[0] = f->size;
*nplanes = 1;
if (*nbuffers == 0)
*nbuffers = 1;
return 0;
}
static int g2d_buf_prepare(struct vb2_buffer *vb)
{
struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct g2d_frame *f = get_frame(ctx, vb->vb2_queue->type);
if (IS_ERR(f))
return PTR_ERR(f);
vb2_set_plane_payload(vb, 0, f->size);
return 0;
}
static void g2d_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static const struct vb2_ops g2d_qops = {
.queue_setup = g2d_queue_setup,
.buf_prepare = g2d_buf_prepare,
.buf_queue = g2d_buf_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct g2d_ctx *ctx = priv;
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
src_vq->drv_priv = ctx;
src_vq->ops = &g2d_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->mutex;
src_vq->dev = ctx->dev->v4l2_dev.dev;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
dst_vq->drv_priv = ctx;
dst_vq->ops = &g2d_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->mutex;
dst_vq->dev = ctx->dev->v4l2_dev.dev;
return vb2_queue_init(dst_vq);
}
static int g2d_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct g2d_ctx *ctx = container_of(ctrl->handler, struct g2d_ctx,
ctrl_handler);
unsigned long flags;
spin_lock_irqsave(&ctx->dev->ctrl_lock, flags);
switch (ctrl->id) {
case V4L2_CID_COLORFX:
if (ctrl->val == V4L2_COLORFX_NEGATIVE)
ctx->rop = ROP4_INVERT;
else
ctx->rop = ROP4_COPY;
break;
case V4L2_CID_HFLIP:
ctx->flip = ctx->ctrl_hflip->val | (ctx->ctrl_vflip->val << 1);
break;
}
spin_unlock_irqrestore(&ctx->dev->ctrl_lock, flags);
return 0;
}
static const struct v4l2_ctrl_ops g2d_ctrl_ops = {
.s_ctrl = g2d_s_ctrl,
};
static int g2d_setup_ctrls(struct g2d_ctx *ctx)
{
struct g2d_dev *dev = ctx->dev;
v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &g2d_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &g2d_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std_menu(
&ctx->ctrl_handler,
&g2d_ctrl_ops,
V4L2_CID_COLORFX,
V4L2_COLORFX_NEGATIVE,
~((1 << V4L2_COLORFX_NONE) | (1 << V4L2_COLORFX_NEGATIVE)),
V4L2_COLORFX_NONE);
if (ctx->ctrl_handler.error) {
int err = ctx->ctrl_handler.error;
v4l2_err(&dev->v4l2_dev, "g2d_setup_ctrls failed\n");
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
return err;
}
v4l2_ctrl_cluster(2, &ctx->ctrl_hflip);
return 0;
}
static int g2d_open(struct file *file)
{
struct g2d_dev *dev = video_drvdata(file);
struct g2d_ctx *ctx = NULL;
int ret = 0;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
/* Set default formats */
ctx->in = def_frame;
ctx->out = def_frame;
if (mutex_lock_interruptible(&dev->mutex)) {
kfree(ctx);
return -ERESTARTSYS;
}
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
mutex_unlock(&dev->mutex);
kfree(ctx);
return ret;
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
g2d_setup_ctrls(ctx);
/* Write the default values to the ctx struct */
v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
mutex_unlock(&dev->mutex);
v4l2_info(&dev->v4l2_dev, "instance opened\n");
return 0;
}
static int g2d_release(struct file *file)
{
struct g2d_dev *dev = video_drvdata(file);
struct g2d_ctx *ctx = fh2ctx(file->private_data);
mutex_lock(&dev->mutex);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
mutex_unlock(&dev->mutex);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
v4l2_info(&dev->v4l2_dev, "instance closed\n");
return 0;
}
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
strscpy(cap->driver, G2D_NAME, sizeof(cap->driver));
strscpy(cap->card, G2D_NAME, sizeof(cap->card));
cap->bus_info[0] = 0;
return 0;
}
static int vidioc_enum_fmt(struct file *file, void *prv, struct v4l2_fmtdesc *f)
{
if (f->index >= NUM_FORMATS)
return -EINVAL;
f->pixelformat = formats[f->index].fourcc;
return 0;
}
static int vidioc_g_fmt(struct file *file, void *prv, struct v4l2_format *f)
{
struct g2d_ctx *ctx = prv;
struct vb2_queue *vq;
struct g2d_frame *frm;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
frm = get_frame(ctx, f->type);
if (IS_ERR(frm))
return PTR_ERR(frm);
f->fmt.pix.width = frm->width;
f->fmt.pix.height = frm->height;
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.pixelformat = frm->fmt->fourcc;
f->fmt.pix.bytesperline = (frm->width * frm->fmt->depth) >> 3;
f->fmt.pix.sizeimage = frm->size;
return 0;
}
static int vidioc_try_fmt(struct file *file, void *prv, struct v4l2_format *f)
{
struct g2d_fmt *fmt;
enum v4l2_field *field;
fmt = find_fmt(f);
if (!fmt)
return -EINVAL;
field = &f->fmt.pix.field;
if (*field == V4L2_FIELD_ANY)
*field = V4L2_FIELD_NONE;
else if (*field != V4L2_FIELD_NONE)
return -EINVAL;
if (f->fmt.pix.width > MAX_WIDTH)
f->fmt.pix.width = MAX_WIDTH;
if (f->fmt.pix.height > MAX_HEIGHT)
f->fmt.pix.height = MAX_HEIGHT;
if (f->fmt.pix.width < 1)
f->fmt.pix.width = 1;
if (f->fmt.pix.height < 1)
f->fmt.pix.height = 1;
f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
return 0;
}
static int vidioc_s_fmt(struct file *file, void *prv, struct v4l2_format *f)
{
struct g2d_ctx *ctx = prv;
struct g2d_dev *dev = ctx->dev;
struct vb2_queue *vq;
struct g2d_frame *frm;
struct g2d_fmt *fmt;
int ret = 0;
	/* Adjust all values according to the hardware capabilities
	 * and the chosen format. */
ret = vidioc_try_fmt(file, prv, f);
if (ret)
return ret;
vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
if (vb2_is_busy(vq)) {
v4l2_err(&dev->v4l2_dev, "queue (%d) bust\n", f->type);
return -EBUSY;
}
frm = get_frame(ctx, f->type);
if (IS_ERR(frm))
return PTR_ERR(frm);
fmt = find_fmt(f);
if (!fmt)
return -EINVAL;
frm->width = f->fmt.pix.width;
frm->height = f->fmt.pix.height;
frm->size = f->fmt.pix.sizeimage;
/* Reset crop settings */
frm->o_width = 0;
frm->o_height = 0;
frm->c_width = frm->width;
frm->c_height = frm->height;
frm->right = frm->width;
frm->bottom = frm->height;
frm->fmt = fmt;
frm->stride = f->fmt.pix.bytesperline;
return 0;
}
static int vidioc_g_selection(struct file *file, void *prv,
struct v4l2_selection *s)
{
struct g2d_ctx *ctx = prv;
struct g2d_frame *f;
f = get_frame(ctx, s->type);
if (IS_ERR(f))
return PTR_ERR(f);
switch (s->target) {
case V4L2_SEL_TGT_CROP:
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
return -EINVAL;
break;
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
break;
default:
return -EINVAL;
}
switch (s->target) {
case V4L2_SEL_TGT_CROP:
case V4L2_SEL_TGT_COMPOSE:
		s->r.left = f->o_width;
		s->r.top = f->o_height;
s->r.width = f->c_width;
s->r.height = f->c_height;
break;
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
s->r.left = 0;
s->r.top = 0;
s->r.width = f->width;
s->r.height = f->height;
break;
default:
return -EINVAL;
}
return 0;
}
static int vidioc_try_selection(struct file *file, void *prv,
const struct v4l2_selection *s)
{
struct g2d_ctx *ctx = prv;
struct g2d_dev *dev = ctx->dev;
struct g2d_frame *f;
f = get_frame(ctx, s->type);
if (IS_ERR(f))
return PTR_ERR(f);
if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
if (s->target != V4L2_SEL_TGT_COMPOSE)
return -EINVAL;
} else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
if (s->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
}
if (s->r.top < 0 || s->r.left < 0) {
v4l2_err(&dev->v4l2_dev,
"doesn't support negative values for top & left\n");
return -EINVAL;
}
return 0;
}
static int vidioc_s_selection(struct file *file, void *prv,
struct v4l2_selection *s)
{
struct g2d_ctx *ctx = prv;
struct g2d_frame *f;
int ret;
ret = vidioc_try_selection(file, prv, s);
if (ret)
return ret;
f = get_frame(ctx, s->type);
if (IS_ERR(f))
return PTR_ERR(f);
f->c_width = s->r.width;
f->c_height = s->r.height;
f->o_width = s->r.left;
f->o_height = s->r.top;
f->bottom = f->o_height + f->c_height;
f->right = f->o_width + f->c_width;
return 0;
}
static void device_run(void *prv)
{
struct g2d_ctx *ctx = prv;
struct g2d_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *src, *dst;
unsigned long flags;
u32 cmd = 0;
dev->curr = ctx;
src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
clk_enable(dev->gate);
g2d_reset(dev);
spin_lock_irqsave(&dev->ctrl_lock, flags);
g2d_set_src_size(dev, &ctx->in);
g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
g2d_set_dst_size(dev, &ctx->out);
g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0));
g2d_set_rop4(dev, ctx->rop);
g2d_set_flip(dev, ctx->flip);
if (ctx->in.c_width != ctx->out.c_width ||
ctx->in.c_height != ctx->out.c_height) {
if (dev->variant->hw_rev == TYPE_G2D_3X)
cmd |= CMD_V3_ENABLE_STRETCH;
else
g2d_set_v41_stretch(dev, &ctx->in, &ctx->out);
}
g2d_set_cmd(dev, cmd);
g2d_start(dev);
spin_unlock_irqrestore(&dev->ctrl_lock, flags);
}
static irqreturn_t g2d_isr(int irq, void *prv)
{
struct g2d_dev *dev = prv;
struct g2d_ctx *ctx = dev->curr;
struct vb2_v4l2_buffer *src, *dst;
g2d_clear_int(dev);
clk_disable(dev->gate);
BUG_ON(ctx == NULL);
src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
BUG_ON(src == NULL);
BUG_ON(dst == NULL);
dst->timecode = src->timecode;
dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
dst->flags |=
src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
dev->curr = NULL;
return IRQ_HANDLED;
}
static const struct v4l2_file_operations g2d_fops = {
.owner = THIS_MODULE,
.open = g2d_open,
.release = g2d_release,
.poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = v4l2_m2m_fop_mmap,
};
static const struct v4l2_ioctl_ops g2d_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt,
.vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
.vidioc_g_fmt_vid_out = vidioc_g_fmt,
.vidioc_try_fmt_vid_out = vidioc_try_fmt,
.vidioc_s_fmt_vid_out = vidioc_s_fmt,
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_selection = vidioc_g_selection,
.vidioc_s_selection = vidioc_s_selection,
};
static const struct video_device g2d_videodev = {
.name = G2D_NAME,
.fops = &g2d_fops,
.ioctl_ops = &g2d_ioctl_ops,
.minor = -1,
.release = video_device_release,
.vfl_dir = VFL_DIR_M2M,
};
static const struct v4l2_m2m_ops g2d_m2m_ops = {
.device_run = device_run,
};
static const struct of_device_id exynos_g2d_match[];
static int g2d_probe(struct platform_device *pdev)
{
struct g2d_dev *dev;
struct video_device *vfd;
const struct of_device_id *of_id;
int ret = 0;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
spin_lock_init(&dev->ctrl_lock);
mutex_init(&dev->mutex);
atomic_set(&dev->num_inst, 0);
dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs))
return PTR_ERR(dev->regs);
dev->clk = clk_get(&pdev->dev, "sclk_fimg2d");
if (IS_ERR(dev->clk)) {
dev_err(&pdev->dev, "failed to get g2d clock\n");
return -ENXIO;
}
ret = clk_prepare(dev->clk);
if (ret) {
dev_err(&pdev->dev, "failed to prepare g2d clock\n");
goto put_clk;
}
dev->gate = clk_get(&pdev->dev, "fimg2d");
if (IS_ERR(dev->gate)) {
dev_err(&pdev->dev, "failed to get g2d clock gate\n");
ret = -ENXIO;
goto unprep_clk;
}
ret = clk_prepare(dev->gate);
if (ret) {
dev_err(&pdev->dev, "failed to prepare g2d clock gate\n");
goto put_clk_gate;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto unprep_clk_gate;
dev->irq = ret;
ret = devm_request_irq(&pdev->dev, dev->irq, g2d_isr,
0, pdev->name, dev);
if (ret) {
dev_err(&pdev->dev, "failed to install IRQ\n");
goto unprep_clk_gate;
}
vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
goto unprep_clk_gate;
vfd = video_device_alloc();
if (!vfd) {
v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
ret = -ENOMEM;
goto unreg_v4l2_dev;
}
*vfd = g2d_videodev;
set_bit(V4L2_FL_QUIRK_INVERTED_CROP, &vfd->flags);
vfd->lock = &dev->mutex;
vfd->v4l2_dev = &dev->v4l2_dev;
vfd->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
platform_set_drvdata(pdev, dev);
dev->m2m_dev = v4l2_m2m_init(&g2d_m2m_ops);
if (IS_ERR(dev->m2m_dev)) {
v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(dev->m2m_dev);
goto rel_vdev;
}
def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
of_id = of_match_node(exynos_g2d_match, pdev->dev.of_node);
if (!of_id) {
ret = -ENODEV;
goto free_m2m;
}
dev->variant = (struct g2d_variant *)of_id->data;
ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
goto free_m2m;
}
video_set_drvdata(vfd, dev);
dev->vfd = vfd;
v4l2_info(&dev->v4l2_dev, "device registered as /dev/video%d\n",
vfd->num);
return 0;
free_m2m:
v4l2_m2m_release(dev->m2m_dev);
rel_vdev:
video_device_release(vfd);
unreg_v4l2_dev:
v4l2_device_unregister(&dev->v4l2_dev);
unprep_clk_gate:
clk_unprepare(dev->gate);
put_clk_gate:
clk_put(dev->gate);
unprep_clk:
clk_unprepare(dev->clk);
put_clk:
clk_put(dev->clk);
return ret;
}
static void g2d_remove(struct platform_device *pdev)
{
struct g2d_dev *dev = platform_get_drvdata(pdev);
v4l2_info(&dev->v4l2_dev, "Removing " G2D_NAME);
v4l2_m2m_release(dev->m2m_dev);
video_unregister_device(dev->vfd);
v4l2_device_unregister(&dev->v4l2_dev);
vb2_dma_contig_clear_max_seg_size(&pdev->dev);
clk_unprepare(dev->gate);
clk_put(dev->gate);
clk_unprepare(dev->clk);
clk_put(dev->clk);
}
static struct g2d_variant g2d_drvdata_v3x = {
.hw_rev = TYPE_G2D_3X, /* Revision 3.0 for S5PV210 and Exynos4210 */
};
static struct g2d_variant g2d_drvdata_v4x = {
.hw_rev = TYPE_G2D_4X, /* Revision 4.1 for Exynos4X12 and Exynos5 */
};
static const struct of_device_id exynos_g2d_match[] = {
{
.compatible = "samsung,s5pv210-g2d",
.data = &g2d_drvdata_v3x,
}, {
.compatible = "samsung,exynos4212-g2d",
.data = &g2d_drvdata_v4x,
},
{},
};
MODULE_DEVICE_TABLE(of, exynos_g2d_match);
static struct platform_driver g2d_pdrv = {
.probe = g2d_probe,
.remove_new = g2d_remove,
.driver = {
.name = G2D_NAME,
.of_match_table = exynos_g2d_match,
},
};
module_platform_driver(g2d_pdrv);
MODULE_AUTHOR("Kamil Debski <[email protected]>");
MODULE_DESCRIPTION("S5P G2D 2d graphics accelerator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/platform/samsung/s5p-g2d/g2d.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Samsung S5P G2D - 2D Graphics Accelerator Driver
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* Kamil Debski, <[email protected]>
*/
#include <linux/io.h>
#include "g2d.h"
#include "g2d-regs.h"
#define w(x, a) writel((x), d->regs + (a))
#define r(a) readl(d->regs + (a))
/* g2d_reset() soft-resets the G2D block, returning its registers to defaults */
void g2d_reset(struct g2d_dev *d)
{
w(1, SOFT_RESET_REG);
}
void g2d_set_src_size(struct g2d_dev *d, struct g2d_frame *f)
{
u32 n;
w(0, SRC_SELECT_REG);
w(f->stride & 0xFFFF, SRC_STRIDE_REG);
n = f->o_height & 0xFFF;
n <<= 16;
n |= f->o_width & 0xFFF;
w(n, SRC_LEFT_TOP_REG);
n = f->bottom & 0xFFF;
n <<= 16;
n |= f->right & 0xFFF;
w(n, SRC_RIGHT_BOTTOM_REG);
w(f->fmt->hw, SRC_COLOR_MODE_REG);
}
void g2d_set_src_addr(struct g2d_dev *d, dma_addr_t a)
{
w(a, SRC_BASE_ADDR_REG);
}
void g2d_set_dst_size(struct g2d_dev *d, struct g2d_frame *f)
{
u32 n;
w(0, DST_SELECT_REG);
w(f->stride & 0xFFFF, DST_STRIDE_REG);
n = f->o_height & 0xFFF;
n <<= 16;
n |= f->o_width & 0xFFF;
w(n, DST_LEFT_TOP_REG);
n = f->bottom & 0xFFF;
n <<= 16;
n |= f->right & 0xFFF;
w(n, DST_RIGHT_BOTTOM_REG);
w(f->fmt->hw, DST_COLOR_MODE_REG);
}
void g2d_set_dst_addr(struct g2d_dev *d, dma_addr_t a)
{
w(a, DST_BASE_ADDR_REG);
}
void g2d_set_rop4(struct g2d_dev *d, u32 r)
{
w(r, ROP4_REG);
}
void g2d_set_flip(struct g2d_dev *d, u32 r)
{
w(r, SRC_MSK_DIRECT_REG);
}
void g2d_set_v41_stretch(struct g2d_dev *d, struct g2d_frame *src,
struct g2d_frame *dst)
{
w(DEFAULT_SCALE_MODE, SRC_SCALE_CTRL_REG);
	/* inverse scaling factor: src is the numerator */
w((src->c_width << 16) / dst->c_width, SRC_XSCALE_REG);
w((src->c_height << 16) / dst->c_height, SRC_YSCALE_REG);
}
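/*
 * Worked example (not part of the driver): the stretch registers take the
 * inverse scale factor in 16.16 fixed point, so upscaling a 320-pixel-wide
 * crop to 640 pixels programs (320 << 16) / 640 = 0x8000, i.e. 0.5.
 */
#if 0
static u32 g2d_inverse_scale(u32 src, u32 dst)
{
	return (src << 16) / dst;	/* caller must guarantee dst != 0 */
}
#endif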
void g2d_set_cmd(struct g2d_dev *d, u32 c)
{
w(c, BITBLT_COMMAND_REG);
}
void g2d_start(struct g2d_dev *d)
{
/* Clear cache */
if (d->variant->hw_rev == TYPE_G2D_3X)
w(0x7, CACHECTL_REG);
/* Enable interrupt */
w(1, INTEN_REG);
/* Start G2D engine */
w(1, BITBLT_START_REG);
}
void g2d_clear_int(struct g2d_dev *d)
{
w(1, INTC_PEND_REG);
}
| linux-master | drivers/media/platform/samsung/s5p-g2d/g2d-hw.c |
// SPDX-License-Identifier: GPL-2.0-only
/* linux/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.c
*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Jacek Anaszewski <[email protected]>
*/
#include <linux/io.h>
#include <linux/videodev2.h>
#include <linux/delay.h>
#include "jpeg-core.h"
#include "jpeg-regs.h"
#include "jpeg-hw-exynos3250.h"
void exynos3250_jpeg_reset(void __iomem *regs)
{
u32 reg = 1;
int count = 1000;
writel(1, regs + EXYNOS3250_SW_RESET);
	/* no way other than polling to tell when the JPEG IP becomes operational */
while (reg != 0 && --count > 0) {
udelay(1);
cpu_relax();
reg = readl(regs + EXYNOS3250_SW_RESET);
}
	/* then poll until a write to the JPGDRI scratch register reads back */
	reg = 0;
	count = 1000;
	while (reg != 1 && --count > 0) {
writel(1, regs + EXYNOS3250_JPGDRI);
udelay(1);
cpu_relax();
reg = readl(regs + EXYNOS3250_JPGDRI);
}
writel(0, regs + EXYNOS3250_JPGDRI);
}
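/*
 * Illustrative sketch (not part of the driver): the first polling loop above
 * could be written with readl_poll_timeout() from <linux/iopoll.h>, bounding
 * the wait in microseconds rather than by iteration count (the _atomic
 * variant would match the busy-wait behaviour in atomic context).
 */
#if 0
#include <linux/iopoll.h>

static int exynos3250_jpeg_wait_sw_reset(void __iomem *regs)
{
	u32 reg;

	/* poll every 1 us, give up after 1000 us */
	return readl_poll_timeout(regs + EXYNOS3250_SW_RESET, reg,
				  reg == 0, 1, 1000);
}
#endif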
void exynos3250_jpeg_poweron(void __iomem *regs)
{
writel(EXYNOS3250_POWER_ON, regs + EXYNOS3250_JPGCLKCON);
}
void exynos3250_jpeg_set_dma_num(void __iomem *regs)
{
writel(((EXYNOS3250_DMA_MO_COUNT << EXYNOS3250_WDMA_ISSUE_NUM_SHIFT) &
EXYNOS3250_WDMA_ISSUE_NUM_MASK) |
((EXYNOS3250_DMA_MO_COUNT << EXYNOS3250_RDMA_ISSUE_NUM_SHIFT) &
EXYNOS3250_RDMA_ISSUE_NUM_MASK) |
((EXYNOS3250_DMA_MO_COUNT << EXYNOS3250_ISSUE_GATHER_NUM_SHIFT) &
EXYNOS3250_ISSUE_GATHER_NUM_MASK),
regs + EXYNOS3250_DMA_ISSUE_NUM);
}
void exynos3250_jpeg_clk_set(void __iomem *base)
{
u32 reg;
reg = readl(base + EXYNOS3250_JPGCMOD) & ~EXYNOS3250_HALF_EN_MASK;
writel(reg | EXYNOS3250_HALF_EN, base + EXYNOS3250_JPGCMOD);
}
void exynos3250_jpeg_input_raw_fmt(void __iomem *regs, unsigned int fmt)
{
u32 reg;
reg = readl(regs + EXYNOS3250_JPGCMOD) &
EXYNOS3250_MODE_Y16_MASK;
switch (fmt) {
case V4L2_PIX_FMT_RGB32:
reg |= EXYNOS3250_MODE_SEL_ARGB8888;
break;
case V4L2_PIX_FMT_BGR32:
reg |= EXYNOS3250_MODE_SEL_ARGB8888 | EXYNOS3250_SRC_SWAP_RGB;
break;
case V4L2_PIX_FMT_RGB565:
reg |= EXYNOS3250_MODE_SEL_RGB565;
break;
case V4L2_PIX_FMT_RGB565X:
reg |= EXYNOS3250_MODE_SEL_RGB565 | EXYNOS3250_SRC_SWAP_RGB;
break;
case V4L2_PIX_FMT_YUYV:
reg |= EXYNOS3250_MODE_SEL_422_1P_LUM_CHR;
break;
case V4L2_PIX_FMT_YVYU:
reg |= EXYNOS3250_MODE_SEL_422_1P_LUM_CHR |
EXYNOS3250_SRC_SWAP_UV;
break;
case V4L2_PIX_FMT_UYVY:
reg |= EXYNOS3250_MODE_SEL_422_1P_CHR_LUM;
break;
case V4L2_PIX_FMT_VYUY:
reg |= EXYNOS3250_MODE_SEL_422_1P_CHR_LUM |
EXYNOS3250_SRC_SWAP_UV;
break;
case V4L2_PIX_FMT_NV12:
reg |= EXYNOS3250_MODE_SEL_420_2P | EXYNOS3250_SRC_NV12;
break;
case V4L2_PIX_FMT_NV21:
reg |= EXYNOS3250_MODE_SEL_420_2P | EXYNOS3250_SRC_NV21;
break;
case V4L2_PIX_FMT_YUV420:
reg |= EXYNOS3250_MODE_SEL_420_3P;
break;
default:
break;
}
writel(reg, regs + EXYNOS3250_JPGCMOD);
}
void exynos3250_jpeg_set_y16(void __iomem *regs, bool y16)
{
u32 reg;
reg = readl(regs + EXYNOS3250_JPGCMOD);
if (y16)
reg |= EXYNOS3250_MODE_Y16;
else
reg &= ~EXYNOS3250_MODE_Y16_MASK;
writel(reg, regs + EXYNOS3250_JPGCMOD);
}
void exynos3250_jpeg_proc_mode(void __iomem *regs, unsigned int mode)
{
u32 reg, m;
if (mode == S5P_JPEG_ENCODE)
m = EXYNOS3250_PROC_MODE_COMPR;
else
m = EXYNOS3250_PROC_MODE_DECOMPR;
reg = readl(regs + EXYNOS3250_JPGMOD);
reg &= ~EXYNOS3250_PROC_MODE_MASK;
reg |= m;
writel(reg, regs + EXYNOS3250_JPGMOD);
}
void exynos3250_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
{
u32 reg, m = 0;
switch (mode) {
case V4L2_JPEG_CHROMA_SUBSAMPLING_444:
m = EXYNOS3250_SUBSAMPLING_MODE_444;
break;
case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
m = EXYNOS3250_SUBSAMPLING_MODE_422;
break;
case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
m = EXYNOS3250_SUBSAMPLING_MODE_420;
break;
}
reg = readl(regs + EXYNOS3250_JPGMOD);
reg &= ~EXYNOS3250_SUBSAMPLING_MODE_MASK;
reg |= m;
writel(reg, regs + EXYNOS3250_JPGMOD);
}
unsigned int exynos3250_jpeg_get_subsampling_mode(void __iomem *regs)
{
return readl(regs + EXYNOS3250_JPGMOD) &
EXYNOS3250_SUBSAMPLING_MODE_MASK;
}
void exynos3250_jpeg_dri(void __iomem *regs, unsigned int dri)
{
u32 reg;
reg = dri & EXYNOS3250_JPGDRI_MASK;
writel(reg, regs + EXYNOS3250_JPGDRI);
}
void exynos3250_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
{
unsigned long reg;
reg = readl(regs + EXYNOS3250_QHTBL);
reg &= ~EXYNOS3250_QT_NUM_MASK(t);
reg |= (n << EXYNOS3250_QT_NUM_SHIFT(t)) &
EXYNOS3250_QT_NUM_MASK(t);
writel(reg, regs + EXYNOS3250_QHTBL);
}
void exynos3250_jpeg_htbl_ac(void __iomem *regs, unsigned int t)
{
unsigned long reg;
reg = readl(regs + EXYNOS3250_QHTBL);
reg &= ~EXYNOS3250_HT_NUM_AC_MASK(t);
/* this driver uses table 0 for all color components */
reg |= (0 << EXYNOS3250_HT_NUM_AC_SHIFT(t)) &
EXYNOS3250_HT_NUM_AC_MASK(t);
writel(reg, regs + EXYNOS3250_QHTBL);
}
void exynos3250_jpeg_htbl_dc(void __iomem *regs, unsigned int t)
{
unsigned long reg;
reg = readl(regs + EXYNOS3250_QHTBL);
reg &= ~EXYNOS3250_HT_NUM_DC_MASK(t);
/* this driver uses table 0 for all color components */
reg |= (0 << EXYNOS3250_HT_NUM_DC_SHIFT(t)) &
EXYNOS3250_HT_NUM_DC_MASK(t);
writel(reg, regs + EXYNOS3250_QHTBL);
}
void exynos3250_jpeg_set_y(void __iomem *regs, unsigned int y)
{
u32 reg;
reg = y & EXYNOS3250_JPGY_MASK;
writel(reg, regs + EXYNOS3250_JPGY);
}
void exynos3250_jpeg_set_x(void __iomem *regs, unsigned int x)
{
u32 reg;
reg = x & EXYNOS3250_JPGX_MASK;
writel(reg, regs + EXYNOS3250_JPGX);
}
#if 0 /* Currently unused */
unsigned int exynos3250_jpeg_get_y(void __iomem *regs)
{
return readl(regs + EXYNOS3250_JPGY);
}
unsigned int exynos3250_jpeg_get_x(void __iomem *regs)
{
return readl(regs + EXYNOS3250_JPGX);
}
#endif
void exynos3250_jpeg_interrupts_enable(void __iomem *regs)
{
u32 reg;
reg = readl(regs + EXYNOS3250_JPGINTSE);
reg |= (EXYNOS3250_JPEG_DONE_EN |
EXYNOS3250_WDMA_DONE_EN |
EXYNOS3250_RDMA_DONE_EN |
EXYNOS3250_ENC_STREAM_INT_EN |
EXYNOS3250_CORE_DONE_EN |
EXYNOS3250_ERR_INT_EN |
EXYNOS3250_HEAD_INT_EN);
writel(reg, regs + EXYNOS3250_JPGINTSE);
}
void exynos3250_jpeg_enc_stream_bound(void __iomem *regs, unsigned int size)
{
u32 reg;
reg = size & EXYNOS3250_ENC_STREAM_BOUND_MASK;
writel(reg, regs + EXYNOS3250_ENC_STREAM_BOUND);
}
void exynos3250_jpeg_output_raw_fmt(void __iomem *regs, unsigned int fmt)
{
u32 reg;
switch (fmt) {
case V4L2_PIX_FMT_RGB32:
reg = EXYNOS3250_OUT_FMT_ARGB8888;
break;
case V4L2_PIX_FMT_BGR32:
reg = EXYNOS3250_OUT_FMT_ARGB8888 | EXYNOS3250_OUT_SWAP_RGB;
break;
case V4L2_PIX_FMT_RGB565:
reg = EXYNOS3250_OUT_FMT_RGB565;
break;
case V4L2_PIX_FMT_RGB565X:
reg = EXYNOS3250_OUT_FMT_RGB565 | EXYNOS3250_OUT_SWAP_RGB;
break;
case V4L2_PIX_FMT_YUYV:
reg = EXYNOS3250_OUT_FMT_422_1P_LUM_CHR;
break;
case V4L2_PIX_FMT_YVYU:
reg = EXYNOS3250_OUT_FMT_422_1P_LUM_CHR |
EXYNOS3250_OUT_SWAP_UV;
break;
case V4L2_PIX_FMT_UYVY:
reg = EXYNOS3250_OUT_FMT_422_1P_CHR_LUM;
break;
case V4L2_PIX_FMT_VYUY:
reg = EXYNOS3250_OUT_FMT_422_1P_CHR_LUM |
EXYNOS3250_OUT_SWAP_UV;
break;
case V4L2_PIX_FMT_NV12:
reg = EXYNOS3250_OUT_FMT_420_2P | EXYNOS3250_OUT_NV12;
break;
case V4L2_PIX_FMT_NV21:
reg = EXYNOS3250_OUT_FMT_420_2P | EXYNOS3250_OUT_NV21;
break;
case V4L2_PIX_FMT_YUV420:
reg = EXYNOS3250_OUT_FMT_420_3P;
break;
default:
reg = 0;
break;
}
writel(reg, regs + EXYNOS3250_OUTFORM);
}
void exynos3250_jpeg_jpgadr(void __iomem *regs, unsigned int addr)
{
writel(addr, regs + EXYNOS3250_JPG_JPGADR);
}
void exynos3250_jpeg_imgadr(void __iomem *regs, struct s5p_jpeg_addr *img_addr)
{
writel(img_addr->y, regs + EXYNOS3250_LUMA_BASE);
writel(img_addr->cb, regs + EXYNOS3250_CHROMA_BASE);
writel(img_addr->cr, regs + EXYNOS3250_CHROMA_CR_BASE);
}
void exynos3250_jpeg_stride(void __iomem *regs, unsigned int img_fmt,
unsigned int width)
{
u32 reg_luma = 0, reg_cr = 0, reg_cb = 0;
switch (img_fmt) {
case V4L2_PIX_FMT_RGB32:
reg_luma = 4 * width;
break;
case V4L2_PIX_FMT_RGB565:
case V4L2_PIX_FMT_RGB565X:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_VYUY:
reg_luma = 2 * width;
break;
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
reg_luma = width;
reg_cb = reg_luma;
break;
case V4L2_PIX_FMT_YUV420:
reg_luma = width;
reg_cb = reg_cr = reg_luma / 2;
break;
default:
break;
}
writel(reg_luma, regs + EXYNOS3250_LUMA_STRIDE);
writel(reg_cb, regs + EXYNOS3250_CHROMA_STRIDE);
writel(reg_cr, regs + EXYNOS3250_CHROMA_CR_STRIDE);
}
void exynos3250_jpeg_offset(void __iomem *regs, unsigned int x_offset,
unsigned int y_offset)
{
u32 reg;
reg = (y_offset << EXYNOS3250_LUMA_YY_OFFSET_SHIFT) &
EXYNOS3250_LUMA_YY_OFFSET_MASK;
reg |= (x_offset << EXYNOS3250_LUMA_YX_OFFSET_SHIFT) &
EXYNOS3250_LUMA_YX_OFFSET_MASK;
writel(reg, regs + EXYNOS3250_LUMA_XY_OFFSET);
reg = (y_offset << EXYNOS3250_CHROMA_YY_OFFSET_SHIFT) &
EXYNOS3250_CHROMA_YY_OFFSET_MASK;
reg |= (x_offset << EXYNOS3250_CHROMA_YX_OFFSET_SHIFT) &
EXYNOS3250_CHROMA_YX_OFFSET_MASK;
writel(reg, regs + EXYNOS3250_CHROMA_XY_OFFSET);
reg = (y_offset << EXYNOS3250_CHROMA_CR_YY_OFFSET_SHIFT) &
EXYNOS3250_CHROMA_CR_YY_OFFSET_MASK;
reg |= (x_offset << EXYNOS3250_CHROMA_CR_YX_OFFSET_SHIFT) &
EXYNOS3250_CHROMA_CR_YX_OFFSET_MASK;
writel(reg, regs + EXYNOS3250_CHROMA_CR_XY_OFFSET);
}
void exynos3250_jpeg_coef(void __iomem *base, unsigned int mode)
{
if (mode == S5P_JPEG_ENCODE) {
writel(EXYNOS3250_JPEG_ENC_COEF1,
base + EXYNOS3250_JPG_COEF(1));
writel(EXYNOS3250_JPEG_ENC_COEF2,
base + EXYNOS3250_JPG_COEF(2));
writel(EXYNOS3250_JPEG_ENC_COEF3,
base + EXYNOS3250_JPG_COEF(3));
} else {
writel(EXYNOS3250_JPEG_DEC_COEF1,
base + EXYNOS3250_JPG_COEF(1));
writel(EXYNOS3250_JPEG_DEC_COEF2,
base + EXYNOS3250_JPG_COEF(2));
writel(EXYNOS3250_JPEG_DEC_COEF3,
base + EXYNOS3250_JPG_COEF(3));
}
}
void exynos3250_jpeg_start(void __iomem *regs)
{
writel(1, regs + EXYNOS3250_JSTART);
}
void exynos3250_jpeg_rstart(void __iomem *regs)
{
writel(1, regs + EXYNOS3250_JRSTART);
}
unsigned int exynos3250_jpeg_get_int_status(void __iomem *regs)
{
return readl(regs + EXYNOS3250_JPGINTST);
}
void exynos3250_jpeg_clear_int_status(void __iomem *regs,
unsigned int value)
{
writel(value, regs + EXYNOS3250_JPGINTST);
}
unsigned int exynos3250_jpeg_operating(void __iomem *regs)
{
return readl(regs + S5P_JPGOPR) & EXYNOS3250_JPGOPR_MASK;
}
unsigned int exynos3250_jpeg_compressed_size(void __iomem *regs)
{
return readl(regs + EXYNOS3250_JPGCNT) & EXYNOS3250_JPGCNT_MASK;
}
void exynos3250_jpeg_dec_stream_size(void __iomem *regs,
unsigned int size)
{
writel(size & EXYNOS3250_DEC_STREAM_MASK,
regs + EXYNOS3250_DEC_STREAM_SIZE);
}
void exynos3250_jpeg_dec_scaling_ratio(void __iomem *regs,
unsigned int sratio)
{
switch (sratio) {
case 1:
default:
sratio = EXYNOS3250_DEC_SCALE_FACTOR_8_8;
break;
case 2:
sratio = EXYNOS3250_DEC_SCALE_FACTOR_4_8;
break;
case 4:
sratio = EXYNOS3250_DEC_SCALE_FACTOR_2_8;
break;
case 8:
sratio = EXYNOS3250_DEC_SCALE_FACTOR_1_8;
break;
}
writel(sratio & EXYNOS3250_DEC_SCALE_FACTOR_MASK,
regs + EXYNOS3250_DEC_SCALING_RATIO);
}
void exynos3250_jpeg_set_timer(void __iomem *regs, unsigned int time_value)
{
time_value &= EXYNOS3250_TIMER_INIT_MASK;
writel(EXYNOS3250_TIMER_INT_STAT | time_value,
regs + EXYNOS3250_TIMER_SE);
}
unsigned int exynos3250_jpeg_get_timer_status(void __iomem *regs)
{
return readl(regs + EXYNOS3250_TIMER_ST);
}
void exynos3250_jpeg_clear_timer_status(void __iomem *regs)
{
writel(EXYNOS3250_TIMER_INT_STAT, regs + EXYNOS3250_TIMER_ST);
}
| linux-master | drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos3250.c |
// SPDX-License-Identifier: GPL-2.0-only
/* linux/drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-s5p.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <[email protected]>
*/
#include <linux/io.h>
#include <linux/videodev2.h>
#include "jpeg-core.h"
#include "jpeg-regs.h"
#include "jpeg-hw-s5p.h"
void s5p_jpeg_reset(void __iomem *regs)
{
unsigned long reg;
writel(1, regs + S5P_JPG_SW_RESET);
reg = readl(regs + S5P_JPG_SW_RESET);
	/* no way other than polling to tell when the JPEG IP becomes operational */
while (reg != 0) {
cpu_relax();
reg = readl(regs + S5P_JPG_SW_RESET);
}
}
void s5p_jpeg_poweron(void __iomem *regs)
{
writel(S5P_POWER_ON, regs + S5P_JPGCLKCON);
}
void s5p_jpeg_input_raw_mode(void __iomem *regs, unsigned long mode)
{
unsigned long reg, m;
	if (mode == S5P_JPEG_RAW_IN_422)
		m = S5P_MOD_SEL_422;
	else
		m = S5P_MOD_SEL_565;
reg = readl(regs + S5P_JPGCMOD);
reg &= ~S5P_MOD_SEL_MASK;
reg |= m;
writel(reg, regs + S5P_JPGCMOD);
}
void s5p_jpeg_proc_mode(void __iomem *regs, unsigned long mode)
{
unsigned long reg, m;
if (mode == S5P_JPEG_ENCODE)
m = S5P_PROC_MODE_COMPR;
else
m = S5P_PROC_MODE_DECOMPR;
reg = readl(regs + S5P_JPGMOD);
reg &= ~S5P_PROC_MODE_MASK;
reg |= m;
writel(reg, regs + S5P_JPGMOD);
}
void s5p_jpeg_subsampling_mode(void __iomem *regs, unsigned int mode)
{
unsigned long reg, m;
if (mode == V4L2_JPEG_CHROMA_SUBSAMPLING_420)
m = S5P_SUBSAMPLING_MODE_420;
else
m = S5P_SUBSAMPLING_MODE_422;
reg = readl(regs + S5P_JPGMOD);
reg &= ~S5P_SUBSAMPLING_MODE_MASK;
reg |= m;
writel(reg, regs + S5P_JPGMOD);
}
unsigned int s5p_jpeg_get_subsampling_mode(void __iomem *regs)
{
return readl(regs + S5P_JPGMOD) & S5P_SUBSAMPLING_MODE_MASK;
}
void s5p_jpeg_dri(void __iomem *regs, unsigned int dri)
{
unsigned long reg;
reg = readl(regs + S5P_JPGDRI_U);
reg &= ~0xff;
reg |= (dri >> 8) & 0xff;
writel(reg, regs + S5P_JPGDRI_U);
reg = readl(regs + S5P_JPGDRI_L);
reg &= ~0xff;
reg |= dri & 0xff;
writel(reg, regs + S5P_JPGDRI_L);
}
void s5p_jpeg_qtbl(void __iomem *regs, unsigned int t, unsigned int n)
{
unsigned long reg;
reg = readl(regs + S5P_JPG_QTBL);
reg &= ~S5P_QT_NUMt_MASK(t);
reg |= (n << S5P_QT_NUMt_SHIFT(t)) & S5P_QT_NUMt_MASK(t);
writel(reg, regs + S5P_JPG_QTBL);
}
void s5p_jpeg_htbl_ac(void __iomem *regs, unsigned int t)
{
unsigned long reg;
reg = readl(regs + S5P_JPG_HTBL);
reg &= ~S5P_HT_NUMt_AC_MASK(t);
/* this driver uses table 0 for all color components */
reg |= (0 << S5P_HT_NUMt_AC_SHIFT(t)) & S5P_HT_NUMt_AC_MASK(t);
writel(reg, regs + S5P_JPG_HTBL);
}
void s5p_jpeg_htbl_dc(void __iomem *regs, unsigned int t)
{
unsigned long reg;
reg = readl(regs + S5P_JPG_HTBL);
reg &= ~S5P_HT_NUMt_DC_MASK(t);
/* this driver uses table 0 for all color components */
reg |= (0 << S5P_HT_NUMt_DC_SHIFT(t)) & S5P_HT_NUMt_DC_MASK(t);
writel(reg, regs + S5P_JPG_HTBL);
}
void s5p_jpeg_y(void __iomem *regs, unsigned int y)
{
unsigned long reg;
reg = readl(regs + S5P_JPGY_U);
reg &= ~0xff;
reg |= (y >> 8) & 0xff;
writel(reg, regs + S5P_JPGY_U);
reg = readl(regs + S5P_JPGY_L);
reg &= ~0xff;
reg |= y & 0xff;
writel(reg, regs + S5P_JPGY_L);
}
void s5p_jpeg_x(void __iomem *regs, unsigned int x)
{
unsigned long reg;
reg = readl(regs + S5P_JPGX_U);
reg &= ~0xff;
reg |= (x >> 8) & 0xff;
writel(reg, regs + S5P_JPGX_U);
reg = readl(regs + S5P_JPGX_L);
reg &= ~0xff;
reg |= x & 0xff;
writel(reg, regs + S5P_JPGX_L);
}
void s5p_jpeg_rst_int_enable(void __iomem *regs, bool enable)
{
unsigned long reg;
reg = readl(regs + S5P_JPGINTSE);
reg &= ~S5P_RSTm_INT_EN_MASK;
if (enable)
reg |= S5P_RSTm_INT_EN;
writel(reg, regs + S5P_JPGINTSE);
}
void s5p_jpeg_data_num_int_enable(void __iomem *regs, bool enable)
{
unsigned long reg;
reg = readl(regs + S5P_JPGINTSE);
reg &= ~S5P_DATA_NUM_INT_EN_MASK;
if (enable)
reg |= S5P_DATA_NUM_INT_EN;
writel(reg, regs + S5P_JPGINTSE);
}
void s5p_jpeg_final_mcu_num_int_enable(void __iomem *regs, bool enbl)
{
unsigned long reg;
reg = readl(regs + S5P_JPGINTSE);
reg &= ~S5P_FINAL_MCU_NUM_INT_EN_MASK;
if (enbl)
reg |= S5P_FINAL_MCU_NUM_INT_EN;
writel(reg, regs + S5P_JPGINTSE);
}
int s5p_jpeg_timer_stat(void __iomem *regs)
{
return (int)((readl(regs + S5P_JPG_TIMER_ST) & S5P_TIMER_INT_STAT_MASK)
>> S5P_TIMER_INT_STAT_SHIFT);
}
void s5p_jpeg_clear_timer_stat(void __iomem *regs)
{
unsigned long reg;
reg = readl(regs + S5P_JPG_TIMER_SE);
reg &= ~S5P_TIMER_INT_STAT_MASK;
writel(reg, regs + S5P_JPG_TIMER_SE);
}
void s5p_jpeg_enc_stream_int(void __iomem *regs, unsigned long size)
{
unsigned long reg;
reg = readl(regs + S5P_JPG_ENC_STREAM_INTSE);
reg &= ~S5P_ENC_STREAM_BOUND_MASK;
reg |= S5P_ENC_STREAM_INT_EN;
reg |= size & S5P_ENC_STREAM_BOUND_MASK;
writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
}
int s5p_jpeg_enc_stream_stat(void __iomem *regs)
{
return (int)(readl(regs + S5P_JPG_ENC_STREAM_INTST) &
S5P_ENC_STREAM_INT_STAT_MASK);
}
void s5p_jpeg_clear_enc_stream_stat(void __iomem *regs)
{
unsigned long reg;
reg = readl(regs + S5P_JPG_ENC_STREAM_INTSE);
reg &= ~S5P_ENC_STREAM_INT_MASK;
writel(reg, regs + S5P_JPG_ENC_STREAM_INTSE);
}
void s5p_jpeg_outform_raw(void __iomem *regs, unsigned long format)
{
unsigned long reg, f;
	if (format == S5P_JPEG_RAW_OUT_420)
		f = S5P_DEC_OUT_FORMAT_420;
	else
		f = S5P_DEC_OUT_FORMAT_422;
reg = readl(regs + S5P_JPG_OUTFORM);
reg &= ~S5P_DEC_OUT_FORMAT_MASK;
reg |= f;
writel(reg, regs + S5P_JPG_OUTFORM);
}
void s5p_jpeg_jpgadr(void __iomem *regs, unsigned long addr)
{
writel(addr, regs + S5P_JPG_JPGADR);
}
void s5p_jpeg_imgadr(void __iomem *regs, unsigned long addr)
{
writel(addr, regs + S5P_JPG_IMGADR);
}
void s5p_jpeg_coef(void __iomem *regs, unsigned int i,
unsigned int j, unsigned int coef)
{
unsigned long reg;
reg = readl(regs + S5P_JPG_COEF(i));
reg &= ~S5P_COEFn_MASK(j);
reg |= (coef << S5P_COEFn_SHIFT(j)) & S5P_COEFn_MASK(j);
writel(reg, regs + S5P_JPG_COEF(i));
}
void s5p_jpeg_start(void __iomem *regs)
{
writel(1, regs + S5P_JSTART);
}
int s5p_jpeg_result_stat_ok(void __iomem *regs)
{
return (int)((readl(regs + S5P_JPGINTST) & S5P_RESULT_STAT_MASK)
>> S5P_RESULT_STAT_SHIFT);
}
int s5p_jpeg_stream_stat_ok(void __iomem *regs)
{
return !(int)((readl(regs + S5P_JPGINTST) & S5P_STREAM_STAT_MASK)
>> S5P_STREAM_STAT_SHIFT);
}
void s5p_jpeg_clear_int(void __iomem *regs)
{
readl(regs + S5P_JPGINTST);
writel(S5P_INT_RELEASE, regs + S5P_JPGCOM);
readl(regs + S5P_JPGOPR);
}
unsigned int s5p_jpeg_compressed_size(void __iomem *regs)
{
unsigned long jpeg_size = 0;
jpeg_size |= (readl(regs + S5P_JPGCNT_U) & 0xff) << 16;
jpeg_size |= (readl(regs + S5P_JPGCNT_M) & 0xff) << 8;
jpeg_size |= (readl(regs + S5P_JPGCNT_L) & 0xff);
return (unsigned int)jpeg_size;
}
| linux-master | drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-s5p.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Author: Jacek Anaszewski <[email protected]>
*
* Register interface file for JPEG driver on Exynos4x12.
*/
#include <linux/io.h>
#include <linux/delay.h>
#include "jpeg-core.h"
#include "jpeg-hw-exynos4.h"
#include "jpeg-regs.h"
void exynos4_jpeg_sw_reset(void __iomem *base)
{
unsigned int reg;
reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
writel(reg & ~(EXYNOS4_DEC_MODE | EXYNOS4_ENC_MODE),
base + EXYNOS4_JPEG_CNTL_REG);
reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
writel(reg & ~EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
udelay(100);
writel(reg | EXYNOS4_SOFT_RESET_HI, base + EXYNOS4_JPEG_CNTL_REG);
}
void exynos4_jpeg_set_enc_dec_mode(void __iomem *base, unsigned int mode)
{
unsigned int reg;
reg = readl(base + EXYNOS4_JPEG_CNTL_REG);
/* set exynos4_jpeg mod register */
if (mode == S5P_JPEG_DECODE) {
writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) |
EXYNOS4_DEC_MODE,
base + EXYNOS4_JPEG_CNTL_REG);
	} else if (mode == S5P_JPEG_ENCODE) { /* encode */
writel((reg & EXYNOS4_ENC_DEC_MODE_MASK) |
EXYNOS4_ENC_MODE,
base + EXYNOS4_JPEG_CNTL_REG);
} else { /* disable both */
writel(reg & EXYNOS4_ENC_DEC_MODE_MASK,
base + EXYNOS4_JPEG_CNTL_REG);
}
}
void __exynos4_jpeg_set_img_fmt(void __iomem *base, unsigned int img_fmt,
unsigned int version)
{
unsigned int reg;
unsigned int exynos4_swap_chroma_cbcr;
unsigned int exynos4_swap_chroma_crcb;
if (version == SJPEG_EXYNOS4) {
exynos4_swap_chroma_cbcr = EXYNOS4_SWAP_CHROMA_CBCR;
exynos4_swap_chroma_crcb = EXYNOS4_SWAP_CHROMA_CRCB;
} else {
exynos4_swap_chroma_cbcr = EXYNOS5433_SWAP_CHROMA_CBCR;
exynos4_swap_chroma_crcb = EXYNOS5433_SWAP_CHROMA_CRCB;
}
reg = readl(base + EXYNOS4_IMG_FMT_REG) &
EXYNOS4_ENC_IN_FMT_MASK; /* clear except enc format */
switch (img_fmt) {
case V4L2_PIX_FMT_GREY:
reg = reg | EXYNOS4_ENC_GRAY_IMG | EXYNOS4_GRAY_IMG_IP;
break;
case V4L2_PIX_FMT_RGB32:
reg = reg | EXYNOS4_ENC_RGB_IMG |
EXYNOS4_RGB_IP_RGB_32BIT_IMG;
break;
case V4L2_PIX_FMT_RGB565:
reg = reg | EXYNOS4_ENC_RGB_IMG |
EXYNOS4_RGB_IP_RGB_16BIT_IMG;
break;
case V4L2_PIX_FMT_NV24:
reg = reg | EXYNOS4_ENC_YUV_444_IMG |
EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
exynos4_swap_chroma_cbcr;
break;
case V4L2_PIX_FMT_NV42:
reg = reg | EXYNOS4_ENC_YUV_444_IMG |
EXYNOS4_YUV_444_IP_YUV_444_2P_IMG |
exynos4_swap_chroma_crcb;
break;
case V4L2_PIX_FMT_YUYV:
reg = reg | EXYNOS4_DEC_YUV_422_IMG |
EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
exynos4_swap_chroma_cbcr;
break;
case V4L2_PIX_FMT_YVYU:
reg = reg | EXYNOS4_DEC_YUV_422_IMG |
EXYNOS4_YUV_422_IP_YUV_422_1P_IMG |
exynos4_swap_chroma_crcb;
break;
case V4L2_PIX_FMT_NV16:
reg = reg | EXYNOS4_DEC_YUV_422_IMG |
EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
exynos4_swap_chroma_cbcr;
break;
case V4L2_PIX_FMT_NV61:
reg = reg | EXYNOS4_DEC_YUV_422_IMG |
EXYNOS4_YUV_422_IP_YUV_422_2P_IMG |
exynos4_swap_chroma_crcb;
break;
case V4L2_PIX_FMT_NV12:
reg = reg | EXYNOS4_DEC_YUV_420_IMG |
EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
exynos4_swap_chroma_cbcr;
break;
case V4L2_PIX_FMT_NV21:
reg = reg | EXYNOS4_DEC_YUV_420_IMG |
EXYNOS4_YUV_420_IP_YUV_420_2P_IMG |
exynos4_swap_chroma_crcb;
break;
case V4L2_PIX_FMT_YUV420:
reg = reg | EXYNOS4_DEC_YUV_420_IMG |
EXYNOS4_YUV_420_IP_YUV_420_3P_IMG |
exynos4_swap_chroma_cbcr;
break;
default:
break;
}
writel(reg, base + EXYNOS4_IMG_FMT_REG);
}
void __exynos4_jpeg_set_enc_out_fmt(void __iomem *base, unsigned int out_fmt,
unsigned int version)
{
unsigned int reg;
reg = readl(base + EXYNOS4_IMG_FMT_REG) &
~(version == SJPEG_EXYNOS4 ? EXYNOS4_ENC_FMT_MASK :
EXYNOS5433_ENC_FMT_MASK); /* clear enc format */
switch (out_fmt) {
case V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY:
reg = reg | EXYNOS4_ENC_FMT_GRAY;
break;
case V4L2_JPEG_CHROMA_SUBSAMPLING_444:
reg = reg | EXYNOS4_ENC_FMT_YUV_444;
break;
case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
reg = reg | EXYNOS4_ENC_FMT_YUV_422;
break;
case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
reg = reg | EXYNOS4_ENC_FMT_YUV_420;
break;
default:
break;
}
writel(reg, base + EXYNOS4_IMG_FMT_REG);
}
void exynos4_jpeg_set_interrupt(void __iomem *base, unsigned int version)
{
unsigned int reg;
if (version == SJPEG_EXYNOS4) {
reg = readl(base + EXYNOS4_INT_EN_REG) & ~EXYNOS4_INT_EN_MASK;
writel(reg | EXYNOS4_INT_EN_ALL, base + EXYNOS4_INT_EN_REG);
} else {
reg = readl(base + EXYNOS4_INT_EN_REG) &
~EXYNOS5433_INT_EN_MASK;
writel(reg | EXYNOS5433_INT_EN_ALL, base + EXYNOS4_INT_EN_REG);
}
}
unsigned int exynos4_jpeg_get_int_status(void __iomem *base)
{
return readl(base + EXYNOS4_INT_STATUS_REG);
}
unsigned int exynos4_jpeg_get_fifo_status(void __iomem *base)
{
return readl(base + EXYNOS4_FIFO_STATUS_REG);
}
void exynos4_jpeg_set_huf_table_enable(void __iomem *base, int value)
{
unsigned int reg;
reg = readl(base + EXYNOS4_JPEG_CNTL_REG) & ~EXYNOS4_HUF_TBL_EN;
if (value == 1)
writel(reg | EXYNOS4_HUF_TBL_EN,
base + EXYNOS4_JPEG_CNTL_REG);
else
writel(reg & ~EXYNOS4_HUF_TBL_EN,
base + EXYNOS4_JPEG_CNTL_REG);
}
void exynos4_jpeg_set_sys_int_enable(void __iomem *base, int value)
{
unsigned int reg;
reg = readl(base + EXYNOS4_JPEG_CNTL_REG) & ~(EXYNOS4_SYS_INT_EN);
if (value == 1)
writel(reg | EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
else
writel(reg & ~EXYNOS4_SYS_INT_EN, base + EXYNOS4_JPEG_CNTL_REG);
}
void exynos4_jpeg_set_stream_buf_address(void __iomem *base,
unsigned int address)
{
writel(address, base + EXYNOS4_OUT_MEM_BASE_REG);
}
void exynos4_jpeg_set_stream_size(void __iomem *base,
unsigned int x_value, unsigned int y_value)
{
writel(0x0, base + EXYNOS4_JPEG_IMG_SIZE_REG); /* clear */
writel(EXYNOS4_X_SIZE(x_value) | EXYNOS4_Y_SIZE(y_value),
base + EXYNOS4_JPEG_IMG_SIZE_REG);
}
void exynos4_jpeg_set_frame_buf_address(void __iomem *base,
struct s5p_jpeg_addr *exynos4_jpeg_addr)
{
writel(exynos4_jpeg_addr->y, base + EXYNOS4_IMG_BA_PLANE_1_REG);
writel(exynos4_jpeg_addr->cb, base + EXYNOS4_IMG_BA_PLANE_2_REG);
writel(exynos4_jpeg_addr->cr, base + EXYNOS4_IMG_BA_PLANE_3_REG);
}
void exynos4_jpeg_set_encode_tbl_select(void __iomem *base,
enum exynos4_jpeg_img_quality_level level)
{
unsigned int reg;
reg = EXYNOS4_Q_TBL_COMP1_0 | EXYNOS4_Q_TBL_COMP2_1 |
EXYNOS4_Q_TBL_COMP3_1 |
EXYNOS4_HUFF_TBL_COMP1_AC_0_DC_1 |
EXYNOS4_HUFF_TBL_COMP2_AC_0_DC_0 |
EXYNOS4_HUFF_TBL_COMP3_AC_1_DC_1;
writel(reg, base + EXYNOS4_TBL_SEL_REG);
}
void exynos4_jpeg_set_dec_components(void __iomem *base, int n)
{
unsigned int reg;
reg = readl(base + EXYNOS4_TBL_SEL_REG);
reg |= EXYNOS4_NF(n);
writel(reg, base + EXYNOS4_TBL_SEL_REG);
}
void exynos4_jpeg_select_dec_q_tbl(void __iomem *base, char c, char x)
{
unsigned int reg;
reg = readl(base + EXYNOS4_TBL_SEL_REG);
reg |= EXYNOS4_Q_TBL_COMP(c, x);
writel(reg, base + EXYNOS4_TBL_SEL_REG);
}
void exynos4_jpeg_select_dec_h_tbl(void __iomem *base, char c, char x)
{
unsigned int reg;
reg = readl(base + EXYNOS4_TBL_SEL_REG);
reg |= EXYNOS4_HUFF_TBL_COMP(c, x);
writel(reg, base + EXYNOS4_TBL_SEL_REG);
}
void exynos4_jpeg_set_encode_hoff_cnt(void __iomem *base, unsigned int fmt)
{
if (fmt == V4L2_PIX_FMT_GREY)
writel(0xd2, base + EXYNOS4_HUFF_CNT_REG);
else
writel(0x1a2, base + EXYNOS4_HUFF_CNT_REG);
}
unsigned int exynos4_jpeg_get_stream_size(void __iomem *base)
{
return readl(base + EXYNOS4_BITSTREAM_SIZE_REG);
}
void exynos4_jpeg_set_dec_bitstream_size(void __iomem *base, unsigned int size)
{
writel(size, base + EXYNOS4_BITSTREAM_SIZE_REG);
}
void exynos4_jpeg_get_frame_size(void __iomem *base,
unsigned int *width, unsigned int *height)
{
*width = (readl(base + EXYNOS4_DECODE_XY_SIZE_REG) &
EXYNOS4_DECODED_SIZE_MASK);
*height = (readl(base + EXYNOS4_DECODE_XY_SIZE_REG) >> 16) &
EXYNOS4_DECODED_SIZE_MASK;
}
unsigned int exynos4_jpeg_get_frame_fmt(void __iomem *base)
{
return readl(base + EXYNOS4_DECODE_IMG_FMT_REG) &
EXYNOS4_JPEG_DECODED_IMG_FMT_MASK;
}
void exynos4_jpeg_set_timer_count(void __iomem *base, unsigned int size)
{
writel(size, base + EXYNOS4_INT_TIMER_COUNT_REG);
}
| linux-master | drivers/media/platform/samsung/s5p-jpeg/jpeg-hw-exynos4.c |
// SPDX-License-Identifier: GPL-2.0-only
/* linux/drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c
*
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Andrzej Pietrasiewicz <[email protected]>
* Author: Jacek Anaszewski <[email protected]>
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-rect.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "jpeg-core.h"
#include "jpeg-hw-s5p.h"
#include "jpeg-hw-exynos4.h"
#include "jpeg-hw-exynos3250.h"
#include "jpeg-regs.h"
static struct s5p_jpeg_fmt sjpeg_formats[] = {
{
.fourcc = V4L2_PIX_FMT_JPEG,
.flags = SJPEG_FMT_FLAG_ENC_CAPTURE |
SJPEG_FMT_FLAG_DEC_OUTPUT |
SJPEG_FMT_FLAG_S5P |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_FLAG_EXYNOS4,
},
{
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.colplanes = 1,
.h_align = 4,
.v_align = 3,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_S5P |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.colplanes = 1,
.h_align = 1,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
.colplanes = 1,
.h_align = 2,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.fourcc = V4L2_PIX_FMT_YVYU,
.depth = 16,
.colplanes = 1,
.h_align = 1,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.fourcc = V4L2_PIX_FMT_YVYU,
.depth = 16,
.colplanes = 1,
.h_align = 2,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.fourcc = V4L2_PIX_FMT_UYVY,
.depth = 16,
.colplanes = 1,
.h_align = 2,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.fourcc = V4L2_PIX_FMT_VYUY,
.depth = 16,
.colplanes = 1,
.h_align = 2,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.colplanes = 1,
.h_align = 0,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.colplanes = 1,
.h_align = 2,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
.fourcc = V4L2_PIX_FMT_RGB565X,
.depth = 16,
.colplanes = 1,
.h_align = 2,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
.fourcc = V4L2_PIX_FMT_RGB565,
.depth = 16,
.colplanes = 1,
.h_align = 0,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_S5P |
SJPEG_FMT_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.colplanes = 1,
.h_align = 0,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.colplanes = 1,
.h_align = 2,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
.fourcc = V4L2_PIX_FMT_NV24,
.depth = 24,
.colplanes = 2,
.h_align = 0,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
.fourcc = V4L2_PIX_FMT_NV42,
.depth = 24,
.colplanes = 2,
.h_align = 0,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444,
},
{
.fourcc = V4L2_PIX_FMT_NV61,
.depth = 16,
.colplanes = 2,
.h_align = 1,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.fourcc = V4L2_PIX_FMT_NV16,
.depth = 16,
.colplanes = 2,
.h_align = 1,
.v_align = 0,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422,
},
{
.fourcc = V4L2_PIX_FMT_NV12,
.depth = 12,
.colplanes = 2,
.h_align = 1,
.v_align = 1,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
.fourcc = V4L2_PIX_FMT_NV12,
.depth = 12,
.colplanes = 2,
.h_align = 3,
.v_align = 3,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
.fourcc = V4L2_PIX_FMT_NV12,
.depth = 12,
.colplanes = 2,
.h_align = 4,
.v_align = 4,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_S5P |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
.fourcc = V4L2_PIX_FMT_NV21,
.depth = 12,
.colplanes = 2,
.h_align = 3,
.v_align = 3,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
.fourcc = V4L2_PIX_FMT_NV21,
.depth = 12,
.colplanes = 2,
.h_align = 1,
.v_align = 1,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = 12,
.colplanes = 3,
.h_align = 1,
.v_align = 1,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = 12,
.colplanes = 3,
.h_align = 4,
.v_align = 4,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS3250 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420,
},
{
.fourcc = V4L2_PIX_FMT_GREY,
.depth = 8,
.colplanes = 1,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_EXYNOS4 |
SJPEG_FMT_NON_RGB,
.subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
},
};
#define SJPEG_NUM_FORMATS ARRAY_SIZE(sjpeg_formats)
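/*
 * Note on the table above: h_align and v_align are log2 alignment
 * exponents consumed by jpeg_bound_align_image() below, which aligns a
 * dimension in steps of 1 << align. For example, the S5P NV12 entry
 * (h_align = 4, v_align = 4) constrains width and height to multiples
 * of 16, while the Exynos4 NV12 entry (h_align = 1, v_align = 1) only
 * requires multiples of 2.
 */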
static const unsigned char qtbl_luminance[4][64] = {
{ /* level 0 - high compression quality */
20, 16, 25, 39, 50, 46, 62, 68,
16, 18, 23, 38, 38, 53, 65, 68,
25, 23, 31, 38, 53, 65, 68, 68,
39, 38, 38, 53, 65, 68, 68, 68,
50, 38, 53, 65, 68, 68, 68, 68,
46, 53, 65, 68, 68, 68, 68, 68,
62, 65, 68, 68, 68, 68, 68, 68,
68, 68, 68, 68, 68, 68, 68, 68
},
{/* level 1 */
16, 11, 11, 16, 23, 27, 31, 30,
11, 12, 12, 15, 20, 23, 23, 30,
11, 12, 13, 16, 23, 26, 35, 47,
16, 15, 16, 23, 26, 37, 47, 64,
23, 20, 23, 26, 39, 51, 64, 64,
27, 23, 26, 37, 51, 64, 64, 64,
31, 23, 35, 47, 64, 64, 64, 64,
30, 30, 47, 64, 64, 64, 64, 64
},
{/* level 2 */
12, 8, 8, 12, 17, 21, 24, 23,
8, 9, 9, 11, 15, 19, 18, 23,
8, 9, 10, 12, 19, 20, 27, 36,
12, 11, 12, 21, 20, 28, 36, 53,
17, 15, 19, 20, 30, 39, 51, 59,
21, 19, 20, 28, 39, 51, 59, 59,
24, 18, 27, 36, 51, 59, 59, 59,
23, 23, 36, 53, 59, 59, 59, 59
},
{/* level 3 - low compression quality */
8, 6, 6, 8, 12, 14, 16, 17,
6, 6, 6, 8, 10, 13, 12, 15,
6, 6, 7, 8, 13, 14, 18, 24,
8, 8, 8, 14, 13, 19, 24, 35,
12, 10, 13, 13, 20, 26, 34, 39,
14, 13, 14, 19, 26, 34, 39, 39,
16, 12, 18, 24, 34, 39, 39, 39,
17, 15, 24, 35, 39, 39, 39, 39
}
};
static const unsigned char qtbl_chrominance[4][64] = {
{ /* level 0 - high compression quality */
21, 25, 32, 38, 54, 68, 68, 68,
25, 28, 24, 38, 54, 68, 68, 68,
32, 24, 32, 43, 66, 68, 68, 68,
38, 38, 43, 53, 68, 68, 68, 68,
54, 54, 66, 68, 68, 68, 68, 68,
68, 68, 68, 68, 68, 68, 68, 68,
68, 68, 68, 68, 68, 68, 68, 68,
68, 68, 68, 68, 68, 68, 68, 68
},
{/* level 1 */
17, 15, 17, 21, 20, 26, 38, 48,
15, 19, 18, 17, 20, 26, 35, 43,
17, 18, 20, 22, 26, 30, 46, 53,
21, 17, 22, 28, 30, 39, 53, 64,
20, 20, 26, 30, 39, 48, 64, 64,
26, 26, 30, 39, 48, 63, 64, 64,
38, 35, 46, 53, 64, 64, 64, 64,
48, 43, 53, 64, 64, 64, 64, 64
},
{/* level 2 */
13, 11, 13, 16, 20, 20, 29, 37,
11, 14, 14, 14, 16, 20, 26, 32,
13, 14, 15, 17, 20, 23, 35, 40,
16, 14, 17, 21, 23, 30, 40, 50,
20, 16, 20, 23, 30, 37, 50, 59,
20, 20, 23, 30, 37, 48, 59, 59,
29, 26, 35, 40, 50, 59, 59, 59,
37, 32, 40, 50, 59, 59, 59, 59
},
{/* level 3 - low compression quality */
9, 8, 9, 11, 14, 17, 19, 24,
8, 10, 9, 11, 14, 13, 17, 22,
9, 9, 13, 14, 13, 15, 23, 26,
11, 11, 14, 14, 15, 20, 26, 33,
14, 14, 13, 15, 20, 24, 33, 39,
17, 13, 15, 20, 24, 32, 39, 39,
19, 17, 23, 26, 33, 39, 39, 39,
24, 22, 26, 33, 39, 39, 39, 39
}
};
static const unsigned char hdctbl0[16] = {
0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0
};
static const unsigned char hdctblg0[12] = {
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0xa, 0xb
};
static const unsigned char hactbl0[16] = {
0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d
};
static const unsigned char hactblg0[162] = {
0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
0xf9, 0xfa
};
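/*
 * Illustrative helper (a sketch, not called by the driver): the
 * 16-entry BITS arrays above (hdctbl0, hactbl0) follow the JPEG DHT
 * layout - entry i holds the number of Huffman codes of length i + 1.
 * Summing the entries gives the number of symbols in the matching
 * HUFFVAL array: 12 for hdctblg0 and 162 for hactblg0.
 */
static inline unsigned int sjpeg_huff_nr_symbols(const unsigned char bits[16])
{
unsigned int i, n = 0;
for (i = 0; i < 16; i++)
n += bits[i];
return n;
}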
/*
 * Fourcc downgrade schema lookup tables for 422 and 420 chroma
 * subsampling - the fourcc at each position corresponds to the fourcc
 * at the same position in the fourcc_to_dwngrd_schema_id table, which
 * allows picking the most suitable fourcc counterpart for a given
 * downgraded subsampling property.
 */
static const u32 subs422_fourcc_dwngrd_schema[] = {
V4L2_PIX_FMT_NV16,
V4L2_PIX_FMT_NV61,
};
static const u32 subs420_fourcc_dwngrd_schema[] = {
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_NV21,
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_NV21,
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_NV21,
V4L2_PIX_FMT_GREY,
V4L2_PIX_FMT_GREY,
V4L2_PIX_FMT_GREY,
V4L2_PIX_FMT_GREY,
};
/*
* Lookup table for translation of a fourcc to the position
* of its downgraded counterpart in the *fourcc_dwngrd_schema
* tables.
*/
static const u32 fourcc_to_dwngrd_schema_id[] = {
V4L2_PIX_FMT_NV24,
V4L2_PIX_FMT_NV42,
V4L2_PIX_FMT_NV16,
V4L2_PIX_FMT_NV61,
V4L2_PIX_FMT_YUYV,
V4L2_PIX_FMT_YVYU,
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_NV21,
V4L2_PIX_FMT_YUV420,
V4L2_PIX_FMT_GREY,
};
static int s5p_jpeg_get_dwngrd_sch_id_by_fourcc(u32 fourcc)
{
int i;
for (i = 0; i < ARRAY_SIZE(fourcc_to_dwngrd_schema_id); ++i) {
if (fourcc_to_dwngrd_schema_id[i] == fourcc)
return i;
}
return -EINVAL;
}
static int s5p_jpeg_adjust_fourcc_to_subsampling(
enum v4l2_jpeg_chroma_subsampling subs,
u32 in_fourcc,
u32 *out_fourcc,
struct s5p_jpeg_ctx *ctx)
{
int dwngrd_sch_id;
if (ctx->subsampling != V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY) {
dwngrd_sch_id =
s5p_jpeg_get_dwngrd_sch_id_by_fourcc(in_fourcc);
if (dwngrd_sch_id < 0)
return -EINVAL;
}
switch (ctx->subsampling) {
case V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY:
*out_fourcc = V4L2_PIX_FMT_GREY;
break;
case V4L2_JPEG_CHROMA_SUBSAMPLING_420:
if (dwngrd_sch_id >
ARRAY_SIZE(subs420_fourcc_dwngrd_schema) - 1)
return -EINVAL;
*out_fourcc = subs420_fourcc_dwngrd_schema[dwngrd_sch_id];
break;
case V4L2_JPEG_CHROMA_SUBSAMPLING_422:
if (dwngrd_sch_id >
ARRAY_SIZE(subs422_fourcc_dwngrd_schema) - 1)
return -EINVAL;
*out_fourcc = subs422_fourcc_dwngrd_schema[dwngrd_sch_id];
break;
default:
*out_fourcc = V4L2_PIX_FMT_GREY;
break;
}
return 0;
}
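/*
 * Usage sketch (illustrative): for a decode context whose input JPEG
 * uses 4:2:0 subsampling, a requested V4L2_PIX_FMT_NV16 resolves to
 * schema id 2 in fourcc_to_dwngrd_schema_id[] and is downgraded to
 * subs420_fourcc_dwngrd_schema[2], i.e. V4L2_PIX_FMT_NV12.
 */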
static int exynos4x12_decoded_subsampling[] = {
V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
V4L2_JPEG_CHROMA_SUBSAMPLING_444,
V4L2_JPEG_CHROMA_SUBSAMPLING_422,
V4L2_JPEG_CHROMA_SUBSAMPLING_420,
};
static int exynos3250_decoded_subsampling[] = {
V4L2_JPEG_CHROMA_SUBSAMPLING_444,
V4L2_JPEG_CHROMA_SUBSAMPLING_422,
V4L2_JPEG_CHROMA_SUBSAMPLING_420,
V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY,
-1,
-1,
V4L2_JPEG_CHROMA_SUBSAMPLING_411,
};
static inline struct s5p_jpeg_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
{
return container_of(c->handler, struct s5p_jpeg_ctx, ctrl_handler);
}
static inline struct s5p_jpeg_ctx *fh_to_ctx(struct v4l2_fh *fh)
{
return container_of(fh, struct s5p_jpeg_ctx, fh);
}
static int s5p_jpeg_to_user_subsampling(struct s5p_jpeg_ctx *ctx)
{
switch (ctx->jpeg->variant->version) {
case SJPEG_S5P:
WARN_ON(ctx->subsampling > 3);
if (ctx->subsampling > 2)
return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
return ctx->subsampling;
case SJPEG_EXYNOS3250:
case SJPEG_EXYNOS5420:
WARN_ON(ctx->subsampling > 6);
if (ctx->subsampling > 3)
return V4L2_JPEG_CHROMA_SUBSAMPLING_411;
return exynos3250_decoded_subsampling[ctx->subsampling];
case SJPEG_EXYNOS4:
WARN_ON(ctx->subsampling > 3);
if (ctx->subsampling > 2)
return V4L2_JPEG_CHROMA_SUBSAMPLING_420;
return exynos4x12_decoded_subsampling[ctx->subsampling];
case SJPEG_EXYNOS5433:
return ctx->subsampling; /* parsed from header */
default:
WARN_ON(ctx->subsampling > 3);
return V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
}
}
static inline void s5p_jpeg_set_qtbl(void __iomem *regs,
const unsigned char *qtbl,
unsigned long tab, int len)
{
int i;
for (i = 0; i < len; i++)
writel((unsigned int)qtbl[i], regs + tab + (i * 0x04));
}
static inline void s5p_jpeg_set_qtbl_lum(void __iomem *regs, int quality)
{
/* this driver fills quantisation table 0 with data for luma */
s5p_jpeg_set_qtbl(regs, qtbl_luminance[quality],
S5P_JPG_QTBL_CONTENT(0),
ARRAY_SIZE(qtbl_luminance[quality]));
}
static inline void s5p_jpeg_set_qtbl_chr(void __iomem *regs, int quality)
{
/* this driver fills quantisation table 1 with data for chroma */
s5p_jpeg_set_qtbl(regs, qtbl_chrominance[quality],
S5P_JPG_QTBL_CONTENT(1),
ARRAY_SIZE(qtbl_chrominance[quality]));
}
static inline void s5p_jpeg_set_htbl(void __iomem *regs,
const unsigned char *htbl,
unsigned long tab, int len)
{
int i;
for (i = 0; i < len; i++)
writel((unsigned int)htbl[i], regs + tab + (i * 0x04));
}
static inline void s5p_jpeg_set_hdctbl(void __iomem *regs)
{
/* this driver fills table 0 for this component */
s5p_jpeg_set_htbl(regs, hdctbl0, S5P_JPG_HDCTBL(0),
ARRAY_SIZE(hdctbl0));
}
static inline void s5p_jpeg_set_hdctblg(void __iomem *regs)
{
/* this driver fills table 0 for this component */
s5p_jpeg_set_htbl(regs, hdctblg0, S5P_JPG_HDCTBLG(0),
ARRAY_SIZE(hdctblg0));
}
static inline void s5p_jpeg_set_hactbl(void __iomem *regs)
{
/* this driver fills table 0 for this component */
s5p_jpeg_set_htbl(regs, hactbl0, S5P_JPG_HACTBL(0),
ARRAY_SIZE(hactbl0));
}
static inline void s5p_jpeg_set_hactblg(void __iomem *regs)
{
/* this driver fills table 0 for this component */
s5p_jpeg_set_htbl(regs, hactblg0, S5P_JPG_HACTBLG(0),
ARRAY_SIZE(hactblg0));
}
static inline void exynos4_jpeg_set_tbl(void __iomem *regs,
const unsigned char *tbl,
unsigned long tab, int len)
{
int i;
unsigned int dword;
for (i = 0; i < len; i += 4) {
dword = tbl[i] |
(tbl[i + 1] << 8) |
(tbl[i + 2] << 16) |
(tbl[i + 3] << 24);
writel(dword, regs + tab + i);
}
}
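/*
 * Worked example (illustrative): unlike s5p_jpeg_set_qtbl(), which
 * writes one table byte per 32-bit register, exynos4_jpeg_set_tbl()
 * packs four bytes per write. The first four bytes of
 * qtbl_luminance[0], {20, 16, 25, 39} = {0x14, 0x10, 0x19, 0x27},
 * become the single word 0x27191014.
 */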
static inline void exynos4_jpeg_set_qtbl_lum(void __iomem *regs, int quality)
{
/* this driver fills quantisation table 0 with data for luma */
exynos4_jpeg_set_tbl(regs, qtbl_luminance[quality],
EXYNOS4_QTBL_CONTENT(0),
ARRAY_SIZE(qtbl_luminance[quality]));
}
static inline void exynos4_jpeg_set_qtbl_chr(void __iomem *regs, int quality)
{
/* this driver fills quantisation table 1 with data for chroma */
exynos4_jpeg_set_tbl(regs, qtbl_chrominance[quality],
EXYNOS4_QTBL_CONTENT(1),
ARRAY_SIZE(qtbl_chrominance[quality]));
}
static void exynos4_jpeg_set_huff_tbl(void __iomem *base)
{
exynos4_jpeg_set_tbl(base, hdctbl0, EXYNOS4_HUFF_TBL_HDCLL,
ARRAY_SIZE(hdctbl0));
exynos4_jpeg_set_tbl(base, hdctbl0, EXYNOS4_HUFF_TBL_HDCCL,
ARRAY_SIZE(hdctbl0));
exynos4_jpeg_set_tbl(base, hdctblg0, EXYNOS4_HUFF_TBL_HDCLV,
ARRAY_SIZE(hdctblg0));
exynos4_jpeg_set_tbl(base, hdctblg0, EXYNOS4_HUFF_TBL_HDCCV,
ARRAY_SIZE(hdctblg0));
exynos4_jpeg_set_tbl(base, hactbl0, EXYNOS4_HUFF_TBL_HACLL,
ARRAY_SIZE(hactbl0));
exynos4_jpeg_set_tbl(base, hactbl0, EXYNOS4_HUFF_TBL_HACCL,
ARRAY_SIZE(hactbl0));
exynos4_jpeg_set_tbl(base, hactblg0, EXYNOS4_HUFF_TBL_HACLV,
ARRAY_SIZE(hactblg0));
exynos4_jpeg_set_tbl(base, hactblg0, EXYNOS4_HUFF_TBL_HACCV,
ARRAY_SIZE(hactblg0));
}
static inline int __exynos4_huff_tbl(int class, int id, bool lenval)
{
/*
* class: 0 - DC, 1 - AC
* id: 0 - Y, 1 - Cb/Cr
*/
if (class) {
if (id)
return lenval ? EXYNOS4_HUFF_TBL_HACCL :
EXYNOS4_HUFF_TBL_HACCV;
return lenval ? EXYNOS4_HUFF_TBL_HACLL : EXYNOS4_HUFF_TBL_HACLV;
}
/* class == 0 */
if (id)
return lenval ? EXYNOS4_HUFF_TBL_HDCCL : EXYNOS4_HUFF_TBL_HDCCV;
return lenval ? EXYNOS4_HUFF_TBL_HDCLL : EXYNOS4_HUFF_TBL_HDCLV;
}
static inline int exynos4_huff_tbl_len(int class, int id)
{
return __exynos4_huff_tbl(class, id, true);
}
static inline int exynos4_huff_tbl_val(int class, int id)
{
return __exynos4_huff_tbl(class, id, false);
}
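/*
 * Register selection summary, derived from __exynos4_huff_tbl() above
 * (L = code lengths, V = code values):
 *
 * class   id      L                       V
 * DC (0)  Y (0)   EXYNOS4_HUFF_TBL_HDCLL  EXYNOS4_HUFF_TBL_HDCLV
 * DC (0)  C (1)   EXYNOS4_HUFF_TBL_HDCCL  EXYNOS4_HUFF_TBL_HDCCV
 * AC (1)  Y (0)   EXYNOS4_HUFF_TBL_HACLL  EXYNOS4_HUFF_TBL_HACLV
 * AC (1)  C (1)   EXYNOS4_HUFF_TBL_HACCL  EXYNOS4_HUFF_TBL_HACCV
 */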
static int get_byte(struct s5p_jpeg_buffer *buf);
static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word);
static void skip(struct s5p_jpeg_buffer *buf, long len);
static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, x, components;
jpeg_buffer.size = 2; /* Ls */
jpeg_buffer.data =
(unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2;
jpeg_buffer.curr = 0;
word = 0;
if (get_word_be(&jpeg_buffer, &word))
return;
jpeg_buffer.size = (long)word - 2;
jpeg_buffer.data += 2;
jpeg_buffer.curr = 0;
components = get_byte(&jpeg_buffer);
if (components == -1)
return;
while (components--) {
c = get_byte(&jpeg_buffer);
if (c == -1)
return;
x = get_byte(&jpeg_buffer);
if (x == -1)
return;
exynos4_jpeg_select_dec_h_tbl(jpeg->regs, c,
(((x >> 4) & 0x1) << 1) | (x & 0x1));
}
}
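/*
 * Note on the selector above: each SOS component entry carries the DC
 * Huffman table id Td in the high nibble and the AC table id Ta in
 * the low nibble; (((x >> 4) & 0x1) << 1) | (x & 0x1) packs their low
 * bits into a 2-bit value, e.g. x = 0x11 (Td = 1, Ta = 1) yields 3.
 */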
static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, i, n, j;
for (j = 0; j < ctx->out_q.dht.n; ++j) {
jpeg_buffer.size = ctx->out_q.dht.len[j];
jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
ctx->out_q.dht.marker[j];
jpeg_buffer.curr = 0;
word = 0;
while (jpeg_buffer.curr < jpeg_buffer.size) {
char id, class;
c = get_byte(&jpeg_buffer);
if (c == -1)
return;
id = c & 0xf;
class = (c >> 4) & 0xf;
n = 0;
for (i = 0; i < 16; ++i) {
c = get_byte(&jpeg_buffer);
if (c == -1)
return;
word |= c << ((i % 4) * 8);
if ((i + 1) % 4 == 0) {
writel(word, jpeg->regs +
exynos4_huff_tbl_len(class, id) +
(i / 4) * 4);
word = 0;
}
n += c;
}
word = 0;
for (i = 0; i < n; ++i) {
c = get_byte(&jpeg_buffer);
if (c == -1)
return;
word |= c << ((i % 4) * 8);
if ((i + 1) % 4 == 0) {
writel(word, jpeg->regs +
exynos4_huff_tbl_val(class, id) +
(i / 4) * 4);
word = 0;
}
}
if (i % 4) {
writel(word, jpeg->regs +
exynos4_huff_tbl_val(class, id) + (i / 4) * 4);
}
word = 0;
}
}
}
static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
int c, x, components;
jpeg_buffer.size = ctx->out_q.sof_len;
jpeg_buffer.data =
(unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof;
jpeg_buffer.curr = 0;
skip(&jpeg_buffer, 5); /* P, Y, X */
components = get_byte(&jpeg_buffer);
if (components == -1)
return;
exynos4_jpeg_set_dec_components(jpeg->regs, components);
while (components--) {
c = get_byte(&jpeg_buffer);
if (c == -1)
return;
skip(&jpeg_buffer, 1);
x = get_byte(&jpeg_buffer);
if (x == -1)
return;
exynos4_jpeg_select_dec_q_tbl(jpeg->regs, c, x);
}
}
static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
struct s5p_jpeg_buffer jpeg_buffer;
unsigned int word;
int c, i, j;
for (j = 0; j < ctx->out_q.dqt.n; ++j) {
jpeg_buffer.size = ctx->out_q.dqt.len[j];
jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
ctx->out_q.dqt.marker[j];
jpeg_buffer.curr = 0;
word = 0;
while (jpeg_buffer.size - jpeg_buffer.curr >= 65) {
char id;
c = get_byte(&jpeg_buffer);
if (c == -1)
return;
id = c & 0xf;
/* nonzero means extended mode - not supported */
if ((c >> 4) & 0xf)
return;
for (i = 0; i < 64; ++i) {
c = get_byte(&jpeg_buffer);
if (c == -1)
return;
word |= c << ((i % 4) * 8);
if ((i + 1) % 4 == 0) {
writel(word, jpeg->regs +
EXYNOS4_QTBL_CONTENT(id) + (i / 4) * 4);
word = 0;
}
}
word = 0;
}
}
}
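/*
 * Note: each DQT unit handled above is 65 bytes long - one Pq/Tq byte
 * (precision in the high nibble, table id in the low nibble) followed
 * by 64 8-bit coefficients - hence the "size - curr >= 65" loop
 * condition. Pq != 0, i.e. a 16-bit extended table, is rejected.
 */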
/*
* ============================================================================
* Device file operations
* ============================================================================
*/
static int queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
__u32 pixelformat, unsigned int fmt_type);
static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx);
static int s5p_jpeg_open(struct file *file)
{
struct s5p_jpeg *jpeg = video_drvdata(file);
struct video_device *vfd = video_devdata(file);
struct s5p_jpeg_ctx *ctx;
struct s5p_jpeg_fmt *out_fmt, *cap_fmt;
int ret = 0;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
if (mutex_lock_interruptible(&jpeg->lock)) {
ret = -ERESTARTSYS;
goto free;
}
v4l2_fh_init(&ctx->fh, vfd);
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
file->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
ctx->jpeg = jpeg;
if (vfd == jpeg->vfd_encoder) {
ctx->mode = S5P_JPEG_ENCODE;
out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_RGB565,
FMT_TYPE_OUTPUT);
cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
FMT_TYPE_CAPTURE);
} else {
ctx->mode = S5P_JPEG_DECODE;
out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG,
FMT_TYPE_OUTPUT);
cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_YUYV,
FMT_TYPE_CAPTURE);
ctx->scale_factor = EXYNOS3250_DEC_SCALE_FACTOR_8_8;
}
ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->fh.m2m_ctx)) {
ret = PTR_ERR(ctx->fh.m2m_ctx);
goto error;
}
ctx->out_q.fmt = out_fmt;
ctx->cap_q.fmt = cap_fmt;
ret = s5p_jpeg_controls_create(ctx);
if (ret < 0)
goto error;
mutex_unlock(&jpeg->lock);
return 0;
error:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
mutex_unlock(&jpeg->lock);
free:
kfree(ctx);
return ret;
}
static int s5p_jpeg_release(struct file *file)
{
struct s5p_jpeg *jpeg = video_drvdata(file);
struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
mutex_lock(&jpeg->lock);
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
mutex_unlock(&jpeg->lock);
return 0;
}
static const struct v4l2_file_operations s5p_jpeg_fops = {
.owner = THIS_MODULE,
.open = s5p_jpeg_open,
.release = s5p_jpeg_release,
.poll = v4l2_m2m_fop_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = v4l2_m2m_fop_mmap,
};
/*
* ============================================================================
* video ioctl operations
* ============================================================================
*/
static int get_byte(struct s5p_jpeg_buffer *buf)
{
if (buf->curr >= buf->size)
return -1;
return ((unsigned char *)buf->data)[buf->curr++];
}
static int get_word_be(struct s5p_jpeg_buffer *buf, unsigned int *word)
{
unsigned int temp;
int byte;
byte = get_byte(buf);
if (byte == -1)
return -1;
temp = byte << 8;
byte = get_byte(buf);
if (byte == -1)
return -1;
*word = (unsigned int)byte | temp;
return 0;
}
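/*
 * Usage sketch (illustrative): marker segment lengths are big-endian,
 * so for the byte sequence { 0x00, 0x43 } get_word_be() returns 0 and
 * stores 0x0043 (67) in *word; on a truncated buffer it returns -1
 * and leaves *word unmodified.
 */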
static void skip(struct s5p_jpeg_buffer *buf, long len)
{
if (len <= 0)
return;
while (len--)
get_byte(buf);
}
static bool s5p_jpeg_subsampling_decode(struct s5p_jpeg_ctx *ctx,
unsigned int subsampling)
{
unsigned int version;
switch (subsampling) {
case 0x11:
ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444;
break;
case 0x21:
ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422;
break;
case 0x22:
ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420;
break;
case 0x33:
ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY;
break;
case 0x41:
/*
* 4:1:1 subsampling only supported by 3250, 5420, and 5433
* variants
*/
version = ctx->jpeg->variant->version;
if (version != SJPEG_EXYNOS3250 &&
version != SJPEG_EXYNOS5420 &&
version != SJPEG_EXYNOS5433)
return false;
ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_411;
break;
default:
return false;
}
return true;
}
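/*
 * Worked example (illustrative): the subsampling value decoded above
 * is the first SOF0 component's sampling byte, with the horizontal
 * factor in the high nibble and the vertical factor in the low
 * nibble; 0x22 (luma sampled 2x2 relative to chroma) therefore maps
 * to V4L2_JPEG_CHROMA_SUBSAMPLING_420.
 */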
static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
unsigned long buffer, unsigned long size,
struct s5p_jpeg_ctx *ctx)
{
int c, components = 0, notfound, n_dht = 0, n_dqt = 0;
unsigned int height = 0, width = 0, word, subsampling = 0;
unsigned int sos = 0, sof = 0, sof_len = 0;
unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER];
unsigned int dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
long length;
struct s5p_jpeg_buffer jpeg_buffer;
jpeg_buffer.size = size;
jpeg_buffer.data = buffer;
jpeg_buffer.curr = 0;
notfound = 1;
while (notfound || !sos) {
c = get_byte(&jpeg_buffer);
if (c == -1)
return false;
if (c != 0xff)
continue;
do
c = get_byte(&jpeg_buffer);
while (c == 0xff);
if (c == -1)
return false;
if (c == 0)
continue;
length = 0;
switch (c) {
/* JPEG_MARKER_SOF0: baseline JPEG */
case JPEG_MARKER_SOF0:
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
if (!length)
return false;
sof = jpeg_buffer.curr; /* after 0xffc0 */
sof_len = length;
if (get_byte(&jpeg_buffer) == -1)
break;
if (get_word_be(&jpeg_buffer, &height))
break;
if (get_word_be(&jpeg_buffer, &width))
break;
components = get_byte(&jpeg_buffer);
if (components == -1)
break;
if (components == 1) {
subsampling = 0x33;
} else {
skip(&jpeg_buffer, 1);
subsampling = get_byte(&jpeg_buffer);
skip(&jpeg_buffer, 1);
}
if (components > 3)
return false;
skip(&jpeg_buffer, components * 2);
notfound = 0;
break;
case JPEG_MARKER_DQT:
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
if (!length)
return false;
if (n_dqt >= S5P_JPEG_MAX_MARKER)
return false;
dqt[n_dqt] = jpeg_buffer.curr; /* after 0xffdb */
dqt_len[n_dqt++] = length;
skip(&jpeg_buffer, length);
break;
case JPEG_MARKER_DHT:
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
if (!length)
return false;
if (n_dht >= S5P_JPEG_MAX_MARKER)
return false;
dht[n_dht] = jpeg_buffer.curr; /* after 0xffc4 */
dht_len[n_dht++] = length;
skip(&jpeg_buffer, length);
break;
case JPEG_MARKER_SOS:
sos = jpeg_buffer.curr - 2; /* 0xffda */
break;
/* skip payload-less markers */
case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7:
case JPEG_MARKER_SOI:
case JPEG_MARKER_EOI:
case JPEG_MARKER_TEM:
break;
/* skip uninteresting payload markers */
default:
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
skip(&jpeg_buffer, length);
break;
}
}
if (notfound || !sos || !s5p_jpeg_subsampling_decode(ctx, subsampling))
return false;
result->w = width;
result->h = height;
result->sos = sos;
result->dht.n = n_dht;
while (n_dht--) {
result->dht.marker[n_dht] = dht[n_dht];
result->dht.len[n_dht] = dht_len[n_dht];
}
result->dqt.n = n_dqt;
while (n_dqt--) {
result->dqt.marker[n_dqt] = dqt[n_dqt];
result->dqt.len[n_dqt] = dqt_len[n_dqt];
}
result->sof = sof;
result->sof_len = sof_len;
return true;
}
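/*
 * Parsing sketch (illustrative): for a typical baseline stream
 *
 * SOI, APP0, DQT, DQT, SOF0, DHT, DHT, DHT, DHT, SOS, ...
 *
 * the loop above records the offsets and lengths of the DQT and DHT
 * payloads together with the SOF0 geometry and subsampling, skips
 * APP0 via the default (uninteresting payload) branch, and stops once
 * SOS has been seen.
 */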
static int s5p_jpeg_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE) {
strscpy(cap->driver, S5P_JPEG_M2M_NAME,
sizeof(cap->driver));
strscpy(cap->card, S5P_JPEG_M2M_NAME " encoder",
sizeof(cap->card));
} else {
strscpy(cap->driver, S5P_JPEG_M2M_NAME,
sizeof(cap->driver));
strscpy(cap->card, S5P_JPEG_M2M_NAME " decoder",
sizeof(cap->card));
}
return 0;
}
static int enum_fmt(struct s5p_jpeg_ctx *ctx,
struct s5p_jpeg_fmt *sjpeg_formats, int n,
struct v4l2_fmtdesc *f, u32 type)
{
int i, num = 0;
unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag;
for (i = 0; i < n; ++i) {
if (sjpeg_formats[i].flags & type &&
sjpeg_formats[i].flags & fmt_ver_flag) {
/* index-th format of the given type found? */
if (num == f->index)
break;
/* Correct type but haven't reached our index yet,
* just increment per-type index
*/
++num;
}
}
/* Format not found */
if (i >= n)
return -EINVAL;
f->pixelformat = sjpeg_formats[i].fourcc;
return 0;
}
static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_ENC_CAPTURE);
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_DEC_CAPTURE);
}
static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (ctx->mode == S5P_JPEG_ENCODE)
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_ENC_OUTPUT);
return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
SJPEG_FMT_FLAG_DEC_OUTPUT);
}
static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
enum v4l2_buf_type type)
{
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
return &ctx->out_q;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
return &ctx->cap_q;
return NULL;
}
static int s5p_jpeg_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct vb2_queue *vq;
struct s5p_jpeg_q_data *q_data = NULL;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct s5p_jpeg_ctx *ct = fh_to_ctx(priv);
vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
ct->mode == S5P_JPEG_DECODE && !ct->hdr_parsed)
return -EINVAL;
q_data = get_q_data(ct, f->type);
BUG_ON(q_data == NULL);
pix->width = q_data->w;
pix->height = q_data->h;
pix->field = V4L2_FIELD_NONE;
pix->pixelformat = q_data->fmt->fourcc;
pix->bytesperline = 0;
if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) {
u32 bpl = q_data->w;
if (q_data->fmt->colplanes == 1)
bpl = (bpl * q_data->fmt->depth) >> 3;
pix->bytesperline = bpl;
}
pix->sizeimage = q_data->size;
return 0;
}
static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
u32 pixelformat, unsigned int fmt_type)
{
unsigned int k, fmt_flag;
if (ctx->mode == S5P_JPEG_ENCODE)
fmt_flag = (fmt_type == FMT_TYPE_OUTPUT) ?
SJPEG_FMT_FLAG_ENC_OUTPUT :
SJPEG_FMT_FLAG_ENC_CAPTURE;
else
fmt_flag = (fmt_type == FMT_TYPE_OUTPUT) ?
SJPEG_FMT_FLAG_DEC_OUTPUT :
SJPEG_FMT_FLAG_DEC_CAPTURE;
for (k = 0; k < ARRAY_SIZE(sjpeg_formats); k++) {
struct s5p_jpeg_fmt *fmt = &sjpeg_formats[k];
if (fmt->fourcc == pixelformat &&
fmt->flags & fmt_flag &&
fmt->flags & ctx->jpeg->variant->fmt_ver_flag) {
return fmt;
}
}
return NULL;
}
static void jpeg_bound_align_image(struct s5p_jpeg_ctx *ctx,
u32 *w, unsigned int wmin, unsigned int wmax,
unsigned int walign,
u32 *h, unsigned int hmin, unsigned int hmax,
unsigned int halign)
{
int width, height, w_step, h_step;
width = *w;
height = *h;
w_step = 1 << walign;
h_step = 1 << halign;
if (ctx->jpeg->variant->hw3250_compat) {
/*
 * The Exynos3250/compatible JPEG IP crops the rightmost and
 * bottommost pixels of RGB formats, for specific width and height
 * values respectively. This assignment makes v4l_bound_align_image()
 * return dimensions reduced by 1 in those cases.
 */
if (w_step == 4 && ((width & 3) == 1)) {
wmax = width;
hmax = height;
}
}
v4l_bound_align_image(w, wmin, wmax, walign, h, hmin, hmax, halign, 0);
if (*w < width && (*w + w_step) < wmax)
*w += w_step;
if (*h < height && (*h + h_step) < hmax)
*h += h_step;
}
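/*
 * Worked example (a sketch, assuming v4l_bound_align_image() rounds
 * to the nearest aligned value): for *w = 119 and walign = 4 the
 * width is first aligned down to 112; since 112 < 119 and 112 + 16 is
 * still below wmax, it is bumped to 128 so that the aligned buffer
 * covers the whole image.
 */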
static int vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt,
struct s5p_jpeg_ctx *ctx, int q_type)
{
struct v4l2_pix_format *pix = &f->fmt.pix;
if (pix->field == V4L2_FIELD_ANY)
pix->field = V4L2_FIELD_NONE;
else if (pix->field != V4L2_FIELD_NONE)
return -EINVAL;
/* V4L2 specification suggests the driver corrects the format struct
* if any of the dimensions is unsupported
*/
if (q_type == FMT_TYPE_OUTPUT)
jpeg_bound_align_image(ctx, &pix->width, S5P_JPEG_MIN_WIDTH,
S5P_JPEG_MAX_WIDTH, 0,
&pix->height, S5P_JPEG_MIN_HEIGHT,
S5P_JPEG_MAX_HEIGHT, 0);
else
jpeg_bound_align_image(ctx, &pix->width, S5P_JPEG_MIN_WIDTH,
S5P_JPEG_MAX_WIDTH, fmt->h_align,
&pix->height, S5P_JPEG_MIN_HEIGHT,
S5P_JPEG_MAX_HEIGHT, fmt->v_align);
if (fmt->fourcc == V4L2_PIX_FMT_JPEG) {
if (pix->sizeimage <= 0)
pix->sizeimage = PAGE_SIZE;
pix->bytesperline = 0;
} else {
u32 bpl = pix->bytesperline;
if (fmt->colplanes > 1 && bpl < pix->width)
bpl = pix->width; /* planar */
if (fmt->colplanes == 1 && /* packed */
(bpl << 3) / fmt->depth < pix->width)
bpl = (pix->width * fmt->depth) >> 3;
pix->bytesperline = bpl;
pix->sizeimage = (pix->width * pix->height * fmt->depth) >> 3;
}
return 0;
}
static int s5p_jpeg_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
struct v4l2_pix_format *pix = &f->fmt.pix;
struct s5p_jpeg_fmt *fmt;
int ret;
fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
FMT_TYPE_CAPTURE);
if (!fmt) {
v4l2_err(&ctx->jpeg->v4l2_dev,
"Fourcc format (0x%08x) invalid.\n",
f->fmt.pix.pixelformat);
return -EINVAL;
}
if (!ctx->jpeg->variant->hw_ex4_compat || ctx->mode != S5P_JPEG_DECODE)
goto exit;
/*
 * The exynos4x12 device requires the resulting YUV image subsampling
 * to be no lower than the subsampling of the input JPEG. If this
 * requirement is not met, downgrade the requested capture format to
 * one whose subsampling equals that of the input JPEG.
 */
if ((fmt->flags & SJPEG_FMT_NON_RGB) &&
(fmt->subsampling < ctx->subsampling)) {
ret = s5p_jpeg_adjust_fourcc_to_subsampling(ctx->subsampling,
fmt->fourcc,
&pix->pixelformat,
ctx);
if (ret < 0)
pix->pixelformat = V4L2_PIX_FMT_GREY;
fmt = s5p_jpeg_find_format(ctx, pix->pixelformat,
FMT_TYPE_CAPTURE);
}
/*
 * Decompression of a JPEG file with 4:2:0 subsampling and an odd
 * width to the YUV 4:2:0 compliant formats produces a raw image with
 * a broken luma component. Adjust the capture format to RGB565 in
 * such a case.
 */
if (ctx->subsampling == V4L2_JPEG_CHROMA_SUBSAMPLING_420 &&
(ctx->out_q.w & 1) &&
(pix->pixelformat == V4L2_PIX_FMT_NV12 ||
pix->pixelformat == V4L2_PIX_FMT_NV21 ||
pix->pixelformat == V4L2_PIX_FMT_YUV420)) {
pix->pixelformat = V4L2_PIX_FMT_RGB565;
fmt = s5p_jpeg_find_format(ctx, pix->pixelformat,
FMT_TYPE_CAPTURE);
}
exit:
return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_CAPTURE);
}
static int s5p_jpeg_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
struct s5p_jpeg_fmt *fmt;
fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat,
FMT_TYPE_OUTPUT);
if (!fmt) {
v4l2_err(&ctx->jpeg->v4l2_dev,
"Fourcc format (0x%08x) invalid.\n",
f->fmt.pix.pixelformat);
return -EINVAL;
}
return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_OUTPUT);
}
static int exynos4_jpeg_get_output_buffer_size(struct s5p_jpeg_ctx *ctx,
struct v4l2_format *f,
int fmt_depth)
{
struct v4l2_pix_format *pix = &f->fmt.pix;
u32 pix_fmt = f->fmt.pix.pixelformat;
int w = pix->width, h = pix->height, wh_align;
int padding = 0;
if (pix_fmt == V4L2_PIX_FMT_RGB32 ||
pix_fmt == V4L2_PIX_FMT_RGB565 ||
pix_fmt == V4L2_PIX_FMT_NV24 ||
pix_fmt == V4L2_PIX_FMT_NV42 ||
pix_fmt == V4L2_PIX_FMT_NV12 ||
pix_fmt == V4L2_PIX_FMT_NV21 ||
pix_fmt == V4L2_PIX_FMT_YUV420)
wh_align = 4;
else
wh_align = 1;
jpeg_bound_align_image(ctx, &w, S5P_JPEG_MIN_WIDTH,
S5P_JPEG_MAX_WIDTH, wh_align,
&h, S5P_JPEG_MIN_HEIGHT,
S5P_JPEG_MAX_HEIGHT, wh_align);
if (ctx->jpeg->variant->version == SJPEG_EXYNOS4)
padding = PAGE_SIZE;
return (w * h * fmt_depth >> 3) + padding;
}
static int exynos3250_jpeg_try_downscale(struct s5p_jpeg_ctx *ctx,
struct v4l2_rect *r);
static int s5p_jpeg_s_fmt(struct s5p_jpeg_ctx *ct, struct v4l2_format *f)
{
struct vb2_queue *vq;
struct s5p_jpeg_q_data *q_data = NULL;
struct v4l2_pix_format *pix = &f->fmt.pix;
struct v4l2_ctrl *ctrl_subs;
struct v4l2_rect scale_rect;
unsigned int f_type;
vq = v4l2_m2m_get_vq(ct->fh.m2m_ctx, f->type);
if (!vq)
return -EINVAL;
q_data = get_q_data(ct, f->type);
BUG_ON(q_data == NULL);
if (vb2_is_busy(vq)) {
v4l2_err(&ct->jpeg->v4l2_dev, "%s queue busy\n", __func__);
return -EBUSY;
}
f_type = V4L2_TYPE_IS_OUTPUT(f->type) ?
FMT_TYPE_OUTPUT : FMT_TYPE_CAPTURE;
q_data->fmt = s5p_jpeg_find_format(ct, pix->pixelformat, f_type);
if (ct->mode == S5P_JPEG_ENCODE ||
(ct->mode == S5P_JPEG_DECODE &&
q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG)) {
q_data->w = pix->width;
q_data->h = pix->height;
}
if (q_data->fmt->fourcc != V4L2_PIX_FMT_JPEG) {
/*
 * During encoding, Exynos4x12 SoCs access a wider memory area than
 * follows from the Image_x and Image_y values written to the
 * JPEG_IMAGE_SIZE register. Calculate a proper buffer size in such a
 * case to avoid a sysmmu page fault.
 */
if (ct->jpeg->variant->hw_ex4_compat &&
f_type == FMT_TYPE_OUTPUT && ct->mode == S5P_JPEG_ENCODE)
q_data->size = exynos4_jpeg_get_output_buffer_size(ct,
f,
q_data->fmt->depth);
else
q_data->size = q_data->w * q_data->h *
q_data->fmt->depth >> 3;
} else {
q_data->size = pix->sizeimage;
}
if (f_type == FMT_TYPE_OUTPUT) {
ctrl_subs = v4l2_ctrl_find(&ct->ctrl_handler,
V4L2_CID_JPEG_CHROMA_SUBSAMPLING);
if (ctrl_subs)
v4l2_ctrl_s_ctrl(ctrl_subs, q_data->fmt->subsampling);
ct->crop_altered = false;
}
/*
 * For decoding, init crop_rect with the capture buffer dimensions,
 * which contain the aligned dimensions of the input JPEG image, and
 * do it only if the crop rectangle hasn't been altered by user space,
 * e.g. with the S_SELECTION ioctl. For encoding, assign the output
 * buffer dimensions.
 */
if (!ct->crop_altered &&
((ct->mode == S5P_JPEG_DECODE && f_type == FMT_TYPE_CAPTURE) ||
(ct->mode == S5P_JPEG_ENCODE && f_type == FMT_TYPE_OUTPUT))) {
ct->crop_rect.width = pix->width;
ct->crop_rect.height = pix->height;
}
/*
 * Prevent downscaling to the YUV420 format by more than a factor of 2
 * on Exynos3250/compatible SoCs, as it produces a broken raw image in
 * such cases.
 */
if (ct->mode == S5P_JPEG_DECODE &&
f_type == FMT_TYPE_CAPTURE &&
ct->jpeg->variant->hw3250_compat &&
pix->pixelformat == V4L2_PIX_FMT_YUV420 &&
ct->scale_factor > 2) {
scale_rect.width = ct->out_q.w / 2;
scale_rect.height = ct->out_q.h / 2;
exynos3250_jpeg_try_downscale(ct, &scale_rect);
}
return 0;
}
static int s5p_jpeg_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
int ret;
ret = s5p_jpeg_try_fmt_vid_cap(file, priv, f);
if (ret)
return ret;
return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
}
static int s5p_jpeg_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
int ret;
ret = s5p_jpeg_try_fmt_vid_out(file, priv, f);
if (ret)
return ret;
return s5p_jpeg_s_fmt(fh_to_ctx(priv), f);
}
static int s5p_jpeg_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
return v4l2_src_change_event_subscribe(fh, sub);
return -EINVAL;
}
static int exynos3250_jpeg_try_downscale(struct s5p_jpeg_ctx *ctx,
struct v4l2_rect *r)
{
int w_ratio, h_ratio, scale_factor, cur_ratio, i;
w_ratio = ctx->out_q.w / r->width;
h_ratio = ctx->out_q.h / r->height;
scale_factor = max(w_ratio, h_ratio);
scale_factor = clamp_val(scale_factor, 1, 8);
/* Align scale ratio to the nearest power of 2 */
for (i = 0; i <= 3; ++i) {
cur_ratio = 1 << i;
if (scale_factor <= cur_ratio) {
ctx->scale_factor = cur_ratio;
break;
}
}
r->width = round_down(ctx->out_q.w / ctx->scale_factor, 2);
r->height = round_down(ctx->out_q.h / ctx->scale_factor, 2);
ctx->crop_rect.width = r->width;
ctx->crop_rect.height = r->height;
ctx->crop_rect.left = 0;
ctx->crop_rect.top = 0;
ctx->crop_altered = true;
return 0;
}
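/*
 * Worked example (illustrative): with a 320x240 input JPEG and a
 * requested 100x100 compose rectangle, w_ratio = 3 and h_ratio = 2,
 * so scale_factor = 3, which the loop rounds up to the next power of
 * two, 4; the resulting rectangle is round_down(320 / 4, 2) x
 * round_down(240 / 4, 2) = 80x60.
 */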
static int exynos3250_jpeg_try_crop(struct s5p_jpeg_ctx *ctx,
struct v4l2_rect *r)
{
struct v4l2_rect base_rect;
int w_step, h_step;
switch (ctx->cap_q.fmt->fourcc) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
w_step = 1;
h_step = 2;
break;
case V4L2_PIX_FMT_YUV420:
w_step = 2;
h_step = 2;
break;
default:
w_step = 1;
h_step = 1;
break;
}
base_rect.top = 0;
base_rect.left = 0;
base_rect.width = ctx->out_q.w;
base_rect.height = ctx->out_q.h;
r->width = round_down(r->width, w_step);
r->height = round_down(r->height, h_step);
r->left = round_down(r->left, 2);
r->top = round_down(r->top, 2);
if (!v4l2_rect_enclosed(r, &base_rect))
return -EINVAL;
ctx->crop_rect.left = r->left;
ctx->crop_rect.top = r->top;
ctx->crop_rect.width = r->width;
ctx->crop_rect.height = r->height;
ctx->crop_altered = true;
return 0;
}
/*
* V4L2 controls
*/
static int s5p_jpeg_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
/* For JPEG blob active == default == bounds */
switch (s->target) {
case V4L2_SEL_TGT_CROP:
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
s->r.width = ctx->out_q.w;
s->r.height = ctx->out_q.h;
s->r.left = 0;
s->r.top = 0;
break;
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
case V4L2_SEL_TGT_COMPOSE_PADDED:
s->r.width = ctx->crop_rect.width;
s->r.height = ctx->crop_rect.height;
s->r.left = ctx->crop_rect.left;
s->r.top = ctx->crop_rect.top;
break;
default:
return -EINVAL;
}
return 0;
}
static int s5p_jpeg_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data);
struct v4l2_rect *rect = &s->r;
int ret = -EINVAL;
if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (s->target == V4L2_SEL_TGT_COMPOSE) {
if (ctx->mode != S5P_JPEG_DECODE)
return -EINVAL;
if (ctx->jpeg->variant->hw3250_compat)
ret = exynos3250_jpeg_try_downscale(ctx, rect);
} else if (s->target == V4L2_SEL_TGT_CROP) {
if (ctx->mode != S5P_JPEG_ENCODE)
return -EINVAL;
if (ctx->jpeg->variant->hw3250_compat)
ret = exynos3250_jpeg_try_crop(ctx, rect);
}
return ret;
}
static int s5p_jpeg_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
struct s5p_jpeg *jpeg = ctx->jpeg;
unsigned long flags;
switch (ctrl->id) {
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
spin_lock_irqsave(&jpeg->slock, flags);
ctrl->val = s5p_jpeg_to_user_subsampling(ctx);
spin_unlock_irqrestore(&jpeg->slock, flags);
break;
}
return 0;
}
static int s5p_jpeg_adjust_subs_ctrl(struct s5p_jpeg_ctx *ctx, int *ctrl_val)
{
switch (ctx->jpeg->variant->version) {
case SJPEG_S5P:
return 0;
case SJPEG_EXYNOS3250:
case SJPEG_EXYNOS5420:
/*
 * The exynos3250/compatible device can produce JPEG images only with
 * 4:4:4 subsampling when given an RGB32 source image.
 */
if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB32)
*ctrl_val = 0;
break;
case SJPEG_EXYNOS4:
/*
 * The exynos4x12 device requires the input raw image fourcc to be
 * V4L2_PIX_FMT_GREY if the gray JPEG format is to be set.
 */
if (ctx->out_q.fmt->fourcc != V4L2_PIX_FMT_GREY &&
*ctrl_val == V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY)
return -EINVAL;
break;
}
/*
 * The exynos4x12 and exynos3250/compatible devices require the
 * resulting JPEG subsampling to be no lower than the subsampling of
 * the input raw image.
 */
if (ctx->out_q.fmt->subsampling > *ctrl_val)
*ctrl_val = ctx->out_q.fmt->subsampling;
return 0;
}
static int s5p_jpeg_try_ctrl(struct v4l2_ctrl *ctrl)
{
struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&ctx->jpeg->slock, flags);
if (ctrl->id == V4L2_CID_JPEG_CHROMA_SUBSAMPLING)
ret = s5p_jpeg_adjust_subs_ctrl(ctx, &ctrl->val);
spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
return ret;
}
static int s5p_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl);
unsigned long flags;
spin_lock_irqsave(&ctx->jpeg->slock, flags);
switch (ctrl->id) {
case V4L2_CID_JPEG_COMPRESSION_QUALITY:
ctx->compr_quality = ctrl->val;
break;
case V4L2_CID_JPEG_RESTART_INTERVAL:
ctx->restart_interval = ctrl->val;
break;
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
ctx->subsampling = ctrl->val;
break;
}
spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
return 0;
}
static const struct v4l2_ctrl_ops s5p_jpeg_ctrl_ops = {
.g_volatile_ctrl = s5p_jpeg_g_volatile_ctrl,
.try_ctrl = s5p_jpeg_try_ctrl,
.s_ctrl = s5p_jpeg_s_ctrl,
};
static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
{
unsigned int mask = ~0x27; /* 444, 422, 420, GRAY */
struct v4l2_ctrl *ctrl;
int ret;
v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3);
if (ctx->mode == S5P_JPEG_ENCODE) {
v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
V4L2_CID_JPEG_COMPRESSION_QUALITY,
0, 3, 1, S5P_JPEG_COMPR_QUAL_WORST);
v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
V4L2_CID_JPEG_RESTART_INTERVAL,
0, 0xffff, 1, 0);
if (ctx->jpeg->variant->version == SJPEG_S5P)
mask = ~0x06; /* 422, 420 */
}
ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
V4L2_CID_JPEG_CHROMA_SUBSAMPLING,
V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY, mask,
V4L2_JPEG_CHROMA_SUBSAMPLING_422);
if (ctx->ctrl_handler.error) {
ret = ctx->ctrl_handler.error;
goto error_free;
}
if (ctx->mode == S5P_JPEG_DECODE)
ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE |
V4L2_CTRL_FLAG_READ_ONLY;
ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
if (ret < 0)
goto error_free;
return ret;
error_free:
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
return ret;
}
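/*
 * Note on the masks above: v4l2_ctrl_new_std_menu() skips menu items
 * whose bit is set in the mask, and the V4L2_JPEG_CHROMA_SUBSAMPLING
 * menu is 0 (444), 1 (422), 2 (420), 3 (411), 4 (410), 5 (GRAY).
 * ~0x27 clears bits 0, 1, 2 and 5, leaving 444/422/420/GRAY
 * selectable; ~0x06 leaves only 422 and 420.
 */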
static const struct v4l2_ioctl_ops s5p_jpeg_ioctl_ops = {
.vidioc_querycap = s5p_jpeg_querycap,
.vidioc_enum_fmt_vid_cap = s5p_jpeg_enum_fmt_vid_cap,
.vidioc_enum_fmt_vid_out = s5p_jpeg_enum_fmt_vid_out,
.vidioc_g_fmt_vid_cap = s5p_jpeg_g_fmt,
.vidioc_g_fmt_vid_out = s5p_jpeg_g_fmt,
.vidioc_try_fmt_vid_cap = s5p_jpeg_try_fmt_vid_cap,
.vidioc_try_fmt_vid_out = s5p_jpeg_try_fmt_vid_out,
.vidioc_s_fmt_vid_cap = s5p_jpeg_s_fmt_vid_cap,
.vidioc_s_fmt_vid_out = s5p_jpeg_s_fmt_vid_out,
.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
.vidioc_g_selection = s5p_jpeg_g_selection,
.vidioc_s_selection = s5p_jpeg_s_selection,
.vidioc_subscribe_event = s5p_jpeg_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
/*
* ============================================================================
* mem2mem callbacks
* ============================================================================
*/
static void s5p_jpeg_device_run(void *priv)
{
struct s5p_jpeg_ctx *ctx = priv;
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
unsigned long src_addr, dst_addr, flags;
spin_lock_irqsave(&ctx->jpeg->slock, flags);
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
s5p_jpeg_reset(jpeg->regs);
s5p_jpeg_poweron(jpeg->regs);
s5p_jpeg_proc_mode(jpeg->regs, ctx->mode);
if (ctx->mode == S5P_JPEG_ENCODE) {
if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565)
s5p_jpeg_input_raw_mode(jpeg->regs,
S5P_JPEG_RAW_IN_565);
else
s5p_jpeg_input_raw_mode(jpeg->regs,
S5P_JPEG_RAW_IN_422);
s5p_jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
s5p_jpeg_dri(jpeg->regs, ctx->restart_interval);
s5p_jpeg_x(jpeg->regs, ctx->out_q.w);
s5p_jpeg_y(jpeg->regs, ctx->out_q.h);
s5p_jpeg_imgadr(jpeg->regs, src_addr);
s5p_jpeg_jpgadr(jpeg->regs, dst_addr);
/* ultimately comes from sizeimage from userspace */
s5p_jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size);
/* JPEG RGB to YCbCr conversion matrix */
s5p_jpeg_coef(jpeg->regs, 1, 1, S5P_JPEG_COEF11);
s5p_jpeg_coef(jpeg->regs, 1, 2, S5P_JPEG_COEF12);
s5p_jpeg_coef(jpeg->regs, 1, 3, S5P_JPEG_COEF13);
s5p_jpeg_coef(jpeg->regs, 2, 1, S5P_JPEG_COEF21);
s5p_jpeg_coef(jpeg->regs, 2, 2, S5P_JPEG_COEF22);
s5p_jpeg_coef(jpeg->regs, 2, 3, S5P_JPEG_COEF23);
s5p_jpeg_coef(jpeg->regs, 3, 1, S5P_JPEG_COEF31);
s5p_jpeg_coef(jpeg->regs, 3, 2, S5P_JPEG_COEF32);
s5p_jpeg_coef(jpeg->regs, 3, 3, S5P_JPEG_COEF33);
/*
* JPEG IP allows storing 4 quantization tables
* We fill table 0 for luma and table 1 for chroma
*/
s5p_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
s5p_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
/* use table 0 for Y */
s5p_jpeg_qtbl(jpeg->regs, 1, 0);
/* use table 1 for Cb and Cr */
s5p_jpeg_qtbl(jpeg->regs, 2, 1);
s5p_jpeg_qtbl(jpeg->regs, 3, 1);
/* Y, Cb, Cr use Huffman table 0 */
s5p_jpeg_htbl_ac(jpeg->regs, 1);
s5p_jpeg_htbl_dc(jpeg->regs, 1);
s5p_jpeg_htbl_ac(jpeg->regs, 2);
s5p_jpeg_htbl_dc(jpeg->regs, 2);
s5p_jpeg_htbl_ac(jpeg->regs, 3);
s5p_jpeg_htbl_dc(jpeg->regs, 3);
} else { /* S5P_JPEG_DECODE */
s5p_jpeg_rst_int_enable(jpeg->regs, true);
s5p_jpeg_data_num_int_enable(jpeg->regs, true);
s5p_jpeg_final_mcu_num_int_enable(jpeg->regs, true);
if (ctx->cap_q.fmt->fourcc == V4L2_PIX_FMT_YUYV)
s5p_jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_422);
else
s5p_jpeg_outform_raw(jpeg->regs, S5P_JPEG_RAW_OUT_420);
s5p_jpeg_jpgadr(jpeg->regs, src_addr);
s5p_jpeg_imgadr(jpeg->regs, dst_addr);
}
s5p_jpeg_start(jpeg->regs);
spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
}
static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct s5p_jpeg_fmt *fmt;
struct vb2_v4l2_buffer *vb;
struct s5p_jpeg_addr jpeg_addr = {};
u32 pix_size, padding_bytes = 0;
jpeg_addr.cb = 0;
jpeg_addr.cr = 0;
pix_size = ctx->cap_q.w * ctx->cap_q.h;
if (ctx->mode == S5P_JPEG_ENCODE) {
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
fmt = ctx->out_q.fmt;
if (ctx->out_q.w % 2 && fmt->h_align > 0)
padding_bytes = ctx->out_q.h;
} else {
fmt = ctx->cap_q.fmt;
vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
}
jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
if (fmt->colplanes == 2) {
jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
} else if (fmt->colplanes == 3) {
jpeg_addr.cb = jpeg_addr.y + pix_size;
if (fmt->fourcc == V4L2_PIX_FMT_YUV420)
jpeg_addr.cr = jpeg_addr.cb + pix_size / 4;
else
jpeg_addr.cr = jpeg_addr.cb + pix_size / 2;
}
exynos4_jpeg_set_frame_buf_address(jpeg->regs, &jpeg_addr);
}
static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_v4l2_buffer *vb;
unsigned int jpeg_addr = 0;
if (ctx->mode == S5P_JPEG_ENCODE)
vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
else
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
if (jpeg->variant->version == SJPEG_EXYNOS5433 &&
ctx->mode == S5P_JPEG_DECODE)
jpeg_addr += ctx->out_q.sos;
exynos4_jpeg_set_stream_buf_address(jpeg->regs, jpeg_addr);
}
static inline void exynos4_jpeg_set_img_fmt(void __iomem *base,
unsigned int img_fmt)
{
__exynos4_jpeg_set_img_fmt(base, img_fmt, SJPEG_EXYNOS4);
}
static inline void exynos5433_jpeg_set_img_fmt(void __iomem *base,
unsigned int img_fmt)
{
__exynos4_jpeg_set_img_fmt(base, img_fmt, SJPEG_EXYNOS5433);
}
static inline void exynos4_jpeg_set_enc_out_fmt(void __iomem *base,
unsigned int out_fmt)
{
__exynos4_jpeg_set_enc_out_fmt(base, out_fmt, SJPEG_EXYNOS4);
}
static inline void exynos5433_jpeg_set_enc_out_fmt(void __iomem *base,
unsigned int out_fmt)
{
__exynos4_jpeg_set_enc_out_fmt(base, out_fmt, SJPEG_EXYNOS5433);
}
static void exynos4_jpeg_device_run(void *priv)
{
struct s5p_jpeg_ctx *ctx = priv;
struct s5p_jpeg *jpeg = ctx->jpeg;
unsigned int bitstream_size;
unsigned long flags;
spin_lock_irqsave(&jpeg->slock, flags);
if (ctx->mode == S5P_JPEG_ENCODE) {
exynos4_jpeg_sw_reset(jpeg->regs);
exynos4_jpeg_set_interrupt(jpeg->regs, jpeg->variant->version);
exynos4_jpeg_set_huf_table_enable(jpeg->regs, 1);
exynos4_jpeg_set_huff_tbl(jpeg->regs);
/*
* JPEG IP allows storing 4 quantization tables
* We fill table 0 for luma and table 1 for chroma
*/
exynos4_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
exynos4_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
exynos4_jpeg_set_encode_tbl_select(jpeg->regs,
ctx->compr_quality);
exynos4_jpeg_set_stream_size(jpeg->regs, ctx->cap_q.w,
ctx->cap_q.h);
if (ctx->jpeg->variant->version == SJPEG_EXYNOS4) {
exynos4_jpeg_set_enc_out_fmt(jpeg->regs,
ctx->subsampling);
exynos4_jpeg_set_img_fmt(jpeg->regs,
ctx->out_q.fmt->fourcc);
} else {
exynos5433_jpeg_set_enc_out_fmt(jpeg->regs,
ctx->subsampling);
exynos5433_jpeg_set_img_fmt(jpeg->regs,
ctx->out_q.fmt->fourcc);
}
exynos4_jpeg_set_img_addr(ctx);
exynos4_jpeg_set_jpeg_addr(ctx);
exynos4_jpeg_set_encode_hoff_cnt(jpeg->regs,
ctx->out_q.fmt->fourcc);
} else {
exynos4_jpeg_sw_reset(jpeg->regs);
exynos4_jpeg_set_interrupt(jpeg->regs,
jpeg->variant->version);
exynos4_jpeg_set_img_addr(ctx);
exynos4_jpeg_set_jpeg_addr(ctx);
if (jpeg->variant->version == SJPEG_EXYNOS5433) {
exynos4_jpeg_parse_huff_tbl(ctx);
exynos4_jpeg_parse_decode_h_tbl(ctx);
exynos4_jpeg_parse_q_tbl(ctx);
exynos4_jpeg_parse_decode_q_tbl(ctx);
exynos4_jpeg_set_huf_table_enable(jpeg->regs, 1);
exynos4_jpeg_set_stream_size(jpeg->regs, ctx->cap_q.w,
ctx->cap_q.h);
exynos5433_jpeg_set_enc_out_fmt(jpeg->regs,
ctx->subsampling);
exynos5433_jpeg_set_img_fmt(jpeg->regs,
ctx->cap_q.fmt->fourcc);
bitstream_size = DIV_ROUND_UP(ctx->out_q.size, 16);
} else {
exynos4_jpeg_set_img_fmt(jpeg->regs,
ctx->cap_q.fmt->fourcc);
bitstream_size = DIV_ROUND_UP(ctx->out_q.size, 32);
}
exynos4_jpeg_set_dec_bitstream_size(jpeg->regs, bitstream_size);
}
exynos4_jpeg_set_sys_int_enable(jpeg->regs, 1);
exynos4_jpeg_set_enc_dec_mode(jpeg->regs, ctx->mode);
spin_unlock_irqrestore(&jpeg->slock, flags);
}
static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct s5p_jpeg_fmt *fmt;
struct vb2_v4l2_buffer *vb;
struct s5p_jpeg_addr jpeg_addr = {};
u32 pix_size;
pix_size = ctx->cap_q.w * ctx->cap_q.h;
if (ctx->mode == S5P_JPEG_ENCODE) {
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
fmt = ctx->out_q.fmt;
} else {
vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
fmt = ctx->cap_q.fmt;
}
jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
if (fmt->colplanes == 2) {
jpeg_addr.cb = jpeg_addr.y + pix_size;
} else if (fmt->colplanes == 3) {
jpeg_addr.cb = jpeg_addr.y + pix_size;
if (fmt->fourcc == V4L2_PIX_FMT_YUV420)
jpeg_addr.cr = jpeg_addr.cb + pix_size / 4;
else
jpeg_addr.cr = jpeg_addr.cb + pix_size / 2;
}
exynos3250_jpeg_imgadr(jpeg->regs, &jpeg_addr);
}
static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg *jpeg = ctx->jpeg;
struct vb2_v4l2_buffer *vb;
unsigned int jpeg_addr = 0;
if (ctx->mode == S5P_JPEG_ENCODE)
vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
else
vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr);
}
static void exynos3250_jpeg_device_run(void *priv)
{
struct s5p_jpeg_ctx *ctx = priv;
struct s5p_jpeg *jpeg = ctx->jpeg;
unsigned long flags;
spin_lock_irqsave(&ctx->jpeg->slock, flags);
exynos3250_jpeg_reset(jpeg->regs);
exynos3250_jpeg_set_dma_num(jpeg->regs);
exynos3250_jpeg_poweron(jpeg->regs);
exynos3250_jpeg_clk_set(jpeg->regs);
exynos3250_jpeg_proc_mode(jpeg->regs, ctx->mode);
if (ctx->mode == S5P_JPEG_ENCODE) {
exynos3250_jpeg_input_raw_fmt(jpeg->regs,
ctx->out_q.fmt->fourcc);
exynos3250_jpeg_dri(jpeg->regs, ctx->restart_interval);
/*
* JPEG IP allows storing 4 quantization tables
* We fill table 0 for luma and table 1 for chroma
*/
s5p_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality);
s5p_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality);
/* use table 0 for Y */
exynos3250_jpeg_qtbl(jpeg->regs, 1, 0);
/* use table 1 for Cb and Cr */
exynos3250_jpeg_qtbl(jpeg->regs, 2, 1);
exynos3250_jpeg_qtbl(jpeg->regs, 3, 1);
/*
* Some SoCs require setting Huffman tables before each run
*/
if (jpeg->variant->htbl_reinit) {
s5p_jpeg_set_hdctbl(jpeg->regs);
s5p_jpeg_set_hdctblg(jpeg->regs);
s5p_jpeg_set_hactbl(jpeg->regs);
s5p_jpeg_set_hactblg(jpeg->regs);
}
/* Y, Cb, Cr use Huffman table 0 */
exynos3250_jpeg_htbl_ac(jpeg->regs, 1);
exynos3250_jpeg_htbl_dc(jpeg->regs, 1);
exynos3250_jpeg_htbl_ac(jpeg->regs, 2);
exynos3250_jpeg_htbl_dc(jpeg->regs, 2);
exynos3250_jpeg_htbl_ac(jpeg->regs, 3);
exynos3250_jpeg_htbl_dc(jpeg->regs, 3);
exynos3250_jpeg_set_x(jpeg->regs, ctx->crop_rect.width);
exynos3250_jpeg_set_y(jpeg->regs, ctx->crop_rect.height);
exynos3250_jpeg_stride(jpeg->regs, ctx->out_q.fmt->fourcc,
ctx->out_q.w);
exynos3250_jpeg_offset(jpeg->regs, ctx->crop_rect.left,
ctx->crop_rect.top);
exynos3250_jpeg_set_img_addr(ctx);
exynos3250_jpeg_set_jpeg_addr(ctx);
exynos3250_jpeg_subsampling_mode(jpeg->regs, ctx->subsampling);
/* ultimately comes from sizeimage from userspace */
exynos3250_jpeg_enc_stream_bound(jpeg->regs, ctx->cap_q.size);
if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565 ||
ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565X ||
ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB32)
exynos3250_jpeg_set_y16(jpeg->regs, true);
} else {
exynos3250_jpeg_set_img_addr(ctx);
exynos3250_jpeg_set_jpeg_addr(ctx);
exynos3250_jpeg_stride(jpeg->regs, ctx->cap_q.fmt->fourcc,
ctx->cap_q.w);
exynos3250_jpeg_offset(jpeg->regs, 0, 0);
exynos3250_jpeg_dec_scaling_ratio(jpeg->regs,
ctx->scale_factor);
exynos3250_jpeg_dec_stream_size(jpeg->regs, ctx->out_q.size);
exynos3250_jpeg_output_raw_fmt(jpeg->regs,
ctx->cap_q.fmt->fourcc);
}
exynos3250_jpeg_interrupts_enable(jpeg->regs);
/* JPEG RGB to YCbCr conversion matrix */
exynos3250_jpeg_coef(jpeg->regs, ctx->mode);
exynos3250_jpeg_set_timer(jpeg->regs, EXYNOS3250_IRQ_TIMEOUT);
jpeg->irq_status = 0;
exynos3250_jpeg_start(jpeg->regs);
spin_unlock_irqrestore(&ctx->jpeg->slock, flags);
}
static int s5p_jpeg_job_ready(void *priv)
{
struct s5p_jpeg_ctx *ctx = priv;
if (ctx->mode == S5P_JPEG_DECODE) {
/*
* We have only one input buffer and one output buffer. If there
* is a resolution change event, no need to continue decoding.
*/
if (ctx->state == JPEGCTX_RESOLUTION_CHANGE)
return 0;
return ctx->hdr_parsed;
}
return 1;
}
static const struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
.device_run = s5p_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
};
static const struct v4l2_m2m_ops exynos3250_jpeg_m2m_ops = {
.device_run = exynos3250_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
};
static const struct v4l2_m2m_ops exynos4_jpeg_m2m_ops = {
.device_run = exynos4_jpeg_device_run,
.job_ready = s5p_jpeg_job_ready,
};
/*
* ============================================================================
* Queue operations
* ============================================================================
*/
static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
struct s5p_jpeg_q_data *q_data = NULL;
unsigned int size, count = *nbuffers;
q_data = get_q_data(ctx, vq->type);
BUG_ON(q_data == NULL);
size = q_data->size;
/*
 * The header is parsed during decoding and the parsed information is
 * stored in the context, so we do not allow another buffer to
 * overwrite it.
 */
if (ctx->mode == S5P_JPEG_DECODE)
count = 1;
*nbuffers = count;
*nplanes = 1;
sizes[0] = size;
return 0;
}
static int s5p_jpeg_buf_prepare(struct vb2_buffer *vb)
{
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct s5p_jpeg_q_data *q_data = NULL;
q_data = get_q_data(ctx, vb->vb2_queue->type);
BUG_ON(q_data == NULL);
if (vb2_plane_size(vb, 0) < q_data->size) {
pr_err("%s data will not fit into plane (%lu < %lu)\n",
__func__, vb2_plane_size(vb, 0),
(long)q_data->size);
return -EINVAL;
}
vb2_set_plane_payload(vb, 0, q_data->size);
return 0;
}
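/*
 * Propagate the resolution parsed from the JPEG header on the OUTPUT
 * queue to the CAPTURE queue, aligning width and height to the format's
 * constraints and recomputing the expected plane size.
 */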
static void s5p_jpeg_set_capture_queue_data(struct s5p_jpeg_ctx *ctx)
{
struct s5p_jpeg_q_data *q_data = &ctx->cap_q;
q_data->w = ctx->out_q.w;
q_data->h = ctx->out_q.h;
/*
* This call to jpeg_bound_align_image() takes care of width and
* height values alignment when user space calls the QBUF of
* OUTPUT buffer after the S_FMT of CAPTURE buffer.
* Note that on Exynos4x12 SoCs, skipping S_FMT on the capture
* buffer for each JPEG image can result in a hardware hang-up if
* the subsampling is lower than that of the input JPEG.
*/
jpeg_bound_align_image(ctx, &q_data->w, S5P_JPEG_MIN_WIDTH,
S5P_JPEG_MAX_WIDTH, q_data->fmt->h_align,
&q_data->h, S5P_JPEG_MIN_HEIGHT,
S5P_JPEG_MAX_HEIGHT, q_data->fmt->v_align);
q_data->size = q_data->w * q_data->h * q_data->fmt->depth >> 3;
}
static void s5p_jpeg_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
if (ctx->mode == S5P_JPEG_DECODE &&
vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
static const struct v4l2_event ev_src_ch = {
.type = V4L2_EVENT_SOURCE_CHANGE,
.u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
};
struct vb2_queue *dst_vq;
u32 ori_w;
u32 ori_h;
dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE);
ori_w = ctx->out_q.w;
ori_h = ctx->out_q.h;
ctx->hdr_parsed = s5p_jpeg_parse_hdr(&ctx->out_q,
(unsigned long)vb2_plane_vaddr(vb, 0),
min((unsigned long)ctx->out_q.size,
vb2_get_plane_payload(vb, 0)), ctx);
if (!ctx->hdr_parsed) {
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
return;
}
/*
* If there is a resolution change event, only update capture
* queue when it is not streaming. Otherwise, update it in
* STREAMOFF. See s5p_jpeg_stop_streaming for details.
*/
if (ctx->out_q.w != ori_w || ctx->out_q.h != ori_h) {
v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
if (vb2_is_streaming(dst_vq))
ctx->state = JPEGCTX_RESOLUTION_CHANGE;
else
s5p_jpeg_set_capture_queue_data(ctx);
}
}
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
return pm_runtime_resume_and_get(ctx->jpeg->dev);
}
static void s5p_jpeg_stop_streaming(struct vb2_queue *q)
{
struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q);
/*
* STREAMOFF acknowledges the resolution change event. Before
* STREAMOFF we still have to report the old resolution and
* subsampling, so update the capture queue only once the stream is off.
*/
if (ctx->state == JPEGCTX_RESOLUTION_CHANGE &&
q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
s5p_jpeg_set_capture_queue_data(ctx);
ctx->state = JPEGCTX_RUNNING;
}
pm_runtime_put(ctx->jpeg->dev);
}
static const struct vb2_ops s5p_jpeg_qops = {
.queue_setup = s5p_jpeg_queue_setup,
.buf_prepare = s5p_jpeg_buf_prepare,
.buf_queue = s5p_jpeg_buf_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.start_streaming = s5p_jpeg_start_streaming,
.stop_streaming = s5p_jpeg_stop_streaming,
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct s5p_jpeg_ctx *ctx = priv;
int ret;
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &s5p_jpeg_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->jpeg->lock;
src_vq->dev = ctx->jpeg->dev;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &s5p_jpeg_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->jpeg->lock;
dst_vq->dev = ctx->jpeg->dev;
return vb2_queue_init(dst_vq);
}
/*
* ============================================================================
* ISR
* ============================================================================
*/
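/*
 * S5PC210 interrupt handler. It inspects three status sources: the
 * encoded-stream-too-large flag (encode only), the watchdog timer and
 * the operation result. Any failure marks both buffers with
 * VB2_BUF_STATE_ERROR; on success the compressed size becomes the
 * capture payload for encode jobs.
 */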
static irqreturn_t s5p_jpeg_irq(int irq, void *dev_id)
{
struct s5p_jpeg *jpeg = dev_id;
struct s5p_jpeg_ctx *curr_ctx;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
unsigned long payload_size = 0;
enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
bool enc_jpeg_too_large = false;
bool timer_elapsed = false;
bool op_completed = false;
spin_lock(&jpeg->slock);
curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
if (curr_ctx->mode == S5P_JPEG_ENCODE)
enc_jpeg_too_large = s5p_jpeg_enc_stream_stat(jpeg->regs);
timer_elapsed = s5p_jpeg_timer_stat(jpeg->regs);
op_completed = s5p_jpeg_result_stat_ok(jpeg->regs);
if (curr_ctx->mode == S5P_JPEG_DECODE)
op_completed = op_completed &&
s5p_jpeg_stream_stat_ok(jpeg->regs);
if (enc_jpeg_too_large) {
state = VB2_BUF_STATE_ERROR;
s5p_jpeg_clear_enc_stream_stat(jpeg->regs);
} else if (timer_elapsed) {
state = VB2_BUF_STATE_ERROR;
s5p_jpeg_clear_timer_stat(jpeg->regs);
} else if (!op_completed) {
state = VB2_BUF_STATE_ERROR;
} else {
payload_size = s5p_jpeg_compressed_size(jpeg->regs);
}
dst_buf->timecode = src_buf->timecode;
dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
dst_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
dst_buf->flags |=
src_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
v4l2_m2m_buf_done(src_buf, state);
if (curr_ctx->mode == S5P_JPEG_ENCODE)
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
v4l2_m2m_buf_done(dst_buf, state);
curr_ctx->subsampling = s5p_jpeg_get_subsampling_mode(jpeg->regs);
spin_unlock(&jpeg->slock);
s5p_jpeg_clear_int(jpeg->regs);
v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
return IRQ_HANDLED;
}
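/*
 * Exynos4-family interrupt handler. The low five bits of the interrupt
 * status register encode the completion cause; only 0x2 (OK_ENC_OR_DEC)
 * is a success, everything else returns both buffers in the error state.
 */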
static irqreturn_t exynos4_jpeg_irq(int irq, void *priv)
{
unsigned int int_status;
struct vb2_v4l2_buffer *src_vb, *dst_vb;
struct s5p_jpeg *jpeg = priv;
struct s5p_jpeg_ctx *curr_ctx;
unsigned long payload_size = 0;
spin_lock(&jpeg->slock);
exynos4_jpeg_set_sys_int_enable(jpeg->regs, 0);
curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
int_status = exynos4_jpeg_get_int_status(jpeg->regs);
if (int_status) {
switch (int_status & 0x1f) {
case 0x1:
jpeg->irq_ret = ERR_PROT;
break;
case 0x2:
jpeg->irq_ret = OK_ENC_OR_DEC;
break;
case 0x4:
jpeg->irq_ret = ERR_DEC_INVALID_FORMAT;
break;
case 0x8:
jpeg->irq_ret = ERR_MULTI_SCAN;
break;
case 0x10:
jpeg->irq_ret = ERR_FRAME;
break;
default:
jpeg->irq_ret = ERR_UNKNOWN;
break;
}
} else {
jpeg->irq_ret = ERR_UNKNOWN;
}
if (jpeg->irq_ret == OK_ENC_OR_DEC) {
if (curr_ctx->mode == S5P_JPEG_ENCODE) {
payload_size = exynos4_jpeg_get_stream_size(jpeg->regs);
vb2_set_plane_payload(&dst_vb->vb2_buf,
0, payload_size);
}
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
} else {
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
}
if (jpeg->variant->version == SJPEG_EXYNOS4)
curr_ctx->subsampling = exynos4_jpeg_get_frame_fmt(jpeg->regs);
exynos4_jpeg_set_enc_dec_mode(jpeg->regs, S5P_JPEG_DISABLE);
spin_unlock(&jpeg->slock);
v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
return IRQ_HANDLED;
}
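/*
 * Exynos3250/5420 interrupt handler. The watchdog timer and (on
 * Exynos5420) the stream-error bit turn the job into an error; a
 * header-parsed interrupt during decode just restarts the core, and a
 * completion status releases the buffers, using the compressed size as
 * the encode payload.
 */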
static irqreturn_t exynos3250_jpeg_irq(int irq, void *dev_id)
{
struct s5p_jpeg *jpeg = dev_id;
struct s5p_jpeg_ctx *curr_ctx;
struct vb2_v4l2_buffer *src_buf, *dst_buf;
unsigned long payload_size = 0;
enum vb2_buffer_state state = VB2_BUF_STATE_DONE;
bool interrupt_timeout = false;
bool stream_error = false;
u32 irq_status;
spin_lock(&jpeg->slock);
irq_status = exynos3250_jpeg_get_timer_status(jpeg->regs);
if (irq_status & EXYNOS3250_TIMER_INT_STAT) {
exynos3250_jpeg_clear_timer_status(jpeg->regs);
interrupt_timeout = true;
dev_err(jpeg->dev, "Interrupt timeout occurred.\n");
}
irq_status = exynos3250_jpeg_get_int_status(jpeg->regs);
exynos3250_jpeg_clear_int_status(jpeg->regs, irq_status);
jpeg->irq_status |= irq_status;
if (jpeg->variant->version == SJPEG_EXYNOS5420 &&
irq_status & EXYNOS3250_STREAM_STAT) {
stream_error = true;
dev_err(jpeg->dev, "Syntax error or unrecoverable error occurred.\n");
}
curr_ctx = v4l2_m2m_get_curr_priv(jpeg->m2m_dev);
if (!curr_ctx)
goto exit_unlock;
if ((irq_status & EXYNOS3250_HEADER_STAT) &&
(curr_ctx->mode == S5P_JPEG_DECODE)) {
exynos3250_jpeg_rstart(jpeg->regs);
goto exit_unlock;
}
if (jpeg->irq_status & (EXYNOS3250_JPEG_DONE |
EXYNOS3250_WDMA_DONE |
EXYNOS3250_RDMA_DONE |
EXYNOS3250_RESULT_STAT))
payload_size = exynos3250_jpeg_compressed_size(jpeg->regs);
else if (interrupt_timeout || stream_error)
state = VB2_BUF_STATE_ERROR;
else
goto exit_unlock;
src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
dst_buf->timecode = src_buf->timecode;
dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
v4l2_m2m_buf_done(src_buf, state);
if (curr_ctx->mode == S5P_JPEG_ENCODE)
vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size);
v4l2_m2m_buf_done(dst_buf, state);
curr_ctx->subsampling =
exynos3250_jpeg_get_subsampling_mode(jpeg->regs);
spin_unlock(&jpeg->slock);
v4l2_m2m_job_finish(jpeg->m2m_dev, curr_ctx->fh.m2m_ctx);
return IRQ_HANDLED;
exit_unlock:
spin_unlock(&jpeg->slock);
return IRQ_HANDLED;
}
static void *jpeg_get_drv_data(struct device *dev);
/*
* ============================================================================
* Driver basic infrastructure
* ============================================================================
*/
static int s5p_jpeg_probe(struct platform_device *pdev)
{
struct s5p_jpeg *jpeg;
int i, ret;
/* JPEG IP abstraction struct */
jpeg = devm_kzalloc(&pdev->dev, sizeof(struct s5p_jpeg), GFP_KERNEL);
if (!jpeg)
return -ENOMEM;
jpeg->variant = jpeg_get_drv_data(&pdev->dev);
if (!jpeg->variant)
return -ENODEV;
mutex_init(&jpeg->lock);
spin_lock_init(&jpeg->slock);
jpeg->dev = &pdev->dev;
/* memory-mapped registers */
jpeg->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpeg->regs))
return PTR_ERR(jpeg->regs);
/* interrupt service routine registration */
jpeg->irq = ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
ret = devm_request_irq(&pdev->dev, jpeg->irq, jpeg->variant->jpeg_irq,
0, dev_name(&pdev->dev), jpeg);
if (ret) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpeg->irq);
return ret;
}
/* clocks */
for (i = 0; i < jpeg->variant->num_clocks; i++) {
jpeg->clocks[i] = devm_clk_get(&pdev->dev,
jpeg->variant->clk_names[i]);
if (IS_ERR(jpeg->clocks[i])) {
dev_err(&pdev->dev, "failed to get clock: %s\n",
jpeg->variant->clk_names[i]);
return PTR_ERR(jpeg->clocks[i]);
}
}
/* v4l2 device */
ret = v4l2_device_register(&pdev->dev, &jpeg->v4l2_dev);
if (ret) {
dev_err(&pdev->dev, "Failed to register v4l2 device\n");
return ret;
}
/* mem2mem device */
jpeg->m2m_dev = v4l2_m2m_init(jpeg->variant->m2m_ops);
if (IS_ERR(jpeg->m2m_dev)) {
v4l2_err(&jpeg->v4l2_dev, "Failed to init mem2mem device\n");
ret = PTR_ERR(jpeg->m2m_dev);
goto device_register_rollback;
}
vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
/* JPEG encoder /dev/videoX node */
jpeg->vfd_encoder = video_device_alloc();
if (!jpeg->vfd_encoder) {
v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n");
ret = -ENOMEM;
goto m2m_init_rollback;
}
snprintf(jpeg->vfd_encoder->name, sizeof(jpeg->vfd_encoder->name),
"%s-enc", S5P_JPEG_M2M_NAME);
jpeg->vfd_encoder->fops = &s5p_jpeg_fops;
jpeg->vfd_encoder->ioctl_ops = &s5p_jpeg_ioctl_ops;
jpeg->vfd_encoder->minor = -1;
jpeg->vfd_encoder->release = video_device_release;
jpeg->vfd_encoder->lock = &jpeg->lock;
jpeg->vfd_encoder->v4l2_dev = &jpeg->v4l2_dev;
jpeg->vfd_encoder->vfl_dir = VFL_DIR_M2M;
jpeg->vfd_encoder->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
ret = video_register_device(jpeg->vfd_encoder, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
video_device_release(jpeg->vfd_encoder);
goto m2m_init_rollback;
}
video_set_drvdata(jpeg->vfd_encoder, jpeg);
v4l2_info(&jpeg->v4l2_dev,
"encoder device registered as /dev/video%d\n",
jpeg->vfd_encoder->num);
/* JPEG decoder /dev/videoX node */
jpeg->vfd_decoder = video_device_alloc();
if (!jpeg->vfd_decoder) {
v4l2_err(&jpeg->v4l2_dev, "Failed to allocate video device\n");
ret = -ENOMEM;
goto enc_vdev_register_rollback;
}
snprintf(jpeg->vfd_decoder->name, sizeof(jpeg->vfd_decoder->name),
"%s-dec", S5P_JPEG_M2M_NAME);
jpeg->vfd_decoder->fops = &s5p_jpeg_fops;
jpeg->vfd_decoder->ioctl_ops = &s5p_jpeg_ioctl_ops;
jpeg->vfd_decoder->minor = -1;
jpeg->vfd_decoder->release = video_device_release;
jpeg->vfd_decoder->lock = &jpeg->lock;
jpeg->vfd_decoder->v4l2_dev = &jpeg->v4l2_dev;
jpeg->vfd_decoder->vfl_dir = VFL_DIR_M2M;
jpeg->vfd_decoder->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M;
ret = video_register_device(jpeg->vfd_decoder, VFL_TYPE_VIDEO, -1);
if (ret) {
v4l2_err(&jpeg->v4l2_dev, "Failed to register video device\n");
video_device_release(jpeg->vfd_decoder);
goto enc_vdev_register_rollback;
}
video_set_drvdata(jpeg->vfd_decoder, jpeg);
v4l2_info(&jpeg->v4l2_dev,
"decoder device registered as /dev/video%d\n",
jpeg->vfd_decoder->num);
/* final statements & power management */
platform_set_drvdata(pdev, jpeg);
pm_runtime_enable(&pdev->dev);
v4l2_info(&jpeg->v4l2_dev, "Samsung S5P JPEG codec\n");
return 0;
enc_vdev_register_rollback:
video_unregister_device(jpeg->vfd_encoder);
m2m_init_rollback:
v4l2_m2m_release(jpeg->m2m_dev);
device_register_rollback:
v4l2_device_unregister(&jpeg->v4l2_dev);
return ret;
}
static void s5p_jpeg_remove(struct platform_device *pdev)
{
struct s5p_jpeg *jpeg = platform_get_drvdata(pdev);
int i;
pm_runtime_disable(jpeg->dev);
video_unregister_device(jpeg->vfd_decoder);
video_unregister_device(jpeg->vfd_encoder);
vb2_dma_contig_clear_max_seg_size(&pdev->dev);
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
if (!pm_runtime_status_suspended(&pdev->dev)) {
for (i = jpeg->variant->num_clocks - 1; i >= 0; i--)
clk_disable_unprepare(jpeg->clocks[i]);
}
}
#ifdef CONFIG_PM
static int s5p_jpeg_runtime_suspend(struct device *dev)
{
struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
int i;
for (i = jpeg->variant->num_clocks - 1; i >= 0; i--)
clk_disable_unprepare(jpeg->clocks[i]);
return 0;
}
static int s5p_jpeg_runtime_resume(struct device *dev)
{
struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
unsigned long flags;
int i, ret;
for (i = 0; i < jpeg->variant->num_clocks; i++) {
ret = clk_prepare_enable(jpeg->clocks[i]);
if (ret) {
while (--i >= 0)
clk_disable_unprepare(jpeg->clocks[i]);
return ret;
}
}
spin_lock_irqsave(&jpeg->slock, flags);
/*
 * The JPEG IP allows storing two Huffman tables per component.
 * We fill table 0 for each component, and do this here only for the
 * S5PC210 and Exynos3250 SoCs. Exynos4x12 and Exynos542x SoCs require
 * programming their Huffman tables each time the encoding process is
 * initialized, so there it is done in the device_run callback of
 * m2m_ops.
 */
if (!jpeg->variant->htbl_reinit) {
s5p_jpeg_set_hdctbl(jpeg->regs);
s5p_jpeg_set_hdctblg(jpeg->regs);
s5p_jpeg_set_hactbl(jpeg->regs);
s5p_jpeg_set_hactblg(jpeg->regs);
}
spin_unlock_irqrestore(&jpeg->slock, flags);
return 0;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops s5p_jpeg_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(s5p_jpeg_runtime_suspend, s5p_jpeg_runtime_resume,
NULL)
};
static struct s5p_jpeg_variant s5p_jpeg_drvdata = {
.version = SJPEG_S5P,
.jpeg_irq = s5p_jpeg_irq,
.m2m_ops = &s5p_jpeg_m2m_ops,
.fmt_ver_flag = SJPEG_FMT_FLAG_S5P,
.clk_names = {"jpeg"},
.num_clocks = 1,
};
static struct s5p_jpeg_variant exynos3250_jpeg_drvdata = {
.version = SJPEG_EXYNOS3250,
.jpeg_irq = exynos3250_jpeg_irq,
.m2m_ops = &exynos3250_jpeg_m2m_ops,
.fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS3250,
.hw3250_compat = 1,
.clk_names = {"jpeg", "sclk"},
.num_clocks = 2,
};
static struct s5p_jpeg_variant exynos4_jpeg_drvdata = {
.version = SJPEG_EXYNOS4,
.jpeg_irq = exynos4_jpeg_irq,
.m2m_ops = &exynos4_jpeg_m2m_ops,
.fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS4,
.htbl_reinit = 1,
.clk_names = {"jpeg"},
.num_clocks = 1,
.hw_ex4_compat = 1,
};
static struct s5p_jpeg_variant exynos5420_jpeg_drvdata = {
.version = SJPEG_EXYNOS5420,
.jpeg_irq = exynos3250_jpeg_irq, /* intentionally 3250 */
.m2m_ops = &exynos3250_jpeg_m2m_ops, /* intentionally 3250 */
.fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS3250, /* intentionally 3250 */
.hw3250_compat = 1,
.htbl_reinit = 1,
.clk_names = {"jpeg"},
.num_clocks = 1,
};
static struct s5p_jpeg_variant exynos5433_jpeg_drvdata = {
.version = SJPEG_EXYNOS5433,
.jpeg_irq = exynos4_jpeg_irq,
.m2m_ops = &exynos4_jpeg_m2m_ops,
.fmt_ver_flag = SJPEG_FMT_FLAG_EXYNOS4,
.htbl_reinit = 1,
.clk_names = {"pclk", "aclk", "aclk_xiu", "sclk"},
.num_clocks = 4,
.hw_ex4_compat = 1,
};
static const struct of_device_id samsung_jpeg_match[] = {
{
.compatible = "samsung,s5pv210-jpeg",
.data = &s5p_jpeg_drvdata,
}, {
.compatible = "samsung,exynos3250-jpeg",
.data = &exynos3250_jpeg_drvdata,
}, {
.compatible = "samsung,exynos4210-jpeg",
.data = &exynos4_jpeg_drvdata,
}, {
.compatible = "samsung,exynos4212-jpeg",
.data = &exynos4_jpeg_drvdata,
}, {
.compatible = "samsung,exynos5420-jpeg",
.data = &exynos5420_jpeg_drvdata,
}, {
.compatible = "samsung,exynos5433-jpeg",
.data = &exynos5433_jpeg_drvdata,
},
{},
};
MODULE_DEVICE_TABLE(of, samsung_jpeg_match);
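/*
 * Resolve the per-SoC variant data: on DT platforms match against the
 * compatible string, otherwise fall back to the S5PC210 driver data.
 */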
static void *jpeg_get_drv_data(struct device *dev)
{
struct s5p_jpeg_variant *driver_data = NULL;
const struct of_device_id *match;
if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
return &s5p_jpeg_drvdata;
match = of_match_node(samsung_jpeg_match, dev->of_node);
if (match)
driver_data = (struct s5p_jpeg_variant *)match->data;
return driver_data;
}
static struct platform_driver s5p_jpeg_driver = {
.probe = s5p_jpeg_probe,
.remove_new = s5p_jpeg_remove,
.driver = {
.of_match_table = samsung_jpeg_match,
.name = S5P_JPEG_M2M_NAME,
.pm = &s5p_jpeg_pm_ops,
},
};
module_platform_driver(s5p_jpeg_driver);
MODULE_AUTHOR("Andrzej Pietrasiewicz <[email protected]>");
MODULE_AUTHOR("Jacek Anaszewski <[email protected]>");
MODULE_DESCRIPTION("Samsung JPEG codec driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung EXYNOS5 SoC series G-Scaler driver
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <media/v4l2-ioctl.h>
#include "gsc-core.h"
static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
{
struct gsc_ctx *curr_ctx;
struct gsc_dev *gsc = ctx->gsc_dev;
int ret;
curr_ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
if (!gsc_m2m_pending(gsc) || (curr_ctx != ctx))
return 0;
gsc_ctx_state_lock_set(GSC_CTX_STOP_REQ, ctx);
ret = wait_event_timeout(gsc->irq_queue,
!gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
GSC_SHUTDOWN_TIMEOUT);
return ret == 0 ? -ETIMEDOUT : ret;
}
static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
{
int ret;
ret = gsc_m2m_ctx_stop_req(ctx);
if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) {
gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx);
gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
}
}
static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct gsc_ctx *ctx = q->drv_priv;
return pm_runtime_resume_and_get(&ctx->gsc_dev->pdev->dev);
}
static void __gsc_m2m_cleanup_queue(struct gsc_ctx *ctx)
{
struct vb2_v4l2_buffer *src_vb, *dst_vb;
while (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) {
src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_ERROR);
}
while (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0) {
dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_ERROR);
}
}
static void gsc_m2m_stop_streaming(struct vb2_queue *q)
{
struct gsc_ctx *ctx = q->drv_priv;
__gsc_m2m_job_abort(ctx);
__gsc_m2m_cleanup_queue(ctx);
pm_runtime_put(&ctx->gsc_dev->pdev->dev);
}
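/*
 * Complete the current m2m transaction: pop one buffer from each queue,
 * copy the timestamp, timecode and timestamp-source flags from source
 * to destination, and report both buffers with the given state.
 */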
void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
{
struct vb2_v4l2_buffer *src_vb, *dst_vb;
if (!ctx || !ctx->m2m_ctx)
return;
src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
if (src_vb && dst_vb) {
dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
dst_vb->timecode = src_vb->timecode;
dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
dst_vb->flags |=
src_vb->flags
& V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
v4l2_m2m_buf_done(src_vb, vb_state);
v4l2_m2m_buf_done(dst_vb, vb_state);
v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
ctx->m2m_ctx);
}
}
static void gsc_m2m_job_abort(void *priv)
{
__gsc_m2m_job_abort((struct gsc_ctx *)priv);
}
static int gsc_get_bufs(struct gsc_ctx *ctx)
{
struct gsc_frame *s_frame, *d_frame;
struct vb2_v4l2_buffer *src_vb, *dst_vb;
int ret;
s_frame = &ctx->s_frame;
d_frame = &ctx->d_frame;
src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
ret = gsc_prepare_addr(ctx, &src_vb->vb2_buf, s_frame, &s_frame->addr);
if (ret)
return ret;
dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
ret = gsc_prepare_addr(ctx, &dst_vb->vb2_buf, d_frame, &d_frame->addr);
if (ret)
return ret;
dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp;
return 0;
}
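/*
 * Per-job hardware kick. Full register reprogramming happens only when
 * this context differs from the last one that ran (or GSC_PARAMS is
 * set); otherwise only the buffer addresses are updated. The new
 * settings take effect through the shadow-register update before the
 * core is enabled.
 */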
static void gsc_m2m_device_run(void *priv)
{
struct gsc_ctx *ctx = priv;
struct gsc_dev *gsc;
unsigned long flags;
int ret;
bool is_set = false;
if (WARN(!ctx, "null hardware context\n"))
return;
gsc = ctx->gsc_dev;
spin_lock_irqsave(&gsc->slock, flags);
set_bit(ST_M2M_PEND, &gsc->state);
/* Reconfigure hardware if the context has changed. */
if (gsc->m2m.ctx != ctx) {
pr_debug("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
gsc->m2m.ctx, ctx);
ctx->state |= GSC_PARAMS;
gsc->m2m.ctx = ctx;
}
is_set = ctx->state & GSC_CTX_STOP_REQ;
if (is_set) {
ctx->state &= ~GSC_CTX_STOP_REQ;
ctx->state |= GSC_CTX_ABORT;
wake_up(&gsc->irq_queue);
goto put_device;
}
ret = gsc_get_bufs(ctx);
if (ret) {
pr_err("Wrong address");
goto put_device;
}
gsc_set_prefbuf(gsc, &ctx->s_frame);
gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);
if (ctx->state & GSC_PARAMS) {
gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
gsc_hw_set_frm_done_irq_mask(gsc, false);
gsc_hw_set_gsc_irq_enable(gsc, true);
if (gsc_set_scaler_info(ctx)) {
pr_err("Scaler setup error");
goto put_device;
}
gsc_hw_set_input_path(ctx);
gsc_hw_set_in_size(ctx);
gsc_hw_set_in_image_format(ctx);
gsc_hw_set_output_path(ctx);
gsc_hw_set_out_size(ctx);
gsc_hw_set_out_image_format(ctx);
gsc_hw_set_prescaler(ctx);
gsc_hw_set_mainscaler(ctx);
gsc_hw_set_rotation(ctx);
gsc_hw_set_global_alpha(ctx);
}
/* update shadow registers */
gsc_hw_set_sfr_update(ctx);
ctx->state &= ~GSC_PARAMS;
gsc_hw_enable_control(gsc, true);
spin_unlock_irqrestore(&gsc->slock, flags);
return;
put_device:
ctx->state &= ~GSC_PARAMS;
spin_unlock_irqrestore(&gsc->slock, flags);
}
static int gsc_m2m_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
struct gsc_frame *frame;
int i;
frame = ctx_get_frame(ctx, vq->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
if (!frame->fmt)
return -EINVAL;
*num_planes = frame->fmt->num_planes;
for (i = 0; i < frame->fmt->num_planes; i++)
sizes[i] = frame->payload[i];
return 0;
}
static int gsc_m2m_buf_prepare(struct vb2_buffer *vb)
{
struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct gsc_frame *frame;
int i;
frame = ctx_get_frame(ctx, vb->vb2_queue->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type)) {
for (i = 0; i < frame->fmt->num_planes; i++)
vb2_set_plane_payload(vb, i, frame->payload[i]);
}
return 0;
}
static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);
if (ctx->m2m_ctx)
v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
}
static const struct vb2_ops gsc_m2m_qops = {
.queue_setup = gsc_m2m_queue_setup,
.buf_prepare = gsc_m2m_buf_prepare,
.buf_queue = gsc_m2m_buf_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.stop_streaming = gsc_m2m_stop_streaming,
.start_streaming = gsc_m2m_start_streaming,
};
static int gsc_m2m_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
strscpy(cap->driver, GSC_MODULE_NAME, sizeof(cap->driver));
strscpy(cap->card, GSC_MODULE_NAME " gscaler", sizeof(cap->card));
return 0;
}
static int gsc_m2m_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
return gsc_enum_fmt(f);
}
static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct gsc_ctx *ctx = fh_to_ctx(fh);
return gsc_g_fmt_mplane(ctx, f);
}
static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct gsc_ctx *ctx = fh_to_ctx(fh);
return gsc_try_fmt_mplane(ctx, f);
}
static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct gsc_ctx *ctx = fh_to_ctx(fh);
struct vb2_queue *vq;
struct gsc_frame *frame;
struct v4l2_pix_format_mplane *pix;
int i, ret = 0;
ret = gsc_m2m_try_fmt_mplane(file, fh, f);
if (ret)
return ret;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
if (vb2_is_streaming(vq)) {
pr_err("queue (%d) busy", f->type);
return -EBUSY;
}
if (V4L2_TYPE_IS_OUTPUT(f->type))
frame = &ctx->s_frame;
else
frame = &ctx->d_frame;
pix = &f->fmt.pix_mp;
frame->fmt = find_fmt(&pix->pixelformat, NULL, 0);
frame->colorspace = pix->colorspace;
if (!frame->fmt)
return -EINVAL;
for (i = 0; i < frame->fmt->num_planes; i++)
frame->payload[i] = pix->plane_fmt[i].sizeimage;
gsc_set_frame_size(frame, pix->width, pix->height);
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx);
else
gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx);
pr_debug("f_w: %d, f_h: %d", frame->f_width, frame->f_height);
return 0;
}
static int gsc_m2m_reqbufs(struct file *file, void *fh,
struct v4l2_requestbuffers *reqbufs)
{
struct gsc_ctx *ctx = fh_to_ctx(fh);
struct gsc_dev *gsc = ctx->gsc_dev;
u32 max_cnt;
max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
if (reqbufs->count > max_cnt)
return -EINVAL;
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
static int gsc_m2m_expbuf(struct file *file, void *fh,
struct v4l2_exportbuffer *eb)
{
struct gsc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}
static int gsc_m2m_querybuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct gsc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
static int gsc_m2m_qbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct gsc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
static int gsc_m2m_dqbuf(struct file *file, void *fh,
struct v4l2_buffer *buf)
{
struct gsc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
static int gsc_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct gsc_ctx *ctx = fh_to_ctx(fh);
/* The source and target color formats need to be set */
if (V4L2_TYPE_IS_OUTPUT(type)) {
if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx))
return -EINVAL;
} else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) {
return -EINVAL;
}
return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
static int gsc_m2m_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct gsc_ctx *ctx = fh_to_ctx(fh);
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
static int is_rectangle_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
{
if (a->left < b->left || a->top < b->top)
return 0;
if (a->left + a->width > b->left + b->width)
return 0;
if (a->top + a->height > b->top + b->height)
return 0;
return 1;
}
static int gsc_m2m_g_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
struct gsc_frame *frame;
struct gsc_ctx *ctx = fh_to_ctx(fh);
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
return -EINVAL;
frame = ctx_get_frame(ctx, s->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP_DEFAULT:
s->r.left = 0;
s->r.top = 0;
s->r.width = frame->f_width;
s->r.height = frame->f_height;
return 0;
case V4L2_SEL_TGT_COMPOSE:
case V4L2_SEL_TGT_CROP:
s->r.left = frame->crop.left;
s->r.top = frame->crop.top;
s->r.width = frame->crop.width;
s->r.height = frame->crop.height;
return 0;
}
return -EINVAL;
}
static int gsc_m2m_s_selection(struct file *file, void *fh,
struct v4l2_selection *s)
{
struct gsc_frame *frame;
struct gsc_ctx *ctx = fh_to_ctx(fh);
struct gsc_variant *variant = ctx->gsc_dev->variant;
struct v4l2_selection sel = *s;
int ret;
if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT))
return -EINVAL;
ret = gsc_try_selection(ctx, &sel);
if (ret)
return ret;
if (s->flags & V4L2_SEL_FLAG_LE &&
!is_rectangle_enclosed(&sel.r, &s->r))
return -ERANGE;
if (s->flags & V4L2_SEL_FLAG_GE &&
!is_rectangle_enclosed(&s->r, &sel.r))
return -ERANGE;
s->r = sel.r;
switch (s->target) {
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
case V4L2_SEL_TGT_COMPOSE:
frame = &ctx->s_frame;
break;
case V4L2_SEL_TGT_CROP_BOUNDS:
case V4L2_SEL_TGT_CROP:
case V4L2_SEL_TGT_CROP_DEFAULT:
frame = &ctx->d_frame;
break;
default:
return -EINVAL;
}
/* Check whether the scaling ratio is within the supported range */
if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) {
if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
ret = gsc_check_scaler_ratio(variant, sel.r.width,
sel.r.height, ctx->d_frame.crop.width,
ctx->d_frame.crop.height,
ctx->gsc_ctrls.rotate->val, ctx->out_path);
} else {
ret = gsc_check_scaler_ratio(variant,
ctx->s_frame.crop.width,
ctx->s_frame.crop.height, sel.r.width,
sel.r.height, ctx->gsc_ctrls.rotate->val,
ctx->out_path);
}
if (ret) {
pr_err("Out of scaler range");
return -EINVAL;
}
}
frame->crop = sel.r;
gsc_ctx_state_lock_set(GSC_PARAMS, ctx);
return 0;
}
static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
.vidioc_querycap = gsc_m2m_querycap,
.vidioc_enum_fmt_vid_cap = gsc_m2m_enum_fmt,
.vidioc_enum_fmt_vid_out = gsc_m2m_enum_fmt,
.vidioc_g_fmt_vid_cap_mplane = gsc_m2m_g_fmt_mplane,
.vidioc_g_fmt_vid_out_mplane = gsc_m2m_g_fmt_mplane,
.vidioc_try_fmt_vid_cap_mplane = gsc_m2m_try_fmt_mplane,
.vidioc_try_fmt_vid_out_mplane = gsc_m2m_try_fmt_mplane,
.vidioc_s_fmt_vid_cap_mplane = gsc_m2m_s_fmt_mplane,
.vidioc_s_fmt_vid_out_mplane = gsc_m2m_s_fmt_mplane,
.vidioc_reqbufs = gsc_m2m_reqbufs,
.vidioc_expbuf = gsc_m2m_expbuf,
.vidioc_querybuf = gsc_m2m_querybuf,
.vidioc_qbuf = gsc_m2m_qbuf,
.vidioc_dqbuf = gsc_m2m_dqbuf,
.vidioc_streamon = gsc_m2m_streamon,
.vidioc_streamoff = gsc_m2m_streamoff,
.vidioc_g_selection = gsc_m2m_g_selection,
.vidioc_s_selection = gsc_m2m_s_selection
};
static int queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct gsc_ctx *ctx = priv;
int ret;
memset(src_vq, 0, sizeof(*src_vq));
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->ops = &gsc_m2m_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->gsc_dev->lock;
src_vq->dev = &ctx->gsc_dev->pdev->dev;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
memset(dst_vq, 0, sizeof(*dst_vq));
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->ops = &gsc_m2m_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->gsc_dev->lock;
dst_vq->dev = &ctx->gsc_dev->pdev->dev;
return vb2_queue_init(dst_vq);
}
static int gsc_m2m_open(struct file *file)
{
struct gsc_dev *gsc = video_drvdata(file);
struct gsc_ctx *ctx = NULL;
int ret;
pr_debug("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state);
if (mutex_lock_interruptible(&gsc->lock))
return -ERESTARTSYS;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
ret = -ENOMEM;
goto unlock;
}
v4l2_fh_init(&ctx->fh, gsc->m2m.vfd);
ret = gsc_ctrls_create(ctx);
if (ret)
goto error_fh;
/* Use separate control handler per file handle */
ctx->fh.ctrl_handler = &ctx->ctrl_handler;
file->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
ctx->gsc_dev = gsc;
/* Default color format */
ctx->s_frame.fmt = get_format(0);
ctx->d_frame.fmt = get_format(0);
/* Setup the device context for mem2mem mode. */
ctx->state = GSC_CTX_M2M;
ctx->flags = 0;
ctx->in_path = GSC_DMA;
ctx->out_path = GSC_DMA;
ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
pr_err("Failed to initialize m2m context");
ret = PTR_ERR(ctx->m2m_ctx);
goto error_ctrls;
}
if (gsc->m2m.refcnt++ == 0)
set_bit(ST_M2M_OPEN, &gsc->state);
pr_debug("gsc m2m driver is opened, ctx(0x%p)", ctx);
mutex_unlock(&gsc->lock);
return 0;
error_ctrls:
gsc_ctrls_delete(ctx);
v4l2_fh_del(&ctx->fh);
error_fh:
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
unlock:
mutex_unlock(&gsc->lock);
return ret;
}
static int gsc_m2m_release(struct file *file)
{
struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
struct gsc_dev *gsc = ctx->gsc_dev;
pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
task_pid_nr(current), gsc->state, gsc->m2m.refcnt);
mutex_lock(&gsc->lock);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
gsc_ctrls_delete(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
if (--gsc->m2m.refcnt <= 0)
clear_bit(ST_M2M_OPEN, &gsc->state);
kfree(ctx);
mutex_unlock(&gsc->lock);
return 0;
}
static __poll_t gsc_m2m_poll(struct file *file,
struct poll_table_struct *wait)
{
struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
struct gsc_dev *gsc = ctx->gsc_dev;
__poll_t ret;
if (mutex_lock_interruptible(&gsc->lock))
return EPOLLERR;
ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
mutex_unlock(&gsc->lock);
return ret;
}
static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
struct gsc_dev *gsc = ctx->gsc_dev;
int ret;
if (mutex_lock_interruptible(&gsc->lock))
return -ERESTARTSYS;
ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
mutex_unlock(&gsc->lock);
return ret;
}
static const struct v4l2_file_operations gsc_m2m_fops = {
.owner = THIS_MODULE,
.open = gsc_m2m_open,
.release = gsc_m2m_release,
.poll = gsc_m2m_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = gsc_m2m_mmap,
};
static const struct v4l2_m2m_ops gsc_m2m_ops = {
.device_run = gsc_m2m_device_run,
.job_abort = gsc_m2m_job_abort,
};
int gsc_register_m2m_device(struct gsc_dev *gsc)
{
struct platform_device *pdev;
int ret;
if (!gsc)
return -ENODEV;
pdev = gsc->pdev;
gsc->vdev.fops = &gsc_m2m_fops;
gsc->vdev.ioctl_ops = &gsc_m2m_ioctl_ops;
gsc->vdev.release = video_device_release_empty;
gsc->vdev.lock = &gsc->lock;
gsc->vdev.vfl_dir = VFL_DIR_M2M;
gsc->vdev.v4l2_dev = &gsc->v4l2_dev;
gsc->vdev.device_caps = V4L2_CAP_STREAMING |
V4L2_CAP_VIDEO_M2M_MPLANE;
snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
GSC_MODULE_NAME, gsc->id);
video_set_drvdata(&gsc->vdev, gsc);
gsc->m2m.vfd = &gsc->vdev;
gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
if (IS_ERR(gsc->m2m.m2m_dev)) {
dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
return PTR_ERR(gsc->m2m.m2m_dev);
}
ret = video_register_device(&gsc->vdev, VFL_TYPE_VIDEO, -1);
if (ret) {
dev_err(&pdev->dev,
"%s(): failed to register video device\n", __func__);
goto err_m2m_release;
}
pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
return 0;
err_m2m_release:
v4l2_m2m_release(gsc->m2m.m2m_dev);
return ret;
}
void gsc_unregister_m2m_device(struct gsc_dev *gsc)
{
if (gsc) {
v4l2_m2m_release(gsc->m2m.m2m_dev);
video_unregister_device(&gsc->vdev);
}
}
| linux-master | drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung EXYNOS5 SoC series G-Scaler driver
*/
#include <linux/io.h>
#include <linux/delay.h>
#include "gsc-core.h"
void gsc_hw_set_sw_reset(struct gsc_dev *dev)
{
writel(GSC_SW_RESET_SRESET, dev->regs + GSC_SW_RESET);
}
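/* Poll until the core clears GSC_SW_RESET, giving up after 50 ms. */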
int gsc_wait_reset(struct gsc_dev *dev)
{
unsigned long end = jiffies + msecs_to_jiffies(50);
u32 cfg;
while (time_before(jiffies, end)) {
cfg = readl(dev->regs + GSC_SW_RESET);
if (!cfg)
return 0;
usleep_range(10, 20);
}
return -EBUSY;
}
void gsc_hw_set_frm_done_irq_mask(struct gsc_dev *dev, bool mask)
{
u32 cfg;
cfg = readl(dev->regs + GSC_IRQ);
if (mask)
cfg |= GSC_IRQ_FRMDONE_MASK;
else
cfg &= ~GSC_IRQ_FRMDONE_MASK;
writel(cfg, dev->regs + GSC_IRQ);
}
void gsc_hw_set_gsc_irq_enable(struct gsc_dev *dev, bool mask)
{
u32 cfg;
cfg = readl(dev->regs + GSC_IRQ);
if (mask)
cfg |= GSC_IRQ_ENABLE;
else
cfg &= ~GSC_IRQ_ENABLE;
writel(cfg, dev->regs + GSC_IRQ);
}
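/*
 * Buffer masking: bit <shift> selects one DMA buffer slot; the same
 * mask value is mirrored into the Y, Cb and Cr base-address mask
 * registers.
 */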
void gsc_hw_set_input_buf_masking(struct gsc_dev *dev, u32 shift,
bool enable)
{
u32 cfg = readl(dev->regs + GSC_IN_BASE_ADDR_Y_MASK);
u32 mask = 1 << shift;
cfg &= ~mask;
cfg |= enable << shift;
writel(cfg, dev->regs + GSC_IN_BASE_ADDR_Y_MASK);
writel(cfg, dev->regs + GSC_IN_BASE_ADDR_CB_MASK);
writel(cfg, dev->regs + GSC_IN_BASE_ADDR_CR_MASK);
}
void gsc_hw_set_output_buf_masking(struct gsc_dev *dev, u32 shift,
bool enable)
{
u32 cfg = readl(dev->regs + GSC_OUT_BASE_ADDR_Y_MASK);
u32 mask = 1 << shift;
cfg &= ~mask;
cfg |= enable << shift;
writel(cfg, dev->regs + GSC_OUT_BASE_ADDR_Y_MASK);
writel(cfg, dev->regs + GSC_OUT_BASE_ADDR_CB_MASK);
writel(cfg, dev->regs + GSC_OUT_BASE_ADDR_CR_MASK);
}
void gsc_hw_set_input_addr(struct gsc_dev *dev, struct gsc_addr *addr,
int index)
{
pr_debug("src_buf[%d]: %pad, cb: %pad, cr: %pad", index,
&addr->y, &addr->cb, &addr->cr);
writel(addr->y, dev->regs + GSC_IN_BASE_ADDR_Y(index));
writel(addr->cb, dev->regs + GSC_IN_BASE_ADDR_CB(index));
writel(addr->cr, dev->regs + GSC_IN_BASE_ADDR_CR(index));
}
void gsc_hw_set_output_addr(struct gsc_dev *dev,
struct gsc_addr *addr, int index)
{
pr_debug("dst_buf[%d]: %pad, cb: %pad, cr: %pad",
index, &addr->y, &addr->cb, &addr->cr);
writel(addr->y, dev->regs + GSC_OUT_BASE_ADDR_Y(index));
writel(addr->cb, dev->regs + GSC_OUT_BASE_ADDR_CB(index));
writel(addr->cr, dev->regs + GSC_OUT_BASE_ADDR_CR(index));
}
void gsc_hw_set_input_path(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
u32 cfg = readl(dev->regs + GSC_IN_CON);
cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
if (ctx->in_path == GSC_DMA)
cfg |= GSC_IN_PATH_MEMORY;
writel(cfg, dev->regs + GSC_IN_CON);
}
void gsc_hw_set_in_size(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
struct gsc_frame *frame = &ctx->s_frame;
u32 cfg;
/* Set input pixel offset */
cfg = GSC_SRCIMG_OFFSET_X(frame->crop.left);
cfg |= GSC_SRCIMG_OFFSET_Y(frame->crop.top);
writel(cfg, dev->regs + GSC_SRCIMG_OFFSET);
/* Set input original size */
cfg = GSC_SRCIMG_WIDTH(frame->f_width);
cfg |= GSC_SRCIMG_HEIGHT(frame->f_height);
writel(cfg, dev->regs + GSC_SRCIMG_SIZE);
/* Set input cropped size */
cfg = GSC_CROPPED_WIDTH(frame->crop.width);
cfg |= GSC_CROPPED_HEIGHT(frame->crop.height);
writel(cfg, dev->regs + GSC_CROPPED_SIZE);
}
void gsc_hw_set_in_image_rgb(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
struct gsc_frame *frame = &ctx->s_frame;
u32 cfg;
cfg = readl(dev->regs + GSC_IN_CON);
if (frame->colorspace == V4L2_COLORSPACE_REC709)
cfg |= GSC_IN_RGB_HD_WIDE;
else
cfg |= GSC_IN_RGB_SD_WIDE;
if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB565X)
cfg |= GSC_IN_RGB565;
else if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB32)
cfg |= GSC_IN_XRGB8888;
writel(cfg, dev->regs + GSC_IN_CON);
}
void gsc_hw_set_in_image_format(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
struct gsc_frame *frame = &ctx->s_frame;
u32 i, depth = 0;
u32 cfg;
cfg = readl(dev->regs + GSC_IN_CON);
cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE);
writel(cfg, dev->regs + GSC_IN_CON);
if (is_rgb(frame->fmt->color)) {
gsc_hw_set_in_image_rgb(ctx);
return;
}
for (i = 0; i < frame->fmt->num_planes; i++)
depth += frame->fmt->depth[i];
switch (frame->fmt->num_comp) {
case 1:
cfg |= GSC_IN_YUV422_1P;
if (frame->fmt->yorder == GSC_LSB_Y)
cfg |= GSC_IN_YUV422_1P_ORDER_LSB_Y;
else
cfg |= GSC_IN_YUV422_1P_OEDER_LSB_C;
if (frame->fmt->corder == GSC_CBCR)
cfg |= GSC_IN_CHROMA_ORDER_CBCR;
else
cfg |= GSC_IN_CHROMA_ORDER_CRCB;
break;
case 2:
if (depth == 12)
cfg |= GSC_IN_YUV420_2P;
else
cfg |= GSC_IN_YUV422_2P;
if (frame->fmt->corder == GSC_CBCR)
cfg |= GSC_IN_CHROMA_ORDER_CBCR;
else
cfg |= GSC_IN_CHROMA_ORDER_CRCB;
break;
case 3:
if (depth == 12)
cfg |= GSC_IN_YUV420_3P;
else
cfg |= GSC_IN_YUV422_3P;
break;
}
if (is_tiled(frame->fmt))
cfg |= GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE;
writel(cfg, dev->regs + GSC_IN_CON);
}
void gsc_hw_set_output_path(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
u32 cfg = readl(dev->regs + GSC_OUT_CON);
cfg &= ~GSC_OUT_PATH_MASK;
if (ctx->out_path == GSC_DMA)
cfg |= GSC_OUT_PATH_MEMORY;
else
cfg |= GSC_OUT_PATH_LOCAL;
writel(cfg, dev->regs + GSC_OUT_CON);
}
void gsc_hw_set_out_size(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
struct gsc_frame *frame = &ctx->d_frame;
u32 cfg;
/* Set output original size */
if (ctx->out_path == GSC_DMA) {
cfg = GSC_DSTIMG_OFFSET_X(frame->crop.left);
cfg |= GSC_DSTIMG_OFFSET_Y(frame->crop.top);
writel(cfg, dev->regs + GSC_DSTIMG_OFFSET);
cfg = GSC_DSTIMG_WIDTH(frame->f_width);
cfg |= GSC_DSTIMG_HEIGHT(frame->f_height);
writel(cfg, dev->regs + GSC_DSTIMG_SIZE);
}
/* Set output scaled size */
if (ctx->gsc_ctrls.rotate->val == 90 ||
ctx->gsc_ctrls.rotate->val == 270) {
cfg = GSC_SCALED_WIDTH(frame->crop.height);
cfg |= GSC_SCALED_HEIGHT(frame->crop.width);
} else {
cfg = GSC_SCALED_WIDTH(frame->crop.width);
cfg |= GSC_SCALED_HEIGHT(frame->crop.height);
}
writel(cfg, dev->regs + GSC_SCALED_SIZE);
}
void gsc_hw_set_out_image_rgb(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
struct gsc_frame *frame = &ctx->d_frame;
u32 cfg;
cfg = readl(dev->regs + GSC_OUT_CON);
if (frame->colorspace == V4L2_COLORSPACE_REC709)
cfg |= GSC_OUT_RGB_HD_WIDE;
else
cfg |= GSC_OUT_RGB_SD_WIDE;
if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB565X)
cfg |= GSC_OUT_RGB565;
else if (frame->fmt->pixelformat == V4L2_PIX_FMT_RGB32)
cfg |= GSC_OUT_XRGB8888;
writel(cfg, dev->regs + GSC_OUT_CON);
}
void gsc_hw_set_out_image_format(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
struct gsc_frame *frame = &ctx->d_frame;
u32 i, depth = 0;
u32 cfg;
cfg = readl(dev->regs + GSC_OUT_CON);
cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
GSC_OUT_TILE_TYPE_MASK | GSC_OUT_TILE_MODE);
writel(cfg, dev->regs + GSC_OUT_CON);
if (is_rgb(frame->fmt->color)) {
gsc_hw_set_out_image_rgb(ctx);
return;
}
if (ctx->out_path != GSC_DMA) {
cfg |= GSC_OUT_YUV444;
goto end_set;
}
for (i = 0; i < frame->fmt->num_planes; i++)
depth += frame->fmt->depth[i];
switch (frame->fmt->num_comp) {
case 1:
cfg |= GSC_OUT_YUV422_1P;
if (frame->fmt->yorder == GSC_LSB_Y)
cfg |= GSC_OUT_YUV422_1P_ORDER_LSB_Y;
else
cfg |= GSC_OUT_YUV422_1P_OEDER_LSB_C;
if (frame->fmt->corder == GSC_CBCR)
cfg |= GSC_OUT_CHROMA_ORDER_CBCR;
else
cfg |= GSC_OUT_CHROMA_ORDER_CRCB;
break;
case 2:
if (depth == 12)
cfg |= GSC_OUT_YUV420_2P;
else
cfg |= GSC_OUT_YUV422_2P;
if (frame->fmt->corder == GSC_CBCR)
cfg |= GSC_OUT_CHROMA_ORDER_CBCR;
else
cfg |= GSC_OUT_CHROMA_ORDER_CRCB;
break;
case 3:
cfg |= GSC_OUT_YUV420_3P;
break;
}
if (is_tiled(frame->fmt))
cfg |= GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE;
end_set:
writel(cfg, dev->regs + GSC_OUT_CON);
}
void gsc_hw_set_prescaler(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
struct gsc_scaler *sc = &ctx->scaler;
u32 cfg;
cfg = GSC_PRESC_SHFACTOR(sc->pre_shfactor);
cfg |= GSC_PRESC_H_RATIO(sc->pre_hratio);
cfg |= GSC_PRESC_V_RATIO(sc->pre_vratio);
writel(cfg, dev->regs + GSC_PRE_SCALE_RATIO);
}
void gsc_hw_set_mainscaler(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
struct gsc_scaler *sc = &ctx->scaler;
u32 cfg;
cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
writel(cfg, dev->regs + GSC_MAIN_H_RATIO);
cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
writel(cfg, dev->regs + GSC_MAIN_V_RATIO);
}
void gsc_hw_set_rotation(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
u32 cfg;
cfg = readl(dev->regs + GSC_IN_CON);
cfg &= ~GSC_IN_ROT_MASK;
switch (ctx->gsc_ctrls.rotate->val) {
case 270:
cfg |= GSC_IN_ROT_270;
break;
case 180:
cfg |= GSC_IN_ROT_180;
break;
case 90:
if (ctx->gsc_ctrls.hflip->val)
cfg |= GSC_IN_ROT_90_XFLIP;
else if (ctx->gsc_ctrls.vflip->val)
cfg |= GSC_IN_ROT_90_YFLIP;
else
cfg |= GSC_IN_ROT_90;
break;
case 0:
if (ctx->gsc_ctrls.hflip->val)
cfg |= GSC_IN_ROT_XFLIP;
else if (ctx->gsc_ctrls.vflip->val)
cfg |= GSC_IN_ROT_YFLIP;
}
writel(cfg, dev->regs + GSC_IN_CON);
}
void gsc_hw_set_global_alpha(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
struct gsc_frame *frame = &ctx->d_frame;
u32 cfg;
if (!is_rgb(frame->fmt->color)) {
pr_debug("Not a RGB format");
return;
}
cfg = readl(dev->regs + GSC_OUT_CON);
cfg &= ~GSC_OUT_GLOBAL_ALPHA_MASK;
cfg |= GSC_OUT_GLOBAL_ALPHA(ctx->gsc_ctrls.global_alpha->val);
writel(cfg, dev->regs + GSC_OUT_CON);
}
void gsc_hw_set_sfr_update(struct gsc_ctx *ctx)
{
struct gsc_dev *dev = ctx->gsc_dev;
u32 cfg;
cfg = readl(dev->regs + GSC_ENABLE);
cfg |= GSC_ENABLE_SFR_UPDATE;
writel(cfg, dev->regs + GSC_ENABLE);
}
| linux-master | drivers/media/platform/samsung/exynos-gsc/gsc-regs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung EXYNOS5 SoC series G-Scaler driver
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <media/v4l2-ioctl.h>
#include "gsc-core.h"
static const struct gsc_fmt gsc_formats[] = {
{
.pixelformat = V4L2_PIX_FMT_RGB565X,
.depth = { 16 },
.color = GSC_RGB,
.num_planes = 1,
.num_comp = 1,
}, {
.pixelformat = V4L2_PIX_FMT_BGR32,
.depth = { 32 },
.color = GSC_RGB,
.num_planes = 1,
.num_comp = 1,
}, {
.pixelformat = V4L2_PIX_FMT_YUYV,
.depth = { 16 },
.color = GSC_YUV422,
.yorder = GSC_LSB_Y,
.corder = GSC_CBCR,
.num_planes = 1,
.num_comp = 1,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
}, {
.pixelformat = V4L2_PIX_FMT_UYVY,
.depth = { 16 },
.color = GSC_YUV422,
.yorder = GSC_LSB_C,
.corder = GSC_CBCR,
.num_planes = 1,
.num_comp = 1,
.mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
}, {
.pixelformat = V4L2_PIX_FMT_VYUY,
.depth = { 16 },
.color = GSC_YUV422,
.yorder = GSC_LSB_C,
.corder = GSC_CRCB,
.num_planes = 1,
.num_comp = 1,
.mbus_code = MEDIA_BUS_FMT_VYUY8_2X8,
}, {
.pixelformat = V4L2_PIX_FMT_YVYU,
.depth = { 16 },
.color = GSC_YUV422,
.yorder = GSC_LSB_Y,
.corder = GSC_CRCB,
.num_planes = 1,
.num_comp = 1,
.mbus_code = MEDIA_BUS_FMT_YVYU8_2X8,
}, {
.pixelformat = V4L2_PIX_FMT_YUV32,
.depth = { 32 },
.color = GSC_YUV444,
.yorder = GSC_LSB_Y,
.corder = GSC_CBCR,
.num_planes = 1,
.num_comp = 1,
}, {
.pixelformat = V4L2_PIX_FMT_YUV422P,
.depth = { 16 },
.color = GSC_YUV422,
.yorder = GSC_LSB_Y,
.corder = GSC_CBCR,
.num_planes = 1,
.num_comp = 3,
}, {
.pixelformat = V4L2_PIX_FMT_NV16,
.depth = { 16 },
.color = GSC_YUV422,
.yorder = GSC_LSB_Y,
.corder = GSC_CBCR,
.num_planes = 1,
.num_comp = 2,
}, {
.pixelformat = V4L2_PIX_FMT_NV16M,
.depth = { 8, 8 },
.color = GSC_YUV422,
.yorder = GSC_LSB_Y,
.corder = GSC_CBCR,
.num_planes = 2,
.num_comp = 2,
}, {
.pixelformat = V4L2_PIX_FMT_NV61,
.depth = { 16 },
.color = GSC_YUV422,
.yorder = GSC_LSB_Y,
.corder = GSC_CRCB,
.num_planes = 1,
.num_comp = 2,
}, {
.pixelformat = V4L2_PIX_FMT_NV61M,
.depth = { 8, 8 },
.color = GSC_YUV422,
.yorder = GSC_LSB_Y,
.corder = GSC_CRCB,
.num_planes = 2,
.num_comp = 2,
}, {
.pixelformat = V4L2_PIX_FMT_YUV420,
.depth = { 12 },
.color = GSC_YUV420,
.yorder = GSC_LSB_Y,
.corder = GSC_CBCR,
.num_planes = 1,
.num_comp = 3,
}, {
.pixelformat = V4L2_PIX_FMT_YVU420,
.depth = { 12 },
.color = GSC_YUV420,
.yorder = GSC_LSB_Y,
.corder = GSC_CRCB,
.num_planes = 1,
.num_comp = 3,
}, {
.pixelformat = V4L2_PIX_FMT_NV12,
.depth = { 12 },
.color = GSC_YUV420,
.yorder = GSC_LSB_Y,
.corder = GSC_CBCR,
.num_planes = 1,
.num_comp = 2,
}, {
.pixelformat = V4L2_PIX_FMT_NV21,
.depth = { 12 },
.color = GSC_YUV420,
.yorder = GSC_LSB_Y,
.corder = GSC_CRCB,
.num_planes = 1,
.num_comp = 2,
}, {
.pixelformat = V4L2_PIX_FMT_NV21M,
.depth = { 8, 4 },
.color = GSC_YUV420,
.yorder = GSC_LSB_Y,
.corder = GSC_CRCB,
.num_planes = 2,
.num_comp = 2,
}, {
.pixelformat = V4L2_PIX_FMT_NV12M,
.depth = { 8, 4 },
.color = GSC_YUV420,
.yorder = GSC_LSB_Y,
.corder = GSC_CBCR,
.num_planes = 2,
.num_comp = 2,
}, {
.pixelformat = V4L2_PIX_FMT_YUV420M,
.depth = { 8, 2, 2 },
.color = GSC_YUV420,
.yorder = GSC_LSB_Y,
.corder = GSC_CBCR,
.num_planes = 3,
.num_comp = 3,
}, {
.pixelformat = V4L2_PIX_FMT_YVU420M,
.depth = { 8, 2, 2 },
.color = GSC_YUV420,
.yorder = GSC_LSB_Y,
.corder = GSC_CRCB,
.num_planes = 3,
.num_comp = 3,
}, {
.pixelformat = V4L2_PIX_FMT_NV12MT_16X16,
.depth = { 8, 4 },
.color = GSC_YUV420,
.yorder = GSC_LSB_Y,
.corder = GSC_CBCR,
.num_planes = 2,
.num_comp = 2,
}
};
const struct gsc_fmt *get_format(int index)
{
if (index >= ARRAY_SIZE(gsc_formats))
return NULL;
return (struct gsc_fmt *)&gsc_formats[index];
}
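/*
 * Look a format up by fourcc or media-bus code; when neither matches,
 * fall back to the table entry at the given index (or NULL when the
 * index is out of range).
 */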
const struct gsc_fmt *find_fmt(u32 *pixelformat, u32 *mbus_code, u32 index)
{
const struct gsc_fmt *fmt, *def_fmt = NULL;
unsigned int i;
if (index >= ARRAY_SIZE(gsc_formats))
return NULL;
for (i = 0; i < ARRAY_SIZE(gsc_formats); ++i) {
fmt = get_format(i);
if (pixelformat && fmt->pixelformat == *pixelformat)
return fmt;
if (mbus_code && fmt->mbus_code == *mbus_code)
return fmt;
if (index == i)
def_fmt = fmt;
}
return def_fmt;
}
void gsc_set_frame_size(struct gsc_frame *frame, int width, int height)
{
frame->f_width = width;
frame->f_height = height;
frame->crop.width = width;
frame->crop.height = height;
frame->crop.left = 0;
frame->crop.top = 0;
}
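/*
 * Choose the pre-scaler downscaling ratio so that the remaining ratio
 * fits the polyphase scaler. For example, assuming the common variant
 * limits poly_sc_down_max = 4 and pre_sc_down_max = 4: src = 4000,
 * dst = 1200 -> ratio 1 (the polyphase scaler alone suffices),
 * dst = 600 -> ratio 2, dst = 300 -> ratio 4, dst = 200 -> -EINVAL
 * (beyond the combined 1/16 limit).
 */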
int gsc_cal_prescaler_ratio(struct gsc_variant *var, u32 src, u32 dst,
u32 *ratio)
{
if ((dst > src) || (dst >= src / var->poly_sc_down_max)) {
*ratio = 1;
return 0;
}
if ((src / var->poly_sc_down_max / var->pre_sc_down_max) > dst) {
pr_err("Exceeded maximum downscaling ratio (1/16))");
return -EINVAL;
}
*ratio = (dst > (src / 8)) ? 2 : 4;
return 0;
}
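/*
 * The shift factor works out to log2(hratio * vratio), e.g.
 * 4x4 -> 4, 4x2 -> 3, 2x2 -> 2, 2x1 -> 1, 1x1 -> 0.
 */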
void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *sh)
{
if (hratio == 4 && vratio == 4)
*sh = 4;
else if ((hratio == 4 && vratio == 2) ||
(hratio == 2 && vratio == 4))
*sh = 3;
else if ((hratio == 4 && vratio == 1) ||
(hratio == 1 && vratio == 4) ||
(hratio == 2 && vratio == 2))
*sh = 2;
else if (hratio == 1 && vratio == 1)
*sh = 0;
else
*sh = 1;
}
void gsc_check_src_scale_info(struct gsc_variant *var,
struct gsc_frame *s_frame, u32 *wratio,
u32 tx, u32 ty, u32 *hratio)
{
int remainder = 0, walign, halign;
if (is_yuv420(s_frame->fmt->color)) {
walign = GSC_SC_ALIGN_4;
halign = GSC_SC_ALIGN_4;
} else if (is_yuv422(s_frame->fmt->color)) {
walign = GSC_SC_ALIGN_4;
halign = GSC_SC_ALIGN_2;
} else {
walign = GSC_SC_ALIGN_2;
halign = GSC_SC_ALIGN_2;
}
remainder = s_frame->crop.width % (*wratio * walign);
if (remainder) {
s_frame->crop.width -= remainder;
gsc_cal_prescaler_ratio(var, s_frame->crop.width, tx, wratio);
pr_info("cropped src width size is recalculated from %d to %d",
s_frame->crop.width + remainder, s_frame->crop.width);
}
remainder = s_frame->crop.height % (*hratio * halign);
if (remainder) {
s_frame->crop.height -= remainder;
gsc_cal_prescaler_ratio(var, s_frame->crop.height, ty, hratio);
pr_info("cropped src height size is recalculated from %d to %d",
s_frame->crop.height + remainder, s_frame->crop.height);
}
}
int gsc_enum_fmt(struct v4l2_fmtdesc *f)
{
const struct gsc_fmt *fmt;
fmt = find_fmt(NULL, NULL, f->index);
if (!fmt)
return -EINVAL;
f->pixelformat = fmt->pixelformat;
return 0;
}
static int get_plane_info(struct gsc_frame *frm, u32 addr, u32 *index, u32 *ret_addr)
{
if (frm->addr.y == addr) {
*index = 0;
*ret_addr = frm->addr.y;
} else if (frm->addr.cb == addr) {
*index = 1;
*ret_addr = frm->addr.cb;
} else if (frm->addr.cr == addr) {
*index = 2;
*ret_addr = frm->addr.cr;
} else {
pr_err("Plane address is wrong");
return -EINVAL;
}
return 0;
}
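/*
 * Derive up to two contiguous address ranges covering the frame's
 * planes: one- and two-plane formats map directly, while for
 * three-plane formats the planes are sorted by address and split into
 * the two ranges that stay smallest. The result is currently only
 * reported via pr_debug().
 */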
void gsc_set_prefbuf(struct gsc_dev *gsc, struct gsc_frame *frm)
{
u32 f_chk_addr, f_chk_len, s_chk_addr = 0, s_chk_len = 0;
f_chk_addr = frm->addr.y;
f_chk_len = frm->payload[0];
if (frm->fmt->num_planes == 2) {
s_chk_addr = frm->addr.cb;
s_chk_len = frm->payload[1];
} else if (frm->fmt->num_planes == 3) {
u32 low_addr, low_plane, mid_addr, mid_plane;
u32 high_addr, high_plane;
u32 t_min, t_max;
t_min = min3(frm->addr.y, frm->addr.cb, frm->addr.cr);
if (get_plane_info(frm, t_min, &low_plane, &low_addr))
return;
t_max = max3(frm->addr.y, frm->addr.cb, frm->addr.cr);
if (get_plane_info(frm, t_max, &high_plane, &high_addr))
return;
mid_plane = 3 - (low_plane + high_plane);
if (mid_plane == 0)
mid_addr = frm->addr.y;
else if (mid_plane == 1)
mid_addr = frm->addr.cb;
else if (mid_plane == 2)
mid_addr = frm->addr.cr;
else
return;
f_chk_addr = low_addr;
if (mid_addr + frm->payload[mid_plane] - low_addr >
high_addr + frm->payload[high_plane] - mid_addr) {
f_chk_len = frm->payload[low_plane];
s_chk_addr = mid_addr;
s_chk_len = high_addr +
frm->payload[high_plane] - mid_addr;
} else {
f_chk_len = mid_addr +
frm->payload[mid_plane] - low_addr;
s_chk_addr = high_addr;
s_chk_len = frm->payload[high_plane];
}
}
pr_debug("f_addr = 0x%08x, f_len = %d, s_addr = 0x%08x, s_len = %d\n",
f_chk_addr, f_chk_len, s_chk_addr, s_chk_len);
}
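/*
 * Validate and adjust a multiplanar format: clamp width and height to
 * the variant limits with the required alignment, then derive a sane
 * bytesperline and sizeimage for every plane (packed formats get
 * width * depth / 8, planar ones at least width, with the chroma
 * planes of 3-component formats halved).
 */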
int gsc_try_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
{
struct gsc_dev *gsc = ctx->gsc_dev;
struct gsc_variant *variant = gsc->variant;
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
const struct gsc_fmt *fmt;
u32 max_w, max_h, mod_x, mod_y;
u32 min_w, min_h, tmp_w, tmp_h;
int i;
pr_debug("user put w: %d, h: %d", pix_mp->width, pix_mp->height);
fmt = find_fmt(&pix_mp->pixelformat, NULL, 0);
if (!fmt) {
pr_err("pixelformat format (0x%X) invalid\n",
pix_mp->pixelformat);
return -EINVAL;
}
if (pix_mp->field == V4L2_FIELD_ANY)
pix_mp->field = V4L2_FIELD_NONE;
else if (pix_mp->field != V4L2_FIELD_NONE) {
pr_debug("Not supported field order(%d)\n", pix_mp->field);
return -EINVAL;
}
max_w = variant->pix_max->target_rot_dis_w;
max_h = variant->pix_max->target_rot_dis_h;
mod_x = ffs(variant->pix_align->org_w) - 1;
if (is_yuv420(fmt->color))
mod_y = ffs(variant->pix_align->org_h) - 1;
else
mod_y = ffs(variant->pix_align->org_h) - 2;
if (V4L2_TYPE_IS_OUTPUT(f->type)) {
min_w = variant->pix_min->org_w;
min_h = variant->pix_min->org_h;
} else {
min_w = variant->pix_min->target_rot_dis_w;
min_h = variant->pix_min->target_rot_dis_h;
pix_mp->colorspace = ctx->out_colorspace;
}
pr_debug("mod_x: %d, mod_y: %d, max_w: %d, max_h = %d",
mod_x, mod_y, max_w, max_h);
/* Check whether the image size must be adjusted to fit the hardware limits */
tmp_w = pix_mp->width;
tmp_h = pix_mp->height;
v4l_bound_align_image(&pix_mp->width, min_w, max_w, mod_x,
&pix_mp->height, min_h, max_h, mod_y, 0);
if (tmp_w != pix_mp->width || tmp_h != pix_mp->height)
pr_debug("Image size has been modified from %dx%d to %dx%d\n",
tmp_w, tmp_h, pix_mp->width, pix_mp->height);
pix_mp->num_planes = fmt->num_planes;
if (V4L2_TYPE_IS_OUTPUT(f->type))
ctx->out_colorspace = pix_mp->colorspace;
for (i = 0; i < pix_mp->num_planes; ++i) {
struct v4l2_plane_pix_format *plane_fmt = &pix_mp->plane_fmt[i];
u32 bpl = plane_fmt->bytesperline;
if (fmt->num_comp == 1 && /* Packed */
(bpl == 0 || (bpl * 8 / fmt->depth[i]) < pix_mp->width))
bpl = pix_mp->width * fmt->depth[i] / 8;
if (fmt->num_comp > 1 && /* Planar */
(bpl == 0 || bpl < pix_mp->width))
bpl = pix_mp->width;
if (i != 0 && fmt->num_comp == 3)
bpl /= 2;
plane_fmt->bytesperline = bpl;
plane_fmt->sizeimage = max(pix_mp->width * pix_mp->height *
fmt->depth[i] / 8,
plane_fmt->sizeimage);
pr_debug("[%d]: bpl: %d, sizeimage: %d",
i, bpl, pix_mp->plane_fmt[i].sizeimage);
}
return 0;
}
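/*
 * Worked example for the bytesperline rules above (format parameters
 * assumed): a packed 16 bpp format at 1280x720 gets bytesperline =
 * 1280 * 16 / 8 = 2560. A three-component planar YUV format with
 * depth[0] = 8 gets a 1280-byte luma line, and the i != 0 rule halves
 * the two chroma lines to 640 bytes each.
 */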
int gsc_g_fmt_mplane(struct gsc_ctx *ctx, struct v4l2_format *f)
{
struct gsc_frame *frame;
struct v4l2_pix_format_mplane *pix_mp;
int i;
frame = ctx_get_frame(ctx, f->type);
if (IS_ERR(frame))
return PTR_ERR(frame);
pix_mp = &f->fmt.pix_mp;
pix_mp->width = frame->f_width;
pix_mp->height = frame->f_height;
pix_mp->field = V4L2_FIELD_NONE;
pix_mp->pixelformat = frame->fmt->pixelformat;
pix_mp->num_planes = frame->fmt->num_planes;
pix_mp->colorspace = ctx->out_colorspace;
for (i = 0; i < pix_mp->num_planes; ++i) {
pix_mp->plane_fmt[i].bytesperline = (frame->f_width *
frame->fmt->depth[i]) / 8;
pix_mp->plane_fmt[i].sizeimage =
pix_mp->plane_fmt[i].bytesperline * frame->f_height;
}
return 0;
}
void gsc_check_crop_change(u32 tmp_w, u32 tmp_h, u32 *w, u32 *h)
{
if (tmp_w != *w || tmp_h != *h) {
pr_info("Cropped size has been modified from %dx%d to %dx%d",
*w, *h, tmp_w, tmp_h);
*w = tmp_w;
*h = tmp_h;
}
}
int gsc_try_selection(struct gsc_ctx *ctx, struct v4l2_selection *s)
{
struct gsc_frame *f;
struct gsc_dev *gsc = ctx->gsc_dev;
struct gsc_variant *variant = gsc->variant;
u32 mod_x = 0, mod_y = 0, tmp_w, tmp_h;
u32 min_w, min_h, max_w, max_h;
if (s->r.top < 0 || s->r.left < 0) {
pr_err("doesn't support negative values for top & left\n");
return -EINVAL;
}
pr_debug("user put w: %d, h: %d", s->r.width, s->r.height);
if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
f = &ctx->d_frame;
else if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
f = &ctx->s_frame;
else
return -EINVAL;
max_w = f->f_width;
max_h = f->f_height;
tmp_w = s->r.width;
tmp_h = s->r.height;
if (V4L2_TYPE_IS_OUTPUT(s->type)) {
if ((is_yuv422(f->fmt->color) && f->fmt->num_comp == 1) ||
is_rgb(f->fmt->color))
min_w = 32;
else
min_w = 64;
if ((is_yuv422(f->fmt->color) && f->fmt->num_comp == 3) ||
is_yuv420(f->fmt->color))
min_h = 32;
else
min_h = 16;
} else {
if (is_yuv420(f->fmt->color) || is_yuv422(f->fmt->color))
mod_x = ffs(variant->pix_align->target_w) - 1;
if (is_yuv420(f->fmt->color))
mod_y = ffs(variant->pix_align->target_h) - 1;
if (ctx->gsc_ctrls.rotate->val == 90 ||
ctx->gsc_ctrls.rotate->val == 270) {
max_w = f->f_height;
max_h = f->f_width;
min_w = variant->pix_min->target_rot_en_w;
min_h = variant->pix_min->target_rot_en_h;
tmp_w = s->r.height;
tmp_h = s->r.width;
} else {
min_w = variant->pix_min->target_rot_dis_w;
min_h = variant->pix_min->target_rot_dis_h;
}
}
pr_debug("mod_x: %d, mod_y: %d, min_w: %d, min_h = %d",
mod_x, mod_y, min_w, min_h);
pr_debug("tmp_w : %d, tmp_h : %d", tmp_w, tmp_h);
v4l_bound_align_image(&tmp_w, min_w, max_w, mod_x,
&tmp_h, min_h, max_h, mod_y, 0);
if (V4L2_TYPE_IS_CAPTURE(s->type) &&
(ctx->gsc_ctrls.rotate->val == 90 ||
ctx->gsc_ctrls.rotate->val == 270))
gsc_check_crop_change(tmp_h, tmp_w,
&s->r.width, &s->r.height);
else
gsc_check_crop_change(tmp_w, tmp_h,
&s->r.width, &s->r.height);
/* Adjust left/top if the cropping rectangle is out of bounds */
/* TODO: align the left value to a multiple of 2 */
if (s->r.left + tmp_w > max_w)
s->r.left = max_w - tmp_w;
if (s->r.top + tmp_h > max_h)
s->r.top = max_h - tmp_h;
if ((is_yuv420(f->fmt->color) || is_yuv422(f->fmt->color)) &&
s->r.left & 1)
s->r.left -= 1;
pr_debug("Aligned l:%d, t:%d, w:%d, h:%d, f_w: %d, f_h: %d",
s->r.left, s->r.top, s->r.width, s->r.height, max_w, max_h);
return 0;
}
int gsc_check_scaler_ratio(struct gsc_variant *var, int sw, int sh, int dw,
int dh, int rot, int out_path)
{
int tmp_w, tmp_h, sc_down_max;
if (out_path == GSC_DMA)
sc_down_max = var->sc_down_max;
else
sc_down_max = var->local_sc_down;
if (rot == 90 || rot == 270) {
tmp_w = dh;
tmp_h = dw;
} else {
tmp_w = dw;
tmp_h = dh;
}
if ((sw / tmp_w) > sc_down_max ||
(sh / tmp_h) > sc_down_max ||
(tmp_w / sw) > var->sc_up_max ||
(tmp_h / sh) > var->sc_up_max)
return -EINVAL;
return 0;
}
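/*
 * Example using the variant limits defined later in this file
 * (sc_down_max = 16, sc_up_max = 8): scaling 1920 down to 240 gives
 * 1920 / 240 = 8 <= 16 and is accepted, while 1920 down to 100 gives
 * 1920 / 100 = 19 > 16 and is rejected with -EINVAL. The checks use
 * integer division, so the ratios are truncated.
 */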
int gsc_set_scaler_info(struct gsc_ctx *ctx)
{
struct gsc_scaler *sc = &ctx->scaler;
struct gsc_frame *s_frame = &ctx->s_frame;
struct gsc_frame *d_frame = &ctx->d_frame;
struct gsc_variant *variant = ctx->gsc_dev->variant;
struct device *dev = &ctx->gsc_dev->pdev->dev;
int tx, ty;
int ret;
ret = gsc_check_scaler_ratio(variant, s_frame->crop.width,
s_frame->crop.height, d_frame->crop.width, d_frame->crop.height,
ctx->gsc_ctrls.rotate->val, ctx->out_path);
if (ret) {
pr_err("out of scaler range");
return ret;
}
if (ctx->gsc_ctrls.rotate->val == 90 ||
ctx->gsc_ctrls.rotate->val == 270) {
ty = d_frame->crop.width;
tx = d_frame->crop.height;
} else {
tx = d_frame->crop.width;
ty = d_frame->crop.height;
}
if (tx <= 0 || ty <= 0) {
dev_err(dev, "Invalid target size: %dx%d", tx, ty);
return -EINVAL;
}
ret = gsc_cal_prescaler_ratio(variant, s_frame->crop.width,
tx, &sc->pre_hratio);
if (ret) {
pr_err("Horizontal scale ratio is out of range");
return ret;
}
ret = gsc_cal_prescaler_ratio(variant, s_frame->crop.height,
ty, &sc->pre_vratio);
if (ret) {
pr_err("Vertical scale ratio is out of range");
return ret;
}
gsc_check_src_scale_info(variant, s_frame, &sc->pre_hratio,
tx, ty, &sc->pre_vratio);
gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
&sc->pre_shfactor);
sc->main_hratio = (s_frame->crop.width << 16) / tx;
sc->main_vratio = (s_frame->crop.height << 16) / ty;
pr_debug("scaler input/output size : sx = %d, sy = %d, tx = %d, ty = %d",
s_frame->crop.width, s_frame->crop.height, tx, ty);
pr_debug("scaler ratio info : pre_shfactor : %d, pre_h : %d",
sc->pre_shfactor, sc->pre_hratio);
pr_debug("pre_v :%d, main_h : %d, main_v : %d",
sc->pre_vratio, sc->main_hratio, sc->main_vratio);
return 0;
}
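/*
 * The main ratios are fixed point with 16 fractional bits. Worked
 * example: scaling a 1920-pixel-wide crop to tx = 1280 gives
 * main_hratio = (1920 << 16) / 1280 = 98304 = 0x18000, i.e. 1.5.
 */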
static int __gsc_s_ctrl(struct gsc_ctx *ctx, struct v4l2_ctrl *ctrl)
{
struct gsc_dev *gsc = ctx->gsc_dev;
struct gsc_variant *variant = gsc->variant;
unsigned int flags = GSC_DST_FMT | GSC_SRC_FMT;
int ret = 0;
if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
return 0;
switch (ctrl->id) {
case V4L2_CID_HFLIP:
ctx->hflip = ctrl->val;
break;
case V4L2_CID_VFLIP:
ctx->vflip = ctrl->val;
break;
case V4L2_CID_ROTATE:
if ((ctx->state & flags) == flags) {
ret = gsc_check_scaler_ratio(variant,
ctx->s_frame.crop.width,
ctx->s_frame.crop.height,
ctx->d_frame.crop.width,
ctx->d_frame.crop.height,
ctx->gsc_ctrls.rotate->val,
ctx->out_path);
if (ret)
return -EINVAL;
}
ctx->rotation = ctrl->val;
break;
case V4L2_CID_ALPHA_COMPONENT:
ctx->d_frame.alpha = ctrl->val;
break;
}
ctx->state |= GSC_PARAMS;
return 0;
}
static int gsc_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct gsc_ctx *ctx = ctrl_to_ctx(ctrl);
unsigned long flags;
int ret;
spin_lock_irqsave(&ctx->gsc_dev->slock, flags);
ret = __gsc_s_ctrl(ctx, ctrl);
spin_unlock_irqrestore(&ctx->gsc_dev->slock, flags);
return ret;
}
static const struct v4l2_ctrl_ops gsc_ctrl_ops = {
.s_ctrl = gsc_s_ctrl,
};
int gsc_ctrls_create(struct gsc_ctx *ctx)
{
if (ctx->ctrls_rdy) {
pr_err("Control handler of this context was created already");
return 0;
}
v4l2_ctrl_handler_init(&ctx->ctrl_handler, GSC_MAX_CTRL_NUM);
ctx->gsc_ctrls.rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler,
&gsc_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0);
ctx->gsc_ctrls.hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
&gsc_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
ctx->gsc_ctrls.vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
&gsc_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
ctx->gsc_ctrls.global_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
&gsc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 0);
ctx->ctrls_rdy = ctx->ctrl_handler.error == 0;
if (ctx->ctrl_handler.error) {
int err = ctx->ctrl_handler.error;
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
pr_err("Failed to create G-Scaler control handlers");
return err;
}
return 0;
}
void gsc_ctrls_delete(struct gsc_ctx *ctx)
{
if (ctx->ctrls_rdy) {
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
ctx->ctrls_rdy = false;
}
}
/* The color format (num_comp, num_planes) must be already configured. */
int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb,
struct gsc_frame *frame, struct gsc_addr *addr)
{
int ret = 0;
u32 pix_size;
if ((vb == NULL) || (frame == NULL))
return -EINVAL;
pix_size = frame->f_width * frame->f_height;
pr_debug("num_planes= %d, num_comp= %d, pix_size= %d",
frame->fmt->num_planes, frame->fmt->num_comp, pix_size);
addr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
if (frame->fmt->num_planes == 1) {
switch (frame->fmt->num_comp) {
case 1:
addr->cb = 0;
addr->cr = 0;
break;
case 2:
/* decompose Y into Y/Cb */
addr->cb = (dma_addr_t)(addr->y + pix_size);
addr->cr = 0;
break;
case 3:
/* decompose Y into Y/Cb/Cr */
addr->cb = (dma_addr_t)(addr->y + pix_size);
if (GSC_YUV420 == frame->fmt->color)
addr->cr = (dma_addr_t)(addr->cb
+ (pix_size >> 2));
else /* 422 */
addr->cr = (dma_addr_t)(addr->cb
+ (pix_size >> 1));
break;
default:
pr_err("Invalid the number of color planes");
return -EINVAL;
}
} else {
if (frame->fmt->num_planes >= 2)
addr->cb = vb2_dma_contig_plane_dma_addr(vb, 1);
if (frame->fmt->num_planes == 3)
addr->cr = vb2_dma_contig_plane_dma_addr(vb, 2);
}
if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) ||
(frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M))
swap(addr->cb, addr->cr);
pr_debug("ADDR: y= %pad cb= %pad cr= %pad ret= %d",
&addr->y, &addr->cb, &addr->cr, ret);
return ret;
}
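/*
 * Worked example for the single-allocation case above: a 1280x720
 * frame with three components in one plane has pix_size = 921600, so
 * cb = y + 921600 and, for GSC_YUV420, cr = cb + (921600 >> 2) =
 * cb + 230400. For a 4:2:2 layout cr follows at cb + 460800 instead.
 */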
static irqreturn_t gsc_irq_handler(int irq, void *priv)
{
struct gsc_dev *gsc = priv;
struct gsc_ctx *ctx;
int gsc_irq;
gsc_irq = gsc_hw_get_irq_status(gsc);
gsc_hw_clear_irq(gsc, gsc_irq);
if (gsc_irq == GSC_IRQ_OVERRUN) {
pr_err("Local path input over-run interrupt has occurred!\n");
return IRQ_HANDLED;
}
spin_lock(&gsc->slock);
if (test_and_clear_bit(ST_M2M_PEND, &gsc->state)) {
gsc_hw_enable_control(gsc, false);
if (test_and_clear_bit(ST_M2M_SUSPENDING, &gsc->state)) {
set_bit(ST_M2M_SUSPENDED, &gsc->state);
wake_up(&gsc->irq_queue);
goto isr_unlock;
}
ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
if (!ctx || !ctx->m2m_ctx)
goto isr_unlock;
spin_unlock(&gsc->slock);
gsc_m2m_job_finish(ctx, VB2_BUF_STATE_DONE);
/* wake_up job_abort, stop_streaming */
if (ctx->state & GSC_CTX_STOP_REQ) {
ctx->state &= ~GSC_CTX_STOP_REQ;
wake_up(&gsc->irq_queue);
}
return IRQ_HANDLED;
}
isr_unlock:
spin_unlock(&gsc->slock);
return IRQ_HANDLED;
}
static struct gsc_pix_max gsc_v_100_max = {
.org_scaler_bypass_w = 8192,
.org_scaler_bypass_h = 8192,
.org_scaler_input_w = 4800,
.org_scaler_input_h = 3344,
.real_rot_dis_w = 4800,
.real_rot_dis_h = 3344,
.real_rot_en_w = 2047,
.real_rot_en_h = 2047,
.target_rot_dis_w = 4800,
.target_rot_dis_h = 3344,
.target_rot_en_w = 2016,
.target_rot_en_h = 2016,
};
static struct gsc_pix_max gsc_v_5250_max = {
.org_scaler_bypass_w = 8192,
.org_scaler_bypass_h = 8192,
.org_scaler_input_w = 4800,
.org_scaler_input_h = 3344,
.real_rot_dis_w = 4800,
.real_rot_dis_h = 3344,
.real_rot_en_w = 2016,
.real_rot_en_h = 2016,
.target_rot_dis_w = 4800,
.target_rot_dis_h = 3344,
.target_rot_en_w = 2016,
.target_rot_en_h = 2016,
};
static struct gsc_pix_max gsc_v_5420_max = {
.org_scaler_bypass_w = 8192,
.org_scaler_bypass_h = 8192,
.org_scaler_input_w = 4800,
.org_scaler_input_h = 3344,
.real_rot_dis_w = 4800,
.real_rot_dis_h = 3344,
.real_rot_en_w = 2048,
.real_rot_en_h = 2048,
.target_rot_dis_w = 4800,
.target_rot_dis_h = 3344,
.target_rot_en_w = 2016,
.target_rot_en_h = 2016,
};
static struct gsc_pix_max gsc_v_5433_max = {
.org_scaler_bypass_w = 8192,
.org_scaler_bypass_h = 8192,
.org_scaler_input_w = 4800,
.org_scaler_input_h = 3344,
.real_rot_dis_w = 4800,
.real_rot_dis_h = 3344,
.real_rot_en_w = 2047,
.real_rot_en_h = 2047,
.target_rot_dis_w = 4800,
.target_rot_dis_h = 3344,
.target_rot_en_w = 2016,
.target_rot_en_h = 2016,
};
static struct gsc_pix_min gsc_v_100_min = {
.org_w = 64,
.org_h = 32,
.real_w = 64,
.real_h = 32,
.target_rot_dis_w = 64,
.target_rot_dis_h = 32,
.target_rot_en_w = 32,
.target_rot_en_h = 16,
};
static struct gsc_pix_align gsc_v_100_align = {
.org_h = 16,
.org_w = 16, /* yuv420 : 16, others : 8 */
.offset_h = 2, /* yuv420/422 : 2, others : 1 */
.real_w = 16, /* yuv420/422 : 4~16, others : 2~8 */
.real_h = 16, /* yuv420 : 4~16, others : 1 */
.target_w = 2, /* yuv420/422 : 2, others : 1 */
.target_h = 2, /* yuv420 : 2, others : 1 */
};
static struct gsc_variant gsc_v_100_variant = {
.pix_max = &gsc_v_100_max,
.pix_min = &gsc_v_100_min,
.pix_align = &gsc_v_100_align,
.in_buf_cnt = 32,
.out_buf_cnt = 32,
.sc_up_max = 8,
.sc_down_max = 16,
.poly_sc_down_max = 4,
.pre_sc_down_max = 4,
.local_sc_down = 2,
};
static struct gsc_variant gsc_v_5250_variant = {
.pix_max = &gsc_v_5250_max,
.pix_min = &gsc_v_100_min,
.pix_align = &gsc_v_100_align,
.in_buf_cnt = 32,
.out_buf_cnt = 32,
.sc_up_max = 8,
.sc_down_max = 16,
.poly_sc_down_max = 4,
.pre_sc_down_max = 4,
.local_sc_down = 2,
};
static struct gsc_variant gsc_v_5420_variant = {
.pix_max = &gsc_v_5420_max,
.pix_min = &gsc_v_100_min,
.pix_align = &gsc_v_100_align,
.in_buf_cnt = 32,
.out_buf_cnt = 32,
.sc_up_max = 8,
.sc_down_max = 16,
.poly_sc_down_max = 4,
.pre_sc_down_max = 4,
.local_sc_down = 2,
};
static struct gsc_variant gsc_v_5433_variant = {
.pix_max = &gsc_v_5433_max,
.pix_min = &gsc_v_100_min,
.pix_align = &gsc_v_100_align,
.in_buf_cnt = 32,
.out_buf_cnt = 32,
.sc_up_max = 8,
.sc_down_max = 16,
.poly_sc_down_max = 4,
.pre_sc_down_max = 4,
.local_sc_down = 2,
};
static struct gsc_driverdata gsc_v_100_drvdata = {
.variant = {
[0] = &gsc_v_100_variant,
[1] = &gsc_v_100_variant,
[2] = &gsc_v_100_variant,
[3] = &gsc_v_100_variant,
},
.num_entities = 4,
.clk_names = { "gscl" },
.num_clocks = 1,
};
static struct gsc_driverdata gsc_v_5250_drvdata = {
.variant = {
[0] = &gsc_v_5250_variant,
[1] = &gsc_v_5250_variant,
[2] = &gsc_v_5250_variant,
[3] = &gsc_v_5250_variant,
},
.num_entities = 4,
.clk_names = { "gscl" },
.num_clocks = 1,
};
static struct gsc_driverdata gsc_v_5420_drvdata = {
.variant = {
[0] = &gsc_v_5420_variant,
[1] = &gsc_v_5420_variant,
},
.num_entities = 2,
.clk_names = { "gscl" },
.num_clocks = 1,
};
static struct gsc_driverdata gsc_5433_drvdata = {
.variant = {
[0] = &gsc_v_5433_variant,
[1] = &gsc_v_5433_variant,
[2] = &gsc_v_5433_variant,
},
.num_entities = 3,
.clk_names = { "pclk", "aclk", "aclk_xiu", "aclk_gsclbend" },
.num_clocks = 4,
};
static const struct of_device_id exynos_gsc_match[] = {
{
.compatible = "samsung,exynos5250-gsc",
.data = &gsc_v_5250_drvdata,
},
{
.compatible = "samsung,exynos5420-gsc",
.data = &gsc_v_5420_drvdata,
},
{
.compatible = "samsung,exynos5433-gsc",
.data = &gsc_5433_drvdata,
},
{
.compatible = "samsung,exynos5-gsc",
.data = &gsc_v_100_drvdata,
},
{},
};
MODULE_DEVICE_TABLE(of, exynos_gsc_match);
static int gsc_probe(struct platform_device *pdev)
{
struct gsc_dev *gsc;
struct device *dev = &pdev->dev;
const struct gsc_driverdata *drv_data = of_device_get_match_data(dev);
int irq;
int ret;
int i;
gsc = devm_kzalloc(dev, sizeof(struct gsc_dev), GFP_KERNEL);
if (!gsc)
return -ENOMEM;
ret = of_alias_get_id(pdev->dev.of_node, "gsc");
if (ret < 0)
return ret;
if (drv_data == &gsc_v_100_drvdata)
dev_info(dev, "compatible 'exynos5-gsc' is deprecated\n");
gsc->id = ret;
if (gsc->id >= drv_data->num_entities) {
dev_err(dev, "Invalid platform device id: %d\n", gsc->id);
return -EINVAL;
}
gsc->num_clocks = drv_data->num_clocks;
gsc->variant = drv_data->variant[gsc->id];
gsc->pdev = pdev;
init_waitqueue_head(&gsc->irq_queue);
spin_lock_init(&gsc->slock);
mutex_init(&gsc->lock);
gsc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gsc->regs))
return PTR_ERR(gsc->regs);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
for (i = 0; i < gsc->num_clocks; i++) {
gsc->clock[i] = devm_clk_get(dev, drv_data->clk_names[i]);
if (IS_ERR(gsc->clock[i])) {
dev_err(dev, "failed to get clock: %s\n",
drv_data->clk_names[i]);
return PTR_ERR(gsc->clock[i]);
}
}
for (i = 0; i < gsc->num_clocks; i++) {
ret = clk_prepare_enable(gsc->clock[i]);
if (ret) {
dev_err(dev, "clock prepare failed for clock: %s\n",
drv_data->clk_names[i]);
while (--i >= 0)
clk_disable_unprepare(gsc->clock[i]);
return ret;
}
}
ret = devm_request_irq(dev, irq, gsc_irq_handler,
0, pdev->name, gsc);
if (ret) {
dev_err(dev, "failed to install irq (%d)\n", ret);
goto err_clk;
}
ret = v4l2_device_register(dev, &gsc->v4l2_dev);
if (ret)
goto err_clk;
ret = gsc_register_m2m_device(gsc);
if (ret)
goto err_v4l2;
platform_set_drvdata(pdev, gsc);
gsc_hw_set_sw_reset(gsc);
gsc_wait_reset(gsc);
vb2_dma_contig_set_max_seg_size(dev, DMA_BIT_MASK(32));
dev_dbg(dev, "gsc-%d registered successfully\n", gsc->id);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
err_v4l2:
v4l2_device_unregister(&gsc->v4l2_dev);
err_clk:
for (i = gsc->num_clocks - 1; i >= 0; i--)
clk_disable_unprepare(gsc->clock[i]);
return ret;
}
static void gsc_remove(struct platform_device *pdev)
{
struct gsc_dev *gsc = platform_get_drvdata(pdev);
int i;
gsc_unregister_m2m_device(gsc);
v4l2_device_unregister(&gsc->v4l2_dev);
vb2_dma_contig_clear_max_seg_size(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
for (i = 0; i < gsc->num_clocks; i++)
clk_disable_unprepare(gsc->clock[i]);
pm_runtime_set_suspended(&pdev->dev);
dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
}
#ifdef CONFIG_PM
static int gsc_m2m_suspend(struct gsc_dev *gsc)
{
unsigned long flags;
int timeout;
spin_lock_irqsave(&gsc->slock, flags);
if (!gsc_m2m_pending(gsc)) {
spin_unlock_irqrestore(&gsc->slock, flags);
return 0;
}
clear_bit(ST_M2M_SUSPENDED, &gsc->state);
set_bit(ST_M2M_SUSPENDING, &gsc->state);
spin_unlock_irqrestore(&gsc->slock, flags);
timeout = wait_event_timeout(gsc->irq_queue,
test_bit(ST_M2M_SUSPENDED, &gsc->state),
GSC_SHUTDOWN_TIMEOUT);
clear_bit(ST_M2M_SUSPENDING, &gsc->state);
return timeout == 0 ? -EAGAIN : 0;
}
static void gsc_m2m_resume(struct gsc_dev *gsc)
{
struct gsc_ctx *ctx;
unsigned long flags;
spin_lock_irqsave(&gsc->slock, flags);
/* Clear the context to force a full H/W setup on the first run after resume */
ctx = gsc->m2m.ctx;
gsc->m2m.ctx = NULL;
spin_unlock_irqrestore(&gsc->slock, flags);
if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
}
static int gsc_runtime_resume(struct device *dev)
{
struct gsc_dev *gsc = dev_get_drvdata(dev);
int ret = 0;
int i;
pr_debug("gsc%d: state: 0x%lx\n", gsc->id, gsc->state);
for (i = 0; i < gsc->num_clocks; i++) {
ret = clk_prepare_enable(gsc->clock[i]);
if (ret) {
while (--i >= 0)
clk_disable_unprepare(gsc->clock[i]);
return ret;
}
}
gsc_hw_set_sw_reset(gsc);
gsc_wait_reset(gsc);
gsc_m2m_resume(gsc);
return 0;
}
static int gsc_runtime_suspend(struct device *dev)
{
struct gsc_dev *gsc = dev_get_drvdata(dev);
int ret = 0;
int i;
ret = gsc_m2m_suspend(gsc);
if (ret)
return ret;
for (i = gsc->num_clocks - 1; i >= 0; i--)
clk_disable_unprepare(gsc->clock[i]);
pr_debug("gsc%d: state: 0x%lx\n", gsc->id, gsc->state);
return ret;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops gsc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
};
static struct platform_driver gsc_driver = {
.probe = gsc_probe,
.remove_new = gsc_remove,
.driver = {
.name = GSC_MODULE_NAME,
.pm = &gsc_pm_ops,
.of_match_table = exynos_gsc_match,
}
};
module_platform_driver(gsc_driver);
MODULE_AUTHOR("Hyunwong Kim <[email protected]>");
MODULE_DESCRIPTION("Samsung EXYNOS5 Soc series G-Scaler driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/platform/samsung/exynos-gsc/gsc-core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
*
* Copyright (C) 2012 Sylwester Nawrocki <[email protected]>
* Copyright (C) 2012 Tomasz Figa <[email protected]>
*/
#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <media/media-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "camif-core.h"
static char *camif_clocks[CLK_MAX_NUM] = {
/* HCLK CAMIF clock */
[CLK_GATE] = "camif",
/* CAMIF / external camera sensor master clock */
[CLK_CAM] = "camera",
};
static const struct camif_fmt camif_formats[] = {
{
.fourcc = V4L2_PIX_FMT_YUV422P,
.depth = 16,
.ybpp = 1,
.color = IMG_FMT_YCBCR422P,
.colplanes = 3,
.flags = FMT_FL_S3C24XX_CODEC |
FMT_FL_S3C64XX,
}, {
.fourcc = V4L2_PIX_FMT_YUV420,
.depth = 12,
.ybpp = 1,
.color = IMG_FMT_YCBCR420,
.colplanes = 3,
.flags = FMT_FL_S3C24XX_CODEC |
FMT_FL_S3C64XX,
}, {
.fourcc = V4L2_PIX_FMT_YVU420,
.depth = 12,
.ybpp = 1,
.color = IMG_FMT_YCRCB420,
.colplanes = 3,
.flags = FMT_FL_S3C24XX_CODEC |
FMT_FL_S3C64XX,
}, {
.fourcc = V4L2_PIX_FMT_RGB565X,
.depth = 16,
.ybpp = 2,
.color = IMG_FMT_RGB565,
.colplanes = 1,
.flags = FMT_FL_S3C24XX_PREVIEW |
FMT_FL_S3C64XX,
}, {
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.ybpp = 4,
.color = IMG_FMT_XRGB8888,
.colplanes = 1,
.flags = FMT_FL_S3C24XX_PREVIEW |
FMT_FL_S3C64XX,
}, {
.fourcc = V4L2_PIX_FMT_BGR666,
.depth = 32,
.ybpp = 4,
.color = IMG_FMT_RGB666,
.colplanes = 1,
.flags = FMT_FL_S3C64XX,
}
};
/**
* s3c_camif_find_format() - lookup camif color format by fourcc or an index
* @vp: video path (DMA) description (codec/preview)
* @pixelformat: fourcc to match, ignored if null
* @index: index to the camif_formats array, ignored if negative
*/
const struct camif_fmt *s3c_camif_find_format(struct camif_vp *vp,
const u32 *pixelformat,
int index)
{
const struct camif_fmt *fmt, *def_fmt = NULL;
unsigned int i;
int id = 0;
if (index >= (int)ARRAY_SIZE(camif_formats))
return NULL;
for (i = 0; i < ARRAY_SIZE(camif_formats); ++i) {
fmt = &camif_formats[i];
if (vp && !(vp->fmt_flags & fmt->flags))
continue;
if (pixelformat && fmt->fourcc == *pixelformat)
return fmt;
if (index == id)
def_fmt = fmt;
id++;
}
return def_fmt;
}
static int camif_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
{
unsigned int sh = 6;
if (src >= 64 * tar)
return -EINVAL;
while (sh--) {
unsigned int tmp = 1 << sh;
if (src >= tar * tmp) {
*shift = sh;
*ratio = tmp;
return 0;
}
}
*shift = 0;
*ratio = 1;
return 0;
}
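/*
 * Worked example: for src = 1280 and tar = 160 the loop stops at
 * sh = 3, since 160 << 3 = 1280 <= src, so *ratio = 8 and *shift = 3.
 * Sources at or beyond 64x the target (e.g. src = 1280, tar = 20,
 * where 64 * 20 = 1280) are rejected with -EINVAL.
 */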
int s3c_camif_get_scaler_config(struct camif_vp *vp,
struct camif_scaler *scaler)
{
struct v4l2_rect *camif_crop = &vp->camif->camif_crop;
int source_x = camif_crop->width;
int source_y = camif_crop->height;
int target_x = vp->out_frame.rect.width;
int target_y = vp->out_frame.rect.height;
int ret;
if (vp->rotation == 90 || vp->rotation == 270)
swap(target_x, target_y);
ret = camif_get_scaler_factor(source_x, target_x, &scaler->pre_h_ratio,
&scaler->h_shift);
if (ret < 0)
return ret;
ret = camif_get_scaler_factor(source_y, target_y, &scaler->pre_v_ratio,
&scaler->v_shift);
if (ret < 0)
return ret;
scaler->pre_dst_width = source_x / scaler->pre_h_ratio;
scaler->pre_dst_height = source_y / scaler->pre_v_ratio;
scaler->main_h_ratio = (source_x << 8) / (target_x << scaler->h_shift);
scaler->main_v_ratio = (source_y << 8) / (target_y << scaler->v_shift);
scaler->scaleup_h = (target_x >= source_x);
scaler->scaleup_v = (target_y >= source_y);
scaler->copy = 0;
pr_debug("H: ratio: %u, shift: %u. V: ratio: %u, shift: %u.\n",
scaler->pre_h_ratio, scaler->h_shift,
scaler->pre_v_ratio, scaler->v_shift);
pr_debug("Source: %dx%d, Target: %dx%d, scaleup_h/v: %d/%d\n",
source_x, source_y, target_x, target_y,
scaler->scaleup_h, scaler->scaleup_v);
return 0;
}
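/*
 * Worked example continuing the 1280 -> 160 case: the pre-scaler
 * reduces the source to pre_dst_width = 1280 / 8 = 160, and the main
 * scaler ratio (fixed point, 8 fractional bits) becomes
 * (1280 << 8) / (160 << 3) = 327680 / 1280 = 256, i.e. 1.0, so the
 * main scaler passes the pre-scaled image through unchanged.
 */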
static int camif_register_sensor(struct camif_dev *camif)
{
struct s3c_camif_sensor_info *sensor = &camif->pdata.sensor;
struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
struct i2c_adapter *adapter;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
struct v4l2_subdev *sd;
int ret;
camif->sensor.sd = NULL;
if (sensor->i2c_board_info.addr == 0)
return -EINVAL;
adapter = i2c_get_adapter(sensor->i2c_bus_num);
if (adapter == NULL) {
v4l2_warn(v4l2_dev, "failed to get I2C adapter %d\n",
sensor->i2c_bus_num);
return -EPROBE_DEFER;
}
sd = v4l2_i2c_new_subdev_board(v4l2_dev, adapter,
&sensor->i2c_board_info, NULL);
if (sd == NULL) {
i2c_put_adapter(adapter);
v4l2_warn(v4l2_dev, "failed to acquire subdev %s\n",
sensor->i2c_board_info.type);
return -EPROBE_DEFER;
}
camif->sensor.sd = sd;
v4l2_info(v4l2_dev, "registered sensor subdevice %s\n", sd->name);
/* Get initial pixel format and set it at the camif sink pad */
format.pad = 0;
ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
if (ret < 0)
return 0;
format.pad = CAMIF_SD_PAD_SINK;
v4l2_subdev_call(&camif->subdev, pad, set_fmt, NULL, &format);
v4l2_info(sd, "Initial format from sensor: %dx%d, %#x\n",
format.format.width, format.format.height,
format.format.code);
return 0;
}
static void camif_unregister_sensor(struct camif_dev *camif)
{
struct v4l2_subdev *sd = camif->sensor.sd;
struct i2c_client *client = sd ? v4l2_get_subdevdata(sd) : NULL;
struct i2c_adapter *adapter;
if (client == NULL)
return;
adapter = client->adapter;
v4l2_device_unregister_subdev(sd);
camif->sensor.sd = NULL;
i2c_unregister_device(client);
i2c_put_adapter(adapter);
}
static int camif_create_media_links(struct camif_dev *camif)
{
int i, ret;
ret = media_create_pad_link(&camif->sensor.sd->entity, 0,
&camif->subdev.entity, CAMIF_SD_PAD_SINK,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED);
if (ret)
return ret;
for (i = 1; i < CAMIF_SD_PADS_NUM && !ret; i++) {
ret = media_create_pad_link(&camif->subdev.entity, i,
&camif->vp[i - 1].vdev.entity, 0,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED);
}
return ret;
}
static int camif_register_video_nodes(struct camif_dev *camif)
{
int ret = s3c_camif_register_video_node(camif, VP_CODEC);
if (ret < 0)
return ret;
return s3c_camif_register_video_node(camif, VP_PREVIEW);
}
static void camif_unregister_video_nodes(struct camif_dev *camif)
{
s3c_camif_unregister_video_node(camif, VP_CODEC);
s3c_camif_unregister_video_node(camif, VP_PREVIEW);
}
static void camif_unregister_media_entities(struct camif_dev *camif)
{
camif_unregister_video_nodes(camif);
camif_unregister_sensor(camif);
}
/*
* Media device
*/
static int camif_media_dev_init(struct camif_dev *camif)
{
struct media_device *md = &camif->media_dev;
struct v4l2_device *v4l2_dev = &camif->v4l2_dev;
unsigned int ip_rev = camif->variant->ip_revision;
int ret;
memset(md, 0, sizeof(*md));
snprintf(md->model, sizeof(md->model), "Samsung S3C%s CAMIF",
ip_rev == S3C6410_CAMIF_IP_REV ? "6410" : "244X");
strscpy(md->bus_info, "platform", sizeof(md->bus_info));
md->hw_revision = ip_rev;
md->dev = camif->dev;
strscpy(v4l2_dev->name, "s3c-camif", sizeof(v4l2_dev->name));
v4l2_dev->mdev = md;
media_device_init(md);
ret = v4l2_device_register(camif->dev, v4l2_dev);
return ret;
}
static void camif_clk_put(struct camif_dev *camif)
{
int i;
for (i = 0; i < CLK_MAX_NUM; i++) {
if (IS_ERR(camif->clock[i]))
continue;
clk_unprepare(camif->clock[i]);
clk_put(camif->clock[i]);
camif->clock[i] = ERR_PTR(-EINVAL);
}
}
static int camif_clk_get(struct camif_dev *camif)
{
int ret, i;
for (i = 1; i < CLK_MAX_NUM; i++)
camif->clock[i] = ERR_PTR(-EINVAL);
for (i = 0; i < CLK_MAX_NUM; i++) {
camif->clock[i] = clk_get(camif->dev, camif_clocks[i]);
if (IS_ERR(camif->clock[i])) {
ret = PTR_ERR(camif->clock[i]);
goto err;
}
ret = clk_prepare(camif->clock[i]);
if (ret < 0) {
clk_put(camif->clock[i]);
camif->clock[i] = NULL;
goto err;
}
}
return 0;
err:
camif_clk_put(camif);
dev_err(camif->dev, "failed to get clock: %s\n",
camif_clocks[i]);
return ret;
}
/*
* The CAMIF device has two relatively independent data processing paths
* that can source data from memory or the common camera input frontend.
* Register interrupts for each data processing path (camif_vp).
*/
static int camif_request_irqs(struct platform_device *pdev,
struct camif_dev *camif)
{
int irq, ret, i;
for (i = 0; i < CAMIF_VP_NUM; i++) {
struct camif_vp *vp = &camif->vp[i];
init_waitqueue_head(&vp->irq_queue);
irq = platform_get_irq(pdev, i);
if (irq < 0)
return irq;
ret = devm_request_irq(&pdev->dev, irq, s3c_camif_irq_handler,
0, dev_name(&pdev->dev), vp);
if (ret < 0) {
dev_err(&pdev->dev, "failed to install IRQ: %d\n", ret);
break;
}
}
return ret;
}
static int s3c_camif_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct s3c_camif_plat_data *pdata = dev->platform_data;
struct s3c_camif_drvdata *drvdata;
struct camif_dev *camif;
int ret = 0;
camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL);
if (!camif)
return -ENOMEM;
spin_lock_init(&camif->slock);
mutex_init(&camif->lock);
camif->dev = dev;
if (!pdata || !pdata->gpio_get || !pdata->gpio_put) {
dev_err(dev, "wrong platform data\n");
return -EINVAL;
}
camif->pdata = *pdata;
drvdata = (void *)platform_get_device_id(pdev)->driver_data;
camif->variant = drvdata->variant;
camif->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(camif->io_base))
return PTR_ERR(camif->io_base);
ret = camif_request_irqs(pdev, camif);
if (ret < 0)
return ret;
ret = pdata->gpio_get();
if (ret < 0)
return ret;
ret = s3c_camif_create_subdev(camif);
if (ret < 0)
goto err_sd;
ret = camif_clk_get(camif);
if (ret < 0)
goto err_clk;
platform_set_drvdata(pdev, camif);
clk_set_rate(camif->clock[CLK_CAM],
camif->pdata.sensor.clock_frequency);
dev_info(dev, "sensor clock frequency: %lu\n",
clk_get_rate(camif->clock[CLK_CAM]));
/*
* Set initial pixel format, resolution and crop rectangle.
* Must be done before a sensor subdev is registered, as some
* settings are overridden with values from the sensor subdev.
*/
s3c_camif_set_defaults(camif);
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
goto err_disable;
ret = camif_media_dev_init(camif);
if (ret < 0)
goto err_pm;
ret = camif_register_sensor(camif);
if (ret < 0)
goto err_sens;
ret = v4l2_device_register_subdev(&camif->v4l2_dev, &camif->subdev);
if (ret < 0)
goto err_sens;
ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
if (ret < 0)
goto err_sens;
ret = camif_register_video_nodes(camif);
if (ret < 0)
goto err_sens;
ret = camif_create_media_links(camif);
if (ret < 0)
goto err_sens;
ret = media_device_register(&camif->media_dev);
if (ret < 0)
goto err_sens;
pm_runtime_put(dev);
return 0;
err_sens:
v4l2_device_unregister(&camif->v4l2_dev);
media_device_unregister(&camif->media_dev);
media_device_cleanup(&camif->media_dev);
camif_unregister_media_entities(camif);
err_pm:
pm_runtime_put(dev);
err_disable:
pm_runtime_disable(dev);
camif_clk_put(camif);
err_clk:
s3c_camif_unregister_subdev(camif);
err_sd:
pdata->gpio_put();
return ret;
}
static void s3c_camif_remove(struct platform_device *pdev)
{
struct camif_dev *camif = platform_get_drvdata(pdev);
struct s3c_camif_plat_data *pdata = &camif->pdata;
media_device_unregister(&camif->media_dev);
media_device_cleanup(&camif->media_dev);
camif_unregister_media_entities(camif);
v4l2_device_unregister(&camif->v4l2_dev);
pm_runtime_disable(&pdev->dev);
camif_clk_put(camif);
s3c_camif_unregister_subdev(camif);
pdata->gpio_put();
}
static int s3c_camif_runtime_resume(struct device *dev)
{
struct camif_dev *camif = dev_get_drvdata(dev);
clk_enable(camif->clock[CLK_GATE]);
/* null op on s3c244x */
clk_enable(camif->clock[CLK_CAM]);
return 0;
}
static int s3c_camif_runtime_suspend(struct device *dev)
{
struct camif_dev *camif = dev_get_drvdata(dev);
/* null op on s3c244x */
clk_disable(camif->clock[CLK_CAM]);
clk_disable(camif->clock[CLK_GATE]);
return 0;
}
static const struct s3c_camif_variant s3c244x_camif_variant = {
.vp_pix_limits = {
[VP_CODEC] = {
.max_out_width = 4096,
.max_sc_out_width = 2048,
.out_width_align = 16,
.min_out_width = 16,
.max_height = 4096,
},
[VP_PREVIEW] = {
.max_out_width = 640,
.max_sc_out_width = 640,
.out_width_align = 16,
.min_out_width = 16,
.max_height = 480,
}
},
.pix_limits = {
.win_hor_offset_align = 8,
},
.ip_revision = S3C244X_CAMIF_IP_REV,
};
static struct s3c_camif_drvdata s3c244x_camif_drvdata = {
.variant = &s3c244x_camif_variant,
.bus_clk_freq = 24000000UL,
};
static const struct s3c_camif_variant s3c6410_camif_variant = {
.vp_pix_limits = {
[VP_CODEC] = {
.max_out_width = 4096,
.max_sc_out_width = 2048,
.out_width_align = 16,
.min_out_width = 16,
.max_height = 4096,
},
[VP_PREVIEW] = {
.max_out_width = 4096,
.max_sc_out_width = 720,
.out_width_align = 16,
.min_out_width = 16,
.max_height = 4096,
}
},
.pix_limits = {
.win_hor_offset_align = 8,
},
.ip_revision = S3C6410_CAMIF_IP_REV,
.has_img_effect = 1,
.vp_offset = 0x20,
};
static struct s3c_camif_drvdata s3c6410_camif_drvdata = {
.variant = &s3c6410_camif_variant,
.bus_clk_freq = 133000000UL,
};
static const struct platform_device_id s3c_camif_driver_ids[] = {
{
.name = "s3c2440-camif",
.driver_data = (unsigned long)&s3c244x_camif_drvdata,
}, {
.name = "s3c6410-camif",
.driver_data = (unsigned long)&s3c6410_camif_drvdata,
},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(platform, s3c_camif_driver_ids);
static const struct dev_pm_ops s3c_camif_pm_ops = {
.runtime_suspend = s3c_camif_runtime_suspend,
.runtime_resume = s3c_camif_runtime_resume,
};
static struct platform_driver s3c_camif_driver = {
.probe = s3c_camif_probe,
.remove_new = s3c_camif_remove,
.id_table = s3c_camif_driver_ids,
.driver = {
.name = S3C_CAMIF_DRIVER_NAME,
.pm = &s3c_camif_pm_ops,
}
};
module_platform_driver(s3c_camif_driver);
MODULE_AUTHOR("Sylwester Nawrocki <[email protected]>");
MODULE_AUTHOR("Tomasz Figa <[email protected]>");
MODULE_DESCRIPTION("S3C24XX/S3C64XX SoC camera interface driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/media/platform/samsung/s3c-camif/camif-core.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* s3c24xx/s3c64xx SoC series Camera Interface (CAMIF) driver
*
* Copyright (C) 2012 Sylwester Nawrocki <[email protected]>
* Copyright (C) 2012 Tomasz Figa <[email protected]>
*
* Based on drivers/media/platform/s5p-fimc,
* Copyright (C) 2010 - 2012 Samsung Electronics Co., Ltd.
*/
#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <media/media-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include "camif-core.h"
#include "camif-regs.h"
static int debug;
module_param(debug, int, 0644);
/* Locking: called with vp->camif->slock spinlock held */
static void camif_cfg_video_path(struct camif_vp *vp)
{
WARN_ON(s3c_camif_get_scaler_config(vp, &vp->scaler));
camif_hw_set_scaler(vp);
camif_hw_set_flip(vp);
camif_hw_set_target_format(vp);
camif_hw_set_output_dma(vp);
}
static void camif_prepare_dma_offset(struct camif_vp *vp)
{
struct camif_frame *f = &vp->out_frame;
f->dma_offset.initial = f->rect.top * f->f_width + f->rect.left;
f->dma_offset.line = f->f_width - (f->rect.left + f->rect.width);
pr_debug("dma_offset: initial: %d, line: %d\n",
f->dma_offset.initial, f->dma_offset.line);
}
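/*
 * Worked example: with f_width = 640 and a 600x400 composition
 * rectangle at (20, 10), the DMA engine starts 10 * 640 + 20 = 6420
 * pixels into the buffer and skips 640 - (20 + 600) = 20 pixels at
 * the end of each output line.
 */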
/* Locking: called with camif->slock spinlock held */
static int s3c_camif_hw_init(struct camif_dev *camif, struct camif_vp *vp)
{
const struct s3c_camif_variant *variant = camif->variant;
if (camif->sensor.sd == NULL || vp->out_fmt == NULL)
return -EINVAL;
if (variant->ip_revision == S3C244X_CAMIF_IP_REV)
camif_hw_clear_fifo_overflow(vp);
camif_hw_set_camera_bus(camif);
camif_hw_set_source_format(camif);
camif_hw_set_camera_crop(camif);
camif_hw_set_test_pattern(camif, camif->test_pattern);
if (variant->has_img_effect)
camif_hw_set_effect(camif, camif->colorfx,
camif->colorfx_cr, camif->colorfx_cb);
if (variant->ip_revision == S3C6410_CAMIF_IP_REV)
camif_hw_set_input_path(vp);
camif_cfg_video_path(vp);
vp->state &= ~ST_VP_CONFIG;
return 0;
}
/*
* Initialize the video path, only up from the scaler stage. The camera
* input interface set up is skipped. This is useful to enable one of the
* video paths when the other is already running.
* Locking: called with camif->slock spinlock held.
*/
static int s3c_camif_hw_vp_init(struct camif_dev *camif, struct camif_vp *vp)
{
unsigned int ip_rev = camif->variant->ip_revision;
if (vp->out_fmt == NULL)
return -EINVAL;
camif_prepare_dma_offset(vp);
if (ip_rev == S3C244X_CAMIF_IP_REV)
camif_hw_clear_fifo_overflow(vp);
camif_cfg_video_path(vp);
vp->state &= ~ST_VP_CONFIG;
return 0;
}
static int sensor_set_power(struct camif_dev *camif, int on)
{
struct cam_sensor *sensor = &camif->sensor;
int err = 0;
if (camif->sensor.power_count == !on)
err = v4l2_subdev_call(sensor->sd, core, s_power, on);
if (err == -ENOIOCTLCMD)
err = 0;
if (!err)
sensor->power_count += on ? 1 : -1;
pr_debug("on: %d, power_count: %d, err: %d\n",
on, sensor->power_count, err);
return err;
}
static int sensor_set_streaming(struct camif_dev *camif, int on)
{
struct cam_sensor *sensor = &camif->sensor;
int err = 0;
if (camif->sensor.stream_count == !on)
err = v4l2_subdev_call(sensor->sd, video, s_stream, on);
if (!err)
sensor->stream_count += on ? 1 : -1;
pr_debug("on: %d, stream_count: %d, err: %d\n",
on, sensor->stream_count, err);
return err;
}
/*
* Reinitialize the driver so it is ready to start streaming again.
* Return any buffers to vb2, perform CAMIF software reset and
* turn off streaming at the data pipeline (sensor) if required.
*/
static int camif_reinitialize(struct camif_vp *vp)
{
struct camif_dev *camif = vp->camif;
struct camif_buffer *buf;
unsigned long flags;
bool streaming;
spin_lock_irqsave(&camif->slock, flags);
streaming = vp->state & ST_VP_SENSOR_STREAMING;
vp->state &= ~(ST_VP_PENDING | ST_VP_RUNNING | ST_VP_OFF |
ST_VP_ABORTING | ST_VP_STREAMING |
ST_VP_SENSOR_STREAMING | ST_VP_LASTIRQ);
/* Release unused buffers */
while (!list_empty(&vp->pending_buf_q)) {
buf = camif_pending_queue_pop(vp);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
while (!list_empty(&vp->active_buf_q)) {
buf = camif_active_queue_pop(vp);
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
}
spin_unlock_irqrestore(&camif->slock, flags);
if (!streaming)
return 0;
return sensor_set_streaming(camif, 0);
}
static bool s3c_vp_active(struct camif_vp *vp)
{
struct camif_dev *camif = vp->camif;
unsigned long flags;
bool ret;
spin_lock_irqsave(&camif->slock, flags);
ret = (vp->state & ST_VP_RUNNING) || (vp->state & ST_VP_PENDING);
spin_unlock_irqrestore(&camif->slock, flags);
return ret;
}
static bool camif_is_streaming(struct camif_dev *camif)
{
unsigned long flags;
bool status;
spin_lock_irqsave(&camif->slock, flags);
status = camif->stream_count > 0;
spin_unlock_irqrestore(&camif->slock, flags);
return status;
}
static int camif_stop_capture(struct camif_vp *vp)
{
struct camif_dev *camif = vp->camif;
unsigned long flags;
int ret;
if (!s3c_vp_active(vp))
return 0;
spin_lock_irqsave(&camif->slock, flags);
vp->state &= ~(ST_VP_OFF | ST_VP_LASTIRQ);
vp->state |= ST_VP_ABORTING;
spin_unlock_irqrestore(&camif->slock, flags);
ret = wait_event_timeout(vp->irq_queue,
!(vp->state & ST_VP_ABORTING),
msecs_to_jiffies(CAMIF_STOP_TIMEOUT));
spin_lock_irqsave(&camif->slock, flags);
if (ret == 0 && !(vp->state & ST_VP_OFF)) {
/* Timed out, forcibly stop capture */
vp->state &= ~(ST_VP_OFF | ST_VP_ABORTING |
ST_VP_LASTIRQ);
camif_hw_disable_capture(vp);
camif_hw_enable_scaler(vp, false);
}
spin_unlock_irqrestore(&camif->slock, flags);
return camif_reinitialize(vp);
}
static int camif_prepare_addr(struct camif_vp *vp, struct vb2_buffer *vb,
struct camif_addr *paddr)
{
struct camif_frame *frame = &vp->out_frame;
u32 pix_size;
if (vb == NULL || frame == NULL)
return -EINVAL;
pix_size = frame->rect.width * frame->rect.height;
pr_debug("colplanes: %d, pix_size: %u\n",
vp->out_fmt->colplanes, pix_size);
paddr->y = vb2_dma_contig_plane_dma_addr(vb, 0);
switch (vp->out_fmt->colplanes) {
case 1:
paddr->cb = 0;
paddr->cr = 0;
break;
case 2:
/* decompose Y into Y/Cb */
paddr->cb = (u32)(paddr->y + pix_size);
paddr->cr = 0;
break;
case 3:
/* decompose Y into Y/Cb/Cr */
paddr->cb = (u32)(paddr->y + pix_size);
if (vp->out_fmt->color == IMG_FMT_YCBCR422P)
paddr->cr = (u32)(paddr->cb + (pix_size >> 1));
else /* 420 */
paddr->cr = (u32)(paddr->cb + (pix_size >> 2));
if (vp->out_fmt->color == IMG_FMT_YCRCB420)
swap(paddr->cb, paddr->cr);
break;
default:
return -EINVAL;
}
pr_debug("DMA address: y: %pad cb: %pad cr: %pad\n",
&paddr->y, &paddr->cb, &paddr->cr);
return 0;
}
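/*
 * Worked example for the three-plane case above: a 320x240
 * IMG_FMT_YCBCR422P buffer has pix_size = 76800, so cb = y + 76800
 * and cr = cb + (76800 >> 1) = cb + 38400. For 4:2:0 formats cr
 * follows at cb + 19200 instead, and IMG_FMT_YCRCB420 additionally
 * swaps cb and cr.
 */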
irqreturn_t s3c_camif_irq_handler(int irq, void *priv)
{
struct camif_vp *vp = priv;
struct camif_dev *camif = vp->camif;
unsigned int ip_rev = camif->variant->ip_revision;
unsigned int status;
spin_lock(&camif->slock);
if (ip_rev == S3C6410_CAMIF_IP_REV)
camif_hw_clear_pending_irq(vp);
status = camif_hw_get_status(vp);
if (ip_rev == S3C244X_CAMIF_IP_REV && (status & CISTATUS_OVF_MASK)) {
camif_hw_clear_fifo_overflow(vp);
goto unlock;
}
if (vp->state & ST_VP_ABORTING) {
if (vp->state & ST_VP_OFF) {
/* Last IRQ */
vp->state &= ~(ST_VP_OFF | ST_VP_ABORTING |
ST_VP_LASTIRQ);
wake_up(&vp->irq_queue);
goto unlock;
} else if (vp->state & ST_VP_LASTIRQ) {
camif_hw_disable_capture(vp);
camif_hw_enable_scaler(vp, false);
camif_hw_set_lastirq(vp, false);
vp->state |= ST_VP_OFF;
} else {
/* Disable capture, enable last IRQ */
camif_hw_set_lastirq(vp, true);
vp->state |= ST_VP_LASTIRQ;
}
}
if (!list_empty(&vp->pending_buf_q) && (vp->state & ST_VP_RUNNING) &&
!list_empty(&vp->active_buf_q)) {
unsigned int index;
struct camif_buffer *vbuf;
/*
* Get previous DMA write buffer index:
* 0 => DMA buffer 0, 2;
* 1 => DMA buffer 1, 3.
*/
index = (CISTATUS_FRAMECNT(status) + 2) & 1;
vbuf = camif_active_queue_peek(vp, index);
if (!WARN_ON(vbuf == NULL)) {
/* Dequeue a filled buffer */
vbuf->vb.vb2_buf.timestamp = ktime_get_ns();
vbuf->vb.sequence = vp->frame_sequence++;
vb2_buffer_done(&vbuf->vb.vb2_buf, VB2_BUF_STATE_DONE);
/* Set up an empty buffer at the DMA engine */
vbuf = camif_pending_queue_pop(vp);
vbuf->index = index;
camif_hw_set_output_addr(vp, &vbuf->paddr, index);
camif_hw_set_output_addr(vp, &vbuf->paddr, index + 2);
/* Scheduled in H/W, add to the queue */
camif_active_queue_add(vp, vbuf);
}
} else if (!(vp->state & ST_VP_ABORTING) &&
(vp->state & ST_VP_PENDING)) {
vp->state |= ST_VP_RUNNING;
}
if (vp->state & ST_VP_CONFIG) {
camif_prepare_dma_offset(vp);
camif_hw_set_camera_crop(camif);
camif_hw_set_scaler(vp);
camif_hw_set_flip(vp);
camif_hw_set_test_pattern(camif, camif->test_pattern);
if (camif->variant->has_img_effect)
camif_hw_set_effect(camif, camif->colorfx,
camif->colorfx_cr, camif->colorfx_cb);
vp->state &= ~ST_VP_CONFIG;
}
unlock:
spin_unlock(&camif->slock);
return IRQ_HANDLED;
}
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct camif_vp *vp = vb2_get_drv_priv(vq);
struct camif_dev *camif = vp->camif;
unsigned long flags;
int ret;
/*
* We assume the codec capture path is always activated
* first, before the preview path starts streaming.
* This is required to avoid internal FIFO overflow and
* a need for CAMIF software reset.
*/
spin_lock_irqsave(&camif->slock, flags);
if (camif->stream_count == 0) {
camif_hw_reset(camif);
ret = s3c_camif_hw_init(camif, vp);
} else {
ret = s3c_camif_hw_vp_init(camif, vp);
}
spin_unlock_irqrestore(&camif->slock, flags);
if (ret < 0) {
camif_reinitialize(vp);
return ret;
}
spin_lock_irqsave(&camif->slock, flags);
vp->frame_sequence = 0;
vp->state |= ST_VP_PENDING;
if (!list_empty(&vp->pending_buf_q) &&
(!(vp->state & ST_VP_STREAMING) ||
!(vp->state & ST_VP_SENSOR_STREAMING))) {
camif_hw_enable_scaler(vp, vp->scaler.enable);
camif_hw_enable_capture(vp);
vp->state |= ST_VP_STREAMING;
if (!(vp->state & ST_VP_SENSOR_STREAMING)) {
vp->state |= ST_VP_SENSOR_STREAMING;
spin_unlock_irqrestore(&camif->slock, flags);
ret = sensor_set_streaming(camif, 1);
if (ret)
v4l2_err(&vp->vdev, "Sensor s_stream failed\n");
if (debug)
camif_hw_dump_regs(camif, __func__);
return ret;
}
}
spin_unlock_irqrestore(&camif->slock, flags);
return 0;
}
static void stop_streaming(struct vb2_queue *vq)
{
struct camif_vp *vp = vb2_get_drv_priv(vq);
camif_stop_capture(vp);
}
static int queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct camif_vp *vp = vb2_get_drv_priv(vq);
struct camif_frame *frame = &vp->out_frame;
const struct camif_fmt *fmt = vp->out_fmt;
unsigned int size;
if (fmt == NULL)
return -EINVAL;
size = (frame->f_width * frame->f_height * fmt->depth) / 8;
if (*num_planes)
return sizes[0] < size ? -EINVAL : 0;
*num_planes = 1;
sizes[0] = size;
pr_debug("size: %u\n", sizes[0]);
return 0;
}
static int buffer_prepare(struct vb2_buffer *vb)
{
struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
if (vp->out_fmt == NULL)
return -EINVAL;
if (vb2_plane_size(vb, 0) < vp->payload) {
v4l2_err(&vp->vdev, "buffer too small: %lu, required: %u\n",
vb2_plane_size(vb, 0), vp->payload);
return -EINVAL;
}
vb2_set_plane_payload(vb, 0, vp->payload);
return 0;
}
static void buffer_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct camif_buffer *buf = container_of(vbuf, struct camif_buffer, vb);
struct camif_vp *vp = vb2_get_drv_priv(vb->vb2_queue);
struct camif_dev *camif = vp->camif;
unsigned long flags;
spin_lock_irqsave(&camif->slock, flags);
WARN_ON(camif_prepare_addr(vp, &buf->vb.vb2_buf, &buf->paddr));
if (!(vp->state & ST_VP_STREAMING) && vp->active_buffers < 2) {
/* Schedule an empty buffer in H/W */
buf->index = vp->buf_index;
camif_hw_set_output_addr(vp, &buf->paddr, buf->index);
camif_hw_set_output_addr(vp, &buf->paddr, buf->index + 2);
camif_active_queue_add(vp, buf);
vp->buf_index = !vp->buf_index;
} else {
camif_pending_queue_add(vp, buf);
}
if (vb2_is_streaming(&vp->vb_queue) && !list_empty(&vp->pending_buf_q)
&& !(vp->state & ST_VP_STREAMING)) {
vp->state |= ST_VP_STREAMING;
camif_hw_enable_scaler(vp, vp->scaler.enable);
camif_hw_enable_capture(vp);
spin_unlock_irqrestore(&camif->slock, flags);
if (!(vp->state & ST_VP_SENSOR_STREAMING)) {
if (sensor_set_streaming(camif, 1) == 0)
vp->state |= ST_VP_SENSOR_STREAMING;
else
v4l2_err(&vp->vdev, "Sensor s_stream failed\n");
if (debug)
camif_hw_dump_regs(camif, __func__);
}
return;
}
spin_unlock_irqrestore(&camif->slock, flags);
}
static const struct vb2_ops s3c_camif_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.start_streaming = start_streaming,
.stop_streaming = stop_streaming,
};
static int s3c_camif_open(struct file *file)
{
struct camif_vp *vp = video_drvdata(file);
struct camif_dev *camif = vp->camif;
int ret;
pr_debug("[vp%d] state: %#x, owner: %p, pid: %d\n", vp->id,
vp->state, vp->owner, task_pid_nr(current));
if (mutex_lock_interruptible(&camif->lock))
return -ERESTARTSYS;
ret = v4l2_fh_open(file);
if (ret < 0)
goto unlock;
ret = pm_runtime_resume_and_get(camif->dev);
if (ret < 0)
goto err_pm;
ret = sensor_set_power(camif, 1);
if (!ret)
goto unlock;
pm_runtime_put(camif->dev);
err_pm:
v4l2_fh_release(file);
unlock:
mutex_unlock(&camif->lock);
return ret;
}
static int s3c_camif_close(struct file *file)
{
struct camif_vp *vp = video_drvdata(file);
struct camif_dev *camif = vp->camif;
int ret;
pr_debug("[vp%d] state: %#x, owner: %p, pid: %d\n", vp->id,
vp->state, vp->owner, task_pid_nr(current));
mutex_lock(&camif->lock);
if (vp->owner == file->private_data) {
camif_stop_capture(vp);
vb2_queue_release(&vp->vb_queue);
vp->owner = NULL;
}
sensor_set_power(camif, 0);
pm_runtime_put(camif->dev);
ret = v4l2_fh_release(file);
mutex_unlock(&camif->lock);
return ret;
}
static __poll_t s3c_camif_poll(struct file *file,
struct poll_table_struct *wait)
{
struct camif_vp *vp = video_drvdata(file);
struct camif_dev *camif = vp->camif;
__poll_t ret;
mutex_lock(&camif->lock);
if (vp->owner && vp->owner != file->private_data)
ret = EPOLLERR;
else
ret = vb2_poll(&vp->vb_queue, file, wait);
mutex_unlock(&camif->lock);
return ret;
}
static int s3c_camif_mmap(struct file *file, struct vm_area_struct *vma)
{
struct camif_vp *vp = video_drvdata(file);
int ret;
if (vp->owner && vp->owner != file->private_data)
ret = -EBUSY;
else
ret = vb2_mmap(&vp->vb_queue, vma);
return ret;
}
static const struct v4l2_file_operations s3c_camif_fops = {
.owner = THIS_MODULE,
.open = s3c_camif_open,
.release = s3c_camif_close,
.poll = s3c_camif_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = s3c_camif_mmap,
};
/*
* Video node IOCTLs
*/
static int s3c_camif_vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct camif_vp *vp = video_drvdata(file);
strscpy(cap->driver, S3C_CAMIF_DRIVER_NAME, sizeof(cap->driver));
strscpy(cap->card, S3C_CAMIF_DRIVER_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s.%d",
dev_name(vp->camif->dev), vp->id);
return 0;
}
static int s3c_camif_vidioc_enum_input(struct file *file, void *priv,
struct v4l2_input *input)
{
struct camif_vp *vp = video_drvdata(file);
struct v4l2_subdev *sensor = vp->camif->sensor.sd;
if (input->index || sensor == NULL)
return -EINVAL;
input->type = V4L2_INPUT_TYPE_CAMERA;
strscpy(input->name, sensor->name, sizeof(input->name));
return 0;
}
static int s3c_camif_vidioc_s_input(struct file *file, void *priv,
unsigned int i)
{
return i == 0 ? 0 : -EINVAL;
}
static int s3c_camif_vidioc_g_input(struct file *file, void *priv,
unsigned int *i)
{
*i = 0;
return 0;
}
static int s3c_camif_vidioc_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct camif_vp *vp = video_drvdata(file);
const struct camif_fmt *fmt;
fmt = s3c_camif_find_format(vp, NULL, f->index);
if (!fmt)
return -EINVAL;
f->pixelformat = fmt->fourcc;
return 0;
}
static int s3c_camif_vidioc_g_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct camif_vp *vp = video_drvdata(file);
struct v4l2_pix_format *pix = &f->fmt.pix;
struct camif_frame *frame = &vp->out_frame;
const struct camif_fmt *fmt = vp->out_fmt;
pix->bytesperline = frame->f_width * fmt->ybpp;
pix->sizeimage = vp->payload;
pix->pixelformat = fmt->fourcc;
pix->width = frame->f_width;
pix->height = frame->f_height;
pix->field = V4L2_FIELD_NONE;
pix->colorspace = V4L2_COLORSPACE_JPEG;
return 0;
}
static int __camif_video_try_format(struct camif_vp *vp,
struct v4l2_pix_format *pix,
const struct camif_fmt **ffmt)
{
struct camif_dev *camif = vp->camif;
struct v4l2_rect *crop = &camif->camif_crop;
unsigned int wmin, hmin, sc_hrmax, sc_vrmax;
const struct vp_pix_limits *pix_lim;
const struct camif_fmt *fmt;
fmt = s3c_camif_find_format(vp, &pix->pixelformat, 0);
if (WARN_ON(fmt == NULL))
return -EINVAL;
if (ffmt)
*ffmt = fmt;
pix_lim = &camif->variant->vp_pix_limits[vp->id];
pr_debug("fmt: %ux%u, crop: %ux%u, bytesperline: %u\n",
pix->width, pix->height, crop->width, crop->height,
pix->bytesperline);
/*
* Calculate minimum width and height according to the configured
* camera input interface crop rectangle and the resizer's capabilities.
*/
sc_hrmax = min(SCALER_MAX_RATIO, 1 << (ffs(crop->width) - 3));
sc_vrmax = min(SCALER_MAX_RATIO, 1 << (ffs(crop->height) - 1));
wmin = max_t(u32, pix_lim->min_out_width, crop->width / sc_hrmax);
wmin = round_up(wmin, pix_lim->out_width_align);
hmin = max_t(u32, 8, crop->height / sc_vrmax);
hmin = round_up(hmin, 8);
v4l_bound_align_image(&pix->width, wmin, pix_lim->max_sc_out_width,
ffs(pix_lim->out_width_align) - 1,
&pix->height, hmin, pix_lim->max_height, 0, 0);
pix->bytesperline = pix->width * fmt->ybpp;
pix->sizeimage = (pix->width * pix->height * fmt->depth) / 8;
pix->pixelformat = fmt->fourcc;
pix->colorspace = V4L2_COLORSPACE_JPEG;
pix->field = V4L2_FIELD_NONE;
pr_debug("%ux%u, wmin: %d, hmin: %d, sc_hrmax: %d, sc_vrmax: %d\n",
pix->width, pix->height, wmin, hmin, sc_hrmax, sc_vrmax);
return 0;
}
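/*
 * Worked example with the s3c244x codec-path limits from camif-core.c
 * (min_out_width = 16, out_width_align = 16), assuming
 * SCALER_MAX_RATIO >= 32: for a 640-pixel-wide crop, ffs(640) = 8 so
 * sc_hrmax = 1 << (8 - 3) = 32, wmin = max(16, 640 / 32) = 20,
 * rounded up to the 16-pixel alignment -> a 32-pixel minimum output
 * width.
 */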
static int s3c_camif_vidioc_try_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct camif_vp *vp = video_drvdata(file);
return __camif_video_try_format(vp, &f->fmt.pix, NULL);
}
static int s3c_camif_vidioc_s_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format *pix = &f->fmt.pix;
struct camif_vp *vp = video_drvdata(file);
struct camif_frame *out_frame = &vp->out_frame;
const struct camif_fmt *fmt = NULL;
int ret;
pr_debug("[vp%d]\n", vp->id);
if (vb2_is_busy(&vp->vb_queue))
return -EBUSY;
ret = __camif_video_try_format(vp, &f->fmt.pix, &fmt);
if (ret < 0)
return ret;
vp->out_fmt = fmt;
vp->payload = pix->sizeimage;
out_frame->f_width = pix->width;
out_frame->f_height = pix->height;
/* Reset composition rectangle */
out_frame->rect.width = pix->width;
out_frame->rect.height = pix->height;
out_frame->rect.left = 0;
out_frame->rect.top = 0;
if (vp->owner == NULL)
vp->owner = priv;
pr_debug("%ux%u. payload: %u. fmt: 0x%08x. %d %d. sizeimage: %d. bpl: %d\n",
out_frame->f_width, out_frame->f_height, vp->payload,
fmt->fourcc, pix->width * pix->height * fmt->depth,
fmt->depth, pix->sizeimage, pix->bytesperline);
return 0;
}
/* Only check pixel formats at the sensor and the camif subdev pads */
static int camif_pipeline_validate(struct camif_dev *camif)
{
struct v4l2_subdev_format src_fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
struct media_pad *pad;
int ret;
/* Retrieve format at the sensor subdev source pad */
pad = media_pad_remote_pad_first(&camif->pads[0]);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
return -EPIPE;
src_fmt.pad = pad->index;
ret = v4l2_subdev_call(camif->sensor.sd, pad, get_fmt, NULL, &src_fmt);
if (ret < 0 && ret != -ENOIOCTLCMD)
return -EPIPE;
if (src_fmt.format.width != camif->mbus_fmt.width ||
src_fmt.format.height != camif->mbus_fmt.height ||
src_fmt.format.code != camif->mbus_fmt.code)
return -EPIPE;
return 0;
}
static int s3c_camif_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct camif_vp *vp = video_drvdata(file);
struct camif_dev *camif = vp->camif;
struct media_entity *sensor = &camif->sensor.sd->entity;
int ret;
pr_debug("[vp%d]\n", vp->id);
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (vp->owner && vp->owner != priv)
return -EBUSY;
if (s3c_vp_active(vp))
return 0;
ret = media_pipeline_start(sensor->pads, camif->m_pipeline);
if (ret < 0)
return ret;
ret = camif_pipeline_validate(camif);
if (ret < 0) {
media_pipeline_stop(sensor->pads);
return ret;
}
return vb2_streamon(&vp->vb_queue, type);
}
static int s3c_camif_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct camif_vp *vp = video_drvdata(file);
struct camif_dev *camif = vp->camif;
int ret;
pr_debug("[vp%d]\n", vp->id);
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (vp->owner && vp->owner != priv)
return -EBUSY;
ret = vb2_streamoff(&vp->vb_queue, type);
if (ret == 0)
media_pipeline_stop(camif->sensor.sd->entity.pads);
return ret;
}
static int s3c_camif_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
struct camif_vp *vp = video_drvdata(file);
int ret;
pr_debug("[vp%d] rb count: %d, owner: %p, priv: %p\n",
vp->id, rb->count, vp->owner, priv);
if (vp->owner && vp->owner != priv)
return -EBUSY;
if (rb->count)
rb->count = max_t(u32, CAMIF_REQ_BUFS_MIN, rb->count);
else
vp->owner = NULL;
ret = vb2_reqbufs(&vp->vb_queue, rb);
if (ret < 0)
return ret;
if (rb->count && rb->count < CAMIF_REQ_BUFS_MIN) {
rb->count = 0;
vb2_reqbufs(&vp->vb_queue, rb);
ret = -ENOMEM;
}
vp->reqbufs_count = rb->count;
if (vp->owner == NULL && rb->count > 0)
vp->owner = priv;
return ret;
}
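/*
 * Example (illustrative, not part of the driver): the driver silently raises
 * too small buffer counts to CAMIF_REQ_BUFS_MIN and frees everything again
 * if vb2 cannot allocate that many, so userspace should always read the
 * count back. A minimal sketch:
 *
 *	struct v4l2_requestbuffers rb = {
 *		.count  = 1,
 *		.type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *	};
 *
 *	if (ioctl(fd, VIDIOC_REQBUFS, &rb) == 0)
 *		printf("allocated %u buffers\n", rb.count);
 */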
static int s3c_camif_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct camif_vp *vp = video_drvdata(file);
return vb2_querybuf(&vp->vb_queue, buf);
}
static int s3c_camif_qbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct camif_vp *vp = video_drvdata(file);
pr_debug("[vp%d]\n", vp->id);
if (vp->owner && vp->owner != priv)
return -EBUSY;
return vb2_qbuf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, buf);
}
static int s3c_camif_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct camif_vp *vp = video_drvdata(file);
pr_debug("[vp%d] sequence: %d\n", vp->id, vp->frame_sequence);
if (vp->owner && vp->owner != priv)
return -EBUSY;
return vb2_dqbuf(&vp->vb_queue, buf, file->f_flags & O_NONBLOCK);
}
static int s3c_camif_create_bufs(struct file *file, void *priv,
struct v4l2_create_buffers *create)
{
struct camif_vp *vp = video_drvdata(file);
int ret;
if (vp->owner && vp->owner != priv)
return -EBUSY;
create->count = max_t(u32, 1, create->count);
ret = vb2_create_bufs(&vp->vb_queue, create);
if (!ret && vp->owner == NULL)
vp->owner = priv;
return ret;
}
static int s3c_camif_prepare_buf(struct file *file, void *priv,
struct v4l2_buffer *b)
{
struct camif_vp *vp = video_drvdata(file);
return vb2_prepare_buf(&vp->vb_queue, vp->vdev.v4l2_dev->mdev, b);
}
static int s3c_camif_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct camif_vp *vp = video_drvdata(file);
if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
switch (sel->target) {
case V4L2_SEL_TGT_COMPOSE_BOUNDS:
case V4L2_SEL_TGT_COMPOSE_DEFAULT:
sel->r.left = 0;
sel->r.top = 0;
sel->r.width = vp->out_frame.f_width;
sel->r.height = vp->out_frame.f_height;
return 0;
case V4L2_SEL_TGT_COMPOSE:
sel->r = vp->out_frame.rect;
return 0;
}
return -EINVAL;
}
static void __camif_try_compose(struct camif_dev *camif, struct camif_vp *vp,
struct v4l2_rect *r)
{
/* s3c244x doesn't support composition */
if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV) {
*r = vp->out_frame.rect;
return;
}
/* TODO: s3c64xx */
}
static int s3c_camif_s_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct camif_vp *vp = video_drvdata(file);
struct camif_dev *camif = vp->camif;
struct v4l2_rect rect = sel->r;
unsigned long flags;
if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
sel->target != V4L2_SEL_TGT_COMPOSE)
return -EINVAL;
__camif_try_compose(camif, vp, &rect);
sel->r = rect;
spin_lock_irqsave(&camif->slock, flags);
vp->out_frame.rect = rect;
vp->state |= ST_VP_CONFIG;
spin_unlock_irqrestore(&camif->slock, flags);
pr_debug("type: %#x, target: %#x, flags: %#x, (%d,%d)/%dx%d\n",
sel->type, sel->target, sel->flags,
sel->r.left, sel->r.top, sel->r.width, sel->r.height);
return 0;
}
static const struct v4l2_ioctl_ops s3c_camif_ioctl_ops = {
.vidioc_querycap = s3c_camif_vidioc_querycap,
.vidioc_enum_input = s3c_camif_vidioc_enum_input,
.vidioc_g_input = s3c_camif_vidioc_g_input,
.vidioc_s_input = s3c_camif_vidioc_s_input,
.vidioc_enum_fmt_vid_cap = s3c_camif_vidioc_enum_fmt,
.vidioc_try_fmt_vid_cap = s3c_camif_vidioc_try_fmt,
.vidioc_s_fmt_vid_cap = s3c_camif_vidioc_s_fmt,
.vidioc_g_fmt_vid_cap = s3c_camif_vidioc_g_fmt,
.vidioc_g_selection = s3c_camif_g_selection,
.vidioc_s_selection = s3c_camif_s_selection,
.vidioc_reqbufs = s3c_camif_reqbufs,
.vidioc_querybuf = s3c_camif_querybuf,
.vidioc_prepare_buf = s3c_camif_prepare_buf,
.vidioc_create_bufs = s3c_camif_create_bufs,
.vidioc_qbuf = s3c_camif_qbuf,
.vidioc_dqbuf = s3c_camif_dqbuf,
.vidioc_streamon = s3c_camif_streamon,
.vidioc_streamoff = s3c_camif_streamoff,
.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
.vidioc_log_status = v4l2_ctrl_log_status,
};
/*
* Video node controls
*/
static int s3c_camif_video_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct camif_vp *vp = ctrl->priv;
struct camif_dev *camif = vp->camif;
unsigned long flags;
pr_debug("[vp%d] ctrl: %s, value: %d\n", vp->id,
ctrl->name, ctrl->val);
spin_lock_irqsave(&camif->slock, flags);
switch (ctrl->id) {
case V4L2_CID_HFLIP:
vp->hflip = ctrl->val;
break;
case V4L2_CID_VFLIP:
vp->vflip = ctrl->val;
break;
}
vp->state |= ST_VP_CONFIG;
spin_unlock_irqrestore(&camif->slock, flags);
return 0;
}
/* Codec and preview video node control ops */
static const struct v4l2_ctrl_ops s3c_camif_video_ctrl_ops = {
.s_ctrl = s3c_camif_video_s_ctrl,
};
int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
{
struct camif_vp *vp = &camif->vp[idx];
struct vb2_queue *q = &vp->vb_queue;
struct video_device *vfd = &vp->vdev;
struct v4l2_ctrl *ctrl;
int ret;
memset(vfd, 0, sizeof(*vfd));
snprintf(vfd->name, sizeof(vfd->name), "camif-%s",
vp->id == 0 ? "codec" : "preview");
vfd->fops = &s3c_camif_fops;
vfd->ioctl_ops = &s3c_camif_ioctl_ops;
vfd->v4l2_dev = &camif->v4l2_dev;
vfd->minor = -1;
vfd->release = video_device_release_empty;
vfd->lock = &camif->lock;
vp->reqbufs_count = 0;
INIT_LIST_HEAD(&vp->pending_buf_q);
INIT_LIST_HEAD(&vp->active_buf_q);
memset(q, 0, sizeof(*q));
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->ops = &s3c_camif_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct camif_buffer);
q->drv_priv = vp;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &vp->camif->lock;
q->dev = camif->v4l2_dev.dev;
ret = vb2_queue_init(q);
if (ret)
goto err_vd_rel;
vp->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad);
if (ret)
goto err_vd_rel;
video_set_drvdata(vfd, vp);
v4l2_ctrl_handler_init(&vp->ctrl_handler, 1);
ctrl = v4l2_ctrl_new_std(&vp->ctrl_handler, &s3c_camif_video_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
if (ctrl)
ctrl->priv = vp;
ctrl = v4l2_ctrl_new_std(&vp->ctrl_handler, &s3c_camif_video_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
if (ctrl)
ctrl->priv = vp;
ret = vp->ctrl_handler.error;
if (ret < 0)
goto err_me_cleanup;
vfd->ctrl_handler = &vp->ctrl_handler;
vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE;
ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
if (ret)
goto err_ctrlh_free;
v4l2_info(&camif->v4l2_dev, "registered %s as /dev/%s\n",
vfd->name, video_device_node_name(vfd));
return 0;
err_ctrlh_free:
v4l2_ctrl_handler_free(&vp->ctrl_handler);
err_me_cleanup:
media_entity_cleanup(&vfd->entity);
err_vd_rel:
video_device_release(vfd);
return ret;
}
void s3c_camif_unregister_video_node(struct camif_dev *camif, int idx)
{
struct video_device *vfd = &camif->vp[idx].vdev;
if (video_is_registered(vfd)) {
video_unregister_device(vfd);
media_entity_cleanup(&vfd->entity);
v4l2_ctrl_handler_free(vfd->ctrl_handler);
}
}
/* Media bus pixel formats supported at the camif input */
static const u32 camif_mbus_formats[] = {
MEDIA_BUS_FMT_YUYV8_2X8,
MEDIA_BUS_FMT_YVYU8_2X8,
MEDIA_BUS_FMT_UYVY8_2X8,
MEDIA_BUS_FMT_VYUY8_2X8,
};
/*
* Camera input interface subdev operations
*/
static int s3c_camif_subdev_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(camif_mbus_formats))
return -EINVAL;
code->code = camif_mbus_formats[code->index];
return 0;
}
static int s3c_camif_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf = &fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
fmt->format = *mf;
return 0;
}
mutex_lock(&camif->lock);
switch (fmt->pad) {
case CAMIF_SD_PAD_SINK:
/* full camera input pixel size */
*mf = camif->mbus_fmt;
break;
case CAMIF_SD_PAD_SOURCE_C...CAMIF_SD_PAD_SOURCE_P:
/* crop rectangle at camera interface input */
mf->width = camif->camif_crop.width;
mf->height = camif->camif_crop.height;
mf->code = camif->mbus_fmt.code;
break;
}
mutex_unlock(&camif->lock);
mf->field = V4L2_FIELD_NONE;
mf->colorspace = V4L2_COLORSPACE_JPEG;
return 0;
}
static void __camif_subdev_try_format(struct camif_dev *camif,
struct v4l2_mbus_framefmt *mf, int pad)
{
const struct s3c_camif_variant *variant = camif->variant;
const struct vp_pix_limits *pix_lim;
unsigned int i;
	/* FIXME: should the codec or the preview path constraints apply here? */
pix_lim = &variant->vp_pix_limits[VP_CODEC];
for (i = 0; i < ARRAY_SIZE(camif_mbus_formats); i++)
if (camif_mbus_formats[i] == mf->code)
break;
if (i == ARRAY_SIZE(camif_mbus_formats))
mf->code = camif_mbus_formats[0];
if (pad == CAMIF_SD_PAD_SINK) {
v4l_bound_align_image(&mf->width, 8, CAMIF_MAX_PIX_WIDTH,
ffs(pix_lim->out_width_align) - 1,
&mf->height, 8, CAMIF_MAX_PIX_HEIGHT, 0,
0);
} else {
struct v4l2_rect *crop = &camif->camif_crop;
v4l_bound_align_image(&mf->width, 8, crop->width,
ffs(pix_lim->out_width_align) - 1,
&mf->height, 8, crop->height,
0, 0);
}
v4l2_dbg(1, debug, &camif->subdev, "%ux%u\n", mf->width, mf->height);
}
static int s3c_camif_subdev_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf = &fmt->format;
struct v4l2_rect *crop = &camif->camif_crop;
int i;
v4l2_dbg(1, debug, sd, "pad%d: code: 0x%x, %ux%u\n",
fmt->pad, mf->code, mf->width, mf->height);
mf->field = V4L2_FIELD_NONE;
mf->colorspace = V4L2_COLORSPACE_JPEG;
mutex_lock(&camif->lock);
/*
* No pixel format change at the camera input is allowed
* while streaming.
*/
if (vb2_is_busy(&camif->vp[VP_CODEC].vb_queue) ||
vb2_is_busy(&camif->vp[VP_PREVIEW].vb_queue)) {
mutex_unlock(&camif->lock);
return -EBUSY;
}
__camif_subdev_try_format(camif, mf, fmt->pad);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
*mf = fmt->format;
mutex_unlock(&camif->lock);
return 0;
}
switch (fmt->pad) {
case CAMIF_SD_PAD_SINK:
camif->mbus_fmt = *mf;
/* Reset sink crop rectangle. */
crop->width = mf->width;
crop->height = mf->height;
crop->left = 0;
crop->top = 0;
/*
* Reset source format (the camif's crop rectangle)
* and the video output resolution.
*/
for (i = 0; i < CAMIF_VP_NUM; i++) {
struct camif_frame *frame = &camif->vp[i].out_frame;
frame->rect = *crop;
frame->f_width = mf->width;
frame->f_height = mf->height;
}
break;
case CAMIF_SD_PAD_SOURCE_C...CAMIF_SD_PAD_SOURCE_P:
/* Pixel format can be only changed on the sink pad. */
mf->code = camif->mbus_fmt.code;
mf->width = crop->width;
mf->height = crop->height;
break;
}
mutex_unlock(&camif->lock);
return 0;
}
static int s3c_camif_subdev_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
struct v4l2_rect *crop = &camif->camif_crop;
struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
if ((sel->target != V4L2_SEL_TGT_CROP &&
sel->target != V4L2_SEL_TGT_CROP_BOUNDS) ||
sel->pad != CAMIF_SD_PAD_SINK)
return -EINVAL;
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
return 0;
}
mutex_lock(&camif->lock);
if (sel->target == V4L2_SEL_TGT_CROP) {
sel->r = *crop;
} else { /* crop bounds */
sel->r.width = mf->width;
sel->r.height = mf->height;
sel->r.left = 0;
sel->r.top = 0;
}
mutex_unlock(&camif->lock);
v4l2_dbg(1, debug, sd, "%s: crop: (%d,%d) %dx%d, size: %ux%u\n",
__func__, crop->left, crop->top, crop->width,
crop->height, mf->width, mf->height);
return 0;
}
static void __camif_try_crop(struct camif_dev *camif, struct v4l2_rect *r)
{
struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
const struct camif_pix_limits *pix_lim = &camif->variant->pix_limits;
unsigned int left = 2 * r->left;
unsigned int top = 2 * r->top;
/*
* Following constraints must be met:
* - r->width + 2 * r->left = mf->width;
* - r->height + 2 * r->top = mf->height;
* - crop rectangle size and position must be aligned
* to 8 or 2 pixels, depending on SoC version.
*/
v4l_bound_align_image(&r->width, 0, mf->width,
ffs(pix_lim->win_hor_offset_align) - 1,
&r->height, 0, mf->height, 1, 0);
v4l_bound_align_image(&left, 0, mf->width - r->width,
ffs(pix_lim->win_hor_offset_align),
&top, 0, mf->height - r->height, 2, 0);
r->left = left / 2;
r->top = top / 2;
r->width = mf->width - left;
r->height = mf->height - top;
/*
* Make sure we either downscale or upscale both the pixel
* width and height. Just return current crop rectangle if
* this scaler constraint is not met.
*/
if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV &&
camif_is_streaming(camif)) {
unsigned int i;
for (i = 0; i < CAMIF_VP_NUM; i++) {
struct v4l2_rect *or = &camif->vp[i].out_frame.rect;
if ((or->width > r->width) == (or->height > r->height))
continue;
*r = camif->camif_crop;
pr_debug("Width/height scaling direction limitation\n");
break;
}
}
v4l2_dbg(1, debug, &camif->v4l2_dev, "crop: (%d,%d)/%dx%d, fmt: %ux%u\n",
r->left, r->top, r->width, r->height, mf->width, mf->height);
}
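/*
 * Worked example for the constraints enforced above (numbers illustrative):
 * with a 640x480 mbus format, the "r->width + 2 * r->left == mf->width" rule
 * makes the crop symmetric and centered, so a requested left offset of 10
 * pixels forces r->width = 640 - 2 * 10 = 620 regardless of the width the
 * caller asked for, subject to the SoC-specific alignment applied by
 * v4l_bound_align_image().
 */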
static int s3c_camif_subdev_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
{
struct camif_dev *camif = v4l2_get_subdevdata(sd);
struct v4l2_rect *crop = &camif->camif_crop;
struct camif_scaler scaler;
if (sel->target != V4L2_SEL_TGT_CROP || sel->pad != CAMIF_SD_PAD_SINK)
return -EINVAL;
mutex_lock(&camif->lock);
__camif_try_crop(camif, &sel->r);
if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
*v4l2_subdev_get_try_crop(sd, sd_state, sel->pad) = sel->r;
} else {
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&camif->slock, flags);
*crop = sel->r;
for (i = 0; i < CAMIF_VP_NUM; i++) {
struct camif_vp *vp = &camif->vp[i];
scaler = vp->scaler;
if (s3c_camif_get_scaler_config(vp, &scaler))
continue;
vp->scaler = scaler;
vp->state |= ST_VP_CONFIG;
}
spin_unlock_irqrestore(&camif->slock, flags);
}
mutex_unlock(&camif->lock);
v4l2_dbg(1, debug, sd, "%s: (%d,%d) %dx%d, f_w: %u, f_h: %u\n",
__func__, crop->left, crop->top, crop->width, crop->height,
camif->mbus_fmt.width, camif->mbus_fmt.height);
return 0;
}
static const struct v4l2_subdev_pad_ops s3c_camif_subdev_pad_ops = {
.enum_mbus_code = s3c_camif_subdev_enum_mbus_code,
.get_selection = s3c_camif_subdev_get_selection,
.set_selection = s3c_camif_subdev_set_selection,
.get_fmt = s3c_camif_subdev_get_fmt,
.set_fmt = s3c_camif_subdev_set_fmt,
};
static const struct v4l2_subdev_ops s3c_camif_subdev_ops = {
.pad = &s3c_camif_subdev_pad_ops,
};
static int s3c_camif_subdev_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct camif_dev *camif = container_of(ctrl->handler, struct camif_dev,
ctrl_handler);
unsigned long flags;
spin_lock_irqsave(&camif->slock, flags);
switch (ctrl->id) {
case V4L2_CID_COLORFX:
camif->colorfx = camif->ctrl_colorfx->val;
/* Set Cb, Cr */
switch (ctrl->val) {
case V4L2_COLORFX_SEPIA:
camif->colorfx_cb = 115;
camif->colorfx_cr = 145;
break;
case V4L2_COLORFX_SET_CBCR:
camif->colorfx_cb = camif->ctrl_colorfx_cbcr->val >> 8;
camif->colorfx_cr = camif->ctrl_colorfx_cbcr->val & 0xff;
break;
default:
/* for V4L2_COLORFX_BW and others */
camif->colorfx_cb = 128;
camif->colorfx_cr = 128;
}
break;
case V4L2_CID_TEST_PATTERN:
camif->test_pattern = camif->ctrl_test_pattern->val;
break;
default:
WARN_ON(1);
}
camif->vp[VP_CODEC].state |= ST_VP_CONFIG;
camif->vp[VP_PREVIEW].state |= ST_VP_CONFIG;
spin_unlock_irqrestore(&camif->slock, flags);
return 0;
}
static const struct v4l2_ctrl_ops s3c_camif_subdev_ctrl_ops = {
.s_ctrl = s3c_camif_subdev_s_ctrl,
};
static const char * const s3c_camif_test_pattern_menu[] = {
"Disabled",
"Color bars",
"Horizontal increment",
"Vertical increment",
};
int s3c_camif_create_subdev(struct camif_dev *camif)
{
struct v4l2_ctrl_handler *handler = &camif->ctrl_handler;
struct v4l2_subdev *sd = &camif->subdev;
int ret;
v4l2_subdev_init(sd, &s3c_camif_subdev_ops);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
strscpy(sd->name, "S3C-CAMIF", sizeof(sd->name));
camif->pads[CAMIF_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
camif->pads[CAMIF_SD_PAD_SOURCE_C].flags = MEDIA_PAD_FL_SOURCE;
camif->pads[CAMIF_SD_PAD_SOURCE_P].flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&sd->entity, CAMIF_SD_PADS_NUM,
camif->pads);
if (ret)
return ret;
v4l2_ctrl_handler_init(handler, 3);
camif->ctrl_test_pattern = v4l2_ctrl_new_std_menu_items(handler,
&s3c_camif_subdev_ctrl_ops, V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(s3c_camif_test_pattern_menu) - 1, 0, 0,
s3c_camif_test_pattern_menu);
if (camif->variant->has_img_effect) {
camif->ctrl_colorfx = v4l2_ctrl_new_std_menu(handler,
&s3c_camif_subdev_ctrl_ops,
V4L2_CID_COLORFX, V4L2_COLORFX_SET_CBCR,
~0x981f, V4L2_COLORFX_NONE);
camif->ctrl_colorfx_cbcr = v4l2_ctrl_new_std(handler,
&s3c_camif_subdev_ctrl_ops,
V4L2_CID_COLORFX_CBCR, 0, 0xffff, 1, 0);
}
if (handler->error) {
v4l2_ctrl_handler_free(handler);
media_entity_cleanup(&sd->entity);
return handler->error;
}
if (camif->variant->has_img_effect)
v4l2_ctrl_auto_cluster(2, &camif->ctrl_colorfx,
V4L2_COLORFX_SET_CBCR, false);
sd->ctrl_handler = handler;
v4l2_set_subdevdata(sd, camif);
return 0;
}
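/*
 * Example (illustrative, not part of the driver): on variants with image
 * effect support these controls are exposed on the CAMIF /dev/v4l-subdevN
 * node. V4L2_CID_COLORFX_CBCR packs Cb into bits 15:8 and Cr into bits 7:0,
 * matching the ">> 8" / "& 0xff" split in s3c_camif_subdev_s_ctrl(). A
 * sketch, assuming sd_fd is an open subdev handle:
 *
 *	struct v4l2_control ctrl;
 *
 *	ctrl.id = V4L2_CID_TEST_PATTERN;
 *	ctrl.value = 1;				// "Color bars"
 *	ioctl(sd_fd, VIDIOC_S_CTRL, &ctrl);
 *
 *	ctrl.id = V4L2_CID_COLORFX;
 *	ctrl.value = V4L2_COLORFX_SET_CBCR;
 *	ioctl(sd_fd, VIDIOC_S_CTRL, &ctrl);
 *
 *	ctrl.id = V4L2_CID_COLORFX_CBCR;
 *	ctrl.value = (115 << 8) | 145;		// Cb = 115, Cr = 145
 *	ioctl(sd_fd, VIDIOC_S_CTRL, &ctrl);
 */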
void s3c_camif_unregister_subdev(struct camif_dev *camif)
{
struct v4l2_subdev *sd = &camif->subdev;
/* Return if not registered */
if (v4l2_get_subdevdata(sd) == NULL)
return;
v4l2_device_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(&camif->ctrl_handler);
v4l2_set_subdevdata(sd, NULL);
}
int s3c_camif_set_defaults(struct camif_dev *camif)
{
unsigned int ip_rev = camif->variant->ip_revision;
int i;
for (i = 0; i < CAMIF_VP_NUM; i++) {
struct camif_vp *vp = &camif->vp[i];
struct camif_frame *f = &vp->out_frame;
vp->camif = camif;
vp->id = i;
vp->offset = camif->variant->vp_offset;
if (ip_rev == S3C244X_CAMIF_IP_REV)
vp->fmt_flags = i ? FMT_FL_S3C24XX_PREVIEW :
FMT_FL_S3C24XX_CODEC;
else
vp->fmt_flags = FMT_FL_S3C64XX;
vp->out_fmt = s3c_camif_find_format(vp, NULL, 0);
BUG_ON(vp->out_fmt == NULL);
memset(f, 0, sizeof(*f));
f->f_width = CAMIF_DEF_WIDTH;
f->f_height = CAMIF_DEF_HEIGHT;
f->rect.width = CAMIF_DEF_WIDTH;
f->rect.height = CAMIF_DEF_HEIGHT;
/* Scaler is always enabled */
vp->scaler.enable = 1;
vp->payload = (f->f_width * f->f_height *
vp->out_fmt->depth) / 8;
}
memset(&camif->mbus_fmt, 0, sizeof(camif->mbus_fmt));
camif->mbus_fmt.width = CAMIF_DEF_WIDTH;
camif->mbus_fmt.height = CAMIF_DEF_HEIGHT;
camif->mbus_fmt.code = camif_mbus_formats[0];
memset(&camif->camif_crop, 0, sizeof(camif->camif_crop));
camif->camif_crop.width = CAMIF_DEF_WIDTH;
camif->camif_crop.height = CAMIF_DEF_HEIGHT;
return 0;
}
| linux-master | drivers/media/platform/samsung/s3c-camif/camif-capture.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung s3c24xx/s3c64xx SoC CAMIF driver
*
* Copyright (C) 2012 Sylwester Nawrocki <[email protected]>
* Copyright (C) 2012 Tomasz Figa <[email protected]>
*/
#define pr_fmt(fmt) "%s:%d " fmt, __func__, __LINE__
#include <linux/delay.h>
#include "camif-regs.h"
#define camif_write(_camif, _off, _val) writel(_val, (_camif)->io_base + (_off))
#define camif_read(_camif, _off) readl((_camif)->io_base + (_off))
void camif_hw_reset(struct camif_dev *camif)
{
u32 cfg;
cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT);
cfg |= CISRCFMT_ITU601_8BIT;
camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg);
/* S/W reset */
cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
cfg |= CIGCTRL_SWRST;
if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV)
cfg |= CIGCTRL_IRQ_LEVEL;
camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
udelay(10);
cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
cfg &= ~CIGCTRL_SWRST;
camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
udelay(10);
}
void camif_hw_clear_pending_irq(struct camif_vp *vp)
{
u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_CIGCTRL);
cfg |= CIGCTRL_IRQ_CLR(vp->id);
camif_write(vp->camif, S3C_CAMIF_REG_CIGCTRL, cfg);
}
/*
 * Sets the video test pattern (disabled, color bars, horizontal or
 * vertical increment).
* External sensor pixel clock must be active for the test pattern to work.
*/
void camif_hw_set_test_pattern(struct camif_dev *camif, unsigned int pattern)
{
u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
cfg &= ~CIGCTRL_TESTPATTERN_MASK;
cfg |= (pattern << 27);
camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
}
void camif_hw_set_effect(struct camif_dev *camif, unsigned int effect,
unsigned int cr, unsigned int cb)
{
static const struct v4l2_control colorfx[] = {
{ V4L2_COLORFX_NONE, CIIMGEFF_FIN_BYPASS },
{ V4L2_COLORFX_BW, CIIMGEFF_FIN_ARBITRARY },
{ V4L2_COLORFX_SEPIA, CIIMGEFF_FIN_ARBITRARY },
{ V4L2_COLORFX_NEGATIVE, CIIMGEFF_FIN_NEGATIVE },
{ V4L2_COLORFX_ART_FREEZE, CIIMGEFF_FIN_ARTFREEZE },
{ V4L2_COLORFX_EMBOSS, CIIMGEFF_FIN_EMBOSSING },
{ V4L2_COLORFX_SILHOUETTE, CIIMGEFF_FIN_SILHOUETTE },
{ V4L2_COLORFX_SET_CBCR, CIIMGEFF_FIN_ARBITRARY },
};
unsigned int i, cfg;
for (i = 0; i < ARRAY_SIZE(colorfx); i++)
if (colorfx[i].id == effect)
break;
if (i == ARRAY_SIZE(colorfx))
return;
cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset));
/* Set effect */
cfg &= ~CIIMGEFF_FIN_MASK;
cfg |= colorfx[i].value;
	/* Enable or disable the effect on both capture paths */
if (camif->variant->ip_revision >= S3C6400_CAMIF_IP_REV) {
if (effect == V4L2_COLORFX_NONE)
cfg &= ~CIIMGEFF_IE_ENABLE_MASK;
else
cfg |= CIIMGEFF_IE_ENABLE_MASK;
}
cfg &= ~CIIMGEFF_PAT_CBCR_MASK;
cfg |= cr | (cb << 13);
camif_write(camif, S3C_CAMIF_REG_CIIMGEFF(camif->vp->offset), cfg);
}
static const u32 src_pixfmt_map[][2] = {
{ MEDIA_BUS_FMT_YUYV8_2X8, CISRCFMT_ORDER422_YCBYCR },
{ MEDIA_BUS_FMT_YVYU8_2X8, CISRCFMT_ORDER422_YCRYCB },
{ MEDIA_BUS_FMT_UYVY8_2X8, CISRCFMT_ORDER422_CBYCRY },
{ MEDIA_BUS_FMT_VYUY8_2X8, CISRCFMT_ORDER422_CRYCBY },
};
/* Set camera input pixel format and resolution */
void camif_hw_set_source_format(struct camif_dev *camif)
{
struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
int i;
u32 cfg;
for (i = ARRAY_SIZE(src_pixfmt_map) - 1; i >= 0; i--) {
if (src_pixfmt_map[i][0] == mf->code)
break;
}
if (i < 0) {
i = 0;
dev_err(camif->dev,
"Unsupported pixel code, falling back to %#08x\n",
src_pixfmt_map[i][0]);
}
cfg = camif_read(camif, S3C_CAMIF_REG_CISRCFMT);
cfg &= ~(CISRCFMT_ORDER422_MASK | CISRCFMT_SIZE_CAM_MASK);
cfg |= (mf->width << 16) | mf->height;
cfg |= src_pixfmt_map[i][1];
camif_write(camif, S3C_CAMIF_REG_CISRCFMT, cfg);
}
/* Set the camera host input window offsets (cropping) */
void camif_hw_set_camera_crop(struct camif_dev *camif)
{
struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
struct v4l2_rect *crop = &camif->camif_crop;
u32 hoff2, voff2;
u32 cfg;
	/* Note: s3c244x requirement: left = (f_width - rect.width) / 2 */
cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST);
cfg &= ~(CIWDOFST_OFST_MASK | CIWDOFST_WINOFSEN);
cfg |= (crop->left << 16) | crop->top;
if (crop->left != 0 || crop->top != 0)
cfg |= CIWDOFST_WINOFSEN;
camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg);
if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
hoff2 = mf->width - crop->width - crop->left;
voff2 = mf->height - crop->height - crop->top;
cfg = (hoff2 << 16) | voff2;
camif_write(camif, S3C_CAMIF_REG_CIWDOFST2, cfg);
}
}
void camif_hw_clear_fifo_overflow(struct camif_vp *vp)
{
struct camif_dev *camif = vp->camif;
u32 cfg;
cfg = camif_read(camif, S3C_CAMIF_REG_CIWDOFST);
if (vp->id == 0)
cfg |= (CIWDOFST_CLROVCOFIY | CIWDOFST_CLROVCOFICB |
CIWDOFST_CLROVCOFICR);
else
cfg |= (/* CIWDOFST_CLROVPRFIY | */ CIWDOFST_CLROVPRFICB |
CIWDOFST_CLROVPRFICR);
camif_write(camif, S3C_CAMIF_REG_CIWDOFST, cfg);
}
/* Set video bus signals polarity */
void camif_hw_set_camera_bus(struct camif_dev *camif)
{
unsigned int flags = camif->pdata.sensor.flags;
u32 cfg = camif_read(camif, S3C_CAMIF_REG_CIGCTRL);
cfg &= ~(CIGCTRL_INVPOLPCLK | CIGCTRL_INVPOLVSYNC |
CIGCTRL_INVPOLHREF | CIGCTRL_INVPOLFIELD);
if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
cfg |= CIGCTRL_INVPOLPCLK;
if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
cfg |= CIGCTRL_INVPOLVSYNC;
/*
* HREF is normally high during frame active data
* transmission and low during horizontal synchronization
* period. Thus HREF active high means HSYNC active low.
*/
if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
cfg |= CIGCTRL_INVPOLHREF; /* HREF active low */
if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
if (flags & V4L2_MBUS_FIELD_EVEN_LOW)
cfg |= CIGCTRL_INVPOLFIELD;
cfg |= CIGCTRL_FIELDMODE;
}
pr_debug("Setting CIGCTRL to: %#x\n", cfg);
camif_write(camif, S3C_CAMIF_REG_CIGCTRL, cfg);
}
void camif_hw_set_output_addr(struct camif_vp *vp,
struct camif_addr *paddr, int i)
{
struct camif_dev *camif = vp->camif;
camif_write(camif, S3C_CAMIF_REG_CIYSA(vp->id, i), paddr->y);
if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV
|| vp->id == VP_CODEC) {
camif_write(camif, S3C_CAMIF_REG_CICBSA(vp->id, i),
paddr->cb);
camif_write(camif, S3C_CAMIF_REG_CICRSA(vp->id, i),
paddr->cr);
}
pr_debug("dst_buf[%d]: %pad, cb: %pad, cr: %pad\n",
i, &paddr->y, &paddr->cb, &paddr->cr);
}
static void camif_hw_set_out_dma_size(struct camif_vp *vp)
{
struct camif_frame *frame = &vp->out_frame;
u32 cfg;
cfg = camif_read(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
cfg &= ~CITRGFMT_TARGETSIZE_MASK;
cfg |= (frame->f_width << 16) | frame->f_height;
camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
}
static void camif_get_dma_burst(u32 width, u32 ybpp, u32 *mburst, u32 *rburst)
{
unsigned int nwords = width * ybpp / 4;
unsigned int div, rem;
if (WARN_ON(width < 8 || (width * ybpp) & 7))
return;
for (div = 16; div >= 2; div /= 2) {
if (nwords < div)
continue;
rem = nwords & (div - 1);
if (rem == 0) {
*mburst = div;
*rburst = div;
break;
}
if (rem == div / 2 || rem == div / 4) {
*mburst = div;
*rburst = rem;
break;
}
}
}
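/*
 * Worked example for camif_get_dma_burst() above: one burst word is 4 bytes,
 * so a 640-pixel line with 1 byte/pixel of luminance (ybpp == 1) gives
 * nwords = 160, which divides evenly by 16, while a 720-pixel line gives
 * nwords = 180 and 180 % 16 == 4 == 16 / 4, so the main burst stays at
 * 16 words and the line remainder is transferred with a 4-word burst:
 *
 *	unsigned int m = 0, r = 0;
 *
 *	camif_get_dma_burst(640, 1, &m, &r);	// m = 16, r = 16
 *	camif_get_dma_burst(720, 1, &m, &r);	// m = 16, r = 4
 */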
void camif_hw_set_output_dma(struct camif_vp *vp)
{
struct camif_dev *camif = vp->camif;
struct camif_frame *frame = &vp->out_frame;
const struct camif_fmt *fmt = vp->out_fmt;
unsigned int ymburst = 0, yrburst = 0;
u32 cfg;
camif_hw_set_out_dma_size(vp);
if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV) {
struct camif_dma_offset *offset = &frame->dma_offset;
/* Set the input dma offsets. */
cfg = S3C_CISS_OFFS_INITIAL(offset->initial);
cfg |= S3C_CISS_OFFS_LINE(offset->line);
camif_write(camif, S3C_CAMIF_REG_CISSY(vp->id), cfg);
camif_write(camif, S3C_CAMIF_REG_CISSCB(vp->id), cfg);
camif_write(camif, S3C_CAMIF_REG_CISSCR(vp->id), cfg);
}
/* Configure DMA burst values */
camif_get_dma_burst(frame->rect.width, fmt->ybpp, &ymburst, &yrburst);
cfg = camif_read(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset));
cfg &= ~CICTRL_BURST_MASK;
cfg |= CICTRL_YBURST1(ymburst) | CICTRL_YBURST2(yrburst);
cfg |= CICTRL_CBURST1(ymburst / 2) | CICTRL_CBURST2(yrburst / 2);
camif_write(camif, S3C_CAMIF_REG_CICTRL(vp->id, vp->offset), cfg);
pr_debug("ymburst: %u, yrburst: %u\n", ymburst, yrburst);
}
void camif_hw_set_input_path(struct camif_vp *vp)
{
u32 cfg = camif_read(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id));
cfg &= ~MSCTRL_SEL_DMA_CAM;
camif_write(vp->camif, S3C_CAMIF_REG_MSCTRL(vp->id), cfg);
}
void camif_hw_set_target_format(struct camif_vp *vp)
{
struct camif_dev *camif = vp->camif;
struct camif_frame *frame = &vp->out_frame;
u32 cfg;
pr_debug("fw: %d, fh: %d color: %d\n", frame->f_width,
frame->f_height, vp->out_fmt->color);
cfg = camif_read(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
cfg &= ~CITRGFMT_TARGETSIZE_MASK;
if (camif->variant->ip_revision == S3C244X_CAMIF_IP_REV) {
/* We currently support only YCbCr 4:2:2 at the camera input */
cfg |= CITRGFMT_IN422;
cfg &= ~CITRGFMT_OUT422;
if (vp->out_fmt->color == IMG_FMT_YCBCR422P)
cfg |= CITRGFMT_OUT422;
} else {
cfg &= ~CITRGFMT_OUTFORMAT_MASK;
switch (vp->out_fmt->color) {
case IMG_FMT_RGB565...IMG_FMT_XRGB8888:
cfg |= CITRGFMT_OUTFORMAT_RGB;
break;
case IMG_FMT_YCBCR420...IMG_FMT_YCRCB420:
cfg |= CITRGFMT_OUTFORMAT_YCBCR420;
break;
case IMG_FMT_YCBCR422P:
cfg |= CITRGFMT_OUTFORMAT_YCBCR422;
break;
case IMG_FMT_YCBYCR422...IMG_FMT_CRYCBY422:
cfg |= CITRGFMT_OUTFORMAT_YCBCR422I;
break;
}
}
/* Rotation is only supported by s3c64xx */
if (vp->rotation == 90 || vp->rotation == 270)
cfg |= (frame->f_height << 16) | frame->f_width;
else
cfg |= (frame->f_width << 16) | frame->f_height;
camif_write(camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
/* Target area, output pixel width * height */
cfg = camif_read(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset));
cfg &= ~CITAREA_MASK;
cfg |= (frame->f_width * frame->f_height);
camif_write(camif, S3C_CAMIF_REG_CITAREA(vp->id, vp->offset), cfg);
}
void camif_hw_set_flip(struct camif_vp *vp)
{
u32 cfg = camif_read(vp->camif,
S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset));
cfg &= ~CITRGFMT_FLIP_MASK;
if (vp->hflip)
cfg |= CITRGFMT_FLIP_Y_MIRROR;
if (vp->vflip)
cfg |= CITRGFMT_FLIP_X_MIRROR;
camif_write(vp->camif, S3C_CAMIF_REG_CITRGFMT(vp->id, vp->offset), cfg);
}
static void camif_hw_set_prescaler(struct camif_vp *vp)
{
struct camif_dev *camif = vp->camif;
struct camif_scaler *sc = &vp->scaler;
u32 cfg, shfactor, addr;
addr = S3C_CAMIF_REG_CISCPRERATIO(vp->id, vp->offset);
shfactor = 10 - (sc->h_shift + sc->v_shift);
cfg = shfactor << 28;
cfg |= (sc->pre_h_ratio << 16) | sc->pre_v_ratio;
camif_write(camif, addr, cfg);
cfg = (sc->pre_dst_width << 16) | sc->pre_dst_height;
camif_write(camif, S3C_CAMIF_REG_CISCPREDST(vp->id, vp->offset), cfg);
}
static void camif_s3c244x_hw_set_scaler(struct camif_vp *vp)
{
struct camif_dev *camif = vp->camif;
struct camif_scaler *scaler = &vp->scaler;
unsigned int color = vp->out_fmt->color;
u32 cfg;
camif_hw_set_prescaler(vp);
cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset));
cfg &= ~(CISCCTRL_SCALEUP_MASK | CISCCTRL_SCALERBYPASS |
CISCCTRL_MAIN_RATIO_MASK | CIPRSCCTRL_RGB_FORMAT_24BIT);
if (scaler->enable) {
if (scaler->scaleup_h) {
if (vp->id == VP_CODEC)
cfg |= CISCCTRL_SCALEUP_H;
else
cfg |= CIPRSCCTRL_SCALEUP_H;
}
if (scaler->scaleup_v) {
if (vp->id == VP_CODEC)
cfg |= CISCCTRL_SCALEUP_V;
else
cfg |= CIPRSCCTRL_SCALEUP_V;
}
} else {
if (vp->id == VP_CODEC)
cfg |= CISCCTRL_SCALERBYPASS;
}
cfg |= ((scaler->main_h_ratio & 0x1ff) << 16);
cfg |= scaler->main_v_ratio & 0x1ff;
if (vp->id == VP_PREVIEW) {
if (color == IMG_FMT_XRGB8888)
cfg |= CIPRSCCTRL_RGB_FORMAT_24BIT;
cfg |= CIPRSCCTRL_SAMPLE;
}
camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg);
pr_debug("main: h_ratio: %#x, v_ratio: %#x",
scaler->main_h_ratio, scaler->main_v_ratio);
}
static void camif_s3c64xx_hw_set_scaler(struct camif_vp *vp)
{
struct camif_dev *camif = vp->camif;
struct camif_scaler *scaler = &vp->scaler;
unsigned int color = vp->out_fmt->color;
u32 cfg;
camif_hw_set_prescaler(vp);
cfg = camif_read(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset));
cfg &= ~(CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE
| CISCCTRL_SCALEUP_H | CISCCTRL_SCALEUP_V
| CISCCTRL_SCALERBYPASS | CISCCTRL_ONE2ONE
| CISCCTRL_INRGB_FMT_MASK | CISCCTRL_OUTRGB_FMT_MASK
| CISCCTRL_INTERLACE | CISCCTRL_EXTRGB_EXTENSION
| CISCCTRL_MAIN_RATIO_MASK);
cfg |= (CISCCTRL_CSCR2Y_WIDE | CISCCTRL_CSCY2R_WIDE);
if (!scaler->enable) {
cfg |= CISCCTRL_SCALERBYPASS;
} else {
if (scaler->scaleup_h)
cfg |= CISCCTRL_SCALEUP_H;
if (scaler->scaleup_v)
cfg |= CISCCTRL_SCALEUP_V;
if (scaler->copy)
cfg |= CISCCTRL_ONE2ONE;
}
switch (color) {
case IMG_FMT_RGB666:
cfg |= CISCCTRL_OUTRGB_FMT_RGB666;
break;
case IMG_FMT_XRGB8888:
cfg |= CISCCTRL_OUTRGB_FMT_RGB888;
break;
}
cfg |= (scaler->main_h_ratio & 0x1ff) << 16;
cfg |= scaler->main_v_ratio & 0x1ff;
camif_write(camif, S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset), cfg);
pr_debug("main: h_ratio: %#x, v_ratio: %#x",
scaler->main_h_ratio, scaler->main_v_ratio);
}
void camif_hw_set_scaler(struct camif_vp *vp)
{
unsigned int ip_rev = vp->camif->variant->ip_revision;
if (ip_rev == S3C244X_CAMIF_IP_REV)
camif_s3c244x_hw_set_scaler(vp);
else
camif_s3c64xx_hw_set_scaler(vp);
}
void camif_hw_enable_scaler(struct camif_vp *vp, bool on)
{
u32 addr = S3C_CAMIF_REG_CISCCTRL(vp->id, vp->offset);
u32 cfg;
cfg = camif_read(vp->camif, addr);
if (on)
cfg |= CISCCTRL_SCALERSTART;
else
cfg &= ~CISCCTRL_SCALERSTART;
camif_write(vp->camif, addr, cfg);
}
void camif_hw_set_lastirq(struct camif_vp *vp, int enable)
{
u32 addr = S3C_CAMIF_REG_CICTRL(vp->id, vp->offset);
u32 cfg;
cfg = camif_read(vp->camif, addr);
if (enable)
cfg |= CICTRL_LASTIRQ_ENABLE;
else
cfg &= ~CICTRL_LASTIRQ_ENABLE;
camif_write(vp->camif, addr, cfg);
}
void camif_hw_enable_capture(struct camif_vp *vp)
{
struct camif_dev *camif = vp->camif;
u32 cfg;
cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset));
camif->stream_count++;
if (camif->variant->ip_revision == S3C6410_CAMIF_IP_REV)
cfg |= CIIMGCPT_CPT_FREN_ENABLE(vp->id);
if (vp->scaler.enable)
cfg |= CIIMGCPT_IMGCPTEN_SC(vp->id);
if (camif->stream_count == 1)
cfg |= CIIMGCPT_IMGCPTEN;
camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg);
pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n",
cfg, camif->stream_count);
}
void camif_hw_disable_capture(struct camif_vp *vp)
{
struct camif_dev *camif = vp->camif;
u32 cfg;
cfg = camif_read(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset));
cfg &= ~CIIMGCPT_IMGCPTEN_SC(vp->id);
if (WARN_ON(--(camif->stream_count) < 0))
camif->stream_count = 0;
if (camif->stream_count == 0)
cfg &= ~CIIMGCPT_IMGCPTEN;
pr_debug("CIIMGCPT: %#x, camif->stream_count: %d\n",
cfg, camif->stream_count);
camif_write(camif, S3C_CAMIF_REG_CIIMGCPT(vp->offset), cfg);
}
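/*
 * Example of the stream_count handshake above: when the codec path starts
 * streaming first, camif_hw_enable_capture() bumps stream_count to 1 and
 * sets the global CIIMGCPT_IMGCPTEN bit together with the codec path's
 * per-path bits. When the preview path starts next, stream_count becomes 2
 * and only the per-path bits are added. Only the last
 * camif_hw_disable_capture() call, which brings stream_count back to 0,
 * clears the global enable bit for the whole block.
 */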
void camif_hw_dump_regs(struct camif_dev *camif, const char *label)
{
static const struct {
u32 offset;
const char * const name;
} registers[] = {
{ S3C_CAMIF_REG_CISRCFMT, "CISRCFMT" },
{ S3C_CAMIF_REG_CIWDOFST, "CIWDOFST" },
{ S3C_CAMIF_REG_CIGCTRL, "CIGCTRL" },
{ S3C_CAMIF_REG_CIWDOFST2, "CIWDOFST2" },
{ S3C_CAMIF_REG_CIYSA(0, 0), "CICOYSA0" },
{ S3C_CAMIF_REG_CICBSA(0, 0), "CICOCBSA0" },
{ S3C_CAMIF_REG_CICRSA(0, 0), "CICOCRSA0" },
{ S3C_CAMIF_REG_CIYSA(0, 1), "CICOYSA1" },
{ S3C_CAMIF_REG_CICBSA(0, 1), "CICOCBSA1" },
{ S3C_CAMIF_REG_CICRSA(0, 1), "CICOCRSA1" },
{ S3C_CAMIF_REG_CIYSA(0, 2), "CICOYSA2" },
{ S3C_CAMIF_REG_CICBSA(0, 2), "CICOCBSA2" },
{ S3C_CAMIF_REG_CICRSA(0, 2), "CICOCRSA2" },
{ S3C_CAMIF_REG_CIYSA(0, 3), "CICOYSA3" },
{ S3C_CAMIF_REG_CICBSA(0, 3), "CICOCBSA3" },
{ S3C_CAMIF_REG_CICRSA(0, 3), "CICOCRSA3" },
{ S3C_CAMIF_REG_CIYSA(1, 0), "CIPRYSA0" },
{ S3C_CAMIF_REG_CIYSA(1, 1), "CIPRYSA1" },
{ S3C_CAMIF_REG_CIYSA(1, 2), "CIPRYSA2" },
{ S3C_CAMIF_REG_CIYSA(1, 3), "CIPRYSA3" },
{ S3C_CAMIF_REG_CITRGFMT(0, 0), "CICOTRGFMT" },
{ S3C_CAMIF_REG_CITRGFMT(1, 0), "CIPRTRGFMT" },
{ S3C_CAMIF_REG_CICTRL(0, 0), "CICOCTRL" },
{ S3C_CAMIF_REG_CICTRL(1, 0), "CIPRCTRL" },
{ S3C_CAMIF_REG_CISCPREDST(0, 0), "CICOSCPREDST" },
{ S3C_CAMIF_REG_CISCPREDST(1, 0), "CIPRSCPREDST" },
{ S3C_CAMIF_REG_CISCPRERATIO(0, 0), "CICOSCPRERATIO" },
{ S3C_CAMIF_REG_CISCPRERATIO(1, 0), "CIPRSCPRERATIO" },
{ S3C_CAMIF_REG_CISCCTRL(0, 0), "CICOSCCTRL" },
{ S3C_CAMIF_REG_CISCCTRL(1, 0), "CIPRSCCTRL" },
{ S3C_CAMIF_REG_CITAREA(0, 0), "CICOTAREA" },
{ S3C_CAMIF_REG_CITAREA(1, 0), "CIPRTAREA" },
{ S3C_CAMIF_REG_CISTATUS(0, 0), "CICOSTATUS" },
{ S3C_CAMIF_REG_CISTATUS(1, 0), "CIPRSTATUS" },
{ S3C_CAMIF_REG_CIIMGCPT(0), "CIIMGCPT" },
};
u32 i;
pr_info("--- %s ---\n", label);
for (i = 0; i < ARRAY_SIZE(registers); i++) {
u32 cfg = readl(camif->io_base + registers[i].offset);
dev_info(camif->dev, "%s:\t0x%08x\n", registers[i].name, cfg);
}
}
| linux-master | drivers/media/platform/samsung/s3c-camif/camif-regs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
*
* Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
*
* Authors: Younghwan Joo <[email protected]>
* Sylwester Nawrocki <[email protected]>
*/
#include <linux/delay.h>
#include "fimc-is.h"
#include "fimc-is-command.h"
#include "fimc-is-regs.h"
#include "fimc-is-sensor.h"
void fimc_is_fw_clear_irq1(struct fimc_is *is, unsigned int nr)
{
mcuctl_write(1UL << nr, is, MCUCTL_REG_INTCR1);
}
void fimc_is_fw_clear_irq2(struct fimc_is *is)
{
u32 cfg = mcuctl_read(is, MCUCTL_REG_INTSR2);
mcuctl_write(cfg, is, MCUCTL_REG_INTCR2);
}
void fimc_is_hw_set_intgr0_gd0(struct fimc_is *is)
{
mcuctl_write(INTGR0_INTGD(0), is, MCUCTL_REG_INTGR0);
}
int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is *is)
{
unsigned int timeout = 2000;
u32 cfg, status;
do {
cfg = mcuctl_read(is, MCUCTL_REG_INTMSR0);
status = INTMSR0_GET_INTMSD(0, cfg);
if (--timeout == 0) {
dev_warn(&is->pdev->dev, "%s timeout\n",
__func__);
return -ETIMEDOUT;
}
udelay(1);
} while (status != 0);
return 0;
}
int fimc_is_hw_set_param(struct fimc_is *is)
{
struct chain_config *config = &is->config[is->config_index];
unsigned int param_count = __get_pending_param_count(is);
fimc_is_hw_wait_intmsr0_intmsd0(is);
mcuctl_write(HIC_SET_PARAMETER, is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
mcuctl_write(is->config_index, is, MCUCTL_REG_ISSR(2));
mcuctl_write(param_count, is, MCUCTL_REG_ISSR(3));
mcuctl_write(config->p_region_index[0], is, MCUCTL_REG_ISSR(4));
mcuctl_write(config->p_region_index[1], is, MCUCTL_REG_ISSR(5));
fimc_is_hw_set_intgr0_gd0(is);
return 0;
}
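/*
 * Shared register (mailbox) layout used by fimc_is_hw_set_param() above,
 * one u32 per ISSR slot:
 *
 *	ISSR0: HIC_SET_PARAMETER command
 *	ISSR1: sensor index
 *	ISSR2: configuration (mode) index
 *	ISSR3: number of pending parameters
 *	ISSR4: pending parameter bitmap, word 0 (p_region_index[0])
 *	ISSR5: pending parameter bitmap, word 1 (p_region_index[1])
 *
 * The INTGR0_INTGD(0) write in fimc_is_hw_set_intgr0_gd0() then acts as a
 * doorbell telling the ISP firmware to pick the command up.
 */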
static int __maybe_unused fimc_is_hw_set_tune(struct fimc_is *is)
{
fimc_is_hw_wait_intmsr0_intmsd0(is);
mcuctl_write(HIC_SET_TUNE, is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
mcuctl_write(is->h2i_cmd.entry_id, is, MCUCTL_REG_ISSR(2));
fimc_is_hw_set_intgr0_gd0(is);
return 0;
}
#define FIMC_IS_MAX_PARAMS 4
int fimc_is_hw_get_params(struct fimc_is *is, unsigned int num_args)
{
int i;
if (num_args > FIMC_IS_MAX_PARAMS)
return -EINVAL;
is->i2h_cmd.num_args = num_args;
for (i = 0; i < FIMC_IS_MAX_PARAMS; i++) {
if (i < num_args)
is->i2h_cmd.args[i] = mcuctl_read(is,
MCUCTL_REG_ISSR(12 + i));
else
is->i2h_cmd.args[i] = 0;
}
return 0;
}
void fimc_is_hw_set_isp_buf_mask(struct fimc_is *is, unsigned int mask)
{
if (hweight32(mask) == 1) {
dev_err(&is->pdev->dev, "%s(): not enough buffers (mask %#x)\n",
__func__, mask);
return;
}
if (mcuctl_read(is, MCUCTL_REG_ISSR(23)) != 0)
dev_dbg(&is->pdev->dev, "non-zero DMA buffer mask\n");
mcuctl_write(mask, is, MCUCTL_REG_ISSR(23));
}
void fimc_is_hw_set_sensor_num(struct fimc_is *is)
{
pr_debug("setting sensor index to: %d\n", is->sensor_index);
mcuctl_write(IH_REPLY_DONE, is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
mcuctl_write(IHC_GET_SENSOR_NUM, is, MCUCTL_REG_ISSR(2));
mcuctl_write(FIMC_IS_SENSORS_NUM, is, MCUCTL_REG_ISSR(3));
}
void fimc_is_hw_close_sensor(struct fimc_is *is, unsigned int index)
{
if (is->sensor_index != index)
return;
fimc_is_hw_wait_intmsr0_intmsd0(is);
mcuctl_write(HIC_CLOSE_SENSOR, is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(2));
fimc_is_hw_set_intgr0_gd0(is);
}
void fimc_is_hw_get_setfile_addr(struct fimc_is *is)
{
fimc_is_hw_wait_intmsr0_intmsd0(is);
mcuctl_write(HIC_GET_SET_FILE_ADDR, is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
fimc_is_hw_set_intgr0_gd0(is);
}
void fimc_is_hw_load_setfile(struct fimc_is *is)
{
fimc_is_hw_wait_intmsr0_intmsd0(is);
mcuctl_write(HIC_LOAD_SET_FILE, is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
fimc_is_hw_set_intgr0_gd0(is);
}
int fimc_is_hw_change_mode(struct fimc_is *is)
{
static const u8 cmd[] = {
HIC_PREVIEW_STILL, HIC_PREVIEW_VIDEO,
HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO,
};
if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
return -EINVAL;
mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
mcuctl_write(is->setfile.sub_index, is, MCUCTL_REG_ISSR(2));
fimc_is_hw_set_intgr0_gd0(is);
return 0;
}
void fimc_is_hw_stream_on(struct fimc_is *is)
{
fimc_is_hw_wait_intmsr0_intmsd0(is);
mcuctl_write(HIC_STREAM_ON, is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
mcuctl_write(0, is, MCUCTL_REG_ISSR(2));
fimc_is_hw_set_intgr0_gd0(is);
}
void fimc_is_hw_stream_off(struct fimc_is *is)
{
fimc_is_hw_wait_intmsr0_intmsd0(is);
mcuctl_write(HIC_STREAM_OFF, is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
fimc_is_hw_set_intgr0_gd0(is);
}
void fimc_is_hw_subip_power_off(struct fimc_is *is)
{
fimc_is_hw_wait_intmsr0_intmsd0(is);
mcuctl_write(HIC_POWER_DOWN, is, MCUCTL_REG_ISSR(0));
mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
fimc_is_hw_set_intgr0_gd0(is);
}
int fimc_is_itf_s_param(struct fimc_is *is, bool update)
{
int ret;
if (update)
__is_hw_update_params(is);
fimc_is_mem_barrier();
clear_bit(IS_ST_BLOCK_CMD_CLEARED, &is->state);
fimc_is_hw_set_param(is);
ret = fimc_is_wait_event(is, IS_ST_BLOCK_CMD_CLEARED, 1,
FIMC_IS_CONFIG_TIMEOUT);
if (ret < 0)
dev_err(&is->pdev->dev, "%s() timeout\n", __func__);
return ret;
}
int fimc_is_itf_mode_change(struct fimc_is *is)
{
int ret;
clear_bit(IS_ST_CHANGE_MODE, &is->state);
fimc_is_hw_change_mode(is);
ret = fimc_is_wait_event(is, IS_ST_CHANGE_MODE, 1,
FIMC_IS_CONFIG_TIMEOUT);
if (ret < 0)
dev_err(&is->pdev->dev, "%s(): mode change (%d) timeout\n",
__func__, is->config_index);
return ret;
}
| linux-master | drivers/media/platform/samsung/exynos4-is/fimc-is-regs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung S5P/EXYNOS4 SoC Camera Subsystem driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <[email protected]>
*/
#include <linux/module.h>
#include <media/drv-intf/exynos-fimc.h>
#include "common.h"
/*
 * Called with the media graph mutex held, or with
 * media_entity_is_streaming(entity) true.
*/
struct v4l2_subdev *fimc_find_remote_sensor(struct media_entity *entity)
{
struct media_pad *pad = &entity->pads[0];
struct v4l2_subdev *sd;
while (pad->flags & MEDIA_PAD_FL_SINK) {
/* source pad */
pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
sd = media_entity_to_v4l2_subdev(pad->entity);
if (sd->grp_id == GRP_ID_FIMC_IS_SENSOR ||
sd->grp_id == GRP_ID_SENSOR)
return sd;
/* sink pad */
pad = &sd->entity.pads[0];
}
return NULL;
}
EXPORT_SYMBOL(fimc_find_remote_sensor);
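/*
 * Example topology walked by fimc_find_remote_sensor(), e.g. starting from
 * the FIMC-LITE entity in:
 *
 *	[sensor] -> [MIPI-CSIS] -> [FIMC-LITE]
 *
 * The loop begins at the entity's first (sink) pad, hops across the link to
 * the remote source pad and, if the connected subdev is not a sensor
 * (GRP_ID_SENSOR or GRP_ID_FIMC_IS_SENSOR), continues upstream from that
 * subdev's own sink pad until a sensor is found or the chain ends.
 */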
void __fimc_vidioc_querycap(struct device *dev, struct v4l2_capability *cap)
{
strscpy(cap->driver, dev->driver->name, sizeof(cap->driver));
strscpy(cap->card, dev->driver->name, sizeof(cap->card));
}
EXPORT_SYMBOL(__fimc_vidioc_querycap);
MODULE_LICENSE("GPL");
| linux-master | drivers/media/platform/samsung/exynos4-is/common.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Samsung EXYNOS4x12 FIMC-IS (Imaging Subsystem) driver
*
* FIMC-IS ISP video input and video output DMA interface driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <[email protected]>
*
 * The hardware handling code was derived from a driver written by
* Younghwan Joo <[email protected]>.
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/drv-intf/exynos-fimc.h>
#include "common.h"
#include "media-dev.h"
#include "fimc-is.h"
#include "fimc-isp-video.h"
#include "fimc-is-param.h"
static int isp_video_capture_queue_setup(struct vb2_queue *vq,
unsigned int *num_buffers, unsigned int *num_planes,
unsigned int sizes[], struct device *alloc_devs[])
{
struct fimc_isp *isp = vb2_get_drv_priv(vq);
struct v4l2_pix_format_mplane *vid_fmt = &isp->video_capture.pixfmt;
const struct fimc_fmt *fmt = isp->video_capture.format;
unsigned int wh, i;
wh = vid_fmt->width * vid_fmt->height;
if (fmt == NULL)
return -EINVAL;
*num_buffers = clamp_t(u32, *num_buffers, FIMC_ISP_REQ_BUFS_MIN,
FIMC_ISP_REQ_BUFS_MAX);
if (*num_planes) {
if (*num_planes != fmt->memplanes)
return -EINVAL;
for (i = 0; i < *num_planes; i++)
if (sizes[i] < (wh * fmt->depth[i]) / 8)
return -EINVAL;
return 0;
}
*num_planes = fmt->memplanes;
for (i = 0; i < fmt->memplanes; i++)
sizes[i] = (wh * fmt->depth[i]) / 8;
return 0;
}
static inline struct param_dma_output *__get_isp_dma2(struct fimc_is *is)
{
return &__get_curr_is_config(is)->isp.dma2_output;
}
static int isp_video_capture_start_streaming(struct vb2_queue *q,
unsigned int count)
{
struct fimc_isp *isp = vb2_get_drv_priv(q);
struct fimc_is *is = fimc_isp_to_is(isp);
struct param_dma_output *dma = __get_isp_dma2(is);
struct fimc_is_video *video = &isp->video_capture;
int ret;
if (!test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state) ||
test_bit(ST_ISP_VID_CAP_STREAMING, &isp->state))
return 0;
dma->cmd = DMA_OUTPUT_COMMAND_ENABLE;
dma->notify_dma_done = DMA_OUTPUT_NOTIFY_DMA_DONE_ENABLE;
dma->buffer_address = is->is_dma_p_region +
DMA2_OUTPUT_ADDR_ARRAY_OFFS;
dma->buffer_number = video->reqbufs_count;
dma->dma_out_mask = video->buf_mask;
isp_dbg(2, &video->ve.vdev,
"buf_count: %d, planes: %d, dma addr table: %#x\n",
video->buf_count, video->format->memplanes,
dma->buffer_address);
fimc_is_mem_barrier();
fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT);
__fimc_is_hw_update_param(is, PARAM_ISP_DMA2_OUTPUT);
ret = fimc_is_itf_s_param(is, false);
if (ret < 0)
return ret;
ret = fimc_pipeline_call(&video->ve, set_stream, 1);
if (ret < 0)
return ret;
set_bit(ST_ISP_VID_CAP_STREAMING, &isp->state);
return ret;
}
static void isp_video_capture_stop_streaming(struct vb2_queue *q)
{
struct fimc_isp *isp = vb2_get_drv_priv(q);
struct fimc_is *is = fimc_isp_to_is(isp);
struct param_dma_output *dma = __get_isp_dma2(is);
int ret;
ret = fimc_pipeline_call(&isp->video_capture.ve, set_stream, 0);
if (ret < 0)
return;
dma->cmd = DMA_OUTPUT_COMMAND_DISABLE;
dma->notify_dma_done = DMA_OUTPUT_NOTIFY_DMA_DONE_DISABLE;
dma->buffer_number = 0;
dma->buffer_address = 0;
dma->dma_out_mask = 0;
fimc_is_set_param_bit(is, PARAM_ISP_DMA2_OUTPUT);
__fimc_is_hw_update_param(is, PARAM_ISP_DMA2_OUTPUT);
ret = fimc_is_itf_s_param(is, false);
if (ret < 0)
dev_warn(&is->pdev->dev, "%s: DMA stop failed\n", __func__);
fimc_is_hw_set_isp_buf_mask(is, 0);
clear_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state);
clear_bit(ST_ISP_VID_CAP_STREAMING, &isp->state);
isp->video_capture.buf_count = 0;
}
static int isp_video_capture_buffer_prepare(struct vb2_buffer *vb)
{
struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
struct fimc_is_video *video = &isp->video_capture;
int i;
if (video->format == NULL)
return -EINVAL;
for (i = 0; i < video->format->memplanes; i++) {
unsigned long size = video->pixfmt.plane_fmt[i].sizeimage;
if (vb2_plane_size(vb, i) < size) {
v4l2_err(&video->ve.vdev,
"User buffer too small (%ld < %ld)\n",
vb2_plane_size(vb, i), size);
return -EINVAL;
}
vb2_set_plane_payload(vb, i, size);
}
	/* Check whether this is one of the buffers we already know about. */
if (test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state)) {
dma_addr_t dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
int i;
for (i = 0; i < video->buf_count; i++)
if (video->buffers[i]->dma_addr[0] == dma_addr)
return 0;
return -ENXIO;
}
return 0;
}
static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue);
struct fimc_is_video *video = &isp->video_capture;
struct fimc_is *is = fimc_isp_to_is(isp);
struct isp_video_buf *ivb = to_isp_video_buf(vbuf);
unsigned long flags;
unsigned int i;
if (test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state)) {
spin_lock_irqsave(&is->slock, flags);
video->buf_mask |= BIT(ivb->index);
spin_unlock_irqrestore(&is->slock, flags);
} else {
unsigned int num_planes = video->format->memplanes;
ivb->index = video->buf_count;
video->buffers[ivb->index] = ivb;
for (i = 0; i < num_planes; i++) {
int buf_index = ivb->index * num_planes + i;
ivb->dma_addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
is->is_p_region->shared[32 + buf_index] =
ivb->dma_addr[i];
isp_dbg(2, &video->ve.vdev,
"dma_buf %d (%d/%d/%d) addr: %pad\n",
buf_index, ivb->index, i, vb->index,
&ivb->dma_addr[i]);
}
if (++video->buf_count < video->reqbufs_count)
return;
video->buf_mask = (1UL << video->buf_count) - 1;
set_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state);
}
if (!test_bit(ST_ISP_VID_CAP_STREAMING, &isp->state))
isp_video_capture_start_streaming(vb->vb2_queue, 0);
}
/*
* FIMC-IS ISP input and output DMA interface interrupt handler.
* Locking: called with is->slock spinlock held.
*/
void fimc_isp_video_irq_handler(struct fimc_is *is)
{
struct fimc_is_video *video = &is->isp.video_capture;
struct vb2_v4l2_buffer *vbuf;
int buf_index;
/* TODO: Ensure the DMA is really stopped in stop_streaming callback */
if (!test_bit(ST_ISP_VID_CAP_STREAMING, &is->isp.state))
return;
buf_index = (is->i2h_cmd.args[1] - 1) % video->buf_count;
vbuf = &video->buffers[buf_index]->vb;
vbuf->vb2_buf.timestamp = ktime_get_ns();
vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE);
video->buf_mask &= ~BIT(buf_index);
fimc_is_hw_set_isp_buf_mask(is, video->buf_mask);
}
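/*
 * Example of the buffer mask bookkeeping above, with buf_count == 4: once
 * all four buffers are queued, buf_mask == 0xf and the mask is handed to
 * the firmware. A DMA-done interrupt carrying frame number N in args[1]
 * completes buffer (N - 1) % 4, e.g. args[1] == 1 completes index 0 and
 * drops buf_mask to 0xe until userspace requeues that buffer and
 * isp_video_capture_buffer_queue() sets BIT(0) again.
 */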
static const struct vb2_ops isp_video_capture_qops = {
.queue_setup = isp_video_capture_queue_setup,
.buf_prepare = isp_video_capture_buffer_prepare,
.buf_queue = isp_video_capture_buffer_queue,
.wait_prepare = vb2_ops_wait_prepare,
.wait_finish = vb2_ops_wait_finish,
.start_streaming = isp_video_capture_start_streaming,
.stop_streaming = isp_video_capture_stop_streaming,
};
static int isp_video_open(struct file *file)
{
struct fimc_isp *isp = video_drvdata(file);
struct exynos_video_entity *ve = &isp->video_capture.ve;
struct media_entity *me = &ve->vdev.entity;
int ret;
if (mutex_lock_interruptible(&isp->video_lock))
return -ERESTARTSYS;
ret = v4l2_fh_open(file);
if (ret < 0)
goto unlock;
ret = pm_runtime_resume_and_get(&isp->pdev->dev);
if (ret < 0)
goto rel_fh;
if (v4l2_fh_is_singular_file(file)) {
mutex_lock(&me->graph_obj.mdev->graph_mutex);
ret = fimc_pipeline_call(ve, open, me, true);
/* Mark the video pipeline as in use. */
if (ret == 0)
me->use_count++;
mutex_unlock(&me->graph_obj.mdev->graph_mutex);
}
if (!ret)
goto unlock;
rel_fh:
v4l2_fh_release(file);
unlock:
mutex_unlock(&isp->video_lock);
return ret;
}
static int isp_video_release(struct file *file)
{
struct fimc_isp *isp = video_drvdata(file);
struct fimc_is_video *ivc = &isp->video_capture;
struct media_entity *entity = &ivc->ve.vdev.entity;
struct media_device *mdev = entity->graph_obj.mdev;
bool is_singular_file;
mutex_lock(&isp->video_lock);
is_singular_file = v4l2_fh_is_singular_file(file);
if (is_singular_file && ivc->streaming) {
video_device_pipeline_stop(&ivc->ve.vdev);
ivc->streaming = 0;
}
_vb2_fop_release(file, NULL);
if (is_singular_file) {
fimc_pipeline_call(&ivc->ve, close);
mutex_lock(&mdev->graph_mutex);
entity->use_count--;
mutex_unlock(&mdev->graph_mutex);
}
pm_runtime_put(&isp->pdev->dev);
mutex_unlock(&isp->video_lock);
return 0;
}
static const struct v4l2_file_operations isp_video_fops = {
.owner = THIS_MODULE,
.open = isp_video_open,
.release = isp_video_release,
.poll = vb2_fop_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = vb2_fop_mmap,
};
/*
* Video node ioctl operations
*/
static int isp_video_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
struct fimc_isp *isp = video_drvdata(file);
__fimc_vidioc_querycap(&isp->pdev->dev, cap);
return 0;
}
static int isp_video_enum_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
const struct fimc_fmt *fmt;
if (f->index >= FIMC_ISP_NUM_FORMATS)
return -EINVAL;
fmt = fimc_isp_find_format(NULL, NULL, f->index);
if (WARN_ON(fmt == NULL))
return -EINVAL;
f->pixelformat = fmt->fourcc;
return 0;
}
static int isp_video_g_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_isp *isp = video_drvdata(file);
f->fmt.pix_mp = isp->video_capture.pixfmt;
return 0;
}
static void __isp_video_try_fmt(struct fimc_isp *isp,
struct v4l2_pix_format_mplane *pixm,
const struct fimc_fmt **fmt)
{
const struct fimc_fmt *__fmt;
__fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
if (fmt)
*fmt = __fmt;
pixm->colorspace = V4L2_COLORSPACE_SRGB;
pixm->field = V4L2_FIELD_NONE;
pixm->num_planes = __fmt->memplanes;
pixm->pixelformat = __fmt->fourcc;
/*
	 * TODO: double-check against the documentation that these
	 * width/height constraints are correct.
*/
v4l_bound_align_image(&pixm->width, FIMC_ISP_SOURCE_WIDTH_MIN,
FIMC_ISP_SOURCE_WIDTH_MAX, 3,
&pixm->height, FIMC_ISP_SOURCE_HEIGHT_MIN,
FIMC_ISP_SOURCE_HEIGHT_MAX, 0, 0);
}
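/*
 * Example of the bounding above: walign == 3 aligns the width to
 * 1 << 3 == 8 pixels, so e.g. a requested width of 1001 ends up on a
 * multiple of 8 within the FIMC_ISP_SOURCE_WIDTH_MIN/MAX bounds (1000 or
 * 1008, depending on v4l_bound_align_image()'s rounding), while the height
 * is only clamped (halign == 0).
 */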
static int isp_video_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_format *f)
{
struct fimc_isp *isp = video_drvdata(file);
__isp_video_try_fmt(isp, &f->fmt.pix_mp, NULL);
return 0;
}
static int isp_video_s_fmt_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
struct fimc_isp *isp = video_drvdata(file);
struct fimc_is *is = fimc_isp_to_is(isp);
struct v4l2_pix_format_mplane *pixm = &f->fmt.pix_mp;
const struct fimc_fmt *ifmt = NULL;
struct param_dma_output *dma = __get_isp_dma2(is);
__isp_video_try_fmt(isp, pixm, &ifmt);
if (WARN_ON(ifmt == NULL))
return -EINVAL;
dma->format = DMA_OUTPUT_FORMAT_BAYER;
dma->order = DMA_OUTPUT_ORDER_GB_BG;
dma->plane = ifmt->memplanes;
dma->bitwidth = ifmt->depth[0];
dma->width = pixm->width;
dma->height = pixm->height;
fimc_is_mem_barrier();
isp->video_capture.format = ifmt;
isp->video_capture.pixfmt = *pixm;
return 0;
}
/*
* Check for source/sink format differences at each link.
* Return 0 if the formats match or -EPIPE otherwise.
*/
static int isp_video_pipeline_validate(struct fimc_isp *isp)
{
struct v4l2_subdev *sd = &isp->subdev;
struct media_pad *pad;
int ret;
while (1) {
struct v4l2_subdev_format sink_fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
struct v4l2_subdev_format src_fmt = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
};
/* Retrieve format at the sink pad */
pad = &sd->entity.pads[0];
if (!(pad->flags & MEDIA_PAD_FL_SINK))
break;
sink_fmt.pad = pad->index;
ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt);
if (ret < 0 && ret != -ENOIOCTLCMD)
return -EPIPE;
/* Retrieve format at the source pad */
pad = media_pad_remote_pad_first(pad);
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
sd = media_entity_to_v4l2_subdev(pad->entity);
src_fmt.pad = pad->index;
ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt);
if (ret < 0 && ret != -ENOIOCTLCMD)
return -EPIPE;
if (src_fmt.format.width != sink_fmt.format.width ||
src_fmt.format.height != sink_fmt.format.height ||
src_fmt.format.code != sink_fmt.format.code)
return -EPIPE;
}
return 0;
}
static int isp_video_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct fimc_isp *isp = video_drvdata(file);
struct exynos_video_entity *ve = &isp->video_capture.ve;
int ret;
ret = video_device_pipeline_start(&ve->vdev, &ve->pipe->mp);
if (ret < 0)
return ret;
ret = isp_video_pipeline_validate(isp);
if (ret < 0)
goto p_stop;
ret = vb2_ioctl_streamon(file, priv, type);
if (ret < 0)
goto p_stop;
isp->video_capture.streaming = 1;
return 0;
p_stop:
video_device_pipeline_stop(&ve->vdev);
return ret;
}
static int isp_video_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct fimc_isp *isp = video_drvdata(file);
struct fimc_is_video *video = &isp->video_capture;
int ret;
ret = vb2_ioctl_streamoff(file, priv, type);
if (ret < 0)
return ret;
video_device_pipeline_stop(&video->ve.vdev);
video->streaming = 0;
return 0;
}
static int isp_video_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *rb)
{
struct fimc_isp *isp = video_drvdata(file);
int ret;
ret = vb2_ioctl_reqbufs(file, priv, rb);
if (ret < 0)
return ret;
if (rb->count && rb->count < FIMC_ISP_REQ_BUFS_MIN) {
rb->count = 0;
vb2_ioctl_reqbufs(file, priv, rb);
ret = -ENOMEM;
}
isp->video_capture.reqbufs_count = rb->count;
return ret;
}
static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
.vidioc_querycap = isp_video_querycap,
.vidioc_enum_fmt_vid_cap = isp_video_enum_fmt,
.vidioc_try_fmt_vid_cap_mplane = isp_video_try_fmt_mplane,
.vidioc_s_fmt_vid_cap_mplane = isp_video_s_fmt_mplane,
.vidioc_g_fmt_vid_cap_mplane = isp_video_g_fmt_mplane,
.vidioc_reqbufs = isp_video_reqbufs,
.vidioc_querybuf = vb2_ioctl_querybuf,
.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
.vidioc_create_bufs = vb2_ioctl_create_bufs,
.vidioc_qbuf = vb2_ioctl_qbuf,
.vidioc_dqbuf = vb2_ioctl_dqbuf,
.vidioc_streamon = isp_video_streamon,
.vidioc_streamoff = isp_video_streamoff,
};
int fimc_isp_video_device_register(struct fimc_isp *isp,
struct v4l2_device *v4l2_dev,
enum v4l2_buf_type type)
{
struct vb2_queue *q = &isp->video_capture.vb_queue;
struct fimc_is_video *iv;
struct video_device *vdev;
int ret;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
iv = &isp->video_capture;
else
return -ENOSYS;
mutex_init(&isp->video_lock);
INIT_LIST_HEAD(&iv->pending_buf_q);
INIT_LIST_HEAD(&iv->active_buf_q);
iv->format = fimc_isp_find_format(NULL, NULL, 0);
iv->pixfmt.width = IS_DEFAULT_WIDTH;
iv->pixfmt.height = IS_DEFAULT_HEIGHT;
iv->pixfmt.pixelformat = iv->format->fourcc;
iv->pixfmt.colorspace = V4L2_COLORSPACE_SRGB;
iv->reqbufs_count = 0;
memset(q, 0, sizeof(*q));
q->type = type;
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->ops = &isp_video_capture_qops;
q->mem_ops = &vb2_dma_contig_memops;
q->buf_struct_size = sizeof(struct isp_video_buf);
q->drv_priv = isp;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &isp->video_lock;
q->dev = &isp->pdev->dev;
ret = vb2_queue_init(q);
if (ret < 0)
return ret;
vdev = &iv->ve.vdev;
memset(vdev, 0, sizeof(*vdev));
strscpy(vdev->name, "fimc-is-isp.capture", sizeof(vdev->name));
vdev->queue = q;
vdev->fops = &isp_video_fops;
vdev->ioctl_ops = &isp_video_ioctl_ops;
vdev->v4l2_dev = v4l2_dev;
vdev->minor = -1;
vdev->release = video_device_release_empty;
vdev->lock = &isp->video_lock;
vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
iv->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&vdev->entity, 1, &iv->pad);
if (ret < 0)
return ret;
video_set_drvdata(vdev, isp);
ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (ret < 0) {
media_entity_cleanup(&vdev->entity);
return ret;
}
v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
vdev->name, video_device_node_name(vdev));
return 0;
}
void fimc_isp_video_device_unregister(struct fimc_isp *isp,
enum v4l2_buf_type type)
{
struct exynos_video_entity *ve;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
ve = &isp->video_capture.ve;
else
return;
mutex_lock(&isp->video_lock);
if (video_is_registered(&ve->vdev)) {
video_unregister_device(&ve->vdev);
media_entity_cleanup(&ve->vdev.entity);
ve->pipe = NULL;
}
mutex_unlock(&isp->video_lock);
}
| linux-master | drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* S5P/EXYNOS4 SoC series camera host interface media device driver
*
* Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki <[email protected]>
*/
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include <media/media-device.h>
#include <media/drv-intf/exynos-fimc.h>
#include "media-dev.h"
#include "fimc-core.h"
#include "fimc-is.h"
#include "fimc-lite.h"
#include "mipi-csis.h"
/* Set up image sensor subdev -> FIMC capture node notifications. */
static void __setup_sensor_notification(struct fimc_md *fmd,
struct v4l2_subdev *sensor,
struct v4l2_subdev *fimc_sd)
{
struct fimc_source_info *src_inf;
struct fimc_sensor_info *md_si;
unsigned long flags;
src_inf = v4l2_get_subdev_hostdata(sensor);
if (!src_inf || WARN_ON(fmd == NULL))
return;
md_si = source_to_sensor_info(src_inf);
spin_lock_irqsave(&fmd->slock, flags);
md_si->host = v4l2_get_subdevdata(fimc_sd);
spin_unlock_irqrestore(&fmd->slock, flags);
}
/**
* fimc_pipeline_prepare - update pipeline information with subdevice pointers
* @p: fimc pipeline
* @me: media entity terminating the pipeline
*
* Caller holds the graph mutex.
*/
static void fimc_pipeline_prepare(struct fimc_pipeline *p,
struct media_entity *me)
{
struct fimc_md *fmd = entity_to_fimc_mdev(me);
struct v4l2_subdev *sd;
struct v4l2_subdev *sensor = NULL;
int i;
for (i = 0; i < IDX_MAX; i++)
p->subdevs[i] = NULL;
while (1) {
struct media_pad *pad = NULL;
/* Find remote source pad */
for (i = 0; i < me->num_pads; i++) {
struct media_pad *spad = &me->pads[i];
if (!(spad->flags & MEDIA_PAD_FL_SINK))
continue;
pad = media_pad_remote_pad_first(spad);
if (pad)
break;
}
if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
break;
sd = media_entity_to_v4l2_subdev(pad->entity);
switch (sd->grp_id) {
case GRP_ID_SENSOR:
sensor = sd;
fallthrough;
case GRP_ID_FIMC_IS_SENSOR:
p->subdevs[IDX_SENSOR] = sd;
break;
case GRP_ID_CSIS:
p->subdevs[IDX_CSIS] = sd;
break;
case GRP_ID_FLITE:
p->subdevs[IDX_FLITE] = sd;
break;
case GRP_ID_FIMC:
p->subdevs[IDX_FIMC] = sd;
break;
case GRP_ID_FIMC_IS:
p->subdevs[IDX_IS_ISP] = sd;
break;
default:
break;
}
me = &sd->entity;
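/* A subdev with a single pad has no further sink pad; stop walking. */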
if (me->num_pads == 1)
break;
}
if (sensor && p->subdevs[IDX_FIMC])
__setup_sensor_notification(fmd, sensor, p->subdevs[IDX_FIMC]);
}
/**
* __subdev_set_power - change power state of a single subdev
* @sd: subdevice to change power state for
* @on: 1 to enable power or 0 to disable
*
* Return result of s_power subdev operation or -ENXIO if sd argument
* is NULL. Return 0 if the subdevice does not implement s_power.
*/
static int __subdev_set_power(struct v4l2_subdev *sd, int on)
{
int *use_count;
int ret;
if (sd == NULL)
return -ENXIO;
use_count = &sd->entity.use_count;
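/*
 * Reference-count the power state so that only the first enable and
 * the last disable actually reach the subdev's s_power op.
 */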
if (on && (*use_count)++ > 0)
return 0;
else if (!on && (*use_count == 0 || --(*use_count) > 0))
return 0;
ret = v4l2_subdev_call(sd, core, s_power, on);
return ret != -ENOIOCTLCMD ? ret : 0;
}
/**
* fimc_pipeline_s_power - change power state of all pipeline subdevs
* @p: fimc device terminating the pipeline
* @on: true to power on, false to power off
*
* Needs to be called with the graph mutex held.
*/
static int fimc_pipeline_s_power(struct fimc_pipeline *p, bool on)
{
static const u8 seq[2][IDX_MAX - 1] = {
{ IDX_IS_ISP, IDX_SENSOR, IDX_CSIS, IDX_FLITE },
{ IDX_CSIS, IDX_FLITE, IDX_SENSOR, IDX_IS_ISP },
};
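/*
 * seq[1] is the power-up order (CSIS, FIMC-LITE, sensor, FIMC-IS ISP),
 * seq[0] the power-down order (FIMC-IS ISP, sensor, CSIS, FIMC-LITE);
 * indexing with the boolean 'on' selects the right sequence.
 */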
int i, ret = 0;
if (p->subdevs[IDX_SENSOR] == NULL)
return -ENXIO;
for (i = 0; i < IDX_MAX - 1; i++) {
unsigned int idx = seq[on][i];
ret = __subdev_set_power(p->subdevs[idx], on);
if (ret < 0 && ret != -ENXIO)
goto error;
}
return 0;
error:
for (; i >= 0; i--) {
unsigned int idx = seq[on][i];
__subdev_set_power(p->subdevs[idx], !on);
}
return ret;
}
/**
* __fimc_pipeline_enable - enable power of all pipeline subdevs
* and the sensor clock
* @ep: video pipeline structure
* @fmd: fimc media device
*
* Called with the graph mutex held.
*/
static int __fimc_pipeline_enable(struct exynos_media_pipeline *ep,
struct fimc_md *fmd)
{
struct fimc_pipeline *p = to_fimc_pipeline(ep);
int ret;
/* Enable PXLASYNC clock if this pipeline includes FIMC-IS */
if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]);
if (ret < 0)
return ret;
}
ret = fimc_pipeline_s_power(p, 1);
if (!ret)
return 0;
if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
return ret;
}
/**
* __fimc_pipeline_open - update the pipeline information, enable power
* of all pipeline subdevs and the sensor clock
* @ep: fimc device terminating the pipeline
* @me: media entity to start graph walk with
* @prepare: true to walk the current pipeline and acquire all subdevs
*
* Called with the graph mutex held.
*/
static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
struct media_entity *me, bool prepare)
{
struct fimc_md *fmd = entity_to_fimc_mdev(me);
struct fimc_pipeline *p = to_fimc_pipeline(ep);
struct v4l2_subdev *sd;
if (WARN_ON(p == NULL || me == NULL))
return -EINVAL;
if (prepare)
fimc_pipeline_prepare(p, me);
sd = p->subdevs[IDX_SENSOR];
if (sd == NULL) {
pr_warn("%s(): No sensor subdev\n", __func__);
/*
 * Pipeline open must not fail here, so that user space can
 * still configure the pipeline.
 */
return 0;
}
return __fimc_pipeline_enable(ep, fmd);
}
/**
* __fimc_pipeline_close - disable the sensor clock and pipeline power
* @ep: fimc device terminating the pipeline
*
* Disable power of all subdevs and turn the external sensor clock off.
*/
static int __fimc_pipeline_close(struct exynos_media_pipeline *ep)
{
struct fimc_pipeline *p = to_fimc_pipeline(ep);
struct v4l2_subdev *sd = p ? p->subdevs[IDX_SENSOR] : NULL;
struct fimc_md *fmd;
int ret;
if (sd == NULL) {
pr_warn("%s(): No sensor subdev\n", __func__);
return 0;
}
ret = fimc_pipeline_s_power(p, 0);
fmd = entity_to_fimc_mdev(&sd->entity);
/* Disable PXLASYNC clock if this pipeline includes FIMC-IS */
if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
return ret == -ENXIO ? 0 : ret;
}
/**
* __fimc_pipeline_s_stream - call s_stream() on pipeline subdevs
* @ep: video pipeline structure
* @on: passed as the s_stream() callback argument
*/
static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
{
static const u8 seq[2][IDX_MAX] = {
{ IDX_FIMC, IDX_SENSOR, IDX_IS_ISP, IDX_CSIS, IDX_FLITE },
{ IDX_CSIS, IDX_FLITE, IDX_FIMC, IDX_SENSOR, IDX_IS_ISP },
};
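/*
 * seq[1] is the stream-on order (CSIS, FIMC-LITE, FIMC, sensor,
 * FIMC-IS ISP); seq[0] is the stream-off order, which stops FIMC
 * and the sensor before the bus receivers.
 */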
struct fimc_pipeline *p = to_fimc_pipeline(ep);
enum fimc_subdev_index sd_id;
int i, ret = 0;
if (p->subdevs[IDX_SENSOR] == NULL) {
struct fimc_md *fmd;
struct v4l2_subdev *sd = p->subdevs[IDX_CSIS];
if (!sd)
sd = p->subdevs[IDX_FIMC];
if (!sd) {
/*
* If neither CSIS nor FIMC was set up,
* it's impossible to have any sensors
*/
return -ENODEV;
}
fmd = entity_to_fimc_mdev(&sd->entity);
if (!fmd->user_subdev_api) {
/*
 * The sensor must already have been discovered if we
 * aren't in user_subdev_api mode
 */
return -ENODEV;
}
/* Get pipeline sink entity */
if (p->subdevs[IDX_FIMC])
sd_id = IDX_FIMC;
else if (p->subdevs[IDX_IS_ISP])
sd_id = IDX_IS_ISP;
else if (p->subdevs[IDX_FLITE])
sd_id = IDX_FLITE;
else
return -ENODEV;
/*
* Sensor could have been linked between open and STREAMON -
* check if this is the case.
*/
fimc_pipeline_prepare(p, &p->subdevs[sd_id]->entity);
if (p->subdevs[IDX_SENSOR] == NULL)
return -ENODEV;
ret = __fimc_pipeline_enable(ep, fmd);
if (ret < 0)
return ret;
}
for (i = 0; i < IDX_MAX; i++) {
unsigned int idx = seq[on][i];
ret = v4l2_subdev_call(p->subdevs[idx], video, s_stream, on);
if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
goto error;
}
return 0;
error:
fimc_pipeline_s_power(p, !on);
for (; i >= 0; i--) {
unsigned int idx = seq[on][i];
v4l2_subdev_call(p->subdevs[idx], video, s_stream, !on);
}
return ret;
}
/* Media pipeline operations for the FIMC/FIMC-LITE video device driver */
static const struct exynos_media_pipeline_ops fimc_pipeline_ops = {
.open = __fimc_pipeline_open,
.close = __fimc_pipeline_close,
.set_stream = __fimc_pipeline_s_stream,
};
static struct exynos_media_pipeline *fimc_md_pipeline_create(
struct fimc_md *fmd)
{
struct fimc_pipeline *p;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return NULL;
list_add_tail(&p->list, &fmd->pipelines);
p->ep.ops = &fimc_pipeline_ops;
return &p->ep;
}
static void fimc_md_pipelines_free(struct fimc_md *fmd)
{
while (!list_empty(&fmd->pipelines)) {
struct fimc_pipeline *p;
p = list_entry(fmd->pipelines.next, typeof(*p), list);
list_del(&p->list);
kfree(p);
}
}
static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
struct device_node *ep)
{
int index = fmd->num_sensors;
struct fimc_source_info *pd = &fmd->sensor[index].pdata;
struct device_node *rem, *np;
struct v4l2_async_connection *asd;
struct v4l2_fwnode_endpoint endpoint = { .bus_type = 0 };
int ret;
ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &endpoint);
if (ret) {
of_node_put(ep);
return ret;
}
if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS) {
of_node_put(ep);
return -EINVAL;
}
pd->mux_id = (endpoint.base.port - 1) & 0x1;
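/*
 * Port numbering starts at 1 (port 0 was rejected above), so this
 * maps odd/even port numbers onto mux channel 0/1 respectively.
 */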
rem = of_graph_get_remote_port_parent(ep);
if (rem == NULL) {
v4l2_info(&fmd->v4l2_dev, "Remote device at %pOF not found\n",
ep);
of_node_put(ep);
return 0;
}
if (fimc_input_is_parallel(endpoint.base.port)) {
if (endpoint.bus_type == V4L2_MBUS_PARALLEL)
pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_601;
else
pd->sensor_bus_type = FIMC_BUS_TYPE_ITU_656;
pd->flags = endpoint.bus.parallel.flags;
} else if (fimc_input_is_mipi_csi(endpoint.base.port)) {
/*
* MIPI CSI-2: only the input mux selection and
* the sensor's clock frequency are needed.
*/
pd->sensor_bus_type = FIMC_BUS_TYPE_MIPI_CSI2;
} else {
v4l2_err(&fmd->v4l2_dev, "Wrong port id (%u) at node %pOF\n",
endpoint.base.port, rem);
}
/*
* For FIMC-IS handled sensors, which are placed under the i2c-isp device
* node, FIMC is connected to the FIMC-IS through its ISP Writeback
* input. Sensors are attached to the FIMC-LITE hostdata interface
* directly or through MIPI-CSIS, depending on the external media bus
* used. This needs to be handled in a more reliable way, not just by
* checking the parent's node name.
*/
np = of_get_parent(rem);
of_node_put(rem);
if (of_node_name_eq(np, "i2c-isp"))
pd->fimc_bus_type = FIMC_BUS_TYPE_ISP_WRITEBACK;
else
pd->fimc_bus_type = pd->sensor_bus_type;
of_node_put(np);
if (WARN_ON(index >= ARRAY_SIZE(fmd->sensor))) {
of_node_put(ep);
return -EINVAL;
}
asd = v4l2_async_nf_add_fwnode_remote(&fmd->subdev_notifier,
of_fwnode_handle(ep),
struct v4l2_async_connection);
of_node_put(ep);
if (IS_ERR(asd))
return PTR_ERR(asd);
fmd->sensor[index].asd = asd;
fmd->num_sensors++;
return 0;
}
/* Parse port node and register as a sub-device any sensor specified there. */
static int fimc_md_parse_port_node(struct fimc_md *fmd,
struct device_node *port)
{
struct device_node *ep;
int ret;
for_each_child_of_node(port, ep) {
ret = fimc_md_parse_one_endpoint(fmd, ep);
if (ret < 0) {
of_node_put(ep);
return ret;
}
}
return 0;
}
/* Register all SoC external sub-devices */
static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
{
struct device_node *parent = fmd->pdev->dev.of_node;
struct device_node *ports = NULL;
struct device_node *node;
int ret;
/*
* Runtime resume one of the FIMC entities to make sure
* the sclk_cam clocks are not globally disabled.
*/
if (!fmd->pmf)
return -ENXIO;
ret = pm_runtime_resume_and_get(fmd->pmf);
if (ret < 0)
return ret;
fmd->num_sensors = 0;
/* Attach sensors linked to MIPI CSI-2 receivers */
for_each_available_child_of_node(parent, node) {
struct device_node *port;
if (!of_node_name_eq(node, "csis"))
continue;
/* The csis node can have only a single port subnode. */
port = of_get_next_child(node, NULL);
if (!port)
continue;
ret = fimc_md_parse_port_node(fmd, port);
of_node_put(port);
if (ret < 0) {
of_node_put(node);
goto cleanup;
}
}
/* Attach sensors listed in the parallel-ports node */
ports = of_get_child_by_name(parent, "parallel-ports");
if (!ports)
goto rpm_put;
for_each_child_of_node(ports, node) {
ret = fimc_md_parse_port_node(fmd, node);
if (ret < 0) {
of_node_put(node);
goto cleanup;
}
}
of_node_put(ports);
rpm_put:
pm_runtime_put(fmd->pmf);
return 0;
cleanup:
of_node_put(ports);
v4l2_async_nf_cleanup(&fmd->subdev_notifier);
pm_runtime_put(fmd->pmf);
return ret;
}
static int __of_get_csis_id(struct device_node *np)
{
u32 reg = 0;
np = of_get_child_by_name(np, "port");
if (!np)
return -EINVAL;
of_property_read_u32(np, "reg", &reg);
of_node_put(np);
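/*
 * The port's "reg" property holds the FIMC input index; subtracting
 * the first MIPI CSI-2 input gives the zero-based CSIS channel id.
 */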
return reg - FIMC_INPUT_MIPI_CSI2_0;
}
/*
* MIPI-CSIS, FIMC and FIMC-LITE platform devices registration.
*/
static int register_fimc_lite_entity(struct fimc_md *fmd,
struct fimc_lite *fimc_lite)
{
struct v4l2_subdev *sd;
struct exynos_media_pipeline *ep;
int ret;
if (WARN_ON(fimc_lite->index >= FIMC_LITE_MAX_DEVS ||
fmd->fimc_lite[fimc_lite->index]))
return -EBUSY;
sd = &fimc_lite->subdev;
sd->grp_id = GRP_ID_FLITE;
ep = fimc_md_pipeline_create(fmd);
if (!ep)
return -ENOMEM;
v4l2_set_subdev_hostdata(sd, ep);
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
if (!ret)
fmd->fimc_lite[fimc_lite->index] = fimc_lite;
else
v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.LITE%d\n",
fimc_lite->index);
return ret;
}
static int register_fimc_entity(struct fimc_md *fmd, struct fimc_dev *fimc)
{
struct v4l2_subdev *sd;
struct exynos_media_pipeline *ep;
int ret;
if (WARN_ON(fimc->id >= FIMC_MAX_DEVS || fmd->fimc[fimc->id]))
return -EBUSY;
sd = &fimc->vid_cap.subdev;
sd->grp_id = GRP_ID_FIMC;
ep = fimc_md_pipeline_create(fmd);
if (!ep)
return -ENOMEM;
v4l2_set_subdev_hostdata(sd, ep);
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
if (!ret) {
if (!fmd->pmf && fimc->pdev)
fmd->pmf = &fimc->pdev->dev;
fmd->fimc[fimc->id] = fimc;
fimc->vid_cap.user_subdev_api = fmd->user_subdev_api;
} else {
v4l2_err(&fmd->v4l2_dev, "Failed to register FIMC.%d (%d)\n",
fimc->id, ret);
}
return ret;
}
static int register_csis_entity(struct fimc_md *fmd,
struct platform_device *pdev,
struct v4l2_subdev *sd)
{
struct device_node *node = pdev->dev.of_node;
int id, ret;
id = node ? __of_get_csis_id(node) : max(0, pdev->id);
if (WARN_ON(id < 0 || id >= CSIS_MAX_ENTITIES))
return -ENOENT;
if (WARN_ON(fmd->csis[id].sd))
return -EBUSY;
sd->grp_id = GRP_ID_CSIS;
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
if (!ret)
fmd->csis[id].sd = sd;
else
v4l2_err(&fmd->v4l2_dev,
"Failed to register MIPI-CSIS.%d (%d)\n", id, ret);
return ret;
}
static int register_fimc_is_entity(struct fimc_md *fmd, struct fimc_is *is)
{
struct v4l2_subdev *sd = &is->isp.subdev;
struct exynos_media_pipeline *ep;
int ret;
/* Allocate pipeline object for the ISP capture video node. */
ep = fimc_md_pipeline_create(fmd);
if (!ep)
return -ENOMEM;
v4l2_set_subdev_hostdata(sd, ep);
ret = v4l2_device_register_subdev(&fmd->v4l2_dev, sd);
if (ret) {
v4l2_err(&fmd->v4l2_dev,
"Failed to register FIMC-ISP (%d)\n", ret);
return ret;
}
fmd->fimc_is = is;
return 0;
}
static int fimc_md_register_platform_entity(struct fimc_md *fmd,
struct platform_device *pdev,
int plat_entity)
{
struct device *dev = &pdev->dev;
int ret = -EPROBE_DEFER;
void *drvdata;
/* Lock to ensure dev->driver won't change. */
device_lock(dev);
if (!dev->driver || !try_module_get(dev->driver->owner))
goto dev_unlock;
drvdata = dev_get_drvdata(dev);
/* Some subdev didn't probe successfully, so drvdata is NULL */
if (drvdata) {
switch (plat_entity) {
case IDX_FIMC:
ret = register_fimc_entity(fmd, drvdata);
break;
case IDX_FLITE:
ret = register_fimc_lite_entity(fmd, drvdata);
break;
case IDX_CSIS:
ret = register_csis_entity(fmd, pdev, drvdata);
break;
case IDX_IS_ISP:
ret = register_fimc_is_entity(fmd, drvdata);
break;
default:
ret = -ENODEV;
}
}
module_put(dev->driver->owner);
dev_unlock:
device_unlock(dev);
if (ret == -EPROBE_DEFER)
dev_info(&fmd->pdev->dev, "deferring %s device registration\n",
dev_name(dev));
else if (ret < 0)
dev_err(&fmd->pdev->dev, "%s device registration failed (%d)\n",
dev_name(dev), ret);
return ret;
}
/* Register FIMC, FIMC-LITE and CSIS media entities */
static int fimc_md_register_platform_entities(struct fimc_md *fmd,
struct device_node *parent)
{
struct device_node *node;
int ret = 0;
for_each_available_child_of_node(parent, node) {
struct platform_device *pdev;
int plat_entity = -1;
pdev = of_find_device_by_node(node);
if (!pdev)
continue;
/* If the driver of any entity isn't ready, try all again later. */
if (of_node_name_eq(node, CSIS_OF_NODE_NAME))
plat_entity = IDX_CSIS;
else if (of_node_name_eq(node, FIMC_IS_OF_NODE_NAME))
plat_entity = IDX_IS_ISP;
else if (of_node_name_eq(node, FIMC_LITE_OF_NODE_NAME))
plat_entity = IDX_FLITE;
else if (of_node_name_eq(node, FIMC_OF_NODE_NAME) &&
!of_property_read_bool(node, "samsung,lcd-wb"))
plat_entity = IDX_FIMC;
if (plat_entity >= 0)
ret = fimc_md_register_platform_entity(fmd, pdev,
plat_entity);
put_device(&pdev->dev);
if (ret < 0) {
of_node_put(node);
break;
}
}
return ret;
}
static void fimc_md_unregister_entities(struct fimc_md *fmd)
{
int i;
for (i = 0; i < FIMC_MAX_DEVS; i++) {
struct fimc_dev *dev = fmd->fimc[i];
if (dev == NULL)
continue;
v4l2_device_unregister_subdev(&dev->vid_cap.subdev);
dev->vid_cap.ve.pipe = NULL;
fmd->fimc[i] = NULL;
}
for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
struct fimc_lite *dev = fmd->fimc_lite[i];
if (dev == NULL)
continue;
v4l2_device_unregister_subdev(&dev->subdev);
dev->ve.pipe = NULL;
fmd->fimc_lite[i] = NULL;
}
for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
if (fmd->csis[i].sd == NULL)
continue;
v4l2_device_unregister_subdev(fmd->csis[i].sd);
fmd->csis[i].sd = NULL;
}
if (fmd->fimc_is)
v4l2_device_unregister_subdev(&fmd->fimc_is->isp.subdev);
v4l2_info(&fmd->v4l2_dev, "Unregistered all entities\n");
}
/**
* __fimc_md_create_fimc_sink_links - create links to all FIMC entities
* @fmd: fimc media device
* @source: the source entity to create links to all fimc entities from
* @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null
* @pad: the source entity pad index
* @link_mask: bitmask of the fimc devices for which link should be enabled
*/
static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
struct media_entity *source,
struct v4l2_subdev *sensor,
int pad, int link_mask)
{
struct fimc_source_info *si = NULL;
struct media_entity *sink;
unsigned int flags = 0;
int i, ret = 0;
if (sensor) {
si = v4l2_get_subdev_hostdata(sensor);
/* Skip direct FIMC links in the logical FIMC-IS sensor path */
if (si && si->fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK)
ret = 1;
}
for (i = 0; !ret && i < FIMC_MAX_DEVS; i++) {
if (!fmd->fimc[i])
continue;
/*
* Some FIMC variants are not fitted with a camera capture
* interface. Skip creating a link from the sensor for those.
*/
if (!fmd->fimc[i]->variant->has_cam_if)
continue;
flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0;
sink = &fmd->fimc[i]->vid_cap.subdev.entity;
ret = media_create_pad_link(source, pad, sink,
FIMC_SD_PAD_SINK_CAM, flags);
if (ret)
return ret;
/* Notify FIMC capture subdev entity */
ret = media_entity_call(sink, link_setup, &sink->pads[0],
&source->pads[pad], flags);
if (ret)
break;
v4l2_info(&fmd->v4l2_dev, "created link [%s] %c> [%s]\n",
source->name, flags ? '=' : '-', sink->name);
}
for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
if (!fmd->fimc_lite[i])
continue;
sink = &fmd->fimc_lite[i]->subdev.entity;
ret = media_create_pad_link(source, pad, sink,
FLITE_SD_PAD_SINK, 0);
if (ret)
return ret;
/* Notify FIMC-LITE subdev entity */
ret = media_entity_call(sink, link_setup, &sink->pads[0],
&source->pads[pad], 0);
if (ret)
break;
v4l2_info(&fmd->v4l2_dev, "created link [%s] -> [%s]\n",
source->name, sink->name);
}
return 0;
}
/* Create links from FIMC-LITE source pads to other entities */
static int __fimc_md_create_flite_source_links(struct fimc_md *fmd)
{
struct media_entity *source, *sink;
int i, ret = 0;
for (i = 0; i < FIMC_LITE_MAX_DEVS; i++) {
struct fimc_lite *fimc = fmd->fimc_lite[i];
if (fimc == NULL)
continue;
source = &fimc->subdev.entity;
sink = &fimc->ve.vdev.entity;
/* FIMC-LITE's subdev and video node */
ret = media_create_pad_link(source, FLITE_SD_PAD_SOURCE_DMA,
sink, 0, 0);
if (ret)
break;
/* Link from FIMC-LITE to IS-ISP subdev */
sink = &fmd->fimc_is->isp.subdev.entity;
ret = media_create_pad_link(source, FLITE_SD_PAD_SOURCE_ISP,
sink, 0, 0);
if (ret)
break;
}
return ret;
}
/* Create FIMC-IS links */
static int __fimc_md_create_fimc_is_links(struct fimc_md *fmd)
{
struct fimc_isp *isp = &fmd->fimc_is->isp;
struct media_entity *source, *sink;
int i, ret;
source = &isp->subdev.entity;
for (i = 0; i < FIMC_MAX_DEVS; i++) {
if (fmd->fimc[i] == NULL)
continue;
/* Link from FIMC-IS-ISP subdev to FIMC */
sink = &fmd->fimc[i]->vid_cap.subdev.entity;
ret = media_create_pad_link(source, FIMC_ISP_SD_PAD_SRC_FIFO,
sink, FIMC_SD_PAD_SINK_FIFO, 0);
if (ret)
return ret;
}
/* Link from FIMC-IS-ISP subdev to fimc-is-isp.capture video node */
sink = &isp->video_capture.ve.vdev.entity;
/* Skip this link if the fimc-is-isp video node driver isn't built-in */
if (sink->num_pads == 0)
return 0;
return media_create_pad_link(source, FIMC_ISP_SD_PAD_SRC_DMA,
sink, 0, 0);
}
/**
* fimc_md_create_links - create default links between registered entities
* @fmd: fimc media device
*
* Parallel interface sensor entities are connected directly to FIMC capture
* entities. Sensors using the MIPI CSIS bus are connected through an immutable
* link with the CSI receiver entity specified by mux_id. Any registered CSIS
* entity has a link to each registered FIMC capture entity. Enabled links
* are created by default between each subsequent registered sensor and
* subsequent FIMC capture entity. The number of default active links is
* determined by the number of available sensors or FIMC entities,
* whichever is less.
*/
static int fimc_md_create_links(struct fimc_md *fmd)
{
struct v4l2_subdev *csi_sensors[CSIS_MAX_ENTITIES] = { NULL };
struct v4l2_subdev *sensor, *csis;
struct fimc_source_info *pdata;
struct media_entity *source, *sink;
int i, pad, fimc_id = 0, ret = 0;
u32 flags, link_mask = 0;
for (i = 0; i < fmd->num_sensors; i++) {
if (fmd->sensor[i].subdev == NULL)
continue;
sensor = fmd->sensor[i].subdev;
pdata = v4l2_get_subdev_hostdata(sensor);
if (!pdata)
continue;
source = NULL;
switch (pdata->sensor_bus_type) {
case FIMC_BUS_TYPE_MIPI_CSI2:
if (WARN(pdata->mux_id >= CSIS_MAX_ENTITIES,
"Wrong CSI channel id: %d\n", pdata->mux_id))
return -EINVAL;
csis = fmd->csis[pdata->mux_id].sd;
if (WARN(csis == NULL,
"MIPI-CSI interface specified but s5p-csis module is not loaded!\n"))
return -EINVAL;
pad = sensor->entity.num_pads - 1;
ret = media_create_pad_link(&sensor->entity, pad,
&csis->entity, CSIS_PAD_SINK,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED);
if (ret)
return ret;
v4l2_info(&fmd->v4l2_dev, "created link [%s] => [%s]\n",
sensor->entity.name, csis->entity.name);
source = NULL;
csi_sensors[pdata->mux_id] = sensor;
break;
case FIMC_BUS_TYPE_ITU_601...FIMC_BUS_TYPE_ITU_656:
source = &sensor->entity;
pad = 0;
break;
default:
v4l2_err(&fmd->v4l2_dev, "Wrong bus_type: %x\n",
pdata->sensor_bus_type);
return -EINVAL;
}
if (source == NULL)
continue;
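/*
 * Enable only the link to the next FIMC instance in turn; fimc_id
 * keeps advancing across the CSIS pass below as well, so each source
 * gets a distinct default-enabled sink.
 */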
link_mask = 1 << fimc_id++;
ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
pad, link_mask);
}
for (i = 0; i < CSIS_MAX_ENTITIES; i++) {
if (fmd->csis[i].sd == NULL)
continue;
source = &fmd->csis[i].sd->entity;
pad = CSIS_PAD_SOURCE;
sensor = csi_sensors[i];
link_mask = 1 << fimc_id++;
ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
pad, link_mask);
}
/* Create immutable links between each FIMC's subdev and video node */
flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED;
for (i = 0; i < FIMC_MAX_DEVS; i++) {
if (!fmd->fimc[i])
continue;
source = &fmd->fimc[i]->vid_cap.subdev.entity;
sink = &fmd->fimc[i]->vid_cap.ve.vdev.entity;
ret = media_create_pad_link(source, FIMC_SD_PAD_SOURCE,
sink, 0, flags);
if (ret)
break;
}
ret = __fimc_md_create_flite_source_links(fmd);
if (ret < 0)
return ret;
if (fmd->use_isp)
ret = __fimc_md_create_fimc_is_links(fmd);
return ret;
}
/*
* The peripheral sensor and CAM_BLK (PIXELASYNCMx) clocks management.
*/
static void fimc_md_put_clocks(struct fimc_md *fmd)
{
int i = FIMC_MAX_CAMCLKS;
while (--i >= 0) {
if (IS_ERR(fmd->camclk[i].clock))
continue;
clk_put(fmd->camclk[i].clock);
fmd->camclk[i].clock = ERR_PTR(-EINVAL);
}
/* Writeback (PIXELASYNCMx) clocks */
for (i = 0; i < FIMC_MAX_WBCLKS; i++) {
if (IS_ERR(fmd->wbclk[i]))
continue;
clk_put(fmd->wbclk[i]);
fmd->wbclk[i] = ERR_PTR(-EINVAL);
}
}
static int fimc_md_get_clocks(struct fimc_md *fmd)
{
struct device *dev = &fmd->pdev->dev;
char clk_name[32];
struct clk *clock;
int i, ret = 0;
for (i = 0; i < FIMC_MAX_CAMCLKS; i++)
fmd->camclk[i].clock = ERR_PTR(-EINVAL);
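/*
 * Pre-mark every slot as invalid so that fimc_md_put_clocks() can be
 * called safely after a partial failure below.
 */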
for (i = 0; i < FIMC_MAX_CAMCLKS; i++) {
snprintf(clk_name, sizeof(clk_name), "sclk_cam%u", i);
clock = clk_get(dev, clk_name);
if (IS_ERR(clock)) {
dev_err(dev, "Failed to get clock: %s\n", clk_name);
ret = PTR_ERR(clock);
break;
}
fmd->camclk[i].clock = clock;
}
if (ret)
fimc_md_put_clocks(fmd);
if (!fmd->use_isp)
return 0;
/*
* For now get only PIXELASYNCM1 clock (Writeback B/ISP),
* leave PIXELASYNCM0 out for the LCD Writeback driver.
*/
fmd->wbclk[CLK_IDX_WB_A] = ERR_PTR(-EINVAL);
for (i = CLK_IDX_WB_B; i < FIMC_MAX_WBCLKS; i++) {
snprintf(clk_name, sizeof(clk_name), "pxl_async%u", i);
clock = clk_get(dev, clk_name);
if (IS_ERR(clock)) {
v4l2_err(&fmd->v4l2_dev, "Failed to get clock: %s\n",
clk_name);
ret = PTR_ERR(clock);
break;
}
fmd->wbclk[i] = clock;
}
if (ret)
fimc_md_put_clocks(fmd);
return ret;
}
static int __fimc_md_modify_pipeline(struct media_entity *entity, bool enable)
{
struct exynos_video_entity *ve;
struct fimc_pipeline *p;
struct video_device *vdev;
int ret;
vdev = media_entity_to_video_device(entity);
if (vdev->entity.use_count == 0)
return 0;
ve = vdev_to_exynos_video_entity(vdev);
p = to_fimc_pipeline(ve->pipe);
/*
* Nothing to do if we are disabling the pipeline: some link
* has been disconnected and the p->subdevs array is already cleared.
*/
if (!enable && p->subdevs[IDX_SENSOR] == NULL)
return 0;
if (enable)
ret = __fimc_pipeline_open(ve->pipe, entity, true);
else
ret = __fimc_pipeline_close(ve->pipe);
if (ret == 0 && !enable)
memset(p->subdevs, 0, sizeof(p->subdevs));
return ret;
}
/* Locking: called with entity->graph_obj.mdev->graph_mutex mutex held. */
static int __fimc_md_modify_pipelines(struct media_entity *entity, bool enable,
struct media_graph *graph)
{
struct media_entity *entity_err = entity;
int ret;
/*
* Walk current graph and call the pipeline open/close routine for each
* opened video node that belongs to the graph of entities connected
* through active links. This is needed as we cannot power on/off the
* subdevs in random order.
*/
media_graph_walk_start(graph, entity);
while ((entity = media_graph_walk_next(graph))) {
if (!is_media_entity_v4l2_video_device(entity))
continue;
ret = __fimc_md_modify_pipeline(entity, enable);
if (ret < 0)
goto err;
}
return 0;
err:
media_graph_walk_start(graph, entity_err);
while ((entity_err = media_graph_walk_next(graph))) {
if (!is_media_entity_v4l2_video_device(entity_err))
continue;
__fimc_md_modify_pipeline(entity_err, !enable);
if (entity_err == entity)
break;
}
return ret;
}
static int fimc_md_link_notify(struct media_link *link, unsigned int flags,
unsigned int notification)
{
struct media_graph *graph =
&container_of(link->graph_obj.mdev, struct fimc_md,
media_dev)->link_setup_graph;
struct media_entity *sink = link->sink->entity;
int ret = 0;
/* Before link disconnection */
if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH) {
ret = media_graph_walk_init(graph,
link->graph_obj.mdev);
if (ret)
return ret;
if (!(flags & MEDIA_LNK_FL_ENABLED))
ret = __fimc_md_modify_pipelines(sink, false, graph);
#if 0
else
/* TODO: Link state change validation */
#endif
/* After link activation */
} else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH) {
if (link->flags & MEDIA_LNK_FL_ENABLED)
ret = __fimc_md_modify_pipelines(sink, true, graph);
media_graph_walk_cleanup(graph);
}
return ret ? -EPIPE : 0;
}
static const struct media_device_ops fimc_md_ops = {
.link_notify = fimc_md_link_notify,
};
static ssize_t subdev_conf_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fimc_md *fmd = dev_get_drvdata(dev);
if (fmd->user_subdev_api)
return strscpy(buf, "Sub-device API (sub-dev)\n", PAGE_SIZE);
return strscpy(buf, "V4L2 video node only API (vid-dev)\n", PAGE_SIZE);
}
static ssize_t subdev_conf_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct fimc_md *fmd = dev_get_drvdata(dev);
bool subdev_api;
int i;
if (!strcmp(buf, "vid-dev\n"))
subdev_api = false;
else if (!strcmp(buf, "sub-dev\n"))
subdev_api = true;
else
return count;
fmd->user_subdev_api = subdev_api;
for (i = 0; i < FIMC_MAX_DEVS; i++)
if (fmd->fimc[i])
fmd->fimc[i]->vid_cap.user_subdev_api = subdev_api;
return count;
}
/*
* This device attribute selects the video pipeline configuration method.
* The following values are valid:
* vid-dev - for V4L2 video node API only, subdevice will be configured
* by the host driver.
* sub-dev - for media controller API, subdevs must be configured in user
* space before starting streaming.
*/
static DEVICE_ATTR_RW(subdev_conf_mode);
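/*
 * A hypothetical shell session (the exact sysfs path depends on the
 * platform device name, so treat it as illustrative only):
 *
 *   # cat /sys/devices/platform/camera/subdev_conf_mode
 *   Sub-device API (sub-dev)
 *   # echo vid-dev > /sys/devices/platform/camera/subdev_conf_mode
 */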
static int cam_clk_prepare(struct clk_hw *hw)
{
struct cam_clk *camclk = to_cam_clk(hw);
if (camclk->fmd->pmf == NULL)
return -ENODEV;
return pm_runtime_resume_and_get(camclk->fmd->pmf);
}
static void cam_clk_unprepare(struct clk_hw *hw)
{
struct cam_clk *camclk = to_cam_clk(hw);
if (camclk->fmd->pmf == NULL)
return;
pm_runtime_put_sync(camclk->fmd->pmf);
}
static const struct clk_ops cam_clk_ops = {
.prepare = cam_clk_prepare,
.unprepare = cam_clk_unprepare,
};
static void fimc_md_unregister_clk_provider(struct fimc_md *fmd)
{
struct cam_clk_provider *cp = &fmd->clk_provider;
unsigned int i;
if (cp->of_node)
of_clk_del_provider(cp->of_node);
for (i = 0; i < cp->num_clocks; i++)
clk_unregister(cp->clks[i]);
}
static int fimc_md_register_clk_provider(struct fimc_md *fmd)
{
struct cam_clk_provider *cp = &fmd->clk_provider;
struct device *dev = &fmd->pdev->dev;
int i, ret;
for (i = 0; i < FIMC_MAX_CAMCLKS; i++) {
struct cam_clk *camclk = &cp->camclk[i];
struct clk_init_data init;
const char *p_name;
ret = of_property_read_string_index(dev->of_node,
"clock-output-names", i, &init.name);
if (ret < 0)
break;
p_name = __clk_get_name(fmd->camclk[i].clock);
/* It's safe since clk_register() will duplicate the string. */
init.parent_names = &p_name;
init.num_parents = 1;
init.ops = &cam_clk_ops;
init.flags = CLK_SET_RATE_PARENT;
camclk->hw.init = &init;
camclk->fmd = fmd;
cp->clks[i] = clk_register(NULL, &camclk->hw);
if (IS_ERR(cp->clks[i])) {
dev_err(dev, "failed to register clock: %s (%ld)\n",
init.name, PTR_ERR(cp->clks[i]));
ret = PTR_ERR(cp->clks[i]);
goto err;
}
cp->num_clocks++;
}
if (cp->num_clocks == 0) {
dev_warn(dev, "clk provider not registered\n");
return 0;
}
cp->clk_data.clks = cp->clks;
cp->clk_data.clk_num = cp->num_clocks;
cp->of_node = dev->of_node;
ret = of_clk_add_provider(dev->of_node, of_clk_src_onecell_get,
&cp->clk_data);
if (ret == 0)
return 0;
err:
fimc_md_unregister_clk_provider(fmd);
return ret;
}
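/*
 * A sketch of how a sensor node might consume one of the exported
 * sclk_cam clocks in the device tree; the node and property values
 * below are illustrative, not taken from a real board file:
 *
 *   sensor@3c {
 *           clocks = <&camera 0>;   (index into cp->clk_data.clks)
 *           clock-names = "mclk";
 *   };
 */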
static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
struct v4l2_async_connection *asd)
{
struct fimc_md *fmd = notifier_to_fimc_md(notifier);
struct fimc_sensor_info *si = NULL;
int i;
/* Find platform data for this sensor subdev */
for (i = 0; i < ARRAY_SIZE(fmd->sensor); i++)
if (fmd->sensor[i].asd == asd)
si = &fmd->sensor[i];
if (si == NULL)
return -EINVAL;
v4l2_set_subdev_hostdata(subdev, &si->pdata);
if (si->pdata.fimc_bus_type == FIMC_BUS_TYPE_ISP_WRITEBACK)
subdev->grp_id = GRP_ID_FIMC_IS_SENSOR;
else
subdev->grp_id = GRP_ID_SENSOR;
si->subdev = subdev;
v4l2_info(&fmd->v4l2_dev, "Registered sensor subdevice: %s (%d)\n",
subdev->name, fmd->num_sensors);
fmd->num_sensors++;
return 0;
}
static int subdev_notifier_complete(struct v4l2_async_notifier *notifier)
{
struct fimc_md *fmd = notifier_to_fimc_md(notifier);
int ret;
mutex_lock(&fmd->media_dev.graph_mutex);
ret = fimc_md_create_links(fmd);
if (ret < 0)
goto unlock;
ret = v4l2_device_register_subdev_nodes(&fmd->v4l2_dev);
unlock:
mutex_unlock(&fmd->media_dev.graph_mutex);
if (ret < 0)
return ret;
return media_device_register(&fmd->media_dev);
}
static const struct v4l2_async_notifier_operations subdev_notifier_ops = {
.bound = subdev_notifier_bound,
.complete = subdev_notifier_complete,
};
static int fimc_md_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct v4l2_device *v4l2_dev;
struct pinctrl *pinctrl;
struct fimc_md *fmd;
int ret;
fmd = devm_kzalloc(dev, sizeof(*fmd), GFP_KERNEL);
if (!fmd)
return -ENOMEM;
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret < 0)
return ret;
spin_lock_init(&fmd->slock);
INIT_LIST_HEAD(&fmd->pipelines);
fmd->pdev = pdev;
strscpy(fmd->media_dev.model, "Samsung S5P FIMC",
sizeof(fmd->media_dev.model));
fmd->media_dev.ops = &fimc_md_ops;
fmd->media_dev.dev = dev;
v4l2_dev = &fmd->v4l2_dev;
v4l2_dev->mdev = &fmd->media_dev;
v4l2_dev->notify = fimc_sensor_notify;
strscpy(v4l2_dev->name, "s5p-fimc-md", sizeof(v4l2_dev->name));
fmd->use_isp = fimc_md_is_isp_available(dev->of_node);
fmd->user_subdev_api = true;
media_device_init(&fmd->media_dev);
ret = v4l2_device_register(dev, &fmd->v4l2_dev);
if (ret < 0) {
v4l2_err(v4l2_dev, "Failed to register v4l2_device: %d\n", ret);
goto err_md;
}
ret = fimc_md_get_clocks(fmd);
if (ret)
goto err_v4l2dev;
pinctrl = devm_pinctrl_get(dev);
if (IS_ERR(pinctrl))
dev_dbg(dev, "Failed to get pinctrl: %pe\n", pinctrl);
platform_set_drvdata(pdev, fmd);
v4l2_async_nf_init(&fmd->subdev_notifier, &fmd->v4l2_dev);
ret = fimc_md_register_platform_entities(fmd, dev->of_node);
if (ret)
goto err_clk;
ret = fimc_md_register_sensor_entities(fmd);
if (ret)
goto err_m_ent;
ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
if (ret)
goto err_cleanup;
/*
* FIMC platform devices need to be registered before the sclk_cam
* clocks provider, as one of these devices needs to be activated
* to enable the clock.
*/
ret = fimc_md_register_clk_provider(fmd);
if (ret < 0) {
v4l2_err(v4l2_dev, "clock provider registration failed\n");
goto err_attr;
}
if (fmd->num_sensors > 0) {
fmd->subdev_notifier.ops = &subdev_notifier_ops;
fmd->num_sensors = 0;
ret = v4l2_async_nf_register(&fmd->subdev_notifier);
if (ret)
goto err_clk_p;
}
return 0;
err_clk_p:
fimc_md_unregister_clk_provider(fmd);
err_attr:
device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
err_cleanup:
v4l2_async_nf_cleanup(&fmd->subdev_notifier);
err_m_ent:
fimc_md_unregister_entities(fmd);
err_clk:
fimc_md_put_clocks(fmd);
err_v4l2dev:
v4l2_device_unregister(&fmd->v4l2_dev);
err_md:
media_device_cleanup(&fmd->media_dev);
return ret;
}
static void fimc_md_remove(struct platform_device *pdev)
{
struct fimc_md *fmd = platform_get_drvdata(pdev);
if (!fmd)
return;
fimc_md_unregister_clk_provider(fmd);
v4l2_async_nf_unregister(&fmd->subdev_notifier);
v4l2_async_nf_cleanup(&fmd->subdev_notifier);
v4l2_device_unregister(&fmd->v4l2_dev);
device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
fimc_md_unregister_entities(fmd);
fimc_md_pipelines_free(fmd);
media_device_unregister(&fmd->media_dev);
media_device_cleanup(&fmd->media_dev);
fimc_md_put_clocks(fmd);
}
static const struct platform_device_id fimc_driver_ids[] __always_unused = {
{ .name = "s5p-fimc-md" },
{ },
};
MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
static const struct of_device_id fimc_md_of_match[] = {
{ .compatible = "samsung,fimc" },
{ },
};
MODULE_DEVICE_TABLE(of, fimc_md_of_match);
static struct platform_driver fimc_md_driver = {
.probe = fimc_md_probe,
.remove_new = fimc_md_remove,
.driver = {
.of_match_table = of_match_ptr(fimc_md_of_match),
.name = "s5p-fimc-md",
}
};
static int __init fimc_md_init(void)
{
int ret;
request_module("s5p-csis");
ret = fimc_register_driver();
if (ret)
return ret;
ret = platform_driver_register(&fimc_md_driver);
if (ret)
fimc_unregister_driver();
return ret;
}
static void __exit fimc_md_exit(void)
{
platform_driver_unregister(&fimc_md_driver);
fimc_unregister_driver();
}
module_init(fimc_md_init);
module_exit(fimc_md_exit);
MODULE_AUTHOR("Sylwester Nawrocki <[email protected]>");
MODULE_DESCRIPTION("S5P FIMC camera host interface/video postprocessor driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.0.1");
| linux-master | drivers/media/platform/samsung/exynos4-is/media-dev.c |