python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
// SPDX-License-Identifier: GPL-2.0 /* * Allwinner sun8i DE2 rotation driver * * Copyright (C) 2020 Jernej Skrabec <[email protected]> */ #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mem2mem.h> #include "sun8i-formats.h" #include "sun8i-rotate.h" static inline u32 rotate_read(struct rotate_dev *dev, u32 reg) { return readl(dev->base + reg); } static inline void rotate_write(struct rotate_dev *dev, u32 reg, u32 value) { writel(value, dev->base + reg); } static inline void rotate_set_bits(struct rotate_dev *dev, u32 reg, u32 bits) { writel(readl(dev->base + reg) | bits, dev->base + reg); } static void rotate_calc_addr_pitch(dma_addr_t buffer, u32 bytesperline, u32 height, const struct rotate_format *fmt, dma_addr_t *addr, u32 *pitch) { u32 size; int i; for (i = 0; i < fmt->planes; i++) { pitch[i] = bytesperline; addr[i] = buffer; if (i > 0) pitch[i] /= fmt->hsub / fmt->bpp[i]; size = pitch[i] * height; if (i > 0) size /= fmt->vsub; buffer += size; } } static void rotate_device_run(void *priv) { struct rotate_ctx *ctx = priv; struct rotate_dev *dev = ctx->dev; struct vb2_v4l2_buffer *src, *dst; const struct rotate_format *fmt; dma_addr_t addr[3]; u32 val, pitch[3]; src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); v4l2_m2m_buf_copy_metadata(src, dst, true); val = ROTATE_GLB_CTL_MODE(ROTATE_MODE_COPY_ROTATE); if (ctx->hflip) val |= ROTATE_GLB_CTL_HFLIP; if (ctx->vflip) val |= ROTATE_GLB_CTL_VFLIP; val |= ROTATE_GLB_CTL_ROTATION(ctx->rotate / 90); if (ctx->rotate != 90 && ctx->rotate != 270) val |= ROTATE_GLB_CTL_BURST_LEN(ROTATE_BURST_64); else val |= ROTATE_GLB_CTL_BURST_LEN(ROTATE_BURST_8); rotate_write(dev, 
ROTATE_GLB_CTL, val); fmt = rotate_find_format(ctx->src_fmt.pixelformat); if (!fmt) return; rotate_write(dev, ROTATE_IN_FMT, ROTATE_IN_FMT_FORMAT(fmt->hw_format)); rotate_calc_addr_pitch(vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0), ctx->src_fmt.bytesperline, ctx->src_fmt.height, fmt, addr, pitch); rotate_write(dev, ROTATE_IN_SIZE, ROTATE_SIZE(ctx->src_fmt.width, ctx->src_fmt.height)); rotate_write(dev, ROTATE_IN_PITCH0, pitch[0]); rotate_write(dev, ROTATE_IN_PITCH1, pitch[1]); rotate_write(dev, ROTATE_IN_PITCH2, pitch[2]); rotate_write(dev, ROTATE_IN_ADDRL0, addr[0]); rotate_write(dev, ROTATE_IN_ADDRL1, addr[1]); rotate_write(dev, ROTATE_IN_ADDRL2, addr[2]); rotate_write(dev, ROTATE_IN_ADDRH0, 0); rotate_write(dev, ROTATE_IN_ADDRH1, 0); rotate_write(dev, ROTATE_IN_ADDRH2, 0); fmt = rotate_find_format(ctx->dst_fmt.pixelformat); if (!fmt) return; rotate_calc_addr_pitch(vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0), ctx->dst_fmt.bytesperline, ctx->dst_fmt.height, fmt, addr, pitch); rotate_write(dev, ROTATE_OUT_SIZE, ROTATE_SIZE(ctx->dst_fmt.width, ctx->dst_fmt.height)); rotate_write(dev, ROTATE_OUT_PITCH0, pitch[0]); rotate_write(dev, ROTATE_OUT_PITCH1, pitch[1]); rotate_write(dev, ROTATE_OUT_PITCH2, pitch[2]); rotate_write(dev, ROTATE_OUT_ADDRL0, addr[0]); rotate_write(dev, ROTATE_OUT_ADDRL1, addr[1]); rotate_write(dev, ROTATE_OUT_ADDRL2, addr[2]); rotate_write(dev, ROTATE_OUT_ADDRH0, 0); rotate_write(dev, ROTATE_OUT_ADDRH1, 0); rotate_write(dev, ROTATE_OUT_ADDRH2, 0); rotate_set_bits(dev, ROTATE_INT, ROTATE_INT_FINISH_IRQ_EN); rotate_set_bits(dev, ROTATE_GLB_CTL, ROTATE_GLB_CTL_START); } static irqreturn_t rotate_irq(int irq, void *data) { struct vb2_v4l2_buffer *buffer; struct rotate_dev *dev = data; struct rotate_ctx *ctx; unsigned int val; ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev); if (!ctx) { v4l2_err(&dev->v4l2_dev, "Instance released before the end of transaction\n"); return IRQ_NONE; } val = rotate_read(dev, ROTATE_INT); if (!(val & 
ROTATE_INT_FINISH_IRQ)) return IRQ_NONE; /* clear flag and disable irq */ rotate_write(dev, ROTATE_INT, ROTATE_INT_FINISH_IRQ); buffer = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_DONE); buffer = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_DONE); v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx); return IRQ_HANDLED; } static inline struct rotate_ctx *rotate_file2ctx(struct file *file) { return container_of(file->private_data, struct rotate_ctx, fh); } static void rotate_prepare_format(struct v4l2_pix_format *pix_fmt) { unsigned int height, width, alignment, sizeimage, size, bpl; const struct rotate_format *fmt; int i; fmt = rotate_find_format(pix_fmt->pixelformat); if (!fmt) return; width = ALIGN(pix_fmt->width, fmt->hsub); height = ALIGN(pix_fmt->height, fmt->vsub); /* all pitches have to be 16 byte aligned */ alignment = 16; if (fmt->planes > 1) alignment *= fmt->hsub / fmt->bpp[1]; bpl = ALIGN(width * fmt->bpp[0], alignment); sizeimage = 0; for (i = 0; i < fmt->planes; i++) { size = bpl * height; if (i > 0) { size *= fmt->bpp[i]; size /= fmt->hsub; size /= fmt->vsub; } sizeimage += size; } pix_fmt->width = width; pix_fmt->height = height; pix_fmt->bytesperline = bpl; pix_fmt->sizeimage = sizeimage; } static int rotate_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strscpy(cap->driver, ROTATE_NAME, sizeof(cap->driver)); strscpy(cap->card, ROTATE_NAME, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", ROTATE_NAME); return 0; } static int rotate_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return rotate_enum_fmt(f, true); } static int rotate_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return rotate_enum_fmt(f, false); } static int rotate_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *fsize) { const struct rotate_format *fmt; if (fsize->index != 
0) return -EINVAL; fmt = rotate_find_format(fsize->pixel_format); if (!fmt) return -EINVAL; fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; fsize->stepwise.min_width = ROTATE_MIN_WIDTH; fsize->stepwise.min_height = ROTATE_MIN_HEIGHT; fsize->stepwise.max_width = ROTATE_MAX_WIDTH; fsize->stepwise.max_height = ROTATE_MAX_HEIGHT; fsize->stepwise.step_width = fmt->hsub; fsize->stepwise.step_height = fmt->vsub; return 0; } static int rotate_set_cap_format(struct rotate_ctx *ctx, struct v4l2_pix_format *f, u32 rotate) { const struct rotate_format *fmt; fmt = rotate_find_format(ctx->src_fmt.pixelformat); if (!fmt) return -EINVAL; if (fmt->flags & ROTATE_FLAG_YUV) f->pixelformat = V4L2_PIX_FMT_YUV420; else f->pixelformat = ctx->src_fmt.pixelformat; f->field = V4L2_FIELD_NONE; if (rotate == 90 || rotate == 270) { f->width = ctx->src_fmt.height; f->height = ctx->src_fmt.width; } else { f->width = ctx->src_fmt.width; f->height = ctx->src_fmt.height; } rotate_prepare_format(f); return 0; } static int rotate_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct rotate_ctx *ctx = rotate_file2ctx(file); f->fmt.pix = ctx->dst_fmt; return 0; } static int rotate_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct rotate_ctx *ctx = rotate_file2ctx(file); f->fmt.pix = ctx->src_fmt; return 0; } static int rotate_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct rotate_ctx *ctx = rotate_file2ctx(file); return rotate_set_cap_format(ctx, &f->fmt.pix, ctx->rotate); } static int rotate_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { if (!rotate_find_format(f->fmt.pix.pixelformat)) f->fmt.pix.pixelformat = V4L2_PIX_FMT_ARGB32; if (f->fmt.pix.width < ROTATE_MIN_WIDTH) f->fmt.pix.width = ROTATE_MIN_WIDTH; if (f->fmt.pix.height < ROTATE_MIN_HEIGHT) f->fmt.pix.height = ROTATE_MIN_HEIGHT; if (f->fmt.pix.width > ROTATE_MAX_WIDTH) f->fmt.pix.width = ROTATE_MAX_WIDTH; if (f->fmt.pix.height > 
ROTATE_MAX_HEIGHT) f->fmt.pix.height = ROTATE_MAX_HEIGHT; f->fmt.pix.field = V4L2_FIELD_NONE; rotate_prepare_format(&f->fmt.pix); return 0; } static int rotate_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct rotate_ctx *ctx = rotate_file2ctx(file); struct vb2_queue *vq; int ret; ret = rotate_try_fmt_vid_cap(file, priv, f); if (ret) return ret; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); if (vb2_is_busy(vq)) return -EBUSY; ctx->dst_fmt = f->fmt.pix; return 0; } static int rotate_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct rotate_ctx *ctx = rotate_file2ctx(file); struct vb2_queue *vq; int ret; ret = rotate_try_fmt_vid_out(file, priv, f); if (ret) return ret; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); if (vb2_is_busy(vq)) return -EBUSY; /* * Capture queue has to be also checked, because format and size * depends on output format and size. */ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); if (vb2_is_busy(vq)) return -EBUSY; ctx->src_fmt = f->fmt.pix; /* Propagate colorspace information to capture. 
*/ ctx->dst_fmt.colorspace = f->fmt.pix.colorspace; ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func; ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc; ctx->dst_fmt.quantization = f->fmt.pix.quantization; return rotate_set_cap_format(ctx, &ctx->dst_fmt, ctx->rotate); } static const struct v4l2_ioctl_ops rotate_ioctl_ops = { .vidioc_querycap = rotate_querycap, .vidioc_enum_framesizes = rotate_enum_framesizes, .vidioc_enum_fmt_vid_cap = rotate_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = rotate_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = rotate_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = rotate_s_fmt_vid_cap, .vidioc_enum_fmt_vid_out = rotate_enum_fmt_vid_out, .vidioc_g_fmt_vid_out = rotate_g_fmt_vid_out, .vidioc_try_fmt_vid_out = rotate_try_fmt_vid_out, .vidioc_s_fmt_vid_out = rotate_s_fmt_vid_out, .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static int rotate_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct rotate_ctx *ctx = vb2_get_drv_priv(vq); struct v4l2_pix_format *pix_fmt; if (V4L2_TYPE_IS_OUTPUT(vq->type)) pix_fmt = &ctx->src_fmt; else pix_fmt = &ctx->dst_fmt; if (*nplanes) { if (sizes[0] < pix_fmt->sizeimage) return -EINVAL; } else { sizes[0] = pix_fmt->sizeimage; *nplanes = 1; } return 0; } static int rotate_buf_prepare(struct vb2_buffer *vb) { struct vb2_queue *vq = vb->vb2_queue; struct rotate_ctx *ctx = vb2_get_drv_priv(vq); struct v4l2_pix_format *pix_fmt; if (V4L2_TYPE_IS_OUTPUT(vq->type)) 
pix_fmt = &ctx->src_fmt; else pix_fmt = &ctx->dst_fmt; if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage) return -EINVAL; vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage); return 0; } static void rotate_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct rotate_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } static void rotate_queue_cleanup(struct vb2_queue *vq, u32 state) { struct rotate_ctx *ctx = vb2_get_drv_priv(vq); struct vb2_v4l2_buffer *vbuf; do { if (V4L2_TYPE_IS_OUTPUT(vq->type)) vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); else vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); if (vbuf) v4l2_m2m_buf_done(vbuf, state); } while (vbuf); } static int rotate_start_streaming(struct vb2_queue *vq, unsigned int count) { if (V4L2_TYPE_IS_OUTPUT(vq->type)) { struct rotate_ctx *ctx = vb2_get_drv_priv(vq); struct device *dev = ctx->dev->dev; int ret; ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(dev, "Failed to enable module\n"); return ret; } } return 0; } static void rotate_stop_streaming(struct vb2_queue *vq) { if (V4L2_TYPE_IS_OUTPUT(vq->type)) { struct rotate_ctx *ctx = vb2_get_drv_priv(vq); pm_runtime_put(ctx->dev->dev); } rotate_queue_cleanup(vq, VB2_BUF_STATE_ERROR); } static const struct vb2_ops rotate_qops = { .queue_setup = rotate_queue_setup, .buf_prepare = rotate_buf_prepare, .buf_queue = rotate_buf_queue, .start_streaming = rotate_start_streaming, .stop_streaming = rotate_stop_streaming, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; static int rotate_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct rotate_ctx *ctx = priv; int ret; src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_MMAP | VB2_DMABUF; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); src_vq->min_buffers_needed = 1; src_vq->ops = &rotate_qops; src_vq->mem_ops = 
&vb2_dma_contig_memops; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; src_vq->lock = &ctx->dev->dev_mutex; src_vq->dev = ctx->dev->dev; ret = vb2_queue_init(src_vq); if (ret) return ret; dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_MMAP | VB2_DMABUF; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); dst_vq->min_buffers_needed = 2; dst_vq->ops = &rotate_qops; dst_vq->mem_ops = &vb2_dma_contig_memops; dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; dst_vq->lock = &ctx->dev->dev_mutex; dst_vq->dev = ctx->dev->dev; ret = vb2_queue_init(dst_vq); if (ret) return ret; return 0; } static int rotate_s_ctrl(struct v4l2_ctrl *ctrl) { struct rotate_ctx *ctx = container_of(ctrl->handler, struct rotate_ctx, ctrl_handler); struct v4l2_pix_format fmt; switch (ctrl->id) { case V4L2_CID_HFLIP: ctx->hflip = ctrl->val; break; case V4L2_CID_VFLIP: ctx->vflip = ctrl->val; break; case V4L2_CID_ROTATE: rotate_set_cap_format(ctx, &fmt, ctrl->val); /* Check if capture format needs to be changed */ if (fmt.width != ctx->dst_fmt.width || fmt.height != ctx->dst_fmt.height || fmt.bytesperline != ctx->dst_fmt.bytesperline || fmt.sizeimage != ctx->dst_fmt.sizeimage) { struct vb2_queue *vq; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); if (vb2_is_busy(vq)) return -EBUSY; rotate_set_cap_format(ctx, &ctx->dst_fmt, ctrl->val); } ctx->rotate = ctrl->val; break; default: return -EINVAL; } return 0; } static const struct v4l2_ctrl_ops rotate_ctrl_ops = { .s_ctrl = rotate_s_ctrl, }; static int rotate_setup_ctrls(struct rotate_ctx *ctx) { v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3); v4l2_ctrl_new_std(&ctx->ctrl_handler, &rotate_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&ctx->ctrl_handler, &rotate_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&ctx->ctrl_handler, &rotate_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0); if (ctx->ctrl_handler.error) { int err = ctx->ctrl_handler.error; 
v4l2_err(&ctx->dev->v4l2_dev, "control setup failed!\n"); v4l2_ctrl_handler_free(&ctx->ctrl_handler); return err; } return v4l2_ctrl_handler_setup(&ctx->ctrl_handler); } static int rotate_open(struct file *file) { struct rotate_dev *dev = video_drvdata(file); struct rotate_ctx *ctx = NULL; int ret; if (mutex_lock_interruptible(&dev->dev_mutex)) return -ERESTARTSYS; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { mutex_unlock(&dev->dev_mutex); return -ENOMEM; } /* default output format */ ctx->src_fmt.pixelformat = V4L2_PIX_FMT_ARGB32; ctx->src_fmt.field = V4L2_FIELD_NONE; ctx->src_fmt.width = 640; ctx->src_fmt.height = 480; rotate_prepare_format(&ctx->src_fmt); /* default capture format */ rotate_set_cap_format(ctx, &ctx->dst_fmt, ctx->rotate); v4l2_fh_init(&ctx->fh, video_devdata(file)); file->private_data = &ctx->fh; ctx->dev = dev; ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &rotate_queue_init); if (IS_ERR(ctx->fh.m2m_ctx)) { ret = PTR_ERR(ctx->fh.m2m_ctx); goto err_free; } v4l2_fh_add(&ctx->fh); ret = rotate_setup_ctrls(ctx); if (ret) goto err_free; ctx->fh.ctrl_handler = &ctx->ctrl_handler; mutex_unlock(&dev->dev_mutex); return 0; err_free: kfree(ctx); mutex_unlock(&dev->dev_mutex); return ret; } static int rotate_release(struct file *file) { struct rotate_dev *dev = video_drvdata(file); struct rotate_ctx *ctx = container_of(file->private_data, struct rotate_ctx, fh); mutex_lock(&dev->dev_mutex); v4l2_ctrl_handler_free(&ctx->ctrl_handler); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); kfree(ctx); mutex_unlock(&dev->dev_mutex); return 0; } static const struct v4l2_file_operations rotate_fops = { .owner = THIS_MODULE, .open = rotate_open, .release = rotate_release, .poll = v4l2_m2m_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = v4l2_m2m_fop_mmap, }; static const struct video_device rotate_video_device = { .name = ROTATE_NAME, .vfl_dir = VFL_DIR_M2M, .fops = &rotate_fops, .ioctl_ops = &rotate_ioctl_ops, 
.minor = -1, .release = video_device_release_empty, .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING, }; static const struct v4l2_m2m_ops rotate_m2m_ops = { .device_run = rotate_device_run, }; static int rotate_probe(struct platform_device *pdev) { struct rotate_dev *dev; struct video_device *vfd; int irq, ret; dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->vfd = rotate_video_device; dev->dev = &pdev->dev; irq = platform_get_irq(pdev, 0); if (irq <= 0) return irq; ret = devm_request_irq(dev->dev, irq, rotate_irq, 0, dev_name(dev->dev), dev); if (ret) { dev_err(dev->dev, "Failed to request IRQ\n"); return ret; } dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dev->base)) return PTR_ERR(dev->base); dev->bus_clk = devm_clk_get(dev->dev, "bus"); if (IS_ERR(dev->bus_clk)) { dev_err(dev->dev, "Failed to get bus clock\n"); return PTR_ERR(dev->bus_clk); } dev->mod_clk = devm_clk_get(dev->dev, "mod"); if (IS_ERR(dev->mod_clk)) { dev_err(dev->dev, "Failed to get mod clock\n"); return PTR_ERR(dev->mod_clk); } dev->rstc = devm_reset_control_get(dev->dev, NULL); if (IS_ERR(dev->rstc)) { dev_err(dev->dev, "Failed to get reset control\n"); return PTR_ERR(dev->rstc); } mutex_init(&dev->dev_mutex); ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) { dev_err(dev->dev, "Failed to register V4L2 device\n"); return ret; } vfd = &dev->vfd; vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; snprintf(vfd->name, sizeof(vfd->name), "%s", rotate_video_device.name); video_set_drvdata(vfd, dev); ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto err_v4l2; } v4l2_info(&dev->v4l2_dev, "Device registered as /dev/video%d\n", vfd->num); dev->m2m_dev = v4l2_m2m_init(&rotate_m2m_ops); if (IS_ERR(dev->m2m_dev)) { v4l2_err(&dev->v4l2_dev, "Failed to initialize V4L2 M2M device\n"); ret = PTR_ERR(dev->m2m_dev); goto err_video; } 
platform_set_drvdata(pdev, dev); pm_runtime_enable(dev->dev); return 0; err_video: video_unregister_device(&dev->vfd); err_v4l2: v4l2_device_unregister(&dev->v4l2_dev); return ret; } static void rotate_remove(struct platform_device *pdev) { struct rotate_dev *dev = platform_get_drvdata(pdev); v4l2_m2m_release(dev->m2m_dev); video_unregister_device(&dev->vfd); v4l2_device_unregister(&dev->v4l2_dev); pm_runtime_force_suspend(&pdev->dev); } static int rotate_runtime_resume(struct device *device) { struct rotate_dev *dev = dev_get_drvdata(device); int ret; ret = clk_prepare_enable(dev->bus_clk); if (ret) { dev_err(dev->dev, "Failed to enable bus clock\n"); return ret; } ret = clk_prepare_enable(dev->mod_clk); if (ret) { dev_err(dev->dev, "Failed to enable mod clock\n"); goto err_bus_clk; } ret = reset_control_deassert(dev->rstc); if (ret) { dev_err(dev->dev, "Failed to apply reset\n"); goto err_mod_clk; } return 0; err_mod_clk: clk_disable_unprepare(dev->mod_clk); err_bus_clk: clk_disable_unprepare(dev->bus_clk); return ret; } static int rotate_runtime_suspend(struct device *device) { struct rotate_dev *dev = dev_get_drvdata(device); reset_control_assert(dev->rstc); clk_disable_unprepare(dev->mod_clk); clk_disable_unprepare(dev->bus_clk); return 0; } static const struct of_device_id rotate_dt_match[] = { { .compatible = "allwinner,sun8i-a83t-de2-rotate" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, rotate_dt_match); static const struct dev_pm_ops rotate_pm_ops = { .runtime_resume = rotate_runtime_resume, .runtime_suspend = rotate_runtime_suspend, }; static struct platform_driver rotate_driver = { .probe = rotate_probe, .remove_new = rotate_remove, .driver = { .name = ROTATE_NAME, .of_match_table = rotate_dt_match, .pm = &rotate_pm_ops, }, }; module_platform_driver(rotate_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jernej Skrabec <[email protected]>"); MODULE_DESCRIPTION("Allwinner DE2 rotate driver");
linux-master
drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2020 Jernej Skrabec <[email protected]> */ #include "sun8i-formats.h" #include "sun8i-rotate.h" /* * Formats not included in array: * ROTATE_FORMAT_BGR565 * ROTATE_FORMAT_VYUV */ static const struct rotate_format rotate_formats[] = { { .fourcc = V4L2_PIX_FMT_ARGB32, .hw_format = ROTATE_FORMAT_ARGB32, .planes = 1, .bpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_ABGR32, .hw_format = ROTATE_FORMAT_ABGR32, .planes = 1, .bpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_RGBA32, .hw_format = ROTATE_FORMAT_RGBA32, .planes = 1, .bpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_BGRA32, .hw_format = ROTATE_FORMAT_BGRA32, .planes = 1, .bpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_XRGB32, .hw_format = ROTATE_FORMAT_XRGB32, .planes = 1, .bpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_XBGR32, .hw_format = ROTATE_FORMAT_XBGR32, .planes = 1, .bpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_RGB32, .hw_format = ROTATE_FORMAT_RGBX32, .planes = 1, .bpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_BGR32, .hw_format = ROTATE_FORMAT_BGRX32, .planes = 1, .bpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_RGB24, .hw_format = ROTATE_FORMAT_RGB24, .planes = 1, .bpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_BGR24, .hw_format = ROTATE_FORMAT_BGR24, .planes = 1, .bpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_RGB565, .hw_format = ROTATE_FORMAT_RGB565, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = 
V4L2_PIX_FMT_ARGB444, .hw_format = ROTATE_FORMAT_ARGB4444, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_ABGR444, .hw_format = ROTATE_FORMAT_ABGR4444, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_RGBA444, .hw_format = ROTATE_FORMAT_RGBA4444, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_BGRA444, .hw_format = ROTATE_FORMAT_BGRA4444, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_ARGB555, .hw_format = ROTATE_FORMAT_ARGB1555, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_ABGR555, .hw_format = ROTATE_FORMAT_ABGR1555, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_RGBA555, .hw_format = ROTATE_FORMAT_RGBA5551, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_BGRA555, .hw_format = ROTATE_FORMAT_BGRA5551, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .flags = ROTATE_FLAG_OUTPUT }, { .fourcc = V4L2_PIX_FMT_YVYU, .hw_format = ROTATE_FORMAT_YVYU, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .flags = ROTATE_FLAG_YUV }, { .fourcc = V4L2_PIX_FMT_UYVY, .hw_format = ROTATE_FORMAT_UYVY, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .flags = ROTATE_FLAG_YUV }, { .fourcc = V4L2_PIX_FMT_YUYV, .hw_format = ROTATE_FORMAT_YUYV, .planes = 1, .bpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .flags = ROTATE_FLAG_YUV }, { .fourcc = V4L2_PIX_FMT_NV61, .hw_format = ROTATE_FORMAT_NV61, .planes = 2, .bpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .flags = ROTATE_FLAG_YUV }, { .fourcc = V4L2_PIX_FMT_NV16, .hw_format = ROTATE_FORMAT_NV16, .planes = 2, .bpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1, .flags = ROTATE_FLAG_YUV }, { .fourcc = 
V4L2_PIX_FMT_YUV422P, .hw_format = ROTATE_FORMAT_YUV422P, .planes = 3, .bpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1, .flags = ROTATE_FLAG_YUV }, { .fourcc = V4L2_PIX_FMT_NV21, .hw_format = ROTATE_FORMAT_NV21, .planes = 2, .bpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .flags = ROTATE_FLAG_YUV }, { .fourcc = V4L2_PIX_FMT_NV12, .hw_format = ROTATE_FORMAT_NV12, .planes = 2, .bpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2, .flags = ROTATE_FLAG_YUV }, { .fourcc = V4L2_PIX_FMT_YUV420, .hw_format = ROTATE_FORMAT_YUV420P, .planes = 3, .bpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2, .flags = ROTATE_FLAG_YUV | ROTATE_FLAG_OUTPUT }, }; const struct rotate_format *rotate_find_format(u32 pixelformat) { unsigned int i; for (i = 0; i < ARRAY_SIZE(rotate_formats); i++) if (rotate_formats[i].fourcc == pixelformat) return &rotate_formats[i]; return NULL; } int rotate_enum_fmt(struct v4l2_fmtdesc *f, bool dst) { int i, index; index = 0; for (i = 0; i < ARRAY_SIZE(rotate_formats); i++) { /* not all formats can be used for capture buffers */ if (dst && !(rotate_formats[i].flags & ROTATE_FLAG_OUTPUT)) continue; if (index == f->index) { f->pixelformat = rotate_formats[i].fourcc; return 0; } index++; } return -EINVAL; }
linux-master
drivers/media/platform/sunxi/sun8i-rotate/sun8i_formats.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2020-2022 Bootlin
 * Author: Paul Kocialkowski <[email protected]>
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <media/mipi-csi2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>

#include "sun6i_mipi_csi2.h"
#include "sun6i_mipi_csi2_reg.h"

/* Format */

/*
 * Supported media bus codes with their MIPI CSI-2 data type and
 * bits-per-pixel. Only 8-bit and 10-bit raw Bayer formats are handled.
 */
static const struct sun6i_mipi_csi2_format sun6i_mipi_csi2_formats[] = {
	{
		.mbus_code	= MEDIA_BUS_FMT_SBGGR8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGBRG8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGRBG8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SRGGB8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
};

/* Return the format entry matching @mbus_code, or NULL when unsupported. */
static const struct sun6i_mipi_csi2_format *
sun6i_mipi_csi2_format_find(u32 mbus_code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sun6i_mipi_csi2_formats); i++)
		if (sun6i_mipi_csi2_formats[i].mbus_code == mbus_code)
			return &sun6i_mipi_csi2_formats[i];

	return NULL;
}

/* Controller */

/* Set the controller enable bit; configuration must already be done. */
static void sun6i_mipi_csi2_enable(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;

	regmap_update_bits(regmap, SUN6I_MIPI_CSI2_CTL_REG,
			   SUN6I_MIPI_CSI2_CTL_EN, SUN6I_MIPI_CSI2_CTL_EN);
}

/* Clear the controller enable bit. */
static void sun6i_mipi_csi2_disable(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;

	regmap_update_bits(regmap, SUN6I_MIPI_CSI2_CTL_REG,
			   SUN6I_MIPI_CSI2_CTL_EN, 0);
}

/*
 * Program the controller registers from the currently-set mbus format and
 * endpoint lane count. Must run before sun6i_mipi_csi2_enable().
 */
static void sun6i_mipi_csi2_configure(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;
	unsigned int lanes_count =
		csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
	struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
	const struct sun6i_mipi_csi2_format *format;
	struct device *dev = csi2_dev->dev;
	u32 version = 0;

	format = sun6i_mipi_csi2_format_find(mbus_format->code);
	if (WARN_ON(!format))
		return;

	/*
	 * The enable flow in the Allwinner BSP is a bit different: the enable
	 * and reset bits are set together before starting the CSI controller.
	 *
	 * In mainline we enable the CSI controller first (due to subdev logic).
	 * One reliable way to make this work is to deassert reset, configure
	 * registers and enable the controller when everything's ready.
	 *
	 * However, setting the version enable bit and removing it afterwards
	 * appears necessary for capture to work reliably, while replacing it
	 * with a delay doesn't do the trick.
	 */
	regmap_write(regmap, SUN6I_MIPI_CSI2_CTL_REG,
		     SUN6I_MIPI_CSI2_CTL_RESET_N |
		     SUN6I_MIPI_CSI2_CTL_VERSION_EN |
		     SUN6I_MIPI_CSI2_CTL_UNPK_EN);

	regmap_read(regmap, SUN6I_MIPI_CSI2_VERSION_REG, &version);

	regmap_update_bits(regmap, SUN6I_MIPI_CSI2_CTL_REG,
			   SUN6I_MIPI_CSI2_CTL_VERSION_EN, 0);

	dev_dbg(dev, "A31 MIPI CSI-2 version: %04x\n", version);

	regmap_write(regmap, SUN6I_MIPI_CSI2_CFG_REG,
		     SUN6I_MIPI_CSI2_CFG_CHANNEL_MODE(1) |
		     SUN6I_MIPI_CSI2_CFG_LANE_COUNT(lanes_count));

	/*
	 * Only a single virtual channel (index 0) is currently supported.
	 * While the registers do mention multiple physical channels being
	 * available (which can be configured to match a specific virtual
	 * channel or data type), it's unclear whether channels > 0 are actually
	 * connected and available and the reference source code only makes use
	 * of channel 0.
	 *
	 * Using extra channels would also require matching channels to be
	 * available on the CSI (and ISP) side, which is also unsure although
	 * some CSI implementations are said to support multiple channels for
	 * BT656 time-sharing.
	 *
	 * We still configure virtual channel numbers to ensure that virtual
	 * channel 0 only goes to channel 0.
	 */

	regmap_write(regmap, SUN6I_MIPI_CSI2_VCDT_RX_REG,
		     SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(3, 3) |
		     SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(2, 2) |
		     SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(1, 1) |
		     SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(0, 0) |
		     SUN6I_MIPI_CSI2_VCDT_RX_CH_DT(0, format->data_type));

	/* Clear any stale channel interrupt state before streaming. */
	regmap_write(regmap, SUN6I_MIPI_CSI2_CH_INT_PD_REG,
		     SUN6I_MIPI_CSI2_CH_INT_PD_CLEAR);
}

/* V4L2 Subdev */

/*
 * Start/stop the whole capture path: runtime PM, D-PHY, controller and the
 * bound source subdev. Stopping always succeeds (returns 0).
 */
static int sun6i_mipi_csi2_s_stream(struct v4l2_subdev *subdev, int on)
{
	struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
	struct v4l2_subdev *source_subdev = csi2_dev->bridge.source_subdev;
	union phy_configure_opts dphy_opts = { 0 };
	struct phy_configure_opts_mipi_dphy *dphy_cfg = &dphy_opts.mipi_dphy;
	struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
	const struct sun6i_mipi_csi2_format *format;
	struct phy *dphy = csi2_dev->dphy;
	struct device *dev = csi2_dev->dev;
	struct v4l2_ctrl *ctrl;
	unsigned int lanes_count =
		csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
	unsigned long pixel_rate;
	int ret;

	if (!source_subdev)
		return -ENODEV;

	if (!on) {
		v4l2_subdev_call(source_subdev, video, s_stream, 0);
		ret = 0;
		goto disable;
	}

	/* Runtime PM */

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* Sensor Pixel Rate */

	ctrl = v4l2_ctrl_find(source_subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
	if (!ctrl) {
		dev_err(dev, "missing sensor pixel rate\n");
		ret = -ENODEV;
		goto error_pm;
	}

	pixel_rate = (unsigned long)v4l2_ctrl_g_ctrl_int64(ctrl);
	if (!pixel_rate) {
		dev_err(dev, "missing (zero) sensor pixel rate\n");
		ret = -ENODEV;
		goto error_pm;
	}

	/* D-PHY */

	if (!lanes_count) {
		dev_err(dev, "missing (zero) MIPI CSI-2 lanes count\n");
		ret = -ENODEV;
		goto error_pm;
	}

	format = sun6i_mipi_csi2_format_find(mbus_format->code);
	if (WARN_ON(!format)) {
		ret = -ENODEV;
		goto error_pm;
	}

	phy_mipi_dphy_get_default_config(pixel_rate, format->bpp, lanes_count,
					 dphy_cfg);

	/*
	 * Note that our hardware is using DDR, which is not taken in account by
	 * phy_mipi_dphy_get_default_config when calculating hs_clk_rate from
	 * the pixel rate, lanes count and bpp.
	 *
	 * The resulting clock rate is basically the symbol rate over the whole
	 * link. The actual clock rate is calculated with division by two since
	 * DDR samples both on rising and falling edges.
	 */

	dev_dbg(dev, "A31 MIPI CSI-2 config:\n");
	dev_dbg(dev, "%ld pixels/s, %u bits/pixel, %u lanes, %lu Hz clock\n",
		pixel_rate, format->bpp, lanes_count,
		dphy_cfg->hs_clk_rate / 2);

	ret = phy_reset(dphy);
	if (ret) {
		dev_err(dev, "failed to reset MIPI D-PHY\n");
		goto error_pm;
	}

	ret = phy_configure(dphy, &dphy_opts);
	if (ret) {
		dev_err(dev, "failed to configure MIPI D-PHY\n");
		goto error_pm;
	}

	/* Controller */

	sun6i_mipi_csi2_configure(csi2_dev);
	sun6i_mipi_csi2_enable(csi2_dev);

	/* D-PHY */

	ret = phy_power_on(dphy);
	if (ret) {
		dev_err(dev, "failed to power on MIPI D-PHY\n");
		goto error_pm;
	}

	/* Source */

	/* -ENOIOCTLCMD means the source has no s_stream op; not fatal. */
	ret = v4l2_subdev_call(source_subdev, video, s_stream, 1);
	if (ret && ret != -ENOIOCTLCMD)
		goto disable;

	return 0;

disable:
	phy_power_off(dphy);
	sun6i_mipi_csi2_disable(csi2_dev);
error_pm:
	pm_runtime_put(dev);

	return ret;
}

static const struct v4l2_subdev_video_ops sun6i_mipi_csi2_video_ops = {
	.s_stream	= sun6i_mipi_csi2_s_stream,
};

/*
 * Coerce @mbus_format to something the hardware supports: fall back to the
 * first table entry for unknown codes and force raw colorimetry defaults.
 */
static void
sun6i_mipi_csi2_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
{
	if (!sun6i_mipi_csi2_format_find(mbus_format->code))
		mbus_format->code = sun6i_mipi_csi2_formats[0].mbus_code;

	mbus_format->field = V4L2_FIELD_NONE;
	mbus_format->colorspace = V4L2_COLORSPACE_RAW;
	mbus_format->quantization = V4L2_QUANTIZATION_DEFAULT;
	mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}

/* Initialize the sink pad try format with a 640x480 default. */
static int sun6i_mipi_csi2_init_cfg(struct v4l2_subdev *subdev,
				    struct v4l2_subdev_state *state)
{
	struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
	unsigned int pad = SUN6I_MIPI_CSI2_PAD_SINK;
	struct v4l2_mbus_framefmt *mbus_format =
		v4l2_subdev_get_try_format(subdev, state, pad);
	struct mutex *lock = &csi2_dev->bridge.lock;

	mutex_lock(lock);

	mbus_format->code = sun6i_mipi_csi2_formats[0].mbus_code;
	mbus_format->width = 640;
	mbus_format->height = 480;

	sun6i_mipi_csi2_mbus_format_prepare(mbus_format);

	mutex_unlock(lock);

	return 0;
}

/* Enumerate the mbus codes from sun6i_mipi_csi2_formats[]. */
static int sun6i_mipi_csi2_enum_mbus_code(struct v4l2_subdev *subdev,
					  struct v4l2_subdev_state *state,
					  struct v4l2_subdev_mbus_code_enum *code_enum)
{
	if (code_enum->index >= ARRAY_SIZE(sun6i_mipi_csi2_formats))
		return -EINVAL;

	code_enum->code = sun6i_mipi_csi2_formats[code_enum->index].mbus_code;

	return 0;
}

/* Return the TRY or ACTIVE format, guarded by the bridge lock. */
static int sun6i_mipi_csi2_get_fmt(struct v4l2_subdev *subdev,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_format *format)
{
	struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
	struct v4l2_mbus_framefmt *mbus_format = &format->format;
	struct mutex *lock = &csi2_dev->bridge.lock;

	mutex_lock(lock);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		*mbus_format = *v4l2_subdev_get_try_format(subdev, state,
							   format->pad);
	else
		*mbus_format = csi2_dev->bridge.mbus_format;

	mutex_unlock(lock);

	return 0;
}

/* Sanitize and store the TRY or ACTIVE format, guarded by the bridge lock. */
static int sun6i_mipi_csi2_set_fmt(struct v4l2_subdev *subdev,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_format *format)
{
	struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
	struct v4l2_mbus_framefmt *mbus_format = &format->format;
	struct mutex *lock = &csi2_dev->bridge.lock;

	mutex_lock(lock);

	sun6i_mipi_csi2_mbus_format_prepare(mbus_format);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		*v4l2_subdev_get_try_format(subdev, state, format->pad) =
			*mbus_format;
	else
		csi2_dev->bridge.mbus_format = *mbus_format;

	mutex_unlock(lock);

	return 0;
}

static const struct v4l2_subdev_pad_ops sun6i_mipi_csi2_pad_ops = {
	.init_cfg	= sun6i_mipi_csi2_init_cfg,
	.enum_mbus_code	= sun6i_mipi_csi2_enum_mbus_code,
	.get_fmt	= sun6i_mipi_csi2_get_fmt,
	.set_fmt	= sun6i_mipi_csi2_set_fmt,
};

static const struct v4l2_subdev_ops sun6i_mipi_csi2_subdev_ops = {
	.video	= &sun6i_mipi_csi2_video_ops,
	.pad	= &sun6i_mipi_csi2_pad_ops,
};

/* Media Entity */

static const struct media_entity_operations sun6i_mipi_csi2_entity_ops = {
	.link_validate	= v4l2_subdev_link_validate,
};

/* V4L2 Async */

/*
 * Async notifier callback: link the bound sensor's source pad to our sink
 * pad and remember the source subdev for s_stream propagation.
 */
static int
sun6i_mipi_csi2_notifier_bound(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *remote_subdev,
			       struct v4l2_async_connection *async_subdev)
{
	struct v4l2_subdev *subdev = notifier->sd;
	struct sun6i_mipi_csi2_device *csi2_dev =
		container_of(notifier, struct sun6i_mipi_csi2_device,
			     bridge.notifier);
	struct media_entity *sink_entity = &subdev->entity;
	struct media_entity *source_entity = &remote_subdev->entity;
	struct device *dev = csi2_dev->dev;
	int sink_pad_index = 0;
	int source_pad_index;
	int ret;

	ret = media_entity_get_fwnode_pad(source_entity, remote_subdev->fwnode,
					  MEDIA_PAD_FL_SOURCE);
	if (ret < 0) {
		dev_err(dev, "missing source pad in external entity %s\n",
			source_entity->name);
		return -EINVAL;
	}

	source_pad_index = ret;

	dev_dbg(dev, "creating %s:%u -> %s:%u link\n", source_entity->name,
		source_pad_index, sink_entity->name, sink_pad_index);

	ret = media_create_pad_link(source_entity, source_pad_index,
				    sink_entity, sink_pad_index,
				    MEDIA_LNK_FL_ENABLED |
				    MEDIA_LNK_FL_IMMUTABLE);
	if (ret) {
		dev_err(dev, "failed to create %s:%u -> %s:%u link\n",
			source_entity->name, source_pad_index,
			sink_entity->name, sink_pad_index);
		return ret;
	}

	csi2_dev->bridge.source_subdev = remote_subdev;

	return 0;
}

static const struct v4l2_async_notifier_operations
sun6i_mipi_csi2_notifier_ops = {
	.bound	= sun6i_mipi_csi2_notifier_bound,
};

/* Bridge */

/*
 * Parse the first fwnode endpoint as a D-PHY CSI-2 bus and register the
 * remote sensor with the async notifier. Returns -ENODEV when no endpoint
 * is described in the devicetree.
 */
static int
sun6i_mipi_csi2_bridge_source_setup(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
	struct v4l2_fwnode_endpoint *endpoint = &csi2_dev->bridge.endpoint;
	struct v4l2_async_connection *subdev_async;
	struct fwnode_handle *handle;
	struct device *dev = csi2_dev->dev;
	int ret;

	handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
						 FWNODE_GRAPH_ENDPOINT_NEXT);
	if (!handle)
		return -ENODEV;

	endpoint->bus_type = V4L2_MBUS_CSI2_DPHY;

	ret = v4l2_fwnode_endpoint_parse(handle, endpoint);
	if (ret)
		goto complete;

	subdev_async =
		v4l2_async_nf_add_fwnode_remote(notifier, handle,
						struct v4l2_async_connection);
	if (IS_ERR(subdev_async))
		ret = PTR_ERR(subdev_async);

complete:
	fwnode_handle_put(handle);

	return ret;
}

/*
 * Register the subdev, media entity/pads and async notifier. A missing
 * sensor endpoint (-ENODEV from source setup) is tolerated: the subdev is
 * still registered, only the notifier is skipped.
 */
static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct sun6i_mipi_csi2_bridge *bridge = &csi2_dev->bridge;
	struct v4l2_subdev *subdev = &bridge->subdev;
	struct v4l2_async_notifier *notifier = &bridge->notifier;
	struct media_pad *pads = bridge->pads;
	struct device *dev = csi2_dev->dev;
	bool notifier_registered = false;
	int ret;

	mutex_init(&bridge->lock);

	/* V4L2 Subdev */

	v4l2_subdev_init(subdev, &sun6i_mipi_csi2_subdev_ops);
	strscpy(subdev->name, SUN6I_MIPI_CSI2_NAME, sizeof(subdev->name));
	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	subdev->owner = THIS_MODULE;
	subdev->dev = dev;

	v4l2_set_subdevdata(subdev, csi2_dev);

	/* Media Entity */

	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	subdev->entity.ops = &sun6i_mipi_csi2_entity_ops;

	/* Media Pads */

	pads[SUN6I_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
					       MEDIA_PAD_FL_MUST_CONNECT;
	pads[SUN6I_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE |
						 MEDIA_PAD_FL_MUST_CONNECT;

	ret = media_entity_pads_init(&subdev->entity,
				     SUN6I_MIPI_CSI2_PAD_COUNT, pads);
	if (ret)
		return ret;

	/* V4L2 Async */

	v4l2_async_subdev_nf_init(notifier, subdev);
	notifier->ops = &sun6i_mipi_csi2_notifier_ops;

	ret = sun6i_mipi_csi2_bridge_source_setup(csi2_dev);
	if (ret && ret != -ENODEV)
		goto error_v4l2_notifier_cleanup;

	/* Only register the notifier when a sensor is connected. */
	if (ret != -ENODEV) {
		ret = v4l2_async_nf_register(notifier);
		if (ret < 0)
			goto error_v4l2_notifier_cleanup;

		notifier_registered = true;
	}

	/* V4L2 Subdev */

	ret = v4l2_async_register_subdev(subdev);
	if (ret < 0)
		goto error_v4l2_notifier_unregister;

	return 0;

error_v4l2_notifier_unregister:
	if (notifier_registered)
		v4l2_async_nf_unregister(notifier);

error_v4l2_notifier_cleanup:
	v4l2_async_nf_cleanup(notifier);

	media_entity_cleanup(&subdev->entity);

	return ret;
}

/* Undo sun6i_mipi_csi2_bridge_setup() in reverse order. */
static void
sun6i_mipi_csi2_bridge_cleanup(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct v4l2_subdev *subdev = &csi2_dev->bridge.subdev;
	struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;

	v4l2_async_unregister_subdev(subdev);
	v4l2_async_nf_unregister(notifier);
	v4l2_async_nf_cleanup(notifier);
	media_entity_cleanup(&subdev->entity);
}

/* Platform */

/* Runtime PM suspend: gate the module clock, then assert reset. */
static int sun6i_mipi_csi2_suspend(struct device *dev)
{
	struct sun6i_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);

	clk_disable_unprepare(csi2_dev->clock_mod);
	reset_control_assert(csi2_dev->reset);

	return 0;
}

/* Runtime PM resume: deassert reset, then ungate the module clock. */
static int sun6i_mipi_csi2_resume(struct device *dev)
{
	struct sun6i_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);
	int ret;

	ret = reset_control_deassert(csi2_dev->reset);
	if (ret) {
		dev_err(dev, "failed to deassert reset\n");
		return ret;
	}

	ret = clk_prepare_enable(csi2_dev->clock_mod);
	if (ret) {
		dev_err(dev, "failed to enable module clock\n");
		goto error_reset;
	}

	return 0;

error_reset:
	reset_control_assert(csi2_dev->reset);

	return ret;
}

static const struct dev_pm_ops sun6i_mipi_csi2_pm_ops = {
	.runtime_suspend	= sun6i_mipi_csi2_suspend,
	.runtime_resume		= sun6i_mipi_csi2_resume,
};

static const struct regmap_config sun6i_mipi_csi2_regmap_config = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
	.max_register	= 0x400,
};

/*
 * Acquire registers, clocks, reset line and D-PHY. On success, runtime PM
 * is enabled; the mod clock rate is claimed exclusively at 297 MHz.
 */
static int
sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev,
				struct platform_device *platform_dev)
{
	struct device *dev = csi2_dev->dev;
	void __iomem *io_base;
	int ret;

	/* Registers */

	io_base = devm_platform_ioremap_resource(platform_dev, 0);
	if (IS_ERR(io_base))
		return PTR_ERR(io_base);

	csi2_dev->regmap =
		devm_regmap_init_mmio_clk(dev, "bus", io_base,
					  &sun6i_mipi_csi2_regmap_config);
	if (IS_ERR(csi2_dev->regmap)) {
		dev_err(dev, "failed to init register map\n");
		return PTR_ERR(csi2_dev->regmap);
	}

	/* Clock */

	csi2_dev->clock_mod = devm_clk_get(dev, "mod");
	if (IS_ERR(csi2_dev->clock_mod)) {
		dev_err(dev, "failed to acquire mod clock\n");
		return PTR_ERR(csi2_dev->clock_mod);
	}

	ret = clk_set_rate_exclusive(csi2_dev->clock_mod, 297000000);
	if (ret) {
		dev_err(dev, "failed to set mod clock rate\n");
		return ret;
	}

	/* Reset */

	csi2_dev->reset = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(csi2_dev->reset)) {
		dev_err(dev, "failed to get reset controller\n");
		ret = PTR_ERR(csi2_dev->reset);
		goto error_clock_rate_exclusive;
	}

	/* D-PHY */

	csi2_dev->dphy = devm_phy_get(dev, "dphy");
	if (IS_ERR(csi2_dev->dphy)) {
		dev_err(dev, "failed to get MIPI D-PHY\n");
		ret = PTR_ERR(csi2_dev->dphy);
		goto error_clock_rate_exclusive;
	}

	ret = phy_init(csi2_dev->dphy);
	if (ret) {
		dev_err(dev, "failed to initialize MIPI D-PHY\n");
		goto error_clock_rate_exclusive;
	}

	/* Runtime PM */

	pm_runtime_enable(dev);

	return 0;

error_clock_rate_exclusive:
	clk_rate_exclusive_put(csi2_dev->clock_mod);

	return ret;
}

/* Release everything taken by sun6i_mipi_csi2_resources_setup(). */
static void
sun6i_mipi_csi2_resources_cleanup(struct sun6i_mipi_csi2_device *csi2_dev)
{
	pm_runtime_disable(csi2_dev->dev);
	phy_exit(csi2_dev->dphy);
	clk_rate_exclusive_put(csi2_dev->clock_mod);
}

static int sun6i_mipi_csi2_probe(struct platform_device *platform_dev)
{
	struct sun6i_mipi_csi2_device *csi2_dev;
	struct device *dev = &platform_dev->dev;
	int ret;

	csi2_dev = devm_kzalloc(dev, sizeof(*csi2_dev), GFP_KERNEL);
	if (!csi2_dev)
		return -ENOMEM;

	csi2_dev->dev = dev;
	platform_set_drvdata(platform_dev, csi2_dev);

	ret = sun6i_mipi_csi2_resources_setup(csi2_dev, platform_dev);
	if (ret)
		return ret;

	ret = sun6i_mipi_csi2_bridge_setup(csi2_dev);
	if (ret)
		goto error_resources;

	return 0;

error_resources:
	sun6i_mipi_csi2_resources_cleanup(csi2_dev);

	return ret;
}

static void sun6i_mipi_csi2_remove(struct platform_device *platform_dev)
{
	struct sun6i_mipi_csi2_device *csi2_dev =
		platform_get_drvdata(platform_dev);

	sun6i_mipi_csi2_bridge_cleanup(csi2_dev);
	sun6i_mipi_csi2_resources_cleanup(csi2_dev);
}

static const struct of_device_id sun6i_mipi_csi2_of_match[] = {
	{ .compatible = "allwinner,sun6i-a31-mipi-csi2" },
	{},
};
MODULE_DEVICE_TABLE(of, sun6i_mipi_csi2_of_match);

static struct platform_driver sun6i_mipi_csi2_platform_driver = {
	.probe	= sun6i_mipi_csi2_probe,
	.remove_new = sun6i_mipi_csi2_remove,
	.driver	= {
		.name		= SUN6I_MIPI_CSI2_NAME,
		.of_match_table	= sun6i_mipi_csi2_of_match,
		.pm		= &sun6i_mipi_csi2_pm_ops,
	},
};
module_platform_driver(sun6i_mipi_csi2_platform_driver);

MODULE_DESCRIPTION("Allwinner A31 MIPI CSI-2 Controller Driver");
MODULE_AUTHOR("Paul Kocialkowski <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 NextThing Co
 * Copyright (C) 2016-2019 Bootlin
 *
 * Author: Maxime Ripard <[email protected]>
 */

#include <linux/device.h>
#include <linux/pm_runtime.h>

#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-v4l2.h>

#include "sun4i_csi.h"

#define CSI_DEFAULT_WIDTH	640
#define CSI_DEFAULT_HEIGHT	480

/* Capture formats: mbus input code mapped to hardware output layout. */
static const struct sun4i_csi_format sun4i_csi_formats[] = {
	/* YUV422 inputs */
	{
		.mbus		= MEDIA_BUS_FMT_YUYV8_2X8,
		.fourcc		= V4L2_PIX_FMT_YUV420M,
		.input		= CSI_INPUT_YUV,
		.output		= CSI_OUTPUT_YUV_420_PLANAR,
		.num_planes	= 3,
		.bpp		= { 8, 8, 8 },
		.hsub		= 2,
		.vsub		= 2,
	},
};

/*
 * sun4i_csi_find_format - look up a format entry.
 * @fourcc: pixel format to match, or NULL to match any.
 * @mbus: media bus code to match, or NULL to match any.
 *
 * Returns the first entry matching all non-NULL criteria, or NULL.
 */
const struct sun4i_csi_format *sun4i_csi_find_format(const u32 *fourcc,
						     const u32 *mbus)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sun4i_csi_formats); i++) {
		if (fourcc && *fourcc != sun4i_csi_formats[i].fourcc)
			continue;

		if (mbus && *mbus != sun4i_csi_formats[i].mbus)
			continue;

		return &sun4i_csi_formats[i];
	}

	return NULL;
}

static int sun4i_csi_querycap(struct file *file, void *priv,
			      struct v4l2_capability *cap)
{
	strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
	strscpy(cap->card, "sun4i-csi", sizeof(cap->card));

	return 0;
}

/* Single fixed camera input (index 0). */
static int sun4i_csi_enum_input(struct file *file, void *priv,
				struct v4l2_input *inp)
{
	if (inp->index != 0)
		return -EINVAL;

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	strscpy(inp->name, "Camera", sizeof(inp->name));

	return 0;
}

static int sun4i_csi_g_input(struct file *file, void *fh,
			     unsigned int *i)
{
	*i = 0;

	return 0;
}

static int sun4i_csi_s_input(struct file *file, void *fh,
			     unsigned int i)
{
	if (i != 0)
		return -EINVAL;

	return 0;
}

/*
 * Adjust @pix in place to the closest supported multiplanar format:
 * coerce the fourcc, fix colorimetry, align/clamp dimensions and compute
 * per-plane bytesperline/sizeimage.
 */
static void _sun4i_csi_try_fmt(struct sun4i_csi *csi,
			       struct v4l2_pix_format_mplane *pix)
{
	const struct sun4i_csi_format *_fmt;
	unsigned int height, width;
	unsigned int i;

	_fmt = sun4i_csi_find_format(&pix->pixelformat, NULL);
	if (!_fmt)
		_fmt = &sun4i_csi_formats[0];

	pix->field = V4L2_FIELD_NONE;
	pix->colorspace = V4L2_COLORSPACE_SRGB;
	pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
	pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
	pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace,
							  pix->ycbcr_enc);

	pix->num_planes = _fmt->num_planes;
	pix->pixelformat = _fmt->fourcc;

	/* Align the width and height on the subsampling */
	width = ALIGN(pix->width, _fmt->hsub);
	height = ALIGN(pix->height, _fmt->vsub);

	/* Clamp the width and height to our capabilities */
	pix->width = clamp(width, _fmt->hsub, CSI_MAX_WIDTH);
	pix->height = clamp(height, _fmt->vsub, CSI_MAX_HEIGHT);

	for (i = 0; i < _fmt->num_planes; i++) {
		/* Plane 0 is never subsampled; chroma planes use hsub/vsub. */
		unsigned int hsub = i > 0 ? _fmt->hsub : 1;
		unsigned int vsub = i > 0 ? _fmt->vsub : 1;
		unsigned int bpl;

		bpl = pix->width / hsub * _fmt->bpp[i] / 8;
		pix->plane_fmt[i].bytesperline = bpl;
		pix->plane_fmt[i].sizeimage = bpl * pix->height / vsub;
	}
}

static int sun4i_csi_try_fmt_vid_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	struct sun4i_csi *csi = video_drvdata(file);

	_sun4i_csi_try_fmt(csi, &f->fmt.pix_mp);

	return 0;
}

/* Adjust the requested format, then store it as the active one. */
static int sun4i_csi_s_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct sun4i_csi *csi = video_drvdata(file);

	_sun4i_csi_try_fmt(csi, &f->fmt.pix_mp);
	csi->fmt = f->fmt.pix_mp;

	return 0;
}

static int sun4i_csi_g_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct sun4i_csi *csi = video_drvdata(file);

	f->fmt.pix_mp = csi->fmt;

	return 0;
}

static int sun4i_csi_enum_fmt_vid_cap(struct file *file, void *priv,
				      struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(sun4i_csi_formats))
		return -EINVAL;

	f->pixelformat = sun4i_csi_formats[f->index].fourcc;

	return 0;
}

static const struct v4l2_ioctl_ops sun4i_csi_ioctl_ops = {
	.vidioc_querycap		= sun4i_csi_querycap,

	.vidioc_enum_fmt_vid_cap	= sun4i_csi_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap_mplane	= sun4i_csi_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap_mplane	= sun4i_csi_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap_mplane	= sun4i_csi_try_fmt_vid_cap,

	.vidioc_enum_input		= sun4i_csi_enum_input,
	.vidioc_g_input			= sun4i_csi_g_input,
	.vidioc_s_input			= sun4i_csi_s_input,

	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
};

/*
 * Open the video node: resume the device and power up the whole media
 * pipeline before letting the file handle through. Serialized by csi->lock.
 */
static int sun4i_csi_open(struct file *file)
{
	struct sun4i_csi *csi = video_drvdata(file);
	int ret;

	ret = mutex_lock_interruptible(&csi->lock);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(csi->dev);
	if (ret < 0)
		goto err_unlock;

	ret = v4l2_pipeline_pm_get(&csi->vdev.entity);
	if (ret)
		goto err_pm_put;

	ret = v4l2_fh_open(file);
	if (ret)
		goto err_pipeline_pm_put;

	mutex_unlock(&csi->lock);

	return 0;

err_pipeline_pm_put:
	v4l2_pipeline_pm_put(&csi->vdev.entity);

err_pm_put:
	pm_runtime_put(csi->dev);

err_unlock:
	mutex_unlock(&csi->lock);

	return ret;
}

/* Release the file handle and drop the pipeline/runtime PM references. */
static int sun4i_csi_release(struct file *file)
{
	struct sun4i_csi *csi = video_drvdata(file);

	mutex_lock(&csi->lock);

	_vb2_fop_release(file, NULL);

	v4l2_pipeline_pm_put(&csi->vdev.entity);
	pm_runtime_put(csi->dev);

	mutex_unlock(&csi->lock);

	return 0;
}

static const struct v4l2_file_operations sun4i_csi_fops = {
	.owner		= THIS_MODULE,
	.open		= sun4i_csi_open,
	.release	= sun4i_csi_release,
	.unlocked_ioctl	= video_ioctl2,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
};

/* Default sink pad format: 640x480 YUYV with raw colorimetry. */
static const struct v4l2_mbus_framefmt sun4i_csi_pad_fmt_default = {
	.width = CSI_DEFAULT_WIDTH,
	.height = CSI_DEFAULT_HEIGHT,
	.code = MEDIA_BUS_FMT_YUYV8_2X8,
	.field = V4L2_FIELD_NONE,
	.colorspace = V4L2_COLORSPACE_RAW,
	.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
	.quantization = V4L2_QUANTIZATION_DEFAULT,
	.xfer_func = V4L2_XFER_FUNC_DEFAULT,
};

static int sun4i_csi_subdev_init_cfg(struct v4l2_subdev *subdev,
				     struct v4l2_subdev_state *sd_state)
{
	struct v4l2_mbus_framefmt *fmt;

	fmt = v4l2_subdev_get_try_format(subdev, sd_state, CSI_SUBDEV_SINK);
	*fmt = sun4i_csi_pad_fmt_default;

	return 0;
}

static int sun4i_csi_subdev_get_fmt(struct v4l2_subdev *subdev,
				    struct v4l2_subdev_state *sd_state,
				    struct v4l2_subdev_format *fmt)
{
	struct sun4i_csi *csi = container_of(subdev, struct sun4i_csi, subdev);
	struct v4l2_mbus_framefmt *subdev_fmt;

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		subdev_fmt = v4l2_subdev_get_try_format(subdev, sd_state,
							fmt->pad);
	else
		subdev_fmt = &csi->subdev_fmt;

	fmt->format = *subdev_fmt;

	return 0;
}

static int sun4i_csi_subdev_set_fmt(struct v4l2_subdev *subdev,
				    struct v4l2_subdev_state *sd_state,
				    struct v4l2_subdev_format *fmt)
{
	struct sun4i_csi *csi = container_of(subdev, struct sun4i_csi, subdev);
	struct v4l2_mbus_framefmt *subdev_fmt;

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		subdev_fmt = v4l2_subdev_get_try_format(subdev, sd_state,
							fmt->pad);
	else
		subdev_fmt = &csi->subdev_fmt;

	/* We can only set the format on the sink pad */
	if (fmt->pad == CSI_SUBDEV_SINK) {
		/* It's the sink, only allow changing the frame size */
		subdev_fmt->width = fmt->format.width;
		subdev_fmt->height = fmt->format.height;
		subdev_fmt->code = fmt->format.code;
	}

	fmt->format = *subdev_fmt;

	return 0;
}

static int
sun4i_csi_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
				struct v4l2_subdev_state *sd_state,
				struct v4l2_subdev_mbus_code_enum *mbus)
{
	if (mbus->index >= ARRAY_SIZE(sun4i_csi_formats))
		return -EINVAL;

	mbus->code = sun4i_csi_formats[mbus->index].mbus;

	return 0;
}

static const struct v4l2_subdev_pad_ops sun4i_csi_subdev_pad_ops = {
	.link_validate	= v4l2_subdev_link_validate_default,
	.init_cfg	= sun4i_csi_subdev_init_cfg,
	.get_fmt	= sun4i_csi_subdev_get_fmt,
	.set_fmt	= sun4i_csi_subdev_set_fmt,
	.enum_mbus_code	= sun4i_csi_subdev_enum_mbus_code,
};

const struct v4l2_subdev_ops sun4i_csi_subdev_ops = {
	.pad = &sun4i_csi_subdev_pad_ops,
};

/*
 * sun4i_csi_v4l2_register - set up and register the capture video device.
 * @csi: driver instance; uses csi->vdev, csi->queue, csi->lock and seeds
 *       csi->fmt / csi->subdev_fmt with defaults.
 *
 * Returns 0 on success or the video_register_device() error.
 */
int sun4i_csi_v4l2_register(struct sun4i_csi *csi)
{
	struct video_device *vdev = &csi->vdev;
	int ret;

	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
	vdev->v4l2_dev = &csi->v4l;
	vdev->queue = &csi->queue;
	strscpy(vdev->name, KBUILD_MODNAME, sizeof(vdev->name));
	vdev->release = video_device_release_empty;
	vdev->lock = &csi->lock;

	/* Set a default format */
	csi->fmt.pixelformat = sun4i_csi_formats[0].fourcc;
	csi->fmt.width = CSI_DEFAULT_WIDTH;
	csi->fmt.height = CSI_DEFAULT_HEIGHT;
	_sun4i_csi_try_fmt(csi, &csi->fmt);
	csi->subdev_fmt = sun4i_csi_pad_fmt_default;

	vdev->fops = &sun4i_csi_fops;
	vdev->ioctl_ops = &sun4i_csi_ioctl_ops;
	video_set_drvdata(vdev, csi);

	ret = video_register_device(&csi->vdev, VFL_TYPE_VIDEO, -1);
	if (ret)
		return ret;

	dev_info(csi->dev, "Device registered as %s\n",
		 video_device_node_name(vdev));

	return 0;
}
linux-master
drivers/media/platform/sunxi/sun4i-csi/sun4i_v4l2.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Allwinner sun4i Camera Sensor Interface (CSI) — platform driver core.
 *
 * Handles device probing (clocks, reset, registers, IRQ), media-controller
 * entity/pad setup, and the V4L2 async-notifier plumbing that binds the
 * external sensor subdevice to the capture pipeline.
 *
 * Copyright (C) 2016 NextThing Co
 * Copyright (C) 2016-2019 Bootlin
 *
 * Author: Maxime Ripard <[email protected]>
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/videodev2.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mediabus.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

#include "sun4i_csi.h"

/* Per-SoC-variant capabilities, selected through the OF match data. */
struct sun4i_csi_traits {
	unsigned int channels;	/* number of capture channels */
	unsigned int max_width;	/* NOTE(review): unit unclear from here (pixels vs bus bits) — confirm against sun4i_csi.h users */
	bool has_isp;		/* variant has an ISP clock to manage */
};

static const struct media_entity_operations sun4i_csi_video_entity_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

/*
 * Async-notifier "bound" callback: remember the sensor subdevice and locate
 * its source pad so the media link can be created at "complete" time.
 * Returns 0 on success or a negative errno if no source pad is found.
 */
static int sun4i_csi_notify_bound(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *subdev,
				  struct v4l2_async_connection *asd)
{
	struct sun4i_csi *csi = container_of(notifier, struct sun4i_csi,
					     notifier);

	csi->src_subdev = subdev;
	csi->src_pad = media_entity_get_fwnode_pad(&subdev->entity,
						   subdev->fwnode,
						   MEDIA_PAD_FL_SOURCE);
	if (csi->src_pad < 0) {
		dev_err(csi->dev, "Couldn't find output pad for subdev %s\n",
			subdev->name);
		return csi->src_pad;
	}

	dev_dbg(csi->dev, "Bound %s pad: %d\n", subdev->name, csi->src_pad);
	return 0;
}

/*
 * Async-notifier "complete" callback: all subdevices are bound, so register
 * the bridge subdev, the video node and the media device, then wire up the
 * immutable links sensor -> CSI subdev -> video node.
 */
static int sun4i_csi_notify_complete(struct v4l2_async_notifier *notifier)
{
	struct sun4i_csi *csi = container_of(notifier, struct sun4i_csi,
					     notifier);
	struct v4l2_subdev *subdev = &csi->subdev;
	struct video_device *vdev = &csi->vdev;
	int ret;

	ret = v4l2_device_register_subdev(&csi->v4l, subdev);
	if (ret < 0)
		return ret;

	ret = sun4i_csi_v4l2_register(csi);
	if (ret < 0)
		return ret;

	ret = media_device_register(&csi->mdev);
	if (ret)
		return ret;

	/* Create link from subdev to main device */
	ret = media_create_pad_link(&subdev->entity, CSI_SUBDEV_SOURCE,
				    &vdev->entity, 0,
				    MEDIA_LNK_FL_ENABLED |
				    MEDIA_LNK_FL_IMMUTABLE);
	if (ret)
		goto err_clean_media;

	ret = media_create_pad_link(&csi->src_subdev->entity, csi->src_pad,
				    &subdev->entity, CSI_SUBDEV_SINK,
				    MEDIA_LNK_FL_ENABLED |
				    MEDIA_LNK_FL_IMMUTABLE);
	if (ret)
		goto err_clean_media;

	ret = v4l2_device_register_subdev_nodes(&csi->v4l);
	if (ret < 0)
		goto err_clean_media;

	return 0;

err_clean_media:
	media_device_unregister(&csi->mdev);

	return ret;
}

static const struct v4l2_async_notifier_operations sun4i_csi_notify_ops = {
	.bound		= sun4i_csi_notify_bound,
	.complete	= sun4i_csi_notify_complete,
};

/*
 * Parse the first fwnode graph endpoint (parallel bus), record the bus
 * configuration and register the remote sensor with the async notifier.
 * Returns 0 on success, -EINVAL if no endpoint exists, or a negative errno
 * from endpoint parsing / notifier setup.
 */
static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
{
	struct v4l2_fwnode_endpoint vep = {
		.bus_type = V4L2_MBUS_PARALLEL,
	};
	struct v4l2_async_connection *asd;
	struct fwnode_handle *ep;
	int ret;

	v4l2_async_nf_init(&csi->notifier, &csi->v4l);

	ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
					     FWNODE_GRAPH_ENDPOINT_NEXT);
	if (!ep)
		return -EINVAL;

	ret = v4l2_fwnode_endpoint_parse(ep, &vep);
	if (ret)
		goto out;

	csi->bus = vep.bus.parallel;

	asd = v4l2_async_nf_add_fwnode_remote(&csi->notifier, ep,
					      struct v4l2_async_connection);
	if (IS_ERR(asd)) {
		ret = PTR_ERR(asd);
		goto out;
	}

	csi->notifier.ops = &sun4i_csi_notify_ops;

out:
	/* The endpoint reference is only needed during parsing/registration. */
	fwnode_handle_put(ep);
	return ret;
}

/*
 * Probe: allocate driver state, map registers, grab clocks/reset (all via
 * devm_*, so they need no explicit release), initialize the subdev and video
 * entities, register the DMA/vb2 side and kick off async sensor discovery.
 */
static int sun4i_csi_probe(struct platform_device *pdev)
{
	struct v4l2_subdev *subdev;
	struct video_device *vdev;
	struct sun4i_csi *csi;
	int ret;
	int irq;

	csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
	if (!csi)
		return -ENOMEM;
	platform_set_drvdata(pdev, csi);
	csi->dev = &pdev->dev;
	subdev = &csi->subdev;
	vdev = &csi->vdev;

	csi->traits = of_device_get_match_data(&pdev->dev);
	if (!csi->traits)
		return -EINVAL;

	csi->mdev.dev = csi->dev;
	strscpy(csi->mdev.model, "Allwinner Video Capture Device",
		sizeof(csi->mdev.model));
	csi->mdev.hw_revision = 0;
	media_device_init(&csi->mdev);
	csi->v4l.mdev = &csi->mdev;

	csi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi->regs))
		return PTR_ERR(csi->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	csi->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (IS_ERR(csi->bus_clk)) {
		dev_err(&pdev->dev, "Couldn't get our bus clock\n");
		return PTR_ERR(csi->bus_clk);
	}

	/* The ISP clock only exists on some variants (see traits). */
	if (csi->traits->has_isp) {
		csi->isp_clk = devm_clk_get(&pdev->dev, "isp");
		if (IS_ERR(csi->isp_clk)) {
			dev_err(&pdev->dev, "Couldn't get our ISP clock\n");
			return PTR_ERR(csi->isp_clk);
		}
	}

	csi->ram_clk = devm_clk_get(&pdev->dev, "ram");
	if (IS_ERR(csi->ram_clk)) {
		dev_err(&pdev->dev, "Couldn't get our ram clock\n");
		return PTR_ERR(csi->ram_clk);
	}

	csi->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(csi->rst)) {
		dev_err(&pdev->dev, "Couldn't get our reset line\n");
		return PTR_ERR(csi->rst);
	}

	/* Initialize subdev */
	v4l2_subdev_init(subdev, &sun4i_csi_subdev_ops);
	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	subdev->owner = THIS_MODULE;
	snprintf(subdev->name, sizeof(subdev->name), "sun4i-csi-0");
	v4l2_set_subdevdata(subdev, csi);

	csi->subdev_pads[CSI_SUBDEV_SINK].flags = MEDIA_PAD_FL_SINK |
						  MEDIA_PAD_FL_MUST_CONNECT;
	csi->subdev_pads[CSI_SUBDEV_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	ret = media_entity_pads_init(&subdev->entity, CSI_SUBDEV_PADS,
				     csi->subdev_pads);
	if (ret < 0)
		return ret;

	csi->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
	vdev->entity.ops = &sun4i_csi_video_entity_ops;
	ret = media_entity_pads_init(&vdev->entity, 1, &csi->vdev_pad);
	if (ret < 0)
		return ret;

	ret = sun4i_csi_dma_register(csi, irq);
	if (ret)
		goto err_clean_pad;

	ret = sun4i_csi_notifier_init(csi);
	if (ret)
		goto err_unregister_media;

	ret = v4l2_async_nf_register(&csi->notifier);
	if (ret) {
		dev_err(csi->dev, "Couldn't register our notifier.\n");
		goto err_unregister_media;
	}

	pm_runtime_enable(&pdev->dev);

	return 0;

err_unregister_media:
	media_device_unregister(&csi->mdev);
	sun4i_csi_dma_unregister(csi);

err_clean_pad:
	media_device_cleanup(&csi->mdev);

	return ret;
}

/* Teardown mirrors probe/notify_complete in reverse order. */
static void sun4i_csi_remove(struct platform_device *pdev)
{
	struct sun4i_csi *csi = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	v4l2_async_nf_unregister(&csi->notifier);
	v4l2_async_nf_cleanup(&csi->notifier);
	vb2_video_unregister_device(&csi->vdev);
	media_device_unregister(&csi->mdev);
	sun4i_csi_dma_unregister(csi);
	media_device_cleanup(&csi->mdev);
}

static const struct sun4i_csi_traits sun4i_a10_csi1_traits = {
	.channels = 1,
	.max_width = 24,
	.has_isp = false,
};

static const struct sun4i_csi_traits sun7i_a20_csi0_traits = {
	.channels = 4,
	.max_width = 16,
	.has_isp = true,
};

static const struct of_device_id sun4i_csi_of_match[] = {
	{ .compatible = "allwinner,sun4i-a10-csi1", .data = &sun4i_a10_csi1_traits },
	{ .compatible = "allwinner,sun7i-a20-csi0", .data = &sun7i_a20_csi0_traits },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun4i_csi_of_match);

/*
 * Runtime resume: deassert reset, enable clocks, then enable the module.
 * NOTE(review): clk_prepare_enable() return values are not checked here, and
 * isp_clk is NULL on variants without an ISP (the common clock framework is
 * expected to treat NULL clocks as no-ops) — confirm both are intentional.
 */
static int __maybe_unused sun4i_csi_runtime_resume(struct device *dev)
{
	struct sun4i_csi *csi = dev_get_drvdata(dev);

	reset_control_deassert(csi->rst);
	clk_prepare_enable(csi->bus_clk);
	clk_prepare_enable(csi->ram_clk);
	clk_set_rate(csi->isp_clk, 80000000);
	clk_prepare_enable(csi->isp_clk);

	writel(1, csi->regs + CSI_EN_REG);

	return 0;
}

/* Runtime suspend: disable clocks in reverse order, then assert reset. */
static int __maybe_unused sun4i_csi_runtime_suspend(struct device *dev)
{
	struct sun4i_csi *csi = dev_get_drvdata(dev);

	clk_disable_unprepare(csi->isp_clk);
	clk_disable_unprepare(csi->ram_clk);
	clk_disable_unprepare(csi->bus_clk);

	reset_control_assert(csi->rst);

	return 0;
}

static const struct dev_pm_ops sun4i_csi_pm_ops = {
	SET_RUNTIME_PM_OPS(sun4i_csi_runtime_suspend,
			   sun4i_csi_runtime_resume,
			   NULL)
};

static struct platform_driver sun4i_csi_driver = {
	.probe		= sun4i_csi_probe,
	.remove_new	= sun4i_csi_remove,
	.driver		= {
		.name		= "sun4i-csi",
		.of_match_table	= sun4i_csi_of_match,
		.pm		= &sun4i_csi_pm_ops,
	},
};
module_platform_driver(sun4i_csi_driver);
/* Module metadata exposed through modinfo. */
MODULE_DESCRIPTION("Allwinner A10 Camera Sensor Interface driver");
MODULE_AUTHOR("Maxime Ripard <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Allwinner sun4i CSI — videobuf2/DMA back-end.
 *
 * Implements the vb2 queue operations, the double-buffered DMA slot
 * management (including a scratch buffer used when userspace runs out of
 * queued buffers), and the frame-done interrupt handler.
 *
 * Copyright (C) 2016 NextThing Co
 * Copyright (C) 2016-2019 Bootlin
 *
 * Author: Maxime Ripard <[email protected]>
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-v4l2.h>

#include "sun4i_csi.h"

/* Driver buffer: a vb2 buffer plus its node on the pending-buffer list. */
struct sun4i_csi_buffer {
	struct vb2_v4l2_buffer	vb;
	struct list_head	list;
};

static inline struct sun4i_csi_buffer *
vb2_v4l2_to_csi_buffer(const struct vb2_v4l2_buffer *p)
{
	return container_of(p, struct sun4i_csi_buffer, vb);
}

static inline struct sun4i_csi_buffer *
vb2_to_csi_buffer(const struct vb2_buffer *p)
{
	return vb2_v4l2_to_csi_buffer(to_vb2_v4l2_buffer(p));
}

/* Start video capture by writing the capture-control register. */
static void sun4i_csi_capture_start(struct sun4i_csi *csi)
{
	writel(CSI_CPT_CTRL_VIDEO_START, csi->regs + CSI_CPT_CTRL_REG);
}

/* Stop video capture. */
static void sun4i_csi_capture_stop(struct sun4i_csi *csi)
{
	writel(0, csi->regs + CSI_CPT_CTRL_REG);
}

/*
 * vb2 .queue_setup: report/validate the per-plane buffer sizes from the
 * currently configured multi-planar format.
 */
static int sun4i_csi_queue_setup(struct vb2_queue *vq,
				 unsigned int *nbuffers,
				 unsigned int *nplanes,
				 unsigned int sizes[],
				 struct device *alloc_devs[])
{
	struct sun4i_csi *csi = vb2_get_drv_priv(vq);
	unsigned int num_planes = csi->fmt.num_planes;
	unsigned int i;

	/* Non-zero *nplanes means the caller proposed a layout: validate it. */
	if (*nplanes) {
		if (*nplanes != num_planes)
			return -EINVAL;

		for (i = 0; i < num_planes; i++)
			if (sizes[i] < csi->fmt.plane_fmt[i].sizeimage)
				return -EINVAL;
		return 0;
	}

	*nplanes = num_planes;
	for (i = 0; i < num_planes; i++)
		sizes[i] = csi->fmt.plane_fmt[i].sizeimage;

	return 0;
};

/* vb2 .buf_prepare: reject undersized planes, set each plane's payload. */
static int sun4i_csi_buffer_prepare(struct vb2_buffer *vb)
{
	struct sun4i_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
	unsigned int i;

	for (i = 0; i < csi->fmt.num_planes; i++) {
		unsigned long size = csi->fmt.plane_fmt[i].sizeimage;

		if (vb2_plane_size(vb, i) < size) {
			dev_err(csi->dev, "buffer too small (%lu < %lu)\n",
				vb2_plane_size(vb, i), size);
			return -EINVAL;
		}

		vb2_set_plane_payload(vb, i, size);
	}

	return 0;
}

/*
 * Point every plane of a hardware slot at the shared scratch buffer.
 * Used when no userspace buffer is queued; the frame is then discarded
 * (current_buf[slot] == NULL marks it as "scratch").
 */
static int sun4i_csi_setup_scratch_buffer(struct sun4i_csi *csi,
					  unsigned int slot)
{
	dma_addr_t addr = csi->scratch.paddr;
	unsigned int plane;

	dev_dbg(csi->dev,
		"No more available buffer, using the scratch buffer\n");

	for (plane = 0; plane < csi->fmt.num_planes; plane++) {
		writel(addr, csi->regs + CSI_BUF_ADDR_REG(plane, slot));

		/* Each plane lives back-to-back in the single allocation. */
		addr += csi->fmt.plane_fmt[plane].sizeimage;
	}

	csi->current_buf[slot] = NULL;
	return 0;
}

/*
 * Program a hardware slot with the next queued buffer, or fall back to the
 * scratch buffer if the queue is empty. Caller holds qlock.
 */
static int sun4i_csi_buffer_fill_slot(struct sun4i_csi *csi, unsigned int slot)
{
	struct sun4i_csi_buffer *c_buf;
	struct vb2_v4l2_buffer *v_buf;
	unsigned int plane;

	/*
	 * We should never end up in a situation where we overwrite an
	 * already filled slot.
	 */
	if (WARN_ON(csi->current_buf[slot]))
		return -EINVAL;

	if (list_empty(&csi->buf_list))
		return sun4i_csi_setup_scratch_buffer(csi, slot);

	c_buf = list_first_entry(&csi->buf_list, struct sun4i_csi_buffer, list);
	list_del_init(&c_buf->list);

	v_buf = &c_buf->vb;
	csi->current_buf[slot] = v_buf;

	for (plane = 0; plane < csi->fmt.num_planes; plane++) {
		dma_addr_t buf_addr;

		buf_addr = vb2_dma_contig_plane_dma_addr(&v_buf->vb2_buf,
							 plane);
		writel(buf_addr, csi->regs + CSI_BUF_ADDR_REG(plane, slot));
	}

	return 0;
}

/* Fill every hardware slot; used once when streaming starts. */
static int sun4i_csi_buffer_fill_all(struct sun4i_csi *csi)
{
	unsigned int slot;
	int ret;

	for (slot = 0; slot < CSI_MAX_BUFFER; slot++) {
		ret = sun4i_csi_buffer_fill_slot(csi, slot);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Complete the buffer in @slot: stamp field/sequence/timestamp and hand it
 * back to vb2 as DONE. Scratch frames (NULL slot) are silently dropped.
 */
static void sun4i_csi_buffer_mark_done(struct sun4i_csi *csi,
				       unsigned int slot,
				       unsigned int sequence)
{
	struct vb2_v4l2_buffer *v_buf;

	if (!csi->current_buf[slot]) {
		dev_dbg(csi->dev, "Scratch buffer was used, ignoring..\n");
		return;
	}

	v_buf = csi->current_buf[slot];
	v_buf->field = csi->fmt.field;
	v_buf->sequence = sequence;
	v_buf->vb2_buf.timestamp = ktime_get_ns();
	vb2_buffer_done(&v_buf->vb2_buf, VB2_BUF_STATE_DONE);

	csi->current_buf[slot] = NULL;
}

/*
 * Frame-done bookkeeping for the double-buffered engine: complete the slot
 * the hardware just finished with and refill it. Caller holds qlock.
 */
static int sun4i_csi_buffer_flip(struct sun4i_csi *csi, unsigned int sequence)
{
	u32 reg = readl(csi->regs + CSI_BUF_CTRL_REG);
	unsigned int next;

	/* Our next buffer is not the current buffer */
	next = !(reg & CSI_BUF_CTRL_DBS);

	/* Report the previous buffer as done */
	sun4i_csi_buffer_mark_done(csi, next, sequence);

	/* Put a new buffer in there */
	return sun4i_csi_buffer_fill_slot(csi, next);
}

/* vb2 .buf_queue: append to the pending list under the IRQ-safe queue lock. */
static void sun4i_csi_buffer_queue(struct vb2_buffer *vb)
{
	struct sun4i_csi *csi = vb2_get_drv_priv(vb->vb2_queue);
	struct sun4i_csi_buffer *buf = vb2_to_csi_buffer(vb);
	unsigned long flags;

	spin_lock_irqsave(&csi->qlock, flags);
	list_add_tail(&buf->list, &csi->buf_list);
	spin_unlock_irqrestore(&csi->qlock, flags);
}

/*
 * Return every pending and in-flight buffer to vb2 in @state
 * (QUEUED on failed start, ERROR on stop). Caller holds qlock.
 */
static void return_all_buffers(struct sun4i_csi *csi,
			       enum vb2_buffer_state state)
{
	struct sun4i_csi_buffer *buf, *node;
	unsigned int slot;

	list_for_each_entry_safe(buf, node, &csi->buf_list, list) {
		vb2_buffer_done(&buf->vb.vb2_buf, state);
		list_del(&buf->list);
	}

	for (slot = 0; slot < CSI_MAX_BUFFER; slot++) {
		struct vb2_v4l2_buffer *v_buf = csi->current_buf[slot];

		if (!v_buf)
			continue;

		vb2_buffer_done(&v_buf->vb2_buf, state);
		csi->current_buf[slot] = NULL;
	}
}

/*
 * vb2 .start_streaming: allocate the scratch buffer, start the media
 * pipeline, program timings/polarities/buffers, enable the frame-done
 * interrupt, start capture, then start the sensor stream.
 */
static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct sun4i_csi *csi = vb2_get_drv_priv(vq);
	struct v4l2_mbus_config_parallel *bus = &csi->bus;
	const struct sun4i_csi_format *csi_fmt;
	unsigned long href_pol, pclk_pol, vref_pol;
	unsigned long flags;
	unsigned int i;
	int ret;

	csi_fmt = sun4i_csi_find_format(&csi->fmt.pixelformat, NULL);
	if (!csi_fmt)
		return -EINVAL;

	dev_dbg(csi->dev, "Starting capture\n");

	csi->sequence = 0;

	/*
	 * We need a scratch buffer in case where we'll not have any
	 * more buffer queued so that we don't error out. One of those
	 * cases is when you end up at the last frame to capture, you
	 * don't have any buffer queued any more, and yet it doesn't
	 * really matter since you'll never reach the next buffer.
	 *
	 * Since we support the multi-planar API, we need to have a
	 * buffer for each plane. Allocating a single one large enough
	 * to hold all the buffers is simpler, so let's go for that.
	 */
	csi->scratch.size = 0;
	for (i = 0; i < csi->fmt.num_planes; i++)
		csi->scratch.size += csi->fmt.plane_fmt[i].sizeimage;

	csi->scratch.vaddr = dma_alloc_coherent(csi->dev,
						csi->scratch.size,
						&csi->scratch.paddr,
						GFP_KERNEL);
	if (!csi->scratch.vaddr) {
		dev_err(csi->dev, "Failed to allocate scratch buffer\n");
		ret = -ENOMEM;
		goto err_clear_dma_queue;
	}

	ret = video_device_pipeline_alloc_start(&csi->vdev);
	if (ret < 0)
		goto err_free_scratch_buffer;

	spin_lock_irqsave(&csi->qlock, flags);

	/* Setup timings */
	/*
	 * NOTE(review): width is doubled here — presumably the active-width
	 * register counts bytes and the formats are 2 bytes/pixel; confirm
	 * against the register definitions in sun4i_csi.h.
	 */
	writel(CSI_WIN_CTRL_W_ACTIVE(csi->fmt.width * 2),
	       csi->regs + CSI_WIN_CTRL_W_REG);
	writel(CSI_WIN_CTRL_H_ACTIVE(csi->fmt.height),
	       csi->regs + CSI_WIN_CTRL_H_REG);

	/*
	 * This hardware uses [HV]REF instead of [HV]SYNC. Based on the
	 * provided timing diagrams in the manual, positive polarity
	 * equals active high [HV]REF.
	 *
	 * When the back porch is 0, [HV]REF is more or less equivalent
	 * to [HV]SYNC inverted.
	 */
	href_pol = !!(bus->flags & V4L2_MBUS_HSYNC_ACTIVE_LOW);
	vref_pol = !!(bus->flags & V4L2_MBUS_VSYNC_ACTIVE_LOW);
	pclk_pol = !!(bus->flags & V4L2_MBUS_PCLK_SAMPLE_RISING);
	writel(CSI_CFG_INPUT_FMT(csi_fmt->input) |
	       CSI_CFG_OUTPUT_FMT(csi_fmt->output) |
	       CSI_CFG_VREF_POL(vref_pol) |
	       CSI_CFG_HREF_POL(href_pol) |
	       CSI_CFG_PCLK_POL(pclk_pol),
	       csi->regs + CSI_CFG_REG);

	/* Setup buffer length */
	writel(csi->fmt.plane_fmt[0].bytesperline,
	       csi->regs + CSI_BUF_LEN_REG);

	/* Prepare our buffers in hardware */
	ret = sun4i_csi_buffer_fill_all(csi);
	if (ret) {
		spin_unlock_irqrestore(&csi->qlock, flags);
		goto err_disable_pipeline;
	}

	/* Enable double buffering */
	writel(CSI_BUF_CTRL_DBE, csi->regs + CSI_BUF_CTRL_REG);

	/* Clear the pending interrupts */
	writel(CSI_INT_FRM_DONE, csi->regs + CSI_INT_STA_REG);

	/* Enable frame done interrupt */
	writel(CSI_INT_FRM_DONE, csi->regs + CSI_INT_EN_REG);

	sun4i_csi_capture_start(csi);

	spin_unlock_irqrestore(&csi->qlock, flags);

	ret = v4l2_subdev_call(csi->src_subdev, video, s_stream, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		goto err_disable_device;

	return 0;

err_disable_device:
	sun4i_csi_capture_stop(csi);

err_disable_pipeline:
	video_device_pipeline_stop(&csi->vdev);

err_free_scratch_buffer:
	dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
			  csi->scratch.paddr);

err_clear_dma_queue:
	spin_lock_irqsave(&csi->qlock, flags);
	return_all_buffers(csi, VB2_BUF_STATE_QUEUED);
	spin_unlock_irqrestore(&csi->qlock, flags);

	return ret;
}

/*
 * vb2 .stop_streaming: stop sensor and capture engine, return buffers as
 * ERROR, stop the pipeline and free the scratch buffer.
 */
static void sun4i_csi_stop_streaming(struct vb2_queue *vq)
{
	struct sun4i_csi *csi = vb2_get_drv_priv(vq);
	unsigned long flags;

	dev_dbg(csi->dev, "Stopping capture\n");

	v4l2_subdev_call(csi->src_subdev, video, s_stream, 0);
	sun4i_csi_capture_stop(csi);

	/* Release all active buffers */
	spin_lock_irqsave(&csi->qlock, flags);
	return_all_buffers(csi, VB2_BUF_STATE_ERROR);
	spin_unlock_irqrestore(&csi->qlock, flags);

	video_device_pipeline_stop(&csi->vdev);

	dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr,
			  csi->scratch.paddr);
}

static const struct vb2_ops sun4i_csi_qops = {
	.queue_setup		= sun4i_csi_queue_setup,
	.buf_prepare		= sun4i_csi_buffer_prepare,
	.buf_queue		= sun4i_csi_buffer_queue,
	.start_streaming	= sun4i_csi_start_streaming,
	.stop_streaming		= sun4i_csi_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};

/*
 * IRQ handler: acknowledge everything, then on frame-done flip the hardware
 * buffer slots; a flip failure stops capture to avoid corrupting memory.
 */
static irqreturn_t sun4i_csi_irq(int irq, void *data)
{
	struct sun4i_csi *csi = data;
	u32 reg;

	reg = readl(csi->regs + CSI_INT_STA_REG);

	/* Acknowledge the interrupts */
	writel(reg, csi->regs + CSI_INT_STA_REG);

	if (!(reg & CSI_INT_FRM_DONE))
		return IRQ_HANDLED;

	spin_lock(&csi->qlock);
	if (sun4i_csi_buffer_flip(csi, csi->sequence++)) {
		dev_warn(csi->dev, "%s: Flip failed\n", __func__);
		sun4i_csi_capture_stop(csi);
	}
	spin_unlock(&csi->qlock);

	return IRQ_HANDLED;
}

/*
 * Initialize locks, the buffer list and the vb2 queue, register the v4l2
 * device and request the interrupt. Returns 0 or a negative errno; on
 * failure all partially-acquired resources are released.
 */
int sun4i_csi_dma_register(struct sun4i_csi *csi, int irq)
{
	struct vb2_queue *q = &csi->queue;
	int ret;
	int i;

	spin_lock_init(&csi->qlock);
	mutex_init(&csi->lock);

	INIT_LIST_HEAD(&csi->buf_list);
	for (i = 0; i < CSI_MAX_BUFFER; i++)
		csi->current_buf[i] = NULL;

	q->min_buffers_needed = 3;
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->lock = &csi->lock;
	q->drv_priv = csi;
	q->buf_struct_size = sizeof(struct sun4i_csi_buffer);
	q->ops = &sun4i_csi_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->dev = csi->dev;

	ret = vb2_queue_init(q);
	if (ret < 0) {
		dev_err(csi->dev, "failed to initialize VB2 queue\n");
		goto err_free_mutex;
	}

	ret = v4l2_device_register(csi->dev, &csi->v4l);
	if (ret) {
		dev_err(csi->dev, "Couldn't register the v4l2 device\n");
		goto err_free_mutex;
	}

	ret = devm_request_irq(csi->dev, irq, sun4i_csi_irq, 0,
			       dev_name(csi->dev), csi);
	if (ret) {
		dev_err(csi->dev, "Couldn't register our interrupt\n");
		goto err_unregister_device;
	}

	return 0;

err_unregister_device:
	v4l2_device_unregister(&csi->v4l);

err_free_mutex:
	mutex_destroy(&csi->lock);
	return ret;
}

/* Counterpart of sun4i_csi_dma_register(). */
void sun4i_csi_dma_unregister(struct sun4i_csi *csi)
{
	v4l2_device_unregister(&csi->v4l);
	mutex_destroy(&csi->lock);
}
linux-master
drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c
// SPDX-License-Identifier: GPL-2.0 /* * Allwinner sun8i deinterlacer with scaler driver * * Copyright (C) 2019 Jernej Skrabec <[email protected]> * * Based on vim2m driver. */ #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mem2mem.h> #include "sun8i-di.h" #define FLAG_SIZE (DEINTERLACE_MAX_WIDTH * DEINTERLACE_MAX_HEIGHT / 4) static u32 deinterlace_formats[] = { V4L2_PIX_FMT_NV12, V4L2_PIX_FMT_NV21, }; static inline u32 deinterlace_read(struct deinterlace_dev *dev, u32 reg) { return readl(dev->base + reg); } static inline void deinterlace_write(struct deinterlace_dev *dev, u32 reg, u32 value) { writel(value, dev->base + reg); } static inline void deinterlace_set_bits(struct deinterlace_dev *dev, u32 reg, u32 bits) { writel(readl(dev->base + reg) | bits, dev->base + reg); } static inline void deinterlace_clr_set_bits(struct deinterlace_dev *dev, u32 reg, u32 clr, u32 set) { u32 val = readl(dev->base + reg); val &= ~clr; val |= set; writel(val, dev->base + reg); } static void deinterlace_device_run(void *priv) { struct deinterlace_ctx *ctx = priv; struct deinterlace_dev *dev = ctx->dev; u32 size, stride, width, height, val; struct vb2_v4l2_buffer *src, *dst; unsigned int hstep, vstep; dma_addr_t addr; src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); v4l2_m2m_buf_copy_metadata(src, dst, true); deinterlace_write(dev, DEINTERLACE_MOD_ENABLE, DEINTERLACE_MOD_ENABLE_EN); if (ctx->field) { deinterlace_write(dev, DEINTERLACE_TILE_FLAG0, ctx->flag1_buf_dma); deinterlace_write(dev, DEINTERLACE_TILE_FLAG1, ctx->flag2_buf_dma); } else { deinterlace_write(dev, DEINTERLACE_TILE_FLAG0, ctx->flag2_buf_dma); deinterlace_write(dev, DEINTERLACE_TILE_FLAG1, 
ctx->flag1_buf_dma); } deinterlace_write(dev, DEINTERLACE_FLAG_LINE_STRIDE, 0x200); width = ctx->src_fmt.width; height = ctx->src_fmt.height; stride = ctx->src_fmt.bytesperline; size = stride * height; addr = vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0); deinterlace_write(dev, DEINTERLACE_BUF_ADDR0, addr); deinterlace_write(dev, DEINTERLACE_BUF_ADDR1, addr + size); deinterlace_write(dev, DEINTERLACE_BUF_ADDR2, 0); deinterlace_write(dev, DEINTERLACE_LINE_STRIDE0, stride); deinterlace_write(dev, DEINTERLACE_LINE_STRIDE1, stride); deinterlace_write(dev, DEINTERLACE_CH0_IN_SIZE, DEINTERLACE_SIZE(width, height)); deinterlace_write(dev, DEINTERLACE_CH1_IN_SIZE, DEINTERLACE_SIZE(width / 2, height / 2)); val = DEINTERLACE_IN_FMT_FMT(DEINTERLACE_IN_FMT_YUV420) | DEINTERLACE_IN_FMT_MOD(DEINTERLACE_MODE_UV_COMBINED); switch (ctx->src_fmt.pixelformat) { case V4L2_PIX_FMT_NV12: val |= DEINTERLACE_IN_FMT_PS(DEINTERLACE_PS_UVUV); break; case V4L2_PIX_FMT_NV21: val |= DEINTERLACE_IN_FMT_PS(DEINTERLACE_PS_VUVU); break; } deinterlace_write(dev, DEINTERLACE_IN_FMT, val); if (ctx->prev) addr = vb2_dma_contig_plane_dma_addr(&ctx->prev->vb2_buf, 0); deinterlace_write(dev, DEINTERLACE_PRELUMA, addr); deinterlace_write(dev, DEINTERLACE_PRECHROMA, addr + size); val = DEINTERLACE_OUT_FMT_FMT(DEINTERLACE_OUT_FMT_YUV420SP); switch (ctx->src_fmt.pixelformat) { case V4L2_PIX_FMT_NV12: val |= DEINTERLACE_OUT_FMT_PS(DEINTERLACE_PS_UVUV); break; case V4L2_PIX_FMT_NV21: val |= DEINTERLACE_OUT_FMT_PS(DEINTERLACE_PS_VUVU); break; } deinterlace_write(dev, DEINTERLACE_OUT_FMT, val); width = ctx->dst_fmt.width; height = ctx->dst_fmt.height; stride = ctx->dst_fmt.bytesperline; size = stride * height; deinterlace_write(dev, DEINTERLACE_CH0_OUT_SIZE, DEINTERLACE_SIZE(width, height)); deinterlace_write(dev, DEINTERLACE_CH1_OUT_SIZE, DEINTERLACE_SIZE(width / 2, height / 2)); deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE0, stride); deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE1, stride); addr = 
vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0); deinterlace_write(dev, DEINTERLACE_WB_ADDR0, addr); deinterlace_write(dev, DEINTERLACE_WB_ADDR1, addr + size); deinterlace_write(dev, DEINTERLACE_WB_ADDR2, 0); hstep = (ctx->src_fmt.width << 16) / ctx->dst_fmt.width; vstep = (ctx->src_fmt.height << 16) / ctx->dst_fmt.height; deinterlace_write(dev, DEINTERLACE_CH0_HORZ_FACT, hstep); deinterlace_write(dev, DEINTERLACE_CH0_VERT_FACT, vstep); deinterlace_write(dev, DEINTERLACE_CH1_HORZ_FACT, hstep); deinterlace_write(dev, DEINTERLACE_CH1_VERT_FACT, vstep); deinterlace_clr_set_bits(dev, DEINTERLACE_FIELD_CTRL, DEINTERLACE_FIELD_CTRL_FIELD_CNT_MSK, DEINTERLACE_FIELD_CTRL_FIELD_CNT(ctx->field)); deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL, DEINTERLACE_FRM_CTRL_START); deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL, DEINTERLACE_FRM_CTRL_REG_READY); deinterlace_set_bits(dev, DEINTERLACE_INT_ENABLE, DEINTERLACE_INT_ENABLE_WB_EN); deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL, DEINTERLACE_FRM_CTRL_WB_EN); } static int deinterlace_job_ready(void *priv) { struct deinterlace_ctx *ctx = priv; return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 1 && v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) >= 2; } static void deinterlace_job_abort(void *priv) { struct deinterlace_ctx *ctx = priv; /* Will cancel the transaction in the next interrupt handler */ ctx->aborting = 1; } static irqreturn_t deinterlace_irq(int irq, void *data) { struct deinterlace_dev *dev = data; struct vb2_v4l2_buffer *src, *dst; enum vb2_buffer_state state; struct deinterlace_ctx *ctx; unsigned int val; ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev); if (!ctx) { v4l2_err(&dev->v4l2_dev, "Instance released before the end of transaction\n"); return IRQ_NONE; } val = deinterlace_read(dev, DEINTERLACE_INT_STATUS); if (!(val & DEINTERLACE_INT_STATUS_WRITEBACK)) return IRQ_NONE; deinterlace_write(dev, DEINTERLACE_INT_ENABLE, 0); deinterlace_set_bits(dev, DEINTERLACE_INT_STATUS, DEINTERLACE_INT_STATUS_WRITEBACK); 
deinterlace_write(dev, DEINTERLACE_MOD_ENABLE, 0); deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL, DEINTERLACE_FRM_CTRL_START, 0); val = deinterlace_read(dev, DEINTERLACE_STATUS); if (val & DEINTERLACE_STATUS_WB_ERROR) state = VB2_BUF_STATE_ERROR; else state = VB2_BUF_STATE_DONE; dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); v4l2_m2m_buf_done(dst, state); if (ctx->field != ctx->first_field || ctx->aborting) { ctx->field = ctx->first_field; src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); if (ctx->prev) v4l2_m2m_buf_done(ctx->prev, state); ctx->prev = src; v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx); } else { ctx->field = !ctx->first_field; deinterlace_device_run(ctx); } return IRQ_HANDLED; } static void deinterlace_init(struct deinterlace_dev *dev) { u32 val; int i; deinterlace_write(dev, DEINTERLACE_BYPASS, DEINTERLACE_BYPASS_CSC); deinterlace_write(dev, DEINTERLACE_WB_LINE_STRIDE_CTRL, DEINTERLACE_WB_LINE_STRIDE_CTRL_EN); deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL, DEINTERLACE_FRM_CTRL_OUT_CTRL); deinterlace_write(dev, DEINTERLACE_AGTH_SEL, DEINTERLACE_AGTH_SEL_LINEBUF); val = DEINTERLACE_CTRL_EN | DEINTERLACE_CTRL_MODE_MIXED | DEINTERLACE_CTRL_DIAG_INTP_EN | DEINTERLACE_CTRL_TEMP_DIFF_EN; deinterlace_write(dev, DEINTERLACE_CTRL, val); deinterlace_clr_set_bits(dev, DEINTERLACE_LUMA_TH, DEINTERLACE_LUMA_TH_MIN_LUMA_MSK, DEINTERLACE_LUMA_TH_MIN_LUMA(4)); deinterlace_clr_set_bits(dev, DEINTERLACE_SPAT_COMP, DEINTERLACE_SPAT_COMP_TH2_MSK, DEINTERLACE_SPAT_COMP_TH2(5)); deinterlace_clr_set_bits(dev, DEINTERLACE_TEMP_DIFF, DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH_MSK, DEINTERLACE_TEMP_DIFF_AMBIGUITY_TH(5)); val = DEINTERLACE_DIAG_INTP_TH0(60) | DEINTERLACE_DIAG_INTP_TH1(0) | DEINTERLACE_DIAG_INTP_TH3(30); deinterlace_write(dev, DEINTERLACE_DIAG_INTP, val); deinterlace_clr_set_bits(dev, DEINTERLACE_CHROMA_DIFF, DEINTERLACE_CHROMA_DIFF_TH_MSK, DEINTERLACE_CHROMA_DIFF_TH(5)); /* neutral filter coefficients */ deinterlace_set_bits(dev, DEINTERLACE_FRM_CTRL, 
DEINTERLACE_FRM_CTRL_COEF_ACCESS); readl_poll_timeout(dev->base + DEINTERLACE_STATUS, val, val & DEINTERLACE_STATUS_COEF_STATUS, 2, 40); for (i = 0; i < 32; i++) { deinterlace_write(dev, DEINTERLACE_CH0_HORZ_COEF0 + i * 4, DEINTERLACE_IDENTITY_COEF); deinterlace_write(dev, DEINTERLACE_CH0_VERT_COEF + i * 4, DEINTERLACE_IDENTITY_COEF); deinterlace_write(dev, DEINTERLACE_CH1_HORZ_COEF0 + i * 4, DEINTERLACE_IDENTITY_COEF); deinterlace_write(dev, DEINTERLACE_CH1_VERT_COEF + i * 4, DEINTERLACE_IDENTITY_COEF); } deinterlace_clr_set_bits(dev, DEINTERLACE_FRM_CTRL, DEINTERLACE_FRM_CTRL_COEF_ACCESS, 0); } static inline struct deinterlace_ctx *deinterlace_file2ctx(struct file *file) { return container_of(file->private_data, struct deinterlace_ctx, fh); } static bool deinterlace_check_format(u32 pixelformat) { unsigned int i; for (i = 0; i < ARRAY_SIZE(deinterlace_formats); i++) if (deinterlace_formats[i] == pixelformat) return true; return false; } static void deinterlace_prepare_format(struct v4l2_pix_format *pix_fmt) { unsigned int height = pix_fmt->height; unsigned int width = pix_fmt->width; unsigned int bytesperline; unsigned int sizeimage; width = clamp(width, DEINTERLACE_MIN_WIDTH, DEINTERLACE_MAX_WIDTH); height = clamp(height, DEINTERLACE_MIN_HEIGHT, DEINTERLACE_MAX_HEIGHT); bytesperline = ALIGN(width, 2); /* luma */ sizeimage = bytesperline * height; /* chroma */ sizeimage += bytesperline * height / 2; pix_fmt->width = width; pix_fmt->height = height; pix_fmt->bytesperline = bytesperline; pix_fmt->sizeimage = sizeimage; } static int deinterlace_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strscpy(cap->driver, DEINTERLACE_NAME, sizeof(cap->driver)); strscpy(cap->card, DEINTERLACE_NAME, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", DEINTERLACE_NAME); return 0; } static int deinterlace_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index < ARRAY_SIZE(deinterlace_formats)) { 
f->pixelformat = deinterlace_formats[f->index]; return 0; } return -EINVAL; } static int deinterlace_enum_framesizes(struct file *file, void *priv, struct v4l2_frmsizeenum *fsize) { if (fsize->index != 0) return -EINVAL; if (!deinterlace_check_format(fsize->pixel_format)) return -EINVAL; fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE; fsize->stepwise.min_width = DEINTERLACE_MIN_WIDTH; fsize->stepwise.min_height = DEINTERLACE_MIN_HEIGHT; fsize->stepwise.max_width = DEINTERLACE_MAX_WIDTH; fsize->stepwise.max_height = DEINTERLACE_MAX_HEIGHT; fsize->stepwise.step_width = 2; fsize->stepwise.step_height = 1; return 0; } static int deinterlace_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct deinterlace_ctx *ctx = deinterlace_file2ctx(file); f->fmt.pix = ctx->dst_fmt; return 0; } static int deinterlace_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct deinterlace_ctx *ctx = deinterlace_file2ctx(file); f->fmt.pix = ctx->src_fmt; return 0; } static int deinterlace_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { if (!deinterlace_check_format(f->fmt.pix.pixelformat)) f->fmt.pix.pixelformat = deinterlace_formats[0]; if (f->fmt.pix.field != V4L2_FIELD_NONE) f->fmt.pix.field = V4L2_FIELD_NONE; deinterlace_prepare_format(&f->fmt.pix); return 0; } static int deinterlace_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { if (!deinterlace_check_format(f->fmt.pix.pixelformat)) f->fmt.pix.pixelformat = deinterlace_formats[0]; if (f->fmt.pix.field != V4L2_FIELD_INTERLACED_TB && f->fmt.pix.field != V4L2_FIELD_INTERLACED_BT && f->fmt.pix.field != V4L2_FIELD_INTERLACED) f->fmt.pix.field = V4L2_FIELD_INTERLACED; deinterlace_prepare_format(&f->fmt.pix); return 0; } static int deinterlace_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct deinterlace_ctx *ctx = deinterlace_file2ctx(file); struct vb2_queue *vq; int ret; ret = deinterlace_try_fmt_vid_cap(file, priv, f); if 
(ret) return ret; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); if (vb2_is_busy(vq)) return -EBUSY; ctx->dst_fmt = f->fmt.pix; return 0; } static int deinterlace_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct deinterlace_ctx *ctx = deinterlace_file2ctx(file); struct vb2_queue *vq; int ret; ret = deinterlace_try_fmt_vid_out(file, priv, f); if (ret) return ret; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); if (vb2_is_busy(vq)) return -EBUSY; ctx->src_fmt = f->fmt.pix; /* Propagate colorspace information to capture. */ ctx->dst_fmt.colorspace = f->fmt.pix.colorspace; ctx->dst_fmt.xfer_func = f->fmt.pix.xfer_func; ctx->dst_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc; ctx->dst_fmt.quantization = f->fmt.pix.quantization; return 0; } static const struct v4l2_ioctl_ops deinterlace_ioctl_ops = { .vidioc_querycap = deinterlace_querycap, .vidioc_enum_framesizes = deinterlace_enum_framesizes, .vidioc_enum_fmt_vid_cap = deinterlace_enum_fmt, .vidioc_g_fmt_vid_cap = deinterlace_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = deinterlace_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = deinterlace_s_fmt_vid_cap, .vidioc_enum_fmt_vid_out = deinterlace_enum_fmt, .vidioc_g_fmt_vid_out = deinterlace_g_fmt_vid_out, .vidioc_try_fmt_vid_out = deinterlace_try_fmt_vid_out, .vidioc_s_fmt_vid_out = deinterlace_s_fmt_vid_out, .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, }; static int deinterlace_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq); struct v4l2_pix_format *pix_fmt; if 
(V4L2_TYPE_IS_OUTPUT(vq->type)) pix_fmt = &ctx->src_fmt; else pix_fmt = &ctx->dst_fmt; if (*nplanes) { if (sizes[0] < pix_fmt->sizeimage) return -EINVAL; } else { sizes[0] = pix_fmt->sizeimage; *nplanes = 1; } return 0; } static int deinterlace_buf_prepare(struct vb2_buffer *vb) { struct vb2_queue *vq = vb->vb2_queue; struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq); struct v4l2_pix_format *pix_fmt; if (V4L2_TYPE_IS_OUTPUT(vq->type)) pix_fmt = &ctx->src_fmt; else pix_fmt = &ctx->dst_fmt; if (vb2_plane_size(vb, 0) < pix_fmt->sizeimage) return -EINVAL; vb2_set_plane_payload(vb, 0, pix_fmt->sizeimage); return 0; } static void deinterlace_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } static void deinterlace_queue_cleanup(struct vb2_queue *vq, u32 state) { struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq); struct vb2_v4l2_buffer *vbuf; do { if (V4L2_TYPE_IS_OUTPUT(vq->type)) vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); else vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); if (vbuf) v4l2_m2m_buf_done(vbuf, state); } while (vbuf); if (V4L2_TYPE_IS_OUTPUT(vq->type) && ctx->prev) v4l2_m2m_buf_done(ctx->prev, state); } static int deinterlace_start_streaming(struct vb2_queue *vq, unsigned int count) { struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq); struct device *dev = ctx->dev->dev; int ret; if (V4L2_TYPE_IS_OUTPUT(vq->type)) { ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(dev, "Failed to enable module\n"); goto err_runtime_get; } ctx->first_field = ctx->src_fmt.field == V4L2_FIELD_INTERLACED_BT; ctx->field = ctx->first_field; ctx->prev = NULL; ctx->aborting = 0; ctx->flag1_buf = dma_alloc_coherent(dev, FLAG_SIZE, &ctx->flag1_buf_dma, GFP_KERNEL); if (!ctx->flag1_buf) { ret = -ENOMEM; goto err_no_mem1; } ctx->flag2_buf = dma_alloc_coherent(dev, FLAG_SIZE, &ctx->flag2_buf_dma, GFP_KERNEL); if 
(!ctx->flag2_buf) { ret = -ENOMEM; goto err_no_mem2; } } return 0; err_no_mem2: dma_free_coherent(dev, FLAG_SIZE, ctx->flag1_buf, ctx->flag1_buf_dma); err_no_mem1: pm_runtime_put(dev); err_runtime_get: deinterlace_queue_cleanup(vq, VB2_BUF_STATE_QUEUED); return ret; } static void deinterlace_stop_streaming(struct vb2_queue *vq) { struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq); if (V4L2_TYPE_IS_OUTPUT(vq->type)) { struct device *dev = ctx->dev->dev; dma_free_coherent(dev, FLAG_SIZE, ctx->flag1_buf, ctx->flag1_buf_dma); dma_free_coherent(dev, FLAG_SIZE, ctx->flag2_buf, ctx->flag2_buf_dma); pm_runtime_put(dev); } deinterlace_queue_cleanup(vq, VB2_BUF_STATE_ERROR); } static const struct vb2_ops deinterlace_qops = { .queue_setup = deinterlace_queue_setup, .buf_prepare = deinterlace_buf_prepare, .buf_queue = deinterlace_buf_queue, .start_streaming = deinterlace_start_streaming, .stop_streaming = deinterlace_stop_streaming, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; static int deinterlace_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct deinterlace_ctx *ctx = priv; int ret; src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_MMAP | VB2_DMABUF; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); src_vq->min_buffers_needed = 1; src_vq->ops = &deinterlace_qops; src_vq->mem_ops = &vb2_dma_contig_memops; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; src_vq->lock = &ctx->dev->dev_mutex; src_vq->dev = ctx->dev->dev; ret = vb2_queue_init(src_vq); if (ret) return ret; dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_MMAP | VB2_DMABUF; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); dst_vq->min_buffers_needed = 2; dst_vq->ops = &deinterlace_qops; dst_vq->mem_ops = &vb2_dma_contig_memops; dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; dst_vq->lock = &ctx->dev->dev_mutex; dst_vq->dev = 
ctx->dev->dev; ret = vb2_queue_init(dst_vq); if (ret) return ret; return 0; } static int deinterlace_open(struct file *file) { struct deinterlace_dev *dev = video_drvdata(file); struct deinterlace_ctx *ctx = NULL; int ret; if (mutex_lock_interruptible(&dev->dev_mutex)) return -ERESTARTSYS; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { mutex_unlock(&dev->dev_mutex); return -ENOMEM; } /* default output format */ ctx->src_fmt.pixelformat = deinterlace_formats[0]; ctx->src_fmt.field = V4L2_FIELD_INTERLACED; ctx->src_fmt.width = 640; ctx->src_fmt.height = 480; deinterlace_prepare_format(&ctx->src_fmt); /* default capture format */ ctx->dst_fmt.pixelformat = deinterlace_formats[0]; ctx->dst_fmt.field = V4L2_FIELD_NONE; ctx->dst_fmt.width = 640; ctx->dst_fmt.height = 480; deinterlace_prepare_format(&ctx->dst_fmt); v4l2_fh_init(&ctx->fh, video_devdata(file)); file->private_data = &ctx->fh; ctx->dev = dev; ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &deinterlace_queue_init); if (IS_ERR(ctx->fh.m2m_ctx)) { ret = PTR_ERR(ctx->fh.m2m_ctx); goto err_free; } v4l2_fh_add(&ctx->fh); mutex_unlock(&dev->dev_mutex); return 0; err_free: kfree(ctx); mutex_unlock(&dev->dev_mutex); return ret; } static int deinterlace_release(struct file *file) { struct deinterlace_dev *dev = video_drvdata(file); struct deinterlace_ctx *ctx = container_of(file->private_data, struct deinterlace_ctx, fh); mutex_lock(&dev->dev_mutex); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); kfree(ctx); mutex_unlock(&dev->dev_mutex); return 0; } static const struct v4l2_file_operations deinterlace_fops = { .owner = THIS_MODULE, .open = deinterlace_open, .release = deinterlace_release, .poll = v4l2_m2m_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = v4l2_m2m_fop_mmap, }; static const struct video_device deinterlace_video_device = { .name = DEINTERLACE_NAME, .vfl_dir = VFL_DIR_M2M, .fops = &deinterlace_fops, .ioctl_ops = &deinterlace_ioctl_ops, .minor = -1, 
.release = video_device_release_empty, .device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING, }; static const struct v4l2_m2m_ops deinterlace_m2m_ops = { .device_run = deinterlace_device_run, .job_ready = deinterlace_job_ready, .job_abort = deinterlace_job_abort, }; static int deinterlace_probe(struct platform_device *pdev) { struct deinterlace_dev *dev; struct video_device *vfd; int irq, ret; dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->vfd = deinterlace_video_device; dev->dev = &pdev->dev; irq = platform_get_irq(pdev, 0); if (irq <= 0) return irq; ret = devm_request_irq(dev->dev, irq, deinterlace_irq, 0, dev_name(dev->dev), dev); if (ret) { dev_err(dev->dev, "Failed to request IRQ\n"); return ret; } dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dev->base)) return PTR_ERR(dev->base); dev->bus_clk = devm_clk_get(dev->dev, "bus"); if (IS_ERR(dev->bus_clk)) { dev_err(dev->dev, "Failed to get bus clock\n"); return PTR_ERR(dev->bus_clk); } dev->mod_clk = devm_clk_get(dev->dev, "mod"); if (IS_ERR(dev->mod_clk)) { dev_err(dev->dev, "Failed to get mod clock\n"); return PTR_ERR(dev->mod_clk); } dev->ram_clk = devm_clk_get(dev->dev, "ram"); if (IS_ERR(dev->ram_clk)) { dev_err(dev->dev, "Failed to get ram clock\n"); return PTR_ERR(dev->ram_clk); } dev->rstc = devm_reset_control_get(dev->dev, NULL); if (IS_ERR(dev->rstc)) { dev_err(dev->dev, "Failed to get reset control\n"); return PTR_ERR(dev->rstc); } mutex_init(&dev->dev_mutex); ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) { dev_err(dev->dev, "Failed to register V4L2 device\n"); return ret; } vfd = &dev->vfd; vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; snprintf(vfd->name, sizeof(vfd->name), "%s", deinterlace_video_device.name); video_set_drvdata(vfd, dev); ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); goto err_v4l2; } v4l2_info(&dev->v4l2_dev, 
"Device registered as /dev/video%d\n", vfd->num); dev->m2m_dev = v4l2_m2m_init(&deinterlace_m2m_ops); if (IS_ERR(dev->m2m_dev)) { v4l2_err(&dev->v4l2_dev, "Failed to initialize V4L2 M2M device\n"); ret = PTR_ERR(dev->m2m_dev); goto err_video; } platform_set_drvdata(pdev, dev); pm_runtime_enable(dev->dev); return 0; err_video: video_unregister_device(&dev->vfd); err_v4l2: v4l2_device_unregister(&dev->v4l2_dev); return ret; } static void deinterlace_remove(struct platform_device *pdev) { struct deinterlace_dev *dev = platform_get_drvdata(pdev); v4l2_m2m_release(dev->m2m_dev); video_unregister_device(&dev->vfd); v4l2_device_unregister(&dev->v4l2_dev); pm_runtime_force_suspend(&pdev->dev); } static int deinterlace_runtime_resume(struct device *device) { struct deinterlace_dev *dev = dev_get_drvdata(device); int ret; ret = clk_set_rate_exclusive(dev->mod_clk, 300000000); if (ret) { dev_err(dev->dev, "Failed to set exclusive mod clock rate\n"); return ret; } ret = clk_prepare_enable(dev->bus_clk); if (ret) { dev_err(dev->dev, "Failed to enable bus clock\n"); goto err_exclusive_rate; } ret = clk_prepare_enable(dev->mod_clk); if (ret) { dev_err(dev->dev, "Failed to enable mod clock\n"); goto err_bus_clk; } ret = clk_prepare_enable(dev->ram_clk); if (ret) { dev_err(dev->dev, "Failed to enable ram clock\n"); goto err_mod_clk; } ret = reset_control_deassert(dev->rstc); if (ret) { dev_err(dev->dev, "Failed to apply reset\n"); goto err_ram_clk; } deinterlace_init(dev); return 0; err_ram_clk: clk_disable_unprepare(dev->ram_clk); err_mod_clk: clk_disable_unprepare(dev->mod_clk); err_bus_clk: clk_disable_unprepare(dev->bus_clk); err_exclusive_rate: clk_rate_exclusive_put(dev->mod_clk); return ret; } static int deinterlace_runtime_suspend(struct device *device) { struct deinterlace_dev *dev = dev_get_drvdata(device); reset_control_assert(dev->rstc); clk_disable_unprepare(dev->ram_clk); clk_disable_unprepare(dev->mod_clk); clk_disable_unprepare(dev->bus_clk); 
clk_rate_exclusive_put(dev->mod_clk); return 0; } static const struct of_device_id deinterlace_dt_match[] = { { .compatible = "allwinner,sun8i-h3-deinterlace" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, deinterlace_dt_match); static const struct dev_pm_ops deinterlace_pm_ops = { .runtime_resume = deinterlace_runtime_resume, .runtime_suspend = deinterlace_runtime_suspend, }; static struct platform_driver deinterlace_driver = { .probe = deinterlace_probe, .remove_new = deinterlace_remove, .driver = { .name = DEINTERLACE_NAME, .of_match_table = deinterlace_dt_match, .pm = &deinterlace_pm_ops, }, }; module_platform_driver(deinterlace_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jernej Skrabec <[email protected]>"); MODULE_DESCRIPTION("Allwinner Deinterlace driver");
linux-master
drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2020-2022 Bootlin * Author: Paul Kocialkowski <[email protected]> */ #include <linux/phy/phy.h> #include <linux/regmap.h> #include "sun8i_a83t_dphy.h" #include "sun8i_a83t_mipi_csi2.h" static int sun8i_a83t_dphy_configure(struct phy *dphy, union phy_configure_opts *opts) { return phy_mipi_dphy_config_validate(&opts->mipi_dphy); } static int sun8i_a83t_dphy_power_on(struct phy *dphy) { struct sun8i_a83t_mipi_csi2_device *csi2_dev = phy_get_drvdata(dphy); struct regmap *regmap = csi2_dev->regmap; regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG, SUN8I_A83T_DPHY_CTRL_RESET_N | SUN8I_A83T_DPHY_CTRL_SHUTDOWN_N); regmap_write(regmap, SUN8I_A83T_DPHY_ANA0_REG, SUN8I_A83T_DPHY_ANA0_REXT_EN | SUN8I_A83T_DPHY_ANA0_RINT(2) | SUN8I_A83T_DPHY_ANA0_SNK(2)); return 0; }; static int sun8i_a83t_dphy_power_off(struct phy *dphy) { struct sun8i_a83t_mipi_csi2_device *csi2_dev = phy_get_drvdata(dphy); struct regmap *regmap = csi2_dev->regmap; regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG, 0); return 0; }; static const struct phy_ops sun8i_a83t_dphy_ops = { .configure = sun8i_a83t_dphy_configure, .power_on = sun8i_a83t_dphy_power_on, .power_off = sun8i_a83t_dphy_power_off, }; int sun8i_a83t_dphy_register(struct sun8i_a83t_mipi_csi2_device *csi2_dev) { struct device *dev = csi2_dev->dev; struct phy_provider *phy_provider; csi2_dev->dphy = devm_phy_create(dev, NULL, &sun8i_a83t_dphy_ops); if (IS_ERR(csi2_dev->dphy)) { dev_err(dev, "failed to create D-PHY\n"); return PTR_ERR(csi2_dev->dphy); } phy_set_drvdata(csi2_dev->dphy, csi2_dev); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); if (IS_ERR(phy_provider)) { dev_err(dev, "failed to register D-PHY provider\n"); return PTR_ERR(phy_provider); } return 0; }
linux-master
drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_dphy.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2020 Kévin L'hôpital <[email protected]>
 * Copyright 2020-2022 Bootlin
 * Author: Paul Kocialkowski <[email protected]>
 */

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <media/mipi-csi2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>

#include "sun8i_a83t_dphy.h"
#include "sun8i_a83t_mipi_csi2.h"
#include "sun8i_a83t_mipi_csi2_reg.h"

/* Format */

/* Supported media-bus codes with their CSI-2 data type and bits per pixel. */
static const struct sun8i_a83t_mipi_csi2_format
sun8i_a83t_mipi_csi2_formats[] = {
	{
		.mbus_code	= MEDIA_BUS_FMT_SBGGR8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGBRG8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGRBG8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SRGGB8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
};

/* Look up the format entry for @mbus_code, or NULL when unsupported. */
static const struct sun8i_a83t_mipi_csi2_format *
sun8i_a83t_mipi_csi2_format_find(u32 mbus_code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sun8i_a83t_mipi_csi2_formats); i++)
		if (sun8i_a83t_mipi_csi2_formats[i].mbus_code == mbus_code)
			return &sun8i_a83t_mipi_csi2_formats[i];

	return NULL;
}

/* Controller */

/* One-time controller bring-up sequence after power-on/reset. */
static void
sun8i_a83t_mipi_csi2_init(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;

	/*
	 * The Allwinner BSP sets various magic values on a bunch of registers.
	 * This is apparently a necessary initialization process that will cause
	 * the capture to fail with unsolicited interrupts hitting if skipped.
	 *
	 * Most of the registers are set to proper values later, except for the
	 * two reserved registers. They are said to hold a "hardware lock"
	 * value, without more information available.
	 */

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG, 0);
	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG,
		     SUN8I_A83T_MIPI_CSI2_CTRL_INIT_VALUE);

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_REG, 0);
	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_REG,
		     SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_INIT_VALUE);

	regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG, 0);
	regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG,
		     SUN8I_A83T_DPHY_CTRL_INIT_VALUE);

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD1_REG, 0);
	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD1_REG,
		     SUN8I_A83T_MIPI_CSI2_RSVD1_HW_LOCK_VALUE);

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD2_REG, 0);
	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD2_REG,
		     SUN8I_A83T_MIPI_CSI2_RSVD2_HW_LOCK_VALUE);

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG, 0);
	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
		     SUN8I_A83T_MIPI_CSI2_CFG_INIT_VALUE);
}

/* Enable capture by raising the sync bit. */
static void
sun8i_a83t_mipi_csi2_enable(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;

	regmap_update_bits(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
			   SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN,
			   SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN);
}

/* Disable capture and put the controller back into reset. */
static void
sun8i_a83t_mipi_csi2_disable(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;

	regmap_update_bits(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
			   SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN, 0);

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG, 0);
}

/* Program lane count, packet unpacking and virtual-channel routing. */
static void
sun8i_a83t_mipi_csi2_configure(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;
	unsigned int lanes_count =
		csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
	struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
	const struct sun8i_a83t_mipi_csi2_format *format;
	struct device *dev = csi2_dev->dev;
	u32 version = 0;

	format = sun8i_a83t_mipi_csi2_format_find(mbus_format->code);
	if (WARN_ON(!format))
		return;

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG,
		     SUN8I_A83T_MIPI_CSI2_CTRL_RESET_N);

	regmap_read(regmap, SUN8I_A83T_MIPI_CSI2_VERSION_REG, &version);

	dev_dbg(dev, "A83T MIPI CSI-2 version: %04x\n", version);

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
		     SUN8I_A83T_MIPI_CSI2_CFG_UNPKT_EN |
		     SUN8I_A83T_MIPI_CSI2_CFG_SYNC_DLY_CYCLE(8) |
		     SUN8I_A83T_MIPI_CSI2_CFG_N_CHANNEL(1) |
		     SUN8I_A83T_MIPI_CSI2_CFG_N_LANE(lanes_count));

	/*
	 * Only a single virtual channel (index 0) is currently supported.
	 * While the registers do mention multiple physical channels being
	 * available (which can be configured to match a specific virtual
	 * channel or data type), it's unclear whether channels > 0 are actually
	 * connected and available and the reference source code only makes use
	 * of channel 0.
	 *
	 * Using extra channels would also require matching channels to be
	 * available on the CSI (and ISP) side, which is also unsure although
	 * some CSI implementations are said to support multiple channels for
	 * BT656 time-sharing.
	 *
	 * We still configure virtual channel numbers to ensure that virtual
	 * channel 0 only goes to channel 0.
	 */

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_VCDT0_REG,
		     SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(3, 3) |
		     SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(2, 2) |
		     SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(1, 1) |
		     SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(0, 0) |
		     SUN8I_A83T_MIPI_CSI2_VCDT0_CH_DT(0, format->data_type));
}

/* V4L2 Subdev */

/*
 * Start/stop the stream: resume the device, configure the D-PHY from the
 * sensor pixel rate, enable the controller and finally start the source.
 */
static int sun8i_a83t_mipi_csi2_s_stream(struct v4l2_subdev *subdev, int on)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev =
		v4l2_get_subdevdata(subdev);
	struct v4l2_subdev *source_subdev = csi2_dev->bridge.source_subdev;
	union phy_configure_opts dphy_opts = { 0 };
	struct phy_configure_opts_mipi_dphy *dphy_cfg = &dphy_opts.mipi_dphy;
	struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
	const struct sun8i_a83t_mipi_csi2_format *format;
	struct phy *dphy = csi2_dev->dphy;
	struct device *dev = csi2_dev->dev;
	struct v4l2_ctrl *ctrl;
	unsigned int lanes_count =
		csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
	unsigned long pixel_rate;
	int ret;

	if (!source_subdev)
		return -ENODEV;

	if (!on) {
		v4l2_subdev_call(source_subdev, video, s_stream, 0);
		ret = 0;
		goto disable;
	}

	/* Runtime PM */

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* Sensor pixel rate */

	ctrl = v4l2_ctrl_find(source_subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
	if (!ctrl) {
		dev_err(dev, "missing sensor pixel rate\n");
		ret = -ENODEV;
		goto error_pm;
	}

	pixel_rate = (unsigned long)v4l2_ctrl_g_ctrl_int64(ctrl);
	if (!pixel_rate) {
		dev_err(dev, "missing (zero) sensor pixel rate\n");
		ret = -ENODEV;
		goto error_pm;
	}

	/* D-PHY */

	if (!lanes_count) {
		dev_err(dev, "missing (zero) MIPI CSI-2 lanes count\n");
		ret = -ENODEV;
		goto error_pm;
	}

	format = sun8i_a83t_mipi_csi2_format_find(mbus_format->code);
	if (WARN_ON(!format)) {
		ret = -ENODEV;
		goto error_pm;
	}

	phy_mipi_dphy_get_default_config(pixel_rate, format->bpp, lanes_count,
					 dphy_cfg);

	/*
	 * Note that our hardware is using DDR, which is not taken in account by
	 * phy_mipi_dphy_get_default_config when calculating hs_clk_rate from
	 * the pixel rate, lanes count and bpp.
	 *
	 * The resulting clock rate is basically the symbol rate over the whole
	 * link. The actual clock rate is calculated with division by two since
	 * DDR samples both on rising and falling edges.
	 */

	dev_dbg(dev, "A83T MIPI CSI-2 config:\n");
	dev_dbg(dev, "%ld pixels/s, %u bits/pixel, %u lanes, %lu Hz clock\n",
		pixel_rate, format->bpp, lanes_count,
		dphy_cfg->hs_clk_rate / 2);

	ret = phy_reset(dphy);
	if (ret) {
		dev_err(dev, "failed to reset MIPI D-PHY\n");
		goto error_pm;
	}

	ret = phy_configure(dphy, &dphy_opts);
	if (ret) {
		dev_err(dev, "failed to configure MIPI D-PHY\n");
		goto error_pm;
	}

	/* Controller */

	sun8i_a83t_mipi_csi2_configure(csi2_dev);
	sun8i_a83t_mipi_csi2_enable(csi2_dev);

	/* D-PHY */

	ret = phy_power_on(dphy);
	if (ret) {
		dev_err(dev, "failed to power on MIPI D-PHY\n");
		goto error_pm;
	}

	/* Source */

	ret = v4l2_subdev_call(source_subdev, video, s_stream, 1);
	if (ret && ret != -ENOIOCTLCMD)
		goto disable;

	return 0;

disable:
	phy_power_off(dphy);

	sun8i_a83t_mipi_csi2_disable(csi2_dev);

error_pm:
	pm_runtime_put(dev);

	return ret;
}

static const struct v4l2_subdev_video_ops sun8i_a83t_mipi_csi2_video_ops = {
	.s_stream	= sun8i_a83t_mipi_csi2_s_stream,
};

/* Clamp an mbus format to a supported code and fixed colorspace fields. */
static void
sun8i_a83t_mipi_csi2_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
{
	if (!sun8i_a83t_mipi_csi2_format_find(mbus_format->code))
		mbus_format->code = sun8i_a83t_mipi_csi2_formats[0].mbus_code;

	mbus_format->field = V4L2_FIELD_NONE;
	mbus_format->colorspace = V4L2_COLORSPACE_RAW;
	mbus_format->quantization = V4L2_QUANTIZATION_DEFAULT;
	mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}

/* Initialize the try format on the sink pad to a 640x480 default. */
static int sun8i_a83t_mipi_csi2_init_cfg(struct v4l2_subdev *subdev,
					 struct v4l2_subdev_state *state)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev =
		v4l2_get_subdevdata(subdev);
	unsigned int pad = SUN8I_A83T_MIPI_CSI2_PAD_SINK;
	struct v4l2_mbus_framefmt *mbus_format =
		v4l2_subdev_get_try_format(subdev, state, pad);
	struct mutex *lock = &csi2_dev->bridge.lock;

	mutex_lock(lock);

	mbus_format->code = sun8i_a83t_mipi_csi2_formats[0].mbus_code;
	mbus_format->width = 640;
	mbus_format->height = 480;

	sun8i_a83t_mipi_csi2_mbus_format_prepare(mbus_format);

	mutex_unlock(lock);

	return 0;
}

static int
sun8i_a83t_mipi_csi2_enum_mbus_code(struct v4l2_subdev *subdev,
				    struct v4l2_subdev_state *state,
				    struct v4l2_subdev_mbus_code_enum *code_enum)
{
	if (code_enum->index >= ARRAY_SIZE(sun8i_a83t_mipi_csi2_formats))
		return -EINVAL;

	code_enum->code =
		sun8i_a83t_mipi_csi2_formats[code_enum->index].mbus_code;

	return 0;
}

static int sun8i_a83t_mipi_csi2_get_fmt(struct v4l2_subdev *subdev,
					struct v4l2_subdev_state *state,
					struct v4l2_subdev_format *format)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev =
		v4l2_get_subdevdata(subdev);
	struct v4l2_mbus_framefmt *mbus_format = &format->format;
	struct mutex *lock = &csi2_dev->bridge.lock;

	mutex_lock(lock);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		*mbus_format = *v4l2_subdev_get_try_format(subdev, state,
							   format->pad);
	else
		*mbus_format = csi2_dev->bridge.mbus_format;

	mutex_unlock(lock);

	return 0;
}

static int sun8i_a83t_mipi_csi2_set_fmt(struct v4l2_subdev *subdev,
					struct v4l2_subdev_state *state,
					struct v4l2_subdev_format *format)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev =
		v4l2_get_subdevdata(subdev);
	struct v4l2_mbus_framefmt *mbus_format = &format->format;
	struct mutex *lock = &csi2_dev->bridge.lock;

	mutex_lock(lock);

	sun8i_a83t_mipi_csi2_mbus_format_prepare(mbus_format);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		*v4l2_subdev_get_try_format(subdev, state, format->pad) =
			*mbus_format;
	else
		csi2_dev->bridge.mbus_format = *mbus_format;

	mutex_unlock(lock);

	return 0;
}

static const struct v4l2_subdev_pad_ops sun8i_a83t_mipi_csi2_pad_ops = {
	.init_cfg	= sun8i_a83t_mipi_csi2_init_cfg,
	.enum_mbus_code	= sun8i_a83t_mipi_csi2_enum_mbus_code,
	.get_fmt	= sun8i_a83t_mipi_csi2_get_fmt,
	.set_fmt	= sun8i_a83t_mipi_csi2_set_fmt,
};

static const struct v4l2_subdev_ops sun8i_a83t_mipi_csi2_subdev_ops = {
	.video	= &sun8i_a83t_mipi_csi2_video_ops,
	.pad	= &sun8i_a83t_mipi_csi2_pad_ops,
};

/* Media Entity */

static const struct media_entity_operations sun8i_a83t_mipi_csi2_entity_ops = {
	.link_validate	= v4l2_subdev_link_validate,
};

/* V4L2 Async */

/* Link the bound sensor's source pad to our sink pad and remember it. */
static int
sun8i_a83t_mipi_csi2_notifier_bound(struct v4l2_async_notifier *notifier,
				    struct v4l2_subdev *remote_subdev,
				    struct v4l2_async_connection *async_subdev)
{
	struct v4l2_subdev *subdev = notifier->sd;
	struct sun8i_a83t_mipi_csi2_device *csi2_dev =
		container_of(notifier, struct sun8i_a83t_mipi_csi2_device,
			     bridge.notifier);
	struct media_entity *sink_entity = &subdev->entity;
	struct media_entity *source_entity = &remote_subdev->entity;
	struct device *dev = csi2_dev->dev;
	int sink_pad_index = 0;
	int source_pad_index;
	int ret;

	ret = media_entity_get_fwnode_pad(source_entity, remote_subdev->fwnode,
					  MEDIA_PAD_FL_SOURCE);
	if (ret < 0) {
		dev_err(dev, "missing source pad in external entity %s\n",
			source_entity->name);
		return -EINVAL;
	}

	source_pad_index = ret;

	dev_dbg(dev, "creating %s:%u -> %s:%u link\n", source_entity->name,
		source_pad_index, sink_entity->name, sink_pad_index);

	ret = media_create_pad_link(source_entity, source_pad_index,
				    sink_entity, sink_pad_index,
				    MEDIA_LNK_FL_ENABLED |
				    MEDIA_LNK_FL_IMMUTABLE);
	if (ret) {
		dev_err(dev, "failed to create %s:%u -> %s:%u link\n",
			source_entity->name, source_pad_index,
			sink_entity->name, sink_pad_index);
		return ret;
	}

	csi2_dev->bridge.source_subdev = remote_subdev;

	return 0;
}

static const struct v4l2_async_notifier_operations
sun8i_a83t_mipi_csi2_notifier_ops = {
	.bound	= sun8i_a83t_mipi_csi2_notifier_bound,
};

/* Bridge */

/* Parse our fwnode endpoint and register the remote sensor for async bind. */
static int
sun8i_a83t_mipi_csi2_bridge_source_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
	struct v4l2_fwnode_endpoint *endpoint = &csi2_dev->bridge.endpoint;
	struct v4l2_async_connection *subdev_async;
	struct fwnode_handle *handle;
	struct device *dev = csi2_dev->dev;
	int ret;

	handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
						 FWNODE_GRAPH_ENDPOINT_NEXT);
	if (!handle)
		return -ENODEV;

	endpoint->bus_type = V4L2_MBUS_CSI2_DPHY;

	ret = v4l2_fwnode_endpoint_parse(handle, endpoint);
	if (ret)
		goto complete;

	subdev_async =
		v4l2_async_nf_add_fwnode_remote(notifier, handle,
						struct v4l2_async_connection);
	if (IS_ERR(subdev_async))
		ret = PTR_ERR(subdev_async);

complete:
	fwnode_handle_put(handle);

	return ret;
}

/* Set up the subdev, media pads and async notifier for the bridge. */
static int
sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct sun8i_a83t_mipi_csi2_bridge *bridge = &csi2_dev->bridge;
	struct v4l2_subdev *subdev = &bridge->subdev;
	struct v4l2_async_notifier *notifier = &bridge->notifier;
	struct media_pad *pads = bridge->pads;
	struct device *dev = csi2_dev->dev;
	bool notifier_registered = false;
	int ret;

	mutex_init(&bridge->lock);

	/* V4L2 Subdev */

	v4l2_subdev_init(subdev, &sun8i_a83t_mipi_csi2_subdev_ops);
	strscpy(subdev->name, SUN8I_A83T_MIPI_CSI2_NAME, sizeof(subdev->name));
	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	subdev->owner = THIS_MODULE;
	subdev->dev = dev;

	v4l2_set_subdevdata(subdev, csi2_dev);

	/* Media Entity */

	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	subdev->entity.ops = &sun8i_a83t_mipi_csi2_entity_ops;

	/* Media Pads */

	pads[SUN8I_A83T_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
						    MEDIA_PAD_FL_MUST_CONNECT;
	pads[SUN8I_A83T_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE |
						      MEDIA_PAD_FL_MUST_CONNECT;

	ret = media_entity_pads_init(&subdev->entity,
				     SUN8I_A83T_MIPI_CSI2_PAD_COUNT, pads);
	if (ret)
		return ret;

	/* V4L2 Async */

	v4l2_async_subdev_nf_init(notifier, subdev);
	notifier->ops = &sun8i_a83t_mipi_csi2_notifier_ops;

	ret = sun8i_a83t_mipi_csi2_bridge_source_setup(csi2_dev);
	if (ret && ret != -ENODEV)
		goto error_v4l2_notifier_cleanup;

	/* Only register the notifier when a sensor is connected. */
	if (ret != -ENODEV) {
		ret = v4l2_async_nf_register(notifier);
		if (ret < 0)
			goto error_v4l2_notifier_cleanup;

		notifier_registered = true;
	}

	/* V4L2 Subdev */

	ret = v4l2_async_register_subdev(subdev);
	if (ret < 0)
		goto error_v4l2_notifier_unregister;

	return 0;

error_v4l2_notifier_unregister:
	if (notifier_registered)
		v4l2_async_nf_unregister(notifier);

error_v4l2_notifier_cleanup:
	v4l2_async_nf_cleanup(notifier);

	media_entity_cleanup(&subdev->entity);

	return ret;
}

/* Tear down everything set up by sun8i_a83t_mipi_csi2_bridge_setup(). */
static void
sun8i_a83t_mipi_csi2_bridge_cleanup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct v4l2_subdev *subdev = &csi2_dev->bridge.subdev;
	struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;

	v4l2_async_unregister_subdev(subdev);
	v4l2_async_nf_unregister(notifier);
	v4l2_async_nf_cleanup(notifier);
	media_entity_cleanup(&subdev->entity);
}

/* Platform */

/* Runtime-suspend: gate clocks and assert the (shared) reset. */
static int sun8i_a83t_mipi_csi2_suspend(struct device *dev)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);

	clk_disable_unprepare(csi2_dev->clock_misc);
	clk_disable_unprepare(csi2_dev->clock_mipi);
	clk_disable_unprepare(csi2_dev->clock_mod);
	reset_control_assert(csi2_dev->reset);

	return 0;
}

/* Runtime-resume: deassert reset, enable clocks, re-run the init sequence. */
static int sun8i_a83t_mipi_csi2_resume(struct device *dev)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);
	int ret;

	ret = reset_control_deassert(csi2_dev->reset);
	if (ret) {
		dev_err(dev, "failed to deassert reset\n");
		return ret;
	}

	ret = clk_prepare_enable(csi2_dev->clock_mod);
	if (ret) {
		dev_err(dev, "failed to enable module clock\n");
		goto error_reset;
	}

	ret = clk_prepare_enable(csi2_dev->clock_mipi);
	if (ret) {
		dev_err(dev, "failed to enable MIPI clock\n");
		goto error_clock_mod;
	}

	ret = clk_prepare_enable(csi2_dev->clock_misc);
	if (ret) {
		dev_err(dev, "failed to enable CSI misc clock\n");
		goto error_clock_mipi;
	}

	sun8i_a83t_mipi_csi2_init(csi2_dev);

	return 0;

error_clock_mipi:
	clk_disable_unprepare(csi2_dev->clock_mipi);

error_clock_mod:
	clk_disable_unprepare(csi2_dev->clock_mod);

error_reset:
	reset_control_assert(csi2_dev->reset);

	return ret;
}

static const struct dev_pm_ops sun8i_a83t_mipi_csi2_pm_ops = {
	.runtime_suspend	= sun8i_a83t_mipi_csi2_suspend,
	.runtime_resume		= sun8i_a83t_mipi_csi2_resume,
};

static const struct regmap_config sun8i_a83t_mipi_csi2_regmap_config = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
	.max_register	= 0x120,
};

/* Acquire registers, clocks (mod pinned to 297 MHz), reset and the D-PHY. */
static int
sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev,
				     struct platform_device *platform_dev)
{
	struct device *dev = csi2_dev->dev;
	void __iomem *io_base;
	int ret;

	/* Registers */

	io_base = devm_platform_ioremap_resource(platform_dev, 0);
	if (IS_ERR(io_base))
		return PTR_ERR(io_base);

	csi2_dev->regmap =
		devm_regmap_init_mmio_clk(dev, "bus", io_base,
					  &sun8i_a83t_mipi_csi2_regmap_config);
	if (IS_ERR(csi2_dev->regmap)) {
		dev_err(dev, "failed to init register map\n");
		return PTR_ERR(csi2_dev->regmap);
	}

	/* Clocks */

	csi2_dev->clock_mod = devm_clk_get(dev, "mod");
	if (IS_ERR(csi2_dev->clock_mod)) {
		dev_err(dev, "failed to acquire mod clock\n");
		return PTR_ERR(csi2_dev->clock_mod);
	}

	ret = clk_set_rate_exclusive(csi2_dev->clock_mod, 297000000);
	if (ret) {
		dev_err(dev, "failed to set mod clock rate\n");
		return ret;
	}

	csi2_dev->clock_mipi = devm_clk_get(dev, "mipi");
	if (IS_ERR(csi2_dev->clock_mipi)) {
		dev_err(dev, "failed to acquire mipi clock\n");
		ret = PTR_ERR(csi2_dev->clock_mipi);
		goto error_clock_rate_exclusive;
	}

	csi2_dev->clock_misc = devm_clk_get(dev, "misc");
	if (IS_ERR(csi2_dev->clock_misc)) {
		dev_err(dev, "failed to acquire misc clock\n");
		ret = PTR_ERR(csi2_dev->clock_misc);
		goto error_clock_rate_exclusive;
	}

	/* Reset */

	csi2_dev->reset = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(csi2_dev->reset)) {
		dev_err(dev, "failed to get reset controller\n");
		ret = PTR_ERR(csi2_dev->reset);
		goto error_clock_rate_exclusive;
	}

	/* D-PHY */

	ret = sun8i_a83t_dphy_register(csi2_dev);
	if (ret) {
		dev_err(dev, "failed to initialize MIPI D-PHY\n");
		goto error_clock_rate_exclusive;
	}

	/* Runtime PM */

	pm_runtime_enable(dev);

	return 0;

error_clock_rate_exclusive:
	clk_rate_exclusive_put(csi2_dev->clock_mod);

	return ret;
}

/* Release runtime PM, the D-PHY and the exclusive mod clock rate claim. */
static void
sun8i_a83t_mipi_csi2_resources_cleanup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	pm_runtime_disable(csi2_dev->dev);
	phy_exit(csi2_dev->dphy);
	clk_rate_exclusive_put(csi2_dev->clock_mod);
}

static int sun8i_a83t_mipi_csi2_probe(struct platform_device *platform_dev)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev;
	struct device *dev = &platform_dev->dev;
	int ret;

	csi2_dev = devm_kzalloc(dev, sizeof(*csi2_dev), GFP_KERNEL);
	if (!csi2_dev)
		return -ENOMEM;

	csi2_dev->dev = dev;
	platform_set_drvdata(platform_dev, csi2_dev);

	ret = sun8i_a83t_mipi_csi2_resources_setup(csi2_dev, platform_dev);
	if (ret)
		return ret;

	ret = sun8i_a83t_mipi_csi2_bridge_setup(csi2_dev);
	if (ret)
		goto error_resources;

	return 0;

error_resources:
	sun8i_a83t_mipi_csi2_resources_cleanup(csi2_dev);

	return ret;
}

static void sun8i_a83t_mipi_csi2_remove(struct platform_device *platform_dev)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev =
		platform_get_drvdata(platform_dev);

	sun8i_a83t_mipi_csi2_bridge_cleanup(csi2_dev);
	sun8i_a83t_mipi_csi2_resources_cleanup(csi2_dev);
}

static const struct of_device_id sun8i_a83t_mipi_csi2_of_match[] = {
	{ .compatible = "allwinner,sun8i-a83t-mipi-csi2" },
	{},
};
MODULE_DEVICE_TABLE(of, sun8i_a83t_mipi_csi2_of_match);

static struct platform_driver sun8i_a83t_mipi_csi2_platform_driver = {
	.probe	= sun8i_a83t_mipi_csi2_probe,
	.remove_new	= sun8i_a83t_mipi_csi2_remove,
	.driver	= {
		.name		= SUN8I_A83T_MIPI_CSI2_NAME,
		.of_match_table	= sun8i_a83t_mipi_csi2_of_match,
		.pm		= &sun8i_a83t_mipi_csi2_pm_ops,
	},
};
module_platform_driver(sun8i_a83t_mipi_csi2_platform_driver);

MODULE_DESCRIPTION("Allwinner A83T MIPI CSI-2 and D-PHY Controller Driver");
MODULE_AUTHOR("Paul Kocialkowski <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2021-2022 Bootlin * Author: Paul Kocialkowski <[email protected]> */ #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <media/v4l2-device.h> #include <media/v4l2-fwnode.h> #include "sun6i_csi.h" #include "sun6i_csi_bridge.h" #include "sun6i_csi_reg.h" /* Helpers */ void sun6i_csi_bridge_dimensions(struct sun6i_csi_device *csi_dev, unsigned int *width, unsigned int *height) { if (width) *width = csi_dev->bridge.mbus_format.width; if (height) *height = csi_dev->bridge.mbus_format.height; } void sun6i_csi_bridge_format(struct sun6i_csi_device *csi_dev, u32 *mbus_code, u32 *field) { if (mbus_code) *mbus_code = csi_dev->bridge.mbus_format.code; if (field) *field = csi_dev->bridge.mbus_format.field; } /* Format */ static const struct sun6i_csi_bridge_format sun6i_csi_bridge_formats[] = { /* Bayer */ { .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, { .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, { .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, { .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, { .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, { .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, { .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, { .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, { .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, { .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, { .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, { .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12, .input_format = SUN6I_CSI_INPUT_FMT_RAW, }, /* RGB */ { .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE, .input_format = 
SUN6I_CSI_INPUT_FMT_RAW,
	},
	{
		.mbus_code		= MEDIA_BUS_FMT_RGB565_2X8_BE,
		.input_format		= SUN6I_CSI_INPUT_FMT_RAW,
	},
	/* YUV422 */
	{
		.mbus_code		= MEDIA_BUS_FMT_YUYV8_2X8,
		.input_format		= SUN6I_CSI_INPUT_FMT_YUV422,
		.input_yuv_seq		= SUN6I_CSI_INPUT_YUV_SEQ_YUYV,
		.input_yuv_seq_invert	= SUN6I_CSI_INPUT_YUV_SEQ_YVYU,
	},
	{
		.mbus_code		= MEDIA_BUS_FMT_UYVY8_2X8,
		.input_format		= SUN6I_CSI_INPUT_FMT_YUV422,
		.input_yuv_seq		= SUN6I_CSI_INPUT_YUV_SEQ_UYVY,
		.input_yuv_seq_invert	= SUN6I_CSI_INPUT_YUV_SEQ_VYUY,
	},
	{
		.mbus_code		= MEDIA_BUS_FMT_YVYU8_2X8,
		.input_format		= SUN6I_CSI_INPUT_FMT_YUV422,
		.input_yuv_seq		= SUN6I_CSI_INPUT_YUV_SEQ_YVYU,
		.input_yuv_seq_invert	= SUN6I_CSI_INPUT_YUV_SEQ_YUYV,
	},
	/*
	 * NOTE(review): a second, byte-identical UYVY8_2X8 entry was removed
	 * here. sun6i_csi_bridge_format_find() returns the first match, so
	 * the duplicate was dead data and made enum_mbus_code report the
	 * same media bus code twice.
	 */
	{
		.mbus_code		= MEDIA_BUS_FMT_VYUY8_2X8,
		.input_format		= SUN6I_CSI_INPUT_FMT_YUV422,
		.input_yuv_seq		= SUN6I_CSI_INPUT_YUV_SEQ_VYUY,
		.input_yuv_seq_invert	= SUN6I_CSI_INPUT_YUV_SEQ_UYVY,
	},
	{
		.mbus_code		= MEDIA_BUS_FMT_YUYV8_1X16,
		.input_format		= SUN6I_CSI_INPUT_FMT_YUV422,
		.input_yuv_seq		= SUN6I_CSI_INPUT_YUV_SEQ_YUYV,
		.input_yuv_seq_invert	= SUN6I_CSI_INPUT_YUV_SEQ_YVYU,
	},
	{
		.mbus_code		= MEDIA_BUS_FMT_UYVY8_1X16,
		.input_format		= SUN6I_CSI_INPUT_FMT_YUV422,
		.input_yuv_seq		= SUN6I_CSI_INPUT_YUV_SEQ_UYVY,
		.input_yuv_seq_invert	= SUN6I_CSI_INPUT_YUV_SEQ_VYUY,
	},
	{
		.mbus_code		= MEDIA_BUS_FMT_YVYU8_1X16,
		.input_format		= SUN6I_CSI_INPUT_FMT_YUV422,
		.input_yuv_seq		= SUN6I_CSI_INPUT_YUV_SEQ_YVYU,
		.input_yuv_seq_invert	= SUN6I_CSI_INPUT_YUV_SEQ_YUYV,
	},
	/* NOTE(review): duplicate UYVY8_1X16 entry removed (same reason). */
	{
		.mbus_code		= MEDIA_BUS_FMT_VYUY8_1X16,
		.input_format		= SUN6I_CSI_INPUT_FMT_YUV422,
		.input_yuv_seq		= SUN6I_CSI_INPUT_YUV_SEQ_VYUY,
		.input_yuv_seq_invert	=
SUN6I_CSI_INPUT_YUV_SEQ_UYVY,
	},
	/* Compressed */
	{
		.mbus_code	= MEDIA_BUS_FMT_JPEG_1X8,
		.input_format	= SUN6I_CSI_INPUT_FMT_RAW,
	},
};

/*
 * Look up the bridge format entry for a media bus code.
 * Returns the first matching table entry, or NULL if unsupported.
 */
const struct sun6i_csi_bridge_format *
sun6i_csi_bridge_format_find(u32 mbus_code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sun6i_csi_bridge_formats); i++)
		if (sun6i_csi_bridge_formats[i].mbus_code == mbus_code)
			return &sun6i_csi_bridge_formats[i];

	return NULL;
}

/* Bridge */

/*
 * Enable the interrupts used during capture: vsync, horizontal-blank and
 * FIFO overflows, frame done and capture done.
 */
static void sun6i_csi_bridge_irq_enable(struct sun6i_csi_device *csi_dev)
{
	struct regmap *regmap = csi_dev->regmap;

	regmap_write(regmap, SUN6I_CSI_CH_INT_EN_REG,
		     SUN6I_CSI_CH_INT_EN_VS |
		     SUN6I_CSI_CH_INT_EN_HB_OF |
		     SUN6I_CSI_CH_INT_EN_FIFO2_OF |
		     SUN6I_CSI_CH_INT_EN_FIFO1_OF |
		     SUN6I_CSI_CH_INT_EN_FIFO0_OF |
		     SUN6I_CSI_CH_INT_EN_FD |
		     SUN6I_CSI_CH_INT_EN_CD);
}

/* Mask all channel interrupts. */
static void sun6i_csi_bridge_irq_disable(struct sun6i_csi_device *csi_dev)
{
	struct regmap *regmap = csi_dev->regmap;

	regmap_write(regmap, SUN6I_CSI_CH_INT_EN_REG, 0);
}

/* Mask all channel interrupts, then clear any pending status bits. */
static void sun6i_csi_bridge_irq_clear(struct sun6i_csi_device *csi_dev)
{
	struct regmap *regmap = csi_dev->regmap;

	regmap_write(regmap, SUN6I_CSI_CH_INT_EN_REG, 0);
	regmap_write(regmap, SUN6I_CSI_CH_INT_STA_REG,
		     SUN6I_CSI_CH_INT_STA_CLEAR);
}

/* Turn the CSI controller on, then start video capture. */
static void sun6i_csi_bridge_enable(struct sun6i_csi_device *csi_dev)
{
	struct regmap *regmap = csi_dev->regmap;

	regmap_update_bits(regmap, SUN6I_CSI_EN_REG, SUN6I_CSI_EN_CSI_EN,
			   SUN6I_CSI_EN_CSI_EN);

	regmap_update_bits(regmap, SUN6I_CSI_CAP_REG, SUN6I_CSI_CAP_VCAP_ON,
			   SUN6I_CSI_CAP_VCAP_ON);
}

/* Stop video capture, then turn the CSI controller off (reverse order). */
static void sun6i_csi_bridge_disable(struct sun6i_csi_device *csi_dev)
{
	struct regmap *regmap = csi_dev->regmap;

	regmap_update_bits(regmap, SUN6I_CSI_CAP_REG, SUN6I_CSI_CAP_VCAP_ON, 0);
	regmap_update_bits(regmap, SUN6I_CSI_EN_REG, SUN6I_CSI_EN_CSI_EN, 0);
}

/*
 * Program the interface configuration register for the parallel source,
 * from the parsed fwnode endpoint (bus width, sync polarities, BT.656).
 */
static void
sun6i_csi_bridge_configure_parallel(struct sun6i_csi_device *csi_dev)
{
	struct device *dev = csi_dev->dev;
	struct regmap *regmap = csi_dev->regmap;
	struct v4l2_fwnode_endpoint *endpoint =
		&csi_dev->bridge.source_parallel.endpoint;
	unsigned
char bus_width = endpoint->bus.parallel.bus_width; unsigned int flags = endpoint->bus.parallel.flags; u32 field; u32 value = SUN6I_CSI_IF_CFG_IF_CSI; sun6i_csi_bridge_format(csi_dev, NULL, &field); if (field == V4L2_FIELD_INTERLACED || field == V4L2_FIELD_INTERLACED_TB || field == V4L2_FIELD_INTERLACED_BT) value |= SUN6I_CSI_IF_CFG_SRC_TYPE_INTERLACED | SUN6I_CSI_IF_CFG_FIELD_DT_PCLK_SHIFT(1) | SUN6I_CSI_IF_CFG_FIELD_DT_FIELD_VSYNC; else value |= SUN6I_CSI_IF_CFG_SRC_TYPE_PROGRESSIVE; switch (endpoint->bus_type) { case V4L2_MBUS_PARALLEL: if (bus_width == 16) value |= SUN6I_CSI_IF_CFG_IF_CSI_YUV_COMBINED; else value |= SUN6I_CSI_IF_CFG_IF_CSI_YUV_RAW; if (flags & V4L2_MBUS_FIELD_EVEN_LOW) value |= SUN6I_CSI_IF_CFG_FIELD_NEGATIVE; else value |= SUN6I_CSI_IF_CFG_FIELD_POSITIVE; if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW) value |= SUN6I_CSI_IF_CFG_VREF_POL_NEGATIVE; else value |= SUN6I_CSI_IF_CFG_VREF_POL_POSITIVE; if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) value |= SUN6I_CSI_IF_CFG_HREF_POL_NEGATIVE; else value |= SUN6I_CSI_IF_CFG_HREF_POL_POSITIVE; if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING) value |= SUN6I_CSI_IF_CFG_CLK_POL_RISING; else value |= SUN6I_CSI_IF_CFG_CLK_POL_FALLING; break; case V4L2_MBUS_BT656: if (bus_width == 16) value |= SUN6I_CSI_IF_CFG_IF_CSI_BT1120; else value |= SUN6I_CSI_IF_CFG_IF_CSI_BT656; if (flags & V4L2_MBUS_FIELD_EVEN_LOW) value |= SUN6I_CSI_IF_CFG_FIELD_NEGATIVE; else value |= SUN6I_CSI_IF_CFG_FIELD_POSITIVE; if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING) value |= SUN6I_CSI_IF_CFG_CLK_POL_RISING; else value |= SUN6I_CSI_IF_CFG_CLK_POL_FALLING; break; default: dev_warn(dev, "unsupported bus type: %d\n", endpoint->bus_type); break; } switch (bus_width) { case 8: /* 16-bit YUV formats use a doubled width in 8-bit mode. 
*/ case 16: value |= SUN6I_CSI_IF_CFG_DATA_WIDTH_8; break; case 10: value |= SUN6I_CSI_IF_CFG_DATA_WIDTH_10; break; case 12: value |= SUN6I_CSI_IF_CFG_DATA_WIDTH_12; break; default: dev_warn(dev, "unsupported bus width: %u\n", bus_width); break; } regmap_write(regmap, SUN6I_CSI_IF_CFG_REG, value); } static void sun6i_csi_bridge_configure_mipi_csi2(struct sun6i_csi_device *csi_dev) { struct regmap *regmap = csi_dev->regmap; u32 value = SUN6I_CSI_IF_CFG_IF_MIPI; u32 field; sun6i_csi_bridge_format(csi_dev, NULL, &field); if (field == V4L2_FIELD_INTERLACED || field == V4L2_FIELD_INTERLACED_TB || field == V4L2_FIELD_INTERLACED_BT) value |= SUN6I_CSI_IF_CFG_SRC_TYPE_INTERLACED; else value |= SUN6I_CSI_IF_CFG_SRC_TYPE_PROGRESSIVE; regmap_write(regmap, SUN6I_CSI_IF_CFG_REG, value); } static void sun6i_csi_bridge_configure_format(struct sun6i_csi_device *csi_dev) { struct regmap *regmap = csi_dev->regmap; bool capture_streaming = csi_dev->capture.state.streaming; const struct sun6i_csi_bridge_format *bridge_format; const struct sun6i_csi_capture_format *capture_format; u32 mbus_code, field, pixelformat; u8 input_format, input_yuv_seq, output_format; u32 value = 0; sun6i_csi_bridge_format(csi_dev, &mbus_code, &field); bridge_format = sun6i_csi_bridge_format_find(mbus_code); if (WARN_ON(!bridge_format)) return; input_format = bridge_format->input_format; input_yuv_seq = bridge_format->input_yuv_seq; if (capture_streaming) { sun6i_csi_capture_format(csi_dev, &pixelformat, NULL); capture_format = sun6i_csi_capture_format_find(pixelformat); if (WARN_ON(!capture_format)) return; if (capture_format->input_format_raw) input_format = SUN6I_CSI_INPUT_FMT_RAW; if (capture_format->input_yuv_seq_invert) input_yuv_seq = bridge_format->input_yuv_seq_invert; if (field == V4L2_FIELD_INTERLACED || field == V4L2_FIELD_INTERLACED_TB || field == V4L2_FIELD_INTERLACED_BT) output_format = capture_format->output_format_field; else output_format = capture_format->output_format_frame; value |= 
SUN6I_CSI_CH_CFG_OUTPUT_FMT(output_format);
	}

	value |= SUN6I_CSI_CH_CFG_INPUT_FMT(input_format);
	value |= SUN6I_CSI_CH_CFG_INPUT_YUV_SEQ(input_yuv_seq);

	/* Select which field is captured for single-field formats. */
	if (field == V4L2_FIELD_TOP)
		value |= SUN6I_CSI_CH_CFG_FIELD_SEL_FIELD0;
	else if (field == V4L2_FIELD_BOTTOM)
		value |= SUN6I_CSI_CH_CFG_FIELD_SEL_FIELD1;
	else
		value |= SUN6I_CSI_CH_CFG_FIELD_SEL_EITHER;

	regmap_write(regmap, SUN6I_CSI_CH_CFG_REG, value);
}

/*
 * Configure the bridge for the active source: interface register for
 * parallel or MIPI CSI-2 (selected by source pointer identity), then the
 * channel format register.
 */
static void sun6i_csi_bridge_configure(struct sun6i_csi_device *csi_dev,
				       struct sun6i_csi_bridge_source *source)
{
	struct sun6i_csi_bridge *bridge = &csi_dev->bridge;

	if (source == &bridge->source_parallel)
		sun6i_csi_bridge_configure_parallel(csi_dev);
	else
		sun6i_csi_bridge_configure_mipi_csi2(csi_dev);

	sun6i_csi_bridge_configure_format(csi_dev);
}

/* V4L2 Subdev */

/*
 * Start/stop streaming on the bridge subdev.
 * Resolves the single remote source connected to the sink pad, then either
 * tears the pipeline down (!on) or powers up, configures and enables it,
 * finally propagating s_stream to the source subdev.
 */
static int sun6i_csi_bridge_s_stream(struct v4l2_subdev *subdev, int on)
{
	struct sun6i_csi_device *csi_dev = v4l2_get_subdevdata(subdev);
	struct sun6i_csi_bridge *bridge = &csi_dev->bridge;
	struct media_pad *local_pad = &bridge->pads[SUN6I_CSI_BRIDGE_PAD_SINK];
	bool capture_streaming = csi_dev->capture.state.streaming;
	struct device *dev = csi_dev->dev;
	struct sun6i_csi_bridge_source *source;
	struct v4l2_subdev *source_subdev;
	struct media_pad *remote_pad;
	int ret;

	/* Source */

	/* Exactly one remote source must be linked to the sink pad. */
	remote_pad = media_pad_remote_pad_unique(local_pad);
	if (IS_ERR(remote_pad)) {
		dev_err(dev,
			"zero or more than a single source connected to the bridge\n");
		return PTR_ERR(remote_pad);
	}

	source_subdev = media_entity_to_v4l2_subdev(remote_pad->entity);

	if (source_subdev == bridge->source_parallel.subdev)
		source = &bridge->source_parallel;
	else
		source = &bridge->source_mipi_csi2;

	if (!on) {
		v4l2_subdev_call(source_subdev, video, s_stream, 0);
		ret = 0;
		goto disable;
	}

	/* PM */

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* Clear */

	sun6i_csi_bridge_irq_clear(csi_dev);

	/* Configure */

	sun6i_csi_bridge_configure(csi_dev, source);

	if (capture_streaming)
		sun6i_csi_capture_configure(csi_dev);

	/* State Update */

	if
(capture_streaming) sun6i_csi_capture_state_update(csi_dev); /* Enable */ if (capture_streaming) sun6i_csi_bridge_irq_enable(csi_dev); sun6i_csi_bridge_enable(csi_dev); ret = v4l2_subdev_call(source_subdev, video, s_stream, 1); if (ret && ret != -ENOIOCTLCMD) goto disable; return 0; disable: if (capture_streaming) sun6i_csi_bridge_irq_disable(csi_dev); sun6i_csi_bridge_disable(csi_dev); pm_runtime_put(dev); return ret; } static const struct v4l2_subdev_video_ops sun6i_csi_bridge_video_ops = { .s_stream = sun6i_csi_bridge_s_stream, }; static void sun6i_csi_bridge_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format) { if (!sun6i_csi_bridge_format_find(mbus_format->code)) mbus_format->code = sun6i_csi_bridge_formats[0].mbus_code; mbus_format->field = V4L2_FIELD_NONE; mbus_format->colorspace = V4L2_COLORSPACE_RAW; mbus_format->quantization = V4L2_QUANTIZATION_DEFAULT; mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT; } static int sun6i_csi_bridge_init_cfg(struct v4l2_subdev *subdev, struct v4l2_subdev_state *state) { struct sun6i_csi_device *csi_dev = v4l2_get_subdevdata(subdev); unsigned int pad = SUN6I_CSI_BRIDGE_PAD_SINK; struct v4l2_mbus_framefmt *mbus_format = v4l2_subdev_get_try_format(subdev, state, pad); struct mutex *lock = &csi_dev->bridge.lock; mutex_lock(lock); mbus_format->code = sun6i_csi_bridge_formats[0].mbus_code; mbus_format->width = 1280; mbus_format->height = 720; sun6i_csi_bridge_mbus_format_prepare(mbus_format); mutex_unlock(lock); return 0; } static int sun6i_csi_bridge_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_state *state, struct v4l2_subdev_mbus_code_enum *code_enum) { if (code_enum->index >= ARRAY_SIZE(sun6i_csi_bridge_formats)) return -EINVAL; code_enum->code = sun6i_csi_bridge_formats[code_enum->index].mbus_code; return 0; } static int sun6i_csi_bridge_get_fmt(struct v4l2_subdev *subdev, struct v4l2_subdev_state *state, struct v4l2_subdev_format *format) { struct sun6i_csi_device *csi_dev = 
v4l2_get_subdevdata(subdev); struct v4l2_mbus_framefmt *mbus_format = &format->format; struct mutex *lock = &csi_dev->bridge.lock; mutex_lock(lock); if (format->which == V4L2_SUBDEV_FORMAT_TRY) *mbus_format = *v4l2_subdev_get_try_format(subdev, state, format->pad); else *mbus_format = csi_dev->bridge.mbus_format; mutex_unlock(lock); return 0; } static int sun6i_csi_bridge_set_fmt(struct v4l2_subdev *subdev, struct v4l2_subdev_state *state, struct v4l2_subdev_format *format) { struct sun6i_csi_device *csi_dev = v4l2_get_subdevdata(subdev); struct v4l2_mbus_framefmt *mbus_format = &format->format; struct mutex *lock = &csi_dev->bridge.lock; mutex_lock(lock); sun6i_csi_bridge_mbus_format_prepare(mbus_format); if (format->which == V4L2_SUBDEV_FORMAT_TRY) *v4l2_subdev_get_try_format(subdev, state, format->pad) = *mbus_format; else csi_dev->bridge.mbus_format = *mbus_format; mutex_unlock(lock); return 0; } static const struct v4l2_subdev_pad_ops sun6i_csi_bridge_pad_ops = { .init_cfg = sun6i_csi_bridge_init_cfg, .enum_mbus_code = sun6i_csi_bridge_enum_mbus_code, .get_fmt = sun6i_csi_bridge_get_fmt, .set_fmt = sun6i_csi_bridge_set_fmt, }; static const struct v4l2_subdev_ops sun6i_csi_bridge_subdev_ops = { .video = &sun6i_csi_bridge_video_ops, .pad = &sun6i_csi_bridge_pad_ops, }; /* Media Entity */ static const struct media_entity_operations sun6i_csi_bridge_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; /* V4L2 Async */ static int sun6i_csi_bridge_link(struct sun6i_csi_device *csi_dev, int sink_pad_index, struct v4l2_subdev *remote_subdev, bool enabled) { struct device *dev = csi_dev->dev; struct v4l2_subdev *subdev = &csi_dev->bridge.subdev; struct media_entity *sink_entity = &subdev->entity; struct media_entity *source_entity = &remote_subdev->entity; int source_pad_index; int ret; /* Get the first remote source pad. 
*/ ret = media_entity_get_fwnode_pad(source_entity, remote_subdev->fwnode, MEDIA_PAD_FL_SOURCE); if (ret < 0) { dev_err(dev, "missing source pad in external entity %s\n", source_entity->name); return -EINVAL; } source_pad_index = ret; dev_dbg(dev, "creating %s:%u -> %s:%u link\n", source_entity->name, source_pad_index, sink_entity->name, sink_pad_index); ret = media_create_pad_link(source_entity, source_pad_index, sink_entity, sink_pad_index, enabled ? MEDIA_LNK_FL_ENABLED : 0); if (ret < 0) { dev_err(dev, "failed to create %s:%u -> %s:%u link\n", source_entity->name, source_pad_index, sink_entity->name, sink_pad_index); return ret; } return 0; } static int sun6i_csi_bridge_notifier_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *remote_subdev, struct v4l2_async_connection *async_subdev) { struct sun6i_csi_device *csi_dev = container_of(notifier, struct sun6i_csi_device, bridge.notifier); struct sun6i_csi_bridge_async_subdev *bridge_async_subdev = container_of(async_subdev, struct sun6i_csi_bridge_async_subdev, async_subdev); struct sun6i_csi_bridge *bridge = &csi_dev->bridge; struct sun6i_csi_bridge_source *source = bridge_async_subdev->source; bool enabled = false; int ret; switch (source->endpoint.base.port) { case SUN6I_CSI_PORT_PARALLEL: enabled = true; break; case SUN6I_CSI_PORT_MIPI_CSI2: enabled = !bridge->source_parallel.expected; break; default: return -EINVAL; } source->subdev = remote_subdev; if (csi_dev->isp_available) { /* * Hook to the first available remote subdev to get v4l2 and * media devices and register the capture device then. 
*/ ret = sun6i_csi_isp_complete(csi_dev, remote_subdev->v4l2_dev); if (ret) return ret; } return sun6i_csi_bridge_link(csi_dev, SUN6I_CSI_BRIDGE_PAD_SINK, remote_subdev, enabled); } static int sun6i_csi_bridge_notifier_complete(struct v4l2_async_notifier *notifier) { struct sun6i_csi_device *csi_dev = container_of(notifier, struct sun6i_csi_device, bridge.notifier); struct v4l2_device *v4l2_dev = &csi_dev->v4l2.v4l2_dev; if (csi_dev->isp_available) return 0; return v4l2_device_register_subdev_nodes(v4l2_dev); } static const struct v4l2_async_notifier_operations sun6i_csi_bridge_notifier_ops = { .bound = sun6i_csi_bridge_notifier_bound, .complete = sun6i_csi_bridge_notifier_complete, }; /* Bridge */ static int sun6i_csi_bridge_source_setup(struct sun6i_csi_device *csi_dev, struct sun6i_csi_bridge_source *source, u32 port, enum v4l2_mbus_type *bus_types) { struct device *dev = csi_dev->dev; struct v4l2_async_notifier *notifier = &csi_dev->bridge.notifier; struct v4l2_fwnode_endpoint *endpoint = &source->endpoint; struct sun6i_csi_bridge_async_subdev *bridge_async_subdev; struct fwnode_handle *handle; int ret; handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), port, 0, 0); if (!handle) return -ENODEV; ret = v4l2_fwnode_endpoint_parse(handle, endpoint); if (ret) goto complete; if (bus_types) { bool valid = false; unsigned int i; for (i = 0; bus_types[i] != V4L2_MBUS_INVALID; i++) { if (endpoint->bus_type == bus_types[i]) { valid = true; break; } } if (!valid) { dev_err(dev, "unsupported bus type for port %d\n", port); ret = -EINVAL; goto complete; } } bridge_async_subdev = v4l2_async_nf_add_fwnode_remote(notifier, handle, struct sun6i_csi_bridge_async_subdev); if (IS_ERR(bridge_async_subdev)) { ret = PTR_ERR(bridge_async_subdev); goto complete; } bridge_async_subdev->source = source; source->expected = true; complete: fwnode_handle_put(handle); return ret; } int sun6i_csi_bridge_setup(struct sun6i_csi_device *csi_dev) { struct device *dev = csi_dev->dev; struct 
sun6i_csi_bridge *bridge = &csi_dev->bridge; struct v4l2_device *v4l2_dev = csi_dev->v4l2_dev; struct v4l2_subdev *subdev = &bridge->subdev; struct v4l2_async_notifier *notifier = &bridge->notifier; struct media_pad *pads = bridge->pads; enum v4l2_mbus_type parallel_mbus_types[] = { V4L2_MBUS_PARALLEL, V4L2_MBUS_BT656, V4L2_MBUS_INVALID }; int ret; mutex_init(&bridge->lock); /* V4L2 Subdev */ v4l2_subdev_init(subdev, &sun6i_csi_bridge_subdev_ops); strscpy(subdev->name, SUN6I_CSI_BRIDGE_NAME, sizeof(subdev->name)); subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; subdev->owner = THIS_MODULE; subdev->dev = dev; v4l2_set_subdevdata(subdev, csi_dev); /* Media Entity */ subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; subdev->entity.ops = &sun6i_csi_bridge_entity_ops; /* Media Pads */ pads[SUN6I_CSI_BRIDGE_PAD_SINK].flags = MEDIA_PAD_FL_SINK; pads[SUN6I_CSI_BRIDGE_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MUST_CONNECT; ret = media_entity_pads_init(&subdev->entity, SUN6I_CSI_BRIDGE_PAD_COUNT, pads); if (ret < 0) return ret; /* V4L2 Subdev */ if (csi_dev->isp_available) ret = v4l2_async_register_subdev(subdev); else ret = v4l2_device_register_subdev(v4l2_dev, subdev); if (ret) { dev_err(dev, "failed to register v4l2 subdev: %d\n", ret); goto error_media_entity; } /* V4L2 Async */ if (csi_dev->isp_available) v4l2_async_subdev_nf_init(notifier, subdev); else v4l2_async_nf_init(notifier, v4l2_dev); notifier->ops = &sun6i_csi_bridge_notifier_ops; sun6i_csi_bridge_source_setup(csi_dev, &bridge->source_parallel, SUN6I_CSI_PORT_PARALLEL, parallel_mbus_types); sun6i_csi_bridge_source_setup(csi_dev, &bridge->source_mipi_csi2, SUN6I_CSI_PORT_MIPI_CSI2, NULL); ret = v4l2_async_nf_register(notifier); if (ret) { dev_err(dev, "failed to register v4l2 async notifier: %d\n", ret); goto error_v4l2_async_notifier; } return 0; error_v4l2_async_notifier: v4l2_async_nf_cleanup(notifier); if (csi_dev->isp_available) v4l2_async_unregister_subdev(subdev); else 
v4l2_device_unregister_subdev(subdev); error_media_entity: media_entity_cleanup(&subdev->entity); return ret; } void sun6i_csi_bridge_cleanup(struct sun6i_csi_device *csi_dev) { struct v4l2_subdev *subdev = &csi_dev->bridge.subdev; struct v4l2_async_notifier *notifier = &csi_dev->bridge.notifier; v4l2_async_nf_unregister(notifier); v4l2_async_nf_cleanup(notifier); v4l2_device_unregister_subdev(subdev); media_entity_cleanup(&subdev->entity); }
linux-master
drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_bridge.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2011-2018 Magewell Electronics Co., Ltd. (Nanjing) * Author: Yong Deng <[email protected]> * Copyright 2021-2022 Bootlin * Author: Paul Kocialkowski <[email protected]> */ #include <linux/clk.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/reset.h> #include <media/v4l2-device.h> #include <media/v4l2-mc.h> #include "sun6i_csi.h" #include "sun6i_csi_bridge.h" #include "sun6i_csi_capture.h" #include "sun6i_csi_reg.h" /* ISP */ int sun6i_csi_isp_complete(struct sun6i_csi_device *csi_dev, struct v4l2_device *v4l2_dev) { if (csi_dev->v4l2_dev && csi_dev->v4l2_dev != v4l2_dev) return -EINVAL; csi_dev->v4l2_dev = v4l2_dev; csi_dev->media_dev = v4l2_dev->mdev; return sun6i_csi_capture_setup(csi_dev); } static int sun6i_csi_isp_detect(struct sun6i_csi_device *csi_dev) { struct device *dev = csi_dev->dev; struct fwnode_handle *handle; /* * ISP is not available if not connected via fwnode graph. * This will also check that the remote parent node is available. 
*/ handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), SUN6I_CSI_PORT_ISP, 0, FWNODE_GRAPH_ENDPOINT_NEXT); if (!handle) return 0; fwnode_handle_put(handle); if (!IS_ENABLED(CONFIG_VIDEO_SUN6I_ISP)) { dev_warn(dev, "ISP link is detected but not enabled in kernel config!"); return 0; } csi_dev->isp_available = true; return 0; } /* Media */ static const struct media_device_ops sun6i_csi_media_ops = { .link_notify = v4l2_pipeline_link_notify, }; /* V4L2 */ static int sun6i_csi_v4l2_setup(struct sun6i_csi_device *csi_dev) { struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2; struct media_device *media_dev = &v4l2->media_dev; struct v4l2_device *v4l2_dev = &v4l2->v4l2_dev; struct device *dev = csi_dev->dev; int ret; /* Media Device */ strscpy(media_dev->model, SUN6I_CSI_DESCRIPTION, sizeof(media_dev->model)); media_dev->hw_revision = 0; media_dev->ops = &sun6i_csi_media_ops; media_dev->dev = dev; media_device_init(media_dev); ret = media_device_register(media_dev); if (ret) { dev_err(dev, "failed to register media device: %d\n", ret); goto error_media; } /* V4L2 Device */ v4l2_dev->mdev = media_dev; ret = v4l2_device_register(dev, v4l2_dev); if (ret) { dev_err(dev, "failed to register v4l2 device: %d\n", ret); goto error_media; } csi_dev->v4l2_dev = v4l2_dev; csi_dev->media_dev = media_dev; return 0; error_media: media_device_unregister(media_dev); media_device_cleanup(media_dev); return ret; } static void sun6i_csi_v4l2_cleanup(struct sun6i_csi_device *csi_dev) { struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2; media_device_unregister(&v4l2->media_dev); v4l2_device_unregister(&v4l2->v4l2_dev); media_device_cleanup(&v4l2->media_dev); } /* Platform */ static irqreturn_t sun6i_csi_interrupt(int irq, void *private) { struct sun6i_csi_device *csi_dev = private; bool capture_streaming = csi_dev->capture.state.streaming; struct regmap *regmap = csi_dev->regmap; u32 status = 0, enable = 0; regmap_read(regmap, SUN6I_CSI_CH_INT_STA_REG, &status); regmap_read(regmap, 
SUN6I_CSI_CH_INT_EN_REG, &enable);

	if (!status)
		return IRQ_NONE;
	else if (!(status & enable) || !capture_streaming)
		goto complete;

	/*
	 * FIFO or horizontal-blank overflow: acknowledge the status and
	 * toggle the CSI enable bit off/on to recover the controller.
	 */
	if ((status & SUN6I_CSI_CH_INT_STA_FIFO0_OF) ||
	    (status & SUN6I_CSI_CH_INT_STA_FIFO1_OF) ||
	    (status & SUN6I_CSI_CH_INT_STA_FIFO2_OF) ||
	    (status & SUN6I_CSI_CH_INT_STA_HB_OF)) {
		regmap_write(regmap, SUN6I_CSI_CH_INT_STA_REG, status);

		regmap_update_bits(regmap, SUN6I_CSI_EN_REG,
				   SUN6I_CSI_EN_CSI_EN, 0);
		regmap_update_bits(regmap, SUN6I_CSI_EN_REG,
				   SUN6I_CSI_EN_CSI_EN, SUN6I_CSI_EN_CSI_EN);
		return IRQ_HANDLED;
	}

	/* Frame done: hand the completed buffer to the capture layer. */
	if (status & SUN6I_CSI_CH_INT_STA_FD)
		sun6i_csi_capture_frame_done(csi_dev);

	/* Vertical sync: let the capture layer resynchronize. */
	if (status & SUN6I_CSI_CH_INT_STA_VS)
		sun6i_csi_capture_sync(csi_dev);

complete:
	regmap_write(regmap, SUN6I_CSI_CH_INT_STA_REG, status);

	return IRQ_HANDLED;
}

/* Runtime suspend: assert reset, then gate ram and module clocks. */
static int sun6i_csi_suspend(struct device *dev)
{
	struct sun6i_csi_device *csi_dev = dev_get_drvdata(dev);

	reset_control_assert(csi_dev->reset);
	clk_disable_unprepare(csi_dev->clock_ram);
	clk_disable_unprepare(csi_dev->clock_mod);

	return 0;
}

/*
 * Runtime resume: deassert reset, then ungate module and ram clocks,
 * unwinding in reverse order on failure.
 */
static int sun6i_csi_resume(struct device *dev)
{
	struct sun6i_csi_device *csi_dev = dev_get_drvdata(dev);
	int ret;

	ret = reset_control_deassert(csi_dev->reset);
	if (ret) {
		dev_err(dev, "failed to deassert reset\n");
		return ret;
	}

	ret = clk_prepare_enable(csi_dev->clock_mod);
	if (ret) {
		dev_err(dev, "failed to enable module clock\n");
		goto error_reset;
	}

	ret = clk_prepare_enable(csi_dev->clock_ram);
	if (ret) {
		dev_err(dev, "failed to enable ram clock\n");
		goto error_clock_mod;
	}

	return 0;

error_clock_mod:
	clk_disable_unprepare(csi_dev->clock_mod);

error_reset:
	reset_control_assert(csi_dev->reset);

	return ret;
}

static const struct dev_pm_ops sun6i_csi_pm_ops = {
	.runtime_suspend	= sun6i_csi_suspend,
	.runtime_resume		= sun6i_csi_resume,
};

/* 32-bit MMIO register map covering the controller's register space. */
static const struct regmap_config sun6i_csi_regmap_config = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
	.max_register	= 0x9c,
};

/*
 * Acquire all platform resources: registers (regmap over MMIO), clocks
 * (with an exclusive mod clock rate), shared reset, interrupt, runtime PM.
 */
static int sun6i_csi_resources_setup(struct sun6i_csi_device *csi_dev,
				     struct
platform_device *platform_dev) { struct device *dev = csi_dev->dev; const struct sun6i_csi_variant *variant; void __iomem *io_base; int ret; int irq; variant = of_device_get_match_data(dev); if (!variant) return -EINVAL; /* Registers */ io_base = devm_platform_ioremap_resource(platform_dev, 0); if (IS_ERR(io_base)) return PTR_ERR(io_base); csi_dev->regmap = devm_regmap_init_mmio_clk(dev, "bus", io_base, &sun6i_csi_regmap_config); if (IS_ERR(csi_dev->regmap)) { dev_err(dev, "failed to init register map\n"); return PTR_ERR(csi_dev->regmap); } /* Clocks */ csi_dev->clock_mod = devm_clk_get(dev, "mod"); if (IS_ERR(csi_dev->clock_mod)) { dev_err(dev, "failed to acquire module clock\n"); return PTR_ERR(csi_dev->clock_mod); } csi_dev->clock_ram = devm_clk_get(dev, "ram"); if (IS_ERR(csi_dev->clock_ram)) { dev_err(dev, "failed to acquire ram clock\n"); return PTR_ERR(csi_dev->clock_ram); } ret = clk_set_rate_exclusive(csi_dev->clock_mod, variant->clock_mod_rate); if (ret) { dev_err(dev, "failed to set mod clock rate\n"); return ret; } /* Reset */ csi_dev->reset = devm_reset_control_get_shared(dev, NULL); if (IS_ERR(csi_dev->reset)) { dev_err(dev, "failed to acquire reset\n"); ret = PTR_ERR(csi_dev->reset); goto error_clock_rate_exclusive; } /* Interrupt */ irq = platform_get_irq(platform_dev, 0); if (irq < 0) { ret = -ENXIO; goto error_clock_rate_exclusive; } ret = devm_request_irq(dev, irq, sun6i_csi_interrupt, IRQF_SHARED, SUN6I_CSI_NAME, csi_dev); if (ret) { dev_err(dev, "failed to request interrupt\n"); goto error_clock_rate_exclusive; } /* Runtime PM */ pm_runtime_enable(dev); return 0; error_clock_rate_exclusive: clk_rate_exclusive_put(csi_dev->clock_mod); return ret; } static void sun6i_csi_resources_cleanup(struct sun6i_csi_device *csi_dev) { pm_runtime_disable(csi_dev->dev); clk_rate_exclusive_put(csi_dev->clock_mod); } static int sun6i_csi_probe(struct platform_device *platform_dev) { struct sun6i_csi_device *csi_dev; struct device *dev = &platform_dev->dev; int 
ret; csi_dev = devm_kzalloc(dev, sizeof(*csi_dev), GFP_KERNEL); if (!csi_dev) return -ENOMEM; csi_dev->dev = &platform_dev->dev; platform_set_drvdata(platform_dev, csi_dev); ret = sun6i_csi_resources_setup(csi_dev, platform_dev); if (ret) return ret; ret = sun6i_csi_isp_detect(csi_dev); if (ret) goto error_resources; /* * Register our own v4l2 and media devices when there is no ISP around. * Otherwise the ISP will use async subdev registration with our bridge, * which will provide v4l2 and media devices that are used to register * the video interface. */ if (!csi_dev->isp_available) { ret = sun6i_csi_v4l2_setup(csi_dev); if (ret) goto error_resources; } ret = sun6i_csi_bridge_setup(csi_dev); if (ret) goto error_v4l2; if (!csi_dev->isp_available) { ret = sun6i_csi_capture_setup(csi_dev); if (ret) goto error_bridge; } return 0; error_bridge: sun6i_csi_bridge_cleanup(csi_dev); error_v4l2: if (!csi_dev->isp_available) sun6i_csi_v4l2_cleanup(csi_dev); error_resources: sun6i_csi_resources_cleanup(csi_dev); return ret; } static void sun6i_csi_remove(struct platform_device *pdev) { struct sun6i_csi_device *csi_dev = platform_get_drvdata(pdev); sun6i_csi_capture_cleanup(csi_dev); sun6i_csi_bridge_cleanup(csi_dev); if (!csi_dev->isp_available) sun6i_csi_v4l2_cleanup(csi_dev); sun6i_csi_resources_cleanup(csi_dev); } static const struct sun6i_csi_variant sun6i_a31_csi_variant = { .clock_mod_rate = 297000000, }; static const struct sun6i_csi_variant sun50i_a64_csi_variant = { .clock_mod_rate = 300000000, }; static const struct of_device_id sun6i_csi_of_match[] = { { .compatible = "allwinner,sun6i-a31-csi", .data = &sun6i_a31_csi_variant, }, { .compatible = "allwinner,sun8i-a83t-csi", .data = &sun6i_a31_csi_variant, }, { .compatible = "allwinner,sun8i-h3-csi", .data = &sun6i_a31_csi_variant, }, { .compatible = "allwinner,sun8i-v3s-csi", .data = &sun6i_a31_csi_variant, }, { .compatible = "allwinner,sun50i-a64-csi", .data = &sun50i_a64_csi_variant, }, {}, }; 
MODULE_DEVICE_TABLE(of, sun6i_csi_of_match);

static struct platform_driver sun6i_csi_platform_driver = {
	.probe	= sun6i_csi_probe,
	/* .remove_new: void-returning remove callback. */
	.remove_new	= sun6i_csi_remove,
	.driver	= {
		.name		= SUN6I_CSI_NAME,
		.of_match_table	= sun6i_csi_of_match,
		.pm		= &sun6i_csi_pm_ops,
	},
};

module_platform_driver(sun6i_csi_platform_driver);

MODULE_DESCRIPTION("Allwinner A31 Camera Sensor Interface driver");
MODULE_AUTHOR("Yong Deng <[email protected]>");
MODULE_AUTHOR("Paul Kocialkowski <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2011-2018 Magewell Electronics Co., Ltd. (Nanjing) * Author: Yong Deng <[email protected]> * Copyright 2021-2022 Bootlin * Author: Paul Kocialkowski <[email protected]> */ #include <linux/of.h> #include <linux/regmap.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mc.h> #include <media/videobuf2-dma-contig.h> #include <media/videobuf2-v4l2.h> #include "sun6i_csi.h" #include "sun6i_csi_bridge.h" #include "sun6i_csi_capture.h" #include "sun6i_csi_reg.h" /* Helpers */ void sun6i_csi_capture_dimensions(struct sun6i_csi_device *csi_dev, unsigned int *width, unsigned int *height) { if (width) *width = csi_dev->capture.format.fmt.pix.width; if (height) *height = csi_dev->capture.format.fmt.pix.height; } void sun6i_csi_capture_format(struct sun6i_csi_device *csi_dev, u32 *pixelformat, u32 *field) { if (pixelformat) *pixelformat = csi_dev->capture.format.fmt.pix.pixelformat; if (field) *field = csi_dev->capture.format.fmt.pix.field; } /* Format */ static const struct sun6i_csi_capture_format sun6i_csi_capture_formats[] = { /* Bayer */ { .pixelformat = V4L2_PIX_FMT_SBGGR8, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_8, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_8, }, { .pixelformat = V4L2_PIX_FMT_SGBRG8, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_8, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_8, }, { .pixelformat = V4L2_PIX_FMT_SGRBG8, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_8, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_8, }, { .pixelformat = V4L2_PIX_FMT_SRGGB8, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_8, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_8, }, { .pixelformat = V4L2_PIX_FMT_SBGGR10, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_10, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_10, }, { .pixelformat = V4L2_PIX_FMT_SGBRG10, 
.output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_10, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_10, }, { .pixelformat = V4L2_PIX_FMT_SGRBG10, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_10, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_10, }, { .pixelformat = V4L2_PIX_FMT_SRGGB10, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_10, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_10, }, { .pixelformat = V4L2_PIX_FMT_SBGGR12, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_12, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_12, }, { .pixelformat = V4L2_PIX_FMT_SGBRG12, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_12, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_12, }, { .pixelformat = V4L2_PIX_FMT_SGRBG12, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_12, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_12, }, { .pixelformat = V4L2_PIX_FMT_SRGGB12, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_12, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_12, }, /* RGB */ { .pixelformat = V4L2_PIX_FMT_RGB565, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RGB565, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RGB565, }, { .pixelformat = V4L2_PIX_FMT_RGB565X, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RGB565, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RGB565, }, /* YUV422 */ { .pixelformat = V4L2_PIX_FMT_YUYV, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_8, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_8, .input_format_raw = true, .hsize_len_factor = 2, }, { .pixelformat = V4L2_PIX_FMT_YVYU, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_8, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_8, .input_format_raw = true, .hsize_len_factor = 2, }, { .pixelformat = V4L2_PIX_FMT_UYVY, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_8, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_8, .input_format_raw = true, 
.hsize_len_factor = 2, }, { .pixelformat = V4L2_PIX_FMT_VYUY, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_8, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_8, .input_format_raw = true, .hsize_len_factor = 2, }, { .pixelformat = V4L2_PIX_FMT_NV16, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_YUV422SP, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_YUV422SP, }, { .pixelformat = V4L2_PIX_FMT_NV61, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_YUV422SP, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_YUV422SP, .input_yuv_seq_invert = true, }, { .pixelformat = V4L2_PIX_FMT_YUV422P, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_YUV422P, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_YUV422P, }, /* YUV420 */ { .pixelformat = V4L2_PIX_FMT_NV12_16L16, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_YUV420MB, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_YUV420MB, }, { .pixelformat = V4L2_PIX_FMT_NV12, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_YUV420SP, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_YUV420SP, }, { .pixelformat = V4L2_PIX_FMT_NV21, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_YUV420SP, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_YUV420SP, .input_yuv_seq_invert = true, }, { .pixelformat = V4L2_PIX_FMT_YUV420, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_YUV420P, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_YUV420P, }, { .pixelformat = V4L2_PIX_FMT_YVU420, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_YUV420P, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_YUV420P, .input_yuv_seq_invert = true, }, /* Compressed */ { .pixelformat = V4L2_PIX_FMT_JPEG, .output_format_frame = SUN6I_CSI_OUTPUT_FMT_FRAME_RAW_8, .output_format_field = SUN6I_CSI_OUTPUT_FMT_FIELD_RAW_8, }, }; const struct sun6i_csi_capture_format *sun6i_csi_capture_format_find(u32 pixelformat) { unsigned int i; for (i = 0; i < ARRAY_SIZE(sun6i_csi_capture_formats); i++) if (sun6i_csi_capture_formats[i].pixelformat == 
pixelformat) return &sun6i_csi_capture_formats[i]; return NULL; } /* RAW formats need an exact match between pixel and mbus formats. */ static const struct sun6i_csi_capture_format_match sun6i_csi_capture_format_matches[] = { /* YUV420 */ { .pixelformat = V4L2_PIX_FMT_YUYV, .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, }, { .pixelformat = V4L2_PIX_FMT_YUYV, .mbus_code = MEDIA_BUS_FMT_YUYV8_1X16, }, { .pixelformat = V4L2_PIX_FMT_YVYU, .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8, }, { .pixelformat = V4L2_PIX_FMT_YVYU, .mbus_code = MEDIA_BUS_FMT_YVYU8_1X16, }, { .pixelformat = V4L2_PIX_FMT_UYVY, .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8, }, { .pixelformat = V4L2_PIX_FMT_UYVY, .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16, }, { .pixelformat = V4L2_PIX_FMT_VYUY, .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8, }, { .pixelformat = V4L2_PIX_FMT_VYUY, .mbus_code = MEDIA_BUS_FMT_VYUY8_1X16, }, /* RGB */ { .pixelformat = V4L2_PIX_FMT_RGB565, .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE, }, { .pixelformat = V4L2_PIX_FMT_RGB565X, .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_BE, }, /* Bayer */ { .pixelformat = V4L2_PIX_FMT_SBGGR8, .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8, }, { .pixelformat = V4L2_PIX_FMT_SGBRG8, .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8, }, { .pixelformat = V4L2_PIX_FMT_SGRBG8, .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8, }, { .pixelformat = V4L2_PIX_FMT_SRGGB8, .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8, }, { .pixelformat = V4L2_PIX_FMT_SBGGR10, .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10, }, { .pixelformat = V4L2_PIX_FMT_SGBRG10, .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10, }, { .pixelformat = V4L2_PIX_FMT_SGRBG10, .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10, }, { .pixelformat = V4L2_PIX_FMT_SRGGB10, .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10, }, { .pixelformat = V4L2_PIX_FMT_SBGGR12, .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12, }, { .pixelformat = V4L2_PIX_FMT_SGBRG12, .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12, }, { .pixelformat = V4L2_PIX_FMT_SGRBG12, .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12, }, { .pixelformat = V4L2_PIX_FMT_SRGGB12, .mbus_code 
= MEDIA_BUS_FMT_SRGGB12_1X12, }, /* Compressed */ { .pixelformat = V4L2_PIX_FMT_JPEG, .mbus_code = MEDIA_BUS_FMT_JPEG_1X8, }, }; static bool sun6i_csi_capture_format_match(u32 pixelformat, u32 mbus_code) { unsigned int i; for (i = 0; i < ARRAY_SIZE(sun6i_csi_capture_format_matches); i++) { const struct sun6i_csi_capture_format_match *match = &sun6i_csi_capture_format_matches[i]; if (match->pixelformat == pixelformat && match->mbus_code == mbus_code) return true; } return false; } /* Capture */ static void sun6i_csi_capture_buffer_configure(struct sun6i_csi_device *csi_dev, struct sun6i_csi_buffer *csi_buffer) { struct regmap *regmap = csi_dev->regmap; const struct v4l2_format_info *info; struct vb2_buffer *vb2_buffer; unsigned int width, height; dma_addr_t address; u32 pixelformat; vb2_buffer = &csi_buffer->v4l2_buffer.vb2_buf; address = vb2_dma_contig_plane_dma_addr(vb2_buffer, 0); regmap_write(regmap, SUN6I_CSI_CH_FIFO0_ADDR_REG, SUN6I_CSI_ADDR_VALUE(address)); sun6i_csi_capture_dimensions(csi_dev, &width, &height); sun6i_csi_capture_format(csi_dev, &pixelformat, NULL); info = v4l2_format_info(pixelformat); /* Unsupported formats are single-plane, so we can stop here. 
*/ if (!info) return; if (info->comp_planes > 1) { address += info->bpp[0] * width * height; regmap_write(regmap, SUN6I_CSI_CH_FIFO1_ADDR_REG, SUN6I_CSI_ADDR_VALUE(address)); } if (info->comp_planes > 2) { address += info->bpp[1] * DIV_ROUND_UP(width, info->hdiv) * DIV_ROUND_UP(height, info->vdiv); regmap_write(regmap, SUN6I_CSI_CH_FIFO2_ADDR_REG, SUN6I_CSI_ADDR_VALUE(address)); } } void sun6i_csi_capture_configure(struct sun6i_csi_device *csi_dev) { struct regmap *regmap = csi_dev->regmap; const struct sun6i_csi_capture_format *format; const struct v4l2_format_info *info; u32 hsize_len, vsize_len; u32 luma_line, chroma_line = 0; u32 pixelformat, field; u32 width, height; sun6i_csi_capture_dimensions(csi_dev, &width, &height); sun6i_csi_capture_format(csi_dev, &pixelformat, &field); format = sun6i_csi_capture_format_find(pixelformat); if (WARN_ON(!format)) return; hsize_len = width; vsize_len = height; /* * When using 8-bit raw input/output (for packed YUV), we need to adapt * the width to account for the difference in bpp when it's not 8-bit. 
*/ if (format->hsize_len_factor) hsize_len *= format->hsize_len_factor; regmap_write(regmap, SUN6I_CSI_CH_HSIZE_REG, SUN6I_CSI_CH_HSIZE_LEN(hsize_len) | SUN6I_CSI_CH_HSIZE_START(0)); regmap_write(regmap, SUN6I_CSI_CH_VSIZE_REG, SUN6I_CSI_CH_VSIZE_LEN(vsize_len) | SUN6I_CSI_CH_VSIZE_START(0)); switch (pixelformat) { case V4L2_PIX_FMT_RGB565X: luma_line = width * 2; break; case V4L2_PIX_FMT_NV12_16L16: luma_line = width; chroma_line = width; break; case V4L2_PIX_FMT_JPEG: luma_line = width; break; default: info = v4l2_format_info(pixelformat); if (WARN_ON(!info)) return; luma_line = width * info->bpp[0]; if (info->comp_planes > 1) chroma_line = width * info->bpp[1] / info->hdiv; break; } regmap_write(regmap, SUN6I_CSI_CH_BUF_LEN_REG, SUN6I_CSI_CH_BUF_LEN_CHROMA_LINE(chroma_line) | SUN6I_CSI_CH_BUF_LEN_LUMA_LINE(luma_line)); } /* State */ static void sun6i_csi_capture_state_cleanup(struct sun6i_csi_device *csi_dev, bool error) { struct sun6i_csi_capture_state *state = &csi_dev->capture.state; struct sun6i_csi_buffer **csi_buffer_states[] = { &state->pending, &state->current, &state->complete, }; struct sun6i_csi_buffer *csi_buffer; struct vb2_buffer *vb2_buffer; unsigned long flags; unsigned int i; spin_lock_irqsave(&state->lock, flags); for (i = 0; i < ARRAY_SIZE(csi_buffer_states); i++) { csi_buffer = *csi_buffer_states[i]; if (!csi_buffer) continue; vb2_buffer = &csi_buffer->v4l2_buffer.vb2_buf; vb2_buffer_done(vb2_buffer, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_QUEUED); *csi_buffer_states[i] = NULL; } list_for_each_entry(csi_buffer, &state->queue, list) { vb2_buffer = &csi_buffer->v4l2_buffer.vb2_buf; vb2_buffer_done(vb2_buffer, error ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_QUEUED); } INIT_LIST_HEAD(&state->queue); spin_unlock_irqrestore(&state->lock, flags); } void sun6i_csi_capture_state_update(struct sun6i_csi_device *csi_dev) { struct sun6i_csi_capture_state *state = &csi_dev->capture.state; struct sun6i_csi_buffer *csi_buffer; unsigned long flags; spin_lock_irqsave(&state->lock, flags); if (list_empty(&state->queue)) goto complete; if (state->pending) goto complete; csi_buffer = list_first_entry(&state->queue, struct sun6i_csi_buffer, list); sun6i_csi_capture_buffer_configure(csi_dev, csi_buffer); list_del(&csi_buffer->list); state->pending = csi_buffer; complete: spin_unlock_irqrestore(&state->lock, flags); } static void sun6i_csi_capture_state_complete(struct sun6i_csi_device *csi_dev) { struct sun6i_csi_capture_state *state = &csi_dev->capture.state; unsigned long flags; spin_lock_irqsave(&state->lock, flags); if (!state->pending) goto complete; state->complete = state->current; state->current = state->pending; state->pending = NULL; if (state->complete) { struct sun6i_csi_buffer *csi_buffer = state->complete; struct vb2_buffer *vb2_buffer = &csi_buffer->v4l2_buffer.vb2_buf; vb2_buffer->timestamp = ktime_get_ns(); csi_buffer->v4l2_buffer.sequence = state->sequence; vb2_buffer_done(vb2_buffer, VB2_BUF_STATE_DONE); state->complete = NULL; } complete: spin_unlock_irqrestore(&state->lock, flags); } void sun6i_csi_capture_frame_done(struct sun6i_csi_device *csi_dev) { struct sun6i_csi_capture_state *state = &csi_dev->capture.state; unsigned long flags; spin_lock_irqsave(&state->lock, flags); state->sequence++; spin_unlock_irqrestore(&state->lock, flags); } void sun6i_csi_capture_sync(struct sun6i_csi_device *csi_dev) { sun6i_csi_capture_state_complete(csi_dev); sun6i_csi_capture_state_update(csi_dev); } /* Queue */ static int sun6i_csi_capture_queue_setup(struct vb2_queue *queue, unsigned int *buffers_count, unsigned int *planes_count, unsigned int sizes[], struct device *alloc_devs[]) { struct 
sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue); unsigned int size = csi_dev->capture.format.fmt.pix.sizeimage; if (*planes_count) return sizes[0] < size ? -EINVAL : 0; *planes_count = 1; sizes[0] = size; return 0; } static int sun6i_csi_capture_buffer_prepare(struct vb2_buffer *buffer) { struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(buffer->vb2_queue); struct sun6i_csi_capture *capture = &csi_dev->capture; struct v4l2_device *v4l2_dev = csi_dev->v4l2_dev; struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(buffer); unsigned long size = capture->format.fmt.pix.sizeimage; if (vb2_plane_size(buffer, 0) < size) { v4l2_err(v4l2_dev, "buffer too small (%lu < %lu)\n", vb2_plane_size(buffer, 0), size); return -EINVAL; } vb2_set_plane_payload(buffer, 0, size); v4l2_buffer->field = capture->format.fmt.pix.field; return 0; } static void sun6i_csi_capture_buffer_queue(struct vb2_buffer *buffer) { struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(buffer->vb2_queue); struct sun6i_csi_capture_state *state = &csi_dev->capture.state; struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(buffer); struct sun6i_csi_buffer *csi_buffer = container_of(v4l2_buffer, struct sun6i_csi_buffer, v4l2_buffer); unsigned long flags; spin_lock_irqsave(&state->lock, flags); list_add_tail(&csi_buffer->list, &state->queue); spin_unlock_irqrestore(&state->lock, flags); } static int sun6i_csi_capture_start_streaming(struct vb2_queue *queue, unsigned int count) { struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue); struct sun6i_csi_capture_state *state = &csi_dev->capture.state; struct video_device *video_dev = &csi_dev->capture.video_dev; struct v4l2_subdev *subdev = &csi_dev->bridge.subdev; int ret; state->sequence = 0; ret = video_device_pipeline_alloc_start(video_dev); if (ret < 0) goto error_state; state->streaming = true; ret = v4l2_subdev_call(subdev, video, s_stream, 1); if (ret && ret != -ENOIOCTLCMD) goto error_streaming; return 0; error_streaming: state->streaming = 
false; video_device_pipeline_stop(video_dev); error_state: sun6i_csi_capture_state_cleanup(csi_dev, false); return ret; } static void sun6i_csi_capture_stop_streaming(struct vb2_queue *queue) { struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue); struct sun6i_csi_capture_state *state = &csi_dev->capture.state; struct video_device *video_dev = &csi_dev->capture.video_dev; struct v4l2_subdev *subdev = &csi_dev->bridge.subdev; v4l2_subdev_call(subdev, video, s_stream, 0); state->streaming = false; video_device_pipeline_stop(video_dev); sun6i_csi_capture_state_cleanup(csi_dev, true); } static const struct vb2_ops sun6i_csi_capture_queue_ops = { .queue_setup = sun6i_csi_capture_queue_setup, .buf_prepare = sun6i_csi_capture_buffer_prepare, .buf_queue = sun6i_csi_capture_buffer_queue, .start_streaming = sun6i_csi_capture_start_streaming, .stop_streaming = sun6i_csi_capture_stop_streaming, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; /* V4L2 Device */ static void sun6i_csi_capture_format_prepare(struct v4l2_format *format) { struct v4l2_pix_format *pix_format = &format->fmt.pix; const struct v4l2_format_info *info; unsigned int width, height; v4l_bound_align_image(&pix_format->width, SUN6I_CSI_CAPTURE_WIDTH_MIN, SUN6I_CSI_CAPTURE_WIDTH_MAX, 1, &pix_format->height, SUN6I_CSI_CAPTURE_HEIGHT_MIN, SUN6I_CSI_CAPTURE_HEIGHT_MAX, 1, 0); if (!sun6i_csi_capture_format_find(pix_format->pixelformat)) pix_format->pixelformat = sun6i_csi_capture_formats[0].pixelformat; width = pix_format->width; height = pix_format->height; info = v4l2_format_info(pix_format->pixelformat); switch (pix_format->pixelformat) { case V4L2_PIX_FMT_NV12_16L16: pix_format->bytesperline = width * 12 / 8; pix_format->sizeimage = pix_format->bytesperline * height; break; case V4L2_PIX_FMT_JPEG: pix_format->bytesperline = width; pix_format->sizeimage = pix_format->bytesperline * height; break; default: v4l2_fill_pixfmt(pix_format, pix_format->pixelformat, width, height); 
break; } if (pix_format->field == V4L2_FIELD_ANY) pix_format->field = V4L2_FIELD_NONE; if (pix_format->pixelformat == V4L2_PIX_FMT_JPEG) pix_format->colorspace = V4L2_COLORSPACE_JPEG; else if (info && info->pixel_enc == V4L2_PIXEL_ENC_BAYER) pix_format->colorspace = V4L2_COLORSPACE_RAW; else pix_format->colorspace = V4L2_COLORSPACE_SRGB; pix_format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; pix_format->quantization = V4L2_QUANTIZATION_DEFAULT; pix_format->xfer_func = V4L2_XFER_FUNC_DEFAULT; } static int sun6i_csi_capture_querycap(struct file *file, void *private, struct v4l2_capability *capability) { struct sun6i_csi_device *csi_dev = video_drvdata(file); struct video_device *video_dev = &csi_dev->capture.video_dev; strscpy(capability->driver, SUN6I_CSI_NAME, sizeof(capability->driver)); strscpy(capability->card, video_dev->name, sizeof(capability->card)); snprintf(capability->bus_info, sizeof(capability->bus_info), "platform:%s", dev_name(csi_dev->dev)); return 0; } static int sun6i_csi_capture_enum_fmt(struct file *file, void *private, struct v4l2_fmtdesc *fmtdesc) { u32 index = fmtdesc->index; if (index >= ARRAY_SIZE(sun6i_csi_capture_formats)) return -EINVAL; fmtdesc->pixelformat = sun6i_csi_capture_formats[index].pixelformat; return 0; } static int sun6i_csi_capture_g_fmt(struct file *file, void *private, struct v4l2_format *format) { struct sun6i_csi_device *csi_dev = video_drvdata(file); *format = csi_dev->capture.format; return 0; } static int sun6i_csi_capture_s_fmt(struct file *file, void *private, struct v4l2_format *format) { struct sun6i_csi_device *csi_dev = video_drvdata(file); struct sun6i_csi_capture *capture = &csi_dev->capture; if (vb2_is_busy(&capture->queue)) return -EBUSY; sun6i_csi_capture_format_prepare(format); csi_dev->capture.format = *format; return 0; } static int sun6i_csi_capture_try_fmt(struct file *file, void *private, struct v4l2_format *format) { sun6i_csi_capture_format_prepare(format); return 0; } static int 
sun6i_csi_capture_enum_input(struct file *file, void *private, struct v4l2_input *input) { if (input->index != 0) return -EINVAL; input->type = V4L2_INPUT_TYPE_CAMERA; strscpy(input->name, "Camera", sizeof(input->name)); return 0; } static int sun6i_csi_capture_g_input(struct file *file, void *private, unsigned int *index) { *index = 0; return 0; } static int sun6i_csi_capture_s_input(struct file *file, void *private, unsigned int index) { if (index != 0) return -EINVAL; return 0; } static const struct v4l2_ioctl_ops sun6i_csi_capture_ioctl_ops = { .vidioc_querycap = sun6i_csi_capture_querycap, .vidioc_enum_fmt_vid_cap = sun6i_csi_capture_enum_fmt, .vidioc_g_fmt_vid_cap = sun6i_csi_capture_g_fmt, .vidioc_s_fmt_vid_cap = sun6i_csi_capture_s_fmt, .vidioc_try_fmt_vid_cap = sun6i_csi_capture_try_fmt, .vidioc_enum_input = sun6i_csi_capture_enum_input, .vidioc_g_input = sun6i_csi_capture_g_input, .vidioc_s_input = sun6i_csi_capture_s_input, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, }; /* V4L2 File */ static int sun6i_csi_capture_open(struct file *file) { struct sun6i_csi_device *csi_dev = video_drvdata(file); struct sun6i_csi_capture *capture = &csi_dev->capture; int ret; if (mutex_lock_interruptible(&capture->lock)) return -ERESTARTSYS; ret = v4l2_pipeline_pm_get(&capture->video_dev.entity); if (ret < 0) goto error_lock; ret = v4l2_fh_open(file); if (ret < 0) goto error_pipeline; mutex_unlock(&capture->lock); return 0; error_pipeline: v4l2_pipeline_pm_put(&capture->video_dev.entity); error_lock: mutex_unlock(&capture->lock); return ret; } static int sun6i_csi_capture_close(struct file *file) { struct sun6i_csi_device *csi_dev = video_drvdata(file); struct sun6i_csi_capture 
*capture = &csi_dev->capture; mutex_lock(&capture->lock); _vb2_fop_release(file, NULL); v4l2_pipeline_pm_put(&capture->video_dev.entity); mutex_unlock(&capture->lock); return 0; } static const struct v4l2_file_operations sun6i_csi_capture_fops = { .owner = THIS_MODULE, .open = sun6i_csi_capture_open, .release = sun6i_csi_capture_close, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, .poll = vb2_fop_poll }; /* Media Entity */ static int sun6i_csi_capture_link_validate(struct media_link *link) { struct video_device *video_dev = media_entity_to_video_device(link->sink->entity); struct sun6i_csi_device *csi_dev = video_get_drvdata(video_dev); struct v4l2_device *v4l2_dev = csi_dev->v4l2_dev; const struct sun6i_csi_capture_format *capture_format; const struct sun6i_csi_bridge_format *bridge_format; unsigned int capture_width, capture_height; unsigned int bridge_width, bridge_height; const struct v4l2_format_info *format_info; u32 pixelformat, capture_field; u32 mbus_code, bridge_field; bool match; sun6i_csi_capture_dimensions(csi_dev, &capture_width, &capture_height); sun6i_csi_capture_format(csi_dev, &pixelformat, &capture_field); capture_format = sun6i_csi_capture_format_find(pixelformat); if (WARN_ON(!capture_format)) return -EINVAL; sun6i_csi_bridge_dimensions(csi_dev, &bridge_width, &bridge_height); sun6i_csi_bridge_format(csi_dev, &mbus_code, &bridge_field); bridge_format = sun6i_csi_bridge_format_find(mbus_code); if (WARN_ON(!bridge_format)) return -EINVAL; /* No cropping/scaling is supported. */ if (capture_width != bridge_width || capture_height != bridge_height) { v4l2_err(v4l2_dev, "invalid input/output dimensions: %ux%u/%ux%u\n", bridge_width, bridge_height, capture_width, capture_height); return -EINVAL; } format_info = v4l2_format_info(pixelformat); /* Some formats are not listed. 
*/ if (!format_info) return 0; if (format_info->pixel_enc == V4L2_PIXEL_ENC_BAYER && bridge_format->input_format != SUN6I_CSI_INPUT_FMT_RAW) goto invalid; if (format_info->pixel_enc == V4L2_PIXEL_ENC_RGB && bridge_format->input_format != SUN6I_CSI_INPUT_FMT_RAW) goto invalid; if (format_info->pixel_enc == V4L2_PIXEL_ENC_YUV) { if (bridge_format->input_format != SUN6I_CSI_INPUT_FMT_YUV420 && bridge_format->input_format != SUN6I_CSI_INPUT_FMT_YUV422) goto invalid; /* YUV420 input can't produce YUV422 output. */ if (bridge_format->input_format == SUN6I_CSI_INPUT_FMT_YUV420 && format_info->vdiv == 1) goto invalid; } /* With raw input mode, we need a 1:1 match between input and output. */ if (bridge_format->input_format == SUN6I_CSI_INPUT_FMT_RAW || capture_format->input_format_raw) { match = sun6i_csi_capture_format_match(pixelformat, mbus_code); if (!match) goto invalid; } return 0; invalid: v4l2_err(v4l2_dev, "invalid input/output format combination\n"); return -EINVAL; } static const struct media_entity_operations sun6i_csi_capture_media_ops = { .link_validate = sun6i_csi_capture_link_validate }; /* Capture */ int sun6i_csi_capture_setup(struct sun6i_csi_device *csi_dev) { struct sun6i_csi_capture *capture = &csi_dev->capture; struct sun6i_csi_capture_state *state = &capture->state; struct v4l2_device *v4l2_dev = csi_dev->v4l2_dev; struct v4l2_subdev *bridge_subdev = &csi_dev->bridge.subdev; struct video_device *video_dev = &capture->video_dev; struct vb2_queue *queue = &capture->queue; struct media_pad *pad = &capture->pad; struct v4l2_format *format = &csi_dev->capture.format; struct v4l2_pix_format *pix_format = &format->fmt.pix; int ret; /* This may happen with multiple bridge notifier bound calls. 
*/ if (state->setup) return 0; /* State */ INIT_LIST_HEAD(&state->queue); spin_lock_init(&state->lock); /* Media Entity */ video_dev->entity.ops = &sun6i_csi_capture_media_ops; /* Media Pad */ pad->flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; ret = media_entity_pads_init(&video_dev->entity, 1, pad); if (ret < 0) return ret; /* Queue */ mutex_init(&capture->lock); queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; queue->io_modes = VB2_MMAP | VB2_DMABUF; queue->buf_struct_size = sizeof(struct sun6i_csi_buffer); queue->ops = &sun6i_csi_capture_queue_ops; queue->mem_ops = &vb2_dma_contig_memops; queue->min_buffers_needed = 2; queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; queue->lock = &capture->lock; queue->dev = csi_dev->dev; queue->drv_priv = csi_dev; ret = vb2_queue_init(queue); if (ret) { v4l2_err(v4l2_dev, "failed to initialize vb2 queue: %d\n", ret); goto error_media_entity; } /* V4L2 Format */ format->type = queue->type; pix_format->pixelformat = sun6i_csi_capture_formats[0].pixelformat; pix_format->width = 1280; pix_format->height = 720; pix_format->field = V4L2_FIELD_NONE; sun6i_csi_capture_format_prepare(format); /* Video Device */ strscpy(video_dev->name, SUN6I_CSI_CAPTURE_NAME, sizeof(video_dev->name)); video_dev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; video_dev->vfl_dir = VFL_DIR_RX; video_dev->release = video_device_release_empty; video_dev->fops = &sun6i_csi_capture_fops; video_dev->ioctl_ops = &sun6i_csi_capture_ioctl_ops; video_dev->v4l2_dev = v4l2_dev; video_dev->queue = queue; video_dev->lock = &capture->lock; video_set_drvdata(video_dev, csi_dev); ret = video_register_device(video_dev, VFL_TYPE_VIDEO, -1); if (ret < 0) { v4l2_err(v4l2_dev, "failed to register video device: %d\n", ret); goto error_media_entity; } /* Media Pad Link */ ret = media_create_pad_link(&bridge_subdev->entity, SUN6I_CSI_BRIDGE_PAD_SOURCE, &video_dev->entity, 0, csi_dev->isp_available ? 
0 : MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret < 0) { v4l2_err(v4l2_dev, "failed to create %s:%u -> %s:%u link\n", bridge_subdev->entity.name, SUN6I_CSI_BRIDGE_PAD_SOURCE, video_dev->entity.name, 0); goto error_video_device; } state->setup = true; return 0; error_video_device: vb2_video_unregister_device(video_dev); error_media_entity: media_entity_cleanup(&video_dev->entity); mutex_destroy(&capture->lock); return ret; } void sun6i_csi_capture_cleanup(struct sun6i_csi_device *csi_dev) { struct sun6i_csi_capture *capture = &csi_dev->capture; struct video_device *video_dev = &capture->video_dev; /* This may happen if async registration failed to complete. */ if (!capture->state.setup) return; vb2_video_unregister_device(video_dev); media_entity_cleanup(&video_dev->entity); mutex_destroy(&capture->lock); capture->state.setup = false; }
linux-master
drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c
// SPDX-License-Identifier: GPL-2.0-only /* * Microchip Image Sensor Controller (ISC) common driver base * * Copyright (C) 2016-2019 Microchip Technology, Inc. * * Author: Songjun Wu * Author: Eugen Hristev <[email protected]> * */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/math64.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/videodev2.h> #include <linux/atmel-isc-media.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-image-sizes.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-subdev.h> #include <media/videobuf2-dma-contig.h> #include "microchip-isc-regs.h" #include "microchip-isc.h" #define ISC_IS_FORMAT_RAW(mbus_code) \ (((mbus_code) & 0xf000) == 0x3000) #define ISC_IS_FORMAT_GREY(mbus_code) \ (((mbus_code) == MEDIA_BUS_FMT_Y10_1X10) | \ (((mbus_code) == MEDIA_BUS_FMT_Y8_1X8))) static inline void isc_update_v4l2_ctrls(struct isc_device *isc) { struct isc_ctrls *ctrls = &isc->ctrls; /* In here we set the v4l2 controls w.r.t. 
our pipeline config */ v4l2_ctrl_s_ctrl(isc->r_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_R]); v4l2_ctrl_s_ctrl(isc->b_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_B]); v4l2_ctrl_s_ctrl(isc->gr_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_GR]); v4l2_ctrl_s_ctrl(isc->gb_gain_ctrl, ctrls->gain[ISC_HIS_CFG_MODE_GB]); v4l2_ctrl_s_ctrl(isc->r_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_R]); v4l2_ctrl_s_ctrl(isc->b_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_B]); v4l2_ctrl_s_ctrl(isc->gr_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_GR]); v4l2_ctrl_s_ctrl(isc->gb_off_ctrl, ctrls->offset[ISC_HIS_CFG_MODE_GB]); } static inline void isc_update_awb_ctrls(struct isc_device *isc) { struct isc_ctrls *ctrls = &isc->ctrls; /* In here we set our actual hw pipeline config */ regmap_write(isc->regmap, ISC_WB_O_RGR, ((ctrls->offset[ISC_HIS_CFG_MODE_R])) | ((ctrls->offset[ISC_HIS_CFG_MODE_GR]) << 16)); regmap_write(isc->regmap, ISC_WB_O_BGB, ((ctrls->offset[ISC_HIS_CFG_MODE_B])) | ((ctrls->offset[ISC_HIS_CFG_MODE_GB]) << 16)); regmap_write(isc->regmap, ISC_WB_G_RGR, ctrls->gain[ISC_HIS_CFG_MODE_R] | (ctrls->gain[ISC_HIS_CFG_MODE_GR] << 16)); regmap_write(isc->regmap, ISC_WB_G_BGB, ctrls->gain[ISC_HIS_CFG_MODE_B] | (ctrls->gain[ISC_HIS_CFG_MODE_GB] << 16)); } static inline void isc_reset_awb_ctrls(struct isc_device *isc) { unsigned int c; for (c = ISC_HIS_CFG_MODE_GR; c <= ISC_HIS_CFG_MODE_B; c++) { /* gains have a fixed point at 9 decimals */ isc->ctrls.gain[c] = 1 << 9; /* offsets are in 2's complements */ isc->ctrls.offset[c] = 0; } } static int isc_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct isc_device *isc = vb2_get_drv_priv(vq); unsigned int size = isc->fmt.fmt.pix.sizeimage; if (*nplanes) return sizes[0] < size ? 
-EINVAL : 0; *nplanes = 1; sizes[0] = size; return 0; } static int isc_buffer_prepare(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue); unsigned long size = isc->fmt.fmt.pix.sizeimage; if (vb2_plane_size(vb, 0) < size) { dev_err(isc->dev, "buffer too small (%lu < %lu)\n", vb2_plane_size(vb, 0), size); return -EINVAL; } vb2_set_plane_payload(vb, 0, size); vbuf->field = isc->fmt.fmt.pix.field; return 0; } static void isc_crop_pfe(struct isc_device *isc) { struct regmap *regmap = isc->regmap; u32 h, w; h = isc->fmt.fmt.pix.height; w = isc->fmt.fmt.pix.width; /* * In case the sensor is not RAW, it will output a pixel (12-16 bits) * with two samples on the ISC Data bus (which is 8-12) * ISC will count each sample, so, we need to multiply these values * by two, to get the real number of samples for the required pixels. */ if (!ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code)) { h <<= 1; w <<= 1; } /* * We limit the column/row count that the ISC will output according * to the configured resolution that we want. * This will avoid the situation where the sensor is misconfigured, * sending more data, and the ISC will just take it and DMA to memory, * causing corruption. 
*/ regmap_write(regmap, ISC_PFE_CFG1, (ISC_PFE_CFG1_COLMIN(0) & ISC_PFE_CFG1_COLMIN_MASK) | (ISC_PFE_CFG1_COLMAX(w - 1) & ISC_PFE_CFG1_COLMAX_MASK)); regmap_write(regmap, ISC_PFE_CFG2, (ISC_PFE_CFG2_ROWMIN(0) & ISC_PFE_CFG2_ROWMIN_MASK) | (ISC_PFE_CFG2_ROWMAX(h - 1) & ISC_PFE_CFG2_ROWMAX_MASK)); regmap_update_bits(regmap, ISC_PFE_CFG0, ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN, ISC_PFE_CFG0_COLEN | ISC_PFE_CFG0_ROWEN); } static void isc_start_dma(struct isc_device *isc) { struct regmap *regmap = isc->regmap; u32 sizeimage = isc->fmt.fmt.pix.sizeimage; u32 dctrl_dview; dma_addr_t addr0; addr0 = vb2_dma_contig_plane_dma_addr(&isc->cur_frm->vb.vb2_buf, 0); regmap_write(regmap, ISC_DAD0 + isc->offsets.dma, addr0); switch (isc->config.fourcc) { case V4L2_PIX_FMT_YUV420: regmap_write(regmap, ISC_DAD1 + isc->offsets.dma, addr0 + (sizeimage * 2) / 3); regmap_write(regmap, ISC_DAD2 + isc->offsets.dma, addr0 + (sizeimage * 5) / 6); break; case V4L2_PIX_FMT_YUV422P: regmap_write(regmap, ISC_DAD1 + isc->offsets.dma, addr0 + sizeimage / 2); regmap_write(regmap, ISC_DAD2 + isc->offsets.dma, addr0 + (sizeimage * 3) / 4); break; default: break; } dctrl_dview = isc->config.dctrl_dview; regmap_write(regmap, ISC_DCTRL + isc->offsets.dma, dctrl_dview | ISC_DCTRL_IE_IS); spin_lock(&isc->awb_lock); regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_CAPTURE); spin_unlock(&isc->awb_lock); } static void isc_set_pipeline(struct isc_device *isc, u32 pipeline) { struct regmap *regmap = isc->regmap; struct isc_ctrls *ctrls = &isc->ctrls; u32 val, bay_cfg; const u32 *gamma; unsigned int i; /* WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB422-->SUB420 */ for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) { val = pipeline & BIT(i) ? 
1 : 0; regmap_field_write(isc->pipeline[i], val); } if (!pipeline) return; bay_cfg = isc->config.sd_format->cfa_baycfg; regmap_write(regmap, ISC_WB_CFG, bay_cfg); isc_update_awb_ctrls(isc); isc_update_v4l2_ctrls(isc); regmap_write(regmap, ISC_CFA_CFG, bay_cfg | ISC_CFA_CFG_EITPOL); gamma = &isc->gamma_table[ctrls->gamma_index][0]; regmap_bulk_write(regmap, ISC_GAM_BENTRY, gamma, GAMMA_ENTRIES); regmap_bulk_write(regmap, ISC_GAM_GENTRY, gamma, GAMMA_ENTRIES); regmap_bulk_write(regmap, ISC_GAM_RENTRY, gamma, GAMMA_ENTRIES); isc->config_dpc(isc); isc->config_csc(isc); isc->config_cbc(isc); isc->config_cc(isc); isc->config_gam(isc); } static int isc_update_profile(struct isc_device *isc) { struct regmap *regmap = isc->regmap; u32 sr; int counter = 100; regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_UPPRO); regmap_read(regmap, ISC_CTRLSR, &sr); while ((sr & ISC_CTRL_UPPRO) && counter--) { usleep_range(1000, 2000); regmap_read(regmap, ISC_CTRLSR, &sr); } if (counter < 0) { v4l2_warn(&isc->v4l2_dev, "Time out to update profile\n"); return -ETIMEDOUT; } return 0; } static void isc_set_histogram(struct isc_device *isc, bool enable) { struct regmap *regmap = isc->regmap; struct isc_ctrls *ctrls = &isc->ctrls; if (enable) { regmap_write(regmap, ISC_HIS_CFG + isc->offsets.his, ISC_HIS_CFG_MODE_GR | (isc->config.sd_format->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT) | ISC_HIS_CFG_RAR); regmap_write(regmap, ISC_HIS_CTRL + isc->offsets.his, ISC_HIS_CTRL_EN); regmap_write(regmap, ISC_INTEN, ISC_INT_HISDONE); ctrls->hist_id = ISC_HIS_CFG_MODE_GR; isc_update_profile(isc); regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ); ctrls->hist_stat = HIST_ENABLED; } else { regmap_write(regmap, ISC_INTDIS, ISC_INT_HISDONE); regmap_write(regmap, ISC_HIS_CTRL + isc->offsets.his, ISC_HIS_CTRL_DIS); ctrls->hist_stat = HIST_DISABLED; } } static int isc_configure(struct isc_device *isc) { struct regmap *regmap = isc->regmap; u32 pfe_cfg0, dcfg, mask, pipeline; struct isc_subdev_entity *subdev = 
isc->current_subdev; pfe_cfg0 = isc->config.sd_format->pfe_cfg0_bps; pipeline = isc->config.bits_pipeline; dcfg = isc->config.dcfg_imode | isc->dcfg; pfe_cfg0 |= subdev->pfe_cfg0 | ISC_PFE_CFG0_MODE_PROGRESSIVE; mask = ISC_PFE_CFG0_BPS_MASK | ISC_PFE_CFG0_HPOL_LOW | ISC_PFE_CFG0_VPOL_LOW | ISC_PFE_CFG0_PPOL_LOW | ISC_PFE_CFG0_MODE_MASK | ISC_PFE_CFG0_CCIR_CRC | ISC_PFE_CFG0_CCIR656 | ISC_PFE_CFG0_MIPI; regmap_update_bits(regmap, ISC_PFE_CFG0, mask, pfe_cfg0); isc->config_rlp(isc); regmap_write(regmap, ISC_DCFG + isc->offsets.dma, dcfg); /* Set the pipeline */ isc_set_pipeline(isc, pipeline); /* * The current implemented histogram is available for RAW R, B, GB, GR * channels. We need to check if sensor is outputting RAW BAYER */ if (isc->ctrls.awb && ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code)) isc_set_histogram(isc, true); else isc_set_histogram(isc, false); /* Update profile */ return isc_update_profile(isc); } static int isc_prepare_streaming(struct vb2_queue *vq) { struct isc_device *isc = vb2_get_drv_priv(vq); return media_pipeline_start(isc->video_dev.entity.pads, &isc->mpipe); } static int isc_start_streaming(struct vb2_queue *vq, unsigned int count) { struct isc_device *isc = vb2_get_drv_priv(vq); struct regmap *regmap = isc->regmap; struct isc_buffer *buf; unsigned long flags; int ret; /* Enable stream on the sub device */ ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 1); if (ret && ret != -ENOIOCTLCMD) { dev_err(isc->dev, "stream on failed in subdev %d\n", ret); goto err_start_stream; } ret = pm_runtime_resume_and_get(isc->dev); if (ret < 0) { dev_err(isc->dev, "RPM resume failed in subdev %d\n", ret); goto err_pm_get; } ret = isc_configure(isc); if (unlikely(ret)) goto err_configure; /* Enable DMA interrupt */ regmap_write(regmap, ISC_INTEN, ISC_INT_DDONE); spin_lock_irqsave(&isc->dma_queue_lock, flags); isc->sequence = 0; isc->stop = false; reinit_completion(&isc->comp); isc->cur_frm = list_first_entry(&isc->dma_queue, struct 
isc_buffer, list); list_del(&isc->cur_frm->list); isc_crop_pfe(isc); isc_start_dma(isc); spin_unlock_irqrestore(&isc->dma_queue_lock, flags); /* if we streaming from RAW, we can do one-shot white balance adj */ if (ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code)) v4l2_ctrl_activate(isc->do_wb_ctrl, true); return 0; err_configure: pm_runtime_put_sync(isc->dev); err_pm_get: v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0); err_start_stream: spin_lock_irqsave(&isc->dma_queue_lock, flags); list_for_each_entry(buf, &isc->dma_queue, list) vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); INIT_LIST_HEAD(&isc->dma_queue); spin_unlock_irqrestore(&isc->dma_queue_lock, flags); return ret; } static void isc_unprepare_streaming(struct vb2_queue *vq) { struct isc_device *isc = vb2_get_drv_priv(vq); /* Stop media pipeline */ media_pipeline_stop(isc->video_dev.entity.pads); } static void isc_stop_streaming(struct vb2_queue *vq) { struct isc_device *isc = vb2_get_drv_priv(vq); unsigned long flags; struct isc_buffer *buf; int ret; mutex_lock(&isc->awb_mutex); v4l2_ctrl_activate(isc->do_wb_ctrl, false); isc->stop = true; /* Wait until the end of the current frame */ if (isc->cur_frm && !wait_for_completion_timeout(&isc->comp, 5 * HZ)) dev_err(isc->dev, "Timeout waiting for end of the capture\n"); mutex_unlock(&isc->awb_mutex); /* Disable DMA interrupt */ regmap_write(isc->regmap, ISC_INTDIS, ISC_INT_DDONE); pm_runtime_put_sync(isc->dev); /* Disable stream on the sub device */ ret = v4l2_subdev_call(isc->current_subdev->sd, video, s_stream, 0); if (ret && ret != -ENOIOCTLCMD) dev_err(isc->dev, "stream off failed in subdev\n"); /* Release all active buffers */ spin_lock_irqsave(&isc->dma_queue_lock, flags); if (unlikely(isc->cur_frm)) { vb2_buffer_done(&isc->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR); isc->cur_frm = NULL; } list_for_each_entry(buf, &isc->dma_queue, list) vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); INIT_LIST_HEAD(&isc->dma_queue); 
spin_unlock_irqrestore(&isc->dma_queue_lock, flags); } static void isc_buffer_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct isc_buffer *buf = container_of(vbuf, struct isc_buffer, vb); struct isc_device *isc = vb2_get_drv_priv(vb->vb2_queue); unsigned long flags; spin_lock_irqsave(&isc->dma_queue_lock, flags); if (!isc->cur_frm && list_empty(&isc->dma_queue) && vb2_start_streaming_called(vb->vb2_queue)) { isc->cur_frm = buf; isc_start_dma(isc); } else { list_add_tail(&buf->list, &isc->dma_queue); } spin_unlock_irqrestore(&isc->dma_queue_lock, flags); } static const struct vb2_ops isc_vb2_ops = { .queue_setup = isc_queue_setup, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, .buf_prepare = isc_buffer_prepare, .start_streaming = isc_start_streaming, .stop_streaming = isc_stop_streaming, .buf_queue = isc_buffer_queue, .prepare_streaming = isc_prepare_streaming, .unprepare_streaming = isc_unprepare_streaming, }; static int isc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct isc_device *isc = video_drvdata(file); strscpy(cap->driver, "microchip-isc", sizeof(cap->driver)); strscpy(cap->card, "Microchip Image Sensor Controller", sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", isc->v4l2_dev.name); return 0; } static int isc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct isc_device *isc = video_drvdata(file); u32 index = f->index; u32 i, supported_index = 0; struct isc_format *fmt; /* * If we are not asked a specific mbus_code, we have to report all * the formats that we can output. */ if (!f->mbus_code) { if (index >= isc->controller_formats_size) return -EINVAL; f->pixelformat = isc->controller_formats[index].fourcc; return 0; } /* * If a specific mbus_code is requested, check if we support * this mbus_code as input for the ISC. 
* If it's supported, then we report the corresponding pixelformat * as first possible option for the ISC. * E.g. mbus MEDIA_BUS_FMT_YUYV8_2X8 and report * 'YUYV' (YUYV 4:2:2) */ fmt = isc_find_format_by_code(isc, f->mbus_code, &i); if (!fmt) return -EINVAL; if (!index) { f->pixelformat = fmt->fourcc; return 0; } supported_index++; /* If the index is not raw, we don't have anymore formats to report */ if (!ISC_IS_FORMAT_RAW(f->mbus_code)) return -EINVAL; /* * We are asked for a specific mbus code, which is raw. * We have to search through the formats we can convert to. * We have to skip the raw formats, we cannot convert to raw. * E.g. 'AR12' (16-bit ARGB 4-4-4-4), 'AR15' (16-bit ARGB 1-5-5-5), etc. */ for (i = 0; i < isc->controller_formats_size; i++) { if (isc->controller_formats[i].raw) continue; if (index == supported_index) { f->pixelformat = isc->controller_formats[i].fourcc; return 0; } supported_index++; } return -EINVAL; } static int isc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct isc_device *isc = video_drvdata(file); *fmt = isc->fmt; return 0; } /* * Checks the current configured format, if ISC can output it, * considering which type of format the ISC receives from the sensor */ static int isc_try_validate_formats(struct isc_device *isc) { int ret; bool bayer = false, yuv = false, rgb = false, grey = false; /* all formats supported by the RLP module are OK */ switch (isc->try_config.fourcc) { case V4L2_PIX_FMT_SBGGR8: case V4L2_PIX_FMT_SGBRG8: case V4L2_PIX_FMT_SGRBG8: case V4L2_PIX_FMT_SRGGB8: case V4L2_PIX_FMT_SBGGR10: case V4L2_PIX_FMT_SGBRG10: case V4L2_PIX_FMT_SGRBG10: case V4L2_PIX_FMT_SRGGB10: case V4L2_PIX_FMT_SBGGR12: case V4L2_PIX_FMT_SGBRG12: case V4L2_PIX_FMT_SGRBG12: case V4L2_PIX_FMT_SRGGB12: ret = 0; bayer = true; break; case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_YUV422P: case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_VYUY: ret = 0; yuv = true; break; case V4L2_PIX_FMT_RGB565: case 
V4L2_PIX_FMT_ABGR32: case V4L2_PIX_FMT_XBGR32: case V4L2_PIX_FMT_ARGB444: case V4L2_PIX_FMT_ARGB555: ret = 0; rgb = true; break; case V4L2_PIX_FMT_GREY: case V4L2_PIX_FMT_Y10: case V4L2_PIX_FMT_Y16: ret = 0; grey = true; break; default: /* any other different formats are not supported */ dev_err(isc->dev, "Requested unsupported format.\n"); ret = -EINVAL; } dev_dbg(isc->dev, "Format validation, requested rgb=%u, yuv=%u, grey=%u, bayer=%u\n", rgb, yuv, grey, bayer); if (bayer && !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) { dev_err(isc->dev, "Cannot output RAW if we do not receive RAW.\n"); return -EINVAL; } if (grey && !ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code) && !ISC_IS_FORMAT_GREY(isc->try_config.sd_format->mbus_code)) { dev_err(isc->dev, "Cannot output GREY if we do not receive RAW/GREY.\n"); return -EINVAL; } if ((rgb || bayer || yuv) && ISC_IS_FORMAT_GREY(isc->try_config.sd_format->mbus_code)) { dev_err(isc->dev, "Cannot convert GREY to another format.\n"); return -EINVAL; } return ret; } /* * Configures the RLP and DMA modules, depending on the output format * configured for the ISC. * If direct_dump == true, just dump raw data 8/16 bits depending on format. 
*/ static int isc_try_configure_rlp_dma(struct isc_device *isc, bool direct_dump) { isc->try_config.rlp_cfg_mode = 0; switch (isc->try_config.fourcc) { case V4L2_PIX_FMT_SBGGR8: case V4L2_PIX_FMT_SGBRG8: case V4L2_PIX_FMT_SGRBG8: case V4L2_PIX_FMT_SRGGB8: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 8; isc->try_config.bpp_v4l2 = 8; break; case V4L2_PIX_FMT_SBGGR10: case V4L2_PIX_FMT_SGBRG10: case V4L2_PIX_FMT_SGRBG10: case V4L2_PIX_FMT_SRGGB10: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT10; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 16; isc->try_config.bpp_v4l2 = 16; break; case V4L2_PIX_FMT_SBGGR12: case V4L2_PIX_FMT_SGBRG12: case V4L2_PIX_FMT_SGRBG12: case V4L2_PIX_FMT_SRGGB12: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT12; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 16; isc->try_config.bpp_v4l2 = 16; break; case V4L2_PIX_FMT_RGB565: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_RGB565; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 16; isc->try_config.bpp_v4l2 = 16; break; case V4L2_PIX_FMT_ARGB444: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB444; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 16; isc->try_config.bpp_v4l2 = 16; break; case V4L2_PIX_FMT_ARGB555: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_ARGB555; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 16; isc->try_config.bpp_v4l2 = 16; break; case V4L2_PIX_FMT_ABGR32: case V4L2_PIX_FMT_XBGR32: isc->try_config.rlp_cfg_mode = 
ISC_RLP_CFG_MODE_ARGB32; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 32; isc->try_config.bpp_v4l2 = 32; break; case V4L2_PIX_FMT_YUV420: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_YC420P; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PLANAR; isc->try_config.bpp = 12; isc->try_config.bpp_v4l2 = 8; /* only first plane */ break; case V4L2_PIX_FMT_YUV422P: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YYCC; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_YC422P; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PLANAR; isc->try_config.bpp = 16; isc->try_config.bpp_v4l2 = 8; /* only first plane */ break; case V4L2_PIX_FMT_YUYV: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_YUYV; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 16; isc->try_config.bpp_v4l2 = 16; break; case V4L2_PIX_FMT_UYVY: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_UYVY; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 16; isc->try_config.bpp_v4l2 = 16; break; case V4L2_PIX_FMT_VYUY: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_YCYC | ISC_RLP_CFG_YMODE_VYUY; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED32; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 16; isc->try_config.bpp_v4l2 = 16; break; case V4L2_PIX_FMT_GREY: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY8; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 8; isc->try_config.bpp_v4l2 = 8; break; case V4L2_PIX_FMT_Y16: isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DATY10 | ISC_RLP_CFG_LSH; fallthrough; case V4L2_PIX_FMT_Y10: isc->try_config.rlp_cfg_mode |= 
ISC_RLP_CFG_MODE_DATY10; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED16; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; isc->try_config.bpp = 16; isc->try_config.bpp_v4l2 = 16; break; default: return -EINVAL; } if (direct_dump) { isc->try_config.rlp_cfg_mode = ISC_RLP_CFG_MODE_DAT8; isc->try_config.dcfg_imode = ISC_DCFG_IMODE_PACKED8; isc->try_config.dctrl_dview = ISC_DCTRL_DVIEW_PACKED; return 0; } return 0; } /* * Configuring pipeline modules, depending on which format the ISC outputs * and considering which format it has as input from the sensor. */ static int isc_try_configure_pipeline(struct isc_device *isc) { switch (isc->try_config.fourcc) { case V4L2_PIX_FMT_RGB565: case V4L2_PIX_FMT_ARGB555: case V4L2_PIX_FMT_ARGB444: case V4L2_PIX_FMT_ABGR32: case V4L2_PIX_FMT_XBGR32: /* if sensor format is RAW, we convert inside ISC */ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) { isc->try_config.bits_pipeline = CFA_ENABLE | WB_ENABLE | GAM_ENABLES | DPC_BLCENABLE | CC_ENABLE; } else { isc->try_config.bits_pipeline = 0x0; } break; case V4L2_PIX_FMT_YUV420: /* if sensor format is RAW, we convert inside ISC */ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) { isc->try_config.bits_pipeline = CFA_ENABLE | CSC_ENABLE | GAM_ENABLES | WB_ENABLE | SUB420_ENABLE | SUB422_ENABLE | CBC_ENABLE | DPC_BLCENABLE; } else { isc->try_config.bits_pipeline = 0x0; } break; case V4L2_PIX_FMT_YUV422P: /* if sensor format is RAW, we convert inside ISC */ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) { isc->try_config.bits_pipeline = CFA_ENABLE | CSC_ENABLE | WB_ENABLE | GAM_ENABLES | SUB422_ENABLE | CBC_ENABLE | DPC_BLCENABLE; } else { isc->try_config.bits_pipeline = 0x0; } break; case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_VYUY: /* if sensor format is RAW, we convert inside ISC */ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) { isc->try_config.bits_pipeline = CFA_ENABLE | CSC_ENABLE | WB_ENABLE | 
GAM_ENABLES | SUB422_ENABLE | CBC_ENABLE | DPC_BLCENABLE; } else { isc->try_config.bits_pipeline = 0x0; } break; case V4L2_PIX_FMT_GREY: case V4L2_PIX_FMT_Y16: /* if sensor format is RAW, we convert inside ISC */ if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) { isc->try_config.bits_pipeline = CFA_ENABLE | CSC_ENABLE | WB_ENABLE | GAM_ENABLES | CBC_ENABLE | DPC_BLCENABLE; } else { isc->try_config.bits_pipeline = 0x0; } break; default: if (ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) isc->try_config.bits_pipeline = WB_ENABLE | DPC_BLCENABLE; else isc->try_config.bits_pipeline = 0x0; } /* Tune the pipeline to product specific */ isc->adapt_pipeline(isc); return 0; } static void isc_try_fse(struct isc_device *isc, struct v4l2_subdev_state *sd_state) { struct v4l2_subdev_frame_size_enum fse = { .which = V4L2_SUBDEV_FORMAT_TRY, }; int ret; /* * If we do not know yet which format the subdev is using, we cannot * do anything. */ if (!isc->config.sd_format) return; fse.code = isc->try_config.sd_format->mbus_code; ret = v4l2_subdev_call(isc->current_subdev->sd, pad, enum_frame_size, sd_state, &fse); /* * Attempt to obtain format size from subdev. If not available, * just use the maximum ISC can receive. 
*/ if (ret) { sd_state->pads->try_crop.width = isc->max_width; sd_state->pads->try_crop.height = isc->max_height; } else { sd_state->pads->try_crop.width = fse.max_width; sd_state->pads->try_crop.height = fse.max_height; } } static int isc_try_fmt(struct isc_device *isc, struct v4l2_format *f) { struct v4l2_pix_format *pixfmt = &f->fmt.pix; unsigned int i; if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; isc->try_config.fourcc = isc->controller_formats[0].fourcc; /* find if the format requested is supported */ for (i = 0; i < isc->controller_formats_size; i++) if (isc->controller_formats[i].fourcc == pixfmt->pixelformat) { isc->try_config.fourcc = pixfmt->pixelformat; break; } isc_try_configure_rlp_dma(isc, false); /* Limit to Microchip ISC hardware capabilities */ v4l_bound_align_image(&pixfmt->width, 16, isc->max_width, 0, &pixfmt->height, 16, isc->max_height, 0, 0); /* If we did not find the requested format, we will fallback here */ pixfmt->pixelformat = isc->try_config.fourcc; pixfmt->colorspace = V4L2_COLORSPACE_SRGB; pixfmt->field = V4L2_FIELD_NONE; pixfmt->bytesperline = (pixfmt->width * isc->try_config.bpp_v4l2) >> 3; pixfmt->sizeimage = ((pixfmt->width * isc->try_config.bpp) >> 3) * pixfmt->height; isc->try_fmt = *f; return 0; } static int isc_set_fmt(struct isc_device *isc, struct v4l2_format *f) { isc_try_fmt(isc, f); /* make the try configuration active */ isc->config = isc->try_config; isc->fmt = isc->try_fmt; dev_dbg(isc->dev, "ISC set_fmt to %.4s @%dx%d\n", (char *)&f->fmt.pix.pixelformat, f->fmt.pix.width, f->fmt.pix.height); return 0; } static int isc_validate(struct isc_device *isc) { int ret; int i; struct isc_format *sd_fmt = NULL; struct v4l2_pix_format *pixfmt = &isc->fmt.fmt.pix; struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .pad = isc->remote_pad, }; struct v4l2_subdev_pad_config pad_cfg = {}; struct v4l2_subdev_state pad_state = { .pads = &pad_cfg, }; /* Get current format from subdev */ ret = 
v4l2_subdev_call(isc->current_subdev->sd, pad, get_fmt, NULL, &format); if (ret) return ret; /* Identify the subdev's format configuration */ for (i = 0; i < isc->formats_list_size; i++) if (isc->formats_list[i].mbus_code == format.format.code) { sd_fmt = &isc->formats_list[i]; break; } /* Check if the format is not supported */ if (!sd_fmt) { dev_err(isc->dev, "Current subdevice is streaming a media bus code that is not supported 0x%x\n", format.format.code); return -EPIPE; } /* At this moment we know which format the subdev will use */ isc->try_config.sd_format = sd_fmt; /* If the sensor is not RAW, we can only do a direct dump */ if (!ISC_IS_FORMAT_RAW(isc->try_config.sd_format->mbus_code)) isc_try_configure_rlp_dma(isc, true); /* Limit to Microchip ISC hardware capabilities */ v4l_bound_align_image(&format.format.width, 16, isc->max_width, 0, &format.format.height, 16, isc->max_height, 0, 0); /* Check if the frame size is the same. Otherwise we may overflow */ if (pixfmt->height != format.format.height || pixfmt->width != format.format.width) { dev_err(isc->dev, "ISC not configured with the proper frame size: %dx%d\n", format.format.width, format.format.height); return -EPIPE; } dev_dbg(isc->dev, "Identified subdev using format %.4s with %dx%d %d bpp\n", (char *)&sd_fmt->fourcc, pixfmt->width, pixfmt->height, isc->try_config.bpp); /* Reset and restart AWB if the subdevice changed the format */ if (isc->try_config.sd_format && isc->config.sd_format && isc->try_config.sd_format != isc->config.sd_format) { isc->ctrls.hist_stat = HIST_INIT; isc_reset_awb_ctrls(isc); isc_update_v4l2_ctrls(isc); } /* Validate formats */ ret = isc_try_validate_formats(isc); if (ret) return ret; /* Obtain frame sizes if possible to have crop requirements ready */ isc_try_fse(isc, &pad_state); /* Configure ISC pipeline for the config */ ret = isc_try_configure_pipeline(isc); if (ret) return ret; isc->config = isc->try_config; dev_dbg(isc->dev, "New ISC configuration in place\n"); return 
0; } static int isc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct isc_device *isc = video_drvdata(file); if (vb2_is_busy(&isc->vb2_vidq)) return -EBUSY; return isc_set_fmt(isc, f); } static int isc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct isc_device *isc = video_drvdata(file); return isc_try_fmt(isc, f); } static int isc_enum_input(struct file *file, void *priv, struct v4l2_input *inp) { if (inp->index != 0) return -EINVAL; inp->type = V4L2_INPUT_TYPE_CAMERA; inp->std = 0; strscpy(inp->name, "Camera", sizeof(inp->name)); return 0; } static int isc_g_input(struct file *file, void *priv, unsigned int *i) { *i = 0; return 0; } static int isc_s_input(struct file *file, void *priv, unsigned int i) { if (i > 0) return -EINVAL; return 0; } static int isc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { struct isc_device *isc = video_drvdata(file); return v4l2_g_parm_cap(video_devdata(file), isc->current_subdev->sd, a); } static int isc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { struct isc_device *isc = video_drvdata(file); return v4l2_s_parm_cap(video_devdata(file), isc->current_subdev->sd, a); } static int isc_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize) { struct isc_device *isc = video_drvdata(file); int ret = -EINVAL; int i; if (fsize->index) return -EINVAL; for (i = 0; i < isc->controller_formats_size; i++) if (isc->controller_formats[i].fourcc == fsize->pixel_format) ret = 0; if (ret) return ret; fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS; fsize->stepwise.min_width = 16; fsize->stepwise.max_width = isc->max_width; fsize->stepwise.min_height = 16; fsize->stepwise.max_height = isc->max_height; fsize->stepwise.step_width = 1; fsize->stepwise.step_height = 1; return 0; } static const struct v4l2_ioctl_ops isc_ioctl_ops = { .vidioc_querycap = isc_querycap, .vidioc_enum_fmt_vid_cap = isc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = 
isc_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = isc_s_fmt_vid_cap, .vidioc_try_fmt_vid_cap = isc_try_fmt_vid_cap, .vidioc_enum_input = isc_enum_input, .vidioc_g_input = isc_g_input, .vidioc_s_input = isc_s_input, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_g_parm = isc_g_parm, .vidioc_s_parm = isc_s_parm, .vidioc_enum_framesizes = isc_enum_framesizes, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static int isc_open(struct file *file) { struct isc_device *isc = video_drvdata(file); struct v4l2_subdev *sd = isc->current_subdev->sd; int ret; if (mutex_lock_interruptible(&isc->lock)) return -ERESTARTSYS; ret = v4l2_fh_open(file); if (ret < 0) goto unlock; if (!v4l2_fh_is_singular_file(file)) goto unlock; ret = v4l2_subdev_call(sd, core, s_power, 1); if (ret < 0 && ret != -ENOIOCTLCMD) { v4l2_fh_release(file); goto unlock; } ret = isc_set_fmt(isc, &isc->fmt); if (ret) { v4l2_subdev_call(sd, core, s_power, 0); v4l2_fh_release(file); } unlock: mutex_unlock(&isc->lock); return ret; } static int isc_release(struct file *file) { struct isc_device *isc = video_drvdata(file); struct v4l2_subdev *sd = isc->current_subdev->sd; bool fh_singular; int ret; mutex_lock(&isc->lock); fh_singular = v4l2_fh_is_singular_file(file); ret = _vb2_fop_release(file, NULL); if (fh_singular) v4l2_subdev_call(sd, core, s_power, 0); mutex_unlock(&isc->lock); return ret; } static const struct v4l2_file_operations isc_fops = { .owner = THIS_MODULE, .open = isc_open, .release = isc_release, .unlocked_ioctl = video_ioctl2, .read = vb2_fop_read, .mmap = vb2_fop_mmap, .poll = vb2_fop_poll, }; 
/*
 * Shared ISC interrupt handler: completes the current capture frame on
 * DMA-done and kicks the AWB worker on histogram-done.
 */
irqreturn_t microchip_isc_interrupt(int irq, void *dev_id)
{
	struct isc_device *isc = (struct isc_device *)dev_id;
	struct regmap *regmap = isc->regmap;
	u32 isc_intsr, isc_intmask, pending;
	irqreturn_t ret = IRQ_NONE;

	regmap_read(regmap, ISC_INTSR, &isc_intsr);
	regmap_read(regmap, ISC_INTMASK, &isc_intmask);

	/* Only act on interrupts that are actually enabled. */
	pending = isc_intsr & isc_intmask;

	if (likely(pending & ISC_INT_DDONE)) {
		spin_lock(&isc->dma_queue_lock);
		if (isc->cur_frm) {
			struct vb2_v4l2_buffer *vbuf = &isc->cur_frm->vb;
			struct vb2_buffer *vb = &vbuf->vb2_buf;

			vb->timestamp = ktime_get_ns();
			vbuf->sequence = isc->sequence++;
			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
			isc->cur_frm = NULL;
		}

		/* Start the next queued buffer unless streaming is stopping. */
		if (!list_empty(&isc->dma_queue) && !isc->stop) {
			isc->cur_frm = list_first_entry(&isc->dma_queue,
							struct isc_buffer, list);
			list_del(&isc->cur_frm->list);

			isc_start_dma(isc);
		}

		/* Wake up the stop path waiting for the last frame. */
		if (isc->stop)
			complete(&isc->comp);

		ret = IRQ_HANDLED;
		spin_unlock(&isc->dma_queue_lock);
	}

	if (pending & ISC_INT_HISDONE) {
		schedule_work(&isc->awb_work);
		ret = IRQ_HANDLED;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(microchip_isc_interrupt);

/*
 * Read the hardware histogram for the current component and compute its
 * min/max occupied bins and weighted pixel count (into ctrls->hist_count).
 */
static void isc_hist_count(struct isc_device *isc, u32 *min, u32 *max)
{
	struct regmap *regmap = isc->regmap;
	struct isc_ctrls *ctrls = &isc->ctrls;
	u32 *hist_count = &ctrls->hist_count[ctrls->hist_id];
	u32 *hist_entry = &ctrls->hist_entry[0];
	u32 i;

	*min = 0;
	*max = HIST_ENTRIES;

	regmap_bulk_read(regmap, ISC_HIS_ENTRY + isc->offsets.his_entry,
			 hist_entry, HIST_ENTRIES);

	*hist_count = 0;
	/*
	 * we deliberately ignore the end of the histogram,
	 * the most white pixels
	 */
	for (i = 1; i < HIST_ENTRIES; i++) {
		if (*hist_entry && !*min)
			*min = i;
		if (*hist_entry)
			*max = i;
		*hist_count += i * (*hist_entry++);
	}

	if (!*min)
		*min = 1;

	dev_dbg(isc->dev,
		"isc wb: hist_id %u, hist_count %u",
		ctrls->hist_id, *hist_count);
}

/*
 * Compute per-component white-balance gains and offsets from the collected
 * histograms, combining a contrast-stretch gain with a Grey World gain.
 */
static void isc_wb_update(struct isc_ctrls *ctrls)
{
	struct isc_device *isc = container_of(ctrls, struct isc_device, ctrls);
	u32 *hist_count = &ctrls->hist_count[0];
	u32 c, offset[4];
	u64 avg = 0;
	/* We compute two gains, stretch gain and grey world gain */
	u32 s_gain[4], gw_gain[4];

	/*
	 * According to Grey World, we need to set gains for R/B to normalize
	 * them towards the green channel.
	 * Thus we want to keep Green as fixed and adjust only Red/Blue
	 * Compute the average of the both green channels first
	 */
	avg = (u64)hist_count[ISC_HIS_CFG_MODE_GR] +
		(u64)hist_count[ISC_HIS_CFG_MODE_GB];
	avg >>= 1;

	dev_dbg(isc->dev, "isc wb: green components average %llu\n", avg);

	/* Green histogram is null, nothing to do */
	if (!avg)
		return;

	for (c = ISC_HIS_CFG_MODE_GR; c <= ISC_HIS_CFG_MODE_B; c++) {
		/*
		 * the color offset is the minimum value of the histogram.
		 * we stretch this color to the full range by substracting
		 * this value from the color component.
		 */
		offset[c] = ctrls->hist_minmax[c][HIST_MIN_INDEX];
		/*
		 * The offset is always at least 1. If the offset is 1, we do
		 * not need to adjust it, so our result must be zero.
		 * the offset is computed in a histogram on 9 bits (0..512)
		 * but the offset in register is based on
		 * 12 bits pipeline (0..4096).
		 * we need to shift with the 3 bits that the histogram is
		 * ignoring
		 */
		ctrls->offset[c] = (offset[c] - 1) << 3;

		/*
		 * the offset is then taken and converted to 2's complements,
		 * and must be negative, as we subtract this value from the
		 * color components
		 */
		ctrls->offset[c] = -ctrls->offset[c];

		/*
		 * the stretch gain is the total number of histogram bins
		 * divided by the actual range of color component (Max - Min)
		 * If we compute gain like this, the actual color component
		 * will be stretched to the full histogram.
		 * We need to shift 9 bits for precision, we have 9 bits for
		 * decimals
		 */
		s_gain[c] = (HIST_ENTRIES << 9) /
			(ctrls->hist_minmax[c][HIST_MAX_INDEX] -
			ctrls->hist_minmax[c][HIST_MIN_INDEX] + 1);

		/*
		 * Now we have to compute the gain w.r.t. the average.
		 * Add/lose gain to the component towards the average.
		 * If it happens that the component is zero, use the
		 * fixed point value : 1.0 gain.
		 */
		if (hist_count[c])
			gw_gain[c] = div_u64(avg << 9, hist_count[c]);
		else
			gw_gain[c] = 1 << 9;

		dev_dbg(isc->dev,
			"isc wb: component %d, s_gain %u, gw_gain %u\n",
			c, s_gain[c], gw_gain[c]);

		/* multiply both gains and adjust for decimals */
		ctrls->gain[c] = s_gain[c] * gw_gain[c];
		ctrls->gain[c] >>= 9;

		/* make sure we are not out of range */
		ctrls->gain[c] = clamp_val(ctrls->gain[c], 0, GENMASK(12, 0));

		dev_dbg(isc->dev, "isc wb: component %d, final gain %u\n",
			c, ctrls->gain[c]);
	}
}

/*
 * Deferred AWB worker: scheduled from the ISR on each histogram-done IRQ.
 * Cycles through the four Bayer components (GR, R, GB, B); once all four
 * histograms are collected it recomputes the white-balance coefficients
 * and re-arms the next histogram acquisition.
 */
static void isc_awb_work(struct work_struct *w)
{
	struct isc_device *isc =
		container_of(w, struct isc_device, awb_work);
	struct regmap *regmap = isc->regmap;
	struct isc_ctrls *ctrls = &isc->ctrls;
	u32 hist_id = ctrls->hist_id;
	u32 baysel;
	unsigned long flags;
	u32 min, max;
	int ret;

	if (ctrls->hist_stat != HIST_ENABLED)
		return;

	isc_hist_count(isc, &min, &max);

	dev_dbg(isc->dev,
		"isc wb mode %d: hist min %u , max %u\n", hist_id, min, max);

	ctrls->hist_minmax[hist_id][HIST_MIN_INDEX] = min;
	ctrls->hist_minmax[hist_id][HIST_MAX_INDEX] = max;

	/* Advance to the next component; wrap after B and update WB. */
	if (hist_id != ISC_HIS_CFG_MODE_B) {
		hist_id++;
	} else {
		isc_wb_update(ctrls);
		hist_id = ISC_HIS_CFG_MODE_GR;
	}

	ctrls->hist_id = hist_id;
	baysel = isc->config.sd_format->cfa_baycfg << ISC_HIS_CFG_BAYSEL_SHIFT;

	ret = pm_runtime_resume_and_get(isc->dev);
	if (ret < 0)
		return;

	/*
	 * only update if we have all the required histograms and controls
	 * if awb has been disabled, we need to reset registers as well.
	 */
	if (hist_id == ISC_HIS_CFG_MODE_GR || ctrls->awb == ISC_WB_NONE) {
		/*
		 * It may happen that DMA Done IRQ will trigger while we are
		 * updating white balance registers here.
		 * In that case, only parts of the controls have been updated.
		 * We can avoid that by locking the section.
		 */
		spin_lock_irqsave(&isc->awb_lock, flags);
		isc_update_awb_ctrls(isc);
		spin_unlock_irqrestore(&isc->awb_lock, flags);

		/*
		 * if we are doing just the one time white balance adjustment,
		 * we are basically done.
		 */
		if (ctrls->awb == ISC_WB_ONETIME) {
			dev_info(isc->dev,
				 "Completed one time white-balance adjustment.\n");
			/* update the v4l2 controls values */
			isc_update_v4l2_ctrls(isc);
			ctrls->awb = ISC_WB_NONE;
		}
	}
	regmap_write(regmap, ISC_HIS_CFG + isc->offsets.his,
		     hist_id | baysel | ISC_HIS_CFG_RAR);

	/*
	 * We have to make sure the streaming has not stopped meanwhile.
	 * ISC requires a frame to clock the internal profile update.
	 * To avoid issues, lock the sequence with a mutex
	 */
	mutex_lock(&isc->awb_mutex);

	/* streaming is not active anymore */
	if (isc->stop) {
		/*
		 * NOTE(review): this early return does not call
		 * pm_runtime_put_sync(), so the reference taken by
		 * pm_runtime_resume_and_get() above appears to be leaked on
		 * the stop path — confirm against the runtime-PM pairing.
		 */
		mutex_unlock(&isc->awb_mutex);
		return;
	}

	isc_update_profile(isc);

	mutex_unlock(&isc->awb_mutex);

	/* if awb has been disabled, we don't need to start another histogram */
	if (ctrls->awb)
		regmap_write(regmap, ISC_CTRLEN, ISC_CTRL_HISREQ);

	pm_runtime_put_sync(isc->dev);
}

/* s_ctrl for the plain (non-cluster) controls; values are latched into
 * isc->ctrls and applied to hardware later by the config callbacks.
 */
static int isc_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct isc_device *isc = container_of(ctrl->handler,
					      struct isc_device, ctrls.handler);
	struct isc_ctrls *ctrls = &isc->ctrls;

	if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
		return 0;

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		ctrls->brightness = ctrl->val & ISC_CBC_BRIGHT_MASK;
		break;
	case V4L2_CID_CONTRAST:
		ctrls->contrast = ctrl->val & ISC_CBC_CONTRAST_MASK;
		break;
	case V4L2_CID_GAMMA:
		ctrls->gamma_index = ctrl->val;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct v4l2_ctrl_ops isc_ctrl_ops = {
	.s_ctrl	= isc_s_ctrl,
};

/*
 * s_ctrl for the auto-white-balance control cluster: applies manual
 * gain/offset values, toggles auto mode and handles the one-shot
 * "do white balance" button.
 */
static int isc_s_awb_ctrl(struct v4l2_ctrl *ctrl)
{
	struct isc_device *isc = container_of(ctrl->handler,
					      struct isc_device, ctrls.handler);
	struct isc_ctrls *ctrls = &isc->ctrls;

	if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
		return 0;

	switch (ctrl->id) {
	case V4L2_CID_AUTO_WHITE_BALANCE:
		if (ctrl->val == 1)
			ctrls->awb = ISC_WB_AUTO;
		else
			ctrls->awb = ISC_WB_NONE;

		/* configure the controls with new values from v4l2 */
		if (ctrl->cluster[ISC_CTRL_R_GAIN]->is_new)
			ctrls->gain[ISC_HIS_CFG_MODE_R] = isc->r_gain_ctrl->val;
		if (ctrl->cluster[ISC_CTRL_B_GAIN]->is_new)
			ctrls->gain[ISC_HIS_CFG_MODE_B] = isc->b_gain_ctrl->val;
		if (ctrl->cluster[ISC_CTRL_GR_GAIN]->is_new)
			ctrls->gain[ISC_HIS_CFG_MODE_GR] = isc->gr_gain_ctrl->val;
		if (ctrl->cluster[ISC_CTRL_GB_GAIN]->is_new)
			ctrls->gain[ISC_HIS_CFG_MODE_GB] = isc->gb_gain_ctrl->val;

		if (ctrl->cluster[ISC_CTRL_R_OFF]->is_new)
			ctrls->offset[ISC_HIS_CFG_MODE_R] = isc->r_off_ctrl->val;
		if (ctrl->cluster[ISC_CTRL_B_OFF]->is_new)
			ctrls->offset[ISC_HIS_CFG_MODE_B] = isc->b_off_ctrl->val;
		if (ctrl->cluster[ISC_CTRL_GR_OFF]->is_new)
			ctrls->offset[ISC_HIS_CFG_MODE_GR] = isc->gr_off_ctrl->val;
		if (ctrl->cluster[ISC_CTRL_GB_OFF]->is_new)
			ctrls->offset[ISC_HIS_CFG_MODE_GB] = isc->gb_off_ctrl->val;

		isc_update_awb_ctrls(isc);

		mutex_lock(&isc->awb_mutex);
		if (vb2_is_streaming(&isc->vb2_vidq)) {
			/*
			 * If we are streaming, we can update profile to
			 * have the new settings in place.
			 */
			isc_update_profile(isc);
		} else {
			/*
			 * The auto cluster will activate automatically this
			 * control. This has to be deactivated when not
			 * streaming.
			 */
			v4l2_ctrl_activate(isc->do_wb_ctrl, false);
		}
		mutex_unlock(&isc->awb_mutex);

		/* if we have autowhitebalance on, start histogram procedure */
		if (ctrls->awb == ISC_WB_AUTO &&
		    vb2_is_streaming(&isc->vb2_vidq) &&
		    ISC_IS_FORMAT_RAW(isc->config.sd_format->mbus_code))
			isc_set_histogram(isc, true);

		/*
		 * for one time whitebalance adjustment, check the button,
		 * if it's pressed, perform the one time operation.
		 */
		if (ctrls->awb == ISC_WB_NONE &&
		    ctrl->cluster[ISC_CTRL_DO_WB]->is_new &&
		    !(ctrl->cluster[ISC_CTRL_DO_WB]->flags &
		    V4L2_CTRL_FLAG_INACTIVE)) {
			ctrls->awb = ISC_WB_ONETIME;
			isc_set_histogram(isc, true);
			dev_dbg(isc->dev, "One time white-balance started.\n");
		}
		return 0;
	}
	return 0;
}

/* Report the gains/offsets last computed by the AWB algorithm (volatile). */
static int isc_g_volatile_awb_ctrl(struct v4l2_ctrl *ctrl)
{
	struct isc_device *isc = container_of(ctrl->handler,
					      struct isc_device, ctrls.handler);
	struct isc_ctrls *ctrls = &isc->ctrls;

	switch (ctrl->id) {
	/* being a cluster, this id will be called for every control */
	case V4L2_CID_AUTO_WHITE_BALANCE:
		ctrl->cluster[ISC_CTRL_R_GAIN]->val =
					ctrls->gain[ISC_HIS_CFG_MODE_R];
		ctrl->cluster[ISC_CTRL_B_GAIN]->val =
					ctrls->gain[ISC_HIS_CFG_MODE_B];
		ctrl->cluster[ISC_CTRL_GR_GAIN]->val =
					ctrls->gain[ISC_HIS_CFG_MODE_GR];
		ctrl->cluster[ISC_CTRL_GB_GAIN]->val =
					ctrls->gain[ISC_HIS_CFG_MODE_GB];

		ctrl->cluster[ISC_CTRL_R_OFF]->val =
			ctrls->offset[ISC_HIS_CFG_MODE_R];
		ctrl->cluster[ISC_CTRL_B_OFF]->val =
			ctrls->offset[ISC_HIS_CFG_MODE_B];
		ctrl->cluster[ISC_CTRL_GR_OFF]->val =
			ctrls->offset[ISC_HIS_CFG_MODE_GR];
		ctrl->cluster[ISC_CTRL_GB_OFF]->val =
			ctrls->offset[ISC_HIS_CFG_MODE_GB];
		break;
	}
	return 0;
}

static const struct v4l2_ctrl_ops isc_awb_ops = {
	.s_ctrl = isc_s_awb_ctrl,
	.g_volatile_ctrl = isc_g_volatile_awb_ctrl,
};

/* Template for the custom per-component offset controls (-4095..4095). */
#define ISC_CTRL_OFF(_name, _id, _name_str) \
	static const struct v4l2_ctrl_config _name = { \
		.ops = &isc_awb_ops, \
		.id = _id, \
		.name = _name_str, \
		.type = V4L2_CTRL_TYPE_INTEGER, \
		.flags = V4L2_CTRL_FLAG_SLIDER, \
		.min = -4095, \
		.max = 4095, \
		.step = 1, \
		.def = 0, \
	}

ISC_CTRL_OFF(isc_r_off_ctrl, ISC_CID_R_OFFSET, "Red Component Offset");
ISC_CTRL_OFF(isc_b_off_ctrl, ISC_CID_B_OFFSET, "Blue Component Offset");
ISC_CTRL_OFF(isc_gr_off_ctrl, ISC_CID_GR_OFFSET, "Green Red Component Offset");
ISC_CTRL_OFF(isc_gb_off_ctrl, ISC_CID_GB_OFFSET, "Green Blue Component Offset");

/* Template for the custom per-component gain controls (0..8191, 1.0 = 512). */
#define ISC_CTRL_GAIN(_name, _id, _name_str) \
	static const struct v4l2_ctrl_config _name = { \
		.ops = &isc_awb_ops, \
		.id = _id, \
		.name = _name_str, \
		.type = V4L2_CTRL_TYPE_INTEGER, \
		.flags = V4L2_CTRL_FLAG_SLIDER, \
		.min = 0, \
		.max = 8191, \
		.step = 1, \
		.def = 512, \
	}

ISC_CTRL_GAIN(isc_r_gain_ctrl, ISC_CID_R_GAIN, "Red Component Gain");
ISC_CTRL_GAIN(isc_b_gain_ctrl, ISC_CID_B_GAIN, "Blue Component Gain");
ISC_CTRL_GAIN(isc_gr_gain_ctrl, ISC_CID_GR_GAIN, "Green Red Component Gain");
ISC_CTRL_GAIN(isc_gb_gain_ctrl, ISC_CID_GB_GAIN, "Green Blue Component Gain");

/*
 * Register the V4L2 control handler: standard brightness/gamma controls,
 * product-specific controls via isc->config_ctrls(), and the 10-control
 * auto-white-balance cluster.
 */
static int isc_ctrl_init(struct isc_device *isc)
{
	const struct v4l2_ctrl_ops *ops = &isc_ctrl_ops;
	struct isc_ctrls *ctrls = &isc->ctrls;
	struct v4l2_ctrl_handler *hdl = &ctrls->handler;
	int ret;

	ctrls->hist_stat = HIST_INIT;
	isc_reset_awb_ctrls(isc);

	ret = v4l2_ctrl_handler_init(hdl, 13);
	if (ret < 0)
		return ret;

	/* Initialize product specific controls. For example, contrast */
	isc->config_ctrls(isc, ops);

	ctrls->brightness = 0;

	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS, -1024, 1023, 1, 0);
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAMMA, 0, isc->gamma_max, 1,
			  isc->gamma_max);
	isc->awb_ctrl = v4l2_ctrl_new_std(hdl, &isc_awb_ops,
					  V4L2_CID_AUTO_WHITE_BALANCE,
					  0, 1, 1, 1);

	/* do_white_balance is a button, so min,max,step,default are ignored */
	isc->do_wb_ctrl = v4l2_ctrl_new_std(hdl, &isc_awb_ops,
					    V4L2_CID_DO_WHITE_BALANCE,
					    0, 0, 0, 0);

	if (!isc->do_wb_ctrl) {
		ret = hdl->error;
		v4l2_ctrl_handler_free(hdl);
		return ret;
	}

	v4l2_ctrl_activate(isc->do_wb_ctrl, false);

	isc->r_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_r_gain_ctrl, NULL);
	isc->b_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_b_gain_ctrl, NULL);
	isc->gr_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gr_gain_ctrl, NULL);
	isc->gb_gain_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gb_gain_ctrl, NULL);
	isc->r_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_r_off_ctrl, NULL);
	isc->b_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_b_off_ctrl, NULL);
	isc->gr_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gr_off_ctrl, NULL);
	isc->gb_off_ctrl = v4l2_ctrl_new_custom(hdl, &isc_gb_off_ctrl, NULL);

	/*
	 * The cluster is in auto mode with autowhitebalance enabled
	 * and manual mode otherwise.
	 */
	v4l2_ctrl_auto_cluster(10, &isc->awb_ctrl, 0, true);

	v4l2_ctrl_handler_setup(hdl);

	return 0;
}

/* Async notifier .bound: remember the subdev and its source pad. */
static int isc_async_bound(struct v4l2_async_notifier *notifier,
			   struct v4l2_subdev *subdev,
			   struct v4l2_async_connection *asd)
{
	struct isc_device *isc = container_of(notifier->v4l2_dev,
					      struct isc_device, v4l2_dev);
	struct isc_subdev_entity *subdev_entity =
		container_of(notifier, struct isc_subdev_entity, notifier);
	int pad;

	if (video_is_registered(&isc->video_dev)) {
		dev_err(isc->dev, "only supports one sub-device.\n");
		return -EBUSY;
	}

	subdev_entity->sd = subdev;

	pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode,
					  MEDIA_PAD_FL_SOURCE);
	if (pad < 0) {
		dev_err(isc->dev, "failed to find pad for %s\n", subdev->name);
		return pad;
	}

	isc->remote_pad = pad;

	return 0;
}

/* Async notifier .unbind: tear down everything created in .complete. */
static void isc_async_unbind(struct v4l2_async_notifier *notifier,
			     struct v4l2_subdev *subdev,
			     struct v4l2_async_connection *asd)
{
	struct isc_device *isc = container_of(notifier->v4l2_dev,
					      struct isc_device, v4l2_dev);

	mutex_destroy(&isc->awb_mutex);
	cancel_work_sync(&isc->awb_work);
	video_unregister_device(&isc->video_dev);
	v4l2_ctrl_handler_free(&isc->ctrls.handler);
}

/* Look up an input format by its media-bus code; also returns its index. */
struct isc_format *isc_find_format_by_code(struct isc_device *isc,
					   unsigned int code, int *index)
{
	struct isc_format *fmt = &isc->formats_list[0];
	unsigned int i;

	for (i = 0; i < isc->formats_list_size; i++) {
		if (fmt->mbus_code == code) {
			*index = i;
			return fmt;
		}

		fmt++;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(isc_find_format_by_code);

/* Seed isc->fmt with a VGA frame in the first controller output format. */
static int isc_set_default_fmt(struct isc_device *isc)
{
	struct v4l2_format f = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.fmt.pix = {
			.width		= VGA_WIDTH,
			.height		= VGA_HEIGHT,
			.field		= V4L2_FIELD_NONE,
			.pixelformat	= isc->controller_formats[0].fourcc,
		},
	};
	int ret;

	ret = isc_try_fmt(isc, &f);
	if (ret)
		return ret;

	isc->fmt = f;

	return 0;
}

/*
 * Async notifier .complete: all subdevs bound — set up the vb2 queue,
 * default format, controls, and register the video device and media device.
 */
static int isc_async_complete(struct v4l2_async_notifier *notifier)
{
	struct isc_device *isc = container_of(notifier->v4l2_dev,
					      struct isc_device, v4l2_dev);
	struct video_device *vdev = &isc->video_dev;
	struct vb2_queue *q = &isc->vb2_vidq;
	int ret = 0;

	INIT_WORK(&isc->awb_work, isc_awb_work);

	ret = v4l2_device_register_subdev_nodes(&isc->v4l2_dev);
	if (ret < 0) {
		dev_err(isc->dev, "Failed to register subdev nodes\n");
		return ret;
	}

	isc->current_subdev = container_of(notifier,
					   struct isc_subdev_entity, notifier);
	mutex_init(&isc->lock);
	mutex_init(&isc->awb_mutex);

	init_completion(&isc->comp);

	/* Initialize videobuf2 queue */
	q->type			= V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes		= VB2_MMAP | VB2_DMABUF | VB2_READ;
	q->drv_priv		= isc;
	q->buf_struct_size	= sizeof(struct isc_buffer);
	q->ops			= &isc_vb2_ops;
	q->mem_ops		= &vb2_dma_contig_memops;
	q->timestamp_flags	= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock			= &isc->lock;
	q->min_buffers_needed	= 1;
	q->dev			= isc->dev;

	ret = vb2_queue_init(q);
	if (ret < 0) {
		dev_err(isc->dev, "vb2_queue_init() failed: %d\n", ret);
		goto isc_async_complete_err;
	}

	/* Init video dma queues */
	INIT_LIST_HEAD(&isc->dma_queue);
	spin_lock_init(&isc->dma_queue_lock);
	spin_lock_init(&isc->awb_lock);

	ret = isc_set_default_fmt(isc);
	if (ret) {
		dev_err(isc->dev, "Could not set default format\n");
		goto isc_async_complete_err;
	}

	ret = isc_ctrl_init(isc);
	if (ret) {
		dev_err(isc->dev, "Init isc ctrols failed: %d\n", ret);
		goto isc_async_complete_err;
	}

	/* Register video device */
	strscpy(vdev->name, KBUILD_MODNAME, sizeof(vdev->name));
	vdev->release		= video_device_release_empty;
	vdev->fops		= &isc_fops;
	vdev->ioctl_ops		= &isc_ioctl_ops;
	vdev->v4l2_dev		= &isc->v4l2_dev;
	vdev->vfl_dir		= VFL_DIR_RX;
	vdev->queue		= q;
	vdev->lock		= &isc->lock;
	vdev->ctrl_handler	= &isc->ctrls.handler;
	vdev->device_caps	= V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE |
				  V4L2_CAP_IO_MC;
	video_set_drvdata(vdev, isc);

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		dev_err(isc->dev, "video_register_device failed: %d\n", ret);
		goto isc_async_complete_err;
	}

	ret = isc_scaler_link(isc);
	if (ret < 0)
		goto isc_async_complete_unregister_device;

	ret = media_device_register(&isc->mdev);
	if (ret < 0)
		goto isc_async_complete_unregister_device;

	return 0;

isc_async_complete_unregister_device:
	video_unregister_device(vdev);

isc_async_complete_err:
	mutex_destroy(&isc->awb_mutex);
	mutex_destroy(&isc->lock);
	return ret;
}

const struct v4l2_async_notifier_operations microchip_isc_async_ops = {
	.bound = isc_async_bound,
	.unbind = isc_async_unbind,
	.complete = isc_async_complete,
};
EXPORT_SYMBOL_GPL(microchip_isc_async_ops);

/* Unregister and clean up all per-subdev async notifiers. */
void microchip_isc_subdev_cleanup(struct isc_device *isc)
{
	struct isc_subdev_entity *subdev_entity;

	list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
		v4l2_async_nf_unregister(&subdev_entity->notifier);
		v4l2_async_nf_cleanup(&subdev_entity->notifier);
	}

	INIT_LIST_HEAD(&isc->subdev_entities);
}
EXPORT_SYMBOL_GPL(microchip_isc_subdev_cleanup);

/*
 * Allocate one regmap field per pipeline-module enable bit so the core code
 * can switch individual processing stages on and off generically.
 */
int microchip_isc_pipeline_init(struct isc_device *isc)
{
	struct device *dev = isc->dev;
	struct regmap *regmap = isc->regmap;
	struct regmap_field *regs;
	unsigned int i;

	/*
	 * DPCEN-->GDCEN-->BLCEN-->WB-->CFA-->CC-->
	 * GAM-->VHXS-->CSC-->CBC-->SUB422-->SUB420
	 */
	const struct reg_field regfields[ISC_PIPE_LINE_NODE_NUM] = {
		REG_FIELD(ISC_DPC_CTRL, 0, 0),
		REG_FIELD(ISC_DPC_CTRL, 1, 1),
		REG_FIELD(ISC_DPC_CTRL, 2, 2),
		REG_FIELD(ISC_WB_CTRL, 0, 0),
		REG_FIELD(ISC_CFA_CTRL, 0, 0),
		REG_FIELD(ISC_CC_CTRL, 0, 0),
		REG_FIELD(ISC_GAM_CTRL, 0, 0),
		REG_FIELD(ISC_GAM_CTRL, 1, 1),
		REG_FIELD(ISC_GAM_CTRL, 2, 2),
		REG_FIELD(ISC_GAM_CTRL, 3, 3),
		REG_FIELD(ISC_VHXS_CTRL, 0, 0),
		REG_FIELD(ISC_CSC_CTRL + isc->offsets.csc, 0, 0),
		REG_FIELD(ISC_CBC_CTRL + isc->offsets.cbc, 0, 0),
		REG_FIELD(ISC_SUB422_CTRL + isc->offsets.sub422, 0, 0),
		REG_FIELD(ISC_SUB420_CTRL + isc->offsets.sub420, 0, 0),
	};

	for (i = 0; i < ISC_PIPE_LINE_NODE_NUM; i++) {
		regs = devm_regmap_field_alloc(dev, regmap, regfields[i]);
		if (IS_ERR(regs))
			return PTR_ERR(regs);

		isc->pipeline[i] = regs;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(microchip_isc_pipeline_init);

/* Media-link validation: standard subdev checks plus ISC format matching. */
static int isc_link_validate(struct media_link *link)
{
	struct video_device *vdev =
		media_entity_to_video_device(link->sink->entity);
	struct isc_device *isc = video_get_drvdata(vdev);
	int ret;

	ret = v4l2_subdev_link_validate(link);
	if (ret)
		return ret;

	return isc_validate(isc);
}

static const struct media_entity_operations isc_entity_operations = {
	.link_validate = isc_link_validate,
};

/* Initialize the media controller entity/device for the ISC. */
int isc_mc_init(struct isc_device *isc, u32 ver)
{
	const struct of_device_id *match;
	int ret;

	isc->video_dev.entity.function = MEDIA_ENT_F_IO_V4L;
	isc->video_dev.entity.flags = MEDIA_ENT_FL_DEFAULT;
	isc->video_dev.entity.ops = &isc_entity_operations;

	isc->pads[ISC_PAD_SINK].flags = MEDIA_PAD_FL_SINK;

	ret = media_entity_pads_init(&isc->video_dev.entity, ISC_PADS_NUM,
				     isc->pads);
	if (ret < 0) {
		dev_err(isc->dev, "media entity init failed\n");
		return ret;
	}

	isc->mdev.dev = isc->dev;

	match = of_match_node(isc->dev->driver->of_match_table,
			      isc->dev->of_node);

	strscpy(isc->mdev.driver_name, KBUILD_MODNAME,
		sizeof(isc->mdev.driver_name));
	strscpy(isc->mdev.model, match->compatible, sizeof(isc->mdev.model));
	snprintf(isc->mdev.bus_info, sizeof(isc->mdev.bus_info), "platform:%s",
		 isc->v4l2_dev.name);
	isc->mdev.hw_revision = ver;

	media_device_init(&isc->mdev);

	isc->v4l2_dev.mdev = &isc->mdev;

	return isc_scaler_init(isc);
}
EXPORT_SYMBOL_GPL(isc_mc_init);

/* Undo isc_mc_init(). */
void isc_mc_cleanup(struct isc_device *isc)
{
	media_entity_cleanup(&isc->video_dev.entity);
	media_device_cleanup(&isc->mdev);
}
EXPORT_SYMBOL_GPL(isc_mc_cleanup);

/* regmap configuration */
#define MICROCHIP_ISC_REG_MAX    0xd5c

const struct regmap_config microchip_isc_regmap_config = {
	.reg_bits       = 32,
	.reg_stride     = 4,
	.val_bits       = 32,
	.max_register	= MICROCHIP_ISC_REG_MAX,
};
EXPORT_SYMBOL_GPL(microchip_isc_regmap_config);

MODULE_AUTHOR("Songjun Wu");
MODULE_AUTHOR("Eugen Hristev");
MODULE_DESCRIPTION("Microchip ISC common code base");
MODULE_LICENSE("GPL v2");
linux-master
drivers/media/platform/microchip/microchip-isc-base.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip eXtended Image Sensor Controller (XISC) driver
 *
 * Copyright (C) 2019-2021 Microchip Technology, Inc. and its subsidiaries
 *
 * Author: Eugen Hristev <[email protected]>
 *
 * Sensor-->PFE-->DPC-->WB-->CFA-->CC-->GAM-->VHXS-->CSC-->CBHS-->SUB-->RLP-->DMA-->HIS
 *
 * ISC video pipeline integrates the following submodules:
 *  PFE: Parallel Front End to sample the camera sensor input stream
 *  DPC: Defective Pixel Correction with black offset correction, green
 *       disparity correction and defective pixel correction (3 modules total)
 *   WB: Programmable white balance in the Bayer domain
 *  CFA: Color filter array interpolation module
 *   CC: Programmable color correction
 *  GAM: Gamma correction
 * VHXS: Vertical and Horizontal Scaler
 *  CSC: Programmable color space conversion
 * CBHS: Contrast Brightness Hue and Saturation control
 *  SUB: This module performs YCbCr444 to YCbCr420 chrominance subsampling
 *  RLP: This module performs rounding, range limiting
 *       and packing of the incoming data
 *  DMA: This module performs DMA master accesses to write frames to external RAM
 *  HIS: Histogram module performs statistic counters on the frames
 */

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/videodev2.h>

#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-image-sizes.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>

#include "microchip-isc-regs.h"
#include "microchip-isc.h"

#define ISC_SAMA7G5_MAX_SUPPORT_WIDTH   3264
#define ISC_SAMA7G5_MAX_SUPPORT_HEIGHT  2464

/* Pipeline stages the SAMA7G5 XISC actually implements. */
#define ISC_SAMA7G5_PIPELINE \
	(WB_ENABLE | CFA_ENABLE | CC_ENABLE | GAM_ENABLES | CSC_ENABLE | \
	CBC_ENABLE | SUB422_ENABLE | SUB420_ENABLE)

/* This is a list of the formats that the ISC can *output* */
static const struct isc_format sama7g5_controller_formats[] = {
	{
		.fourcc		= V4L2_PIX_FMT_ARGB444,
	}, {
		.fourcc		= V4L2_PIX_FMT_ARGB555,
	}, {
		.fourcc		= V4L2_PIX_FMT_RGB565,
	}, {
		.fourcc		= V4L2_PIX_FMT_ABGR32,
	}, {
		.fourcc		= V4L2_PIX_FMT_XBGR32,
	}, {
		.fourcc		= V4L2_PIX_FMT_YUV420,
	}, {
		.fourcc		= V4L2_PIX_FMT_UYVY,
	}, {
		.fourcc		= V4L2_PIX_FMT_VYUY,
	}, {
		.fourcc		= V4L2_PIX_FMT_YUYV,
	}, {
		.fourcc		= V4L2_PIX_FMT_YUV422P,
	}, {
		.fourcc		= V4L2_PIX_FMT_GREY,
	}, {
		.fourcc		= V4L2_PIX_FMT_Y10,
	}, {
		.fourcc		= V4L2_PIX_FMT_Y16,
	}, {
		.fourcc		= V4L2_PIX_FMT_SBGGR8,
		.raw		= true,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGBRG8,
		.raw		= true,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGRBG8,
		.raw		= true,
	}, {
		.fourcc		= V4L2_PIX_FMT_SRGGB8,
		.raw		= true,
	}, {
		.fourcc		= V4L2_PIX_FMT_SBGGR10,
		.raw		= true,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGBRG10,
		.raw		= true,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGRBG10,
		.raw		= true,
	}, {
		.fourcc		= V4L2_PIX_FMT_SRGGB10,
		.raw		= true,
	}, {
		.fourcc		= V4L2_PIX_FMT_SBGGR12,
		.raw		= true,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGBRG12,
		.raw		= true,
	}, {
		.fourcc		= V4L2_PIX_FMT_SGRBG12,
		.raw		= true,
	}, {
		.fourcc		= V4L2_PIX_FMT_SRGGB12,
		.raw		= true,
	},
};

/* This is a list of formats that the ISC can receive as *input* */
static struct isc_format sama7g5_formats_list[] = {
	{
		.fourcc		= V4L2_PIX_FMT_SBGGR8,
		.mbus_code	= MEDIA_BUS_FMT_SBGGR8_1X8,
		.pfe_cfg0_bps	= ISC_PFE_CFG0_BPS_EIGHT,
		.cfa_baycfg	= ISC_BAY_CFG_BGBG,
	},
	{
		.fourcc		= V4L2_PIX_FMT_SGBRG8,
		.mbus_code	= MEDIA_BUS_FMT_SGBRG8_1X8,
		.pfe_cfg0_bps	= ISC_PFE_CFG0_BPS_EIGHT,
		.cfa_baycfg	= ISC_BAY_CFG_GBGB,
	},
	{
		.fourcc		= V4L2_PIX_FMT_SGRBG8,
		.mbus_code	= MEDIA_BUS_FMT_SGRBG8_1X8,
		.pfe_cfg0_bps	= ISC_PFE_CFG0_BPS_EIGHT,
		.cfa_baycfg	= ISC_BAY_CFG_GRGR,
	},
	{
		.fourcc		= V4L2_PIX_FMT_SRGGB8,
		.mbus_code	= MEDIA_BUS_FMT_SRGGB8_1X8,
		.pfe_cfg0_bps	= ISC_PFE_CFG0_BPS_EIGHT,
		.cfa_baycfg	= ISC_BAY_CFG_RGRG,
	},
	{
		/*
		 * NOTE(review): BGGR mapped to ISC_BAY_CFG_RGRG here, while
		 * the 8- and 12-bit BGGR entries use ISC_BAY_CFG_BGBG —
		 * looks inconsistent; confirm against the datasheet.
		 */
		.fourcc		= V4L2_PIX_FMT_SBGGR10,
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.pfe_cfg0_bps	= ISC_PFG_CFG0_BPS_TEN,
		.cfa_baycfg	= ISC_BAY_CFG_RGRG,
	},
	{
		.fourcc		= V4L2_PIX_FMT_SGBRG10,
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.pfe_cfg0_bps	= ISC_PFG_CFG0_BPS_TEN,
		.cfa_baycfg	= ISC_BAY_CFG_GBGB,
	},
	{
		.fourcc		= V4L2_PIX_FMT_SGRBG10,
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.pfe_cfg0_bps	= ISC_PFG_CFG0_BPS_TEN,
		.cfa_baycfg	= ISC_BAY_CFG_GRGR,
	},
	{
		.fourcc		= V4L2_PIX_FMT_SRGGB10,
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.pfe_cfg0_bps	= ISC_PFG_CFG0_BPS_TEN,
		.cfa_baycfg	= ISC_BAY_CFG_RGRG,
	},
	{
		.fourcc		= V4L2_PIX_FMT_SBGGR12,
		.mbus_code	= MEDIA_BUS_FMT_SBGGR12_1X12,
		.pfe_cfg0_bps	= ISC_PFG_CFG0_BPS_TWELVE,
		.cfa_baycfg	= ISC_BAY_CFG_BGBG,
	},
	{
		.fourcc		= V4L2_PIX_FMT_SGBRG12,
		.mbus_code	= MEDIA_BUS_FMT_SGBRG12_1X12,
		.pfe_cfg0_bps	= ISC_PFG_CFG0_BPS_TWELVE,
		.cfa_baycfg	= ISC_BAY_CFG_GBGB,
	},
	{
		.fourcc		= V4L2_PIX_FMT_SGRBG12,
		.mbus_code	= MEDIA_BUS_FMT_SGRBG12_1X12,
		.pfe_cfg0_bps	= ISC_PFG_CFG0_BPS_TWELVE,
		.cfa_baycfg	= ISC_BAY_CFG_GRGR,
	},
	{
		.fourcc		= V4L2_PIX_FMT_SRGGB12,
		.mbus_code	= MEDIA_BUS_FMT_SRGGB12_1X12,
		.pfe_cfg0_bps	= ISC_PFG_CFG0_BPS_TWELVE,
		.cfa_baycfg	= ISC_BAY_CFG_RGRG,
	},
	{
		.fourcc		= V4L2_PIX_FMT_GREY,
		.mbus_code	= MEDIA_BUS_FMT_Y8_1X8,
		.pfe_cfg0_bps	= ISC_PFE_CFG0_BPS_EIGHT,
	},
	{
		.fourcc		= V4L2_PIX_FMT_YUYV,
		.mbus_code	= MEDIA_BUS_FMT_YUYV8_2X8,
		.pfe_cfg0_bps	= ISC_PFE_CFG0_BPS_EIGHT,
	},
	{
		.fourcc		= V4L2_PIX_FMT_UYVY,
		.mbus_code	= MEDIA_BUS_FMT_UYVY8_2X8,
		.pfe_cfg0_bps	= ISC_PFE_CFG0_BPS_EIGHT,
	},
	{
		.fourcc		= V4L2_PIX_FMT_RGB565,
		.mbus_code	= MEDIA_BUS_FMT_RGB565_2X8_LE,
		.pfe_cfg0_bps	= ISC_PFE_CFG0_BPS_EIGHT,
	},
	{
		.fourcc		= V4L2_PIX_FMT_Y10,
		.mbus_code	= MEDIA_BUS_FMT_Y10_1X10,
		.pfe_cfg0_bps	= ISC_PFG_CFG0_BPS_TEN,
	},
};

/* Program the fixed RGB->YUV color-space-conversion coefficient matrix. */
static void isc_sama7g5_config_csc(struct isc_device *isc)
{
	struct regmap *regmap = isc->regmap;

	/* Convert RGB to YUV */
	regmap_write(regmap, ISC_CSC_YR_YG + isc->offsets.csc,
		     0x42 | (0x81 << 16));
	regmap_write(regmap, ISC_CSC_YB_OY + isc->offsets.csc,
		     0x19 | (0x10 << 16));
	regmap_write(regmap, ISC_CSC_CBR_CBG + isc->offsets.csc,
		     0xFDA | (0xFB6 << 16));
	regmap_write(regmap, ISC_CSC_CBB_OCB + isc->offsets.csc,
		     0x70 | (0x80 << 16));
	regmap_write(regmap, ISC_CSC_CRR_CRG + isc->offsets.csc,
		     0x70 | (0xFA2 << 16));
	regmap_write(regmap, ISC_CSC_CRB_OCR + isc->offsets.csc,
		     0xFEE | (0x80 << 16));
}

/* Apply brightness/contrast controls; hue/saturation stay at neutral. */
static void isc_sama7g5_config_cbc(struct isc_device *isc)
{
	struct regmap *regmap = isc->regmap;

	/* Configure what is set via v4l2 ctrls */
	regmap_write(regmap, ISC_CBC_BRIGHT + isc->offsets.cbc, isc->ctrls.brightness);
	regmap_write(regmap, ISC_CBC_CONTRAST + isc->offsets.cbc, isc->ctrls.contrast);
	/* Configure Hue and Saturation as neutral midpoint */
	regmap_write(regmap, ISC_CBCHS_HUE, 0);
	regmap_write(regmap, ISC_CBCHS_SAT, (1 << 4));
}

/* Identity color-correction matrix (fixed point, 1.0 == 1 << 8). */
static void isc_sama7g5_config_cc(struct isc_device *isc)
{
	struct regmap *regmap = isc->regmap;

	/* Configure each register at the neutral fixed point 1.0 or 0.0 */
	regmap_write(regmap, ISC_CC_RR_RG, (1 << 8));
	regmap_write(regmap, ISC_CC_RB_OR, 0);
	regmap_write(regmap, ISC_CC_GR_GG, (1 << 8) << 16);
	regmap_write(regmap, ISC_CC_GB_OG, 0);
	regmap_write(regmap, ISC_CC_BR_BG, 0);
	regmap_write(regmap, ISC_CC_BB_OB, (1 << 8));
}

/* Product-specific V4L2 controls for the SAMA7G5 (contrast only). */
static void isc_sama7g5_config_ctrls(struct isc_device *isc,
				     const struct v4l2_ctrl_ops *ops)
{
	struct isc_ctrls *ctrls = &isc->ctrls;
	struct v4l2_ctrl_handler *hdl = &ctrls->handler;

	ctrls->contrast = 16;

	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 16);
}

/* Defective-pixel-correction: black-level offset and Bayer configuration. */
static void isc_sama7g5_config_dpc(struct isc_device *isc)
{
	u32 bay_cfg = isc->config.sd_format->cfa_baycfg;
	struct regmap *regmap = isc->regmap;

	regmap_update_bits(regmap, ISC_DPC_CFG, ISC_DPC_CFG_BLOFF_MASK,
			   (64 << ISC_DPC_CFG_BLOFF_SHIFT));
	regmap_update_bits(regmap, ISC_DPC_CFG, ISC_DPC_CFG_BAYCFG_MASK,
			   (bay_cfg << ISC_DPC_CFG_BAYCFG_SHIFT));
}

/* Select the bipartite gamma table layout used by the SAMA7G5. */
static void isc_sama7g5_config_gam(struct isc_device *isc)
{
	struct regmap *regmap = isc->regmap;

	regmap_update_bits(regmap, ISC_GAM_CTRL, ISC_GAM_CTRL_BIPART,
			   ISC_GAM_CTRL_BIPART);
}

/* Program the rounding/limiting/packing stage for the selected output. */
static void isc_sama7g5_config_rlp(struct isc_device *isc)
{
	struct regmap *regmap = isc->regmap;
	u32 rlp_mode = isc->config.rlp_cfg_mode;

	regmap_update_bits(regmap, ISC_RLP_CFG + isc->offsets.rlp,
			   ISC_RLP_CFG_MODE_MASK | ISC_RLP_CFG_LSH |
			   ISC_RLP_CFG_YMODE_MASK, rlp_mode);
}

/* Mask off pipeline stages this product does not implement. */
static void isc_sama7g5_adapt_pipeline(struct isc_device *isc)
{
	isc->try_config.bits_pipeline &= ISC_SAMA7G5_PIPELINE;
}

/* Gamma table with gamma 1/2.2 */
static const u32 isc_sama7g5_gamma_table[][GAMMA_ENTRIES] = {
	/* index 0 --> gamma bipartite */
	{
	      0x980,  0x4c0320,  0x650260,  0x7801e0,  0x8701a0,  0x940180,
	   0xa00160,  0xab0120,  0xb40120,  0xbd0120,  0xc60100,  0xce0100,
	   0xd600e0,  0xdd00e0,  0xe400e0,  0xeb00c0,  0xf100c0,  0xf700c0,
	   0xfd00c0, 0x10300a0, 0x10800c0, 0x10e00a0, 0x11300a0, 0x11800a0,
	  0x11d00a0, 0x12200a0, 0x12700a0, 0x12c0080, 0x13000a0, 0x1350080,
	  0x13900a0, 0x13e0080, 0x1420076, 0x17d0062, 0x1ae0054, 0x1d8004a,
	  0x1fd0044, 0x21f003e, 0x23e003a, 0x25b0036, 0x2760032, 0x28f0030,
	  0x2a7002e, 0x2be002c, 0x2d4002c, 0x2ea0028, 0x2fe0028, 0x3120026,
	  0x3250024, 0x3370024, 0x3490022, 0x35a0022, 0x36b0020, 0x37b0020,
	  0x38b0020, 0x39b001e, 0x3aa001e, 0x3b9001c, 0x3c7001c, 0x3d5001c,
	  0x3e3001c, 0x3f1001c, 0x3ff001a, 0x40c001a },
};

/*
 * Walk all OF graph endpoints and build one isc_subdev_entity per endpoint,
 * translating the endpoint bus flags into PFE_CFG0 polarity/mode bits.
 */
static int xisc_parse_dt(struct device *dev, struct isc_device *isc)
{
	struct device_node *np = dev->of_node;
	struct device_node *epn = NULL;
	struct isc_subdev_entity *subdev_entity;
	unsigned int flags;
	int ret;
	bool mipi_mode;

	INIT_LIST_HEAD(&isc->subdev_entities);

	mipi_mode = of_property_read_bool(np, "microchip,mipi-mode");

	while (1) {
		struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 };

		epn = of_graph_get_next_endpoint(np, epn);
		if (!epn)
			return 0;

		ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn),
						 &v4l2_epn);
		if (ret) {
			ret = -EINVAL;
			dev_err(dev, "Could not parse the endpoint\n");
			break;
		}

		subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity),
					     GFP_KERNEL);
		if (!subdev_entity) {
			ret = -ENOMEM;
			break;
		}
		subdev_entity->epn = epn;

		flags = v4l2_epn.bus.parallel.flags;

		if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
			subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW;

		if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
			subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW;

		if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
			subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW;

		if (v4l2_epn.bus_type == V4L2_MBUS_BT656)
			subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_CCIR_CRC |
					ISC_PFE_CFG0_CCIR656;

		if (mipi_mode)
			subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_MIPI;

		list_add_tail(&subdev_entity->list, &isc->subdev_entities);
	}
	of_node_put(epn);

	return ret;
}

/*
 * Probe: map registers, request the IRQ, fill in the SAMA7G5-specific
 * callbacks/offsets/tables, set up clocks, parse DT endpoints and register
 * the async notifiers and media controller.
 */
static int microchip_xisc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct isc_device *isc;
	void __iomem *io_base;
	struct isc_subdev_entity *subdev_entity;
	int irq;
	int ret;
	u32 ver;

	isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL);
	if (!isc)
		return -ENOMEM;

	platform_set_drvdata(pdev, isc);
	isc->dev = dev;

	io_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(io_base))
		return PTR_ERR(io_base);

	isc->regmap = devm_regmap_init_mmio(dev, io_base, &microchip_isc_regmap_config);
	if (IS_ERR(isc->regmap)) {
		ret = PTR_ERR(isc->regmap);
		dev_err(dev, "failed to init register map: %d\n", ret);
		return ret;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, microchip_isc_interrupt, 0,
			       "microchip-sama7g5-xisc", isc);
	if (ret < 0) {
		dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
			irq, ret);
		return ret;
	}

	isc->gamma_table = isc_sama7g5_gamma_table;
	/* Single gamma table entry (index 0 only). */
	isc->gamma_max = 0;

	isc->max_width = ISC_SAMA7G5_MAX_SUPPORT_WIDTH;
	isc->max_height = ISC_SAMA7G5_MAX_SUPPORT_HEIGHT;

	isc->config_dpc = isc_sama7g5_config_dpc;
	isc->config_csc = isc_sama7g5_config_csc;
	isc->config_cbc = isc_sama7g5_config_cbc;
	isc->config_cc = isc_sama7g5_config_cc;
	isc->config_gam = isc_sama7g5_config_gam;
	isc->config_rlp = isc_sama7g5_config_rlp;
	isc->config_ctrls = isc_sama7g5_config_ctrls;

	isc->adapt_pipeline = isc_sama7g5_adapt_pipeline;

	isc->offsets.csc = ISC_SAMA7G5_CSC_OFFSET;
	isc->offsets.cbc = ISC_SAMA7G5_CBC_OFFSET;
	isc->offsets.sub422 = ISC_SAMA7G5_SUB422_OFFSET;
	isc->offsets.sub420 = ISC_SAMA7G5_SUB420_OFFSET;
	isc->offsets.rlp = ISC_SAMA7G5_RLP_OFFSET;
	isc->offsets.his = ISC_SAMA7G5_HIS_OFFSET;
	isc->offsets.dma = ISC_SAMA7G5_DMA_OFFSET;
	isc->offsets.version = ISC_SAMA7G5_VERSION_OFFSET;
	isc->offsets.his_entry = ISC_SAMA7G5_HIS_ENTRY_OFFSET;

	isc->controller_formats = sama7g5_controller_formats;
	isc->controller_formats_size = ARRAY_SIZE(sama7g5_controller_formats);
	isc->formats_list = sama7g5_formats_list;
	isc->formats_list_size = ARRAY_SIZE(sama7g5_formats_list);

	/* sama7g5-isc RAM access port is full AXI4 - 32 bits per beat */
	isc->dcfg = ISC_DCFG_YMBSIZE_BEATS32 | ISC_DCFG_CMBSIZE_BEATS32;

	/* sama7g5-isc : ISPCK does not exist, ISC is clocked by MCK */
	isc->ispck_required = false;

	ret = microchip_isc_pipeline_init(isc);
	if (ret)
		return ret;

	isc->hclock = devm_clk_get(dev, "hclock");
	if (IS_ERR(isc->hclock)) {
		ret = PTR_ERR(isc->hclock);
		dev_err(dev, "failed to get hclock: %d\n", ret);
		return ret;
	}

	ret = clk_prepare_enable(isc->hclock);
	if (ret) {
		dev_err(dev, "failed to enable hclock: %d\n", ret);
		return ret;
	}

	ret = microchip_isc_clk_init(isc);
	if (ret) {
		dev_err(dev, "failed to init isc clock: %d\n", ret);
		goto unprepare_hclk;
	}

	ret = v4l2_device_register(dev, &isc->v4l2_dev);
	if (ret) {
		dev_err(dev, "unable to register v4l2 device.\n");
		goto unprepare_hclk;
	}

	ret = xisc_parse_dt(dev, isc);
	if (ret) {
		dev_err(dev, "fail to parse device tree\n");
		goto unregister_v4l2_device;
	}

	if (list_empty(&isc->subdev_entities)) {
		dev_err(dev, "no subdev found\n");
		ret = -ENODEV;
		goto unregister_v4l2_device;
	}

	list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
		struct v4l2_async_connection *asd;
		struct fwnode_handle *fwnode =
			of_fwnode_handle(subdev_entity->epn);

		v4l2_async_nf_init(&subdev_entity->notifier, &isc->v4l2_dev);

		asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
						      fwnode,
						      struct v4l2_async_connection);

		of_node_put(subdev_entity->epn);
		subdev_entity->epn = NULL;

		if (IS_ERR(asd)) {
			ret = PTR_ERR(asd);
			goto cleanup_subdev;
		}

		subdev_entity->notifier.ops = &microchip_isc_async_ops;

		ret = v4l2_async_nf_register(&subdev_entity->notifier);
		if (ret) {
			dev_err(dev, "fail to register async notifier\n");
			goto cleanup_subdev;
		}

		if (video_is_registered(&isc->video_dev))
			break;
	}

	regmap_read(isc->regmap, ISC_VERSION + isc->offsets.version, &ver);

	ret = isc_mc_init(isc, ver);
	if (ret < 0)
		goto isc_probe_mc_init_err;

	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_request_idle(dev);

	dev_info(dev, "Microchip XISC version %x\n", ver);

	return 0;

isc_probe_mc_init_err:
	isc_mc_cleanup(isc);

cleanup_subdev:
	microchip_isc_subdev_cleanup(isc);

unregister_v4l2_device:
	v4l2_device_unregister(&isc->v4l2_dev);

unprepare_hclk:
	clk_disable_unprepare(isc->hclock);

	microchip_isc_clk_cleanup(isc);

	return ret;
}

/* Remove: tear down in reverse probe order. */
static void microchip_xisc_remove(struct platform_device *pdev)
{
	struct isc_device *isc = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);

	isc_mc_cleanup(isc);
	microchip_isc_subdev_cleanup(isc);
	v4l2_device_unregister(&isc->v4l2_dev);

	clk_disable_unprepare(isc->hclock);

	microchip_isc_clk_cleanup(isc);
}

/* Runtime PM: gate/ungate the bus clock. */
static int __maybe_unused xisc_runtime_suspend(struct device *dev)
{
	struct isc_device *isc = dev_get_drvdata(dev);

	clk_disable_unprepare(isc->hclock);

	return 0;
}

static int __maybe_unused xisc_runtime_resume(struct device *dev)
{
	struct isc_device *isc = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(isc->hclock);
	if (ret)
		return ret;

	/* NOTE(review): this second "return ret" is redundant (ret == 0). */
	return ret;
}

static const struct dev_pm_ops microchip_xisc_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(xisc_runtime_suspend, xisc_runtime_resume, NULL)
};

#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id microchip_xisc_of_match[] = {
	{ .compatible = "microchip,sama7g5-isc" },
	{ }
};
MODULE_DEVICE_TABLE(of, microchip_xisc_of_match);
#endif

/*
 * Platform driver registration for the SAMA7G5 XISC.
 *
 * .remove_new is the void-returning remove callback
 * (microchip_xisc_remove above returns void); of_match_ptr() compiles
 * the OF match table reference away when CONFIG_OF is disabled — the
 * table itself is guarded by the #if IS_ENABLED(CONFIG_OF) block that
 * the #endif above closes.
 */
static struct platform_driver microchip_xisc_driver = {
	.probe = microchip_xisc_probe,
	.remove_new = microchip_xisc_remove,
	.driver = {
		.name = "microchip-sama7g5-xisc",
		.pm = &microchip_xisc_dev_pm_ops,
		.of_match_table = of_match_ptr(microchip_xisc_of_match),
	},
};

module_platform_driver(microchip_xisc_driver);

MODULE_AUTHOR("Eugen Hristev <[email protected]>");
MODULE_DESCRIPTION("The V4L2 driver for Microchip-XISC");
MODULE_LICENSE("GPL v2");
/*
 * --- end of drivers/media/platform/microchip/microchip-sama7g5-isc.c
 *     (source tree: linux-master) ---
 */
// SPDX-License-Identifier: GPL-2.0-only /* * Microchip Image Sensor Controller (ISC) Scaler entity support * * Copyright (C) 2022 Microchip Technology, Inc. * * Author: Eugen Hristev <[email protected]> * */ #include <media/media-device.h> #include <media/media-entity.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include "microchip-isc-regs.h" #include "microchip-isc.h" static void isc_scaler_prepare_fmt(struct v4l2_mbus_framefmt *framefmt) { framefmt->colorspace = V4L2_COLORSPACE_SRGB; framefmt->field = V4L2_FIELD_NONE; framefmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; framefmt->quantization = V4L2_QUANTIZATION_DEFAULT; framefmt->xfer_func = V4L2_XFER_FUNC_DEFAULT; }; static int isc_scaler_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct isc_device *isc = container_of(sd, struct isc_device, scaler_sd); struct v4l2_mbus_framefmt *v4l2_try_fmt; if (format->which == V4L2_SUBDEV_FORMAT_TRY) { v4l2_try_fmt = v4l2_subdev_get_try_format(sd, sd_state, format->pad); format->format = *v4l2_try_fmt; return 0; } format->format = isc->scaler_format[format->pad]; return 0; } static int isc_scaler_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *req_fmt) { struct isc_device *isc = container_of(sd, struct isc_device, scaler_sd); struct v4l2_mbus_framefmt *v4l2_try_fmt; struct isc_format *fmt; unsigned int i; /* Source format is fixed, we cannot change it */ if (req_fmt->pad == ISC_SCALER_PAD_SOURCE) { req_fmt->format = isc->scaler_format[ISC_SCALER_PAD_SOURCE]; return 0; } /* There is no limit on the frame size on the sink pad */ v4l_bound_align_image(&req_fmt->format.width, 16, UINT_MAX, 0, &req_fmt->format.height, 16, UINT_MAX, 0, 0); isc_scaler_prepare_fmt(&req_fmt->format); fmt = isc_find_format_by_code(isc, req_fmt->format.code, &i); if (!fmt) fmt = &isc->formats_list[0]; req_fmt->format.code = fmt->mbus_code; if (req_fmt->which == 
V4L2_SUBDEV_FORMAT_TRY) { v4l2_try_fmt = v4l2_subdev_get_try_format(sd, sd_state, req_fmt->pad); *v4l2_try_fmt = req_fmt->format; /* Trying on the sink pad makes the source pad change too */ v4l2_try_fmt = v4l2_subdev_get_try_format(sd, sd_state, ISC_SCALER_PAD_SOURCE); *v4l2_try_fmt = req_fmt->format; v4l_bound_align_image(&v4l2_try_fmt->width, 16, isc->max_width, 0, &v4l2_try_fmt->height, 16, isc->max_height, 0, 0); /* if we are just trying, we are done */ return 0; } isc->scaler_format[ISC_SCALER_PAD_SINK] = req_fmt->format; /* The source pad is the same as the sink, but we have to crop it */ isc->scaler_format[ISC_SCALER_PAD_SOURCE] = isc->scaler_format[ISC_SCALER_PAD_SINK]; v4l_bound_align_image (&isc->scaler_format[ISC_SCALER_PAD_SOURCE].width, 16, isc->max_width, 0, &isc->scaler_format[ISC_SCALER_PAD_SOURCE].height, 16, isc->max_height, 0, 0); return 0; } static int isc_scaler_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { struct isc_device *isc = container_of(sd, struct isc_device, scaler_sd); /* * All formats supported by the ISC are supported by the scaler. 
* Advertise the formats which the ISC can take as input, as the scaler * entity cropping is part of the PFE module (parallel front end) */ if (code->index < isc->formats_list_size) { code->code = isc->formats_list[code->index].mbus_code; return 0; } return -EINVAL; } static int isc_scaler_g_sel(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) { struct isc_device *isc = container_of(sd, struct isc_device, scaler_sd); if (sel->pad == ISC_SCALER_PAD_SOURCE) return -EINVAL; if (sel->target != V4L2_SEL_TGT_CROP_BOUNDS && sel->target != V4L2_SEL_TGT_CROP) return -EINVAL; sel->r.height = isc->scaler_format[ISC_SCALER_PAD_SOURCE].height; sel->r.width = isc->scaler_format[ISC_SCALER_PAD_SOURCE].width; sel->r.left = 0; sel->r.top = 0; return 0; } static int isc_scaler_init_cfg(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state) { struct v4l2_mbus_framefmt *v4l2_try_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0); struct v4l2_rect *try_crop; struct isc_device *isc = container_of(sd, struct isc_device, scaler_sd); *v4l2_try_fmt = isc->scaler_format[ISC_SCALER_PAD_SOURCE]; try_crop = v4l2_subdev_get_try_crop(sd, sd_state, 0); try_crop->top = 0; try_crop->left = 0; try_crop->width = v4l2_try_fmt->width; try_crop->height = v4l2_try_fmt->height; return 0; } static const struct v4l2_subdev_pad_ops isc_scaler_pad_ops = { .enum_mbus_code = isc_scaler_enum_mbus_code, .set_fmt = isc_scaler_set_fmt, .get_fmt = isc_scaler_get_fmt, .get_selection = isc_scaler_g_sel, .init_cfg = isc_scaler_init_cfg, }; static const struct media_entity_operations isc_scaler_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_ops xisc_scaler_subdev_ops = { .pad = &isc_scaler_pad_ops, }; int isc_scaler_init(struct isc_device *isc) { int ret; v4l2_subdev_init(&isc->scaler_sd, &xisc_scaler_subdev_ops); isc->scaler_sd.owner = THIS_MODULE; isc->scaler_sd.dev = isc->dev; snprintf(isc->scaler_sd.name, 
sizeof(isc->scaler_sd.name), "microchip_isc_scaler"); isc->scaler_sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; isc->scaler_sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER; isc->scaler_sd.entity.ops = &isc_scaler_entity_ops; isc->scaler_pads[ISC_SCALER_PAD_SINK].flags = MEDIA_PAD_FL_SINK; isc->scaler_pads[ISC_SCALER_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; isc_scaler_prepare_fmt(&isc->scaler_format[ISC_SCALER_PAD_SOURCE]); isc->scaler_format[ISC_SCALER_PAD_SOURCE].height = isc->max_height; isc->scaler_format[ISC_SCALER_PAD_SOURCE].width = isc->max_width; isc->scaler_format[ISC_SCALER_PAD_SOURCE].code = isc->formats_list[0].mbus_code; isc->scaler_format[ISC_SCALER_PAD_SINK] = isc->scaler_format[ISC_SCALER_PAD_SOURCE]; ret = media_entity_pads_init(&isc->scaler_sd.entity, ISC_SCALER_PADS_NUM, isc->scaler_pads); if (ret < 0) { dev_err(isc->dev, "scaler sd media entity init failed\n"); return ret; } ret = v4l2_device_register_subdev(&isc->v4l2_dev, &isc->scaler_sd); if (ret < 0) { dev_err(isc->dev, "scaler sd failed to register subdev\n"); return ret; } return ret; } EXPORT_SYMBOL_GPL(isc_scaler_init); int isc_scaler_link(struct isc_device *isc) { int ret; ret = media_create_pad_link(&isc->current_subdev->sd->entity, isc->remote_pad, &isc->scaler_sd.entity, ISC_SCALER_PAD_SINK, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret < 0) { dev_err(isc->dev, "Failed to create pad link: %s to %s\n", isc->current_subdev->sd->entity.name, isc->scaler_sd.entity.name); return ret; } dev_dbg(isc->dev, "link with %s pad: %d\n", isc->current_subdev->sd->name, isc->remote_pad); ret = media_create_pad_link(&isc->scaler_sd.entity, ISC_SCALER_PAD_SOURCE, &isc->video_dev.entity, ISC_PAD_SINK, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret < 0) { dev_err(isc->dev, "Failed to create pad link: %s to %s\n", isc->scaler_sd.entity.name, isc->video_dev.entity.name); return ret; } dev_dbg(isc->dev, "link with %s pad: %d\n", isc->scaler_sd.name, ISC_SCALER_PAD_SOURCE); return ret; } 
EXPORT_SYMBOL_GPL(isc_scaler_link);
/*
 * --- end of drivers/media/platform/microchip/microchip-isc-scaler.c
 *     (source tree: linux-master) ---
 */
// SPDX-License-Identifier: GPL-2.0-only /* * Microchip CSI2 Demux Controller (CSI2DC) driver * * Copyright (C) 2018 Microchip Technology, Inc. * * Author: Eugen Hristev <[email protected]> * */ #include <linux/clk.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/videodev2.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-subdev.h> /* Global configuration register */ #define CSI2DC_GCFG 0x0 /* MIPI sensor pixel clock is free running */ #define CSI2DC_GCFG_MIPIFRN BIT(0) /* GPIO parallel interface selection */ #define CSI2DC_GCFG_GPIOSEL BIT(1) /* Output waveform inter-line minimum delay */ #define CSI2DC_GCFG_HLC(v) ((v) << 4) #define CSI2DC_GCFG_HLC_MASK GENMASK(7, 4) /* SAMA7G5 requires a HLC delay of 15 */ #define SAMA7G5_HLC (15) /* Global control register */ #define CSI2DC_GCTLR 0x04 #define CSI2DC_GCTLR_SWRST BIT(0) /* Global status register */ #define CSI2DC_GS 0x08 /* SSP interrupt status register */ #define CSI2DC_SSPIS 0x28 /* Pipe update register */ #define CSI2DC_PU 0xc0 /* Video pipe attributes update */ #define CSI2DC_PU_VP BIT(0) /* Pipe update status register */ #define CSI2DC_PUS 0xc4 /* Video pipeline Interrupt Status Register */ #define CSI2DC_VPISR 0xf4 /* Video pipeline enable register */ #define CSI2DC_VPE 0xf8 #define CSI2DC_VPE_ENABLE BIT(0) /* Video pipeline configuration register */ #define CSI2DC_VPCFG 0xfc /* Data type */ #define CSI2DC_VPCFG_DT(v) ((v) << 0) #define CSI2DC_VPCFG_DT_MASK GENMASK(5, 0) /* Virtual channel identifier */ #define CSI2DC_VPCFG_VC(v) ((v) << 6) #define CSI2DC_VPCFG_VC_MASK GENMASK(7, 6) /* Decompression enable */ #define CSI2DC_VPCFG_DE BIT(8) /* Decoder mode */ #define CSI2DC_VPCFG_DM(v) ((v) << 9) #define CSI2DC_VPCFG_DM_DECODER8TO12 0 /* Decoder predictor 2 selection */ #define CSI2DC_VPCFG_DP2 BIT(12) /* Recommended memory storage */ #define CSI2DC_VPCFG_RMS BIT(13) /* Post 
adjustment */ #define CSI2DC_VPCFG_PA BIT(14) /* Video pipeline column register */ #define CSI2DC_VPCOL 0x100 /* Column number */ #define CSI2DC_VPCOL_COL(v) ((v) << 0) #define CSI2DC_VPCOL_COL_MASK GENMASK(15, 0) /* Video pipeline row register */ #define CSI2DC_VPROW 0x104 /* Row number */ #define CSI2DC_VPROW_ROW(v) ((v) << 0) #define CSI2DC_VPROW_ROW_MASK GENMASK(15, 0) /* Version register */ #define CSI2DC_VERSION 0x1fc /* register read/write helpers */ #define csi2dc_readl(st, reg) readl_relaxed((st)->base + (reg)) #define csi2dc_writel(st, reg, val) writel_relaxed((val), \ (st)->base + (reg)) /* supported RAW data types */ #define CSI2DC_DT_RAW6 0x28 #define CSI2DC_DT_RAW7 0x29 #define CSI2DC_DT_RAW8 0x2a #define CSI2DC_DT_RAW10 0x2b #define CSI2DC_DT_RAW12 0x2c #define CSI2DC_DT_RAW14 0x2d /* YUV data types */ #define CSI2DC_DT_YUV422_8B 0x1e /* * struct csi2dc_format - CSI2DC format type struct * @mbus_code: Media bus code for the format * @dt: Data type constant for this format */ struct csi2dc_format { u32 mbus_code; u32 dt; }; static const struct csi2dc_format csi2dc_formats[] = { { .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8, .dt = CSI2DC_DT_RAW8, }, { .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8, .dt = CSI2DC_DT_RAW8, }, { .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8, .dt = CSI2DC_DT_RAW8, }, { .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8, .dt = CSI2DC_DT_RAW8, }, { .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10, .dt = CSI2DC_DT_RAW10, }, { .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10, .dt = CSI2DC_DT_RAW10, }, { .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10, .dt = CSI2DC_DT_RAW10, }, { .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10, .dt = CSI2DC_DT_RAW10, }, { .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, .dt = CSI2DC_DT_YUV422_8B, }, }; enum mipi_csi_pads { CSI2DC_PAD_SINK = 0, CSI2DC_PAD_SOURCE = 1, CSI2DC_PADS_NUM = 2, }; /* * struct csi2dc_device - CSI2DC device driver data/config struct * @base: Register map base address * @csi2dc_sd: v4l2 subdevice for the csi2dc device * This is the subdevice that the 
csi2dc device itself * registers in v4l2 subsystem * @dev: struct device for this csi2dc device * @pclk: Peripheral clock reference * Input clock that clocks the hardware block internal * logic * @scck: Sensor Controller clock reference * Input clock that is used to generate the pixel clock * @format: Current saved format used in g/s fmt * @cur_fmt: Current state format * @try_fmt: Try format that is being tried * @pads: Media entity pads for the csi2dc subdevice * @clk_gated: Whether the clock is gated or free running * @video_pipe: Whether video pipeline is configured * @parallel_mode: The underlying subdevice is connected on a parallel bus * @vc: Current set virtual channel * @notifier: Async notifier that is used to bound the underlying * subdevice to the csi2dc subdevice * @input_sd: Reference to the underlying subdevice bound to the * csi2dc subdevice * @remote_pad: Pad number of the underlying subdevice that is linked * to the csi2dc subdevice sink pad. */ struct csi2dc_device { void __iomem *base; struct v4l2_subdev csi2dc_sd; struct device *dev; struct clk *pclk; struct clk *scck; struct v4l2_mbus_framefmt format; const struct csi2dc_format *cur_fmt; const struct csi2dc_format *try_fmt; struct media_pad pads[CSI2DC_PADS_NUM]; bool clk_gated; bool video_pipe; bool parallel_mode; u32 vc; struct v4l2_async_notifier notifier; struct v4l2_subdev *input_sd; u32 remote_pad; }; static inline struct csi2dc_device * csi2dc_sd_to_csi2dc_device(struct v4l2_subdev *csi2dc_sd) { return container_of(csi2dc_sd, struct csi2dc_device, csi2dc_sd); } static int csi2dc_enum_mbus_code(struct v4l2_subdev *csi2dc_sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index >= ARRAY_SIZE(csi2dc_formats)) return -EINVAL; code->code = csi2dc_formats[code->index].mbus_code; return 0; } static int csi2dc_get_fmt(struct v4l2_subdev *csi2dc_sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct csi2dc_device *csi2dc = 
csi2dc_sd_to_csi2dc_device(csi2dc_sd); struct v4l2_mbus_framefmt *v4l2_try_fmt; if (format->which == V4L2_SUBDEV_FORMAT_TRY) { v4l2_try_fmt = v4l2_subdev_get_try_format(csi2dc_sd, sd_state, format->pad); format->format = *v4l2_try_fmt; return 0; } format->format = csi2dc->format; return 0; } static int csi2dc_set_fmt(struct v4l2_subdev *csi2dc_sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *req_fmt) { struct csi2dc_device *csi2dc = csi2dc_sd_to_csi2dc_device(csi2dc_sd); const struct csi2dc_format *fmt, *try_fmt = NULL; struct v4l2_mbus_framefmt *v4l2_try_fmt; unsigned int i; /* * Setting the source pad is disabled. * The same format is being propagated from the sink to source. */ if (req_fmt->pad == CSI2DC_PAD_SOURCE) return -EINVAL; for (i = 0; i < ARRAY_SIZE(csi2dc_formats); i++) { fmt = &csi2dc_formats[i]; if (req_fmt->format.code == fmt->mbus_code) try_fmt = fmt; fmt++; } /* in case we could not find the desired format, default to something */ if (!try_fmt) { try_fmt = &csi2dc_formats[0]; dev_dbg(csi2dc->dev, "CSI2DC unsupported format 0x%x, defaulting to 0x%x\n", req_fmt->format.code, csi2dc_formats[0].mbus_code); } req_fmt->format.code = try_fmt->mbus_code; req_fmt->format.colorspace = V4L2_COLORSPACE_SRGB; req_fmt->format.field = V4L2_FIELD_NONE; if (req_fmt->which == V4L2_SUBDEV_FORMAT_TRY) { v4l2_try_fmt = v4l2_subdev_get_try_format(csi2dc_sd, sd_state, req_fmt->pad); *v4l2_try_fmt = req_fmt->format; /* Trying on the sink pad makes the source pad change too */ v4l2_try_fmt = v4l2_subdev_get_try_format(csi2dc_sd, sd_state, CSI2DC_PAD_SOURCE); *v4l2_try_fmt = req_fmt->format; /* if we are just trying, we are done */ return 0; } /* save the format for later requests */ csi2dc->format = req_fmt->format; /* update config */ csi2dc->cur_fmt = try_fmt; dev_dbg(csi2dc->dev, "new format set: 0x%x @%dx%d\n", csi2dc->format.code, csi2dc->format.width, csi2dc->format.height); return 0; } static int csi2dc_power(struct csi2dc_device *csi2dc, int on) 
{ int ret = 0; if (on) { ret = clk_prepare_enable(csi2dc->pclk); if (ret) { dev_err(csi2dc->dev, "failed to enable pclk:%d\n", ret); return ret; } ret = clk_prepare_enable(csi2dc->scck); if (ret) { dev_err(csi2dc->dev, "failed to enable scck:%d\n", ret); clk_disable_unprepare(csi2dc->pclk); return ret; } /* if powering up, deassert reset line */ csi2dc_writel(csi2dc, CSI2DC_GCTLR, CSI2DC_GCTLR_SWRST); } else { /* if powering down, assert reset line */ csi2dc_writel(csi2dc, CSI2DC_GCTLR, 0); clk_disable_unprepare(csi2dc->scck); clk_disable_unprepare(csi2dc->pclk); } return ret; } static int csi2dc_get_mbus_config(struct csi2dc_device *csi2dc) { struct v4l2_mbus_config mbus_config = { 0 }; int ret; ret = v4l2_subdev_call(csi2dc->input_sd, pad, get_mbus_config, csi2dc->remote_pad, &mbus_config); if (ret == -ENOIOCTLCMD) { dev_dbg(csi2dc->dev, "no remote mbus configuration available\n"); return 0; } if (ret) { dev_err(csi2dc->dev, "failed to get remote mbus configuration\n"); return 0; } dev_dbg(csi2dc->dev, "subdev sending on channel %d\n", csi2dc->vc); csi2dc->clk_gated = mbus_config.bus.parallel.flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK; dev_dbg(csi2dc->dev, "mbus_config: %s clock\n", csi2dc->clk_gated ? "gated" : "free running"); return 0; } static void csi2dc_vp_update(struct csi2dc_device *csi2dc) { u32 vp, gcfg; if (!csi2dc->video_pipe) { dev_err(csi2dc->dev, "video pipeline unavailable\n"); return; } if (csi2dc->parallel_mode) { /* In parallel mode, GPIO parallel interface must be selected */ gcfg = csi2dc_readl(csi2dc, CSI2DC_GCFG); gcfg |= CSI2DC_GCFG_GPIOSEL; csi2dc_writel(csi2dc, CSI2DC_GCFG, gcfg); return; } /* serial video pipeline */ csi2dc_writel(csi2dc, CSI2DC_GCFG, (SAMA7G5_HLC & CSI2DC_GCFG_HLC_MASK) | (csi2dc->clk_gated ? 
0 : CSI2DC_GCFG_MIPIFRN)); vp = CSI2DC_VPCFG_DT(csi2dc->cur_fmt->dt) & CSI2DC_VPCFG_DT_MASK; vp |= CSI2DC_VPCFG_VC(csi2dc->vc) & CSI2DC_VPCFG_VC_MASK; vp &= ~CSI2DC_VPCFG_DE; vp |= CSI2DC_VPCFG_DM(CSI2DC_VPCFG_DM_DECODER8TO12); vp &= ~CSI2DC_VPCFG_DP2; vp &= ~CSI2DC_VPCFG_RMS; vp |= CSI2DC_VPCFG_PA; csi2dc_writel(csi2dc, CSI2DC_VPCFG, vp); csi2dc_writel(csi2dc, CSI2DC_VPE, CSI2DC_VPE_ENABLE); csi2dc_writel(csi2dc, CSI2DC_PU, CSI2DC_PU_VP); } static int csi2dc_s_stream(struct v4l2_subdev *csi2dc_sd, int enable) { struct csi2dc_device *csi2dc = csi2dc_sd_to_csi2dc_device(csi2dc_sd); int ret; if (enable) { ret = pm_runtime_resume_and_get(csi2dc->dev); if (ret < 0) return ret; csi2dc_get_mbus_config(csi2dc); csi2dc_vp_update(csi2dc); return v4l2_subdev_call(csi2dc->input_sd, video, s_stream, true); } dev_dbg(csi2dc->dev, "Last frame received: VPCOLR = %u, VPROWR= %u, VPISR = %x\n", csi2dc_readl(csi2dc, CSI2DC_VPCOL), csi2dc_readl(csi2dc, CSI2DC_VPROW), csi2dc_readl(csi2dc, CSI2DC_VPISR)); /* stop streaming scenario */ ret = v4l2_subdev_call(csi2dc->input_sd, video, s_stream, false); pm_runtime_put_sync(csi2dc->dev); return ret; } static int csi2dc_init_cfg(struct v4l2_subdev *csi2dc_sd, struct v4l2_subdev_state *sd_state) { struct v4l2_mbus_framefmt *v4l2_try_fmt = v4l2_subdev_get_try_format(csi2dc_sd, sd_state, 0); v4l2_try_fmt->height = 480; v4l2_try_fmt->width = 640; v4l2_try_fmt->code = csi2dc_formats[0].mbus_code; v4l2_try_fmt->colorspace = V4L2_COLORSPACE_SRGB; v4l2_try_fmt->field = V4L2_FIELD_NONE; v4l2_try_fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; v4l2_try_fmt->quantization = V4L2_QUANTIZATION_DEFAULT; v4l2_try_fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT; return 0; } static const struct media_entity_operations csi2dc_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static const struct v4l2_subdev_pad_ops csi2dc_pad_ops = { .enum_mbus_code = csi2dc_enum_mbus_code, .set_fmt = csi2dc_set_fmt, .get_fmt = csi2dc_get_fmt, .init_cfg = csi2dc_init_cfg, }; 
static const struct v4l2_subdev_video_ops csi2dc_video_ops = { .s_stream = csi2dc_s_stream, }; static const struct v4l2_subdev_ops csi2dc_subdev_ops = { .pad = &csi2dc_pad_ops, .video = &csi2dc_video_ops, }; static int csi2dc_async_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asd) { struct csi2dc_device *csi2dc = container_of(notifier, struct csi2dc_device, notifier); int pad; int ret; csi2dc->input_sd = subdev; pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode, MEDIA_PAD_FL_SOURCE); if (pad < 0) { dev_err(csi2dc->dev, "Failed to find pad for %s\n", subdev->name); return pad; } csi2dc->remote_pad = pad; ret = media_create_pad_link(&csi2dc->input_sd->entity, csi2dc->remote_pad, &csi2dc->csi2dc_sd.entity, 0, MEDIA_LNK_FL_ENABLED); if (ret) { dev_err(csi2dc->dev, "Failed to create pad link: %s to %s\n", csi2dc->input_sd->entity.name, csi2dc->csi2dc_sd.entity.name); return ret; } dev_dbg(csi2dc->dev, "link with %s pad: %d\n", csi2dc->input_sd->name, csi2dc->remote_pad); return ret; } static const struct v4l2_async_notifier_operations csi2dc_async_ops = { .bound = csi2dc_async_bound, }; static int csi2dc_prepare_notifier(struct csi2dc_device *csi2dc, struct fwnode_handle *input_fwnode) { struct v4l2_async_connection *asd; int ret = 0; v4l2_async_subdev_nf_init(&csi2dc->notifier, &csi2dc->csi2dc_sd); asd = v4l2_async_nf_add_fwnode_remote(&csi2dc->notifier, input_fwnode, struct v4l2_async_connection); fwnode_handle_put(input_fwnode); if (IS_ERR(asd)) { ret = PTR_ERR(asd); dev_err(csi2dc->dev, "failed to add async notifier for node %pOF: %d\n", to_of_node(input_fwnode), ret); v4l2_async_nf_cleanup(&csi2dc->notifier); return ret; } csi2dc->notifier.ops = &csi2dc_async_ops; ret = v4l2_async_nf_register(&csi2dc->notifier); if (ret) { dev_err(csi2dc->dev, "fail to register async notifier: %d\n", ret); v4l2_async_nf_cleanup(&csi2dc->notifier); } return ret; } static int csi2dc_of_parse(struct 
csi2dc_device *csi2dc, struct device_node *of_node) { struct fwnode_handle *input_fwnode, *output_fwnode; struct v4l2_fwnode_endpoint input_endpoint = { 0 }, output_endpoint = { 0 }; int ret; input_fwnode = fwnode_graph_get_next_endpoint(of_fwnode_handle(of_node), NULL); if (!input_fwnode) { dev_err(csi2dc->dev, "missing port node at %pOF, input node is mandatory.\n", of_node); return -EINVAL; } ret = v4l2_fwnode_endpoint_parse(input_fwnode, &input_endpoint); if (ret) { dev_err(csi2dc->dev, "endpoint not defined at %pOF\n", of_node); goto csi2dc_of_parse_err; } if (input_endpoint.bus_type == V4L2_MBUS_PARALLEL || input_endpoint.bus_type == V4L2_MBUS_BT656) { csi2dc->parallel_mode = true; dev_dbg(csi2dc->dev, "subdevice connected on parallel interface\n"); } if (input_endpoint.bus_type == V4L2_MBUS_CSI2_DPHY) { csi2dc->clk_gated = input_endpoint.bus.mipi_csi2.flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK; dev_dbg(csi2dc->dev, "subdevice connected on serial interface\n"); dev_dbg(csi2dc->dev, "DT: %s clock\n", csi2dc->clk_gated ? 
"gated" : "free running"); } output_fwnode = fwnode_graph_get_next_endpoint (of_fwnode_handle(of_node), input_fwnode); if (output_fwnode) ret = v4l2_fwnode_endpoint_parse(output_fwnode, &output_endpoint); fwnode_handle_put(output_fwnode); if (!output_fwnode || ret) { dev_info(csi2dc->dev, "missing output node at %pOF, data pipe available only.\n", of_node); } else { if (output_endpoint.bus_type != V4L2_MBUS_PARALLEL && output_endpoint.bus_type != V4L2_MBUS_BT656) { dev_err(csi2dc->dev, "output port must be parallel/bt656.\n"); ret = -EINVAL; goto csi2dc_of_parse_err; } csi2dc->video_pipe = true; dev_dbg(csi2dc->dev, "block %pOF [%d.%d]->[%d.%d] video pipeline\n", of_node, input_endpoint.base.port, input_endpoint.base.id, output_endpoint.base.port, output_endpoint.base.id); } /* prepare async notifier for subdevice completion */ return csi2dc_prepare_notifier(csi2dc, input_fwnode); csi2dc_of_parse_err: fwnode_handle_put(input_fwnode); return ret; } static void csi2dc_default_format(struct csi2dc_device *csi2dc) { csi2dc->cur_fmt = &csi2dc_formats[0]; csi2dc->format.height = 480; csi2dc->format.width = 640; csi2dc->format.code = csi2dc_formats[0].mbus_code; csi2dc->format.colorspace = V4L2_COLORSPACE_SRGB; csi2dc->format.field = V4L2_FIELD_NONE; csi2dc->format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; csi2dc->format.quantization = V4L2_QUANTIZATION_DEFAULT; csi2dc->format.xfer_func = V4L2_XFER_FUNC_DEFAULT; } static int csi2dc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct csi2dc_device *csi2dc; int ret = 0; u32 ver; csi2dc = devm_kzalloc(dev, sizeof(*csi2dc), GFP_KERNEL); if (!csi2dc) return -ENOMEM; csi2dc->dev = dev; csi2dc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(csi2dc->base)) { dev_err(dev, "base address not set\n"); return PTR_ERR(csi2dc->base); } csi2dc->pclk = devm_clk_get(dev, "pclk"); if (IS_ERR(csi2dc->pclk)) { ret = PTR_ERR(csi2dc->pclk); dev_err(dev, "failed to get pclk: %d\n", ret); return ret; } csi2dc->scck 
= devm_clk_get(dev, "scck"); if (IS_ERR(csi2dc->scck)) { ret = PTR_ERR(csi2dc->scck); dev_err(dev, "failed to get scck: %d\n", ret); return ret; } v4l2_subdev_init(&csi2dc->csi2dc_sd, &csi2dc_subdev_ops); csi2dc->csi2dc_sd.owner = THIS_MODULE; csi2dc->csi2dc_sd.dev = dev; snprintf(csi2dc->csi2dc_sd.name, sizeof(csi2dc->csi2dc_sd.name), "csi2dc"); csi2dc->csi2dc_sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; csi2dc->csi2dc_sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; csi2dc->csi2dc_sd.entity.ops = &csi2dc_entity_ops; platform_set_drvdata(pdev, csi2dc); ret = csi2dc_of_parse(csi2dc, dev->of_node); if (ret) goto csi2dc_probe_cleanup_entity; csi2dc->pads[CSI2DC_PAD_SINK].flags = MEDIA_PAD_FL_SINK; if (csi2dc->video_pipe) csi2dc->pads[CSI2DC_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&csi2dc->csi2dc_sd.entity, csi2dc->video_pipe ? CSI2DC_PADS_NUM : 1, csi2dc->pads); if (ret < 0) { dev_err(dev, "media entity init failed\n"); goto csi2dc_probe_cleanup_notifier; } csi2dc_default_format(csi2dc); /* turn power on to validate capabilities */ ret = csi2dc_power(csi2dc, true); if (ret < 0) goto csi2dc_probe_cleanup_notifier; pm_runtime_set_active(dev); pm_runtime_enable(dev); ver = csi2dc_readl(csi2dc, CSI2DC_VERSION); /* * we must register the subdev after PM runtime has been requested, * otherwise we might bound immediately and request pm_runtime_resume * before runtime_enable. 
*/ ret = v4l2_async_register_subdev(&csi2dc->csi2dc_sd); if (ret) { dev_err(csi2dc->dev, "failed to register the subdevice\n"); goto csi2dc_probe_cleanup_notifier; } dev_info(dev, "Microchip CSI2DC version %x\n", ver); return 0; csi2dc_probe_cleanup_notifier: v4l2_async_nf_cleanup(&csi2dc->notifier); csi2dc_probe_cleanup_entity: media_entity_cleanup(&csi2dc->csi2dc_sd.entity); return ret; } static void csi2dc_remove(struct platform_device *pdev) { struct csi2dc_device *csi2dc = platform_get_drvdata(pdev); pm_runtime_disable(&pdev->dev); v4l2_async_unregister_subdev(&csi2dc->csi2dc_sd); v4l2_async_nf_unregister(&csi2dc->notifier); v4l2_async_nf_cleanup(&csi2dc->notifier); media_entity_cleanup(&csi2dc->csi2dc_sd.entity); } static int __maybe_unused csi2dc_runtime_suspend(struct device *dev) { struct csi2dc_device *csi2dc = dev_get_drvdata(dev); return csi2dc_power(csi2dc, false); } static int __maybe_unused csi2dc_runtime_resume(struct device *dev) { struct csi2dc_device *csi2dc = dev_get_drvdata(dev); return csi2dc_power(csi2dc, true); } static const struct dev_pm_ops csi2dc_dev_pm_ops = { SET_RUNTIME_PM_OPS(csi2dc_runtime_suspend, csi2dc_runtime_resume, NULL) }; static const struct of_device_id csi2dc_of_match[] = { { .compatible = "microchip,sama7g5-csi2dc" }, { } }; MODULE_DEVICE_TABLE(of, csi2dc_of_match); static struct platform_driver csi2dc_driver = { .probe = csi2dc_probe, .remove_new = csi2dc_remove, .driver = { .name = "microchip-csi2dc", .pm = &csi2dc_dev_pm_ops, .of_match_table = of_match_ptr(csi2dc_of_match), }, }; module_platform_driver(csi2dc_driver); MODULE_AUTHOR("Eugen Hristev <[email protected]>"); MODULE_DESCRIPTION("Microchip CSI2 Demux Controller driver"); MODULE_LICENSE("GPL v2");
/*
 * --- end of drivers/media/platform/microchip/microchip-csi2dc.c
 *     (source tree: linux-master) ---
 */
// SPDX-License-Identifier: GPL-2.0 /* * Microchip Image Sensor Controller (ISC) driver * * Copyright (C) 2016-2019 Microchip Technology, Inc. * * Author: Songjun Wu * Author: Eugen Hristev <[email protected]> * * * Sensor-->PFE-->WB-->CFA-->CC-->GAM-->CSC-->CBC-->SUB-->RLP-->DMA * * ISC video pipeline integrates the following submodules: * PFE: Parallel Front End to sample the camera sensor input stream * WB: Programmable white balance in the Bayer domain * CFA: Color filter array interpolation module * CC: Programmable color correction * GAM: Gamma correction * CSC: Programmable color space conversion * CBC: Contrast and Brightness control * SUB: This module performs YCbCr444 to YCbCr420 chrominance subsampling * RLP: This module performs rounding, range limiting * and packing of the incoming data */ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/math64.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/videodev2.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-image-sizes.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-subdev.h> #include <media/videobuf2-dma-contig.h> #include "microchip-isc-regs.h" #include "microchip-isc.h" #define ISC_SAMA5D2_MAX_SUPPORT_WIDTH 2592 #define ISC_SAMA5D2_MAX_SUPPORT_HEIGHT 1944 #define ISC_SAMA5D2_PIPELINE \ (WB_ENABLE | CFA_ENABLE | CC_ENABLE | GAM_ENABLES | CSC_ENABLE | \ CBC_ENABLE | SUB422_ENABLE | SUB420_ENABLE) /* This is a list of the formats that the ISC can *output* */ static const struct isc_format sama5d2_controller_formats[] = { { .fourcc = V4L2_PIX_FMT_ARGB444, }, { .fourcc = V4L2_PIX_FMT_ARGB555, }, { .fourcc = V4L2_PIX_FMT_RGB565, }, { .fourcc = V4L2_PIX_FMT_ABGR32, }, { .fourcc 
= V4L2_PIX_FMT_XBGR32, }, { .fourcc = V4L2_PIX_FMT_YUV420, }, { .fourcc = V4L2_PIX_FMT_YUYV, }, { .fourcc = V4L2_PIX_FMT_YUV422P, }, { .fourcc = V4L2_PIX_FMT_GREY, }, { .fourcc = V4L2_PIX_FMT_Y10, }, { .fourcc = V4L2_PIX_FMT_SBGGR8, .raw = true, }, { .fourcc = V4L2_PIX_FMT_SGBRG8, .raw = true, }, { .fourcc = V4L2_PIX_FMT_SGRBG8, .raw = true, }, { .fourcc = V4L2_PIX_FMT_SRGGB8, .raw = true, }, { .fourcc = V4L2_PIX_FMT_SBGGR10, .raw = true, }, { .fourcc = V4L2_PIX_FMT_SGBRG10, .raw = true, }, { .fourcc = V4L2_PIX_FMT_SGRBG10, .raw = true, }, { .fourcc = V4L2_PIX_FMT_SRGGB10, .raw = true, }, { .fourcc = V4L2_PIX_FMT_SBGGR12, .raw = true, }, { .fourcc = V4L2_PIX_FMT_SGBRG12, .raw = true, }, { .fourcc = V4L2_PIX_FMT_SGRBG12, .raw = true, }, { .fourcc = V4L2_PIX_FMT_SRGGB12, .raw = true, }, }; /* This is a list of formats that the ISC can receive as *input* */ static struct isc_format sama5d2_formats_list[] = { { .fourcc = V4L2_PIX_FMT_SBGGR8, .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8, .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT, .cfa_baycfg = ISC_BAY_CFG_BGBG, }, { .fourcc = V4L2_PIX_FMT_SGBRG8, .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8, .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT, .cfa_baycfg = ISC_BAY_CFG_GBGB, }, { .fourcc = V4L2_PIX_FMT_SGRBG8, .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8, .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT, .cfa_baycfg = ISC_BAY_CFG_GRGR, }, { .fourcc = V4L2_PIX_FMT_SRGGB8, .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8, .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT, .cfa_baycfg = ISC_BAY_CFG_RGRG, }, { .fourcc = V4L2_PIX_FMT_SBGGR10, .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10, .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN, .cfa_baycfg = ISC_BAY_CFG_RGRG, }, { .fourcc = V4L2_PIX_FMT_SGBRG10, .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10, .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN, .cfa_baycfg = ISC_BAY_CFG_GBGB, }, { .fourcc = V4L2_PIX_FMT_SGRBG10, .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10, .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN, .cfa_baycfg = ISC_BAY_CFG_GRGR, }, { .fourcc = V4L2_PIX_FMT_SRGGB10, .mbus_code = 
MEDIA_BUS_FMT_SRGGB10_1X10, .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN, .cfa_baycfg = ISC_BAY_CFG_RGRG, }, { .fourcc = V4L2_PIX_FMT_SBGGR12, .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12, .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE, .cfa_baycfg = ISC_BAY_CFG_BGBG, }, { .fourcc = V4L2_PIX_FMT_SGBRG12, .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12, .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE, .cfa_baycfg = ISC_BAY_CFG_GBGB, }, { .fourcc = V4L2_PIX_FMT_SGRBG12, .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12, .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE, .cfa_baycfg = ISC_BAY_CFG_GRGR, }, { .fourcc = V4L2_PIX_FMT_SRGGB12, .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12, .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TWELVE, .cfa_baycfg = ISC_BAY_CFG_RGRG, }, { .fourcc = V4L2_PIX_FMT_GREY, .mbus_code = MEDIA_BUS_FMT_Y8_1X8, .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT, }, { .fourcc = V4L2_PIX_FMT_YUYV, .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT, }, { .fourcc = V4L2_PIX_FMT_RGB565, .mbus_code = MEDIA_BUS_FMT_RGB565_2X8_LE, .pfe_cfg0_bps = ISC_PFE_CFG0_BPS_EIGHT, }, { .fourcc = V4L2_PIX_FMT_Y10, .mbus_code = MEDIA_BUS_FMT_Y10_1X10, .pfe_cfg0_bps = ISC_PFG_CFG0_BPS_TEN, }, }; static void isc_sama5d2_config_csc(struct isc_device *isc) { struct regmap *regmap = isc->regmap; /* Convert RGB to YUV */ regmap_write(regmap, ISC_CSC_YR_YG + isc->offsets.csc, 0x42 | (0x81 << 16)); regmap_write(regmap, ISC_CSC_YB_OY + isc->offsets.csc, 0x19 | (0x10 << 16)); regmap_write(regmap, ISC_CSC_CBR_CBG + isc->offsets.csc, 0xFDA | (0xFB6 << 16)); regmap_write(regmap, ISC_CSC_CBB_OCB + isc->offsets.csc, 0x70 | (0x80 << 16)); regmap_write(regmap, ISC_CSC_CRR_CRG + isc->offsets.csc, 0x70 | (0xFA2 << 16)); regmap_write(regmap, ISC_CSC_CRB_OCR + isc->offsets.csc, 0xFEE | (0x80 << 16)); } static void isc_sama5d2_config_cbc(struct isc_device *isc) { struct regmap *regmap = isc->regmap; regmap_write(regmap, ISC_CBC_BRIGHT + isc->offsets.cbc, isc->ctrls.brightness); regmap_write(regmap, ISC_CBC_CONTRAST + isc->offsets.cbc, 
isc->ctrls.contrast); } static void isc_sama5d2_config_cc(struct isc_device *isc) { struct regmap *regmap = isc->regmap; /* Configure each register at the neutral fixed point 1.0 or 0.0 */ regmap_write(regmap, ISC_CC_RR_RG, (1 << 8)); regmap_write(regmap, ISC_CC_RB_OR, 0); regmap_write(regmap, ISC_CC_GR_GG, (1 << 8) << 16); regmap_write(regmap, ISC_CC_GB_OG, 0); regmap_write(regmap, ISC_CC_BR_BG, 0); regmap_write(regmap, ISC_CC_BB_OB, (1 << 8)); } static void isc_sama5d2_config_ctrls(struct isc_device *isc, const struct v4l2_ctrl_ops *ops) { struct isc_ctrls *ctrls = &isc->ctrls; struct v4l2_ctrl_handler *hdl = &ctrls->handler; ctrls->contrast = 256; v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST, -2048, 2047, 1, 256); } static void isc_sama5d2_config_dpc(struct isc_device *isc) { /* This module is not present on sama5d2 pipeline */ } static void isc_sama5d2_config_gam(struct isc_device *isc) { /* No specific gamma configuration */ } static void isc_sama5d2_config_rlp(struct isc_device *isc) { struct regmap *regmap = isc->regmap; u32 rlp_mode = isc->config.rlp_cfg_mode; /* * In sama5d2, the YUV planar modes and the YUYV modes are treated * in the same way in RLP register. * Normally, YYCC mode should be Luma(n) - Color B(n) - Color R (n) * and YCYC should be Luma(n + 1) - Color B (n) - Luma (n) - Color R (n) * but in sama5d2, the YCYC mode does not exist, and YYCC must be * selected for both planar and interleaved modes, as in fact * both modes are supported. * * Thus, if the YCYC mode is selected, replace it with the * sama5d2-compliant mode which is YYCC . 
*/ if ((rlp_mode & ISC_RLP_CFG_MODE_MASK) == ISC_RLP_CFG_MODE_YCYC) { rlp_mode &= ~ISC_RLP_CFG_MODE_MASK; rlp_mode |= ISC_RLP_CFG_MODE_YYCC; } regmap_update_bits(regmap, ISC_RLP_CFG + isc->offsets.rlp, ISC_RLP_CFG_MODE_MASK, rlp_mode); } static void isc_sama5d2_adapt_pipeline(struct isc_device *isc) { isc->try_config.bits_pipeline &= ISC_SAMA5D2_PIPELINE; } /* Gamma table with gamma 1/2.2 */ static const u32 isc_sama5d2_gamma_table[][GAMMA_ENTRIES] = { /* 0 --> gamma 1/1.8 */ { 0x65, 0x66002F, 0x950025, 0xBB0020, 0xDB001D, 0xF8001A, 0x1130018, 0x12B0017, 0x1420016, 0x1580014, 0x16D0013, 0x1810012, 0x1940012, 0x1A60012, 0x1B80011, 0x1C90010, 0x1DA0010, 0x1EA000F, 0x1FA000F, 0x209000F, 0x218000F, 0x227000E, 0x235000E, 0x243000E, 0x251000E, 0x25F000D, 0x26C000D, 0x279000D, 0x286000D, 0x293000C, 0x2A0000C, 0x2AC000C, 0x2B8000C, 0x2C4000C, 0x2D0000B, 0x2DC000B, 0x2E7000B, 0x2F3000B, 0x2FE000B, 0x309000B, 0x314000B, 0x31F000A, 0x32A000A, 0x334000B, 0x33F000A, 0x349000A, 0x354000A, 0x35E000A, 0x368000A, 0x372000A, 0x37C000A, 0x386000A, 0x3900009, 0x399000A, 0x3A30009, 0x3AD0009, 0x3B60009, 0x3BF000A, 0x3C90009, 0x3D20009, 0x3DB0009, 0x3E40009, 0x3ED0009, 0x3F60009 }, /* 1 --> gamma 1/2 */ { 0x7F, 0x800034, 0xB50028, 0xDE0021, 0x100001E, 0x11E001B, 0x1390019, 0x1520017, 0x16A0015, 0x1800014, 0x1940014, 0x1A80013, 0x1BB0012, 0x1CD0011, 0x1DF0010, 0x1EF0010, 0x200000F, 0x20F000F, 0x21F000E, 0x22D000F, 0x23C000E, 0x24A000E, 0x258000D, 0x265000D, 0x273000C, 0x27F000D, 0x28C000C, 0x299000C, 0x2A5000C, 0x2B1000B, 0x2BC000C, 0x2C8000B, 0x2D3000C, 0x2DF000B, 0x2EA000A, 0x2F5000A, 0x2FF000B, 0x30A000A, 0x314000B, 0x31F000A, 0x329000A, 0x333000A, 0x33D0009, 0x3470009, 0x350000A, 0x35A0009, 0x363000A, 0x36D0009, 0x3760009, 0x37F0009, 0x3880009, 0x3910009, 0x39A0009, 0x3A30009, 0x3AC0008, 0x3B40009, 0x3BD0008, 0x3C60008, 0x3CE0008, 0x3D60009, 0x3DF0008, 0x3E70008, 0x3EF0008, 0x3F70008 }, /* 2 --> gamma 1/2.2 */ { 0x99, 0x9B0038, 0xD4002A, 0xFF0023, 0x122001F, 0x141001B, 0x15D0019, 
0x1760017, 0x18E0015, 0x1A30015, 0x1B80013, 0x1CC0012, 0x1DE0011, 0x1F00010, 0x2010010, 0x2110010, 0x221000F, 0x230000F, 0x23F000E, 0x24D000E, 0x25B000D, 0x269000C, 0x276000C, 0x283000C, 0x28F000C, 0x29B000C, 0x2A7000C, 0x2B3000B, 0x2BF000B, 0x2CA000B, 0x2D5000B, 0x2E0000A, 0x2EB000A, 0x2F5000A, 0x2FF000A, 0x30A000A, 0x3140009, 0x31E0009, 0x327000A, 0x3310009, 0x33A0009, 0x3440009, 0x34D0009, 0x3560009, 0x35F0009, 0x3680008, 0x3710008, 0x3790009, 0x3820008, 0x38A0008, 0x3930008, 0x39B0008, 0x3A30008, 0x3AB0008, 0x3B30008, 0x3BB0008, 0x3C30008, 0x3CB0007, 0x3D20008, 0x3DA0007, 0x3E20007, 0x3E90007, 0x3F00008, 0x3F80007 }, }; static int isc_parse_dt(struct device *dev, struct isc_device *isc) { struct device_node *np = dev->of_node; struct device_node *epn = NULL; struct isc_subdev_entity *subdev_entity; unsigned int flags; int ret; INIT_LIST_HEAD(&isc->subdev_entities); while (1) { struct v4l2_fwnode_endpoint v4l2_epn = { .bus_type = 0 }; epn = of_graph_get_next_endpoint(np, epn); if (!epn) return 0; ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(epn), &v4l2_epn); if (ret) { ret = -EINVAL; dev_err(dev, "Could not parse the endpoint\n"); break; } subdev_entity = devm_kzalloc(dev, sizeof(*subdev_entity), GFP_KERNEL); if (!subdev_entity) { ret = -ENOMEM; break; } subdev_entity->epn = epn; flags = v4l2_epn.bus.parallel.flags; if (flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) subdev_entity->pfe_cfg0 = ISC_PFE_CFG0_HPOL_LOW; if (flags & V4L2_MBUS_VSYNC_ACTIVE_LOW) subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_VPOL_LOW; if (flags & V4L2_MBUS_PCLK_SAMPLE_FALLING) subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_PPOL_LOW; if (v4l2_epn.bus_type == V4L2_MBUS_BT656) subdev_entity->pfe_cfg0 |= ISC_PFE_CFG0_CCIR_CRC | ISC_PFE_CFG0_CCIR656; list_add_tail(&subdev_entity->list, &isc->subdev_entities); } of_node_put(epn); return ret; } static int microchip_isc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct isc_device *isc; void __iomem *io_base; struct isc_subdev_entity 
*subdev_entity; int irq; int ret; u32 ver; isc = devm_kzalloc(dev, sizeof(*isc), GFP_KERNEL); if (!isc) return -ENOMEM; platform_set_drvdata(pdev, isc); isc->dev = dev; io_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(io_base)) return PTR_ERR(io_base); isc->regmap = devm_regmap_init_mmio(dev, io_base, &microchip_isc_regmap_config); if (IS_ERR(isc->regmap)) { ret = PTR_ERR(isc->regmap); dev_err(dev, "failed to init register map: %d\n", ret); return ret; } irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(dev, irq, microchip_isc_interrupt, 0, "microchip-sama5d2-isc", isc); if (ret < 0) { dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n", irq, ret); return ret; } isc->gamma_table = isc_sama5d2_gamma_table; isc->gamma_max = 2; isc->max_width = ISC_SAMA5D2_MAX_SUPPORT_WIDTH; isc->max_height = ISC_SAMA5D2_MAX_SUPPORT_HEIGHT; isc->config_dpc = isc_sama5d2_config_dpc; isc->config_csc = isc_sama5d2_config_csc; isc->config_cbc = isc_sama5d2_config_cbc; isc->config_cc = isc_sama5d2_config_cc; isc->config_gam = isc_sama5d2_config_gam; isc->config_rlp = isc_sama5d2_config_rlp; isc->config_ctrls = isc_sama5d2_config_ctrls; isc->adapt_pipeline = isc_sama5d2_adapt_pipeline; isc->offsets.csc = ISC_SAMA5D2_CSC_OFFSET; isc->offsets.cbc = ISC_SAMA5D2_CBC_OFFSET; isc->offsets.sub422 = ISC_SAMA5D2_SUB422_OFFSET; isc->offsets.sub420 = ISC_SAMA5D2_SUB420_OFFSET; isc->offsets.rlp = ISC_SAMA5D2_RLP_OFFSET; isc->offsets.his = ISC_SAMA5D2_HIS_OFFSET; isc->offsets.dma = ISC_SAMA5D2_DMA_OFFSET; isc->offsets.version = ISC_SAMA5D2_VERSION_OFFSET; isc->offsets.his_entry = ISC_SAMA5D2_HIS_ENTRY_OFFSET; isc->controller_formats = sama5d2_controller_formats; isc->controller_formats_size = ARRAY_SIZE(sama5d2_controller_formats); isc->formats_list = sama5d2_formats_list; isc->formats_list_size = ARRAY_SIZE(sama5d2_formats_list); /* sama5d2-isc - 8 bits per beat */ isc->dcfg = ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8; /* sama5d2-isc : ISPCK is 
required and mandatory */ isc->ispck_required = true; ret = microchip_isc_pipeline_init(isc); if (ret) return ret; isc->hclock = devm_clk_get(dev, "hclock"); if (IS_ERR(isc->hclock)) { ret = PTR_ERR(isc->hclock); dev_err(dev, "failed to get hclock: %d\n", ret); return ret; } ret = clk_prepare_enable(isc->hclock); if (ret) { dev_err(dev, "failed to enable hclock: %d\n", ret); return ret; } ret = microchip_isc_clk_init(isc); if (ret) { dev_err(dev, "failed to init isc clock: %d\n", ret); goto unprepare_hclk; } ret = v4l2_device_register(dev, &isc->v4l2_dev); if (ret) { dev_err(dev, "unable to register v4l2 device.\n"); goto unprepare_clk; } ret = isc_parse_dt(dev, isc); if (ret) { dev_err(dev, "fail to parse device tree\n"); goto unregister_v4l2_device; } if (list_empty(&isc->subdev_entities)) { dev_err(dev, "no subdev found\n"); ret = -ENODEV; goto unregister_v4l2_device; } list_for_each_entry(subdev_entity, &isc->subdev_entities, list) { struct v4l2_async_connection *asd; struct fwnode_handle *fwnode = of_fwnode_handle(subdev_entity->epn); v4l2_async_nf_init(&subdev_entity->notifier, &isc->v4l2_dev); asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier, fwnode, struct v4l2_async_connection); of_node_put(subdev_entity->epn); subdev_entity->epn = NULL; if (IS_ERR(asd)) { ret = PTR_ERR(asd); goto cleanup_subdev; } subdev_entity->notifier.ops = &microchip_isc_async_ops; ret = v4l2_async_nf_register(&subdev_entity->notifier); if (ret) { dev_err(dev, "fail to register async notifier\n"); goto cleanup_subdev; } if (video_is_registered(&isc->video_dev)) break; } regmap_read(isc->regmap, ISC_VERSION + isc->offsets.version, &ver); ret = isc_mc_init(isc, ver); if (ret < 0) goto isc_probe_mc_init_err; pm_runtime_set_active(dev); pm_runtime_enable(dev); pm_request_idle(dev); isc->ispck = isc->isc_clks[ISC_ISPCK].clk; ret = clk_prepare_enable(isc->ispck); if (ret) { dev_err(dev, "failed to enable ispck: %d\n", ret); goto disable_pm; } /* ispck should be greater or 
equal to hclock */ ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock)); if (ret) { dev_err(dev, "failed to set ispck rate: %d\n", ret); goto unprepare_clk; } dev_info(dev, "Microchip ISC version %x\n", ver); return 0; unprepare_clk: clk_disable_unprepare(isc->ispck); disable_pm: pm_runtime_disable(dev); isc_probe_mc_init_err: isc_mc_cleanup(isc); cleanup_subdev: microchip_isc_subdev_cleanup(isc); unregister_v4l2_device: v4l2_device_unregister(&isc->v4l2_dev); unprepare_hclk: clk_disable_unprepare(isc->hclock); microchip_isc_clk_cleanup(isc); return ret; } static void microchip_isc_remove(struct platform_device *pdev) { struct isc_device *isc = platform_get_drvdata(pdev); pm_runtime_disable(&pdev->dev); isc_mc_cleanup(isc); microchip_isc_subdev_cleanup(isc); v4l2_device_unregister(&isc->v4l2_dev); clk_disable_unprepare(isc->ispck); clk_disable_unprepare(isc->hclock); microchip_isc_clk_cleanup(isc); } static int __maybe_unused isc_runtime_suspend(struct device *dev) { struct isc_device *isc = dev_get_drvdata(dev); clk_disable_unprepare(isc->ispck); clk_disable_unprepare(isc->hclock); return 0; } static int __maybe_unused isc_runtime_resume(struct device *dev) { struct isc_device *isc = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(isc->hclock); if (ret) return ret; ret = clk_prepare_enable(isc->ispck); if (ret) clk_disable_unprepare(isc->hclock); return ret; } static const struct dev_pm_ops microchip_isc_dev_pm_ops = { SET_RUNTIME_PM_OPS(isc_runtime_suspend, isc_runtime_resume, NULL) }; #if IS_ENABLED(CONFIG_OF) static const struct of_device_id microchip_isc_of_match[] = { { .compatible = "atmel,sama5d2-isc" }, { } }; MODULE_DEVICE_TABLE(of, microchip_isc_of_match); #endif static struct platform_driver microchip_isc_driver = { .probe = microchip_isc_probe, .remove_new = microchip_isc_remove, .driver = { .name = "microchip-sama5d2-isc", .pm = &microchip_isc_dev_pm_ops, .of_match_table = of_match_ptr(microchip_isc_of_match), }, }; 
/* Register the driver with the platform bus at module load, unregister at unload. */
module_platform_driver(microchip_isc_driver);

MODULE_AUTHOR("Songjun Wu");
MODULE_DESCRIPTION("The V4L2 driver for Microchip-ISC");
MODULE_LICENSE("GPL v2");
/* dataset artifact — repo: linux-master, path: drivers/media/platform/microchip/microchip-sama5d2-isc.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Microchip Image Sensor Controller (ISC) common clock driver setup
 *
 * Copyright (C) 2016 Microchip Technology, Inc.
 *
 * Author: Songjun Wu
 * Author: Eugen Hristev <[email protected]>
 *
 */
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>

#include "microchip-isc-regs.h"
#include "microchip-isc.h"

/*
 * Poll ISC_CLKSR until the Synchronization In Progress (SIP) bit clears,
 * i.e. until the last clock configuration change has been applied.
 * Returns 0 on success, -ETIMEDOUT if SIP is still set after ~1 ms.
 */
static int isc_wait_clk_stable(struct clk_hw *hw)
{
	struct isc_clk *isc_clk = to_isc_clk(hw);
	struct regmap *regmap = isc_clk->regmap;
	unsigned long timeout = jiffies + usecs_to_jiffies(1000);
	unsigned int status;

	while (time_before(jiffies, timeout)) {
		regmap_read(regmap, ISC_CLKSR, &status);
		if (!(status & ISC_CLKSR_SIP))
			return 0;

		usleep_range(10, 250);
	}

	return -ETIMEDOUT;
}

/*
 * clk_ops.prepare: resume the ISC device (runtime PM) so its registers are
 * accessible, then wait for any in-flight clock sync to finish.
 * The matching pm_runtime_put is done in isc_clk_unprepare().
 */
static int isc_clk_prepare(struct clk_hw *hw)
{
	struct isc_clk *isc_clk = to_isc_clk(hw);
	int ret;

	ret = pm_runtime_resume_and_get(isc_clk->dev);
	if (ret < 0)
		return ret;

	return isc_wait_clk_stable(hw);
}

/*
 * clk_ops.unprepare: wait for the hardware to settle, then drop the runtime
 * PM reference taken in isc_clk_prepare().
 */
static void isc_clk_unprepare(struct clk_hw *hw)
{
	struct isc_clk *isc_clk = to_isc_clk(hw);

	isc_wait_clk_stable(hw);

	pm_runtime_put_sync(isc_clk->dev);
}

/*
 * clk_ops.enable: program divider and parent selection into ISC_CLKCFG and
 * set the enable bit in ISC_CLKEN, all under isc_clk->lock. The readback of
 * ISC_CLKSR is done outside the lock.
 *
 * NOTE(review): ISC_CLKSR is read back immediately after the enable write,
 * without waiting for SIP — presumably the status bit reflects the enable
 * right away; confirm against the ISC datasheet.
 */
static int isc_clk_enable(struct clk_hw *hw)
{
	struct isc_clk *isc_clk = to_isc_clk(hw);
	u32 id = isc_clk->id;
	struct regmap *regmap = isc_clk->regmap;
	unsigned long flags;
	unsigned int status;

	dev_dbg(isc_clk->dev, "ISC CLK: %s, id = %d, div = %d, parent id = %d\n",
		__func__, id, isc_clk->div, isc_clk->parent_id);

	spin_lock_irqsave(&isc_clk->lock, flags);
	regmap_update_bits(regmap, ISC_CLKCFG,
			   ISC_CLKCFG_DIV_MASK(id) | ISC_CLKCFG_SEL_MASK(id),
			   (isc_clk->div << ISC_CLKCFG_DIV_SHIFT(id)) |
			   (isc_clk->parent_id << ISC_CLKCFG_SEL_SHIFT(id)));

	regmap_write(regmap, ISC_CLKEN, ISC_CLK(id));
	spin_unlock_irqrestore(&isc_clk->lock, flags);

	/* Verify the clock actually reports enabled. */
	regmap_read(regmap, ISC_CLKSR, &status);
	if (status & ISC_CLK(id))
		return 0;
	else
		return -EINVAL;
}

/* clk_ops.disable: write the clock's bit to the disable register. */
static void isc_clk_disable(struct clk_hw *hw)
{
	struct isc_clk *isc_clk = to_isc_clk(hw);
	u32 id = isc_clk->id;
	unsigned long flags;

	spin_lock_irqsave(&isc_clk->lock, flags);
	regmap_write(isc_clk->regmap, ISC_CLKDIS, ISC_CLK(id));
	spin_unlock_irqrestore(&isc_clk->lock, flags);
}

/*
 * clk_ops.is_enabled: read the enabled bit from ISC_CLKSR.
 * The device is resumed for the register access; if resume fails the clock
 * is reported as disabled (0) rather than propagating the error.
 */
static int isc_clk_is_enabled(struct clk_hw *hw)
{
	struct isc_clk *isc_clk = to_isc_clk(hw);
	u32 status;
	int ret;

	ret = pm_runtime_resume_and_get(isc_clk->dev);
	if (ret < 0)
		return 0;

	regmap_read(isc_clk->regmap, ISC_CLKSR, &status);

	pm_runtime_put_sync(isc_clk->dev);

	return status & ISC_CLK(isc_clk->id) ? 1 : 0;
}

/*
 * clk_ops.recalc_rate: the hardware divides the parent by (div + 1),
 * where isc_clk->div is the raw register field value.
 */
static unsigned long
isc_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct isc_clk *isc_clk = to_isc_clk(hw);

	return DIV_ROUND_CLOSEST(parent_rate, isc_clk->div + 1);
}

/*
 * clk_ops.determine_rate: scan every parent and every divider value
 * (1 .. ISC_CLK_MAX_DIV + 1) for the achievable rate closest to the
 * requested one. The inner loop stops early once an exact match is found
 * or once rates drop below the request (further dividers only get worse).
 */
static int isc_clk_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct isc_clk *isc_clk = to_isc_clk(hw);
	long best_rate = -EINVAL;
	int best_diff = -1;
	unsigned int i, div;

	for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
		struct clk_hw *parent;
		unsigned long parent_rate;

		parent = clk_hw_get_parent_by_index(hw, i);
		if (!parent)
			continue;

		parent_rate = clk_hw_get_rate(parent);
		if (!parent_rate)
			continue;

		for (div = 1; div < ISC_CLK_MAX_DIV + 2; div++) {
			unsigned long rate;
			int diff;

			rate = DIV_ROUND_CLOSEST(parent_rate, div);
			diff = abs(req->rate - rate);

			if (best_diff < 0 || best_diff > diff) {
				best_rate = rate;
				best_diff = diff;
				req->best_parent_rate = parent_rate;
				req->best_parent_hw = parent;
			}

			if (!best_diff || rate < req->rate)
				break;
		}

		if (!best_diff)
			break;
	}

	dev_dbg(isc_clk->dev,
		"ISC CLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
		__func__, best_rate,
		__clk_get_name((req->best_parent_hw)->clk),
		req->best_parent_rate);

	if (best_rate < 0)
		return best_rate;

	req->rate = best_rate;

	return 0;
}

/*
 * clk_ops.set_parent: only record the selected parent index; the hardware
 * mux is programmed later, in isc_clk_enable().
 */
static int isc_clk_set_parent(struct clk_hw *hw, u8 index)
{
	struct isc_clk *isc_clk = to_isc_clk(hw);

	if (index >= clk_hw_get_num_parents(hw))
		return -EINVAL;

	isc_clk->parent_id = index;

	return 0;
}

/* clk_ops.get_parent: return the cached parent index. */
static u8 isc_clk_get_parent(struct clk_hw *hw)
{
	struct isc_clk *isc_clk = to_isc_clk(hw);

	return isc_clk->parent_id;
}

/*
 * clk_ops.set_rate: convert the requested rate into a divider register
 * value (stored as div - 1, matching recalc_rate above); the register is
 * written later, in isc_clk_enable().
 */
static int isc_clk_set_rate(struct clk_hw *hw,
			    unsigned long rate,
			    unsigned long parent_rate)
{
	struct isc_clk *isc_clk = to_isc_clk(hw);
	u32 div;

	if (!rate)
		return -EINVAL;

	div = DIV_ROUND_CLOSEST(parent_rate, rate);
	if (div > (ISC_CLK_MAX_DIV + 1) || !div)
		return -EINVAL;

	isc_clk->div = div - 1;

	return 0;
}

static const struct clk_ops isc_clk_ops = {
	.prepare	= isc_clk_prepare,
	.unprepare	= isc_clk_unprepare,
	.enable		= isc_clk_enable,
	.disable	= isc_clk_disable,
	.is_enabled	= isc_clk_is_enabled,
	.recalc_rate	= isc_clk_recalc_rate,
	.determine_rate	= isc_clk_determine_rate,
	.set_parent	= isc_clk_set_parent,
	.get_parent	= isc_clk_get_parent,
	.set_rate	= isc_clk_set_rate,
};

/*
 * Register one ISC clock (MCK or ISPCK) with the common clock framework.
 * ISPCK is skipped entirely when the variant does not require it. Only the
 * MCK clock is exported as an OF clock provider; ISPCK gets the fixed name
 * "isc-ispck" and is limited to at most two parents.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): `init` is stack-local and referenced via isc_clk->hw.init —
 * presumably clk_register() copies the init data before returning; confirm
 * against the clk framework documentation.
 */
static int isc_clk_register(struct isc_device *isc, unsigned int id)
{
	struct regmap *regmap = isc->regmap;
	struct device_node *np = isc->dev->of_node;
	struct isc_clk *isc_clk;
	struct clk_init_data init;
	const char *clk_name = np->name;
	const char *parent_names[3];
	int num_parents;

	if (id == ISC_ISPCK && !isc->ispck_required)
		return 0;

	num_parents = of_clk_get_parent_count(np);
	if (num_parents < 1 || num_parents > 3)
		return -EINVAL;

	/* ISPCK cannot use the third parent clock. */
	if (num_parents > 2 && id == ISC_ISPCK)
		num_parents = 2;

	of_clk_parent_fill(np, parent_names, num_parents);

	if (id == ISC_MCK)
		of_property_read_string(np, "clock-output-names", &clk_name);
	else
		clk_name = "isc-ispck";

	init.parent_names	= parent_names;
	init.num_parents	= num_parents;
	init.name		= clk_name;
	init.ops		= &isc_clk_ops;
	init.flags		= CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;

	isc_clk = &isc->isc_clks[id];
	isc_clk->hw.init = &init;
	isc_clk->regmap = regmap;
	isc_clk->id = id;
	isc_clk->dev = isc->dev;
	spin_lock_init(&isc_clk->lock);

	isc_clk->clk = clk_register(isc->dev, &isc_clk->hw);
	if (IS_ERR(isc_clk->clk)) {
		dev_err(isc->dev, "%s: clock register fail\n", clk_name);
		return PTR_ERR(isc_clk->clk);
	} else if (id == ISC_MCK) {
		of_clk_add_provider(np, of_clk_src_simple_get, isc_clk->clk);
	}

	return 0;
}

/*
 * Register all ISC clocks. Every slot is first marked with ERR_PTR(-EINVAL)
 * so that microchip_isc_clk_cleanup() can tell registered clocks apart from
 * never-registered ones on a partial failure.
 * Returns 0 on success or the first registration error.
 */
int microchip_isc_clk_init(struct isc_device *isc)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++)
		isc->isc_clks[i].clk = ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
		ret = isc_clk_register(isc, i);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(microchip_isc_clk_init);

/*
 * Undo microchip_isc_clk_init(): remove the OF clock provider and
 * unregister every clock that was successfully registered (slots still
 * holding an ERR_PTR are skipped).
 */
void microchip_isc_clk_cleanup(struct isc_device *isc)
{
	unsigned int i;

	of_clk_del_provider(isc->dev->of_node);

	for (i = 0; i < ARRAY_SIZE(isc->isc_clks); i++) {
		struct isc_clk *isc_clk = &isc->isc_clks[i];

		if (!IS_ERR(isc_clk->clk))
			clk_unregister(isc_clk->clk);
	}
}
EXPORT_SYMBOL_GPL(microchip_isc_clk_cleanup);
/* dataset artifact — repo: linux-master, path: drivers/media/platform/microchip/microchip-isc-clk.c */
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for the VIA Chrome integrated camera controller. * * Copyright 2009,2010 Jonathan Corbet <[email protected]> * * This work was supported by the One Laptop Per Child project */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/list.h> #include <linux/pci.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/v4l2-image-sizes.h> #include <media/i2c/ov7670.h> #include <media/videobuf2-dma-sg.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/pm_qos.h> #include <linux/via-core.h> #include <linux/via_i2c.h> #ifdef CONFIG_X86 #include <asm/olpc.h> #else #define machine_is_olpc(x) 0 #endif #include "via-camera.h" MODULE_ALIAS("platform:viafb-camera"); MODULE_AUTHOR("Jonathan Corbet <[email protected]>"); MODULE_DESCRIPTION("VIA framebuffer-based camera controller driver"); MODULE_LICENSE("GPL"); static bool flip_image; module_param(flip_image, bool, 0444); MODULE_PARM_DESC(flip_image, "If set, the sensor will be instructed to flip the image vertically."); static bool override_serial; module_param(override_serial, bool, 0444); MODULE_PARM_DESC(override_serial, "The camera driver will normally refuse to load if the XO 1.5 serial port is enabled. Set this option to force-enable the camera."); /* * The structure describing our camera. 
*/ enum viacam_opstate { S_IDLE = 0, S_RUNNING = 1 }; struct via_camera { struct v4l2_device v4l2_dev; struct v4l2_ctrl_handler ctrl_handler; struct video_device vdev; struct v4l2_subdev *sensor; struct platform_device *platdev; struct viafb_dev *viadev; struct mutex lock; enum viacam_opstate opstate; unsigned long flags; struct pm_qos_request qos_request; /* * GPIO info for power/reset management */ struct gpio_desc *power_gpio; struct gpio_desc *reset_gpio; /* * I/O memory stuff. */ void __iomem *mmio; /* Where the registers live */ void __iomem *fbmem; /* Frame buffer memory */ u32 fb_offset; /* Reserved memory offset (FB) */ /* * Capture buffers and related. The controller supports * up to three, so that's what we have here. These buffers * live in frame buffer memory, so we don't call them "DMA". */ unsigned int cb_offsets[3]; /* offsets into fb mem */ u8 __iomem *cb_addrs[3]; /* Kernel-space addresses */ int n_cap_bufs; /* How many are we using? */ struct vb2_queue vq; struct list_head buffer_queue; u32 sequence; /* * Video format information. sensor_format is kept in a form * that we can use to pass to the sensor. We always run the * sensor in VGA resolution, though, and let the controller * downscale things if need be. So we keep the "real* * dimensions separately. */ struct v4l2_pix_format sensor_format; struct v4l2_pix_format user_format; u32 mbus_code; }; /* buffer for one video frame */ struct via_buffer { /* common v4l buffer stuff -- must be first */ struct vb2_v4l2_buffer vbuf; struct list_head queue; }; /* * Yes, this is a hack, but there's only going to be one of these * on any system we know of. */ static struct via_camera *via_cam_info; /* * Flag values, manipulated with bitops */ #define CF_DMA_ACTIVE 0 /* A frame is incoming */ #define CF_CONFIG_NEEDED 1 /* Must configure hardware */ /* * Nasty ugly v4l2 boilerplate. */ #define sensor_call(cam, optype, func, args...) 
\ v4l2_subdev_call(cam->sensor, optype, func, ##args) /* * Debugging and related. */ #define cam_err(cam, fmt, arg...) \ dev_err(&(cam)->platdev->dev, fmt, ##arg) #define cam_warn(cam, fmt, arg...) \ dev_warn(&(cam)->platdev->dev, fmt, ##arg) #define cam_dbg(cam, fmt, arg...) \ dev_dbg(&(cam)->platdev->dev, fmt, ##arg) /* * Format handling. This is ripped almost directly from Hans's changes * to cafe_ccic.c. It's a little unfortunate; until this change, we * didn't need to know anything about the format except its byte depth; * now this information must be managed at this level too. */ static struct via_format { __u32 pixelformat; int bpp; /* Bytes per pixel */ u32 mbus_code; } via_formats[] = { { .pixelformat = V4L2_PIX_FMT_YUYV, .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, .bpp = 2, }, /* RGB444 and Bayer should be doable, but have never been tested with this driver. RGB565 seems to work at the default resolution, but results in color corruption when being scaled by viacam_set_scaled(), and is disabled as a result. */ }; #define N_VIA_FMTS ARRAY_SIZE(via_formats) static struct via_format *via_find_format(u32 pixelformat) { unsigned i; for (i = 0; i < N_VIA_FMTS; i++) if (via_formats[i].pixelformat == pixelformat) return via_formats + i; /* Not found? Then return the first format. */ return via_formats; } /*--------------------------------------------------------------------------*/ /* * Sensor power/reset management. This piece is OLPC-specific for * sure; other configurations will have things connected differently. 
 */
/*
 * Acquire the sensor power and reset GPIOs (devm-managed, so no
 * explicit release is needed).  The power line starts low (off) and
 * the reset line starts high (asserted).  The "VGPIO3"/"VGPIO2"
 * names are board wiring — presumably OLPC XO-1.5; confirm against
 * the platform GPIO lookup table.
 *
 * Returns 0 or a negative errno from devm_gpiod_get().
 */
static int via_sensor_power_setup(struct via_camera *cam)
{
	struct device *dev = &cam->platdev->dev;

	cam->power_gpio = devm_gpiod_get(dev, "VGPIO3", GPIOD_OUT_LOW);
	if (IS_ERR(cam->power_gpio))
		return dev_err_probe(dev, PTR_ERR(cam->power_gpio),
				     "failed to get power GPIO");
	/* Request the reset line asserted */
	cam->reset_gpio = devm_gpiod_get(dev, "VGPIO2", GPIOD_OUT_HIGH);
	if (IS_ERR(cam->reset_gpio))
		return dev_err_probe(dev, PTR_ERR(cam->reset_gpio),
				     "failed to get reset GPIO");
	return 0;
}

/*
 * Power up the sensor and perform the reset dance.
 */
static void via_sensor_power_up(struct via_camera *cam)
{
	gpiod_set_value(cam->power_gpio, 1);
	gpiod_set_value(cam->reset_gpio, 1);
	msleep(20); /* Probably excessive */
	gpiod_set_value(cam->reset_gpio, 0);
	msleep(20);
}

/* Cut sensor power and leave the reset line asserted. */
static void via_sensor_power_down(struct via_camera *cam)
{
	gpiod_set_value(cam->power_gpio, 0);
	gpiod_set_value(cam->reset_gpio, 1);
}

/* Final power-down for remove/probe-failure paths. */
static void via_sensor_power_release(struct via_camera *cam)
{
	via_sensor_power_down(cam);
}

/* --------------------------------------------------------------------------*/
/* Sensor ops */

/*
 * Manage the ov7670 "flip" bit, which needs special help.
 * flip_image is presumably a module parameter declared earlier in
 * this file — confirm against the parameter block.
 */
static int viacam_set_flip(struct via_camera *cam)
{
	struct v4l2_control ctrl;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_VFLIP;
	ctrl.value = flip_image;
	return v4l2_s_ctrl(NULL, cam->sensor->ctrl_handler, &ctrl);
}

/*
 * Configure the sensor.  It's up to the caller to ensure
 * that the camera is in the correct operating state.
 *
 * Pushes cam->sensor_format/cam->mbus_code to the subdev via the
 * sensor_call() helper (defined elsewhere in this file), then sets
 * the flip control.  Returns the first failing subdev call's errno.
 */
static int viacam_configure_sensor(struct via_camera *cam)
{
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	v4l2_fill_mbus_format(&format.format, &cam->sensor_format,
			      cam->mbus_code);
	ret = sensor_call(cam, core, init, 0);
	if (ret == 0)
		ret = sensor_call(cam, pad, set_fmt, NULL, &format);
	/*
	 * OV7670 does weird things if flip is set *before* format...
	 */
	if (ret == 0)
		ret = viacam_set_flip(cam);
	return ret;
}

/* --------------------------------------------------------------------------*/
/*
 * Some simple register accessors; they assume that the lock is held.
 *
 * Should we want to support the second capture engine, we could
 * hide the register difference by adding 0x1000 to registers in the
 * 0x300-350 range.
 */
static inline void viacam_write_reg(struct via_camera *cam,
		int reg, int value)
{
	iowrite32(value, cam->mmio + reg);
}

static inline int viacam_read_reg(struct via_camera *cam, int reg)
{
	return ioread32(cam->mmio + reg);
}

/* Read-modify-write: only the bits in @mask are taken from @value. */
static inline void viacam_write_reg_mask(struct via_camera *cam,
		int reg, int value, int mask)
{
	int tmp = viacam_read_reg(cam, reg);

	tmp = (tmp & ~mask) | (value & mask);
	viacam_write_reg(cam, reg, tmp);
}

/* --------------------------------------------------------------------------*/
/* Interrupt management and handling */

/*
 * Hard-IRQ half of the shared handler.  Returns IRQ_WAKE_THREAD only
 * when our end-of-active-video bit is set (the line is shared, so
 * anything else is somebody else's interrupt).
 */
static irqreturn_t viacam_quick_irq(int irq, void *data)
{
	struct via_camera *cam = data;
	irqreturn_t ret = IRQ_NONE;
	int icv;

	/*
	 * All we do here is to clear the interrupts and tell
	 * the handler thread to wake up.
	 */
	spin_lock(&cam->viadev->reg_lock);
	icv = viacam_read_reg(cam, VCR_INTCTRL);
	if (icv & VCR_IC_EAV) {
		/* These status bits are write-1-to-clear in VCR_INTCTRL. */
		icv |= VCR_IC_EAV|VCR_IC_EVBI|VCR_IC_FFULL;
		viacam_write_reg(cam, VCR_INTCTRL, icv);
		ret = IRQ_WAKE_THREAD;
	}
	spin_unlock(&cam->viadev->reg_lock);
	return ret;
}

/*
 * Find the next buffer which has somebody waiting on it.
 * Returns NULL when not streaming or when the queue is empty.
 */
static struct via_buffer *viacam_next_buffer(struct via_camera *cam)
{
	if (cam->opstate != S_RUNNING)
		return NULL;
	if (list_empty(&cam->buffer_queue))
		return NULL;
	return list_entry(cam->buffer_queue.next, struct via_buffer, queue);
}

/*
 * The threaded IRQ handler.
 * Copies the just-completed frame out of framebuffer memory into the
 * waiting vb2 buffer and completes it.
 */
static irqreturn_t viacam_irq(int irq, void *data)
{
	struct via_camera *cam = data;
	struct via_buffer *vb;
	int bufn;
	struct sg_table *sgt;

	mutex_lock(&cam->lock);
	/*
	 * If there is no place to put the data frame, don't bother
	 * with anything else.
	 */
	vb = viacam_next_buffer(cam);
	if (vb == NULL)
		goto done;
	/*
	 * Figure out which buffer we just completed.
	 */
	/* ACTBUF names the buffer being filled *now*; ours is the previous one. */
	bufn = (viacam_read_reg(cam, VCR_INTCTRL) & VCR_IC_ACTBUF) >> 3;
	bufn -= 1;
	if (bufn < 0)
		bufn = cam->n_cap_bufs - 1;
	/*
	 * Copy over the data and let any waiters know.
	 */
	sgt = vb2_dma_sg_plane_desc(&vb->vbuf.vb2_buf, 0);
	vb->vbuf.vb2_buf.timestamp = ktime_get_ns();
	viafb_dma_copy_out_sg(cam->cb_offsets[bufn], sgt->sgl, sgt->nents);
	vb->vbuf.sequence = cam->sequence++;
	vb->vbuf.field = V4L2_FIELD_NONE;
	list_del(&vb->queue);
	vb2_buffer_done(&vb->vbuf.vb2_buf, VB2_BUF_STATE_DONE);
done:
	mutex_unlock(&cam->lock);
	return IRQ_HANDLED;
}

/*
 * These functions must mess around with the general interrupt
 * control register, which is relevant to much more than just the
 * camera.  Nothing else uses interrupts, though, as of this writing.
 * Should that situation change, we'll have to improve support at
 * the via-core level.
 */
static void viacam_int_enable(struct via_camera *cam)
{
	viacam_write_reg(cam, VCR_INTCTRL,
			VCR_IC_INTEN|VCR_IC_EAV|VCR_IC_EVBI|VCR_IC_FFULL);
	viafb_irq_enable(VDE_I_C0AVEN);
}

static void viacam_int_disable(struct via_camera *cam)
{
	viafb_irq_disable(VDE_I_C0AVEN);
	viacam_write_reg(cam, VCR_INTCTRL, 0);
}

/* --------------------------------------------------------------------------*/
/* Controller operations */

/*
 * Set up our capture buffers in framebuffer memory.
 * Uses three buffers when the reserved memory allows, two otherwise;
 * fails with -ENOMEM when even two frames don't fit.
 * Caller holds the register lock.
 */
static int viacam_ctlr_cbufs(struct via_camera *cam)
{
	int nbuf = cam->viadev->camera_fbmem_size/cam->sensor_format.sizeimage;
	int i;
	unsigned int offset;

	/*
	 * See how many buffers we can work with.
	 */
	if (nbuf >= 3) {
		cam->n_cap_bufs = 3;
		viacam_write_reg_mask(cam, VCR_CAPINTC, VCR_CI_3BUFS,
				VCR_CI_3BUFS);
	} else if (nbuf == 2) {
		cam->n_cap_bufs = 2;
		viacam_write_reg_mask(cam, VCR_CAPINTC, 0, VCR_CI_3BUFS);
	} else {
		cam_warn(cam, "Insufficient frame buffer memory\n");
		return -ENOMEM;
	}
	/*
	 * Set them up.
	 */
	offset = cam->fb_offset;
	for (i = 0; i < cam->n_cap_bufs; i++) {
		cam->cb_offsets[i] = offset;
		cam->cb_addrs[i] = cam->fbmem + offset;
		viacam_write_reg(cam, VCR_VBUF1 + i*4, offset & VCR_VBUF_MASK);
		offset += cam->sensor_format.sizeimage;
	}
	return 0;
}

/*
 * Set the scaling register for downscaling the image.
 *
 * This register works like this...  Vertical scaling is enabled
 * by bit 26; if that bit is set, downscaling is controlled by the
 * value in bits 16:25.  Those bits are divided by 1024 to get
 * the scaling factor; setting just bit 25 thus cuts the height
 * in half.
 *
 * Horizontal scaling works about the same, but it's enabled by
 * bit 11, with bits 0:10 giving the numerator of a fraction
 * (over 2048) for the scaling value.
 *
 * This function is naive in that, if the user departs from
 * the 3x4 VGA scaling factor, the image will distort.  We
 * could work around that if it really seemed important.
 */
static void viacam_set_scale(struct via_camera *cam)
{
	unsigned int avscale;
	int sf;

	if (cam->user_format.width == VGA_WIDTH)
		avscale = 0;
	else {
		sf = (cam->user_format.width*2048)/VGA_WIDTH;
		avscale = VCR_AVS_HEN | sf;
	}
	if (cam->user_format.height < VGA_HEIGHT) {
		sf = (1024*cam->user_format.height)/VGA_HEIGHT;
		avscale |= VCR_AVS_VEN | (sf << 16);
	}
	viacam_write_reg(cam, VCR_AVSCALE, avscale);
}

/*
 * Configure image-related information into the capture engine.
 * Caller holds the register lock.  Leaves VCR_CAPINTC fully
 * programmed except for the enable ("go") bit.
 */
static void viacam_ctlr_image(struct via_camera *cam)
{
	int cicreg;

	/*
	 * Disable clock before messing with stuff - from the via
	 * sample driver.
	 */
	viacam_write_reg(cam, VCR_CAPINTC, ~(VCR_CI_ENABLE|VCR_CI_CLKEN));
	/*
	 * Set up the controller for VGA resolution, modulo magic
	 * offsets from the via sample driver.
	 */
	viacam_write_reg(cam, VCR_HORRANGE, 0x06200120);
	viacam_write_reg(cam, VCR_VERTRANGE, 0x01de0000);
	viacam_set_scale(cam);
	/*
	 * Image size info.
	 */
	viacam_write_reg(cam, VCR_MAXDATA,
			(cam->sensor_format.height << 16) |
			(cam->sensor_format.bytesperline >> 3));
	viacam_write_reg(cam, VCR_MAXVBI, 0);
	viacam_write_reg(cam, VCR_VSTRIDE,
			cam->user_format.bytesperline & VCR_VS_STRIDE);
	/*
	 * Set up the capture interface control register,
	 * everything but the "go" bit.
	 *
	 * The FIFO threshold is a bit of a magic number; 8 is what
	 * VIA's sample code uses.
	 */
	cicreg = VCR_CI_CLKEN |
		0x08000000 |		/* FIFO threshold */
		VCR_CI_FLDINV |		/* OLPC-specific? */
		VCR_CI_VREFINV |	/* OLPC-specific? */
		VCR_CI_DIBOTH |		/* Capture both fields */
		VCR_CI_CCIR601_8;
	if (cam->n_cap_bufs == 3)
		cicreg |= VCR_CI_3BUFS;
	/*
	 * YUV formats need different byte swapping than RGB.
	 */
	if (cam->user_format.pixelformat == V4L2_PIX_FMT_YUYV)
		cicreg |= VCR_CI_YUYV;
	else
		cicreg |= VCR_CI_UYVY;
	viacam_write_reg(cam, VCR_CAPINTC, cicreg);
}

/*
 * Program buffers and image registers under the register spinlock,
 * then clear the "config needed" flag.
 */
static int viacam_config_controller(struct via_camera *cam)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&cam->viadev->reg_lock, flags);
	ret = viacam_ctlr_cbufs(cam);
	if (!ret)
		viacam_ctlr_image(cam);
	spin_unlock_irqrestore(&cam->viadev->reg_lock, flags);
	clear_bit(CF_CONFIG_NEEDED, &cam->flags);
	return ret;
}

/*
 * Make it start grabbing data.
 */
static void viacam_start_engine(struct via_camera *cam)
{
	spin_lock_irq(&cam->viadev->reg_lock);
	viacam_write_reg_mask(cam, VCR_CAPINTC, VCR_CI_ENABLE, VCR_CI_ENABLE);
	viacam_int_enable(cam);
	(void) viacam_read_reg(cam, VCR_CAPINTC); /* Force post */
	cam->opstate = S_RUNNING;
	spin_unlock_irq(&cam->viadev->reg_lock);
}

/* Stop capture: interrupts off, enable bit cleared, state back to idle. */
static void viacam_stop_engine(struct via_camera *cam)
{
	spin_lock_irq(&cam->viadev->reg_lock);
	viacam_int_disable(cam);
	viacam_write_reg_mask(cam, VCR_CAPINTC, 0, VCR_CI_ENABLE);
	(void) viacam_read_reg(cam, VCR_CAPINTC); /* Force post */
	cam->opstate = S_IDLE;
	spin_unlock_irq(&cam->viadev->reg_lock);
}

/* --------------------------------------------------------------------------*/
/* vb2 callback ops */

static struct via_buffer *vb2_to_via_buffer(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	return container_of(vbuf, struct via_buffer, vbuf);
}

/* .buf_queue: append the buffer to our capture queue. */
static void viacam_vb2_queue(struct vb2_buffer *vb)
{
	struct via_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
	struct via_buffer *via = vb2_to_via_buffer(vb);

	list_add_tail(&via->queue, &cam->buffer_queue);
}

/* .buf_prepare: verify the plane can hold a frame and set the payload. */
static int viacam_vb2_prepare(struct vb2_buffer *vb)
{
	struct via_camera *cam = vb2_get_drv_priv(vb->vb2_queue);

	if (vb2_plane_size(vb, 0) < cam->user_format.sizeimage) {
		cam_dbg(cam,
			"Plane size too small (%lu < %u)\n",
			vb2_plane_size(vb, 0),
			cam->user_format.sizeimage);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, cam->user_format.sizeimage);

	return 0;
}

/* .queue_setup: single plane sized for the current user format. */
static int viacam_vb2_queue_setup(struct vb2_queue *vq,
				  unsigned int *nbufs,
				  unsigned int *num_planes,
				  unsigned int sizes[],
				  struct device *alloc_devs[])
{
	struct via_camera *cam = vb2_get_drv_priv(vq);
	int size = cam->user_format.sizeimage;

	if (*num_planes)
		return sizes[0] < size ? -EINVAL : 0;

	*num_planes = 1;
	sizes[0] = size;
	return 0;
}

/*
 * .start_streaming: configure sensor/controller if needed, pin CPU
 * latency, and fire up the engine.  On failure, all queued buffers
 * are returned to vb2 in the QUEUED state.
 */
static int viacam_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct via_camera *cam = vb2_get_drv_priv(vq);
	struct via_buffer *buf, *tmp;
	int ret = 0;

	if (cam->opstate != S_IDLE) {
		ret = -EBUSY;
		goto out;
	}
	/*
	 * Configure things if need be.
	 */
	if (test_bit(CF_CONFIG_NEEDED, &cam->flags)) {
		ret = viacam_configure_sensor(cam);
		if (ret)
			goto out;
		ret = viacam_config_controller(cam);
		if (ret)
			goto out;
	}
	cam->sequence = 0;
	/*
	 * If the CPU goes into C3, the DMA transfer gets corrupted and
	 * users start filing unsightly bug reports.  Put in a "latency"
	 * requirement which will keep the CPU out of the deeper sleep
	 * states.
	 */
	cpu_latency_qos_add_request(&cam->qos_request, 50);
	viacam_start_engine(cam);
	return 0;
out:
	list_for_each_entry_safe(buf, tmp, &cam->buffer_queue, queue) {
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
	}
	return ret;
}

/* .stop_streaming: stop the engine and error out remaining buffers. */
static void viacam_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct via_camera *cam = vb2_get_drv_priv(vq);
	struct via_buffer *buf, *tmp;

	cpu_latency_qos_remove_request(&cam->qos_request);
	viacam_stop_engine(cam);

	list_for_each_entry_safe(buf, tmp, &cam->buffer_queue, queue) {
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
	}
}

static const struct vb2_ops viacam_vb2_ops = {
	.queue_setup		= viacam_vb2_queue_setup,
	.buf_queue		= viacam_vb2_queue,
	.buf_prepare		= viacam_vb2_prepare,
	.start_streaming	= viacam_vb2_start_streaming,
	.stop_streaming		= viacam_vb2_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};

/* --------------------------------------------------------------------------*/
/* File operations */

static int viacam_open(struct file *filp)
{
	struct via_camera *cam = video_drvdata(filp);
	int ret;

	/*
	 * Note the new user.  If this is the first one, we'll also
	 * need to power up the sensor.
	 */
	mutex_lock(&cam->lock);
	ret = v4l2_fh_open(filp);
	if (ret)
		goto out;
	if (v4l2_fh_is_singular_file(filp)) {
		ret = viafb_request_dma();
		if (ret) {
			/* Undo the fh_open above so the fh count stays right */
			v4l2_fh_release(filp);
			goto out;
		}
		via_sensor_power_up(cam);
		set_bit(CF_CONFIG_NEEDED, &cam->flags);
	}
out:
	mutex_unlock(&cam->lock);
	return ret;
}

static int viacam_release(struct file *filp)
{
	struct via_camera *cam = video_drvdata(filp);
	bool last_open;

	mutex_lock(&cam->lock);
	/* Must sample before _vb2_fop_release() drops the fh. */
	last_open = v4l2_fh_is_singular_file(filp);
	_vb2_fop_release(filp, NULL);
	/*
	 * Last one out needs to turn out the lights.
	 */
	if (last_open) {
		via_sensor_power_down(cam);
		viafb_release_dma();
	}
	mutex_unlock(&cam->lock);
	return 0;
}

static const struct v4l2_file_operations viacam_fops = {
	.owner		= THIS_MODULE,
	.open		= viacam_open,
	.release	= viacam_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};

/*----------------------------------------------------------------------------*/
/*
 * The long list of v4l2 ioctl ops
 */

/*
 * Only one input.
 */
static int viacam_enum_input(struct file *filp, void *priv,
		struct v4l2_input *input)
{
	if (input->index != 0)
		return -EINVAL;

	input->type = V4L2_INPUT_TYPE_CAMERA;
	strscpy(input->name, "Camera", sizeof(input->name));
	return 0;
}

static int viacam_g_input(struct file *filp, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}

static int viacam_s_input(struct file *filp, void *priv, unsigned int i)
{
	if (i != 0)
		return -EINVAL;
	return 0;
}

/*
 * Video format stuff.  Here is our default format until
 * user space messes with things.
 */
static const struct v4l2_pix_format viacam_def_pix_format = {
	.width		= VGA_WIDTH,
	.height		= VGA_HEIGHT,
	.pixelformat	= V4L2_PIX_FMT_YUYV,
	.field		= V4L2_FIELD_NONE,
	.bytesperline	= VGA_WIDTH * 2,	/* 2 bytes/pixel for YUYV */
	.sizeimage	= VGA_WIDTH * VGA_HEIGHT * 2,
	.colorspace	= V4L2_COLORSPACE_SRGB,
};

/* Default media-bus code matching the default pixel format above. */
static const u32 via_def_mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;

/* Enumerate the formats in the via_formats[] table (defined elsewhere). */
static int viacam_enum_fmt_vid_cap(struct file *filp, void *priv,
		struct v4l2_fmtdesc *fmt)
{
	if (fmt->index >= N_VIA_FMTS)
		return -EINVAL;
	fmt->pixelformat = via_formats[fmt->index].pixelformat;
	return 0;
}

/*
 * Figure out proper image dimensions, but always force the
 * sensor to VGA.
 *
 * Clamps the user's request to [QCIF, VGA]; the sensor itself always
 * runs at VGA and the capture engine scales down (see viacam_set_scale).
 */
static void viacam_fmt_pre(struct v4l2_pix_format *userfmt,
		struct v4l2_pix_format *sensorfmt)
{
	*sensorfmt = *userfmt;
	if (userfmt->width < QCIF_WIDTH || userfmt->height < QCIF_HEIGHT) {
		userfmt->width = QCIF_WIDTH;
		userfmt->height = QCIF_HEIGHT;
	}
	if (userfmt->width > VGA_WIDTH || userfmt->height > VGA_HEIGHT) {
		userfmt->width = VGA_WIDTH;
		userfmt->height = VGA_HEIGHT;
	}
	sensorfmt->width = VGA_WIDTH;
	sensorfmt->height = VGA_HEIGHT;
}

/*
 * Finish off the format pair after the sensor has had its say:
 * derive line/image sizes and propagate colorimetry from the sensor
 * format back into the user format.  User bytesperline is hardwired
 * to 2 bytes/pixel — all supported formats are presumably 16bpp at
 * the user side; confirm against via_formats[].
 */
static void viacam_fmt_post(struct v4l2_pix_format *userfmt,
		struct v4l2_pix_format *sensorfmt)
{
	struct via_format *f = via_find_format(userfmt->pixelformat);

	sensorfmt->bytesperline = sensorfmt->width * f->bpp;
	sensorfmt->sizeimage = sensorfmt->height * sensorfmt->bytesperline;
	userfmt->pixelformat = sensorfmt->pixelformat;
	userfmt->field = sensorfmt->field;
	userfmt->bytesperline = 2 * userfmt->width;
	userfmt->sizeimage = userfmt->bytesperline * userfmt->height;
	userfmt->colorspace = sensorfmt->colorspace;
	userfmt->ycbcr_enc = sensorfmt->ycbcr_enc;
	userfmt->quantization = sensorfmt->quantization;
	userfmt->xfer_func = sensorfmt->xfer_func;
}

/*
 * The real work of figuring out a workable format.
 */
static int viacam_do_try_fmt(struct via_camera *cam,
		struct v4l2_pix_format *upix, struct v4l2_pix_format *spix)
{
	int ret;
	/* A throwaway TRY state so the sensor's active format is untouched. */
	struct v4l2_subdev_pad_config pad_cfg;
	struct v4l2_subdev_state pad_state = {
		.pads = &pad_cfg,
	};
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	struct via_format *f = via_find_format(upix->pixelformat);

	upix->pixelformat = f->pixelformat;
	viacam_fmt_pre(upix, spix);
	v4l2_fill_mbus_format(&format.format, spix, f->mbus_code);
	ret = sensor_call(cam, pad, set_fmt, &pad_state, &format);
	v4l2_fill_pix_format(spix, &format.format);
	viacam_fmt_post(upix, spix);
	return ret;
}

static int viacam_try_fmt_vid_cap(struct file *filp, void *priv,
		struct v4l2_format *fmt)
{
	struct via_camera *cam = video_drvdata(filp);
	/* Scratch sensor format; fully written by viacam_do_try_fmt(). */
	struct v4l2_format sfmt;

	return viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
}

static int viacam_g_fmt_vid_cap(struct file *filp, void *priv,
		struct v4l2_format *fmt)
{
	struct via_camera *cam = video_drvdata(filp);

	fmt->fmt.pix = cam->user_format;
	return 0;
}

static int viacam_s_fmt_vid_cap(struct file *filp, void *priv,
		struct v4l2_format *fmt)
{
	struct via_camera *cam = video_drvdata(filp);
	int ret;
	struct v4l2_format sfmt;
	struct via_format *f = via_find_format(fmt->fmt.pix.pixelformat);

	/*
	 * Camera must be idle or we can't mess with the
	 * video setup.
	 */
	if (cam->opstate != S_IDLE)
		return -EBUSY;
	/*
	 * Let the sensor code look over and tweak the
	 * requested formatting.
	 */
	ret = viacam_do_try_fmt(cam, &fmt->fmt.pix, &sfmt.fmt.pix);
	if (ret)
		return ret;
	/*
	 * OK, let's commit to the new format.
	 */
	cam->user_format = fmt->fmt.pix;
	cam->sensor_format = sfmt.fmt.pix;
	cam->mbus_code = f->mbus_code;
	ret = viacam_configure_sensor(cam);
	if (!ret)
		ret = viacam_config_controller(cam);
	return ret;
}

static int viacam_querycap(struct file *filp, void *priv,
		struct v4l2_capability *cap)
{
	strscpy(cap->driver, "via-camera", sizeof(cap->driver));
	strscpy(cap->card, "via-camera", sizeof(cap->card));
	strscpy(cap->bus_info, "platform:via-camera", sizeof(cap->bus_info));
	return 0;
}

/* G/S_PARM: just forward streaming parameters to the sensor subdev. */
static int viacam_g_parm(struct file *filp, void *priv,
		struct v4l2_streamparm *parm)
{
	struct via_camera *cam = video_drvdata(filp);

	return v4l2_g_parm_cap(video_devdata(filp), cam->sensor, parm);
}

static int viacam_s_parm(struct file *filp, void *priv,
		struct v4l2_streamparm *parm)
{
	struct via_camera *cam = video_drvdata(filp);

	return v4l2_s_parm_cap(video_devdata(filp), cam->sensor, parm);
}

/* Any size between QCIF and VGA works, in one-pixel steps. */
static int viacam_enum_framesizes(struct file *filp, void *priv,
		struct v4l2_frmsizeenum *sizes)
{
	unsigned int i;

	if (sizes->index != 0)
		return -EINVAL;
	for (i = 0; i < N_VIA_FMTS; i++)
		if (sizes->pixel_format == via_formats[i].pixelformat)
			break;
	if (i >= N_VIA_FMTS)
		return -EINVAL;
	sizes->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
	sizes->stepwise.min_width = QCIF_WIDTH;
	sizes->stepwise.min_height = QCIF_HEIGHT;
	sizes->stepwise.max_width = VGA_WIDTH;
	sizes->stepwise.max_height = VGA_HEIGHT;
	sizes->stepwise.step_width = sizes->stepwise.step_height = 1;
	return 0;
}

/* Frame intervals come from the sensor, keyed by the current format. */
static int viacam_enum_frameintervals(struct file *filp, void *priv,
		struct v4l2_frmivalenum *interval)
{
	struct via_camera *cam = video_drvdata(filp);
	struct v4l2_subdev_frame_interval_enum fie = {
		.index = interval->index,
		.code = cam->mbus_code,
		.width = cam->sensor_format.width,
		.height = cam->sensor_format.height,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	unsigned int i;
	int ret;

	for (i = 0; i < N_VIA_FMTS; i++)
		if (interval->pixel_format == via_formats[i].pixelformat)
			break;
	if (i >= N_VIA_FMTS)
		return -EINVAL;
	if (interval->width < QCIF_WIDTH || interval->width > VGA_WIDTH ||
	    interval->height < QCIF_HEIGHT || interval->height > VGA_HEIGHT)
		return -EINVAL;
	ret = sensor_call(cam, pad, enum_frame_interval, NULL, &fie);
	if (ret)
		return ret;
	interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	interval->discrete = fie.interval;
	return 0;
}

static const struct v4l2_ioctl_ops viacam_ioctl_ops = {
	.vidioc_enum_input	= viacam_enum_input,
	.vidioc_g_input		= viacam_g_input,
	.vidioc_s_input		= viacam_s_input,
	.vidioc_enum_fmt_vid_cap = viacam_enum_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap	= viacam_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap	= viacam_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap	= viacam_s_fmt_vid_cap,
	.vidioc_querycap	= viacam_querycap,
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_expbuf		= vb2_ioctl_expbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
	.vidioc_g_parm		= viacam_g_parm,
	.vidioc_s_parm		= viacam_s_parm,
	.vidioc_enum_framesizes = viacam_enum_framesizes,
	.vidioc_enum_frameintervals = viacam_enum_frameintervals,
	.vidioc_subscribe_event	= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};

/*----------------------------------------------------------------------------*/
/*
 * Power management.
 */
#ifdef CONFIG_PM

static int viacam_suspend(void *priv)
{
	struct via_camera *cam = priv;
	enum viacam_opstate state = cam->opstate;

	if (cam->opstate != S_IDLE) {
		/* stop_engine() sets opstate to S_IDLE; restore it so that */
		viacam_stop_engine(cam);
		cam->opstate = state;	/* So resume restarts */
	}

	return 0;
}

static int viacam_resume(void *priv)
{
	struct via_camera *cam = priv;
	int ret = 0;

	/*
	 * Get back to a reasonable operating state.
	 */
	via_write_reg_mask(VIASR, 0x78, 0, 0x80);
	via_write_reg_mask(VIASR, 0x1e, 0xc0, 0xc0);
	viacam_int_disable(cam);
	set_bit(CF_CONFIG_NEEDED, &cam->flags);
	/*
	 * Make sure the sensor's power state is correct
	 */
	if (!list_empty(&cam->vdev.fh_list))
		via_sensor_power_up(cam);
	else
		via_sensor_power_down(cam);
	/*
	 * If it was operating, try to restart it.
	 */
	if (cam->opstate != S_IDLE) {
		mutex_lock(&cam->lock);
		ret = viacam_configure_sensor(cam);
		if (!ret)
			ret = viacam_config_controller(cam);
		mutex_unlock(&cam->lock);
		if (!ret)
			viacam_start_engine(cam);
	}

	return ret;
}

static struct viafb_pm_hooks viacam_pm_hooks = {
	.suspend = viacam_suspend,
	.resume = viacam_resume
};

#endif /* CONFIG_PM */

/*
 * Setup stuff.
 */

static const struct video_device viacam_v4l_template = {
	.name		= "via-camera",
	.minor		= -1,
	.fops		= &viacam_fops,
	.ioctl_ops	= &viacam_ioctl_ops,
	.release	= video_device_release_empty, /* Check this */
	.device_caps	= V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
			  V4L2_CAP_STREAMING,
};

/*
 * The OLPC folks put the serial port on the same pin as
 * the camera.  They also get grumpy if we break the
 * serial port and keep them from using it.  So we have
 * to check the serial enable bit and not step on it.
*/ #define VIACAM_SERIAL_DEVFN 0x88 #define VIACAM_SERIAL_CREG 0x46 #define VIACAM_SERIAL_BIT 0x40 static bool viacam_serial_is_enabled(void) { struct pci_bus *pbus = pci_find_bus(0, 0); u8 cbyte; if (!pbus) return false; pci_bus_read_config_byte(pbus, VIACAM_SERIAL_DEVFN, VIACAM_SERIAL_CREG, &cbyte); if ((cbyte & VIACAM_SERIAL_BIT) == 0) return false; /* Not enabled */ if (!override_serial) { printk(KERN_NOTICE "Via camera: serial port is enabled, " \ "refusing to load.\n"); printk(KERN_NOTICE "Specify override_serial=1 to force " \ "module loading.\n"); return true; } printk(KERN_NOTICE "Via camera: overriding serial port\n"); pci_bus_write_config_byte(pbus, VIACAM_SERIAL_DEVFN, VIACAM_SERIAL_CREG, cbyte & ~VIACAM_SERIAL_BIT); return false; } static struct ov7670_config sensor_cfg = { /* The XO-1.5 (only known user) clocks the camera at 90MHz. */ .clock_speed = 90, }; static int viacam_probe(struct platform_device *pdev) { int ret; struct i2c_adapter *sensor_adapter; struct viafb_dev *viadev = pdev->dev.platform_data; struct vb2_queue *vq; struct i2c_board_info ov7670_info = { .type = "ov7670", .addr = 0x42 >> 1, .platform_data = &sensor_cfg, }; /* * Note that there are actually two capture channels on * the device. We only deal with one for now. That * is encoded here; nothing else assumes it's dealing with * a unique capture device. */ struct via_camera *cam; /* * Ensure that frame buffer memory has been set aside for * this purpose. As an arbitrary limit, refuse to work * with less than two frames of VGA 16-bit data. * * If we ever support the second port, we'll need to set * aside more memory. */ if (viadev->camera_fbmem_size < (VGA_HEIGHT*VGA_WIDTH*4)) { printk(KERN_ERR "viacam: insufficient FB memory reserved\n"); return -ENOMEM; } if (viadev->engine_mmio == NULL) { printk(KERN_ERR "viacam: No I/O memory, so no pictures\n"); return -ENOMEM; } if (machine_is_olpc() && viacam_serial_is_enabled()) return -EBUSY; /* * Basic structure initialization. 
*/ cam = kzalloc (sizeof(struct via_camera), GFP_KERNEL); if (cam == NULL) return -ENOMEM; via_cam_info = cam; cam->platdev = pdev; cam->viadev = viadev; cam->opstate = S_IDLE; cam->user_format = cam->sensor_format = viacam_def_pix_format; mutex_init(&cam->lock); INIT_LIST_HEAD(&cam->buffer_queue); cam->mmio = viadev->engine_mmio; cam->fbmem = viadev->fbmem; cam->fb_offset = viadev->camera_fbmem_offset; cam->flags = 1 << CF_CONFIG_NEEDED; cam->mbus_code = via_def_mbus_code; /* * Tell V4L that we exist. */ ret = v4l2_device_register(&pdev->dev, &cam->v4l2_dev); if (ret) { dev_err(&pdev->dev, "Unable to register v4l2 device\n"); goto out_free; } ret = v4l2_ctrl_handler_init(&cam->ctrl_handler, 10); if (ret) goto out_unregister; cam->v4l2_dev.ctrl_handler = &cam->ctrl_handler; /* * Convince the system that we can do DMA. */ pdev->dev.dma_mask = &viadev->pdev->dma_mask; ret = dma_set_mask(&pdev->dev, 0xffffffff); if (ret) goto out_ctrl_hdl_free; /* * Fire up the capture port. The write to 0x78 looks purely * OLPCish; any system will need to tweak 0x1e. */ via_write_reg_mask(VIASR, 0x78, 0, 0x80); via_write_reg_mask(VIASR, 0x1e, 0xc0, 0xc0); /* * Get the sensor powered up. */ ret = via_sensor_power_setup(cam); if (ret) goto out_ctrl_hdl_free; via_sensor_power_up(cam); /* * See if we can't find it on the bus. The VIA_PORT_31 assumption * is OLPC-specific. 0x42 assumption is ov7670-specific. */ sensor_adapter = viafb_find_i2c_adapter(VIA_PORT_31); cam->sensor = v4l2_i2c_new_subdev_board(&cam->v4l2_dev, sensor_adapter, &ov7670_info, NULL); if (cam->sensor == NULL) { dev_err(&pdev->dev, "Unable to find the sensor!\n"); ret = -ENODEV; goto out_power_down; } /* * Get the IRQ. 
*/ viacam_int_disable(cam); ret = request_threaded_irq(viadev->pdev->irq, viacam_quick_irq, viacam_irq, IRQF_SHARED, "via-camera", cam); if (ret) goto out_power_down; vq = &cam->vq; vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; vq->drv_priv = cam; vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; vq->buf_struct_size = sizeof(struct via_buffer); vq->dev = cam->v4l2_dev.dev; vq->ops = &viacam_vb2_ops; vq->mem_ops = &vb2_dma_sg_memops; vq->lock = &cam->lock; ret = vb2_queue_init(vq); /* * Tell V4l2 that we exist. */ cam->vdev = viacam_v4l_template; cam->vdev.v4l2_dev = &cam->v4l2_dev; cam->vdev.lock = &cam->lock; cam->vdev.queue = vq; video_set_drvdata(&cam->vdev, cam); ret = video_register_device(&cam->vdev, VFL_TYPE_VIDEO, -1); if (ret) goto out_irq; #ifdef CONFIG_PM /* * Hook into PM events */ viacam_pm_hooks.private = cam; viafb_pm_register(&viacam_pm_hooks); #endif /* Power the sensor down until somebody opens the device */ via_sensor_power_down(cam); return 0; out_irq: free_irq(viadev->pdev->irq, cam); out_power_down: via_sensor_power_release(cam); out_ctrl_hdl_free: v4l2_ctrl_handler_free(&cam->ctrl_handler); out_unregister: v4l2_device_unregister(&cam->v4l2_dev); out_free: kfree(cam); return ret; } static void viacam_remove(struct platform_device *pdev) { struct via_camera *cam = via_cam_info; struct viafb_dev *viadev = pdev->dev.platform_data; video_unregister_device(&cam->vdev); v4l2_device_unregister(&cam->v4l2_dev); #ifdef CONFIG_PM viafb_pm_unregister(&viacam_pm_hooks); #endif free_irq(viadev->pdev->irq, cam); via_sensor_power_release(cam); v4l2_ctrl_handler_free(&cam->ctrl_handler); kfree(cam); via_cam_info = NULL; } static struct platform_driver viacam_driver = { .driver = { .name = "viafb-camera", }, .probe = viacam_probe, .remove_new = viacam_remove, }; module_platform_driver(viacam_driver);
linux-master
drivers/media/platform/via/via-camera.c
// SPDX-License-Identifier: GPL-2.0-or-later // Copyright 2020 IBM Corp. // Copyright (c) 2019-2020 Intel Corporation #include <linux/atomic.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_reserved_mem.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/v4l2-controls.h> #include <linux/videodev2.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/debugfs.h> #include <linux/ktime.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> #include <media/v4l2-dv-timings.h> #include <media/v4l2-event.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-dma-contig.h> #include <uapi/linux/aspeed-video.h> #define ASPEED_VIDEO_V4L2_MIN_BUF_REQ 3 #define DEVICE_NAME "aspeed-video" #define ASPEED_VIDEO_JPEG_NUM_QUALITIES 12 #define ASPEED_VIDEO_JPEG_HEADER_SIZE 10 #define ASPEED_VIDEO_JPEG_QUANT_SIZE 116 #define ASPEED_VIDEO_JPEG_DCT_SIZE 34 #define MAX_FRAME_RATE 60 #define MAX_HEIGHT 1200 #define MAX_WIDTH 1920 #define MIN_HEIGHT 480 #define MIN_WIDTH 640 #define NUM_POLARITY_CHECKS 10 #define INVALID_RESOLUTION_RETRIES 2 #define INVALID_RESOLUTION_DELAY msecs_to_jiffies(250) #define RESOLUTION_CHANGE_DELAY msecs_to_jiffies(500) #define MODE_DETECT_TIMEOUT msecs_to_jiffies(500) #define STOP_TIMEOUT msecs_to_jiffies(1000) #define DIRECT_FETCH_THRESHOLD 0x0c0000 /* 1024 * 768 */ #define VE_MAX_SRC_BUFFER_SIZE 0x8ca000 /* 1920 * 1200, 32bpp */ #define VE_JPEG_HEADER_SIZE 0x006000 /* 512 * 12 * 4 */ #define VE_BCD_BUFF_SIZE 0x9000 /* (1920/8) * (1200/8) */ #define VE_PROTECTION_KEY 0x000 #define VE_PROTECTION_KEY_UNLOCK 0x1a038aa8 #define VE_SEQ_CTRL 0x004 #define 
VE_SEQ_CTRL_TRIG_MODE_DET BIT(0) #define VE_SEQ_CTRL_TRIG_CAPTURE BIT(1) #define VE_SEQ_CTRL_FORCE_IDLE BIT(2) #define VE_SEQ_CTRL_MULT_FRAME BIT(3) #define VE_SEQ_CTRL_TRIG_COMP BIT(4) #define VE_SEQ_CTRL_AUTO_COMP BIT(5) #define VE_SEQ_CTRL_EN_WATCHDOG BIT(7) #define VE_SEQ_CTRL_YUV420 BIT(10) #define VE_SEQ_CTRL_COMP_FMT GENMASK(11, 10) #define VE_SEQ_CTRL_HALT BIT(12) #define VE_SEQ_CTRL_EN_WATCHDOG_COMP BIT(14) #define VE_SEQ_CTRL_TRIG_JPG BIT(15) #define VE_SEQ_CTRL_CAP_BUSY BIT(16) #define VE_SEQ_CTRL_COMP_BUSY BIT(18) #define AST2500_VE_SEQ_CTRL_JPEG_MODE BIT(13) #define AST2400_VE_SEQ_CTRL_JPEG_MODE BIT(8) #define VE_CTRL 0x008 #define VE_CTRL_HSYNC_POL BIT(0) #define VE_CTRL_VSYNC_POL BIT(1) #define VE_CTRL_SOURCE BIT(2) #define VE_CTRL_INT_DE BIT(4) #define VE_CTRL_DIRECT_FETCH BIT(5) #define VE_CTRL_CAPTURE_FMT GENMASK(7, 6) #define VE_CTRL_AUTO_OR_CURSOR BIT(8) #define VE_CTRL_CLK_INVERSE BIT(11) #define VE_CTRL_CLK_DELAY GENMASK(11, 9) #define VE_CTRL_INTERLACE BIT(14) #define VE_CTRL_HSYNC_POL_CTRL BIT(15) #define VE_CTRL_FRC GENMASK(23, 16) #define VE_TGS_0 0x00c #define VE_TGS_1 0x010 #define VE_TGS_FIRST GENMASK(28, 16) #define VE_TGS_LAST GENMASK(12, 0) #define VE_SCALING_FACTOR 0x014 #define VE_SCALING_FILTER0 0x018 #define VE_SCALING_FILTER1 0x01c #define VE_SCALING_FILTER2 0x020 #define VE_SCALING_FILTER3 0x024 #define VE_BCD_CTRL 0x02C #define VE_BCD_CTRL_EN_BCD BIT(0) #define VE_BCD_CTRL_EN_ABCD BIT(1) #define VE_BCD_CTRL_EN_CB BIT(2) #define VE_BCD_CTRL_THR GENMASK(23, 16) #define VE_BCD_CTRL_ABCD_THR GENMASK(31, 24) #define VE_CAP_WINDOW 0x030 #define VE_COMP_WINDOW 0x034 #define VE_COMP_PROC_OFFSET 0x038 #define VE_COMP_OFFSET 0x03c #define VE_JPEG_ADDR 0x040 #define VE_SRC0_ADDR 0x044 #define VE_SRC_SCANLINE_OFFSET 0x048 #define VE_SRC1_ADDR 0x04c #define VE_BCD_ADDR 0x050 #define VE_COMP_ADDR 0x054 #define VE_STREAM_BUF_SIZE 0x058 #define VE_STREAM_BUF_SIZE_N_PACKETS GENMASK(5, 3) #define VE_STREAM_BUF_SIZE_P_SIZE GENMASK(2, 0) #define 
VE_COMP_CTRL 0x060 #define VE_COMP_CTRL_VQ_DCT_ONLY BIT(0) #define VE_COMP_CTRL_VQ_4COLOR BIT(1) #define VE_COMP_CTRL_QUANTIZE BIT(2) #define VE_COMP_CTRL_EN_BQ BIT(4) #define VE_COMP_CTRL_EN_CRYPTO BIT(5) #define VE_COMP_CTRL_DCT_CHR GENMASK(10, 6) #define VE_COMP_CTRL_DCT_LUM GENMASK(15, 11) #define VE_COMP_CTRL_EN_HQ BIT(16) #define VE_COMP_CTRL_RSVD BIT(19) #define VE_COMP_CTRL_ENCODE GENMASK(21, 20) #define VE_COMP_CTRL_HQ_DCT_CHR GENMASK(26, 22) #define VE_COMP_CTRL_HQ_DCT_LUM GENMASK(31, 27) #define VE_CB_ADDR 0x06C #define AST2400_VE_COMP_SIZE_READ_BACK 0x078 #define AST2600_VE_COMP_SIZE_READ_BACK 0x084 #define VE_SRC_LR_EDGE_DET 0x090 #define VE_SRC_LR_EDGE_DET_LEFT GENMASK(11, 0) #define VE_SRC_LR_EDGE_DET_NO_V BIT(12) #define VE_SRC_LR_EDGE_DET_NO_H BIT(13) #define VE_SRC_LR_EDGE_DET_NO_DISP BIT(14) #define VE_SRC_LR_EDGE_DET_NO_CLK BIT(15) #define VE_SRC_LR_EDGE_DET_RT GENMASK(27, 16) #define VE_SRC_LR_EDGE_DET_INTERLACE BIT(31) #define VE_SRC_TB_EDGE_DET 0x094 #define VE_SRC_TB_EDGE_DET_TOP GENMASK(12, 0) #define VE_SRC_TB_EDGE_DET_BOT GENMASK(28, 16) #define VE_MODE_DETECT_STATUS 0x098 #define VE_MODE_DETECT_H_PERIOD GENMASK(11, 0) #define VE_MODE_DETECT_EXTSRC_ADC BIT(12) #define VE_MODE_DETECT_H_STABLE BIT(13) #define VE_MODE_DETECT_V_STABLE BIT(14) #define VE_MODE_DETECT_V_LINES GENMASK(27, 16) #define VE_MODE_DETECT_STATUS_VSYNC BIT(28) #define VE_MODE_DETECT_STATUS_HSYNC BIT(29) #define VE_MODE_DETECT_VSYNC_RDY BIT(30) #define VE_MODE_DETECT_HSYNC_RDY BIT(31) #define VE_SYNC_STATUS 0x09c #define VE_SYNC_STATUS_HSYNC GENMASK(11, 0) #define VE_SYNC_STATUS_VSYNC GENMASK(27, 16) #define VE_H_TOTAL_PIXELS 0x0A0 #define VE_INTERRUPT_CTRL 0x304 #define VE_INTERRUPT_STATUS 0x308 #define VE_INTERRUPT_MODE_DETECT_WD BIT(0) #define VE_INTERRUPT_CAPTURE_COMPLETE BIT(1) #define VE_INTERRUPT_COMP_READY BIT(2) #define VE_INTERRUPT_COMP_COMPLETE BIT(3) #define VE_INTERRUPT_MODE_DETECT BIT(4) #define VE_INTERRUPT_FRAME_COMPLETE BIT(5) #define 
VE_INTERRUPT_DECODE_ERR BIT(6) #define VE_INTERRUPT_HALT_READY BIT(8) #define VE_INTERRUPT_HANG_WD BIT(9) #define VE_INTERRUPT_STREAM_DESC BIT(10) #define VE_INTERRUPT_VSYNC_DESC BIT(11) #define VE_MODE_DETECT 0x30c #define VE_MODE_DT_HOR_TOLER GENMASK(31, 28) #define VE_MODE_DT_VER_TOLER GENMASK(27, 24) #define VE_MODE_DT_HOR_STABLE GENMASK(23, 20) #define VE_MODE_DT_VER_STABLE GENMASK(19, 16) #define VE_MODE_DT_EDG_THROD GENMASK(15, 8) #define VE_MEM_RESTRICT_START 0x310 #define VE_MEM_RESTRICT_END 0x314 /* * VIDEO_MODE_DETECT_DONE: a flag raised if signal lock * VIDEO_RES_CHANGE: a flag raised if res_change work on-going * VIDEO_RES_DETECT: a flag raised if res. detection on-going * VIDEO_STREAMING: a flag raised if user requires stream-on * VIDEO_FRAME_INPRG: a flag raised if hw working on a frame * VIDEO_STOPPED: a flag raised if device release * VIDEO_CLOCKS_ON: a flag raised if clk is on */ enum { VIDEO_MODE_DETECT_DONE, VIDEO_RES_CHANGE, VIDEO_RES_DETECT, VIDEO_STREAMING, VIDEO_FRAME_INPRG, VIDEO_STOPPED, VIDEO_CLOCKS_ON, }; enum aspeed_video_format { VIDEO_FMT_STANDARD = 0, VIDEO_FMT_ASPEED, VIDEO_FMT_MAX = VIDEO_FMT_ASPEED }; // for VE_CTRL_CAPTURE_FMT enum aspeed_video_capture_format { VIDEO_CAP_FMT_YUV_STUDIO_SWING = 0, VIDEO_CAP_FMT_YUV_FULL_SWING, VIDEO_CAP_FMT_RGB, VIDEO_CAP_FMT_GRAY, VIDEO_CAP_FMT_MAX }; struct aspeed_video_addr { unsigned int size; dma_addr_t dma; void *virt; }; struct aspeed_video_buffer { struct vb2_v4l2_buffer vb; struct list_head link; }; struct aspeed_video_perf { ktime_t last_sample; u32 totaltime; u32 duration; u32 duration_min; u32 duration_max; }; #define to_aspeed_video_buffer(x) \ container_of((x), struct aspeed_video_buffer, vb) /* * struct aspeed_video - driver data * * res_work: holds the delayed_work for res-detection if unlock * buffers: holds the list of buffer queued from user * flags: holds the state of video * sequence: holds the last number of frame completed * max_compressed_size: holds max compressed stream's 
size * srcs: holds the buffer information for srcs * jpeg: holds the buffer information for jpeg header * bcd: holds the buffer information for bcd work * yuv420: a flag raised if JPEG subsampling is 420 * format: holds the video format * hq_mode: a flag raised if HQ is enabled. Only for VIDEO_FMT_ASPEED * frame_rate: holds the frame_rate * jpeg_quality: holds jpeq's quality (0~11) * jpeg_hq_quality: holds hq's quality (1~12) only if hq_mode enabled * frame_bottom: end position of video data in vertical direction * frame_left: start position of video data in horizontal direction * frame_right: end position of video data in horizontal direction * frame_top: start position of video data in vertical direction * perf: holds the statistics primary for debugfs */ struct aspeed_video { void __iomem *base; struct clk *eclk; struct clk *vclk; struct device *dev; struct v4l2_ctrl_handler ctrl_handler; struct v4l2_device v4l2_dev; struct v4l2_pix_format pix_fmt; struct v4l2_bt_timings active_timings; struct v4l2_bt_timings detected_timings; u32 v4l2_input_status; struct vb2_queue queue; struct video_device vdev; struct mutex video_lock; /* v4l2 and videobuf2 lock */ u32 jpeg_mode; u32 comp_size_read; wait_queue_head_t wait; spinlock_t lock; /* buffer list lock */ struct delayed_work res_work; struct list_head buffers; unsigned long flags; unsigned int sequence; unsigned int max_compressed_size; struct aspeed_video_addr srcs[2]; struct aspeed_video_addr jpeg; struct aspeed_video_addr bcd; bool yuv420; enum aspeed_video_format format; bool hq_mode; unsigned int frame_rate; unsigned int jpeg_quality; unsigned int jpeg_hq_quality; unsigned int frame_bottom; unsigned int frame_left; unsigned int frame_right; unsigned int frame_top; struct aspeed_video_perf perf; }; #define to_aspeed_video(x) container_of((x), struct aspeed_video, v4l2_dev) struct aspeed_video_config { u32 jpeg_mode; u32 comp_size_read; }; static const struct aspeed_video_config ast2400_config = { .jpeg_mode = 
AST2400_VE_SEQ_CTRL_JPEG_MODE, .comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK, }; static const struct aspeed_video_config ast2500_config = { .jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE, .comp_size_read = AST2400_VE_COMP_SIZE_READ_BACK, }; static const struct aspeed_video_config ast2600_config = { .jpeg_mode = AST2500_VE_SEQ_CTRL_JPEG_MODE, .comp_size_read = AST2600_VE_COMP_SIZE_READ_BACK, }; static const u32 aspeed_video_jpeg_header[ASPEED_VIDEO_JPEG_HEADER_SIZE] = { 0xe0ffd8ff, 0x464a1000, 0x01004649, 0x60000101, 0x00006000, 0x0f00feff, 0x00002d05, 0x00000000, 0x00000000, 0x00dbff00 }; static const u32 aspeed_video_jpeg_quant[ASPEED_VIDEO_JPEG_QUANT_SIZE] = { 0x081100c0, 0x00000000, 0x00110103, 0x03011102, 0xc4ff0111, 0x00001f00, 0x01010501, 0x01010101, 0x00000000, 0x00000000, 0x04030201, 0x08070605, 0xff0b0a09, 0x10b500c4, 0x03010200, 0x03040203, 0x04040505, 0x7d010000, 0x00030201, 0x12051104, 0x06413121, 0x07615113, 0x32147122, 0x08a19181, 0xc1b14223, 0xf0d15215, 0x72623324, 0x160a0982, 0x1a191817, 0x28272625, 0x35342a29, 0x39383736, 0x4544433a, 0x49484746, 0x5554534a, 0x59585756, 0x6564635a, 0x69686766, 0x7574736a, 0x79787776, 0x8584837a, 0x89888786, 0x9493928a, 0x98979695, 0xa3a29a99, 0xa7a6a5a4, 0xb2aaa9a8, 0xb6b5b4b3, 0xbab9b8b7, 0xc5c4c3c2, 0xc9c8c7c6, 0xd4d3d2ca, 0xd8d7d6d5, 0xe2e1dad9, 0xe6e5e4e3, 0xeae9e8e7, 0xf4f3f2f1, 0xf8f7f6f5, 0xc4fffaf9, 0x00011f00, 0x01010103, 0x01010101, 0x00000101, 0x00000000, 0x04030201, 0x08070605, 0xff0b0a09, 0x11b500c4, 0x02010200, 0x04030404, 0x04040507, 0x77020100, 0x03020100, 0x21050411, 0x41120631, 0x71610751, 0x81322213, 0x91421408, 0x09c1b1a1, 0xf0523323, 0xd1726215, 0x3424160a, 0x17f125e1, 0x261a1918, 0x2a292827, 0x38373635, 0x44433a39, 0x48474645, 0x54534a49, 0x58575655, 0x64635a59, 0x68676665, 0x74736a69, 0x78777675, 0x83827a79, 0x87868584, 0x928a8988, 0x96959493, 0x9a999897, 0xa5a4a3a2, 0xa9a8a7a6, 0xb4b3b2aa, 0xb8b7b6b5, 0xc3c2bab9, 0xc7c6c5c4, 0xd2cac9c8, 0xd6d5d4d3, 0xdad9d8d7, 0xe5e4e3e2, 0xe9e8e7e6, 
0xf4f3f2ea, 0xf8f7f6f5, 0xdafffaf9, 0x01030c00, 0x03110200, 0x003f0011 }; static const u32 aspeed_video_jpeg_dct[ASPEED_VIDEO_JPEG_NUM_QUALITIES] [ASPEED_VIDEO_JPEG_DCT_SIZE] = { { 0x0d140043, 0x0c0f110f, 0x11101114, 0x17141516, 0x1e20321e, 0x3d1e1b1b, 0x32242e2b, 0x4b4c3f48, 0x44463f47, 0x61735a50, 0x566c5550, 0x88644644, 0x7a766c65, 0x4d808280, 0x8c978d60, 0x7e73967d, 0xdbff7b80, 0x1f014300, 0x272d2121, 0x3030582d, 0x697bb958, 0xb8b9b97b, 0xb9b8a6a6, 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9, 0xb9b9b9b9, 0xffb9b9b9 }, { 0x0c110043, 0x0a0d0f0d, 0x0f0e0f11, 0x14111213, 0x1a1c2b1a, 0x351a1818, 0x2b1f2826, 0x4142373f, 0x3c3d373e, 0x55644e46, 0x4b5f4a46, 0x77573d3c, 0x6b675f58, 0x43707170, 0x7a847b54, 0x6e64836d, 0xdbff6c70, 0x1b014300, 0x22271d1d, 0x2a2a4c27, 0x5b6ba04c, 0xa0a0a06b, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xa0a0a0a0, 0xffa0a0a0 }, { 0x090e0043, 0x090a0c0a, 0x0c0b0c0e, 0x110e0f10, 0x15172415, 0x2c151313, 0x241a211f, 0x36372e34, 0x31322e33, 0x4653413a, 0x3e4e3d3a, 0x62483231, 0x58564e49, 0x385d5e5d, 0x656d6645, 0x5b536c5a, 0xdbff595d, 0x16014300, 0x1c201818, 0x22223f20, 0x4b58853f, 0x85858558, 0x85858585, 0x85858585, 0x85858585, 0x85858585, 0x85858585, 0x85858585, 0x85858585, 0x85858585, 0x85858585, 0x85858585, 0x85858585, 0xff858585 }, { 0x070b0043, 0x07080a08, 0x0a090a0b, 0x0d0b0c0c, 0x11121c11, 0x23110f0f, 0x1c141a19, 0x2b2b2429, 0x27282428, 0x3842332e, 0x313e302e, 0x4e392827, 0x46443e3a, 0x2c4a4a4a, 0x50565137, 0x48425647, 0xdbff474a, 0x12014300, 0x161a1313, 0x1c1c331a, 0x3d486c33, 0x6c6c6c48, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0x6c6c6c6c, 0xff6c6c6c }, { 0x06090043, 0x05060706, 0x07070709, 0x0a09090a, 0x0d0e160d, 0x1b0d0c0c, 0x16101413, 0x21221c20, 0x1e1f1c20, 0x2b332824, 0x26302624, 0x3d2d1f1e, 0x3735302d, 0x22393a39, 
0x3f443f2b, 0x38334338, 0xdbff3739, 0x0d014300, 0x11130e0e, 0x15152613, 0x2d355026, 0x50505035, 0x50505050, 0x50505050, 0x50505050, 0x50505050, 0x50505050, 0x50505050, 0x50505050, 0x50505050, 0x50505050, 0x50505050, 0x50505050, 0xff505050 }, { 0x04060043, 0x03040504, 0x05040506, 0x07060606, 0x09090f09, 0x12090808, 0x0f0a0d0d, 0x16161315, 0x14151315, 0x1d221b18, 0x19201918, 0x281e1514, 0x2423201e, 0x17262726, 0x2a2d2a1c, 0x25222d25, 0xdbff2526, 0x09014300, 0x0b0d0a0a, 0x0e0e1a0d, 0x1f25371a, 0x37373725, 0x37373737, 0x37373737, 0x37373737, 0x37373737, 0x37373737, 0x37373737, 0x37373737, 0x37373737, 0x37373737, 0x37373737, 0x37373737, 0xff373737 }, { 0x02030043, 0x01020202, 0x02020203, 0x03030303, 0x04040704, 0x09040404, 0x07050606, 0x0b0b090a, 0x0a0a090a, 0x0e110d0c, 0x0c100c0c, 0x140f0a0a, 0x1211100f, 0x0b131313, 0x1516150e, 0x12111612, 0xdbff1213, 0x04014300, 0x05060505, 0x07070d06, 0x0f121b0d, 0x1b1b1b12, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0x1b1b1b1b, 0xff1b1b1b }, { 0x01020043, 0x01010101, 0x01010102, 0x02020202, 0x03030503, 0x06030202, 0x05030404, 0x07070607, 0x06070607, 0x090b0908, 0x080a0808, 0x0d0a0706, 0x0c0b0a0a, 0x070c0d0c, 0x0e0f0e09, 0x0c0b0f0c, 0xdbff0c0c, 0x03014300, 0x03040303, 0x04040804, 0x0a0c1208, 0x1212120c, 0x12121212, 0x12121212, 0x12121212, 0x12121212, 0x12121212, 0x12121212, 0x12121212, 0x12121212, 0x12121212, 0x12121212, 0x12121212, 0xff121212 }, { 0x01020043, 0x01010101, 0x01010102, 0x02020202, 0x03030503, 0x06030202, 0x05030404, 0x07070607, 0x06070607, 0x090b0908, 0x080a0808, 0x0d0a0706, 0x0c0b0a0a, 0x070c0d0c, 0x0e0f0e09, 0x0c0b0f0c, 0xdbff0c0c, 0x02014300, 0x03030202, 0x04040703, 0x080a0f07, 0x0f0f0f0a, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xff0f0f0f }, { 0x01010043, 0x01010101, 0x01010101, 0x01010101, 0x02020302, 0x04020202, 0x03020303, 0x05050405, 0x05050405, 
0x07080606, 0x06080606, 0x0a070505, 0x09080807, 0x05090909, 0x0a0b0a07, 0x09080b09, 0xdbff0909, 0x02014300, 0x02030202, 0x03030503, 0x07080c05, 0x0c0c0c08, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0x0c0c0c0c, 0xff0c0c0c }, { 0x01010043, 0x01010101, 0x01010101, 0x01010101, 0x01010201, 0x03010101, 0x02010202, 0x03030303, 0x03030303, 0x04050404, 0x04050404, 0x06050303, 0x06050505, 0x03060606, 0x07070704, 0x06050706, 0xdbff0606, 0x01014300, 0x01020101, 0x02020402, 0x05060904, 0x09090906, 0x09090909, 0x09090909, 0x09090909, 0x09090909, 0x09090909, 0x09090909, 0x09090909, 0x09090909, 0x09090909, 0x09090909, 0x09090909, 0xff090909 }, { 0x01010043, 0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x01010101, 0x02020202, 0x02020202, 0x03020101, 0x03020202, 0x01030303, 0x03030302, 0x03020303, 0xdbff0403, 0x01014300, 0x01010101, 0x01010201, 0x03040602, 0x06060604, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0x06060606, 0xff060606 } }; static const struct v4l2_dv_timings_cap aspeed_video_timings_cap = { .type = V4L2_DV_BT_656_1120, .bt = { .min_width = MIN_WIDTH, .max_width = MAX_WIDTH, .min_height = MIN_HEIGHT, .max_height = MAX_HEIGHT, .min_pixelclock = 6574080, /* 640 x 480 x 24Hz */ .max_pixelclock = 138240000, /* 1920 x 1200 x 60Hz */ .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT | V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF, .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM, }, }; static const char * const format_str[] = {"Standard JPEG", "Aspeed JPEG"}; static unsigned int debug; static bool aspeed_video_alloc_buf(struct aspeed_video *video, struct aspeed_video_addr *addr, unsigned int size); static void aspeed_video_free_buf(struct aspeed_video *video, struct aspeed_video_addr *addr); static void 
aspeed_video_init_jpeg_table(u32 *table, bool yuv420)
{
	int i;
	unsigned int base;

	/*
	 * Build one JPEG header per quality level: the fixed header words,
	 * then the per-quality DCT words, then the quantization words.
	 * Each quality level occupies a 256-u32 slot in the table.
	 */
	for (i = 0; i < ASPEED_VIDEO_JPEG_NUM_QUALITIES; i++) {
		base = 256 * i;	/* AST HW requires this header spacing */
		memcpy(&table[base], aspeed_video_jpeg_header,
		       sizeof(aspeed_video_jpeg_header));

		base += ASPEED_VIDEO_JPEG_HEADER_SIZE;
		memcpy(&table[base], aspeed_video_jpeg_dct[i],
		       sizeof(aspeed_video_jpeg_dct[i]));

		base += ASPEED_VIDEO_JPEG_DCT_SIZE;
		memcpy(&table[base], aspeed_video_jpeg_quant,
		       sizeof(aspeed_video_jpeg_quant));

		/* Patch the subsampling word for 4:2:0 output */
		if (yuv420)
			table[base + 2] = 0x00220103;
	}
}

// just update jpeg dct table per 420/444
static void aspeed_video_update_jpeg_table(u32 *table, bool yuv420)
{
	int i;
	unsigned int base;

	for (i = 0; i < ASPEED_VIDEO_JPEG_NUM_QUALITIES; i++) {
		base = 256 * i;	/* AST HW requires this header spacing */
		base += ASPEED_VIDEO_JPEG_HEADER_SIZE + ASPEED_VIDEO_JPEG_DCT_SIZE;

		/* 0x0022... selects 4:2:0 subsampling, 0x0011... selects 4:4:4 */
		table[base + 2] = (yuv420) ? 0x00220103 : 0x00110103;
	}
}

/* Read-modify-write a VE register: clear the 'clear' bits, then OR in 'bits' */
static void aspeed_video_update(struct aspeed_video *video, u32 reg, u32 clear,
				u32 bits)
{
	u32 t = readl(video->base + reg);
	u32 before = t;

	t &= ~clear;
	t |= bits;
	writel(t, video->base + reg);
	v4l2_dbg(3, debug, &video->v4l2_dev, "update %03x[%08x -> %08x]\n",
		 reg, before, readl(video->base + reg));
}

/* Register read with verbose (level 3) debug tracing */
static u32 aspeed_video_read(struct aspeed_video *video, u32 reg)
{
	u32 t = readl(video->base + reg);

	v4l2_dbg(3, debug, &video->v4l2_dev, "read %03x[%08x]\n", reg, t);
	return t;
}

/* Register write with verbose debug tracing (reads back for the trace only) */
static void aspeed_video_write(struct aspeed_video *video, u32 reg, u32 val)
{
	writel(val, video->base + reg);
	v4l2_dbg(3, debug, &video->v4l2_dev, "write %03x[%08x]\n", reg,
		 readl(video->base + reg));
}

/*
 * Fold the time elapsed since perf.last_sample into the per-frame duration
 * statistics (total / min / max) exposed through debugfs.
 */
static void update_perf(struct aspeed_video_perf *p)
{
	struct aspeed_video *v = container_of(p, struct aspeed_video,
					      perf);

	p->duration = ktime_to_ms(ktime_sub(ktime_get(), p->last_sample));
	p->totaltime += p->duration;
	p->duration_max = max(p->duration, p->duration_max);
	p->duration_min = min(p->duration, p->duration_min);
	v4l2_dbg(2, debug, &v->v4l2_dev, "time consumed: %d ms\n",
		 p->duration);
}

/*
 * Start capture + compression of one frame into the first queued vb2 buffer.
 * Returns 0 when there is nothing to do (no input signal), -EBUSY when the
 * engine is not ready, -ENOMEM on BCD allocation failure, -EPROTO when no
 * buffer is queued.
 */
static int aspeed_video_start_frame(struct aspeed_video *video)
{
	dma_addr_t addr;
	unsigned long flags;
	struct aspeed_video_buffer *buf;
	u32 seq_ctrl = aspeed_video_read(video, VE_SEQ_CTRL);
	bool bcd_buf_need = (video->format != VIDEO_FMT_STANDARD);

	if (video->v4l2_input_status) {
		v4l2_dbg(1, debug, &video->v4l2_dev, "No signal; don't start frame\n");
		return 0;
	}

	/*
	 * NOTE(review): the BUSY bits are tested inverted relative to their
	 * names; this matches the existing driver behaviour - confirm against
	 * the datasheet before changing.
	 */
	if (!(seq_ctrl & VE_SEQ_CTRL_COMP_BUSY) ||
	    !(seq_ctrl & VE_SEQ_CTRL_CAP_BUSY)) {
		v4l2_dbg(1, debug, &video->v4l2_dev, "Engine busy; don't start frame\n");
		return -EBUSY;
	}

	/* Aspeed-JPEG mode needs a BCD buffer; allocate/free it lazily here */
	if (bcd_buf_need && !video->bcd.size) {
		if (!aspeed_video_alloc_buf(video, &video->bcd,
					    VE_BCD_BUFF_SIZE)) {
			dev_err(video->dev, "Failed to allocate BCD buffer\n");
			dev_err(video->dev, "don't start frame\n");
			return -ENOMEM;
		}
		aspeed_video_write(video, VE_BCD_ADDR, video->bcd.dma);
		v4l2_dbg(1, debug, &video->v4l2_dev, "bcd addr(%pad) size(%d)\n",
			 &video->bcd.dma, video->bcd.size);
	} else if (!bcd_buf_need && video->bcd.size) {
		aspeed_video_free_buf(video, &video->bcd);
	}

	spin_lock_irqsave(&video->lock, flags);
	buf = list_first_entry_or_null(&video->buffers,
				       struct aspeed_video_buffer, link);
	if (!buf) {
		spin_unlock_irqrestore(&video->lock, flags);
		v4l2_dbg(1, debug, &video->v4l2_dev, "No buffers; don't start frame\n");
		return -EPROTO;
	}

	set_bit(VIDEO_FRAME_INPRG, &video->flags);
	addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
	spin_unlock_irqrestore(&video->lock, flags);

	/* Point the compression output at the buffer and arm the comp irq */
	aspeed_video_write(video, VE_COMP_PROC_OFFSET, 0);
	aspeed_video_write(video, VE_COMP_OFFSET, 0);
	aspeed_video_write(video, VE_COMP_ADDR, addr);

	aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
			    VE_INTERRUPT_COMP_COMPLETE);

	/* Timestamp for the debugfs duration statistics */
	video->perf.last_sample = ktime_get();

	aspeed_video_update(video, VE_SEQ_CTRL, 0,
			    VE_SEQ_CTRL_TRIG_CAPTURE | VE_SEQ_CTRL_TRIG_COMP);

	return 0;
}

/* Arm and (re)trigger the video engine's mode (resolution) detection */
static void aspeed_video_enable_mode_detect(struct aspeed_video *video)
{
	/* Enable mode detect interrupts */
	aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
			    VE_INTERRUPT_MODE_DETECT);

	/* Disable mode detect in order to re-trigger */
	aspeed_video_update(video, VE_SEQ_CTRL,
			    VE_SEQ_CTRL_TRIG_MODE_DET, 0);

	/* Trigger mode detect */
	aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_TRIG_MODE_DET);
}

/*
 * Quiesce the engine: mask and ack all interrupts, then gate the clocks.
 * VIDEO_CLOCKS_ON keeps off/on idempotent.
 */
static void aspeed_video_off(struct aspeed_video *video)
{
	if (!test_bit(VIDEO_CLOCKS_ON, &video->flags))
		return;

	/* Disable interrupts */
	aspeed_video_write(video, VE_INTERRUPT_CTRL, 0);
	aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff);

	/* Turn off the relevant clocks */
	clk_disable(video->eclk);
	clk_disable(video->vclk);

	clear_bit(VIDEO_CLOCKS_ON, &video->flags);
}

/* Ungate the engine clocks (note: vclk before eclk, reverse of the off path) */
static void aspeed_video_on(struct aspeed_video *video)
{
	if (test_bit(VIDEO_CLOCKS_ON, &video->flags))
		return;

	/* Turn on the relevant clocks */
	clk_enable(video->vclk);
	clk_enable(video->eclk);

	set_bit(VIDEO_CLOCKS_ON, &video->flags);
}

/* Complete every queued vb2 buffer with @state and empty the queue */
static void aspeed_video_bufs_done(struct aspeed_video *video,
				   enum vb2_buffer_state state)
{
	unsigned long flags;
	struct aspeed_video_buffer *buf;

	spin_lock_irqsave(&video->lock, flags);
	list_for_each_entry(buf, &video->buffers, link)
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	INIT_LIST_HEAD(&video->buffers);
	spin_unlock_irqrestore(&video->lock, flags);
}

/*
 * React to a resolution change / signal loss: flag the change, mark the
 * input as having no signal, shut the engine down, fail all pending
 * buffers, and schedule re-detection after @delay jiffies.
 */
static void aspeed_video_irq_res_change(struct aspeed_video *video, ulong delay)
{
	v4l2_dbg(1, debug, &video->v4l2_dev, "Resolution changed; resetting\n");

	set_bit(VIDEO_RES_CHANGE, &video->flags);
	clear_bit(VIDEO_FRAME_INPRG, &video->flags);

	video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;

	aspeed_video_off(video);
	aspeed_video_bufs_done(video, VB2_BUF_STATE_ERROR);

	schedule_delayed_work(&video->res_work, delay);
}

/*
 * Alternate the two source buffers on SRC0/SRC1 based on frame-sequence
 * parity; no-op for standard JPEG format.
 */
static void aspeed_video_swap_src_buf(struct aspeed_video *v)
{
	if (v->format == VIDEO_FMT_STANDARD)
		return;

	/* Reset bcd buffer to have a full frame update every 8 frames.
	 */
	if (IS_ALIGNED(v->sequence, 8))
		memset((u8 *)v->bcd.virt, 0x00, VE_BCD_BUFF_SIZE);

	if (v->sequence & 0x01) {
		aspeed_video_write(v, VE_SRC0_ADDR, v->srcs[1].dma);
		aspeed_video_write(v, VE_SRC1_ADDR, v->srcs[0].dma);
	} else {
		aspeed_video_write(v, VE_SRC0_ADDR, v->srcs[0].dma);
		aspeed_video_write(v, VE_SRC1_ADDR, v->srcs[1].dma);
	}
}

/*
 * Video engine interrupt handler.  Handles (in order): mode-detect watchdog
 * (signal lost / resolution changed), mode-detect completion, and
 * compression-complete (frame ready).  Bits left set in sts at the end were
 * not handled and cause IRQ_NONE.
 */
static irqreturn_t aspeed_video_irq(int irq, void *arg)
{
	struct aspeed_video *video = arg;
	u32 sts = aspeed_video_read(video, VE_INTERRUPT_STATUS);

	/*
	 * Hardware sometimes asserts interrupts that we haven't actually
	 * enabled; ignore them if so.
	 */
	sts &= aspeed_video_read(video, VE_INTERRUPT_CTRL);

	v4l2_dbg(2, debug, &video->v4l2_dev, "irq sts=%#x %s%s%s%s\n", sts,
		 sts & VE_INTERRUPT_MODE_DETECT_WD ? ", unlock" : "",
		 sts & VE_INTERRUPT_MODE_DETECT ? ", lock" : "",
		 sts & VE_INTERRUPT_CAPTURE_COMPLETE ? ", capture-done" : "",
		 sts & VE_INTERRUPT_COMP_COMPLETE ? ", comp-done" : "");

	/*
	 * Resolution changed or signal was lost; reset the engine and
	 * re-initialize
	 */
	if (sts & VE_INTERRUPT_MODE_DETECT_WD) {
		aspeed_video_irq_res_change(video, 0);
		return IRQ_HANDLED;
	}

	if (sts & VE_INTERRUPT_MODE_DETECT) {
		if (test_bit(VIDEO_RES_DETECT, &video->flags)) {
			/* Mask, ack, and wake the waiter in get_resolution() */
			aspeed_video_update(video, VE_INTERRUPT_CTRL,
					    VE_INTERRUPT_MODE_DETECT, 0);
			aspeed_video_write(video, VE_INTERRUPT_STATUS,
					   VE_INTERRUPT_MODE_DETECT);
			sts &= ~VE_INTERRUPT_MODE_DETECT;
			set_bit(VIDEO_MODE_DETECT_DONE, &video->flags);
			wake_up_interruptible_all(&video->wait);
		} else {
			/*
			 * Signal acquired while NOT doing resolution
			 * detection; reset the engine and re-initialize
			 */
			aspeed_video_irq_res_change(video,
						    RESOLUTION_CHANGE_DELAY);
			return IRQ_HANDLED;
		}
	}

	if (sts & VE_INTERRUPT_COMP_COMPLETE) {
		struct aspeed_video_buffer *buf;
		bool empty = true;
		/* Compressed-stream length, read from a chip-specific reg */
		u32 frame_size = aspeed_video_read(video,
						   video->comp_size_read);

		update_perf(&video->perf);
		spin_lock(&video->lock);
		clear_bit(VIDEO_FRAME_INPRG, &video->flags);
		buf = list_first_entry_or_null(&video->buffers,
					       struct aspeed_video_buffer,
					       link);
		if (buf) {
			vb2_set_plane_payload(&buf->vb.vb2_buf, 0, frame_size);

			/*
			 * aspeed_jpeg requires continuous update.
			 * On the contrary, standard jpeg can keep last buffer
			 * to always have the latest result.
			 */
			if (video->format == VIDEO_FMT_STANDARD &&
			    list_is_last(&buf->link, &video->buffers)) {
				empty = false;
				v4l2_dbg(1, debug, &video->v4l2_dev,
					 "skip to keep last frame updated\n");
			} else {
				buf->vb.vb2_buf.timestamp = ktime_get_ns();
				buf->vb.sequence = video->sequence++;
				buf->vb.field = V4L2_FIELD_NONE;
				vb2_buffer_done(&buf->vb.vb2_buf,
						VB2_BUF_STATE_DONE);
				list_del(&buf->link);
				empty = list_empty(&video->buffers);
			}
		}
		spin_unlock(&video->lock);

		/* Idle the engine, then mask and ack the comp-complete irq */
		aspeed_video_update(video, VE_SEQ_CTRL,
				    VE_SEQ_CTRL_TRIG_CAPTURE |
				    VE_SEQ_CTRL_FORCE_IDLE |
				    VE_SEQ_CTRL_TRIG_COMP, 0);
		aspeed_video_update(video, VE_INTERRUPT_CTRL,
				    VE_INTERRUPT_COMP_COMPLETE, 0);
		aspeed_video_write(video, VE_INTERRUPT_STATUS,
				   VE_INTERRUPT_COMP_COMPLETE);
		sts &= ~VE_INTERRUPT_COMP_COMPLETE;

		aspeed_video_swap_src_buf(video);

		if (test_bit(VIDEO_STREAMING, &video->flags) && !empty)
			aspeed_video_start_frame(video);
	}

	/* Any bit still set was not handled above */
	return sts ? IRQ_NONE : IRQ_HANDLED;
}

/*
 * Sample the sync status NUM_POLARITY_CHECKS times and majority-vote the
 * hsync/vsync polarity; program the result into VE_CTRL and mirror it in
 * the detected timings' polarity flags.
 */
static void aspeed_video_check_and_set_polarity(struct aspeed_video *video)
{
	int i;
	int hsync_counter = 0;
	int vsync_counter = 0;
	u32 sts, ctrl;

	for (i = 0; i < NUM_POLARITY_CHECKS; ++i) {
		sts = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
		if (sts & VE_MODE_DETECT_STATUS_VSYNC)
			vsync_counter--;
		else
			vsync_counter++;

		if (sts & VE_MODE_DETECT_STATUS_HSYNC)
			hsync_counter--;
		else
			hsync_counter++;
	}

	ctrl = aspeed_video_read(video, VE_CTRL);

	if (hsync_counter < 0) {
		ctrl |= VE_CTRL_HSYNC_POL;
		video->detected_timings.polarities &=
			~V4L2_DV_HSYNC_POS_POL;
	} else {
		ctrl &= ~VE_CTRL_HSYNC_POL;
		video->detected_timings.polarities |=
			V4L2_DV_HSYNC_POS_POL;
	}

	if (vsync_counter < 0) {
		ctrl |= VE_CTRL_VSYNC_POL;
		video->detected_timings.polarities &=
			~V4L2_DV_VSYNC_POS_POL;
	} else {
		ctrl &= ~VE_CTRL_VSYNC_POL;
		video->detected_timings.polarities |=
			V4L2_DV_VSYNC_POS_POL;
	}

	aspeed_video_write(video, VE_CTRL, ctrl);
}

/* Allocate a coherent DMA buffer; returns false (addr untouched) on failure */
static bool aspeed_video_alloc_buf(struct aspeed_video *video,
				   struct aspeed_video_addr *addr,
				   unsigned int size)
{
	addr->virt = dma_alloc_coherent(video->dev, size, &addr->dma,
					GFP_KERNEL);
	if (!addr->virt)
		return false;

	addr->size = size;
	return true;
}

/* Free a buffer from aspeed_video_alloc_buf() and clear the descriptor */
static void aspeed_video_free_buf(struct aspeed_video *video,
				  struct aspeed_video_addr *addr)
{
	dma_free_coherent(video->dev, addr->size, addr->virt, addr->dma);
	addr->size = 0;
	addr->dma = 0ULL;
	addr->virt = NULL;
}

/*
 * Get the minimum HW-supported compression buffer size for the frame size.
 * Assume worst-case JPEG compression size is 1/8 raw size. This should be
 * plenty even for maximum quality; any worse and the engine will simply return
 * incomplete JPEGs.
 */
static void aspeed_video_calc_compressed_size(struct aspeed_video *video,
					      unsigned int frame_size)
{
	int i, j;
	u32 compression_buffer_size_reg = 0;
	unsigned int size;
	const unsigned int num_compression_packets = 4;
	const unsigned int compression_packet_size = 1024;
	const unsigned int max_compressed_size = frame_size / 2; /* 4bpp / 8 */

	video->max_compressed_size = UINT_MAX;

	/*
	 * Search the (i, j) register encoding giving the smallest buffer of
	 * (4 << i) packets of (1024 << j) bytes that still fits the
	 * worst-case compressed frame.
	 */
	for (i = 0; i < 6; ++i) {
		for (j = 0; j < 8; ++j) {
			size = (num_compression_packets << i) *
				(compression_packet_size << j);
			if (size < max_compressed_size)
				continue;

			if (size < video->max_compressed_size) {
				compression_buffer_size_reg = (i << 3) | j;
				video->max_compressed_size = size;
			}
		}
	}

	aspeed_video_write(video, VE_STREAM_BUF_SIZE,
			   compression_buffer_size_reg);

	v4l2_dbg(1, debug, &video->v4l2_dev, "Max compressed size: %#x\n",
		 video->max_compressed_size);
}

/*
 * Update v4l2_bt_timings per current status.
 * frame_top/frame_bottom/frame_left/frame_right need to be ready.
 *
 * The following registers start counting from sync's rising edge:
 * 1. VR090: frame edge's left and right
 * 2. VR094: frame edge's top and bottom
 * 3.
VR09C: counting from sync's rising edge to falling edge * * [Vertical timing] * +--+ +-------------------+ +--+ * | | | v i d e o | | | * +--+ +-----+ +-----+ +---+ * vsync+--+ * frame_top+--------+ * frame_bottom+----------------------------+ * * +-------------------+ * | v i d e o | * +--+ +-----+ +-----+ +---+ * | | | | * +--+ +--+ * vsync+-------------------------------+ * frame_top+-----+ * frame_bottom+-------------------------+ * * [Horizontal timing] * +--+ +-------------------+ +--+ * | | | v i d e o | | | * +--+ +-----+ +-----+ +---+ * hsync+--+ * frame_left+--------+ * frame_right+----------------------------+ * * +-------------------+ * | v i d e o | * +--+ +-----+ +-----+ +---+ * | | | | * +--+ +--+ * hsync+-------------------------------+ * frame_left+-----+ * frame_right+-------------------------+ * * @v: the struct of aspeed_video * @det: v4l2_bt_timings to be updated. */ static void aspeed_video_get_timings(struct aspeed_video *v, struct v4l2_bt_timings *det) { u32 mds, sync, htotal, vtotal, vsync, hsync; mds = aspeed_video_read(v, VE_MODE_DETECT_STATUS); sync = aspeed_video_read(v, VE_SYNC_STATUS); htotal = aspeed_video_read(v, VE_H_TOTAL_PIXELS); vtotal = FIELD_GET(VE_MODE_DETECT_V_LINES, mds); vsync = FIELD_GET(VE_SYNC_STATUS_VSYNC, sync); hsync = FIELD_GET(VE_SYNC_STATUS_HSYNC, sync); /* * This is a workaround for polarity detection. * Because ast-soc counts sync from sync's rising edge, the reg value * of sync would be larger than video's active area if negative. 
	if (vsync > det->height)
		det->polarities &= ~V4L2_DV_VSYNC_POS_POL;
	else
		det->polarities |= V4L2_DV_VSYNC_POS_POL;
	if (hsync > det->width)
		det->polarities &= ~V4L2_DV_HSYNC_POS_POL;
	else
		det->polarities |= V4L2_DV_HSYNC_POS_POL;

	/* Derive porch/sync widths from the counted sync and frame edges */
	if (det->polarities & V4L2_DV_VSYNC_POS_POL) {
		det->vbackporch = v->frame_top - vsync;
		det->vfrontporch = vtotal - v->frame_bottom;
		det->vsync = vsync;
	} else {
		det->vbackporch = v->frame_top;
		det->vfrontporch = vsync - v->frame_bottom;
		det->vsync = vtotal - vsync;
	}
	if (det->polarities & V4L2_DV_HSYNC_POS_POL) {
		det->hbackporch = v->frame_left - hsync;
		det->hfrontporch = htotal - v->frame_right;
		det->hsync = hsync;
	} else {
		det->hbackporch = v->frame_left;
		det->hfrontporch = hsync - v->frame_right;
		det->hsync = htotal - hsync;
	}
}

#define res_check(v) test_and_clear_bit(VIDEO_MODE_DETECT_DONE, &(v)->flags)

/*
 * Run the two-pass mode-detect sequence (polarity probe, then edge detect)
 * to populate detected_timings; retries up to INVALID_RESOLUTION_RETRIES
 * times when the signal is unstable or the detected edges are inconsistent.
 */
static void aspeed_video_get_resolution(struct aspeed_video *video)
{
	bool invalid_resolution = true;
	int rc;
	int tries = 0;
	u32 mds;
	u32 src_lr_edge;
	u32 src_tb_edge;
	struct v4l2_bt_timings *det = &video->detected_timings;

	det->width = MIN_WIDTH;
	det->height = MIN_HEIGHT;
	video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
	memset(&video->perf, 0, sizeof(video->perf));

	do {
		if (tries) {
			/* Back off before retrying; bail if interrupted */
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(INVALID_RESOLUTION_DELAY))
				return;
		}

		set_bit(VIDEO_RES_DETECT, &video->flags);
		aspeed_video_update(video, VE_CTRL,
				    VE_CTRL_VSYNC_POL | VE_CTRL_HSYNC_POL, 0);
		aspeed_video_enable_mode_detect(video);

		rc = wait_event_interruptible_timeout(video->wait,
						      res_check(video),
						      MODE_DETECT_TIMEOUT);
		if (!rc) {
			v4l2_dbg(1, debug, &video->v4l2_dev, "Timed out; first mode detect\n");
			clear_bit(VIDEO_RES_DETECT, &video->flags);
			return;
		}

		mds = aspeed_video_read(video, VE_MODE_DETECT_STATUS);
		// try detection again if current signal isn't stable
		if (!(mds & VE_MODE_DETECT_H_STABLE) ||
		    !(mds & VE_MODE_DETECT_V_STABLE) ||
		    (mds & VE_MODE_DETECT_EXTSRC_ADC))
			continue;

		aspeed_video_check_and_set_polarity(video);
aspeed_video_enable_mode_detect(video); rc = wait_event_interruptible_timeout(video->wait, res_check(video), MODE_DETECT_TIMEOUT); clear_bit(VIDEO_RES_DETECT, &video->flags); if (!rc) { v4l2_dbg(1, debug, &video->v4l2_dev, "Timed out; second mode detect\n"); return; } src_lr_edge = aspeed_video_read(video, VE_SRC_LR_EDGE_DET); src_tb_edge = aspeed_video_read(video, VE_SRC_TB_EDGE_DET); video->frame_bottom = FIELD_GET(VE_SRC_TB_EDGE_DET_BOT, src_tb_edge); video->frame_top = FIELD_GET(VE_SRC_TB_EDGE_DET_TOP, src_tb_edge); if (video->frame_top > video->frame_bottom) continue; video->frame_right = FIELD_GET(VE_SRC_LR_EDGE_DET_RT, src_lr_edge); video->frame_left = FIELD_GET(VE_SRC_LR_EDGE_DET_LEFT, src_lr_edge); if (video->frame_left > video->frame_right) continue; invalid_resolution = false; } while (invalid_resolution && (tries++ < INVALID_RESOLUTION_RETRIES)); if (invalid_resolution) { v4l2_dbg(1, debug, &video->v4l2_dev, "Invalid resolution detected\n"); return; } det->height = (video->frame_bottom - video->frame_top) + 1; det->width = (video->frame_right - video->frame_left) + 1; video->v4l2_input_status = 0; aspeed_video_get_timings(video, det); /* * Enable mode-detect watchdog, resolution-change watchdog and * automatic compression after frame capture. */ aspeed_video_update(video, VE_INTERRUPT_CTRL, 0, VE_INTERRUPT_MODE_DETECT_WD); aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_AUTO_COMP | VE_SEQ_CTRL_EN_WATCHDOG); v4l2_dbg(1, debug, &video->v4l2_dev, "Got resolution: %dx%d\n", det->width, det->height); } static void aspeed_video_set_resolution(struct aspeed_video *video) { struct v4l2_bt_timings *act = &video->active_timings; unsigned int size = act->width * ALIGN(act->height, 8); /* Set capture/compression frame sizes */ aspeed_video_calc_compressed_size(video, size); if (!IS_ALIGNED(act->width, 64)) { /* * This is a workaround to fix a AST2500 silicon bug on A1 and * A2 revisions. 
Since it doesn't break capturing operation of * other revisions, use it for all revisions without checking * the revision ID. It picked new width which is a very next * 64-pixels aligned value to minimize memory bandwidth * and to get better access speed from video engine. */ u32 width = ALIGN(act->width, 64); aspeed_video_write(video, VE_CAP_WINDOW, width << 16 | act->height); size = width * ALIGN(act->height, 8); } else { aspeed_video_write(video, VE_CAP_WINDOW, act->width << 16 | act->height); } aspeed_video_write(video, VE_COMP_WINDOW, act->width << 16 | act->height); aspeed_video_write(video, VE_SRC_SCANLINE_OFFSET, act->width * 4); /* Don't use direct mode below 1024 x 768 (irqs don't fire) */ if (size < DIRECT_FETCH_THRESHOLD) { v4l2_dbg(1, debug, &video->v4l2_dev, "Capture: Sync Mode\n"); aspeed_video_write(video, VE_TGS_0, FIELD_PREP(VE_TGS_FIRST, video->frame_left - 1) | FIELD_PREP(VE_TGS_LAST, video->frame_right)); aspeed_video_write(video, VE_TGS_1, FIELD_PREP(VE_TGS_FIRST, video->frame_top) | FIELD_PREP(VE_TGS_LAST, video->frame_bottom + 1)); aspeed_video_update(video, VE_CTRL, VE_CTRL_INT_DE | VE_CTRL_DIRECT_FETCH, VE_CTRL_INT_DE); } else { v4l2_dbg(1, debug, &video->v4l2_dev, "Capture: Direct Mode\n"); aspeed_video_update(video, VE_CTRL, VE_CTRL_INT_DE | VE_CTRL_DIRECT_FETCH, VE_CTRL_DIRECT_FETCH); } size *= 4; if (size != video->srcs[0].size) { if (video->srcs[0].size) aspeed_video_free_buf(video, &video->srcs[0]); if (video->srcs[1].size) aspeed_video_free_buf(video, &video->srcs[1]); if (!aspeed_video_alloc_buf(video, &video->srcs[0], size)) goto err_mem; if (!aspeed_video_alloc_buf(video, &video->srcs[1], size)) goto err_mem; v4l2_dbg(1, debug, &video->v4l2_dev, "src buf0 addr(%pad) size(%d)\n", &video->srcs[0].dma, video->srcs[0].size); v4l2_dbg(1, debug, &video->v4l2_dev, "src buf1 addr(%pad) size(%d)\n", &video->srcs[1].dma, video->srcs[1].size); aspeed_video_write(video, VE_SRC0_ADDR, video->srcs[0].dma); aspeed_video_write(video, 
VE_SRC1_ADDR, video->srcs[1].dma); } return; err_mem: dev_err(video->dev, "Failed to allocate source buffers\n"); if (video->srcs[0].size) aspeed_video_free_buf(video, &video->srcs[0]); } static void aspeed_video_update_regs(struct aspeed_video *video) { u8 jpeg_hq_quality = clamp((int)video->jpeg_hq_quality - 1, 0, ASPEED_VIDEO_JPEG_NUM_QUALITIES - 1); u32 comp_ctrl = FIELD_PREP(VE_COMP_CTRL_DCT_LUM, video->jpeg_quality) | FIELD_PREP(VE_COMP_CTRL_DCT_CHR, video->jpeg_quality | 0x10) | FIELD_PREP(VE_COMP_CTRL_EN_HQ, video->hq_mode) | FIELD_PREP(VE_COMP_CTRL_HQ_DCT_LUM, jpeg_hq_quality) | FIELD_PREP(VE_COMP_CTRL_HQ_DCT_CHR, jpeg_hq_quality | 0x10); u32 ctrl = 0; u32 seq_ctrl = 0; v4l2_dbg(1, debug, &video->v4l2_dev, "framerate(%d)\n", video->frame_rate); v4l2_dbg(1, debug, &video->v4l2_dev, "jpeg format(%s) subsample(%s)\n", format_str[video->format], video->yuv420 ? "420" : "444"); v4l2_dbg(1, debug, &video->v4l2_dev, "compression quality(%d)\n", video->jpeg_quality); v4l2_dbg(1, debug, &video->v4l2_dev, "hq_mode(%s) hq_quality(%d)\n", video->hq_mode ? 
"on" : "off", video->jpeg_hq_quality); if (video->format == VIDEO_FMT_ASPEED) aspeed_video_update(video, VE_BCD_CTRL, 0, VE_BCD_CTRL_EN_BCD); else aspeed_video_update(video, VE_BCD_CTRL, VE_BCD_CTRL_EN_BCD, 0); if (video->frame_rate) ctrl |= FIELD_PREP(VE_CTRL_FRC, video->frame_rate); if (video->format == VIDEO_FMT_STANDARD) { comp_ctrl &= ~FIELD_PREP(VE_COMP_CTRL_EN_HQ, video->hq_mode); seq_ctrl |= video->jpeg_mode; } if (video->yuv420) seq_ctrl |= VE_SEQ_CTRL_YUV420; if (video->jpeg.virt) aspeed_video_update_jpeg_table(video->jpeg.virt, video->yuv420); /* Set control registers */ aspeed_video_update(video, VE_SEQ_CTRL, video->jpeg_mode | VE_SEQ_CTRL_YUV420, seq_ctrl); aspeed_video_update(video, VE_CTRL, VE_CTRL_FRC, ctrl); aspeed_video_update(video, VE_COMP_CTRL, VE_COMP_CTRL_DCT_LUM | VE_COMP_CTRL_DCT_CHR | VE_COMP_CTRL_EN_HQ | VE_COMP_CTRL_HQ_DCT_LUM | VE_COMP_CTRL_HQ_DCT_CHR | VE_COMP_CTRL_VQ_4COLOR | VE_COMP_CTRL_VQ_DCT_ONLY, comp_ctrl); } static void aspeed_video_init_regs(struct aspeed_video *video) { u32 ctrl = VE_CTRL_AUTO_OR_CURSOR | FIELD_PREP(VE_CTRL_CAPTURE_FMT, VIDEO_CAP_FMT_YUV_FULL_SWING); /* Unlock VE registers */ aspeed_video_write(video, VE_PROTECTION_KEY, VE_PROTECTION_KEY_UNLOCK); /* Disable interrupts */ aspeed_video_write(video, VE_INTERRUPT_CTRL, 0); aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff); /* Clear the offset */ aspeed_video_write(video, VE_COMP_PROC_OFFSET, 0); aspeed_video_write(video, VE_COMP_OFFSET, 0); aspeed_video_write(video, VE_JPEG_ADDR, video->jpeg.dma); /* Set control registers */ aspeed_video_write(video, VE_CTRL, ctrl); aspeed_video_write(video, VE_COMP_CTRL, VE_COMP_CTRL_RSVD); /* Don't downscale */ aspeed_video_write(video, VE_SCALING_FACTOR, 0x10001000); aspeed_video_write(video, VE_SCALING_FILTER0, 0x00200000); aspeed_video_write(video, VE_SCALING_FILTER1, 0x00200000); aspeed_video_write(video, VE_SCALING_FILTER2, 0x00200000); aspeed_video_write(video, VE_SCALING_FILTER3, 0x00200000); /* Set mode 
detection defaults */ aspeed_video_write(video, VE_MODE_DETECT, FIELD_PREP(VE_MODE_DT_HOR_TOLER, 2) | FIELD_PREP(VE_MODE_DT_VER_TOLER, 2) | FIELD_PREP(VE_MODE_DT_HOR_STABLE, 6) | FIELD_PREP(VE_MODE_DT_VER_STABLE, 6) | FIELD_PREP(VE_MODE_DT_EDG_THROD, 0x65)); aspeed_video_write(video, VE_BCD_CTRL, 0); } static void aspeed_video_start(struct aspeed_video *video) { aspeed_video_on(video); aspeed_video_init_regs(video); /* Resolution set to 640x480 if no signal found */ aspeed_video_get_resolution(video); /* Set timings since the device is being opened for the first time */ video->active_timings = video->detected_timings; aspeed_video_set_resolution(video); video->pix_fmt.width = video->active_timings.width; video->pix_fmt.height = video->active_timings.height; video->pix_fmt.sizeimage = video->max_compressed_size; } static void aspeed_video_stop(struct aspeed_video *video) { set_bit(VIDEO_STOPPED, &video->flags); cancel_delayed_work_sync(&video->res_work); aspeed_video_off(video); if (video->srcs[0].size) aspeed_video_free_buf(video, &video->srcs[0]); if (video->srcs[1].size) aspeed_video_free_buf(video, &video->srcs[1]); if (video->bcd.size) aspeed_video_free_buf(video, &video->bcd); video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; video->flags = 0; } static int aspeed_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { strscpy(cap->driver, DEVICE_NAME, sizeof(cap->driver)); strscpy(cap->card, "Aspeed Video Engine", sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", DEVICE_NAME); return 0; } static int aspeed_video_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f) { struct aspeed_video *video = video_drvdata(file); if (f->index) return -EINVAL; f->pixelformat = video->pix_fmt.pixelformat; return 0; } static int aspeed_video_get_format(struct file *file, void *fh, struct v4l2_format *f) { struct aspeed_video *video = video_drvdata(file); f->fmt.pix = video->pix_fmt; return 0; } static int 
aspeed_video_set_format(struct file *file, void *fh, struct v4l2_format *f) { struct aspeed_video *video = video_drvdata(file); if (vb2_is_busy(&video->queue)) return -EBUSY; switch (f->fmt.pix.pixelformat) { case V4L2_PIX_FMT_JPEG: video->format = VIDEO_FMT_STANDARD; break; case V4L2_PIX_FMT_AJPG: video->format = VIDEO_FMT_ASPEED; break; default: return -EINVAL; } video->pix_fmt.pixelformat = f->fmt.pix.pixelformat; return 0; } static int aspeed_video_enum_input(struct file *file, void *fh, struct v4l2_input *inp) { struct aspeed_video *video = video_drvdata(file); if (inp->index) return -EINVAL; strscpy(inp->name, "Host VGA capture", sizeof(inp->name)); inp->type = V4L2_INPUT_TYPE_CAMERA; inp->capabilities = V4L2_IN_CAP_DV_TIMINGS; inp->status = video->v4l2_input_status; return 0; } static int aspeed_video_get_input(struct file *file, void *fh, unsigned int *i) { *i = 0; return 0; } static int aspeed_video_set_input(struct file *file, void *fh, unsigned int i) { if (i) return -EINVAL; return 0; } static int aspeed_video_get_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { struct aspeed_video *video = video_drvdata(file); a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; a->parm.capture.readbuffers = ASPEED_VIDEO_V4L2_MIN_BUF_REQ; a->parm.capture.timeperframe.numerator = 1; if (!video->frame_rate) a->parm.capture.timeperframe.denominator = MAX_FRAME_RATE; else a->parm.capture.timeperframe.denominator = video->frame_rate; return 0; } static int aspeed_video_set_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { unsigned int frame_rate = 0; struct aspeed_video *video = video_drvdata(file); a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; a->parm.capture.readbuffers = ASPEED_VIDEO_V4L2_MIN_BUF_REQ; if (a->parm.capture.timeperframe.numerator) frame_rate = a->parm.capture.timeperframe.denominator / a->parm.capture.timeperframe.numerator; if (!frame_rate || frame_rate > MAX_FRAME_RATE) { frame_rate = 0; 
a->parm.capture.timeperframe.denominator = MAX_FRAME_RATE; a->parm.capture.timeperframe.numerator = 1; } if (video->frame_rate != frame_rate) { video->frame_rate = frame_rate; aspeed_video_update(video, VE_CTRL, VE_CTRL_FRC, FIELD_PREP(VE_CTRL_FRC, frame_rate)); } return 0; } static int aspeed_video_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize) { struct aspeed_video *video = video_drvdata(file); if (fsize->index) return -EINVAL; if (fsize->pixel_format != V4L2_PIX_FMT_JPEG) return -EINVAL; fsize->discrete.width = video->pix_fmt.width; fsize->discrete.height = video->pix_fmt.height; fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; return 0; } static int aspeed_video_enum_frameintervals(struct file *file, void *fh, struct v4l2_frmivalenum *fival) { struct aspeed_video *video = video_drvdata(file); if (fival->index) return -EINVAL; if (fival->width != video->detected_timings.width || fival->height != video->detected_timings.height) return -EINVAL; if (fival->pixel_format != V4L2_PIX_FMT_JPEG) return -EINVAL; fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS; fival->stepwise.min.denominator = MAX_FRAME_RATE; fival->stepwise.min.numerator = 1; fival->stepwise.max.denominator = 1; fival->stepwise.max.numerator = 1; fival->stepwise.step = fival->stepwise.max; return 0; } static int aspeed_video_set_dv_timings(struct file *file, void *fh, struct v4l2_dv_timings *timings) { struct aspeed_video *video = video_drvdata(file); if (timings->bt.width == video->active_timings.width && timings->bt.height == video->active_timings.height) return 0; if (vb2_is_busy(&video->queue)) return -EBUSY; video->active_timings = timings->bt; aspeed_video_set_resolution(video); video->pix_fmt.width = timings->bt.width; video->pix_fmt.height = timings->bt.height; video->pix_fmt.sizeimage = video->max_compressed_size; timings->type = V4L2_DV_BT_656_1120; v4l2_dbg(1, debug, &video->v4l2_dev, "set new timings(%dx%d)\n", timings->bt.width, timings->bt.height); return 0; } static int 
aspeed_video_get_dv_timings(struct file *file, void *fh, struct v4l2_dv_timings *timings) { struct aspeed_video *video = video_drvdata(file); timings->type = V4L2_DV_BT_656_1120; timings->bt = video->active_timings; return 0; } static int aspeed_video_query_dv_timings(struct file *file, void *fh, struct v4l2_dv_timings *timings) { int rc; struct aspeed_video *video = video_drvdata(file); /* * This blocks only if the driver is currently in the process of * detecting a new resolution; in the event of no signal or timeout * this function is woken up. */ if (file->f_flags & O_NONBLOCK) { if (test_bit(VIDEO_RES_CHANGE, &video->flags)) return -EAGAIN; } else { rc = wait_event_interruptible(video->wait, !test_bit(VIDEO_RES_CHANGE, &video->flags)); if (rc) return -EINTR; } timings->type = V4L2_DV_BT_656_1120; timings->bt = video->detected_timings; return video->v4l2_input_status ? -ENOLINK : 0; } static int aspeed_video_enum_dv_timings(struct file *file, void *fh, struct v4l2_enum_dv_timings *timings) { return v4l2_enum_dv_timings_cap(timings, &aspeed_video_timings_cap, NULL, NULL); } static int aspeed_video_dv_timings_cap(struct file *file, void *fh, struct v4l2_dv_timings_cap *cap) { *cap = aspeed_video_timings_cap; return 0; } static int aspeed_video_sub_event(struct v4l2_fh *fh, const struct v4l2_event_subscription *sub) { switch (sub->type) { case V4L2_EVENT_SOURCE_CHANGE: return v4l2_src_change_event_subscribe(fh, sub); } return v4l2_ctrl_subscribe_event(fh, sub); } static const struct v4l2_ioctl_ops aspeed_video_ioctl_ops = { .vidioc_querycap = aspeed_video_querycap, .vidioc_enum_fmt_vid_cap = aspeed_video_enum_format, .vidioc_g_fmt_vid_cap = aspeed_video_get_format, .vidioc_s_fmt_vid_cap = aspeed_video_set_format, .vidioc_try_fmt_vid_cap = aspeed_video_get_format, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_create_bufs = 
vb2_ioctl_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_enum_input = aspeed_video_enum_input, .vidioc_g_input = aspeed_video_get_input, .vidioc_s_input = aspeed_video_set_input, .vidioc_g_parm = aspeed_video_get_parm, .vidioc_s_parm = aspeed_video_set_parm, .vidioc_enum_framesizes = aspeed_video_enum_framesizes, .vidioc_enum_frameintervals = aspeed_video_enum_frameintervals, .vidioc_s_dv_timings = aspeed_video_set_dv_timings, .vidioc_g_dv_timings = aspeed_video_get_dv_timings, .vidioc_query_dv_timings = aspeed_video_query_dv_timings, .vidioc_enum_dv_timings = aspeed_video_enum_dv_timings, .vidioc_dv_timings_cap = aspeed_video_dv_timings_cap, .vidioc_subscribe_event = aspeed_video_sub_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static int aspeed_video_set_ctrl(struct v4l2_ctrl *ctrl) { struct aspeed_video *video = container_of(ctrl->handler, struct aspeed_video, ctrl_handler); switch (ctrl->id) { case V4L2_CID_JPEG_COMPRESSION_QUALITY: video->jpeg_quality = ctrl->val; if (test_bit(VIDEO_STREAMING, &video->flags)) aspeed_video_update_regs(video); break; case V4L2_CID_JPEG_CHROMA_SUBSAMPLING: video->yuv420 = (ctrl->val == V4L2_JPEG_CHROMA_SUBSAMPLING_420); if (test_bit(VIDEO_STREAMING, &video->flags)) aspeed_video_update_regs(video); break; case V4L2_CID_ASPEED_HQ_MODE: video->hq_mode = ctrl->val; if (test_bit(VIDEO_STREAMING, &video->flags)) aspeed_video_update_regs(video); break; case V4L2_CID_ASPEED_HQ_JPEG_QUALITY: video->jpeg_hq_quality = ctrl->val; if (test_bit(VIDEO_STREAMING, &video->flags)) aspeed_video_update_regs(video); break; default: return -EINVAL; } return 0; } static const struct v4l2_ctrl_ops aspeed_video_ctrl_ops = { .s_ctrl = aspeed_video_set_ctrl, }; static const struct v4l2_ctrl_config aspeed_ctrl_HQ_mode = { .ops = &aspeed_video_ctrl_ops, .id = V4L2_CID_ASPEED_HQ_MODE, .name = "Aspeed HQ Mode", .type = V4L2_CTRL_TYPE_BOOLEAN, 
.min = false, .max = true, .step = 1, .def = false, }; static const struct v4l2_ctrl_config aspeed_ctrl_HQ_jpeg_quality = { .ops = &aspeed_video_ctrl_ops, .id = V4L2_CID_ASPEED_HQ_JPEG_QUALITY, .name = "Aspeed HQ Quality", .type = V4L2_CTRL_TYPE_INTEGER, .min = 1, .max = ASPEED_VIDEO_JPEG_NUM_QUALITIES, .step = 1, .def = 1, }; static void aspeed_video_resolution_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); struct aspeed_video *video = container_of(dwork, struct aspeed_video, res_work); aspeed_video_on(video); /* Exit early in case no clients remain */ if (test_bit(VIDEO_STOPPED, &video->flags)) goto done; aspeed_video_init_regs(video); aspeed_video_update_regs(video); aspeed_video_get_resolution(video); if (video->detected_timings.width != video->active_timings.width || video->detected_timings.height != video->active_timings.height) { static const struct v4l2_event ev = { .type = V4L2_EVENT_SOURCE_CHANGE, .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION, }; v4l2_dbg(1, debug, &video->v4l2_dev, "fire source change event\n"); v4l2_event_queue(&video->vdev, &ev); } else if (test_bit(VIDEO_STREAMING, &video->flags)) { /* No resolution change so just restart streaming */ aspeed_video_start_frame(video); } done: clear_bit(VIDEO_RES_CHANGE, &video->flags); wake_up_interruptible_all(&video->wait); } static int aspeed_video_open(struct file *file) { int rc; struct aspeed_video *video = video_drvdata(file); mutex_lock(&video->video_lock); rc = v4l2_fh_open(file); if (rc) { mutex_unlock(&video->video_lock); return rc; } if (v4l2_fh_is_singular_file(file)) aspeed_video_start(video); mutex_unlock(&video->video_lock); return 0; } static int aspeed_video_release(struct file *file) { int rc; struct aspeed_video *video = video_drvdata(file); mutex_lock(&video->video_lock); if (v4l2_fh_is_singular_file(file)) aspeed_video_stop(video); rc = _vb2_fop_release(file, NULL); mutex_unlock(&video->video_lock); return rc; } static const struct 
v4l2_file_operations aspeed_video_v4l2_fops = { .owner = THIS_MODULE, .read = vb2_fop_read, .poll = vb2_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, .open = aspeed_video_open, .release = aspeed_video_release, }; static int aspeed_video_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], struct device *alloc_devs[]) { struct aspeed_video *video = vb2_get_drv_priv(q); if (*num_planes) { if (sizes[0] < video->max_compressed_size) return -EINVAL; return 0; } *num_planes = 1; sizes[0] = video->max_compressed_size; return 0; } static int aspeed_video_buf_prepare(struct vb2_buffer *vb) { struct aspeed_video *video = vb2_get_drv_priv(vb->vb2_queue); if (vb2_plane_size(vb, 0) < video->max_compressed_size) return -EINVAL; return 0; } static int aspeed_video_start_streaming(struct vb2_queue *q, unsigned int count) { int rc; struct aspeed_video *video = vb2_get_drv_priv(q); video->sequence = 0; video->perf.duration_max = 0; video->perf.duration_min = 0xffffffff; aspeed_video_update_regs(video); rc = aspeed_video_start_frame(video); if (rc) { aspeed_video_bufs_done(video, VB2_BUF_STATE_QUEUED); return rc; } set_bit(VIDEO_STREAMING, &video->flags); return 0; } static void aspeed_video_stop_streaming(struct vb2_queue *q) { int rc; struct aspeed_video *video = vb2_get_drv_priv(q); clear_bit(VIDEO_STREAMING, &video->flags); rc = wait_event_timeout(video->wait, !test_bit(VIDEO_FRAME_INPRG, &video->flags), STOP_TIMEOUT); if (!rc) { v4l2_dbg(1, debug, &video->v4l2_dev, "Timed out when stopping streaming\n"); /* * Need to force stop any DMA and try and get HW into a good * state for future calls to start streaming again. 
*/ aspeed_video_off(video); aspeed_video_on(video); aspeed_video_init_regs(video); aspeed_video_get_resolution(video); } aspeed_video_bufs_done(video, VB2_BUF_STATE_ERROR); } static void aspeed_video_buf_queue(struct vb2_buffer *vb) { bool empty; struct aspeed_video *video = vb2_get_drv_priv(vb->vb2_queue); struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct aspeed_video_buffer *avb = to_aspeed_video_buffer(vbuf); unsigned long flags; spin_lock_irqsave(&video->lock, flags); empty = list_empty(&video->buffers); list_add_tail(&avb->link, &video->buffers); spin_unlock_irqrestore(&video->lock, flags); if (test_bit(VIDEO_STREAMING, &video->flags) && !test_bit(VIDEO_FRAME_INPRG, &video->flags) && empty) aspeed_video_start_frame(video); } static const struct vb2_ops aspeed_video_vb2_ops = { .queue_setup = aspeed_video_queue_setup, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, .buf_prepare = aspeed_video_buf_prepare, .start_streaming = aspeed_video_start_streaming, .stop_streaming = aspeed_video_stop_streaming, .buf_queue = aspeed_video_buf_queue, }; #ifdef CONFIG_DEBUG_FS static int aspeed_video_debugfs_show(struct seq_file *s, void *data) { struct aspeed_video *v = s->private; u32 val08; seq_puts(s, "\n"); seq_puts(s, "Capture:\n"); val08 = aspeed_video_read(v, VE_CTRL); if (FIELD_GET(VE_CTRL_DIRECT_FETCH, val08)) { seq_printf(s, " %-20s:\tDirect fetch\n", "Mode"); seq_printf(s, " %-20s:\t%s\n", "VGA bpp mode", FIELD_GET(VE_CTRL_INT_DE, val08) ? "16" : "32"); } else { seq_printf(s, " %-20s:\tSync\n", "Mode"); seq_printf(s, " %-20s:\t%s\n", "Video source", FIELD_GET(VE_CTRL_SOURCE, val08) ? "external" : "internal"); seq_printf(s, " %-20s:\t%s\n", "DE source", FIELD_GET(VE_CTRL_INT_DE, val08) ? "internal" : "external"); seq_printf(s, " %-20s:\t%s\n", "Cursor overlay", FIELD_GET(VE_CTRL_AUTO_OR_CURSOR, val08) ? "Without" : "With"); } seq_printf(s, " %-20s:\t%s\n", "Signal", v->v4l2_input_status ? 
"Unlock" : "Lock"); seq_printf(s, " %-20s:\t%d\n", "Width", v->pix_fmt.width); seq_printf(s, " %-20s:\t%d\n", "Height", v->pix_fmt.height); seq_printf(s, " %-20s:\t%d\n", "FRC", v->frame_rate); seq_puts(s, "\n"); seq_puts(s, "Compression:\n"); seq_printf(s, " %-20s:\t%s\n", "Format", format_str[v->format]); seq_printf(s, " %-20s:\t%s\n", "Subsampling", v->yuv420 ? "420" : "444"); seq_printf(s, " %-20s:\t%d\n", "Quality", v->jpeg_quality); if (v->format == VIDEO_FMT_ASPEED) { seq_printf(s, " %-20s:\t%s\n", "HQ Mode", v->hq_mode ? "on" : "off"); seq_printf(s, " %-20s:\t%d\n", "HQ Quality", v->hq_mode ? v->jpeg_hq_quality : 0); } seq_puts(s, "\n"); seq_puts(s, "Performance:\n"); seq_printf(s, " %-20s:\t%d\n", "Frame#", v->sequence); seq_printf(s, " %-20s:\n", "Frame Duration(ms)"); seq_printf(s, " %-18s:\t%d\n", "Now", v->perf.duration); seq_printf(s, " %-18s:\t%d\n", "Min", v->perf.duration_min); seq_printf(s, " %-18s:\t%d\n", "Max", v->perf.duration_max); seq_printf(s, " %-20s:\t%d\n", "FPS", (v->perf.totaltime && v->sequence) ? 1000 / (v->perf.totaltime / v->sequence) : 0); return 0; } DEFINE_SHOW_ATTRIBUTE(aspeed_video_debugfs); static struct dentry *debugfs_entry; static void aspeed_video_debugfs_remove(struct aspeed_video *video) { debugfs_remove_recursive(debugfs_entry); debugfs_entry = NULL; } static int aspeed_video_debugfs_create(struct aspeed_video *video) { debugfs_entry = debugfs_create_file(DEVICE_NAME, 0444, NULL, video, &aspeed_video_debugfs_fops); if (!debugfs_entry) aspeed_video_debugfs_remove(video); return !debugfs_entry ? 
-EIO : 0; } #else static void aspeed_video_debugfs_remove(struct aspeed_video *video) { } static int aspeed_video_debugfs_create(struct aspeed_video *video) { return 0; } #endif /* CONFIG_DEBUG_FS */ static int aspeed_video_setup_video(struct aspeed_video *video) { const u64 mask = ~(BIT(V4L2_JPEG_CHROMA_SUBSAMPLING_444) | BIT(V4L2_JPEG_CHROMA_SUBSAMPLING_420)); struct v4l2_device *v4l2_dev = &video->v4l2_dev; struct vb2_queue *vbq = &video->queue; struct video_device *vdev = &video->vdev; struct v4l2_ctrl_handler *hdl = &video->ctrl_handler; int rc; video->pix_fmt.pixelformat = V4L2_PIX_FMT_JPEG; video->pix_fmt.field = V4L2_FIELD_NONE; video->pix_fmt.colorspace = V4L2_COLORSPACE_SRGB; video->pix_fmt.quantization = V4L2_QUANTIZATION_FULL_RANGE; video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL; rc = v4l2_device_register(video->dev, v4l2_dev); if (rc) { dev_err(video->dev, "Failed to register v4l2 device\n"); return rc; } v4l2_ctrl_handler_init(hdl, 4); v4l2_ctrl_new_std(hdl, &aspeed_video_ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, 0, ASPEED_VIDEO_JPEG_NUM_QUALITIES - 1, 1, 0); v4l2_ctrl_new_std_menu(hdl, &aspeed_video_ctrl_ops, V4L2_CID_JPEG_CHROMA_SUBSAMPLING, V4L2_JPEG_CHROMA_SUBSAMPLING_420, mask, V4L2_JPEG_CHROMA_SUBSAMPLING_444); v4l2_ctrl_new_custom(hdl, &aspeed_ctrl_HQ_mode, NULL); v4l2_ctrl_new_custom(hdl, &aspeed_ctrl_HQ_jpeg_quality, NULL); rc = hdl->error; if (rc) { v4l2_ctrl_handler_free(&video->ctrl_handler); v4l2_device_unregister(v4l2_dev); dev_err(video->dev, "Failed to init controls: %d\n", rc); return rc; } v4l2_dev->ctrl_handler = hdl; vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; vbq->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF; vbq->dev = v4l2_dev->dev; vbq->lock = &video->video_lock; vbq->ops = &aspeed_video_vb2_ops; vbq->mem_ops = &vb2_dma_contig_memops; vbq->drv_priv = video; vbq->buf_struct_size = sizeof(struct aspeed_video_buffer); vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; vbq->min_buffers_needed = ASPEED_VIDEO_V4L2_MIN_BUF_REQ; 
rc = vb2_queue_init(vbq); if (rc) { v4l2_ctrl_handler_free(&video->ctrl_handler); v4l2_device_unregister(v4l2_dev); dev_err(video->dev, "Failed to init vb2 queue\n"); return rc; } vdev->queue = vbq; vdev->fops = &aspeed_video_v4l2_fops; vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING; vdev->v4l2_dev = v4l2_dev; strscpy(vdev->name, DEVICE_NAME, sizeof(vdev->name)); vdev->vfl_type = VFL_TYPE_VIDEO; vdev->vfl_dir = VFL_DIR_RX; vdev->release = video_device_release_empty; vdev->ioctl_ops = &aspeed_video_ioctl_ops; vdev->lock = &video->video_lock; video_set_drvdata(vdev, video); rc = video_register_device(vdev, VFL_TYPE_VIDEO, 0); if (rc) { v4l2_ctrl_handler_free(&video->ctrl_handler); v4l2_device_unregister(v4l2_dev); dev_err(video->dev, "Failed to register video device\n"); return rc; } return 0; } static int aspeed_video_init(struct aspeed_video *video) { int irq; int rc; struct device *dev = video->dev; irq = irq_of_parse_and_map(dev->of_node, 0); if (!irq) { dev_err(dev, "Unable to find IRQ\n"); return -ENODEV; } rc = devm_request_threaded_irq(dev, irq, NULL, aspeed_video_irq, IRQF_ONESHOT, DEVICE_NAME, video); if (rc < 0) { dev_err(dev, "Unable to request IRQ %d\n", irq); return rc; } dev_info(video->dev, "irq %d\n", irq); video->eclk = devm_clk_get(dev, "eclk"); if (IS_ERR(video->eclk)) { dev_err(dev, "Unable to get ECLK\n"); return PTR_ERR(video->eclk); } rc = clk_prepare(video->eclk); if (rc) return rc; video->vclk = devm_clk_get(dev, "vclk"); if (IS_ERR(video->vclk)) { dev_err(dev, "Unable to get VCLK\n"); rc = PTR_ERR(video->vclk); goto err_unprepare_eclk; } rc = clk_prepare(video->vclk); if (rc) goto err_unprepare_eclk; of_reserved_mem_device_init(dev); rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (rc) { dev_err(dev, "Failed to set DMA mask\n"); goto err_release_reserved_mem; } if (!aspeed_video_alloc_buf(video, &video->jpeg, VE_JPEG_HEADER_SIZE)) { dev_err(dev, "Failed to allocate DMA for JPEG header\n"); rc 
= -ENOMEM; goto err_release_reserved_mem; } dev_info(video->dev, "alloc mem size(%d) at %pad for jpeg header\n", VE_JPEG_HEADER_SIZE, &video->jpeg.dma); aspeed_video_init_jpeg_table(video->jpeg.virt, video->yuv420); return 0; err_release_reserved_mem: of_reserved_mem_device_release(dev); clk_unprepare(video->vclk); err_unprepare_eclk: clk_unprepare(video->eclk); return rc; } static const struct of_device_id aspeed_video_of_match[] = { { .compatible = "aspeed,ast2400-video-engine", .data = &ast2400_config }, { .compatible = "aspeed,ast2500-video-engine", .data = &ast2500_config }, { .compatible = "aspeed,ast2600-video-engine", .data = &ast2600_config }, {} }; MODULE_DEVICE_TABLE(of, aspeed_video_of_match); static int aspeed_video_probe(struct platform_device *pdev) { const struct aspeed_video_config *config; struct aspeed_video *video; int rc; video = devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL); if (!video) return -ENOMEM; video->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(video->base)) return PTR_ERR(video->base); config = of_device_get_match_data(&pdev->dev); if (!config) return -ENODEV; video->jpeg_mode = config->jpeg_mode; video->comp_size_read = config->comp_size_read; video->frame_rate = 30; video->jpeg_hq_quality = 1; video->dev = &pdev->dev; spin_lock_init(&video->lock); mutex_init(&video->video_lock); init_waitqueue_head(&video->wait); INIT_DELAYED_WORK(&video->res_work, aspeed_video_resolution_work); INIT_LIST_HEAD(&video->buffers); rc = aspeed_video_init(video); if (rc) return rc; rc = aspeed_video_setup_video(video); if (rc) { aspeed_video_free_buf(video, &video->jpeg); clk_unprepare(video->vclk); clk_unprepare(video->eclk); return rc; } rc = aspeed_video_debugfs_create(video); if (rc) dev_err(video->dev, "debugfs create failed\n"); return 0; } static void aspeed_video_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct v4l2_device *v4l2_dev = dev_get_drvdata(dev); struct aspeed_video *video = 
to_aspeed_video(v4l2_dev); aspeed_video_off(video); aspeed_video_debugfs_remove(video); clk_unprepare(video->vclk); clk_unprepare(video->eclk); vb2_video_unregister_device(&video->vdev); v4l2_ctrl_handler_free(&video->ctrl_handler); v4l2_device_unregister(v4l2_dev); aspeed_video_free_buf(video, &video->jpeg); of_reserved_mem_device_release(dev); } static struct platform_driver aspeed_video_driver = { .driver = { .name = DEVICE_NAME, .of_match_table = aspeed_video_of_match, }, .probe = aspeed_video_probe, .remove_new = aspeed_video_remove, }; module_platform_driver(aspeed_video_driver); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0=off,1=info,2=debug,3=reg ops)"); MODULE_DESCRIPTION("ASPEED Video Engine Driver"); MODULE_AUTHOR("Eddie James"); MODULE_LICENSE("GPL v2");
linux-master
drivers/media/platform/aspeed/aspeed-video.c
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Cadence MIPI-CSI2 TX Controller * * Copyright (C) 2017-2019 Cadence Design Systems Inc. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <media/mipi-csi2.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-subdev.h> #define CSI2TX_DEVICE_CONFIG_REG 0x00 #define CSI2TX_DEVICE_CONFIG_STREAMS_MASK GENMASK(6, 4) #define CSI2TX_DEVICE_CONFIG_HAS_DPHY BIT(3) #define CSI2TX_DEVICE_CONFIG_LANES_MASK GENMASK(2, 0) #define CSI2TX_CONFIG_REG 0x20 #define CSI2TX_CONFIG_CFG_REQ BIT(2) #define CSI2TX_CONFIG_SRST_REQ BIT(1) #define CSI2TX_DPHY_CFG_REG 0x28 #define CSI2TX_DPHY_CFG_CLK_RESET BIT(16) #define CSI2TX_DPHY_CFG_LANE_RESET(n) BIT((n) + 12) #define CSI2TX_DPHY_CFG_MODE_MASK GENMASK(9, 8) #define CSI2TX_DPHY_CFG_MODE_LPDT (2 << 8) #define CSI2TX_DPHY_CFG_MODE_HS (1 << 8) #define CSI2TX_DPHY_CFG_MODE_ULPS (0 << 8) #define CSI2TX_DPHY_CFG_CLK_ENABLE BIT(4) #define CSI2TX_DPHY_CFG_LANE_ENABLE(n) BIT(n) #define CSI2TX_DPHY_CLK_WAKEUP_REG 0x2c #define CSI2TX_DPHY_CLK_WAKEUP_ULPS_CYCLES(n) ((n) & 0xffff) #define CSI2TX_DT_CFG_REG(n) (0x80 + (n) * 8) #define CSI2TX_DT_CFG_DT(n) (((n) & 0x3f) << 2) #define CSI2TX_DT_FORMAT_REG(n) (0x84 + (n) * 8) #define CSI2TX_DT_FORMAT_BYTES_PER_LINE(n) (((n) & 0xffff) << 16) #define CSI2TX_DT_FORMAT_MAX_LINE_NUM(n) ((n) & 0xffff) #define CSI2TX_STREAM_IF_CFG_REG(n) (0x100 + (n) * 4) #define CSI2TX_STREAM_IF_CFG_FILL_LEVEL(n) ((n) & 0x1f) /* CSI2TX V2 Registers */ #define CSI2TX_V2_DPHY_CFG_REG 0x28 #define CSI2TX_V2_DPHY_CFG_RESET BIT(16) #define CSI2TX_V2_DPHY_CFG_CLOCK_MODE BIT(10) #define CSI2TX_V2_DPHY_CFG_MODE_MASK GENMASK(9, 8) #define CSI2TX_V2_DPHY_CFG_MODE_LPDT (2 << 8) #define CSI2TX_V2_DPHY_CFG_MODE_HS (1 << 8) #define 
CSI2TX_V2_DPHY_CFG_MODE_ULPS (0 << 8) #define CSI2TX_V2_DPHY_CFG_CLK_ENABLE BIT(4) #define CSI2TX_V2_DPHY_CFG_LANE_ENABLE(n) BIT(n) #define CSI2TX_LANES_MAX 4 #define CSI2TX_STREAMS_MAX 4 enum csi2tx_pads { CSI2TX_PAD_SOURCE, CSI2TX_PAD_SINK_STREAM0, CSI2TX_PAD_SINK_STREAM1, CSI2TX_PAD_SINK_STREAM2, CSI2TX_PAD_SINK_STREAM3, CSI2TX_PAD_MAX, }; struct csi2tx_fmt { u32 mbus; u32 dt; u32 bpp; }; struct csi2tx_priv; /* CSI2TX Variant Operations */ struct csi2tx_vops { void (*dphy_setup)(struct csi2tx_priv *csi2tx); }; struct csi2tx_priv { struct device *dev; unsigned int count; /* * Used to prevent race conditions between multiple, * concurrent calls to start and stop. */ struct mutex lock; void __iomem *base; struct csi2tx_vops *vops; struct clk *esc_clk; struct clk *p_clk; struct clk *pixel_clk[CSI2TX_STREAMS_MAX]; struct v4l2_subdev subdev; struct media_pad pads[CSI2TX_PAD_MAX]; struct v4l2_mbus_framefmt pad_fmts[CSI2TX_PAD_MAX]; bool has_internal_dphy; u8 lanes[CSI2TX_LANES_MAX]; unsigned int num_lanes; unsigned int max_lanes; unsigned int max_streams; }; static const struct csi2tx_fmt csi2tx_formats[] = { { .mbus = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 2, .dt = MIPI_CSI2_DT_YUV422_8B, }, { .mbus = MEDIA_BUS_FMT_RGB888_1X24, .bpp = 3, .dt = MIPI_CSI2_DT_RGB888, }, }; static const struct v4l2_mbus_framefmt fmt_default = { .width = 1280, .height = 720, .code = MEDIA_BUS_FMT_RGB888_1X24, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_DEFAULT, }; static inline struct csi2tx_priv *v4l2_subdev_to_csi2tx(struct v4l2_subdev *subdev) { return container_of(subdev, struct csi2tx_priv, subdev); } static const struct csi2tx_fmt *csi2tx_get_fmt_from_mbus(u32 mbus) { unsigned int i; for (i = 0; i < ARRAY_SIZE(csi2tx_formats); i++) if (csi2tx_formats[i].mbus == mbus) return &csi2tx_formats[i]; return NULL; } static int csi2tx_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->pad || code->index >= 
ARRAY_SIZE(csi2tx_formats)) return -EINVAL; code->code = csi2tx_formats[code->index].mbus; return 0; } static struct v4l2_mbus_framefmt * __csi2tx_get_pad_format(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct csi2tx_priv *csi2tx = v4l2_subdev_to_csi2tx(subdev); if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) return v4l2_subdev_get_try_format(subdev, sd_state, fmt->pad); return &csi2tx->pad_fmts[fmt->pad]; } static int csi2tx_get_pad_format(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { const struct v4l2_mbus_framefmt *format; /* Multiplexed pad? */ if (fmt->pad == CSI2TX_PAD_SOURCE) return -EINVAL; format = __csi2tx_get_pad_format(subdev, sd_state, fmt); if (!format) return -EINVAL; fmt->format = *format; return 0; } static int csi2tx_set_pad_format(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { const struct v4l2_mbus_framefmt *src_format = &fmt->format; struct v4l2_mbus_framefmt *dst_format; /* Multiplexed pad? 
*/ if (fmt->pad == CSI2TX_PAD_SOURCE) return -EINVAL; if (!csi2tx_get_fmt_from_mbus(fmt->format.code)) src_format = &fmt_default; dst_format = __csi2tx_get_pad_format(subdev, sd_state, fmt); if (!dst_format) return -EINVAL; *dst_format = *src_format; return 0; } static const struct v4l2_subdev_pad_ops csi2tx_pad_ops = { .enum_mbus_code = csi2tx_enum_mbus_code, .get_fmt = csi2tx_get_pad_format, .set_fmt = csi2tx_set_pad_format, }; /* Set Wake Up value in the D-PHY */ static void csi2tx_dphy_set_wakeup(struct csi2tx_priv *csi2tx) { writel(CSI2TX_DPHY_CLK_WAKEUP_ULPS_CYCLES(32), csi2tx->base + CSI2TX_DPHY_CLK_WAKEUP_REG); } /* * Finishes the D-PHY initialization * reg dphy cfg value to be used */ static void csi2tx_dphy_init_finish(struct csi2tx_priv *csi2tx, u32 reg) { unsigned int i; udelay(10); /* Enable our (clock and data) lanes */ reg |= CSI2TX_DPHY_CFG_CLK_ENABLE; for (i = 0; i < csi2tx->num_lanes; i++) reg |= CSI2TX_DPHY_CFG_LANE_ENABLE(csi2tx->lanes[i] - 1); writel(reg, csi2tx->base + CSI2TX_DPHY_CFG_REG); udelay(10); /* Switch to HS mode */ reg &= ~CSI2TX_DPHY_CFG_MODE_MASK; writel(reg | CSI2TX_DPHY_CFG_MODE_HS, csi2tx->base + CSI2TX_DPHY_CFG_REG); } /* Configures D-PHY in CSIv1.3 */ static void csi2tx_dphy_setup(struct csi2tx_priv *csi2tx) { u32 reg; unsigned int i; csi2tx_dphy_set_wakeup(csi2tx); /* Put our lanes (clock and data) out of reset */ reg = CSI2TX_DPHY_CFG_CLK_RESET | CSI2TX_DPHY_CFG_MODE_LPDT; for (i = 0; i < csi2tx->num_lanes; i++) reg |= CSI2TX_DPHY_CFG_LANE_RESET(csi2tx->lanes[i] - 1); writel(reg, csi2tx->base + CSI2TX_DPHY_CFG_REG); csi2tx_dphy_init_finish(csi2tx, reg); } /* Configures D-PHY in CSIv2 */ static void csi2tx_v2_dphy_setup(struct csi2tx_priv *csi2tx) { u32 reg; csi2tx_dphy_set_wakeup(csi2tx); /* Put our lanes (clock and data) out of reset */ reg = CSI2TX_V2_DPHY_CFG_RESET | CSI2TX_V2_DPHY_CFG_MODE_LPDT; writel(reg, csi2tx->base + CSI2TX_V2_DPHY_CFG_REG); csi2tx_dphy_init_finish(csi2tx, reg); } static void csi2tx_reset(struct 
csi2tx_priv *csi2tx) { writel(CSI2TX_CONFIG_SRST_REQ, csi2tx->base + CSI2TX_CONFIG_REG); udelay(10); } static int csi2tx_start(struct csi2tx_priv *csi2tx) { struct media_entity *entity = &csi2tx->subdev.entity; struct media_link *link; unsigned int i; csi2tx_reset(csi2tx); writel(CSI2TX_CONFIG_CFG_REQ, csi2tx->base + CSI2TX_CONFIG_REG); udelay(10); if (csi2tx->vops && csi2tx->vops->dphy_setup) { csi2tx->vops->dphy_setup(csi2tx); udelay(10); } /* * Create a static mapping between the CSI virtual channels * and the input streams. * * This should be enhanced, but v4l2 lacks the support for * changing that mapping dynamically at the moment. * * We're protected from the userspace setting up links at the * same time by the upper layer having called * media_pipeline_start(). */ list_for_each_entry(link, &entity->links, list) { struct v4l2_mbus_framefmt *mfmt; const struct csi2tx_fmt *fmt; unsigned int stream; int pad_idx = -1; /* Only consider our enabled input pads */ for (i = CSI2TX_PAD_SINK_STREAM0; i < CSI2TX_PAD_MAX; i++) { struct media_pad *pad = &csi2tx->pads[i]; if ((pad == link->sink) && (link->flags & MEDIA_LNK_FL_ENABLED)) { pad_idx = i; break; } } if (pad_idx < 0) continue; mfmt = &csi2tx->pad_fmts[pad_idx]; fmt = csi2tx_get_fmt_from_mbus(mfmt->code); if (!fmt) continue; stream = pad_idx - CSI2TX_PAD_SINK_STREAM0; /* * We use the stream ID there, but it's wrong. * * A stream could very well send a data type that is * not equal to its stream ID. We need to find a * proper way to address it. */ writel(CSI2TX_DT_CFG_DT(fmt->dt), csi2tx->base + CSI2TX_DT_CFG_REG(stream)); writel(CSI2TX_DT_FORMAT_BYTES_PER_LINE(mfmt->width * fmt->bpp) | CSI2TX_DT_FORMAT_MAX_LINE_NUM(mfmt->height + 1), csi2tx->base + CSI2TX_DT_FORMAT_REG(stream)); /* * TODO: This needs to be calculated based on the * output CSI2 clock rate. 
*/ writel(CSI2TX_STREAM_IF_CFG_FILL_LEVEL(4), csi2tx->base + CSI2TX_STREAM_IF_CFG_REG(stream)); } /* Disable the configuration mode */ writel(0, csi2tx->base + CSI2TX_CONFIG_REG); return 0; } static void csi2tx_stop(struct csi2tx_priv *csi2tx) { writel(CSI2TX_CONFIG_CFG_REQ | CSI2TX_CONFIG_SRST_REQ, csi2tx->base + CSI2TX_CONFIG_REG); } static int csi2tx_s_stream(struct v4l2_subdev *subdev, int enable) { struct csi2tx_priv *csi2tx = v4l2_subdev_to_csi2tx(subdev); int ret = 0; mutex_lock(&csi2tx->lock); if (enable) { /* * If we're not the first users, there's no need to * enable the whole controller. */ if (!csi2tx->count) { ret = csi2tx_start(csi2tx); if (ret) goto out; } csi2tx->count++; } else { csi2tx->count--; /* * Let the last user turn off the lights. */ if (!csi2tx->count) csi2tx_stop(csi2tx); } out: mutex_unlock(&csi2tx->lock); return ret; } static const struct v4l2_subdev_video_ops csi2tx_video_ops = { .s_stream = csi2tx_s_stream, }; static const struct v4l2_subdev_ops csi2tx_subdev_ops = { .pad = &csi2tx_pad_ops, .video = &csi2tx_video_ops, }; static int csi2tx_get_resources(struct csi2tx_priv *csi2tx, struct platform_device *pdev) { unsigned int i; u32 dev_cfg; int ret; csi2tx->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(csi2tx->base)) return PTR_ERR(csi2tx->base); csi2tx->p_clk = devm_clk_get(&pdev->dev, "p_clk"); if (IS_ERR(csi2tx->p_clk)) { dev_err(&pdev->dev, "Couldn't get p_clk\n"); return PTR_ERR(csi2tx->p_clk); } csi2tx->esc_clk = devm_clk_get(&pdev->dev, "esc_clk"); if (IS_ERR(csi2tx->esc_clk)) { dev_err(&pdev->dev, "Couldn't get the esc_clk\n"); return PTR_ERR(csi2tx->esc_clk); } ret = clk_prepare_enable(csi2tx->p_clk); if (ret) { dev_err(&pdev->dev, "Couldn't prepare and enable p_clk\n"); return ret; } dev_cfg = readl(csi2tx->base + CSI2TX_DEVICE_CONFIG_REG); clk_disable_unprepare(csi2tx->p_clk); csi2tx->max_lanes = dev_cfg & CSI2TX_DEVICE_CONFIG_LANES_MASK; if (csi2tx->max_lanes > CSI2TX_LANES_MAX) { dev_err(&pdev->dev, "Invalid 
number of lanes: %u\n", csi2tx->max_lanes); return -EINVAL; } csi2tx->max_streams = (dev_cfg & CSI2TX_DEVICE_CONFIG_STREAMS_MASK) >> 4; if (csi2tx->max_streams > CSI2TX_STREAMS_MAX) { dev_err(&pdev->dev, "Invalid number of streams: %u\n", csi2tx->max_streams); return -EINVAL; } csi2tx->has_internal_dphy = !!(dev_cfg & CSI2TX_DEVICE_CONFIG_HAS_DPHY); for (i = 0; i < csi2tx->max_streams; i++) { char clk_name[16]; snprintf(clk_name, sizeof(clk_name), "pixel_if%u_clk", i); csi2tx->pixel_clk[i] = devm_clk_get(&pdev->dev, clk_name); if (IS_ERR(csi2tx->pixel_clk[i])) { dev_err(&pdev->dev, "Couldn't get clock %s\n", clk_name); return PTR_ERR(csi2tx->pixel_clk[i]); } } return 0; } static int csi2tx_check_lanes(struct csi2tx_priv *csi2tx) { struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 }; struct device_node *ep; int ret, i; ep = of_graph_get_endpoint_by_regs(csi2tx->dev->of_node, 0, 0); if (!ep) return -EINVAL; ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &v4l2_ep); if (ret) { dev_err(csi2tx->dev, "Could not parse v4l2 endpoint\n"); goto out; } if (v4l2_ep.bus_type != V4L2_MBUS_CSI2_DPHY) { dev_err(csi2tx->dev, "Unsupported media bus type: 0x%x\n", v4l2_ep.bus_type); ret = -EINVAL; goto out; } csi2tx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes; if (csi2tx->num_lanes > csi2tx->max_lanes) { dev_err(csi2tx->dev, "Current configuration uses more lanes than supported\n"); ret = -EINVAL; goto out; } for (i = 0; i < csi2tx->num_lanes; i++) { if (v4l2_ep.bus.mipi_csi2.data_lanes[i] < 1) { dev_err(csi2tx->dev, "Invalid lane[%d] number: %u\n", i, v4l2_ep.bus.mipi_csi2.data_lanes[i]); ret = -EINVAL; goto out; } } memcpy(csi2tx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes, sizeof(csi2tx->lanes)); out: of_node_put(ep); return ret; } static const struct csi2tx_vops csi2tx_vops = { .dphy_setup = csi2tx_dphy_setup, }; static const struct csi2tx_vops csi2tx_v2_vops = { .dphy_setup = csi2tx_v2_dphy_setup, }; static const struct of_device_id csi2tx_of_table[] = { { 
.compatible = "cdns,csi2tx", .data = &csi2tx_vops }, { .compatible = "cdns,csi2tx-1.3", .data = &csi2tx_vops }, { .compatible = "cdns,csi2tx-2.1", .data = &csi2tx_v2_vops }, { } }; MODULE_DEVICE_TABLE(of, csi2tx_of_table); static int csi2tx_probe(struct platform_device *pdev) { struct csi2tx_priv *csi2tx; const struct of_device_id *of_id; unsigned int i; int ret; csi2tx = kzalloc(sizeof(*csi2tx), GFP_KERNEL); if (!csi2tx) return -ENOMEM; platform_set_drvdata(pdev, csi2tx); mutex_init(&csi2tx->lock); csi2tx->dev = &pdev->dev; ret = csi2tx_get_resources(csi2tx, pdev); if (ret) goto err_free_priv; of_id = of_match_node(csi2tx_of_table, pdev->dev.of_node); csi2tx->vops = (struct csi2tx_vops *)of_id->data; v4l2_subdev_init(&csi2tx->subdev, &csi2tx_subdev_ops); csi2tx->subdev.owner = THIS_MODULE; csi2tx->subdev.dev = &pdev->dev; csi2tx->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(csi2tx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s", KBUILD_MODNAME, dev_name(&pdev->dev)); ret = csi2tx_check_lanes(csi2tx); if (ret) goto err_free_priv; /* Create our media pads */ csi2tx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; csi2tx->pads[CSI2TX_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; for (i = CSI2TX_PAD_SINK_STREAM0; i < CSI2TX_PAD_MAX; i++) csi2tx->pads[i].flags = MEDIA_PAD_FL_SINK; /* * Only the input pads are considered to have a format at the * moment. The CSI link can multiplex various streams with * different formats, and we can't expose this in v4l2 right * now. */ for (i = CSI2TX_PAD_SINK_STREAM0; i < CSI2TX_PAD_MAX; i++) csi2tx->pad_fmts[i] = fmt_default; ret = media_entity_pads_init(&csi2tx->subdev.entity, CSI2TX_PAD_MAX, csi2tx->pads); if (ret) goto err_free_priv; ret = v4l2_async_register_subdev(&csi2tx->subdev); if (ret < 0) goto err_free_priv; dev_info(&pdev->dev, "Probed CSI2TX with %u/%u lanes, %u streams, %s D-PHY\n", csi2tx->num_lanes, csi2tx->max_lanes, csi2tx->max_streams, csi2tx->has_internal_dphy ? 
"internal" : "no"); return 0; err_free_priv: kfree(csi2tx); return ret; } static void csi2tx_remove(struct platform_device *pdev) { struct csi2tx_priv *csi2tx = platform_get_drvdata(pdev); v4l2_async_unregister_subdev(&csi2tx->subdev); kfree(csi2tx); } static struct platform_driver csi2tx_driver = { .probe = csi2tx_probe, .remove_new = csi2tx_remove, .driver = { .name = "cdns-csi2tx", .of_match_table = csi2tx_of_table, }, }; module_platform_driver(csi2tx_driver); MODULE_AUTHOR("Maxime Ripard <[email protected]>"); MODULE_DESCRIPTION("Cadence CSI2-TX controller"); MODULE_LICENSE("GPL");
linux-master
drivers/media/platform/cadence/cdns-csi2tx.c
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Cadence MIPI-CSI2 RX Controller v1.3 * * Copyright (C) 2017 Cadence Design Systems Inc. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/slab.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-subdev.h> #define CSI2RX_DEVICE_CFG_REG 0x000 #define CSI2RX_SOFT_RESET_REG 0x004 #define CSI2RX_SOFT_RESET_PROTOCOL BIT(1) #define CSI2RX_SOFT_RESET_FRONT BIT(0) #define CSI2RX_STATIC_CFG_REG 0x008 #define CSI2RX_STATIC_CFG_DLANE_MAP(llane, plane) ((plane) << (16 + (llane) * 4)) #define CSI2RX_STATIC_CFG_LANES_MASK GENMASK(11, 8) #define CSI2RX_DPHY_LANE_CTRL_REG 0x40 #define CSI2RX_DPHY_CL_RST BIT(16) #define CSI2RX_DPHY_DL_RST(i) BIT((i) + 12) #define CSI2RX_DPHY_CL_EN BIT(4) #define CSI2RX_DPHY_DL_EN(i) BIT(i) #define CSI2RX_STREAM_BASE(n) (((n) + 1) * 0x100) #define CSI2RX_STREAM_CTRL_REG(n) (CSI2RX_STREAM_BASE(n) + 0x000) #define CSI2RX_STREAM_CTRL_START BIT(0) #define CSI2RX_STREAM_DATA_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x008) #define CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT BIT(31) #define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n) BIT((n) + 16) #define CSI2RX_STREAM_CFG_REG(n) (CSI2RX_STREAM_BASE(n) + 0x00c) #define CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF (1 << 8) #define CSI2RX_LANES_MAX 4 #define CSI2RX_STREAMS_MAX 4 enum csi2rx_pads { CSI2RX_PAD_SINK, CSI2RX_PAD_SOURCE_STREAM0, CSI2RX_PAD_SOURCE_STREAM1, CSI2RX_PAD_SOURCE_STREAM2, CSI2RX_PAD_SOURCE_STREAM3, CSI2RX_PAD_MAX, }; struct csi2rx_priv { struct device *dev; unsigned int count; /* * Used to prevent race conditions between multiple, * concurrent calls to start and stop. 
*/ struct mutex lock; void __iomem *base; struct clk *sys_clk; struct clk *p_clk; struct clk *pixel_clk[CSI2RX_STREAMS_MAX]; struct reset_control *sys_rst; struct reset_control *p_rst; struct reset_control *pixel_rst[CSI2RX_STREAMS_MAX]; struct phy *dphy; u8 lanes[CSI2RX_LANES_MAX]; u8 num_lanes; u8 max_lanes; u8 max_streams; bool has_internal_dphy; struct v4l2_subdev subdev; struct v4l2_async_notifier notifier; struct media_pad pads[CSI2RX_PAD_MAX]; /* Remote source */ struct v4l2_subdev *source_subdev; int source_pad; }; static inline struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev) { return container_of(subdev, struct csi2rx_priv, subdev); } static void csi2rx_reset(struct csi2rx_priv *csi2rx) { writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT, csi2rx->base + CSI2RX_SOFT_RESET_REG); udelay(10); writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG); } static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx) { union phy_configure_opts opts = { }; int ret; ret = phy_power_on(csi2rx->dphy); if (ret) return ret; ret = phy_configure(csi2rx->dphy, &opts); if (ret) { phy_power_off(csi2rx->dphy); return ret; } return 0; } static int csi2rx_start(struct csi2rx_priv *csi2rx) { unsigned int i; unsigned long lanes_used = 0; u32 reg; int ret; ret = clk_prepare_enable(csi2rx->p_clk); if (ret) return ret; reset_control_deassert(csi2rx->p_rst); csi2rx_reset(csi2rx); reg = csi2rx->num_lanes << 8; for (i = 0; i < csi2rx->num_lanes; i++) { reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, csi2rx->lanes[i]); set_bit(csi2rx->lanes[i], &lanes_used); } /* * Even the unused lanes need to be mapped. In order to avoid * to map twice to the same physical lane, keep the lanes used * in the previous loop, and only map unused physical lanes to * the rest of our logical lanes. 
*/ for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) { unsigned int idx = find_first_zero_bit(&lanes_used, csi2rx->max_lanes); set_bit(idx, &lanes_used); reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1); } writel(reg, csi2rx->base + CSI2RX_STATIC_CFG_REG); ret = v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, true); if (ret) goto err_disable_pclk; /* Enable DPHY clk and data lanes. */ if (csi2rx->dphy) { reg = CSI2RX_DPHY_CL_EN | CSI2RX_DPHY_CL_RST; for (i = 0; i < csi2rx->num_lanes; i++) { reg |= CSI2RX_DPHY_DL_EN(csi2rx->lanes[i] - 1); reg |= CSI2RX_DPHY_DL_RST(csi2rx->lanes[i] - 1); } writel(reg, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG); } /* * Create a static mapping between the CSI virtual channels * and the output stream. * * This should be enhanced, but v4l2 lacks the support for * changing that mapping dynamically. * * We also cannot enable and disable independent streams here, * hence the reference counting. */ for (i = 0; i < csi2rx->max_streams; i++) { ret = clk_prepare_enable(csi2rx->pixel_clk[i]); if (ret) goto err_disable_pixclk; reset_control_deassert(csi2rx->pixel_rst[i]); writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF, csi2rx->base + CSI2RX_STREAM_CFG_REG(i)); writel(CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT | CSI2RX_STREAM_DATA_CFG_VC_SELECT(i), csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i)); writel(CSI2RX_STREAM_CTRL_START, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i)); } ret = clk_prepare_enable(csi2rx->sys_clk); if (ret) goto err_disable_pixclk; reset_control_deassert(csi2rx->sys_rst); if (csi2rx->dphy) { ret = csi2rx_configure_ext_dphy(csi2rx); if (ret) { dev_err(csi2rx->dev, "Failed to configure external DPHY: %d\n", ret); goto err_disable_sysclk; } } clk_disable_unprepare(csi2rx->p_clk); return 0; err_disable_sysclk: clk_disable_unprepare(csi2rx->sys_clk); err_disable_pixclk: for (; i > 0; i--) { reset_control_assert(csi2rx->pixel_rst[i - 1]); clk_disable_unprepare(csi2rx->pixel_clk[i - 1]); } err_disable_pclk: 
clk_disable_unprepare(csi2rx->p_clk); return ret; } static void csi2rx_stop(struct csi2rx_priv *csi2rx) { unsigned int i; clk_prepare_enable(csi2rx->p_clk); reset_control_assert(csi2rx->sys_rst); clk_disable_unprepare(csi2rx->sys_clk); for (i = 0; i < csi2rx->max_streams; i++) { writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i)); reset_control_assert(csi2rx->pixel_rst[i]); clk_disable_unprepare(csi2rx->pixel_clk[i]); } reset_control_assert(csi2rx->p_rst); clk_disable_unprepare(csi2rx->p_clk); if (v4l2_subdev_call(csi2rx->source_subdev, video, s_stream, false)) dev_warn(csi2rx->dev, "Couldn't disable our subdev\n"); if (csi2rx->dphy) { writel(0, csi2rx->base + CSI2RX_DPHY_LANE_CTRL_REG); if (phy_power_off(csi2rx->dphy)) dev_warn(csi2rx->dev, "Couldn't power off DPHY\n"); } } static int csi2rx_s_stream(struct v4l2_subdev *subdev, int enable) { struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev); int ret = 0; mutex_lock(&csi2rx->lock); if (enable) { /* * If we're not the first users, there's no need to * enable the whole controller. */ if (!csi2rx->count) { ret = csi2rx_start(csi2rx); if (ret) goto out; } csi2rx->count++; } else { csi2rx->count--; /* * Let the last user turn off the lights. 
*/ if (!csi2rx->count) csi2rx_stop(csi2rx); } out: mutex_unlock(&csi2rx->lock); return ret; } static const struct v4l2_subdev_video_ops csi2rx_video_ops = { .s_stream = csi2rx_s_stream, }; static const struct v4l2_subdev_ops csi2rx_subdev_ops = { .video = &csi2rx_video_ops, }; static int csi2rx_async_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *s_subdev, struct v4l2_async_connection *asd) { struct v4l2_subdev *subdev = notifier->sd; struct csi2rx_priv *csi2rx = v4l2_subdev_to_csi2rx(subdev); csi2rx->source_pad = media_entity_get_fwnode_pad(&s_subdev->entity, s_subdev->fwnode, MEDIA_PAD_FL_SOURCE); if (csi2rx->source_pad < 0) { dev_err(csi2rx->dev, "Couldn't find output pad for subdev %s\n", s_subdev->name); return csi2rx->source_pad; } csi2rx->source_subdev = s_subdev; dev_dbg(csi2rx->dev, "Bound %s pad: %d\n", s_subdev->name, csi2rx->source_pad); return media_create_pad_link(&csi2rx->source_subdev->entity, csi2rx->source_pad, &csi2rx->subdev.entity, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); } static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = { .bound = csi2rx_async_bound, }; static int csi2rx_get_resources(struct csi2rx_priv *csi2rx, struct platform_device *pdev) { unsigned char i; u32 dev_cfg; int ret; csi2rx->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(csi2rx->base)) return PTR_ERR(csi2rx->base); csi2rx->sys_clk = devm_clk_get(&pdev->dev, "sys_clk"); if (IS_ERR(csi2rx->sys_clk)) { dev_err(&pdev->dev, "Couldn't get sys clock\n"); return PTR_ERR(csi2rx->sys_clk); } csi2rx->p_clk = devm_clk_get(&pdev->dev, "p_clk"); if (IS_ERR(csi2rx->p_clk)) { dev_err(&pdev->dev, "Couldn't get P clock\n"); return PTR_ERR(csi2rx->p_clk); } csi2rx->sys_rst = devm_reset_control_get_optional_exclusive(&pdev->dev, "sys"); if (IS_ERR(csi2rx->sys_rst)) return PTR_ERR(csi2rx->sys_rst); csi2rx->p_rst = devm_reset_control_get_optional_exclusive(&pdev->dev, "reg_bank"); if (IS_ERR(csi2rx->p_rst)) return PTR_ERR(csi2rx->p_rst); 
csi2rx->dphy = devm_phy_optional_get(&pdev->dev, "dphy"); if (IS_ERR(csi2rx->dphy)) { dev_err(&pdev->dev, "Couldn't get external D-PHY\n"); return PTR_ERR(csi2rx->dphy); } ret = clk_prepare_enable(csi2rx->p_clk); if (ret) { dev_err(&pdev->dev, "Couldn't prepare and enable P clock\n"); return ret; } dev_cfg = readl(csi2rx->base + CSI2RX_DEVICE_CFG_REG); clk_disable_unprepare(csi2rx->p_clk); csi2rx->max_lanes = dev_cfg & 7; if (csi2rx->max_lanes > CSI2RX_LANES_MAX) { dev_err(&pdev->dev, "Invalid number of lanes: %u\n", csi2rx->max_lanes); return -EINVAL; } csi2rx->max_streams = (dev_cfg >> 4) & 7; if (csi2rx->max_streams > CSI2RX_STREAMS_MAX) { dev_err(&pdev->dev, "Invalid number of streams: %u\n", csi2rx->max_streams); return -EINVAL; } csi2rx->has_internal_dphy = dev_cfg & BIT(3) ? true : false; /* * FIXME: Once we'll have internal D-PHY support, the check * will need to be removed. */ if (!csi2rx->dphy && csi2rx->has_internal_dphy) { dev_err(&pdev->dev, "Internal D-PHY not supported yet\n"); return -EINVAL; } for (i = 0; i < csi2rx->max_streams; i++) { char name[16]; snprintf(name, sizeof(name), "pixel_if%u_clk", i); csi2rx->pixel_clk[i] = devm_clk_get(&pdev->dev, name); if (IS_ERR(csi2rx->pixel_clk[i])) { dev_err(&pdev->dev, "Couldn't get clock %s\n", name); return PTR_ERR(csi2rx->pixel_clk[i]); } snprintf(name, sizeof(name), "pixel_if%u", i); csi2rx->pixel_rst[i] = devm_reset_control_get_optional_exclusive(&pdev->dev, name); if (IS_ERR(csi2rx->pixel_rst[i])) return PTR_ERR(csi2rx->pixel_rst[i]); } return 0; } static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx) { struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 }; struct v4l2_async_connection *asd; struct fwnode_handle *fwh; struct device_node *ep; int ret; ep = of_graph_get_endpoint_by_regs(csi2rx->dev->of_node, 0, 0); if (!ep) return -EINVAL; fwh = of_fwnode_handle(ep); ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep); if (ret) { dev_err(csi2rx->dev, "Could not parse v4l2 endpoint\n"); of_node_put(ep); 
return ret; } if (v4l2_ep.bus_type != V4L2_MBUS_CSI2_DPHY) { dev_err(csi2rx->dev, "Unsupported media bus type: 0x%x\n", v4l2_ep.bus_type); of_node_put(ep); return -EINVAL; } memcpy(csi2rx->lanes, v4l2_ep.bus.mipi_csi2.data_lanes, sizeof(csi2rx->lanes)); csi2rx->num_lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes; if (csi2rx->num_lanes > csi2rx->max_lanes) { dev_err(csi2rx->dev, "Unsupported number of data-lanes: %d\n", csi2rx->num_lanes); of_node_put(ep); return -EINVAL; } v4l2_async_subdev_nf_init(&csi2rx->notifier, &csi2rx->subdev); asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh, struct v4l2_async_connection); of_node_put(ep); if (IS_ERR(asd)) return PTR_ERR(asd); csi2rx->notifier.ops = &csi2rx_notifier_ops; ret = v4l2_async_nf_register(&csi2rx->notifier); if (ret) v4l2_async_nf_cleanup(&csi2rx->notifier); return ret; } static int csi2rx_probe(struct platform_device *pdev) { struct csi2rx_priv *csi2rx; unsigned int i; int ret; csi2rx = kzalloc(sizeof(*csi2rx), GFP_KERNEL); if (!csi2rx) return -ENOMEM; platform_set_drvdata(pdev, csi2rx); csi2rx->dev = &pdev->dev; mutex_init(&csi2rx->lock); ret = csi2rx_get_resources(csi2rx, pdev); if (ret) goto err_free_priv; ret = csi2rx_parse_dt(csi2rx); if (ret) goto err_free_priv; csi2rx->subdev.owner = THIS_MODULE; csi2rx->subdev.dev = &pdev->dev; v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops); v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev); snprintf(csi2rx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s", KBUILD_MODNAME, dev_name(&pdev->dev)); /* Create our media pads */ csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK; for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX, csi2rx->pads); if (ret) goto err_cleanup; ret = v4l2_async_register_subdev(&csi2rx->subdev); if (ret < 0) goto err_cleanup; dev_info(&pdev->dev, "Probed 
CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n", csi2rx->num_lanes, csi2rx->max_lanes, csi2rx->max_streams, csi2rx->dphy ? "external" : csi2rx->has_internal_dphy ? "internal" : "no"); return 0; err_cleanup: v4l2_async_nf_cleanup(&csi2rx->notifier); err_free_priv: kfree(csi2rx); return ret; } static void csi2rx_remove(struct platform_device *pdev) { struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev); v4l2_async_unregister_subdev(&csi2rx->subdev); kfree(csi2rx); } static const struct of_device_id csi2rx_of_table[] = { { .compatible = "starfive,jh7110-csi2rx" }, { .compatible = "cdns,csi2rx" }, { }, }; MODULE_DEVICE_TABLE(of, csi2rx_of_table); static struct platform_driver csi2rx_driver = { .probe = csi2rx_probe, .remove_new = csi2rx_remove, .driver = { .name = "cdns-csi2rx", .of_match_table = csi2rx_of_table, }, }; module_platform_driver(csi2rx_driver); MODULE_AUTHOR("Maxime Ripard <[email protected]>"); MODULE_DESCRIPTION("Cadence CSI2-RX controller"); MODULE_LICENSE("GPL");
linux-master
drivers/media/platform/cadence/cdns-csi2rx.c
// SPDX-License-Identifier: GPL-2.0 /* * SuperH Video Output Unit (VOU) driver * * Copyright (C) 2010, Guennadi Liakhovetski <[email protected]> */ #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/module.h> #include <media/drv-intf/sh_vou.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mediabus.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-contig.h> /* Mirror addresses are not available for all registers */ #define VOUER 0 #define VOUCR 4 #define VOUSTR 8 #define VOUVCR 0xc #define VOUISR 0x10 #define VOUBCR 0x14 #define VOUDPR 0x18 #define VOUDSR 0x1c #define VOUVPR 0x20 #define VOUIR 0x24 #define VOUSRR 0x28 #define VOUMSR 0x2c #define VOUHIR 0x30 #define VOUDFR 0x34 #define VOUAD1R 0x38 #define VOUAD2R 0x3c #define VOUAIR 0x40 #define VOUSWR 0x44 #define VOURCR 0x48 #define VOURPR 0x50 enum sh_vou_status { SH_VOU_IDLE, SH_VOU_INITIALISING, SH_VOU_RUNNING, }; #define VOU_MIN_IMAGE_WIDTH 16 #define VOU_MAX_IMAGE_WIDTH 720 #define VOU_MIN_IMAGE_HEIGHT 16 struct sh_vou_buffer { struct vb2_v4l2_buffer vb; struct list_head list; }; static inline struct sh_vou_buffer *to_sh_vou_buffer(struct vb2_v4l2_buffer *vb2) { return container_of(vb2, struct sh_vou_buffer, vb); } struct sh_vou_device { struct v4l2_device v4l2_dev; struct video_device vdev; struct sh_vou_pdata *pdata; spinlock_t lock; void __iomem *base; /* State information */ struct v4l2_pix_format pix; struct v4l2_rect rect; struct list_head buf_list; v4l2_std_id std; int pix_idx; struct vb2_queue queue; struct sh_vou_buffer *active; enum sh_vou_status status; unsigned sequence; struct mutex fop_lock; }; /* Register access routines for 
sides A, B and mirror addresses */
/* Write @value to register side A only. */
static void sh_vou_reg_a_write(struct sh_vou_device *vou_dev, unsigned int reg,
			       u32 value)
{
	__raw_writel(value, vou_dev->base + reg);
}

/* Write @value to both register sides: A (+0) and B (+0x1000). */
static void sh_vou_reg_ab_write(struct sh_vou_device *vou_dev, unsigned int reg,
				u32 value)
{
	__raw_writel(value, vou_dev->base + reg);
	__raw_writel(value, vou_dev->base + reg + 0x1000);
}

/* Write @value through the mirror address range (+0x2000). */
static void sh_vou_reg_m_write(struct sh_vou_device *vou_dev, unsigned int reg,
			       u32 value)
{
	__raw_writel(value, vou_dev->base + reg + 0x2000);
}

static u32 sh_vou_reg_a_read(struct sh_vou_device *vou_dev, unsigned int reg)
{
	return __raw_readl(vou_dev->base + reg);
}

/* Read-modify-write on side A: only the bits in @mask take @value. */
static void sh_vou_reg_a_set(struct sh_vou_device *vou_dev, unsigned int reg,
			     u32 value, u32 mask)
{
	u32 old = __raw_readl(vou_dev->base + reg);

	value = (value & mask) | (old & ~mask);
	__raw_writel(value, vou_dev->base + reg);
}

/* Read-modify-write on side B (offset +0x1000 from side A). */
static void sh_vou_reg_b_set(struct sh_vou_device *vou_dev, unsigned int reg,
			     u32 value, u32 mask)
{
	sh_vou_reg_a_set(vou_dev, reg + 0x1000, value, mask);
}

/* Read-modify-write on both register sides A and B. */
static void sh_vou_reg_ab_set(struct sh_vou_device *vou_dev, unsigned int reg,
			      u32 value, u32 mask)
{
	sh_vou_reg_a_set(vou_dev, reg, value, mask);
	sh_vou_reg_b_set(vou_dev, reg, value, mask);
}

/* Per-pixel-format parameters used to program the VOU data path. */
struct sh_vou_fmt {
	u32		pfmt;	/* V4L2 fourcc */
	unsigned char	bpp;	/* bits per pixel */
	unsigned char	bpl;	/* multiplier for bytesperline */
	unsigned char	rgb;	/* RGB (1) vs YUV (0) input */
	unsigned char	yf;	/* YUV format code written to VOUDFR */
	unsigned char	pkf;	/* RGB packing code written to VOUDFR */
};

/* Further pixel formats can be added */
static struct sh_vou_fmt vou_fmt[] = {
	{
		.pfmt	= V4L2_PIX_FMT_NV12,
		.bpp	= 12,
		.bpl	= 1,
		.yf	= 0,
		.rgb	= 0,
	},
	{
		.pfmt	= V4L2_PIX_FMT_NV16,
		.bpp	= 16,
		.bpl	= 1,
		.yf	= 1,
		.rgb	= 0,
	},
	{
		.pfmt	= V4L2_PIX_FMT_RGB24,
		.bpp	= 24,
		.bpl	= 3,
		.pkf	= 2,
		.rgb	= 1,
	},
	{
		.pfmt	= V4L2_PIX_FMT_RGB565,
		.bpp	= 16,
		.bpl	= 2,
		.pkf	= 3,
		.rgb	= 1,
	},
	{
		.pfmt	= V4L2_PIX_FMT_RGB565X,
		.bpp	= 16,
		.bpl	= 2,
		.pkf	= 3,
		.rgb	= 1,
	},
};

/*
 * Program the DMA addresses of the next buffer into the currently inactive
 * register side via the mirror address range.  For the two-plane NV formats
 * the chroma plane follows the luma plane immediately in memory.
 */
static void sh_vou_schedule_next(struct sh_vou_device *vou_dev,
				 struct vb2_v4l2_buffer *vbuf)
{
	dma_addr_t addr1, addr2;

	addr1 = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
	switch (vou_dev->pix.pixelformat) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
		/* Chroma plane starts right after the luma plane. */
		addr2 = addr1 + vou_dev->pix.width * vou_dev->pix.height;
		break;
	default:
		addr2 = 0;
	}

	sh_vou_reg_m_write(vou_dev, VOUAD1R, addr1);
	sh_vou_reg_m_write(vou_dev, VOUAD2R, addr2);
}

/*
 * Program byte-swapping (VOUSWR) and the line stride (VOUAIR) for the
 * currently configured pixel format.
 */
static void sh_vou_stream_config(struct sh_vou_device *vou_dev)
{
	unsigned int row_coeff;
#ifdef __LITTLE_ENDIAN
	u32 dataswap = 7;
#else
	u32 dataswap = 0;
#endif

	switch (vou_dev->pix.pixelformat) {
	default:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
		row_coeff = 1;
		break;
	case V4L2_PIX_FMT_RGB565:
		/* RGB565 differs from RGB565X only in byte order. */
		dataswap ^= 1;
		fallthrough;
	case V4L2_PIX_FMT_RGB565X:
		row_coeff = 2;
		break;
	case V4L2_PIX_FMT_RGB24:
		row_coeff = 3;
		break;
	}

	sh_vou_reg_a_write(vou_dev, VOUSWR, dataswap);
	sh_vou_reg_ab_write(vou_dev, VOUAIR, vou_dev->pix.width * row_coeff);
}

/* Locking: caller holds fop_lock mutex */
static int sh_vou_queue_setup(struct vb2_queue *vq,
			      unsigned int *nbuffers, unsigned int *nplanes,
			      unsigned int sizes[], struct device *alloc_devs[])
{
	struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
	struct v4l2_pix_format *pix = &vou_dev->pix;
	int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;

	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);

	/* With a caller-proposed plane count, only validate the size. */
	if (*nplanes)
		return sizes[0] < pix->height * bytes_per_line ?
-EINVAL : 0;
	*nplanes = 1;
	sizes[0] = pix->height * bytes_per_line;
	return 0;
}

/* Verify the buffer is large enough for the current format and set payload. */
static int sh_vou_buf_prepare(struct vb2_buffer *vb)
{
	struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_pix_format *pix = &vou_dev->pix;
	unsigned bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8;
	unsigned size = pix->height * bytes_per_line;

	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);

	if (vb2_plane_size(vb, 0) < size) {
		/* User buffer too small */
		dev_warn(vou_dev->v4l2_dev.dev, "buffer too small (%lu < %u)\n",
			 vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);
	return 0;
}

/* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */
static void sh_vou_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct sh_vou_device *vou_dev = vb2_get_drv_priv(vb->vb2_queue);
	struct sh_vou_buffer *shbuf = to_sh_vou_buffer(vbuf);
	unsigned long flags;

	spin_lock_irqsave(&vou_dev->lock, flags);
	list_add_tail(&shbuf->list, &vou_dev->buf_list);
	spin_unlock_irqrestore(&vou_dev->lock, flags);
}

/*
 * Start streaming: tell the encoder subdev to stream, prime both hardware
 * register sides with the first two queued buffers, then enable the VOU.
 * vb2 guarantees min_buffers_needed buffers are queued before this is called.
 */
static int sh_vou_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
	struct sh_vou_buffer *buf, *node;
	int ret;

	vou_dev->sequence = 0;
	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
					 s_stream, 1);
	if (ret < 0 && ret != -ENOIOCTLCMD) {
		/* Subdev refused to stream: give all buffers back to vb2. */
		list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
			list_del(&buf->list);
		}
		vou_dev->active = NULL;
		return ret;
	}

	buf = list_entry(vou_dev->buf_list.next, struct sh_vou_buffer, list);
	vou_dev->active = buf;

	/* Start from side A: we use mirror addresses, so, set B */
	sh_vou_reg_a_write(vou_dev, VOURPR, 1);
	dev_dbg(vou_dev->v4l2_dev.dev, "%s: first buffer status 0x%x\n",
		__func__, sh_vou_reg_a_read(vou_dev, VOUSTR));
	sh_vou_schedule_next(vou_dev, &buf->vb);

	buf = list_entry(buf->list.next, struct sh_vou_buffer, list);

	/* Second buffer - initialise register side B */
	sh_vou_reg_a_write(vou_dev, VOURPR, 0);
	sh_vou_schedule_next(vou_dev, &buf->vb);

	/* Register side switching with frame VSYNC */
	sh_vou_reg_a_write(vou_dev, VOURCR, 5);

	sh_vou_stream_config(vou_dev);
	/* Enable End-of-Frame (VSYNC) interrupts */
	sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004);

	/* Two buffers on the queue - activate the hardware */
	vou_dev->status = SH_VOU_RUNNING;
	sh_vou_reg_a_write(vou_dev, VOUER, 0x107);

	return 0;
}

/*
 * Stop streaming: stop the encoder, disable output and interrupts, let the
 * in-flight frame finish, then return all queued buffers with ERROR state.
 */
static void sh_vou_stop_streaming(struct vb2_queue *vq)
{
	struct sh_vou_device *vou_dev = vb2_get_drv_priv(vq);
	struct sh_vou_buffer *buf, *node;
	unsigned long flags;

	v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0,
				   video, s_stream, 0);
	/* disable output */
	sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
	/* ...but the current frame will complete */
	sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
	msleep(50);
	spin_lock_irqsave(&vou_dev->lock, flags);
	list_for_each_entry_safe(buf, node, &vou_dev->buf_list, list) {
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		list_del(&buf->list);
	}
	vou_dev->active = NULL;
	spin_unlock_irqrestore(&vou_dev->lock, flags);
}

static const struct vb2_ops sh_vou_qops = {
	.queue_setup		= sh_vou_queue_setup,
	.buf_prepare		= sh_vou_buf_prepare,
	.buf_queue		= sh_vou_buf_queue,
	.start_streaming	= sh_vou_start_streaming,
	.stop_streaming		= sh_vou_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};

/* Video IOCTLs */
static int sh_vou_querycap(struct file *file, void *priv,
			   struct v4l2_capability *cap)
{
	struct sh_vou_device *vou_dev = video_drvdata(file);

	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);

	strscpy(cap->card, "SuperH VOU", sizeof(cap->card));
	strscpy(cap->driver, "sh-vou", sizeof(cap->driver));
	strscpy(cap->bus_info, "platform:sh-vou", sizeof(cap->bus_info));
	return 0;
}

/* Enumerate formats, that the device can accept from the user */
static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv, struct
v4l2_fmtdesc *fmt)
{
	struct sh_vou_device *vou_dev = video_drvdata(file);

	if (fmt->index >= ARRAY_SIZE(vou_fmt))
		return -EINVAL;

	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);

	fmt->pixelformat = vou_fmt[fmt->index].pfmt;

	return 0;
}

/* Return the currently configured output pixel format. */
static int sh_vou_g_fmt_vid_out(struct file *file, void *priv,
				struct v4l2_format *fmt)
{
	struct sh_vou_device *vou_dev = video_drvdata(file);

	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);

	fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	fmt->fmt.pix = vou_dev->pix;

	return 0;
}

/*
 * Scaling ratio tables: output = input * num / den.  The *_fld tables hold
 * the hardware field codes written to VOUVCR for each non-unity ratio.
 */
static const unsigned char vou_scale_h_num[] = {1, 9, 2, 9, 4};
static const unsigned char vou_scale_h_den[] = {1, 8, 1, 4, 1};
static const unsigned char vou_scale_h_fld[] = {0, 2, 1, 3};
static const unsigned char vou_scale_v_num[] = {1, 2, 4};
static const unsigned char vou_scale_v_den[] = {1, 1, 1};
static const unsigned char vou_scale_v_fld[] = {0, 1};

/*
 * Program input size, display position/size, scaling and data format
 * registers for the given pixel format and scale-table indices.
 * @w_idx / @h_idx: selected horizontal / vertical scale indices; 0 means
 * no scaling in that direction.
 */
static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev,
				      int pix_idx, int w_idx, int h_idx)
{
	struct sh_vou_fmt *fmt = vou_fmt + pix_idx;
	unsigned int black_left, black_top, width_max,
		frame_in_height, frame_out_height, frame_out_top;
	struct v4l2_rect *rect = &vou_dev->rect;
	struct v4l2_pix_format *pix = &vou_dev->pix;
	u32 vouvcr = 0, dsr_h, dsr_v;

	if (vou_dev->std & V4L2_STD_525_60) {
		width_max = 858;
		/* height_max = 262; */
	} else {
		width_max = 864;
		/* height_max = 312; */
	}

	/* Interlaced output: the hardware works per field (half height). */
	frame_in_height = pix->height / 2;
	frame_out_height = rect->height / 2;
	frame_out_top = rect->top / 2;

	/*
	 * Cropping scheme: max useful image is 720x480, and the total video
	 * area is 858x525 (NTSC) or 864x625 (PAL). AK8813 / 8814 starts
	 * sampling data beginning with fixed 276th (NTSC) / 288th (PAL) clock,
	 * of which the first 33 / 25 clocks HSYNC must be held active. This
	 * has to be configured in CR[HW]. 1 pixel equals 2 clock periods.
	 * This gives CR[HW] = 16 / 12, VPR[HVP] = 138 / 144, which gives
	 * exactly 858 - 138 = 864 - 144 = 720! We call the out-of-display area,
	 * beyond DSR, specified on the left and top by the VPR register "black
	 * pixels" and out-of-image area (DPR) "background pixels." We fix VPR
	 * at 138 / 144 : 20, because that's the HSYNC timing, that our first
	 * client requires, and that's exactly what leaves us 720 pixels for the
	 * image; we leave VPR[VVP] at default 20 for now, because the client
	 * doesn't seem to have any special requirements for it. Otherwise we
	 * could also set it to max - 240 = 22 / 72. Thus VPR depends only on
	 * the selected standard, and DPR and DSR are selected according to
	 * cropping. Q: how does the client detect the first valid line? Does
	 * HSYNC stay inactive during invalid (black) lines?
	 */
	black_left = width_max - VOU_MAX_IMAGE_WIDTH;
	black_top = 20;

	dsr_h = rect->width + rect->left;
	dsr_v = frame_out_height + frame_out_top;

	dev_dbg(vou_dev->v4l2_dev.dev,
		"image %ux%u, black %u:%u, offset %u:%u, display %ux%u\n",
		pix->width, frame_in_height, black_left, black_top,
		rect->left, frame_out_top, dsr_h, dsr_v);

	/* VOUISR height - half of a frame height in frame mode */
	sh_vou_reg_ab_write(vou_dev, VOUISR, (pix->width << 16) | frame_in_height);
	sh_vou_reg_ab_write(vou_dev, VOUVPR, (black_left << 16) | black_top);
	sh_vou_reg_ab_write(vou_dev, VOUDPR, (rect->left << 16) | frame_out_top);
	sh_vou_reg_ab_write(vou_dev, VOUDSR, (dsr_h << 16) | dsr_v);

	/*
	 * if necessary, we could set VOUHIR to
	 * max(black_left + dsr_h, width_max) here
	 */

	if (w_idx)
		vouvcr |= (1 << 15) | (vou_scale_h_fld[w_idx - 1] << 4);
	if (h_idx)
		vouvcr |= (1 << 14) | vou_scale_v_fld[h_idx - 1];

	dev_dbg(vou_dev->v4l2_dev.dev, "0x%08x: scaling 0x%x\n",
		fmt->pfmt, vouvcr);

	/* To produce a colour bar for testing set bit 23 of VOUVCR */
	sh_vou_reg_ab_write(vou_dev, VOUVCR, vouvcr);
	sh_vou_reg_ab_write(vou_dev, VOUDFR,
			    fmt->pkf | (fmt->yf << 8) | (fmt->rgb << 16));
}

/* Working set for the input/output geometry negotiation below. */
struct sh_vou_geometry {
	struct v4l2_rect output;	/* display rectangle on the screen */
	unsigned int in_width;		/* image width in memory */
	unsigned int in_height;		/* image height in memory */
	int scale_idx_h;		/* selected horizontal scale index */
	int scale_idx_v;		/* selected vertical scale index */
};

/*
* Find input geometry, that we can use to produce output, closest to the * requested rectangle, using VOU scaling */ static void vou_adjust_input(struct sh_vou_geometry *geo, v4l2_std_id std) { /* The compiler cannot know, that best and idx will indeed be set */ unsigned int best_err = UINT_MAX, best = 0, img_height_max; int i, idx = 0; if (std & V4L2_STD_525_60) img_height_max = 480; else img_height_max = 576; /* Image width must be a multiple of 4 */ v4l_bound_align_image(&geo->in_width, VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 2, &geo->in_height, VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0); /* Select scales to come as close as possible to the output image */ for (i = ARRAY_SIZE(vou_scale_h_num) - 1; i >= 0; i--) { unsigned int err; unsigned int found = geo->output.width * vou_scale_h_den[i] / vou_scale_h_num[i]; if (found > VOU_MAX_IMAGE_WIDTH) /* scales increase */ break; err = abs(found - geo->in_width); if (err < best_err) { best_err = err; idx = i; best = found; } if (!err) break; } geo->in_width = best; geo->scale_idx_h = idx; best_err = UINT_MAX; /* This loop can be replaced with one division */ for (i = ARRAY_SIZE(vou_scale_v_num) - 1; i >= 0; i--) { unsigned int err; unsigned int found = geo->output.height * vou_scale_v_den[i] / vou_scale_v_num[i]; if (found > img_height_max) /* scales increase */ break; err = abs(found - geo->in_height); if (err < best_err) { best_err = err; idx = i; best = found; } if (!err) break; } geo->in_height = best; geo->scale_idx_v = idx; } /* * Find output geometry, that we can produce, using VOU scaling, closest to * the requested rectangle */ static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std) { unsigned int best_err = UINT_MAX, best = geo->in_width, width_max, height_max, img_height_max; int i, idx_h = 0, idx_v = 0; if (std & V4L2_STD_525_60) { width_max = 858; height_max = 262 * 2; img_height_max = 480; } else { width_max = 864; height_max = 312 * 2; img_height_max = 576; } /* Select scales to come 
as close as possible to the output image */ for (i = 0; i < ARRAY_SIZE(vou_scale_h_num); i++) { unsigned int err; unsigned int found = geo->in_width * vou_scale_h_num[i] / vou_scale_h_den[i]; if (found > VOU_MAX_IMAGE_WIDTH) /* scales increase */ break; err = abs(found - geo->output.width); if (err < best_err) { best_err = err; idx_h = i; best = found; } if (!err) break; } geo->output.width = best; geo->scale_idx_h = idx_h; if (geo->output.left + best > width_max) geo->output.left = width_max - best; pr_debug("%s(): W %u * %u/%u = %u\n", __func__, geo->in_width, vou_scale_h_num[idx_h], vou_scale_h_den[idx_h], best); best_err = UINT_MAX; /* This loop can be replaced with one division */ for (i = 0; i < ARRAY_SIZE(vou_scale_v_num); i++) { unsigned int err; unsigned int found = geo->in_height * vou_scale_v_num[i] / vou_scale_v_den[i]; if (found > img_height_max) /* scales increase */ break; err = abs(found - geo->output.height); if (err < best_err) { best_err = err; idx_v = i; best = found; } if (!err) break; } geo->output.height = best; geo->scale_idx_v = idx_v; if (geo->output.top + best > height_max) geo->output.top = height_max - best; pr_debug("%s(): H %u * %u/%u = %u\n", __func__, geo->in_height, vou_scale_v_num[idx_v], vou_scale_v_den[idx_v], best); } static int sh_vou_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *fmt) { struct sh_vou_device *vou_dev = video_drvdata(file); struct v4l2_pix_format *pix = &fmt->fmt.pix; unsigned int img_height_max; int pix_idx; dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__); pix->field = V4L2_FIELD_INTERLACED; pix->colorspace = V4L2_COLORSPACE_SMPTE170M; pix->ycbcr_enc = pix->quantization = 0; for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++) if (vou_fmt[pix_idx].pfmt == pix->pixelformat) break; if (pix_idx == ARRAY_SIZE(vou_fmt)) return -EINVAL; if (vou_dev->std & V4L2_STD_525_60) img_height_max = 480; else img_height_max = 576; v4l_bound_align_image(&pix->width, VOU_MIN_IMAGE_WIDTH, 
VOU_MAX_IMAGE_WIDTH, 2,
			      &pix->height, VOU_MIN_IMAGE_HEIGHT,
			      img_height_max, 1, 0);

	pix->bytesperline = pix->width * vou_fmt[pix_idx].bpl;
	pix->sizeimage = pix->height *
		((pix->width * vou_fmt[pix_idx].bpp) >> 3);

	return 0;
}

/*
 * Apply a (pre-validated) output pixel format: negotiate the output
 * rectangle with the encoder subdev, fall back to adjusting the input
 * if the subdev changed the output, then program the VOU geometry.
 * Returns 0 on success or a negative errno.
 */
static int sh_vou_set_fmt_vid_out(struct sh_vou_device *vou_dev,
				  struct v4l2_pix_format *pix)
{
	unsigned int img_height_max;
	struct sh_vou_geometry geo;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		/* Revisit: is this the correct code? */
		.format.code = MEDIA_BUS_FMT_YUYV8_2X8,
		.format.field = V4L2_FIELD_INTERLACED,
		.format.colorspace = V4L2_COLORSPACE_SMPTE170M,
	};
	struct v4l2_mbus_framefmt *mbfmt = &format.format;
	int pix_idx;
	int ret;

	if (vb2_is_busy(&vou_dev->queue))
		return -EBUSY;

	for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++)
		if (vou_fmt[pix_idx].pfmt == pix->pixelformat)
			break;

	geo.in_width = pix->width;
	geo.in_height = pix->height;
	geo.output = vou_dev->rect;

	vou_adjust_output(&geo, vou_dev->std);

	mbfmt->width = geo.output.width;
	mbfmt->height = geo.output.height;
	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad,
					 set_fmt, NULL, &format);
	/* Must be implemented, so, don't check for -ENOIOCTLCMD */
	if (ret < 0)
		return ret;

	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__,
		geo.output.width, geo.output.height, mbfmt->width, mbfmt->height);

	if (vou_dev->std & V4L2_STD_525_60)
		img_height_max = 480;
	else
		img_height_max = 576;

	/* Sanity checks */
	if ((unsigned)mbfmt->width > VOU_MAX_IMAGE_WIDTH ||
	    (unsigned)mbfmt->height > img_height_max ||
	    mbfmt->code != MEDIA_BUS_FMT_YUYV8_2X8)
		return -EIO;

	if (mbfmt->width != geo.output.width ||
	    mbfmt->height != geo.output.height) {
		/* The subdev changed the output rectangle: re-fit the input. */
		geo.output.width = mbfmt->width;
		geo.output.height = mbfmt->height;

		vou_adjust_input(&geo, vou_dev->std);
	}

	/* We tried to preserve output rectangle, but it could have changed */
	vou_dev->rect = geo.output;
	pix->width = geo.in_width;
	pix->height = geo.in_height;

	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u\n", __func__,
		pix->width, pix->height);

	vou_dev->pix_idx = pix_idx;

	vou_dev->pix = *pix;

	sh_vou_configure_geometry(vou_dev, pix_idx,
				  geo.scale_idx_h, geo.scale_idx_v);

	return 0;
}

/* VIDIOC_S_FMT: try the format, then apply it. */
static int sh_vou_s_fmt_vid_out(struct file *file, void *priv,
				struct v4l2_format *fmt)
{
	struct sh_vou_device *vou_dev = video_drvdata(file);
	int ret = sh_vou_try_fmt_vid_out(file, priv, fmt);

	if (ret)
		return ret;
	return sh_vou_set_fmt_vid_out(vou_dev, &fmt->fmt.pix);
}

/* There is a single fixed analog output. */
static int sh_vou_enum_output(struct file *file, void *fh,
			      struct v4l2_output *a)
{
	struct sh_vou_device *vou_dev = video_drvdata(file);

	if (a->index)
		return -EINVAL;
	strscpy(a->name, "Video Out", sizeof(a->name));
	a->type = V4L2_OUTPUT_TYPE_ANALOG;
	a->std = vou_dev->vdev.tvnorms;
	return 0;
}

static int sh_vou_g_output(struct file *file, void *fh, unsigned int *i)
{
	*i = 0;
	return 0;
}

static int sh_vou_s_output(struct file *file, void *fh, unsigned int i)
{
	return i ? -EINVAL : 0;
}

/* Map the platform bus format to the VOUCR NTSC mode field value. */
static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt)
{
	switch (bus_fmt) {
	default:
		pr_warn("%s(): Invalid bus-format code %d, using default 8-bit\n",
			__func__, bus_fmt);
		fallthrough;
	case SH_VOU_BUS_8BIT:
		return 1;
	case SH_VOU_BUS_16BIT:
		return 0;
	case SH_VOU_BUS_BT656:
		return 3;
	}
}

/*
 * Switch the TV standard: propagate it to the encoder subdev, reprogram
 * VOUCR and reset the default rectangle/format for the new standard.
 */
static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id std_id)
{
	struct sh_vou_device *vou_dev = video_drvdata(file);
	int ret;

	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, std_id);

	if (std_id == vou_dev->std)
		return 0;

	if (vb2_is_busy(&vou_dev->queue))
		return -EBUSY;

	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
					 s_std_output, std_id);
	/* Shall we continue, if the subdev doesn't support .s_std_output()?
	 */
	if (ret < 0 && ret != -ENOIOCTLCMD)
		return ret;

	/* Reset to the full default rectangle for the new standard. */
	vou_dev->rect.top = vou_dev->rect.left = 0;
	vou_dev->rect.width = VOU_MAX_IMAGE_WIDTH;
	if (std_id & V4L2_STD_525_60) {
		sh_vou_reg_ab_set(vou_dev, VOUCR,
			sh_vou_ntsc_mode(vou_dev->pdata->bus_fmt) << 29, 7 << 29);
		vou_dev->rect.height = 480;
	} else {
		sh_vou_reg_ab_set(vou_dev, VOUCR, 5 << 29, 7 << 29);
		vou_dev->rect.height = 576;
	}

	vou_dev->pix.width = vou_dev->rect.width;
	vou_dev->pix.height = vou_dev->rect.height;
	vou_dev->pix.bytesperline = vou_dev->pix.width *
		vou_fmt[vou_dev->pix_idx].bpl;
	vou_dev->pix.sizeimage = vou_dev->pix.height *
		((vou_dev->pix.width * vou_fmt[vou_dev->pix_idx].bpp) >> 3);

	vou_dev->std = std_id;
	sh_vou_set_fmt_vid_out(vou_dev, &vou_dev->pix);

	return 0;
}

static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
	struct sh_vou_device *vou_dev = video_drvdata(file);

	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);

	*std = vou_dev->std;

	return 0;
}

/* Dump all side-A register values for debugging. */
static int sh_vou_log_status(struct file *file, void *priv)
{
	struct sh_vou_device *vou_dev = video_drvdata(file);

	pr_info("VOUER:   0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUER));
	pr_info("VOUCR:   0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUCR));
	pr_info("VOUSTR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSTR));
	pr_info("VOUVCR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUVCR));
	pr_info("VOUISR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUISR));
	pr_info("VOUBCR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUBCR));
	pr_info("VOUDPR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDPR));
	pr_info("VOUDSR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDSR));
	pr_info("VOUVPR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUVPR));
	pr_info("VOUIR:   0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUIR));
	pr_info("VOUSRR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSRR));
	pr_info("VOUMSR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUMSR));
	pr_info("VOUHIR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUHIR));
	pr_info("VOUDFR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUDFR));
	pr_info("VOUAD1R: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAD1R));
	pr_info("VOUAD2R: 0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAD2R));
	pr_info("VOUAIR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUAIR));
	pr_info("VOUSWR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOUSWR));
	pr_info("VOURCR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOURCR));
	pr_info("VOURPR:  0x%08x\n", sh_vou_reg_a_read(vou_dev, VOURPR));

	return 0;
}

/* Report the current (or default/bounds) compose rectangle. */
static int sh_vou_g_selection(struct file *file, void *fh,
			      struct v4l2_selection *sel)
{
	struct sh_vou_device *vou_dev = video_drvdata(file);

	if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;
	switch (sel->target) {
	case V4L2_SEL_TGT_COMPOSE:
		sel->r = vou_dev->rect;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = VOU_MAX_IMAGE_WIDTH;
		if (vou_dev->std & V4L2_STD_525_60)
			sel->r.height = 480;
		else
			sel->r.height = 576;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Assume a dull encoder, do all the work ourselves. */
static int sh_vou_s_selection(struct file *file, void *fh,
			      struct v4l2_selection *sel)
{
	struct v4l2_rect *rect = &sel->r;
	struct sh_vou_device *vou_dev = video_drvdata(file);
	struct v4l2_subdev_selection sd_sel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = V4L2_SEL_TGT_COMPOSE,
	};
	struct v4l2_pix_format *pix = &vou_dev->pix;
	struct sh_vou_geometry geo;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		/* Revisit: is this the correct code?
*/ .format.code = MEDIA_BUS_FMT_YUYV8_2X8, .format.field = V4L2_FIELD_INTERLACED, .format.colorspace = V4L2_COLORSPACE_SMPTE170M, }; unsigned int img_height_max; int ret; if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT || sel->target != V4L2_SEL_TGT_COMPOSE) return -EINVAL; if (vb2_is_busy(&vou_dev->queue)) return -EBUSY; if (vou_dev->std & V4L2_STD_525_60) img_height_max = 480; else img_height_max = 576; v4l_bound_align_image(&rect->width, VOU_MIN_IMAGE_WIDTH, VOU_MAX_IMAGE_WIDTH, 1, &rect->height, VOU_MIN_IMAGE_HEIGHT, img_height_max, 1, 0); if (rect->width + rect->left > VOU_MAX_IMAGE_WIDTH) rect->left = VOU_MAX_IMAGE_WIDTH - rect->width; if (rect->height + rect->top > img_height_max) rect->top = img_height_max - rect->height; geo.output = *rect; geo.in_width = pix->width; geo.in_height = pix->height; /* Configure the encoder one-to-one, position at 0, ignore errors */ sd_sel.r.width = geo.output.width; sd_sel.r.height = geo.output.height; /* * We first issue a S_SELECTION, so that the subsequent S_FMT delivers the * final encoder configuration. */ v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad, set_selection, NULL, &sd_sel); format.format.width = geo.output.width; format.format.height = geo.output.height; ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, pad, set_fmt, NULL, &format); /* Must be implemented, so, don't check for -ENOIOCTLCMD */ if (ret < 0) return ret; /* Sanity checks */ if ((unsigned)format.format.width > VOU_MAX_IMAGE_WIDTH || (unsigned)format.format.height > img_height_max || format.format.code != MEDIA_BUS_FMT_YUYV8_2X8) return -EIO; geo.output.width = format.format.width; geo.output.height = format.format.height; /* * No down-scaling. 
According to the API, current call has precedence:
	 * https://linuxtv.org/downloads/v4l-dvb-apis/uapi/v4l/crop.html#cropping-structures
	 */
	vou_adjust_input(&geo, vou_dev->std);

	/* We tried to preserve output rectangle, but it could have changed */
	vou_dev->rect = geo.output;
	pix->width = geo.in_width;
	pix->height = geo.in_height;

	sh_vou_configure_geometry(vou_dev, vou_dev->pix_idx,
				  geo.scale_idx_h, geo.scale_idx_v);

	return 0;
}

/*
 * End-of-frame interrupt handler: ack the IRQ, complete the finished
 * buffer and schedule the next one into the inactive register side.
 * If only one buffer is left, keep re-displaying it.
 */
static irqreturn_t sh_vou_isr(int irq, void *dev_id)
{
	struct sh_vou_device *vou_dev = dev_id;
	static unsigned long j;
	struct sh_vou_buffer *vb;
	static int cnt;
	u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
	u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);

	if (!(irq_status & 0x300)) {
		/* Not one of our end-of-frame bits: not our interrupt. */
		if (printk_timed_ratelimit(&j, 500))
			dev_warn(vou_dev->v4l2_dev.dev, "IRQ status 0x%x!\n",
				 irq_status);
		return IRQ_NONE;
	}

	spin_lock(&vou_dev->lock);
	if (!vou_dev->active || list_empty(&vou_dev->buf_list)) {
		if (printk_timed_ratelimit(&j, 500))
			dev_warn(vou_dev->v4l2_dev.dev,
				 "IRQ without active buffer: %x!\n", irq_status);
		/* Just ack: buf_release will disable further interrupts */
		sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x300);
		spin_unlock(&vou_dev->lock);
		return IRQ_HANDLED;
	}

	masked = ~(0x300 & irq_status) & irq_status & 0x30304;
	dev_dbg(vou_dev->v4l2_dev.dev,
		"IRQ status 0x%x -> 0x%x, VOU status 0x%x, cnt %d\n",
		irq_status, masked, vou_status, cnt);
	cnt++;
	/* side = vou_status & 0x10000; */

	/* Clear only set interrupts */
	sh_vou_reg_a_write(vou_dev, VOUIR, masked);

	vb = vou_dev->active;
	if (list_is_singular(&vb->list)) {
		/* Keep cycling while no next buffer is available */
		sh_vou_schedule_next(vou_dev, &vb->vb);
		spin_unlock(&vou_dev->lock);
		return IRQ_HANDLED;
	}

	list_del(&vb->list);

	vb->vb.vb2_buf.timestamp = ktime_get_ns();
	vb->vb.sequence = vou_dev->sequence++;
	vb->vb.field = V4L2_FIELD_INTERLACED;
	vb2_buffer_done(&vb->vb.vb2_buf, VB2_BUF_STATE_DONE);

	vou_dev->active = list_entry(vou_dev->buf_list.next,
				     struct sh_vou_buffer, list);

	if (list_is_singular(&vou_dev->buf_list)) {
		/* Keep cycling while no next buffer is available */
		sh_vou_schedule_next(vou_dev, &vou_dev->active->vb);
	} else {
		struct sh_vou_buffer *new = list_entry(vou_dev->active->list.next,
						struct sh_vou_buffer, list);

		sh_vou_schedule_next(vou_dev, &new->vb);
	}

	spin_unlock(&vou_dev->lock);

	return IRQ_HANDLED;
}

/*
 * One-time hardware initialisation on first open: reset the VOU, program
 * clock/sync polarity and bus mode from platform data, then apply the
 * current default format.  Returns 0 or -ETIMEDOUT if the reset hangs.
 */
static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
{
	struct sh_vou_pdata *pdata = vou_dev->pdata;
	u32 voucr = sh_vou_ntsc_mode(pdata->bus_fmt) << 29;
	int i = 100;

	/* Disable all IRQs */
	sh_vou_reg_a_write(vou_dev, VOUIR, 0);

	/* Reset VOU interfaces - registers unaffected */
	sh_vou_reg_a_write(vou_dev, VOUSRR, 0x101);
	while (--i && (sh_vou_reg_a_read(vou_dev, VOUSRR) & 0x101))
		udelay(1);

	if (!i)
		return -ETIMEDOUT;

	dev_dbg(vou_dev->v4l2_dev.dev, "Reset took %dus\n", 100 - i);

	if (pdata->flags & SH_VOU_PCLK_FALLING)
		voucr |= 1 << 28;
	if (pdata->flags & SH_VOU_HSYNC_LOW)
		voucr |= 1 << 27;
	if (pdata->flags & SH_VOU_VSYNC_LOW)
		voucr |= 1 << 26;
	sh_vou_reg_ab_set(vou_dev, VOUCR, voucr, 0xfc000000);

	/* Manual register side switching at first */
	sh_vou_reg_a_write(vou_dev, VOURCR, 4);
	/* Default - fixed HSYNC length, can be made configurable is required */
	sh_vou_reg_ab_write(vou_dev, VOUMSR, 0x800000);

	sh_vou_set_fmt_vid_out(vou_dev, &vou_dev->pix);

	return 0;
}

/* File operations */
/* On the first open, power the device up and initialise the hardware. */
static int sh_vou_open(struct file *file)
{
	struct sh_vou_device *vou_dev = video_drvdata(file);
	int err;

	if (mutex_lock_interruptible(&vou_dev->fop_lock))
		return -ERESTARTSYS;

	err = v4l2_fh_open(file);
	if (err)
		goto done_open;
	if (v4l2_fh_is_singular_file(file) &&
	    vou_dev->status == SH_VOU_INITIALISING) {
		/* First open */
		err = pm_runtime_resume_and_get(vou_dev->v4l2_dev.dev);
		if (err < 0) {
			v4l2_fh_release(file);
			goto done_open;
		}
		err = sh_vou_hw_init(vou_dev);
		if (err < 0) {
			pm_runtime_put(vou_dev->v4l2_dev.dev);
			v4l2_fh_release(file);
		} else {
			vou_dev->status = SH_VOU_IDLE;
		}
	}
done_open:
	mutex_unlock(&vou_dev->fop_lock);
	return err;
}

static
int sh_vou_release(struct file *file)
{
	struct sh_vou_device *vou_dev = video_drvdata(file);
	bool is_last;

	mutex_lock(&vou_dev->fop_lock);
	/* Must check "last open file handle" before releasing it below. */
	is_last = v4l2_fh_is_singular_file(file);
	_vb2_fop_release(file, NULL);
	if (is_last) {
		/* Last close: quiesce hardware and drop the PM reference
		 * taken in sh_vou_open(). */
		vou_dev->status = SH_VOU_INITIALISING;
		/* Clear the two enable bits (mask 0x101) in VOUER. */
		sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101);
		pm_runtime_put(vou_dev->v4l2_dev.dev);
	}
	mutex_unlock(&vou_dev->fop_lock);
	return 0;
}

/* sh_vou display ioctl operations */
static const struct v4l2_ioctl_ops sh_vou_ioctl_ops = {
	.vidioc_querycap	= sh_vou_querycap,
	.vidioc_enum_fmt_vid_out	= sh_vou_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out	= sh_vou_g_fmt_vid_out,
	.vidioc_s_fmt_vid_out	= sh_vou_s_fmt_vid_out,
	.vidioc_try_fmt_vid_out	= sh_vou_try_fmt_vid_out,
	/* Buffer management is delegated to the videobuf2 helpers. */
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
	.vidioc_expbuf		= vb2_ioctl_expbuf,
	.vidioc_g_output	= sh_vou_g_output,
	.vidioc_s_output	= sh_vou_s_output,
	.vidioc_enum_output	= sh_vou_enum_output,
	.vidioc_s_std		= sh_vou_s_std,
	.vidioc_g_std		= sh_vou_g_std,
	.vidioc_g_selection	= sh_vou_g_selection,
	.vidioc_s_selection	= sh_vou_s_selection,
	.vidioc_log_status	= sh_vou_log_status,
};

static const struct v4l2_file_operations sh_vou_fops = {
	.owner		= THIS_MODULE,
	.open		= sh_vou_open,
	.release	= sh_vou_release,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= vb2_fop_mmap,
	.poll		= vb2_fop_poll,
	.write		= vb2_fop_write,
};

/* Template copied into vou_dev->vdev at probe time. */
static const struct video_device sh_vou_video_template = {
	.name		= "sh_vou",
	.fops		= &sh_vou_fops,
	.ioctl_ops	= &sh_vou_ioctl_ops,
	.tvnorms	= V4L2_STD_525_60, /* PAL only supported in 8-bit non-bt656 mode */
	.vfl_dir	= VFL_DIR_TX,
	.device_caps	= V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE |
			  V4L2_CAP_STREAMING,
};

static int sh_vou_probe(struct platform_device *pdev)
{
	struct
sh_vou_pdata *vou_pdata = pdev->dev.platform_data; struct v4l2_rect *rect; struct v4l2_pix_format *pix; struct i2c_adapter *i2c_adap; struct video_device *vdev; struct sh_vou_device *vou_dev; struct v4l2_subdev *subdev; struct vb2_queue *q; int irq, ret; if (!vou_pdata) { dev_err(&pdev->dev, "Insufficient VOU platform information.\n"); return -ENODEV; } irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; vou_dev = devm_kzalloc(&pdev->dev, sizeof(*vou_dev), GFP_KERNEL); if (!vou_dev) return -ENOMEM; INIT_LIST_HEAD(&vou_dev->buf_list); spin_lock_init(&vou_dev->lock); mutex_init(&vou_dev->fop_lock); vou_dev->pdata = vou_pdata; vou_dev->status = SH_VOU_INITIALISING; vou_dev->pix_idx = 1; rect = &vou_dev->rect; pix = &vou_dev->pix; /* Fill in defaults */ vou_dev->std = V4L2_STD_NTSC_M; rect->left = 0; rect->top = 0; rect->width = VOU_MAX_IMAGE_WIDTH; rect->height = 480; pix->width = VOU_MAX_IMAGE_WIDTH; pix->height = 480; pix->pixelformat = V4L2_PIX_FMT_NV16; pix->field = V4L2_FIELD_INTERLACED; pix->bytesperline = VOU_MAX_IMAGE_WIDTH; pix->sizeimage = VOU_MAX_IMAGE_WIDTH * 2 * 480; pix->colorspace = V4L2_COLORSPACE_SMPTE170M; vou_dev->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(vou_dev->base)) return PTR_ERR(vou_dev->base); ret = devm_request_irq(&pdev->dev, irq, sh_vou_isr, 0, "vou", vou_dev); if (ret < 0) return ret; ret = v4l2_device_register(&pdev->dev, &vou_dev->v4l2_dev); if (ret < 0) { dev_err(&pdev->dev, "Error registering v4l2 device\n"); return ret; } vdev = &vou_dev->vdev; *vdev = sh_vou_video_template; if (vou_pdata->bus_fmt == SH_VOU_BUS_8BIT) vdev->tvnorms |= V4L2_STD_PAL; vdev->v4l2_dev = &vou_dev->v4l2_dev; vdev->release = video_device_release_empty; vdev->lock = &vou_dev->fop_lock; video_set_drvdata(vdev, vou_dev); /* Initialize the vb2 queue */ q = &vou_dev->queue; q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE; q->drv_priv = vou_dev; q->buf_struct_size = sizeof(struct sh_vou_buffer); q->ops = 
&sh_vou_qops; q->mem_ops = &vb2_dma_contig_memops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->min_buffers_needed = 2; q->lock = &vou_dev->fop_lock; q->dev = &pdev->dev; ret = vb2_queue_init(q); if (ret) goto ei2cgadap; vdev->queue = q; INIT_LIST_HEAD(&vou_dev->buf_list); pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); i2c_adap = i2c_get_adapter(vou_pdata->i2c_adap); if (!i2c_adap) { ret = -ENODEV; goto ei2cgadap; } ret = sh_vou_hw_init(vou_dev); if (ret < 0) goto ereset; subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap, vou_pdata->board_info, NULL); if (!subdev) { ret = -ENOMEM; goto ei2cnd; } ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) goto evregdev; return 0; evregdev: ei2cnd: ereset: i2c_put_adapter(i2c_adap); ei2cgadap: pm_runtime_disable(&pdev->dev); v4l2_device_unregister(&vou_dev->v4l2_dev); return ret; } static void sh_vou_remove(struct platform_device *pdev) { struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); struct sh_vou_device *vou_dev = container_of(v4l2_dev, struct sh_vou_device, v4l2_dev); struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next, struct v4l2_subdev, list); struct i2c_client *client = v4l2_get_subdevdata(sd); pm_runtime_disable(&pdev->dev); video_unregister_device(&vou_dev->vdev); i2c_put_adapter(client->adapter); v4l2_device_unregister(&vou_dev->v4l2_dev); } static struct platform_driver sh_vou = { .remove_new = sh_vou_remove, .driver = { .name = "sh-vou", }, }; module_platform_driver_probe(sh_vou, sh_vou_probe); MODULE_DESCRIPTION("SuperH VOU driver"); MODULE_AUTHOR("Guennadi Liakhovetski <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.1.0"); MODULE_ALIAS("platform:sh-vou");
/* linux-master */
/* drivers/media/platform/renesas/sh_vou.c */
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Renesas Electronics Corp. * * Driver for Renesas R-Car ISP Channel Selector * * The ISP hardware is capable of more than just channel selection, features * such as demosaicing, white balance control and color space conversion are * also possible. These more advanced features are not supported by the driver * due to lack of documentation. */ #include <linux/module.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <media/mipi-csi2.h> #include <media/v4l2-subdev.h> #define ISPINPUTSEL0_REG 0x0008 #define ISPINPUTSEL0_SEL_CSI0 BIT(31) #define ISPSTART_REG 0x0014 #define ISPSTART_START 0xffff #define ISPSTART_STOP 0x0000 #define ISPPROCMODE_DT_REG(n) (0x1100 + (0x4 * (n))) #define ISPPROCMODE_DT_PROC_MODE_VC3(pm) (((pm) & 0x3f) << 24) #define ISPPROCMODE_DT_PROC_MODE_VC2(pm) (((pm) & 0x3f) << 16) #define ISPPROCMODE_DT_PROC_MODE_VC1(pm) (((pm) & 0x3f) << 8) #define ISPPROCMODE_DT_PROC_MODE_VC0(pm) ((pm) & 0x3f) #define ISPCS_FILTER_ID_CH_REG(n) (0x3000 + (0x0100 * (n))) #define ISPCS_DT_CODE03_CH_REG(n) (0x3008 + (0x100 * (n))) #define ISPCS_DT_CODE03_EN3 BIT(31) #define ISPCS_DT_CODE03_DT3(dt) (((dt) & 0x3f) << 24) #define ISPCS_DT_CODE03_EN2 BIT(23) #define ISPCS_DT_CODE03_DT2(dt) (((dt) & 0x3f) << 16) #define ISPCS_DT_CODE03_EN1 BIT(15) #define ISPCS_DT_CODE03_DT1(dt) (((dt) & 0x3f) << 8) #define ISPCS_DT_CODE03_EN0 BIT(7) #define ISPCS_DT_CODE03_DT0(dt) ((dt) & 0x3f) struct rcar_isp_format { u32 code; unsigned int datatype; unsigned int procmode; }; static const struct rcar_isp_format rcar_isp_formats[] = { { .code = MEDIA_BUS_FMT_RGB888_1X24, .datatype = MIPI_CSI2_DT_RGB888, .procmode = 0x15 }, { .code = MEDIA_BUS_FMT_Y10_1X10, .datatype = MIPI_CSI2_DT_RAW10, .procmode = 0x10, }, { .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = MIPI_CSI2_DT_YUV422_8B, .procmode = 0x0c, }, { .code = MEDIA_BUS_FMT_YUYV8_1X16, 
.datatype = MIPI_CSI2_DT_YUV422_8B, .procmode = 0x0c, }, { .code = MEDIA_BUS_FMT_UYVY8_2X8, .datatype = MIPI_CSI2_DT_YUV422_8B, .procmode = 0x0c, }, { .code = MEDIA_BUS_FMT_YUYV10_2X10, .datatype = MIPI_CSI2_DT_YUV422_8B, .procmode = 0x0c, }, }; static const struct rcar_isp_format *risp_code_to_fmt(unsigned int code) { unsigned int i; for (i = 0; i < ARRAY_SIZE(rcar_isp_formats); i++) { if (rcar_isp_formats[i].code == code) return &rcar_isp_formats[i]; } return NULL; } enum rcar_isp_input { RISP_CSI_INPUT0, RISP_CSI_INPUT1, }; enum rcar_isp_pads { RCAR_ISP_SINK, RCAR_ISP_PORT0, RCAR_ISP_PORT1, RCAR_ISP_PORT2, RCAR_ISP_PORT3, RCAR_ISP_PORT4, RCAR_ISP_PORT5, RCAR_ISP_PORT6, RCAR_ISP_PORT7, RCAR_ISP_NUM_PADS, }; struct rcar_isp { struct device *dev; void __iomem *base; struct reset_control *rstc; enum rcar_isp_input csi_input; struct v4l2_subdev subdev; struct media_pad pads[RCAR_ISP_NUM_PADS]; struct v4l2_async_notifier notifier; struct v4l2_subdev *remote; struct mutex lock; /* Protects mf and stream_count. 
*/
	struct v4l2_mbus_framefmt mf;
	int stream_count;
};

static inline struct rcar_isp *sd_to_isp(struct v4l2_subdev *sd)
{
	return container_of(sd, struct rcar_isp, subdev);
}

static inline struct rcar_isp *notifier_to_isp(struct v4l2_async_notifier *n)
{
	return container_of(n, struct rcar_isp, notifier);
}

static void risp_write(struct rcar_isp *isp, u32 offset, u32 value)
{
	iowrite32(value, isp->base + offset);
}

static u32 risp_read(struct rcar_isp *isp, u32 offset)
{
	return ioread32(isp->base + offset);
}

/*
 * Resume the device via runtime PM and deassert its reset line.
 * On reset failure the PM reference is dropped again, so the
 * function never returns with an unbalanced refcount.
 */
static int risp_power_on(struct rcar_isp *isp)
{
	int ret;

	ret = pm_runtime_resume_and_get(isp->dev);
	if (ret < 0)
		return ret;

	ret = reset_control_deassert(isp->rstc);
	if (ret < 0) {
		pm_runtime_put(isp->dev);
		return ret;
	}

	return 0;
}

/* Inverse of risp_power_on(): assert reset, then drop the PM reference. */
static void risp_power_off(struct rcar_isp *isp)
{
	reset_control_assert(isp->rstc);
	pm_runtime_put(isp->dev);
}

/*
 * Program the ISP for the currently set mbus format and start it.
 * Caller holds isp->lock (see risp_s_stream()), which protects isp->mf.
 */
static int risp_start(struct rcar_isp *isp)
{
	const struct rcar_isp_format *format;
	unsigned int vc;
	u32 sel_csi = 0;
	int ret;

	format = risp_code_to_fmt(isp->mf.code);
	if (!format) {
		dev_err(isp->dev, "Unsupported bus format\n");
		return -EINVAL;
	}

	ret = risp_power_on(isp);
	if (ret) {
		dev_err(isp->dev, "Failed to power on ISP\n");
		return ret;
	}

	/* Select CSI-2 input source. */
	if (isp->csi_input == RISP_CSI_INPUT1)
		sel_csi = ISPINPUTSEL0_SEL_CSI0;

	risp_write(isp, ISPINPUTSEL0_REG,
		   risp_read(isp, ISPINPUTSEL0_REG) | sel_csi);

	/* Configure Channel Selector. */
	for (vc = 0; vc < 4; vc++) {
		/* Virtual channels 0-3 are routed to channels 4-7. */
		u8 ch = vc + 4;
		u8 dt = format->datatype;

		risp_write(isp, ISPCS_FILTER_ID_CH_REG(ch), BIT(vc));
		/* Enable all four DT slots with the same data type. */
		risp_write(isp, ISPCS_DT_CODE03_CH_REG(ch),
			   ISPCS_DT_CODE03_EN3 | ISPCS_DT_CODE03_DT3(dt) |
			   ISPCS_DT_CODE03_EN2 | ISPCS_DT_CODE03_DT2(dt) |
			   ISPCS_DT_CODE03_EN1 | ISPCS_DT_CODE03_DT1(dt) |
			   ISPCS_DT_CODE03_EN0 | ISPCS_DT_CODE03_DT0(dt));
	}

	/* Setup processing method.
*/
	/* Same processing mode for all four virtual channels. */
	risp_write(isp, ISPPROCMODE_DT_REG(format->datatype),
		   ISPPROCMODE_DT_PROC_MODE_VC3(format->procmode) |
		   ISPPROCMODE_DT_PROC_MODE_VC2(format->procmode) |
		   ISPPROCMODE_DT_PROC_MODE_VC1(format->procmode) |
		   ISPPROCMODE_DT_PROC_MODE_VC0(format->procmode));

	/* Start ISP. */
	risp_write(isp, ISPSTART_REG, ISPSTART_START);

	/* Start the upstream (CSI-2) subdevice; undo power-on on failure. */
	ret = v4l2_subdev_call(isp->remote, video, s_stream, 1);
	if (ret)
		risp_power_off(isp);

	return ret;
}

/* Stop the remote subdevice, halt the ISP and power it down. */
static void risp_stop(struct rcar_isp *isp)
{
	v4l2_subdev_call(isp->remote, video, s_stream, 0);

	/* Stop ISP. */
	risp_write(isp, ISPSTART_REG, ISPSTART_STOP);

	risp_power_off(isp);
}

/*
 * V4L2 subdev .s_stream handler. stream_count reference-counts the
 * enabled source pads: hardware is started only on the first enable
 * and stopped on the last disable. isp->lock serializes with format
 * changes and protects stream_count.
 */
static int risp_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct rcar_isp *isp = sd_to_isp(sd);
	int ret = 0;

	mutex_lock(&isp->lock);

	if (!isp->remote) {
		ret = -ENODEV;
		goto out;
	}

	if (enable && isp->stream_count == 0) {
		ret = risp_start(isp);
		if (ret)
			goto out;
	} else if (!enable && isp->stream_count == 1) {
		risp_stop(isp);
	}

	isp->stream_count += enable ? 1 : -1;
out:
	mutex_unlock(&isp->lock);

	return ret;
}

static const struct v4l2_subdev_video_ops risp_video_ops = {
	.s_stream = risp_s_stream,
};

/*
 * V4L2 subdev .set_fmt handler. Unsupported media bus codes fall back
 * to the first entry of rcar_isp_formats[]. ACTIVE formats are stored
 * in isp->mf (under isp->lock); TRY formats go to the subdev state.
 */
static int risp_set_pad_format(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *format)
{
	struct rcar_isp *isp = sd_to_isp(sd);
	struct v4l2_mbus_framefmt *framefmt;

	mutex_lock(&isp->lock);

	if (!risp_code_to_fmt(format->format.code))
		format->format.code = rcar_isp_formats[0].code;

	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
		isp->mf = format->format;
	} else {
		framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
		*framefmt = format->format;
	}

	mutex_unlock(&isp->lock);

	return 0;
}

/* V4L2 subdev .get_fmt handler: mirror of risp_set_pad_format(). */
static int risp_get_pad_format(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_format *format)
{
	struct rcar_isp *isp = sd_to_isp(sd);

	mutex_lock(&isp->lock);

	if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
		format->format = isp->mf;
	else
		format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);

	mutex_unlock(&isp->lock);

	return
0; } static const struct v4l2_subdev_pad_ops risp_pad_ops = { .set_fmt = risp_set_pad_format, .get_fmt = risp_get_pad_format, .link_validate = v4l2_subdev_link_validate_default, }; static const struct v4l2_subdev_ops rcar_isp_subdev_ops = { .video = &risp_video_ops, .pad = &risp_pad_ops, }; /* ----------------------------------------------------------------------------- * Async handling and registration of subdevices and links */ static int risp_notify_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asd) { struct rcar_isp *isp = notifier_to_isp(notifier); int pad; pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode, MEDIA_PAD_FL_SOURCE); if (pad < 0) { dev_err(isp->dev, "Failed to find pad for %s\n", subdev->name); return pad; } isp->remote = subdev; dev_dbg(isp->dev, "Bound %s pad: %d\n", subdev->name, pad); return media_create_pad_link(&subdev->entity, pad, &isp->subdev.entity, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); } static void risp_notify_unbind(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asd) { struct rcar_isp *isp = notifier_to_isp(notifier); isp->remote = NULL; dev_dbg(isp->dev, "Unbind %s\n", subdev->name); } static const struct v4l2_async_notifier_operations risp_notify_ops = { .bound = risp_notify_bound, .unbind = risp_notify_unbind, }; static int risp_parse_dt(struct rcar_isp *isp) { struct v4l2_async_connection *asd; struct fwnode_handle *fwnode; struct fwnode_handle *ep; unsigned int id; int ret; for (id = 0; id < 2; id++) { ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(isp->dev), 0, id, 0); if (ep) break; } if (!ep) { dev_err(isp->dev, "Not connected to subdevice\n"); return -EINVAL; } if (id == 1) isp->csi_input = RISP_CSI_INPUT1; fwnode = fwnode_graph_get_remote_endpoint(ep); fwnode_handle_put(ep); dev_dbg(isp->dev, "Found '%pOF'\n", to_of_node(fwnode)); v4l2_async_subdev_nf_init(&isp->notifier, &isp->subdev); 
isp->notifier.ops = &risp_notify_ops; asd = v4l2_async_nf_add_fwnode(&isp->notifier, fwnode, struct v4l2_async_connection); fwnode_handle_put(fwnode); if (IS_ERR(asd)) return PTR_ERR(asd); ret = v4l2_async_nf_register(&isp->notifier); if (ret) v4l2_async_nf_cleanup(&isp->notifier); return ret; } /* ----------------------------------------------------------------------------- * Platform Device Driver */ static const struct media_entity_operations risp_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static int risp_probe_resources(struct rcar_isp *isp, struct platform_device *pdev) { isp->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(isp->base)) return PTR_ERR(isp->base); isp->rstc = devm_reset_control_get(&pdev->dev, NULL); return PTR_ERR_OR_ZERO(isp->rstc); } static const struct of_device_id risp_of_id_table[] = { { .compatible = "renesas,r8a779a0-isp" }, { .compatible = "renesas,r8a779g0-isp" }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, risp_of_id_table); static int risp_probe(struct platform_device *pdev) { struct rcar_isp *isp; unsigned int i; int ret; isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL); if (!isp) return -ENOMEM; isp->dev = &pdev->dev; mutex_init(&isp->lock); ret = risp_probe_resources(isp, pdev); if (ret) { dev_err(isp->dev, "Failed to get resources\n"); goto error_mutex; } platform_set_drvdata(pdev, isp); pm_runtime_enable(&pdev->dev); ret = risp_parse_dt(isp); if (ret) goto error_pm; isp->subdev.owner = THIS_MODULE; isp->subdev.dev = &pdev->dev; v4l2_subdev_init(&isp->subdev, &rcar_isp_subdev_ops); v4l2_set_subdevdata(&isp->subdev, &pdev->dev); snprintf(isp->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s", KBUILD_MODNAME, dev_name(&pdev->dev)); isp->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE; isp->subdev.entity.function = MEDIA_ENT_F_VID_MUX; isp->subdev.entity.ops = &risp_entity_ops; isp->pads[RCAR_ISP_SINK].flags = MEDIA_PAD_FL_SINK; for (i = RCAR_ISP_PORT0; i < RCAR_ISP_NUM_PADS; i++) 
isp->pads[i].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&isp->subdev.entity, RCAR_ISP_NUM_PADS, isp->pads); if (ret) goto error_notifier; ret = v4l2_async_register_subdev(&isp->subdev); if (ret < 0) goto error_notifier; dev_info(isp->dev, "Using CSI-2 input: %u\n", isp->csi_input); return 0; error_notifier: v4l2_async_nf_unregister(&isp->notifier); v4l2_async_nf_cleanup(&isp->notifier); error_pm: pm_runtime_disable(&pdev->dev); error_mutex: mutex_destroy(&isp->lock); return ret; } static void risp_remove(struct platform_device *pdev) { struct rcar_isp *isp = platform_get_drvdata(pdev); v4l2_async_nf_unregister(&isp->notifier); v4l2_async_nf_cleanup(&isp->notifier); v4l2_async_unregister_subdev(&isp->subdev); pm_runtime_disable(&pdev->dev); mutex_destroy(&isp->lock); } static struct platform_driver rcar_isp_driver = { .driver = { .name = "rcar-isp", .of_match_table = risp_of_id_table, }, .probe = risp_probe, .remove_new = risp_remove, }; module_platform_driver(rcar_isp_driver); MODULE_AUTHOR("Niklas Söderlund <[email protected]>"); MODULE_DESCRIPTION("Renesas R-Car ISP Channel Selector driver"); MODULE_LICENSE("GPL");
/* linux-master */
/* drivers/media/platform/renesas/rcar-isp.c */
// SPDX-License-Identifier: GPL-2.0+ /* * R-Car Gen3 Digital Radio Interface (DRIF) driver * * Copyright (C) 2017 Renesas Electronics Corporation */ /* * The R-Car DRIF is a receive only MSIOF like controller with an * external master device driving the SCK. It receives data into a FIFO, * then this driver uses the SYS-DMAC engine to move the data from * the device to memory. * * Each DRIF channel DRIFx (as per datasheet) contains two internal * channels DRIFx0 & DRIFx1 within itself with each having its own resources * like module clk, register set, irq and dma. These internal channels share * common CLK & SYNC from master. The two data pins D0 & D1 shall be * considered to represent the two internal channels. This internal split * is not visible to the master device. * * Depending on the master device, a DRIF channel can use * (1) both internal channels (D0 & D1) to receive data in parallel (or) * (2) one internal channel (D0 or D1) to receive data * * The primary design goal of this controller is to act as a Digital Radio * Interface that receives digital samples from a tuner device. Hence the * driver exposes the device as a V4L2 SDR device. In order to qualify as * a V4L2 SDR device, it should possess a tuner interface as mandated by the * framework. This driver expects a tuner driver (sub-device) to bind * asynchronously with this device and the combined drivers shall expose * a V4L2 compliant SDR device. The DRIF driver is independent of the * tuner vendor. * * The DRIF h/w can support I2S mode and Frame start synchronization pulse mode. * This driver is tested for I2S mode only because of the availability of * suitable master devices. Hence, not all configurable options of DRIF h/w * like lsb/msb first, syncdl, dtdl etc. are exposed via DT and I2S defaults * are used. These can be exposed later if needed after testing. 
*/ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/ioctl.h> #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <media/v4l2-async.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-fh.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-vmalloc.h> /* DRIF register offsets */ #define RCAR_DRIF_SITMDR1 0x00 #define RCAR_DRIF_SITMDR2 0x04 #define RCAR_DRIF_SITMDR3 0x08 #define RCAR_DRIF_SIRMDR1 0x10 #define RCAR_DRIF_SIRMDR2 0x14 #define RCAR_DRIF_SIRMDR3 0x18 #define RCAR_DRIF_SICTR 0x28 #define RCAR_DRIF_SIFCTR 0x30 #define RCAR_DRIF_SISTR 0x40 #define RCAR_DRIF_SIIER 0x44 #define RCAR_DRIF_SIRFDR 0x60 #define RCAR_DRIF_RFOVF BIT(3) /* Receive FIFO overflow */ #define RCAR_DRIF_RFUDF BIT(4) /* Receive FIFO underflow */ #define RCAR_DRIF_RFSERR BIT(5) /* Receive frame sync error */ #define RCAR_DRIF_REOF BIT(7) /* Frame reception end */ #define RCAR_DRIF_RDREQ BIT(12) /* Receive data xfer req */ #define RCAR_DRIF_RFFUL BIT(13) /* Receive FIFO full */ /* SIRMDR1 */ #define RCAR_DRIF_SIRMDR1_SYNCMD_FRAME (0 << 28) #define RCAR_DRIF_SIRMDR1_SYNCMD_LR (3 << 28) #define RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH (0 << 25) #define RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW (1 << 25) #define RCAR_DRIF_SIRMDR1_MSB_FIRST (0 << 24) #define RCAR_DRIF_SIRMDR1_LSB_FIRST (1 << 24) #define RCAR_DRIF_SIRMDR1_DTDL_0 (0 << 20) #define RCAR_DRIF_SIRMDR1_DTDL_1 (1 << 20) #define RCAR_DRIF_SIRMDR1_DTDL_2 (2 << 20) #define RCAR_DRIF_SIRMDR1_DTDL_0PT5 (5 << 20) #define RCAR_DRIF_SIRMDR1_DTDL_1PT5 (6 << 20) #define RCAR_DRIF_SIRMDR1_SYNCDL_0 (0 << 20) #define RCAR_DRIF_SIRMDR1_SYNCDL_1 (1 << 20) #define RCAR_DRIF_SIRMDR1_SYNCDL_2 (2 << 20) #define RCAR_DRIF_SIRMDR1_SYNCDL_3 (3 << 20) 
#define RCAR_DRIF_SIRMDR1_SYNCDL_0PT5 (5 << 20) #define RCAR_DRIF_SIRMDR1_SYNCDL_1PT5 (6 << 20) #define RCAR_DRIF_MDR_GRPCNT(n) (((n) - 1) << 30) #define RCAR_DRIF_MDR_BITLEN(n) (((n) - 1) << 24) #define RCAR_DRIF_MDR_WDCNT(n) (((n) - 1) << 16) /* Hidden Transmit register that controls CLK & SYNC */ #define RCAR_DRIF_SITMDR1_PCON BIT(30) #define RCAR_DRIF_SICTR_RX_RISING_EDGE BIT(26) #define RCAR_DRIF_SICTR_RX_EN BIT(8) #define RCAR_DRIF_SICTR_RESET BIT(0) /* Constants */ #define RCAR_DRIF_NUM_HWBUFS 32 #define RCAR_DRIF_MAX_DEVS 4 #define RCAR_DRIF_DEFAULT_NUM_HWBUFS 16 #define RCAR_DRIF_DEFAULT_HWBUF_SIZE (4 * PAGE_SIZE) #define RCAR_DRIF_MAX_CHANNEL 2 #define RCAR_SDR_BUFFER_SIZE SZ_64K /* Internal buffer status flags */ #define RCAR_DRIF_BUF_DONE BIT(0) /* DMA completed */ #define RCAR_DRIF_BUF_OVERFLOW BIT(1) /* Overflow detected */ #define to_rcar_drif_buf_pair(sdr, ch_num, idx) \ (&((sdr)->ch[!(ch_num)]->buf[(idx)])) #define for_each_rcar_drif_channel(ch, ch_mask) \ for_each_set_bit(ch, ch_mask, RCAR_DRIF_MAX_CHANNEL) /* Debug */ #define rdrif_dbg(sdr, fmt, arg...) \ dev_dbg(sdr->v4l2_dev.dev, fmt, ## arg) #define rdrif_err(sdr, fmt, arg...) 
\ dev_err(sdr->v4l2_dev.dev, fmt, ## arg) /* Stream formats */ struct rcar_drif_format { u32 pixelformat; u32 buffersize; u32 bitlen; u32 wdcnt; u32 num_ch; }; /* Format descriptions for capture */ static const struct rcar_drif_format formats[] = { { .pixelformat = V4L2_SDR_FMT_PCU16BE, .buffersize = RCAR_SDR_BUFFER_SIZE, .bitlen = 16, .wdcnt = 1, .num_ch = 2, }, { .pixelformat = V4L2_SDR_FMT_PCU18BE, .buffersize = RCAR_SDR_BUFFER_SIZE, .bitlen = 18, .wdcnt = 1, .num_ch = 2, }, { .pixelformat = V4L2_SDR_FMT_PCU20BE, .buffersize = RCAR_SDR_BUFFER_SIZE, .bitlen = 20, .wdcnt = 1, .num_ch = 2, }, }; /* Buffer for a received frame from one or both internal channels */ struct rcar_drif_frame_buf { /* Common v4l buffer stuff -- must be first */ struct vb2_v4l2_buffer vb; struct list_head list; }; /* OF graph endpoint's V4L2 async data */ struct rcar_drif_graph_ep { struct v4l2_subdev *subdev; /* Async matched subdev */ }; /* DMA buffer */ struct rcar_drif_hwbuf { void *addr; /* CPU-side address */ unsigned int status; /* Buffer status flags */ }; /* Internal channel */ struct rcar_drif { struct rcar_drif_sdr *sdr; /* Group device */ struct platform_device *pdev; /* Channel's pdev */ void __iomem *base; /* Base register address */ resource_size_t start; /* I/O resource offset */ struct dma_chan *dmach; /* Reserved DMA channel */ struct clk *clk; /* Module clock */ struct rcar_drif_hwbuf buf[RCAR_DRIF_NUM_HWBUFS]; /* H/W bufs */ dma_addr_t dma_handle; /* Handle for all bufs */ unsigned int num; /* Channel number */ bool acting_sdr; /* Channel acting as SDR device */ }; /* DRIF V4L2 SDR */ struct rcar_drif_sdr { struct device *dev; /* Platform device */ struct video_device *vdev; /* V4L2 SDR device */ struct v4l2_device v4l2_dev; /* V4L2 device */ /* Videobuf2 queue and queued buffers list */ struct vb2_queue vb_queue; struct list_head queued_bufs; spinlock_t queued_bufs_lock; /* Protects queued_bufs */ spinlock_t dma_lock; /* To serialize DMA cb of channels */ struct mutex 
v4l2_mutex; /* To serialize ioctls */ struct mutex vb_queue_mutex; /* To serialize streaming ioctls */ struct v4l2_ctrl_handler ctrl_hdl; /* SDR control handler */ struct v4l2_async_notifier notifier; /* For subdev (tuner) */ struct rcar_drif_graph_ep ep; /* Endpoint V4L2 async data */ /* Current V4L2 SDR format ptr */ const struct rcar_drif_format *fmt; /* Device tree SYNC properties */ u32 mdr1; /* Internals */ struct rcar_drif *ch[RCAR_DRIF_MAX_CHANNEL]; /* DRIFx0,1 */ unsigned long hw_ch_mask; /* Enabled channels per DT */ unsigned long cur_ch_mask; /* Used channels for an SDR FMT */ u32 num_hw_ch; /* Num of DT enabled channels */ u32 num_cur_ch; /* Num of used channels */ u32 hwbuf_size; /* Each DMA buffer size */ u32 produced; /* Buffers produced by sdr dev */ }; /* Register access functions */ static void rcar_drif_write(struct rcar_drif *ch, u32 offset, u32 data) { writel(data, ch->base + offset); } static u32 rcar_drif_read(struct rcar_drif *ch, u32 offset) { return readl(ch->base + offset); } /* Release DMA channels */ static void rcar_drif_release_dmachannels(struct rcar_drif_sdr *sdr) { unsigned int i; for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) if (sdr->ch[i]->dmach) { dma_release_channel(sdr->ch[i]->dmach); sdr->ch[i]->dmach = NULL; } } /* Allocate DMA channels */ static int rcar_drif_alloc_dmachannels(struct rcar_drif_sdr *sdr) { struct dma_slave_config dma_cfg; unsigned int i; int ret; for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { struct rcar_drif *ch = sdr->ch[i]; ch->dmach = dma_request_chan(&ch->pdev->dev, "rx"); if (IS_ERR(ch->dmach)) { ret = PTR_ERR(ch->dmach); if (ret != -EPROBE_DEFER) rdrif_err(sdr, "ch%u: dma channel req failed: %pe\n", i, ch->dmach); ch->dmach = NULL; goto dmach_error; } /* Configure slave */ memset(&dma_cfg, 0, sizeof(dma_cfg)); dma_cfg.src_addr = (phys_addr_t)(ch->start + RCAR_DRIF_SIRFDR); dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; ret = dmaengine_slave_config(ch->dmach, &dma_cfg); if (ret) { 
rdrif_err(sdr, "ch%u: dma slave config failed\n", i); goto dmach_error; } } return 0; dmach_error: rcar_drif_release_dmachannels(sdr); return ret; } /* Release queued vb2 buffers */ static void rcar_drif_release_queued_bufs(struct rcar_drif_sdr *sdr, enum vb2_buffer_state state) { struct rcar_drif_frame_buf *fbuf, *tmp; unsigned long flags; spin_lock_irqsave(&sdr->queued_bufs_lock, flags); list_for_each_entry_safe(fbuf, tmp, &sdr->queued_bufs, list) { list_del(&fbuf->list); vb2_buffer_done(&fbuf->vb.vb2_buf, state); } spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags); } /* Set MDR defaults */ static inline void rcar_drif_set_mdr1(struct rcar_drif_sdr *sdr) { unsigned int i; /* Set defaults for enabled internal channels */ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { /* Refer MSIOF section in manual for this register setting */ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SITMDR1, RCAR_DRIF_SITMDR1_PCON); /* Setup MDR1 value */ rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR1, sdr->mdr1); rdrif_dbg(sdr, "ch%u: mdr1 = 0x%08x", i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR1)); } } /* Set DRIF receive format */ static int rcar_drif_set_format(struct rcar_drif_sdr *sdr) { unsigned int i; rdrif_dbg(sdr, "setfmt: bitlen %u wdcnt %u num_ch %u\n", sdr->fmt->bitlen, sdr->fmt->wdcnt, sdr->fmt->num_ch); /* Sanity check */ if (sdr->fmt->num_ch > sdr->num_cur_ch) { rdrif_err(sdr, "fmt num_ch %u cur_ch %u mismatch\n", sdr->fmt->num_ch, sdr->num_cur_ch); return -EINVAL; } /* Setup group, bitlen & wdcnt */ for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { u32 mdr; /* Two groups */ mdr = RCAR_DRIF_MDR_GRPCNT(2) | RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) | RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt); rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR2, mdr); mdr = RCAR_DRIF_MDR_BITLEN(sdr->fmt->bitlen) | RCAR_DRIF_MDR_WDCNT(sdr->fmt->wdcnt); rcar_drif_write(sdr->ch[i], RCAR_DRIF_SIRMDR3, mdr); rdrif_dbg(sdr, "ch%u: new mdr[2,3] = 0x%08x, 0x%08x\n", i, rcar_drif_read(sdr->ch[i], 
RCAR_DRIF_SIRMDR2), rcar_drif_read(sdr->ch[i], RCAR_DRIF_SIRMDR3)); } return 0; } /* Release DMA buffers */ static void rcar_drif_release_buf(struct rcar_drif_sdr *sdr) { unsigned int i; for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { struct rcar_drif *ch = sdr->ch[i]; /* First entry contains the dma buf ptr */ if (ch->buf[0].addr) { dma_free_coherent(&ch->pdev->dev, sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS, ch->buf[0].addr, ch->dma_handle); ch->buf[0].addr = NULL; } } } /* Request DMA buffers */ static int rcar_drif_request_buf(struct rcar_drif_sdr *sdr) { int ret = -ENOMEM; unsigned int i, j; void *addr; for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) { struct rcar_drif *ch = sdr->ch[i]; /* Allocate DMA buffers */ addr = dma_alloc_coherent(&ch->pdev->dev, sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS, &ch->dma_handle, GFP_KERNEL); if (!addr) { rdrif_err(sdr, "ch%u: dma alloc failed. num hwbufs %u size %u\n", i, RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size); goto error; } /* Split the chunk and populate bufctxt */ for (j = 0; j < RCAR_DRIF_NUM_HWBUFS; j++) { ch->buf[j].addr = addr + (j * sdr->hwbuf_size); ch->buf[j].status = 0; } } return 0; error: return ret; } /* Setup vb_queue minimum buffer requirements */ static int rcar_drif_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], struct device *alloc_devs[]) { struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq); /* Need at least 16 buffers */ if (vq->num_buffers + *num_buffers < 16) *num_buffers = 16 - vq->num_buffers; *num_planes = 1; sizes[0] = PAGE_ALIGN(sdr->fmt->buffersize); rdrif_dbg(sdr, "num_bufs %d sizes[0] %d\n", *num_buffers, sizes[0]); return 0; } /* Enqueue buffer */ static void rcar_drif_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vb->vb2_queue); struct rcar_drif_frame_buf *fbuf = container_of(vbuf, struct rcar_drif_frame_buf, vb); unsigned long flags; rdrif_dbg(sdr, 
"buf_queue idx %u\n", vb->index);

	/* Producer side: append under queued_bufs_lock. */
	spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
	list_add_tail(&fbuf->list, &sdr->queued_bufs);
	spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
}

/* Get a frame buf from list */
static struct rcar_drif_frame_buf *
rcar_drif_get_fbuf(struct rcar_drif_sdr *sdr)
{
	struct rcar_drif_frame_buf *fbuf;
	unsigned long flags;

	spin_lock_irqsave(&sdr->queued_bufs_lock, flags);
	fbuf = list_first_entry_or_null(&sdr->queued_bufs,
					struct rcar_drif_frame_buf, list);
	if (!fbuf) {
		/*
		 * App is late in enqueing buffers. Samples lost & there will
		 * be a gap in sequence number when app recovers
		 */
		rdrif_dbg(sdr, "\napp late: prod %u\n", sdr->produced);
		spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);
		return NULL;
	}
	list_del(&fbuf->list);
	spin_unlock_irqrestore(&sdr->queued_bufs_lock, flags);

	return fbuf;
}

/* Helpers to set/clear buf pair status */

/* True only when BOTH channels' buffers carry the DONE flag. */
static inline bool rcar_drif_bufs_done(struct rcar_drif_hwbuf **buf)
{
	return (buf[0]->status & buf[1]->status & RCAR_DRIF_BUF_DONE);
}

/* True when EITHER channel's buffer carries the OVERFLOW flag. */
static inline bool rcar_drif_bufs_overflow(struct rcar_drif_hwbuf **buf)
{
	return ((buf[0]->status | buf[1]->status) & RCAR_DRIF_BUF_OVERFLOW);
}

/* Clear the given status bit on both channels' buffers. */
static inline void rcar_drif_bufs_clear(struct rcar_drif_hwbuf **buf,
					unsigned int bit)
{
	unsigned int i;

	for (i = 0; i < RCAR_DRIF_MAX_CHANNEL; i++)
		buf[i]->status &= ~bit;
}

/* Channel DMA complete */
static void rcar_drif_channel_complete(struct rcar_drif *ch, u32 idx)
{
	u32 str;

	ch->buf[idx].status |= RCAR_DRIF_BUF_DONE;

	/* Check for DRIF errors */
	str = rcar_drif_read(ch, RCAR_DRIF_SISTR);
	if (unlikely(str & RCAR_DRIF_RFOVF)) {
		/* Writing the same clears it */
		rcar_drif_write(ch, RCAR_DRIF_SISTR, str);
		/* Overflow: some samples are lost */
		ch->buf[idx].status |= RCAR_DRIF_BUF_OVERFLOW;
	}
}

/* DMA callback for each stage */
static void rcar_drif_dma_complete(void *dma_async_param)
{
	struct rcar_drif *ch = dma_async_param;
	struct rcar_drif_sdr *sdr = ch->sdr;
	struct rcar_drif_hwbuf *buf[RCAR_DRIF_MAX_CHANNEL];
	struct
rcar_drif_frame_buf *fbuf; bool overflow = false; u32 idx, produced; unsigned int i; spin_lock(&sdr->dma_lock); /* DMA can be terminated while the callback was waiting on lock */ if (!vb2_is_streaming(&sdr->vb_queue)) { spin_unlock(&sdr->dma_lock); return; } idx = sdr->produced % RCAR_DRIF_NUM_HWBUFS; rcar_drif_channel_complete(ch, idx); if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL) { buf[0] = ch->num ? to_rcar_drif_buf_pair(sdr, ch->num, idx) : &ch->buf[idx]; buf[1] = ch->num ? &ch->buf[idx] : to_rcar_drif_buf_pair(sdr, ch->num, idx); /* Check if both DMA buffers are done */ if (!rcar_drif_bufs_done(buf)) { spin_unlock(&sdr->dma_lock); return; } /* Clear buf done status */ rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_DONE); if (rcar_drif_bufs_overflow(buf)) { overflow = true; /* Clear the flag in status */ rcar_drif_bufs_clear(buf, RCAR_DRIF_BUF_OVERFLOW); } } else { buf[0] = &ch->buf[idx]; if (buf[0]->status & RCAR_DRIF_BUF_OVERFLOW) { overflow = true; /* Clear the flag in status */ buf[0]->status &= ~RCAR_DRIF_BUF_OVERFLOW; } } /* Buffer produced for consumption */ produced = sdr->produced++; spin_unlock(&sdr->dma_lock); rdrif_dbg(sdr, "ch%u: prod %u\n", ch->num, produced); /* Get fbuf */ fbuf = rcar_drif_get_fbuf(sdr); if (!fbuf) return; for (i = 0; i < RCAR_DRIF_MAX_CHANNEL; i++) memcpy(vb2_plane_vaddr(&fbuf->vb.vb2_buf, 0) + i * sdr->hwbuf_size, buf[i]->addr, sdr->hwbuf_size); fbuf->vb.field = V4L2_FIELD_NONE; fbuf->vb.sequence = produced; fbuf->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_set_plane_payload(&fbuf->vb.vb2_buf, 0, sdr->fmt->buffersize); /* Set error state on overflow */ vb2_buffer_done(&fbuf->vb.vb2_buf, overflow ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}

/*
 * Queue one cyclic DMA transfer covering all hw buffers of a channel.
 * The descriptor fires rcar_drif_dma_complete() once per hwbuf_size period.
 */
static int rcar_drif_qbuf(struct rcar_drif *ch)
{
	struct rcar_drif_sdr *sdr = ch->sdr;
	dma_addr_t addr = ch->dma_handle;
	struct dma_async_tx_descriptor *rxd;
	dma_cookie_t cookie;
	int ret = -EIO;

	/* Setup cyclic DMA with given buffers */
	rxd = dmaengine_prep_dma_cyclic(ch->dmach, addr,
					sdr->hwbuf_size * RCAR_DRIF_NUM_HWBUFS,
					sdr->hwbuf_size, DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxd) {
		rdrif_err(sdr, "ch%u: prep dma cyclic failed\n", ch->num);
		return ret;
	}

	/* Submit descriptor */
	rxd->callback = rcar_drif_dma_complete;
	rxd->callback_param = ch;
	cookie = dmaengine_submit(rxd);
	if (dma_submit_error(cookie)) {
		rdrif_err(sdr, "ch%u: dma submit failed\n", ch->num);
		return ret;
	}

	dma_async_issue_pending(ch->dmach);
	return 0;
}

/*
 * Enable reception.
 *
 * Sets RX enable on every current channel, then polls SICTR until the
 * enable bit reads back set (up to 100 ms per channel). Returns 0 on
 * success or the readl_poll_timeout() error of the failing channel.
 */
static int rcar_drif_enable_rx(struct rcar_drif_sdr *sdr)
{
	unsigned int i;
	u32 ctr;
	int ret = -EINVAL;

	/*
	 * When both internal channels are enabled, they can be synchronized
	 * only by the master
	 */

	/* Enable receive */
	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR);
		ctr |= (RCAR_DRIF_SICTR_RX_RISING_EDGE |
			 RCAR_DRIF_SICTR_RX_EN);
		rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr);
	}

	/* Check receive enabled */
	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR,
					 ctr, ctr & RCAR_DRIF_SICTR_RX_EN,
					 7, 100000);
		if (ret) {
			rdrif_err(sdr, "ch%u: rx en failed. ctr 0x%08x\n", i,
				  rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR));
			break;
		}
	}
	return ret;
}

/*
 * Disable reception.
 *
 * Clears RX enable on every current channel and polls until it reads back
 * cleared; a failure to disable is only warned about, not propagated.
 */
static void rcar_drif_disable_rx(struct rcar_drif_sdr *sdr)
{
	unsigned int i;
	u32 ctr;
	int ret;

	/* Disable receive */
	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ctr = rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR);
		ctr &= ~RCAR_DRIF_SICTR_RX_EN;
		rcar_drif_write(sdr->ch[i], RCAR_DRIF_SICTR, ctr);
	}

	/* Check receive disabled */
	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ret = readl_poll_timeout(sdr->ch[i]->base + RCAR_DRIF_SICTR,
					 ctr, !(ctr & RCAR_DRIF_SICTR_RX_EN),
					 7, 100000);
		if (ret)
			dev_warn(&sdr->vdev->dev,
				 "ch%u: failed to disable rx. ctr 0x%08x\n",
				 i, rcar_drif_read(sdr->ch[i], RCAR_DRIF_SICTR));
	}
}

/* Stop channel: mask its DMA interrupt and tear down the cyclic transfer */
static void rcar_drif_stop_channel(struct rcar_drif *ch)
{
	/* Disable DMA receive interrupt */
	rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00000000);

	/* Terminate all DMA transfers */
	dmaengine_terminate_sync(ch->dmach);
}

/* Stop receive operation: disable RX first, then stop every channel */
static void rcar_drif_stop(struct rcar_drif_sdr *sdr)
{
	unsigned int i;

	/* Disable Rx */
	rcar_drif_disable_rx(sdr);

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
		rcar_drif_stop_channel(sdr->ch[i]);
}

/*
 * Start channel: reset the receiver, queue the cyclic DMA, clear stale
 * status flags and unmask the DMA receive interrupt.
 */
static int rcar_drif_start_channel(struct rcar_drif *ch)
{
	struct rcar_drif_sdr *sdr = ch->sdr;
	u32 ctr, str;
	int ret;

	/* Reset receive */
	rcar_drif_write(ch, RCAR_DRIF_SICTR, RCAR_DRIF_SICTR_RESET);
	ret = readl_poll_timeout(ch->base + RCAR_DRIF_SICTR, ctr,
				 !(ctr & RCAR_DRIF_SICTR_RESET), 7, 100000);
	if (ret) {
		rdrif_err(sdr, "ch%u: failed to reset rx. ctr 0x%08x\n",
			  ch->num, rcar_drif_read(ch, RCAR_DRIF_SICTR));
		return ret;
	}

	/* Queue buffers for DMA */
	ret = rcar_drif_qbuf(ch);
	if (ret)
		return ret;

	/* Clear status register flags */
	str = RCAR_DRIF_RFFUL | RCAR_DRIF_REOF | RCAR_DRIF_RFSERR |
		RCAR_DRIF_RFUDF | RCAR_DRIF_RFOVF;
	rcar_drif_write(ch, RCAR_DRIF_SISTR, str);

	/* Enable DMA receive interrupt */
	rcar_drif_write(ch, RCAR_DRIF_SIIER, 0x00009000);

	return ret;
}

/*
 * Start receive operation.
 *
 * Starts each current channel, then enables RX; on failure every channel
 * already started (tracked in @enabled) is stopped again.
 */
static int rcar_drif_start(struct rcar_drif_sdr *sdr)
{
	unsigned long enabled = 0;
	unsigned int i;
	int ret;

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ret = rcar_drif_start_channel(sdr->ch[i]);
		if (ret)
			goto start_error;
		enabled |= BIT(i);
	}

	ret = rcar_drif_enable_rx(sdr);
	if (ret)
		goto enable_error;

	sdr->produced = 0;
	return ret;

enable_error:
	rcar_drif_disable_rx(sdr);
start_error:
	for_each_rcar_drif_channel(i, &enabled)
		rcar_drif_stop_channel(sdr->ch[i]);

	return ret;
}

/*
 * Start streaming (vb2 start_streaming op).
 *
 * Enables channel clocks, programs MDR1 and the format, sizes the per
 * channel hw buffers (the frame is split across channels in dual-channel
 * mode), allocates DMA channels and internal buffers, then starts RX.
 * All acquired resources are released on the shared error path.
 */
static int rcar_drif_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
	unsigned long enabled = 0;
	unsigned int i;
	int ret;

	mutex_lock(&sdr->v4l2_mutex);

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask) {
		ret = clk_prepare_enable(sdr->ch[i]->clk);
		if (ret)
			goto error;
		enabled |= BIT(i);
	}

	/* Set default MDRx settings */
	rcar_drif_set_mdr1(sdr);

	/* Set new format */
	ret = rcar_drif_set_format(sdr);
	if (ret)
		goto error;

	if (sdr->num_cur_ch == RCAR_DRIF_MAX_CHANNEL)
		sdr->hwbuf_size = sdr->fmt->buffersize / RCAR_DRIF_MAX_CHANNEL;
	else
		sdr->hwbuf_size = sdr->fmt->buffersize;

	rdrif_dbg(sdr, "num hwbufs %u, hwbuf_size %u\n",
		RCAR_DRIF_NUM_HWBUFS, sdr->hwbuf_size);

	/* Alloc DMA channel */
	ret = rcar_drif_alloc_dmachannels(sdr);
	if (ret)
		goto error;

	/* Request buffers */
	ret = rcar_drif_request_buf(sdr);
	if (ret)
		goto error;

	/* Start Rx */
	ret = rcar_drif_start(sdr);
	if (ret)
		goto error;

	mutex_unlock(&sdr->v4l2_mutex);

	return ret;

error:
	rcar_drif_release_queued_bufs(sdr,
VB2_BUF_STATE_QUEUED);
	rcar_drif_release_buf(sdr);
	rcar_drif_release_dmachannels(sdr);
	for_each_rcar_drif_channel(i, &enabled)
		clk_disable_unprepare(sdr->ch[i]->clk);
	mutex_unlock(&sdr->v4l2_mutex);

	return ret;
}

/*
 * Stop streaming (vb2 stop_streaming op).
 *
 * Mirrors start_streaming: stop the hardware, give every queued buffer
 * back to vb2 in ERROR state, then release buffers, DMA channels and
 * clocks, all under the v4l2 mutex.
 */
static void rcar_drif_stop_streaming(struct vb2_queue *vq)
{
	struct rcar_drif_sdr *sdr = vb2_get_drv_priv(vq);
	unsigned int i;

	mutex_lock(&sdr->v4l2_mutex);

	/* Stop hardware streaming */
	rcar_drif_stop(sdr);

	/* Return all queued buffers to vb2 */
	rcar_drif_release_queued_bufs(sdr, VB2_BUF_STATE_ERROR);

	/* Release buf */
	rcar_drif_release_buf(sdr);

	/* Release DMA channel resources */
	rcar_drif_release_dmachannels(sdr);

	for_each_rcar_drif_channel(i, &sdr->cur_ch_mask)
		clk_disable_unprepare(sdr->ch[i]->clk);

	mutex_unlock(&sdr->v4l2_mutex);
}

/* Vb2 ops */
static const struct vb2_ops rcar_drif_vb2_ops = {
	.queue_setup		= rcar_drif_queue_setup,
	.buf_queue		= rcar_drif_buf_queue,
	.start_streaming	= rcar_drif_start_streaming,
	.stop_streaming		= rcar_drif_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};

/* VIDIOC_QUERYCAP: identify the driver and device */
static int rcar_drif_querycap(struct file *file, void *fh,
			      struct v4l2_capability *cap)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
	strscpy(cap->card, sdr->vdev->name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 sdr->vdev->name);

	return 0;
}

/*
 * Pick the default SDR format: the first entry in formats[] whose channel
 * count matches the number of hardware channels present.
 */
static int rcar_drif_set_default_format(struct rcar_drif_sdr *sdr)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		/* Matching fmt based on required channels is set as default */
		if (sdr->num_hw_ch == formats[i].num_ch) {
			sdr->fmt = &formats[i];
			sdr->cur_ch_mask = sdr->hw_ch_mask;
			sdr->num_cur_ch = sdr->num_hw_ch;
			dev_dbg(sdr->dev, "default fmt[%u]: mask %lu num %u\n",
				i, sdr->cur_ch_mask, sdr->num_cur_ch);
			return 0;
		}
	}
	return -EINVAL;
}

/* VIDIOC_ENUM_FMT for SDR capture */
static int rcar_drif_enum_fmt_sdr_cap(struct file *file, void *priv,
				      struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	f->pixelformat = formats[f->index].pixelformat;

	return 0;
}

/* VIDIOC_G_FMT: report the currently selected SDR format */
static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
	f->fmt.sdr.buffersize = sdr->fmt->buffersize;

	return 0;
}

/*
 * VIDIOC_S_FMT: select an SDR format (falls back to formats[0] on an
 * unknown pixelformat) and derive the channel mask it requires.
 */
static int rcar_drif_s_fmt_sdr_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);
	struct vb2_queue *q = &sdr->vb_queue;
	unsigned int i;

	/* Format cannot change while buffers are in use */
	if (vb2_is_busy(q))
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (formats[i].pixelformat == f->fmt.sdr.pixelformat)
			break;
	}

	if (i == ARRAY_SIZE(formats))
		i = 0;		/* Set the 1st format as default on no match */

	sdr->fmt = &formats[i];
	f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
	f->fmt.sdr.buffersize = formats[i].buffersize;
	memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));

	/*
	 * If a format demands one channel only out of two
	 * enabled channels, pick the 0th channel.
	 */
	if (formats[i].num_ch < sdr->num_hw_ch) {
		sdr->cur_ch_mask = BIT(0);
		sdr->num_cur_ch = formats[i].num_ch;
	} else {
		sdr->cur_ch_mask = sdr->hw_ch_mask;
		sdr->num_cur_ch = sdr->num_hw_ch;
	}

	rdrif_dbg(sdr, "cur: idx %u mask %lu num %u\n", i,
		  sdr->cur_ch_mask, sdr->num_cur_ch);

	return 0;
}

/* VIDIOC_TRY_FMT: validate a format without applying it */
static int rcar_drif_try_fmt_sdr_cap(struct file *file, void *priv,
				     struct v4l2_format *f)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); i++) {
		if (formats[i].pixelformat == f->fmt.sdr.pixelformat) {
			f->fmt.sdr.buffersize = formats[i].buffersize;
			return 0;
		}
	}

	/* Unknown pixelformat: suggest the first supported one */
	f->fmt.sdr.pixelformat = formats[0].pixelformat;
	f->fmt.sdr.buffersize = formats[0].buffersize;
	memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));

	return 0;
}

/* Tuner subdev ioctls: all forwarded to the bound tuner sub-device */
static int rcar_drif_enum_freq_bands(struct file *file, void *priv,
				     struct v4l2_frequency_band *band)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	return v4l2_subdev_call(sdr->ep.subdev, tuner, enum_freq_bands, band);
}

static int rcar_drif_g_frequency(struct file *file, void *priv,
				 struct v4l2_frequency *f)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	return v4l2_subdev_call(sdr->ep.subdev, tuner, g_frequency, f);
}

static int rcar_drif_s_frequency(struct file *file, void *priv,
				 const struct v4l2_frequency *f)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	return v4l2_subdev_call(sdr->ep.subdev, tuner, s_frequency, f);
}

static int rcar_drif_g_tuner(struct file *file, void *priv,
			     struct v4l2_tuner *vt)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	return v4l2_subdev_call(sdr->ep.subdev, tuner, g_tuner, vt);
}

static int rcar_drif_s_tuner(struct file *file, void *priv,
			     const struct v4l2_tuner *vt)
{
	struct rcar_drif_sdr *sdr = video_drvdata(file);

	return v4l2_subdev_call(sdr->ep.subdev, tuner, s_tuner, vt);
}

static const struct v4l2_ioctl_ops rcar_drif_ioctl_ops = {
	.vidioc_querycap	= rcar_drif_querycap,
	.vidioc_enum_fmt_sdr_cap	= rcar_drif_enum_fmt_sdr_cap,
	.vidioc_g_fmt_sdr_cap	=
rcar_drif_g_fmt_sdr_cap,
	.vidioc_s_fmt_sdr_cap	= rcar_drif_s_fmt_sdr_cap,
	.vidioc_try_fmt_sdr_cap	= rcar_drif_try_fmt_sdr_cap,
	.vidioc_reqbufs		= vb2_ioctl_reqbufs,
	.vidioc_create_bufs	= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
	.vidioc_querybuf	= vb2_ioctl_querybuf,
	.vidioc_qbuf		= vb2_ioctl_qbuf,
	.vidioc_dqbuf		= vb2_ioctl_dqbuf,
	.vidioc_streamon	= vb2_ioctl_streamon,
	.vidioc_streamoff	= vb2_ioctl_streamoff,
	.vidioc_s_frequency	= rcar_drif_s_frequency,
	.vidioc_g_frequency	= rcar_drif_g_frequency,
	.vidioc_s_tuner		= rcar_drif_s_tuner,
	.vidioc_g_tuner		= rcar_drif_g_tuner,
	.vidioc_enum_freq_bands	= rcar_drif_enum_freq_bands,
	.vidioc_subscribe_event	= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
	.vidioc_log_status	= v4l2_ctrl_log_status,
};

static const struct v4l2_file_operations rcar_drif_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.read		= vb2_fop_read,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.unlocked_ioctl	= video_ioctl2,
};

/*
 * Allocate, populate and register the V4L2 SDR video device.
 * On registration failure the video_device is released and sdr->vdev
 * reset to NULL so callers never see a dangling pointer.
 */
static int rcar_drif_sdr_register(struct rcar_drif_sdr *sdr)
{
	int ret;

	/* Init video_device structure */
	sdr->vdev = video_device_alloc();
	if (!sdr->vdev)
		return -ENOMEM;

	snprintf(sdr->vdev->name, sizeof(sdr->vdev->name), "R-Car DRIF");
	sdr->vdev->fops = &rcar_drif_fops;
	sdr->vdev->ioctl_ops = &rcar_drif_ioctl_ops;
	sdr->vdev->release = video_device_release;
	sdr->vdev->lock = &sdr->v4l2_mutex;
	sdr->vdev->queue = &sdr->vb_queue;
	sdr->vdev->queue->lock = &sdr->vb_queue_mutex;
	sdr->vdev->ctrl_handler = &sdr->ctrl_hdl;
	sdr->vdev->v4l2_dev = &sdr->v4l2_dev;
	sdr->vdev->device_caps = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER |
		V4L2_CAP_STREAMING | V4L2_CAP_READWRITE;
	video_set_drvdata(sdr->vdev, sdr);

	/* Register V4L2 SDR device */
	ret = video_register_device(sdr->vdev, VFL_TYPE_SDR, -1);
	if (ret) {
		video_device_release(sdr->vdev);
		sdr->vdev = NULL;
		dev_err(sdr->dev, "failed video_register_device (%d)\n", ret);
	}

	return ret;
}

/* Unregister the V4L2 SDR video device */
static void
rcar_drif_sdr_unregister(struct rcar_drif_sdr *sdr)
{
	video_unregister_device(sdr->vdev);
	sdr->vdev = NULL;
}

/* Sub-device bound callback: remember the tuner subdev */
static int rcar_drif_notify_bound(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *subdev,
				  struct v4l2_async_connection *asd)
{
	struct rcar_drif_sdr *sdr =
		container_of(notifier, struct rcar_drif_sdr, notifier);

	v4l2_set_subdev_hostdata(subdev, sdr);
	sdr->ep.subdev = subdev;
	rdrif_dbg(sdr, "bound asd %s\n", subdev->name);

	return 0;
}

/* Sub-device unbind callback: undo what bound/complete set up */
static void rcar_drif_notify_unbind(struct v4l2_async_notifier *notifier,
				    struct v4l2_subdev *subdev,
				    struct v4l2_async_connection *asd)
{
	struct rcar_drif_sdr *sdr =
		container_of(notifier, struct rcar_drif_sdr, notifier);

	if (sdr->ep.subdev != subdev) {
		rdrif_err(sdr, "subdev %s is not bound\n", subdev->name);
		return;
	}

	/* Free ctrl handler if initialized */
	v4l2_ctrl_handler_free(&sdr->ctrl_hdl);
	sdr->v4l2_dev.ctrl_handler = NULL;
	sdr->ep.subdev = NULL;

	rcar_drif_sdr_unregister(sdr);
	rdrif_dbg(sdr, "unbind asd %s\n", subdev->name);
}

/*
 * Sub-device registered notification callback.
 *
 * Once the tuner subdev is available: init the control handler, expose
 * the subdev nodes, inherit the subdev's controls and finally register
 * the SDR video device.
 */
static int rcar_drif_notify_complete(struct v4l2_async_notifier *notifier)
{
	struct rcar_drif_sdr *sdr =
		container_of(notifier, struct rcar_drif_sdr, notifier);
	int ret;

	/*
	 * The subdev tested at this point uses 4 controls. Using 10 as a worst
	 * case scenario hint. When less controls are needed there will be some
	 * unused memory and when more controls are needed the framework uses
	 * hash to manage controls within this number.
	 */
	ret = v4l2_ctrl_handler_init(&sdr->ctrl_hdl, 10);
	if (ret)
		return -ENOMEM;

	sdr->v4l2_dev.ctrl_handler = &sdr->ctrl_hdl;
	ret = v4l2_device_register_subdev_nodes(&sdr->v4l2_dev);
	if (ret) {
		rdrif_err(sdr, "failed: register subdev nodes ret %d\n", ret);
		goto error;
	}

	ret = v4l2_ctrl_add_handler(&sdr->ctrl_hdl,
				    sdr->ep.subdev->ctrl_handler, NULL, true);
	if (ret) {
		rdrif_err(sdr, "failed: ctrl add hdlr ret %d\n", ret);
		goto error;
	}

	ret = rcar_drif_sdr_register(sdr);
	if (ret)
		goto error;

	return ret;

error:
	v4l2_ctrl_handler_free(&sdr->ctrl_hdl);

	return ret;
}

static const struct v4l2_async_notifier_operations rcar_drif_notify_ops = {
	.bound = rcar_drif_notify_bound,
	.unbind = rcar_drif_notify_unbind,
	.complete = rcar_drif_notify_complete,
};

/* Read endpoint properties: build the SIRMDR1 value from DT */
static void rcar_drif_get_ep_properties(struct rcar_drif_sdr *sdr,
					struct fwnode_handle *fwnode)
{
	u32 val;

	/* Set the I2S defaults for SIRMDR1*/
	sdr->mdr1 = RCAR_DRIF_SIRMDR1_SYNCMD_LR | RCAR_DRIF_SIRMDR1_MSB_FIRST |
		RCAR_DRIF_SIRMDR1_DTDL_1 | RCAR_DRIF_SIRMDR1_SYNCDL_0;

	/* Parse sync polarity from endpoint */
	if (!fwnode_property_read_u32(fwnode, "sync-active", &val))
		sdr->mdr1 |= val ?
RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH :
			RCAR_DRIF_SIRMDR1_SYNCAC_POL_LOW;
	else
		sdr->mdr1 |= RCAR_DRIF_SIRMDR1_SYNCAC_POL_HIGH; /* default */

	dev_dbg(sdr->dev, "mdr1 0x%08x\n", sdr->mdr1);
}

/*
 * Parse sub-devs (tuner) to find a matching device.
 *
 * Walks the first OF graph endpoint, reads its properties into mdr1 and
 * adds the remote (tuner) fwnode to the async notifier. A device without
 * an endpoint is not an error: the notifier simply stays empty.
 */
static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
{
	struct v4l2_async_notifier *notifier = &sdr->notifier;
	struct fwnode_handle *fwnode, *ep;
	struct v4l2_async_connection *asd;

	v4l2_async_nf_init(&sdr->notifier, &sdr->v4l2_dev);

	ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(sdr->dev->of_node),
					    NULL);
	if (!ep)
		return 0;

	/* Get the endpoint properties */
	rcar_drif_get_ep_properties(sdr, ep);

	fwnode = fwnode_graph_get_remote_port_parent(ep);
	fwnode_handle_put(ep);
	if (!fwnode) {
		dev_warn(sdr->dev, "bad remote port parent\n");
		return -EINVAL;
	}

	asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
				       struct v4l2_async_connection);
	fwnode_handle_put(fwnode);
	if (IS_ERR(asd))
		return PTR_ERR(asd);

	return 0;
}

/* Check if the given device is the primary bond */
static bool rcar_drif_primary_bond(struct platform_device *pdev)
{
	return of_property_read_bool(pdev->dev.of_node, "renesas,primary-bond");
}

/* Check if both devices of the bond are enabled */
static struct device_node *rcar_drif_bond_enabled(struct platform_device *p)
{
	struct device_node *np;

	np = of_parse_phandle(p->dev.of_node, "renesas,bonding", 0);
	if (np && of_device_is_available(np))
		return np;

	return NULL;
}

/*
 * Check if the bonded device is probed.
 *
 * Cross-links the bonded channel with this SDR instance under the bonded
 * device's lock; returns -EPROBE_DEFER when the partner has not set its
 * drvdata yet.
 */
static int rcar_drif_bond_available(struct rcar_drif_sdr *sdr,
				    struct device_node *np)
{
	struct platform_device *pdev;
	struct rcar_drif *ch;
	int ret = 0;

	pdev = of_find_device_by_node(np);
	if (!pdev) {
		dev_err(sdr->dev, "failed to get bonded device from node\n");
		return -ENODEV;
	}

	device_lock(&pdev->dev);
	ch = platform_get_drvdata(pdev);
	if (ch) {
		/* Update sdr data in the bonded device */
		ch->sdr = sdr;

		/* Update sdr with bonded device data */
		sdr->ch[ch->num] = ch;
		sdr->hw_ch_mask |= BIT(ch->num);
	} else {
		/* Defer */
		dev_info(sdr->dev, "defer probe\n");
		ret = -EPROBE_DEFER;
	}
	device_unlock(&pdev->dev);

	put_device(&pdev->dev);

	return ret;
}

/*
 * V4L2 SDR device probe.
 *
 * Initializes locks, the vb2 queue and the v4l2_device, then registers
 * the async notifier. The video device itself is only registered later,
 * from the notifier's complete callback, once the tuner subdev is bound.
 */
static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
{
	int ret;

	/* Validate any supported format for enabled channels */
	ret = rcar_drif_set_default_format(sdr);
	if (ret) {
		dev_err(sdr->dev, "failed to set default format\n");
		return ret;
	}

	/* Set defaults */
	sdr->hwbuf_size = RCAR_DRIF_DEFAULT_HWBUF_SIZE;

	mutex_init(&sdr->v4l2_mutex);
	mutex_init(&sdr->vb_queue_mutex);
	spin_lock_init(&sdr->queued_bufs_lock);
	spin_lock_init(&sdr->dma_lock);
	INIT_LIST_HEAD(&sdr->queued_bufs);

	/* Init videobuf2 queue structure */
	sdr->vb_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE;
	sdr->vb_queue.io_modes = VB2_READ | VB2_MMAP | VB2_DMABUF;
	sdr->vb_queue.drv_priv = sdr;
	sdr->vb_queue.buf_struct_size = sizeof(struct rcar_drif_frame_buf);
	sdr->vb_queue.ops = &rcar_drif_vb2_ops;
	sdr->vb_queue.mem_ops = &vb2_vmalloc_memops;
	sdr->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

	/* Init videobuf2 queue */
	ret = vb2_queue_init(&sdr->vb_queue);
	if (ret) {
		dev_err(sdr->dev, "failed: vb2_queue_init ret %d\n", ret);
		return ret;
	}

	/* Register the v4l2_device */
	ret = v4l2_device_register(sdr->dev, &sdr->v4l2_dev);
	if (ret) {
		dev_err(sdr->dev, "failed: v4l2_device_register ret %d\n", ret);
		return ret;
	}

	/*
	 * Parse subdevs after v4l2_device_register because if the subdev
	 * is already probed, bound and complete will be called immediately
	 */
	ret = rcar_drif_parse_subdevs(sdr);
	if (ret)
		goto error;

	sdr->notifier.ops = &rcar_drif_notify_ops;

	/* Register notifier */
	ret = v4l2_async_nf_register(&sdr->notifier);
	if (ret < 0) {
		dev_err(sdr->dev, "failed: notifier register ret %d\n", ret);
		goto cleanup;
	}

	return ret;

cleanup:
	v4l2_async_nf_cleanup(&sdr->notifier);
error:
	v4l2_device_unregister(&sdr->v4l2_dev);

	return ret;
}

/* V4L2 SDR device remove */
static void rcar_drif_sdr_remove(struct rcar_drif_sdr *sdr)
{
	v4l2_async_nf_unregister(&sdr->notifier);
	v4l2_async_nf_cleanup(&sdr->notifier);
	v4l2_device_unregister(&sdr->v4l2_dev);
}

/*
 * DRIF channel probe.
 *
 * Each DRIF channel probes separately. In a bonded pair, the primary
 * (channel 0) allocates the shared SDR instance; the secondary records
 * itself as channel 1 and returns early. Probe defers until the bonded
 * partner has set its drvdata.
 */
static int rcar_drif_probe(struct platform_device *pdev)
{
	struct rcar_drif_sdr *sdr;
	struct device_node *np;
	struct rcar_drif *ch;
	struct resource *res;
	int ret;

	/* Reserve memory for enabled channel */
	ch = devm_kzalloc(&pdev->dev, sizeof(*ch), GFP_KERNEL);
	if (!ch)
		return -ENOMEM;
	ch->pdev = pdev;

	/* Module clock */
	ch->clk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(ch->clk)) {
		ret = PTR_ERR(ch->clk);
		dev_err(&pdev->dev, "clk get failed (%d)\n", ret);
		return ret;
	}

	/* Register map */
	ch->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(ch->base))
		return PTR_ERR(ch->base);
	ch->start = res->start;
	platform_set_drvdata(pdev, ch);

	/* Check if both channels of the bond are enabled */
	np = rcar_drif_bond_enabled(pdev);
	if (np) {
		/* Check if current channel acting as primary-bond */
		if (!rcar_drif_primary_bond(pdev)) {
			ch->num = 1;	/* Primary bond is channel 0 always */
			of_node_put(np);
			return 0;
		}
	}

	/* Reserve memory for SDR structure */
	sdr = devm_kzalloc(&pdev->dev, sizeof(*sdr), GFP_KERNEL);
	if (!sdr) {
		of_node_put(np);
		return -ENOMEM;
	}
	ch->sdr = sdr;
	sdr->dev = &pdev->dev;

	/* Establish links between SDR and channel(s) */
	sdr->ch[ch->num] = ch;
	sdr->hw_ch_mask = BIT(ch->num);

	if (np) {
		/* Check if bonded device is ready */
		ret = rcar_drif_bond_available(sdr, np);
		of_node_put(np);
		if (ret)
			return ret;
	}
	sdr->num_hw_ch = hweight_long(sdr->hw_ch_mask);

	return rcar_drif_sdr_probe(sdr);
}

/* DRIF channel remove */
static void rcar_drif_remove(struct platform_device *pdev)
{
	struct rcar_drif *ch = platform_get_drvdata(pdev);
	struct rcar_drif_sdr *sdr = ch->sdr;

	/* Channel 0 will be the SDR instance */
	if (ch->num)
		return;

	/* SDR instance */
	rcar_drif_sdr_remove(sdr);
}

/* FIXME: Implement suspend/resume support */
static int __maybe_unused rcar_drif_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused rcar_drif_resume(struct device *dev)
{
	return 0;
}

static SIMPLE_DEV_PM_OPS(rcar_drif_pm_ops, rcar_drif_suspend,
			 rcar_drif_resume);

static const struct of_device_id rcar_drif_of_table[] = {
	{ .compatible = "renesas,rcar-gen3-drif" },
	{ }
};
MODULE_DEVICE_TABLE(of, rcar_drif_of_table);

#define RCAR_DRIF_DRV_NAME "rcar_drif"

static struct platform_driver rcar_drif_driver = {
	.driver = {
		.name = RCAR_DRIF_DRV_NAME,
		.of_match_table = rcar_drif_of_table,
		.pm = &rcar_drif_pm_ops,
		},
	.probe = rcar_drif_probe,
	.remove_new = rcar_drif_remove,
};

module_platform_driver(rcar_drif_driver);

MODULE_DESCRIPTION("Renesas R-Car Gen3 DRIF driver");
MODULE_ALIAS("platform:" RCAR_DRIF_DRV_NAME);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ramesh Shanmugasundaram <[email protected]>");
/*
 * NOTE(review): the two lines below are dataset-extraction residue
 * (repository name and file path), preserved here as a comment so they
 * no longer break the translation unit:
 *   linux-master
 *   drivers/media/platform/renesas/rcar_drif.c
 */
// SPDX-License-Identifier: GPL-2.0 /* * Author: Mikhail Ulyanov * Copyright (C) 2014-2015 Cogent Embedded, Inc. <[email protected]> * Copyright (C) 2014-2015 Renesas Electronics Corporation * * This is based on the drivers/media/platform/samsung/s5p-jpeg driver by * Andrzej Pietrasiewicz and Jacek Anaszewski. * Some portions of code inspired by VSP1 driver by Laurent Pinchart. * * TODO in order of priority: * 1) Rotation * 2) Cropping * 3) V4L2_CID_JPEG_ACTIVE_MARKER */ #include <asm/unaligned.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/videodev2.h> #include <media/jpeg.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-fh.h> #include <media/v4l2-mem2mem.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-contig.h> #define DRV_NAME "rcar_jpu" /* * Align JPEG header end to cache line to make sure we will not have any issues * with cache; additionally to requirement (33.3.27 R01UH0501EJ0100 Rev.1.00) */ #define JPU_JPEG_HDR_SIZE (ALIGN(0x258, L1_CACHE_BYTES)) #define JPU_JPEG_MAX_BYTES_PER_PIXEL 2 /* 16 bit precision format */ #define JPU_JPEG_MIN_SIZE 25 /* SOI + SOF + EOI */ #define JPU_JPEG_QTBL_SIZE 0x40 #define JPU_JPEG_HDCTBL_SIZE 0x1c #define JPU_JPEG_HACTBL_SIZE 0xb2 #define JPU_JPEG_HEIGHT_OFFSET 0x91 #define JPU_JPEG_WIDTH_OFFSET 0x93 #define JPU_JPEG_SUBS_OFFSET 0x97 #define JPU_JPEG_QTBL_LUM_OFFSET 0x07 #define JPU_JPEG_QTBL_CHR_OFFSET 0x4c #define JPU_JPEG_HDCTBL_LUM_OFFSET 0xa4 #define JPU_JPEG_HACTBL_LUM_OFFSET 0xc5 #define JPU_JPEG_HDCTBL_CHR_OFFSET 0x17c #define JPU_JPEG_HACTBL_CHR_OFFSET 0x19d #define JPU_JPEG_PADDING_OFFSET 0x24f #define JPU_JPEG_LUM 0x00 #define JPU_JPEG_CHR 0x01 
#define JPU_JPEG_DC 0x00 #define JPU_JPEG_AC 0x10 #define JPU_JPEG_422 0x21 #define JPU_JPEG_420 0x22 #define JPU_JPEG_DEFAULT_422_PIX_FMT V4L2_PIX_FMT_NV16M #define JPU_JPEG_DEFAULT_420_PIX_FMT V4L2_PIX_FMT_NV12M #define JPU_RESET_TIMEOUT 100 /* ms */ #define JPU_JOB_TIMEOUT 300 /* ms */ #define JPU_MAX_QUALITY 4 #define JPU_WIDTH_MIN 16 #define JPU_HEIGHT_MIN 16 #define JPU_WIDTH_MAX 4096 #define JPU_HEIGHT_MAX 4096 #define JPU_MEMALIGN 8 /* Flags that indicate a format can be used for capture/output */ #define JPU_FMT_TYPE_OUTPUT 0 #define JPU_FMT_TYPE_CAPTURE 1 #define JPU_ENC_CAPTURE (1 << 0) #define JPU_ENC_OUTPUT (1 << 1) #define JPU_DEC_CAPTURE (1 << 2) #define JPU_DEC_OUTPUT (1 << 3) /* * JPEG registers and bits */ /* JPEG code mode register */ #define JCMOD 0x00 #define JCMOD_PCTR (1 << 7) #define JCMOD_MSKIP_ENABLE (1 << 5) #define JCMOD_DSP_ENC (0 << 3) #define JCMOD_DSP_DEC (1 << 3) #define JCMOD_REDU (7 << 0) #define JCMOD_REDU_422 (1 << 0) #define JCMOD_REDU_420 (2 << 0) /* JPEG code command register */ #define JCCMD 0x04 #define JCCMD_SRST (1 << 12) #define JCCMD_JEND (1 << 2) #define JCCMD_JSRT (1 << 0) /* JPEG code quantization table number register */ #define JCQTN 0x0c #define JCQTN_SHIFT(t) (((t) - 1) << 1) /* JPEG code Huffman table number register */ #define JCHTN 0x10 #define JCHTN_AC_SHIFT(t) (((t) << 1) - 1) #define JCHTN_DC_SHIFT(t) (((t) - 1) << 1) #define JCVSZU 0x1c /* JPEG code vertical size upper register */ #define JCVSZD 0x20 /* JPEG code vertical size lower register */ #define JCHSZU 0x24 /* JPEG code horizontal size upper register */ #define JCHSZD 0x28 /* JPEG code horizontal size lower register */ #define JCSZ_MASK 0xff /* JPEG code h/v size register contains only 1 byte*/ #define JCDTCU 0x2c /* JPEG code data count upper register */ #define JCDTCM 0x30 /* JPEG code data count middle register */ #define JCDTCD 0x34 /* JPEG code data count lower register */ /* JPEG interrupt enable register */ #define JINTE 0x38 #define 
JINTE_ERR (7 << 5) /* INT5 + INT6 + INT7 */ #define JINTE_TRANSF_COMPL (1 << 10) /* JPEG interrupt status register */ #define JINTS 0x3c #define JINTS_MASK 0x7c68 #define JINTS_ERR (1 << 5) #define JINTS_PROCESS_COMPL (1 << 6) #define JINTS_TRANSF_COMPL (1 << 10) #define JCDERR 0x40 /* JPEG code decode error register */ #define JCDERR_MASK 0xf /* JPEG code decode error register mask*/ /* JPEG interface encoding */ #define JIFECNT 0x70 #define JIFECNT_INFT_422 0 #define JIFECNT_INFT_420 1 #define JIFECNT_SWAP_WB (3 << 4) /* to JPU */ #define JIFESYA1 0x74 /* encode source Y address register 1 */ #define JIFESCA1 0x78 /* encode source C address register 1 */ #define JIFESYA2 0x7c /* encode source Y address register 2 */ #define JIFESCA2 0x80 /* encode source C address register 2 */ #define JIFESMW 0x84 /* encode source memory width register */ #define JIFESVSZ 0x88 /* encode source vertical size register */ #define JIFESHSZ 0x8c /* encode source horizontal size register */ #define JIFEDA1 0x90 /* encode destination address register 1 */ #define JIFEDA2 0x94 /* encode destination address register 2 */ /* JPEG decoding control register */ #define JIFDCNT 0xa0 #define JIFDCNT_SWAP_WB (3 << 1) /* from JPU */ #define JIFDSA1 0xa4 /* decode source address register 1 */ #define JIFDDMW 0xb0 /* decode destination memory width register */ #define JIFDDVSZ 0xb4 /* decode destination vert. size register */ #define JIFDDHSZ 0xb8 /* decode destination horiz. 
size register */ #define JIFDDYA1 0xbc /* decode destination Y address register 1 */ #define JIFDDCA1 0xc0 /* decode destination C address register 1 */ #define JCQTBL(n) (0x10000 + (n) * 0x40) /* quantization tables regs */ #define JCHTBD(n) (0x10100 + (n) * 0x100) /* Huffman table DC regs */ #define JCHTBA(n) (0x10120 + (n) * 0x100) /* Huffman table AC regs */ /** * struct jpu - JPEG IP abstraction * @mutex: the mutex protecting this structure * @lock: spinlock protecting the device contexts * @v4l2_dev: v4l2 device for mem2mem mode * @vfd_encoder: video device node for encoder mem2mem mode * @vfd_decoder: video device node for decoder mem2mem mode * @m2m_dev: v4l2 mem2mem device data * @curr: pointer to current context * @regs: JPEG IP registers mapping * @irq: JPEG IP irq * @clk: JPEG IP clock * @dev: JPEG IP struct device * @ref_count: reference counter */ struct jpu { struct mutex mutex; spinlock_t lock; struct v4l2_device v4l2_dev; struct video_device vfd_encoder; struct video_device vfd_decoder; struct v4l2_m2m_dev *m2m_dev; struct jpu_ctx *curr; void __iomem *regs; unsigned int irq; struct clk *clk; struct device *dev; int ref_count; }; /** * struct jpu_buffer - driver's specific video buffer * @buf: m2m buffer * @compr_quality: destination image quality in compression mode * @subsampling: source image subsampling in decompression mode */ struct jpu_buffer { struct v4l2_m2m_buffer buf; unsigned short compr_quality; unsigned char subsampling; }; /** * struct jpu_fmt - driver's internal format data * @fourcc: the fourcc code, 0 if not applicable * @colorspace: the colorspace specifier * @bpp: number of bits per pixel per plane * @h_align: horizontal alignment order (align to 2^h_align) * @v_align: vertical alignment order (align to 2^v_align) * @subsampling: (horizontal:4 | vertical:4) subsampling factor * @num_planes: number of planes * @types: types of queue this format is applicable to */ struct jpu_fmt { u32 fourcc; u32 colorspace; u8 bpp[2]; u8 h_align; 
u8 v_align; u8 subsampling; u8 num_planes; u16 types; }; /** * struct jpu_q_data - parameters of one queue * @fmtinfo: driver-specific format of this queue * @format: multiplanar format of this queue * @sequence: sequence number */ struct jpu_q_data { struct jpu_fmt *fmtinfo; struct v4l2_pix_format_mplane format; unsigned int sequence; }; /** * struct jpu_ctx - the device context data * @jpu: JPEG IP device for this context * @encoder: compression (encode) operation or decompression (decode) * @compr_quality: destination image quality in compression (encode) mode * @out_q: source (output) queue information * @cap_q: destination (capture) queue information * @fh: file handler * @ctrl_handler: controls handler */ struct jpu_ctx { struct jpu *jpu; bool encoder; unsigned short compr_quality; struct jpu_q_data out_q; struct jpu_q_data cap_q; struct v4l2_fh fh; struct v4l2_ctrl_handler ctrl_handler; }; /** * jpeg_buffer - description of memory containing input JPEG data * @end: end position in the buffer * @curr: current position in the buffer */ struct jpeg_buffer { void *end; void *curr; }; static struct jpu_fmt jpu_formats[] = { { V4L2_PIX_FMT_JPEG, V4L2_COLORSPACE_JPEG, {0, 0}, 0, 0, 0, 1, JPU_ENC_CAPTURE | JPU_DEC_OUTPUT }, { V4L2_PIX_FMT_NV16M, V4L2_COLORSPACE_SRGB, {8, 8}, 2, 2, JPU_JPEG_422, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE }, { V4L2_PIX_FMT_NV12M, V4L2_COLORSPACE_SRGB, {8, 4}, 2, 2, JPU_JPEG_420, 2, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE }, { V4L2_PIX_FMT_NV16, V4L2_COLORSPACE_SRGB, {16, 0}, 2, 2, JPU_JPEG_422, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE }, { V4L2_PIX_FMT_NV12, V4L2_COLORSPACE_SRGB, {12, 0}, 2, 2, JPU_JPEG_420, 1, JPU_ENC_OUTPUT | JPU_DEC_CAPTURE }, }; static const u8 zigzag[] = { 0x03, 0x02, 0x0b, 0x13, 0x0a, 0x01, 0x00, 0x09, 0x12, 0x1b, 0x23, 0x1a, 0x11, 0x08, 0x07, 0x06, 0x0f, 0x10, 0x19, 0x22, 0x2b, 0x33, 0x2a, 0x21, 0x18, 0x17, 0x0e, 0x05, 0x04, 0x0d, 0x16, 0x1f, 0x20, 0x29, 0x32, 0x3b, 0x3a, 0x31, 0x28, 0x27, 0x1e, 0x15, 0x0e, 0x14, 0x10, 0x26, 
0x2f, 0x30, 0x39, 0x38, 0x37, 0x2e, 0x25, 0x1c, 0x24, 0x2b, 0x36, 0x3f, 0x3e, 0x35, 0x2c, 0x34, 0x3d, 0x3c }; #define QTBL_SIZE (ALIGN(JPU_JPEG_QTBL_SIZE, \ sizeof(unsigned int)) / sizeof(unsigned int)) #define HDCTBL_SIZE (ALIGN(JPU_JPEG_HDCTBL_SIZE, \ sizeof(unsigned int)) / sizeof(unsigned int)) #define HACTBL_SIZE (ALIGN(JPU_JPEG_HACTBL_SIZE, \ sizeof(unsigned int)) / sizeof(unsigned int)) /* * Start of image; Quantization tables * SOF0 (17 bytes payload) is Baseline DCT - Sample precision, height, width, * Number of image components, (Ci:8 - Hi:4 - Vi:4 - Tq:8) * 3 - Y,Cb,Cr; * Huffman tables; Padding with 0xff (33.3.27 R01UH0501EJ0100 Rev.1.00) */ #define JPU_JPEG_HDR_BLOB { \ 0xff, JPEG_MARKER_SOI, 0xff, JPEG_MARKER_DQT, 0x00, \ JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_LUM, \ [JPU_JPEG_QTBL_LUM_OFFSET ... \ JPU_JPEG_QTBL_LUM_OFFSET + JPU_JPEG_QTBL_SIZE - 1] = 0x00, \ 0xff, JPEG_MARKER_DQT, 0x00, JPU_JPEG_QTBL_SIZE + 0x3, JPU_JPEG_CHR, \ [JPU_JPEG_QTBL_CHR_OFFSET ... JPU_JPEG_QTBL_CHR_OFFSET + \ JPU_JPEG_QTBL_SIZE - 1] = 0x00, \ 0xff, JPEG_MARKER_SOF0, 0x00, 0x11, 0x08, \ [JPU_JPEG_HEIGHT_OFFSET ... JPU_JPEG_HEIGHT_OFFSET + 1] = 0x00, \ [JPU_JPEG_WIDTH_OFFSET ... JPU_JPEG_WIDTH_OFFSET + 1] = 0x00, \ 0x03, 0x01, [JPU_JPEG_SUBS_OFFSET] = 0x00, JPU_JPEG_LUM, \ 0x02, 0x11, JPU_JPEG_CHR, 0x03, 0x11, JPU_JPEG_CHR, \ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, \ JPU_JPEG_LUM | JPU_JPEG_DC, \ [JPU_JPEG_HDCTBL_LUM_OFFSET ... \ JPU_JPEG_HDCTBL_LUM_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, \ JPU_JPEG_LUM | JPU_JPEG_AC, \ [JPU_JPEG_HACTBL_LUM_OFFSET ... \ JPU_JPEG_HACTBL_LUM_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HDCTBL_SIZE + 0x3, \ JPU_JPEG_CHR | JPU_JPEG_DC, \ [JPU_JPEG_HDCTBL_CHR_OFFSET ... 
\ JPU_JPEG_HDCTBL_CHR_OFFSET + JPU_JPEG_HDCTBL_SIZE - 1] = 0x00, \ 0xff, JPEG_MARKER_DHT, 0x00, JPU_JPEG_HACTBL_SIZE + 0x3, \ JPU_JPEG_CHR | JPU_JPEG_AC, \ [JPU_JPEG_HACTBL_CHR_OFFSET ... \ JPU_JPEG_HACTBL_CHR_OFFSET + JPU_JPEG_HACTBL_SIZE - 1] = 0x00, \ [JPU_JPEG_PADDING_OFFSET ... JPU_JPEG_HDR_SIZE - 1] = 0xff \ } static unsigned char jpeg_hdrs[JPU_MAX_QUALITY][JPU_JPEG_HDR_SIZE] = { [0 ... JPU_MAX_QUALITY - 1] = JPU_JPEG_HDR_BLOB }; static const unsigned int qtbl_lum[JPU_MAX_QUALITY][QTBL_SIZE] = { { 0x14101927, 0x322e3e44, 0x10121726, 0x26354144, 0x19171f26, 0x35414444, 0x27262635, 0x41444444, 0x32263541, 0x44444444, 0x2e354144, 0x44444444, 0x3e414444, 0x44444444, 0x44444444, 0x44444444 }, { 0x100b0b10, 0x171b1f1e, 0x0b0c0c0f, 0x1417171e, 0x0b0c0d10, 0x171a232f, 0x100f1017, 0x1a252f40, 0x1714171a, 0x27334040, 0x1b171a25, 0x33404040, 0x1f17232f, 0x40404040, 0x1e1e2f40, 0x40404040 }, { 0x0c08080c, 0x11151817, 0x0809090b, 0x0f131217, 0x08090a0c, 0x13141b24, 0x0c0b0c15, 0x141c2435, 0x110f1314, 0x1e27333b, 0x1513141c, 0x27333b3b, 0x18121b24, 0x333b3b3b, 0x17172435, 0x3b3b3b3b }, { 0x08060608, 0x0c0e1011, 0x06060608, 0x0a0d0c0f, 0x06060708, 0x0d0e1218, 0x0808080e, 0x0d131823, 0x0c0a0d0d, 0x141a2227, 0x0e0d0e13, 0x1a222727, 0x100c1318, 0x22272727, 0x110f1823, 0x27272727 } }; static const unsigned int qtbl_chr[JPU_MAX_QUALITY][QTBL_SIZE] = { { 0x15192026, 0x36444444, 0x191c1826, 0x36444444, 0x2018202b, 0x42444444, 0x26262b35, 0x44444444, 0x36424444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444 }, { 0x110f1115, 0x141a2630, 0x0f131211, 0x141a232b, 0x11121416, 0x1a1e2e35, 0x1511161c, 0x1e273540, 0x14141a1e, 0x27304040, 0x1a1a1e27, 0x303f4040, 0x26232e35, 0x40404040, 0x302b3540, 0x40404040 }, { 0x0d0b0d10, 0x14141d25, 0x0b0e0e0e, 0x10141a20, 0x0d0e0f11, 0x14172328, 0x100e1115, 0x171e2832, 0x14101417, 0x1e25323b, 0x1414171e, 0x25303b3b, 0x1d1a2328, 0x323b3b3b, 0x25202832, 0x3b3b3b3b }, { 0x0908090b, 0x0e111318, 0x080a090b, 0x0e0d1116, 
0x09090d0e, 0x0d0f171a, 0x0b0b0e0e, 0x0f141a21, 0x0e0e0d0f, 0x14182127, 0x110d0f14, 0x18202727, 0x1311171a, 0x21272727, 0x18161a21, 0x27272727 } }; static const unsigned int hdctbl_lum[HDCTBL_SIZE] = { 0x00010501, 0x01010101, 0x01000000, 0x00000000, 0x00010203, 0x04050607, 0x08090a0b }; static const unsigned int hdctbl_chr[HDCTBL_SIZE] = { 0x00010501, 0x01010101, 0x01000000, 0x00000000, 0x00010203, 0x04050607, 0x08090a0b }; static const unsigned int hactbl_lum[HACTBL_SIZE] = { 0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512, 0x21314106, 0x13516107, 0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0, 0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839, 0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869, 0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798, 0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5, 0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea, 0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000 }; static const unsigned int hactbl_chr[HACTBL_SIZE] = { 0x00020103, 0x03020403, 0x05050404, 0x0000017d, 0x01020300, 0x04110512, 0x21314106, 0x13516107, 0x22711432, 0x8191a108, 0x2342b1c1, 0x1552d1f0, 0x24336272, 0x82090a16, 0x1718191a, 0x25262728, 0x292a3435, 0x36373839, 0x3a434445, 0x46474849, 0x4a535455, 0x56575859, 0x5a636465, 0x66676869, 0x6a737475, 0x76777879, 0x7a838485, 0x86878889, 0x8a929394, 0x95969798, 0x999aa2a3, 0xa4a5a6a7, 0xa8a9aab2, 0xb3b4b5b6, 0xb7b8b9ba, 0xc2c3c4c5, 0xc6c7c8c9, 0xcad2d3d4, 0xd5d6d7d8, 0xd9dae1e2, 0xe3e4e5e6, 0xe7e8e9ea, 0xf1f2f3f4, 0xf5f6f7f8, 0xf9fa0000 }; static const char *error_to_text[16] = { "Normal", "SOI not detected", "SOF1 to SOFF detected", "Subsampling not detected", "SOF accuracy error", "DQT accuracy error", "Component error 1", "Component error 2", "SOF0, DQT, and DHT not detected when SOS detected", "SOS not detected", "EOI not detected", "Restart interval data number error detected", "Image size error", "Last MCU 
data number error", "Block data number error", "Unknown"
};

/* Resolve a vb2 buffer to the driver's per-buffer state (quality, subsampling). */
static struct jpu_buffer *vb2_to_jpu_buffer(struct vb2_v4l2_buffer *vb)
{
	struct v4l2_m2m_buffer *b =
		container_of(vb, struct v4l2_m2m_buffer, vb);

	return container_of(b, struct jpu_buffer, buf);
}

/* Read a 32-bit JPU register at byte offset @reg. */
static u32 jpu_read(struct jpu *jpu, unsigned int reg)
{
	return ioread32(jpu->regs + reg);
}

/* Write @val to the 32-bit JPU register at byte offset @reg. */
static void jpu_write(struct jpu *jpu, u32 val, unsigned int reg)
{
	iowrite32(val, jpu->regs + reg);
}

static struct jpu_ctx *ctrl_to_ctx(struct v4l2_ctrl *c)
{
	return container_of(c->handler, struct jpu_ctx, ctrl_handler);
}

static struct jpu_ctx *fh_to_ctx(struct v4l2_fh *fh)
{
	return container_of(fh, struct jpu_ctx, fh);
}

/* Copy @len 32-bit words from @tbl into consecutive registers starting at @reg. */
static void jpu_set_tbl(struct jpu *jpu, u32 reg, const unsigned int *tbl,
			unsigned int len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		jpu_write(jpu, tbl[i], reg + (i << 2));
}

/* Load the luma/chroma quantization tables for @quality into the JPU. */
static void jpu_set_qtbl(struct jpu *jpu, unsigned short quality)
{
	jpu_set_tbl(jpu, JCQTBL(0), qtbl_lum[quality], QTBL_SIZE);
	jpu_set_tbl(jpu, JCQTBL(1), qtbl_chr[quality], QTBL_SIZE);
}

/* Load the fixed DC/AC Huffman tables (luma = 0, chroma = 1) into the JPU. */
static void jpu_set_htbl(struct jpu *jpu)
{
	jpu_set_tbl(jpu, JCHTBD(0), hdctbl_lum, HDCTBL_SIZE);
	jpu_set_tbl(jpu, JCHTBA(0), hactbl_lum, HACTBL_SIZE);
	jpu_set_tbl(jpu, JCHTBD(1), hdctbl_chr, HDCTBL_SIZE);
	jpu_set_tbl(jpu, JCHTBA(1), hactbl_chr, HACTBL_SIZE);
}

/*
 * Poll JCCMD until the self-clearing SRST bit drops, i.e. the software
 * reset completed.  Returns 0 on success or -ETIMEDOUT after
 * JPU_RESET_TIMEOUT ms; yields the CPU between polls via schedule().
 */
static int jpu_wait_reset(struct jpu *jpu)
{
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(JPU_RESET_TIMEOUT);

	while (jpu_read(jpu, JCCMD) & JCCMD_SRST) {
		if (time_after(jiffies, timeout)) {
			dev_err(jpu->dev, "timed out in reset\n");
			return -ETIMEDOUT;
		}
		schedule();
	}

	return 0;
}

/* Trigger a software reset of the JPEG IP and wait for it to finish. */
static int jpu_reset(struct jpu *jpu)
{
	jpu_write(jpu, JCCMD_SRST, JCCMD);
	return jpu_wait_reset(jpu);
}

/*
 * ============================================================================
 * video ioctl operations
 * ============================================================================
 */

/* Write a quantization table into the JPEG header, reordered via zigzag[]. */
static void put_qtbl(u8 *p, const u8 *qtbl)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(zigzag); i++)
p[i] = *(qtbl + zigzag[i]);
}

/*
 * Copy a Huffman table into the JPEG header, swapping the byte order
 * within each 4-byte group (p[i+j] takes htbl[i+3-j]) — presumably to
 * undo the word packing of the const u32 source tables; TODO confirm.
 */
static void put_htbl(u8 *p, const u8 *htbl, unsigned int len)
{
	unsigned int i, j;

	for (i = 0; i < len; i += 4)
		for (j = 0; j < 4 && (i + j) < len; ++j)
			p[i + j] = htbl[i + 3 - j];
}

/*
 * Fill in the quantization and Huffman table payloads of a pre-built
 * JPEG header blob @p for the given compression @quality level.
 */
static void jpu_generate_hdr(unsigned short quality, unsigned char *p)
{
	put_qtbl(p + JPU_JPEG_QTBL_LUM_OFFSET, (const u8 *)qtbl_lum[quality]);
	put_qtbl(p + JPU_JPEG_QTBL_CHR_OFFSET, (const u8 *)qtbl_chr[quality]);

	put_htbl(p + JPU_JPEG_HDCTBL_LUM_OFFSET, (const u8 *)hdctbl_lum,
		 JPU_JPEG_HDCTBL_SIZE);
	put_htbl(p + JPU_JPEG_HACTBL_LUM_OFFSET, (const u8 *)hactbl_lum,
		 JPU_JPEG_HACTBL_SIZE);

	put_htbl(p + JPU_JPEG_HDCTBL_CHR_OFFSET, (const u8 *)hdctbl_chr,
		 JPU_JPEG_HDCTBL_SIZE);
	put_htbl(p + JPU_JPEG_HACTBL_CHR_OFFSET, (const u8 *)hactbl_chr,
		 JPU_JPEG_HACTBL_SIZE);
}

/* Return the next byte of the buffer, or -1 when exhausted. */
static int get_byte(struct jpeg_buffer *buf)
{
	if (buf->curr >= buf->end)
		return -1;

	return *(u8 *)buf->curr++;
}

/* Read a big-endian 16-bit word; returns 0 on success, -1 when short. */
static int get_word_be(struct jpeg_buffer *buf, unsigned int *word)
{
	if (buf->end - buf->curr < 2)
		return -1;

	*word = get_unaligned_be16(buf->curr);
	buf->curr += 2;

	return 0;
}

/* Advance the cursor by @len bytes, clamped to the end of the buffer. */
static void skip(struct jpeg_buffer *buf, unsigned long len)
{
	buf->curr += min((unsigned long)(buf->end - buf->curr), len);
}

/*
 * Minimal JPEG header parse: walk the marker segments up to SOF0 and
 * return the first (luma) component's packed H:V sampling byte
 * (JPU_JPEG_422/420) with *width/*height filled in, or 0 when the data
 * is not an acceptable baseline YCbCr JPEG.
 */
static u8 jpu_parse_hdr(void *buffer, unsigned long size, unsigned int *width,
			unsigned int *height)
{
	struct jpeg_buffer jpeg_buffer;
	unsigned int word;
	bool soi = false;

	jpeg_buffer.end = buffer + size;
	jpeg_buffer.curr = buffer;

	/*
	 * basic size check and EOI - we don't want to let JPU cross
	 * buffer bounds in any case. Hope it's stopping by EOI.
	 */
	if (size < JPU_JPEG_MIN_SIZE ||
	    *(u8 *)(buffer + size - 1) != JPEG_MARKER_EOI)
		return 0;

	for (;;) {
		int c;

		/* skip preceding filler bytes */
		do
			c = get_byte(&jpeg_buffer);
		while (c == 0xff || c == 0);

		/* SOI must come first; any second SOI (or none) aborts */
		if (!soi && c == JPEG_MARKER_SOI) {
			soi = true;
			continue;
		} else if (soi != (c != JPEG_MARKER_SOI))
			return 0;

		switch (c) {
		case JPEG_MARKER_SOF0: /* SOF0: baseline JPEG */
			skip(&jpeg_buffer, 3); /* segment length and bpp */
			if (get_word_be(&jpeg_buffer, height) ||
			    get_word_be(&jpeg_buffer, width) ||
			    get_byte(&jpeg_buffer) != 3) /* YCbCr only */
				return 0;

			skip(&jpeg_buffer, 1);
			return get_byte(&jpeg_buffer);
		case JPEG_MARKER_DHT:
		case JPEG_MARKER_DQT:
		case JPEG_MARKER_COM:
		case JPEG_MARKER_DRI:
		case JPEG_MARKER_APP0 ... JPEG_MARKER_APP0 + 0x0f:
			/* skip the whole segment using its length field */
			if (get_word_be(&jpeg_buffer, &word))
				return 0;
			skip(&jpeg_buffer, (long)word - 2);
			break;
		case 0:
			break;
		default:
			return 0;
		}
	}

	return 0;
}

/* VIDIOC_QUERYCAP: same driver name, card name differs per encoder/decoder. */
static int jpu_querycap(struct file *file, void *priv,
			struct v4l2_capability *cap)
{
	struct jpu_ctx *ctx = fh_to_ctx(priv);

	if (ctx->encoder)
		strscpy(cap->card, DRV_NAME " encoder", sizeof(cap->card));
	else
		strscpy(cap->card, DRV_NAME " decoder", sizeof(cap->card));

	strscpy(cap->driver, DRV_NAME, sizeof(cap->driver));
	memset(cap->reserved, 0, sizeof(cap->reserved));

	return 0;
}

/*
 * Look up a format by fourcc that is valid for the given queue type
 * (output/capture) of the encoder or decoder node; NULL if unsupported.
 */
static struct jpu_fmt *jpu_find_format(bool encoder, u32 pixelformat,
				       unsigned int fmt_type)
{
	unsigned int i, fmt_flag;

	if (encoder)
		fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ?
				JPU_ENC_OUTPUT : JPU_ENC_CAPTURE;
	else
		fmt_flag = fmt_type == JPU_FMT_TYPE_OUTPUT ?
JPU_DEC_OUTPUT : JPU_DEC_CAPTURE;

	for (i = 0; i < ARRAY_SIZE(jpu_formats); i++) {
		struct jpu_fmt *fmt = &jpu_formats[i];

		if (fmt->fourcc == pixelformat && fmt->types & fmt_flag)
			return fmt;
	}

	return NULL;
}

/* Enumerate the @f->index'th format whose type mask matches @type. */
static int jpu_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
{
	unsigned int i, num = 0;

	for (i = 0; i < ARRAY_SIZE(jpu_formats); ++i) {
		if (jpu_formats[i].types & type) {
			if (num == f->index)
				break;
			++num;
		}
	}

	if (i >= ARRAY_SIZE(jpu_formats))
		return -EINVAL;

	f->pixelformat = jpu_formats[i].fourcc;

	return 0;
}

static int jpu_enum_fmt_cap(struct file *file, void *priv,
			    struct v4l2_fmtdesc *f)
{
	struct jpu_ctx *ctx = fh_to_ctx(priv);

	return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_CAPTURE :
			    JPU_DEC_CAPTURE);
}

static int jpu_enum_fmt_out(struct file *file, void *priv,
			    struct v4l2_fmtdesc *f)
{
	struct jpu_ctx *ctx = fh_to_ctx(priv);

	return jpu_enum_fmt(f, ctx->encoder ? JPU_ENC_OUTPUT : JPU_DEC_OUTPUT);
}

/* Select the context's queue state for an output or capture buffer type. */
static struct jpu_q_data *jpu_get_q_data(struct jpu_ctx *ctx,
					 enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &ctx->out_q;
	else
		return &ctx->cap_q;
}

/*
 * Bound and align *w/*h like v4l_bound_align_image(), but round the
 * result up by one alignment step (when it still fits) if the helper
 * rounded the requested size down.
 */
static void jpu_bound_align_image(u32 *w, unsigned int w_min,
				  unsigned int w_max, unsigned int w_align,
				  u32 *h, unsigned int h_min,
				  unsigned int h_max, unsigned int h_align)
{
	unsigned int width, height, w_step, h_step;

	width = *w;
	height = *h;

	w_step = 1U << w_align;
	h_step = 1U << h_align;

	v4l_bound_align_image(w, w_min, w_max, w_align, h, h_min, h_max,
			      h_align, 3);

	if (*w < width && *w + w_step < w_max)
		*w += w_step;
	if (*h < height && *h + h_step < h_max)
		*h += h_step;
}

/*
 * Core TRY_FMT: fall back to a default pixelformat if the requested one
 * is invalid for this node/queue, bound/align the resolution and compute
 * bytesperline/sizeimage per plane.  Optionally returns the matched
 * format descriptor through @fmtinfo.  Always succeeds (returns 0).
 */
static int __jpu_try_fmt(struct jpu_ctx *ctx, struct jpu_fmt **fmtinfo,
			 struct v4l2_pix_format_mplane *pix,
			 enum v4l2_buf_type type)
{
	struct jpu_fmt *fmt;
	unsigned int f_type, w, h;

	f_type = V4L2_TYPE_IS_OUTPUT(type) ?
		JPU_FMT_TYPE_OUTPUT : JPU_FMT_TYPE_CAPTURE;

	fmt = jpu_find_format(ctx->encoder, pix->pixelformat, f_type);
	if (!fmt) {
		unsigned int pixelformat;

		dev_dbg(ctx->jpu->dev, "unknown format; set default format\n");
		if (ctx->encoder)
			pixelformat = f_type == JPU_FMT_TYPE_OUTPUT ?
				V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
		else
			pixelformat = f_type == JPU_FMT_TYPE_CAPTURE ?
				V4L2_PIX_FMT_NV16M : V4L2_PIX_FMT_JPEG;
		fmt = jpu_find_format(ctx->encoder, pixelformat, f_type);
	}

	pix->pixelformat = fmt->fourcc;
	pix->colorspace = fmt->colorspace;
	pix->field = V4L2_FIELD_NONE;
	pix->num_planes = fmt->num_planes;

	jpu_bound_align_image(&pix->width, JPU_WIDTH_MIN, JPU_WIDTH_MAX,
			      fmt->h_align, &pix->height, JPU_HEIGHT_MIN,
			      JPU_HEIGHT_MAX, fmt->v_align);

	w = pix->width;
	h = pix->height;

	if (fmt->fourcc == V4L2_PIX_FMT_JPEG) {
		/* ignore userspaces's sizeimage for encoding */
		if (pix->plane_fmt[0].sizeimage <= 0 || ctx->encoder)
			pix->plane_fmt[0].sizeimage = JPU_JPEG_HDR_SIZE +
				(JPU_JPEG_MAX_BYTES_PER_PIXEL * w * h);
		pix->plane_fmt[0].bytesperline = 0;
	} else {
		unsigned int i, bpl = 0;

		/* keep the largest user-supplied stride, clamped and aligned */
		for (i = 0; i < pix->num_planes; ++i)
			bpl = max(bpl, pix->plane_fmt[i].bytesperline);

		bpl = clamp_t(unsigned int, bpl, w, JPU_WIDTH_MAX);
		bpl = round_up(bpl, JPU_MEMALIGN);

		for (i = 0; i < pix->num_planes; ++i) {
			pix->plane_fmt[i].bytesperline = bpl;
			pix->plane_fmt[i].sizeimage = bpl * h * fmt->bpp[i] / 8;
		}
	}

	if (fmtinfo)
		*fmtinfo = fmt;

	return 0;
}

static int jpu_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct jpu_ctx *ctx = fh_to_ctx(priv);

	if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
		return -EINVAL;

	return __jpu_try_fmt(ctx, NULL, &f->fmt.pix_mp, f->type);
}

/* VIDIOC_S_FMT: apply the tried format unless the queue is busy. */
static int jpu_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct vb2_queue *vq;
	struct jpu_ctx *ctx = fh_to_ctx(priv);
	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
	struct jpu_fmt *fmtinfo;
	struct jpu_q_data *q_data;
	int ret;

	vq = v4l2_m2m_get_vq(m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	if
(vb2_is_busy(vq)) { v4l2_err(&ctx->jpu->v4l2_dev, "%s queue busy\n", __func__); return -EBUSY; } ret = __jpu_try_fmt(ctx, &fmtinfo, &f->fmt.pix_mp, f->type); if (ret < 0) return ret; q_data = jpu_get_q_data(ctx, f->type); q_data->format = f->fmt.pix_mp; q_data->fmtinfo = fmtinfo; return 0; } static int jpu_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct jpu_q_data *q_data; struct jpu_ctx *ctx = fh_to_ctx(priv); if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type)) return -EINVAL; q_data = jpu_get_q_data(ctx, f->type); f->fmt.pix_mp = q_data->format; return 0; } /* * V4L2 controls */ static int jpu_s_ctrl(struct v4l2_ctrl *ctrl) { struct jpu_ctx *ctx = ctrl_to_ctx(ctrl); unsigned long flags; spin_lock_irqsave(&ctx->jpu->lock, flags); if (ctrl->id == V4L2_CID_JPEG_COMPRESSION_QUALITY) ctx->compr_quality = ctrl->val; spin_unlock_irqrestore(&ctx->jpu->lock, flags); return 0; } static const struct v4l2_ctrl_ops jpu_ctrl_ops = { .s_ctrl = jpu_s_ctrl, }; static int jpu_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct jpu_ctx *ctx = fh_to_ctx(priv); struct jpu_q_data *src_q_data, *dst_q_data, *orig, adj, *ref; enum v4l2_buf_type adj_type; src_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); dst_q_data = jpu_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); if (ctx->encoder) { adj = *src_q_data; orig = src_q_data; ref = dst_q_data; adj_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; } else { adj = *dst_q_data; orig = dst_q_data; ref = src_q_data; adj_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; } adj.format.width = ref->format.width; adj.format.height = ref->format.height; __jpu_try_fmt(ctx, NULL, &adj.format, adj_type); if (adj.format.width != orig->format.width || adj.format.height != orig->format.height) { dev_err(ctx->jpu->dev, "src and dst formats do not match.\n"); /* maybe we can return -EPIPE here? 
*/ return -EINVAL; } return v4l2_m2m_streamon(file, ctx->fh.m2m_ctx, type); } static const struct v4l2_ioctl_ops jpu_ioctl_ops = { .vidioc_querycap = jpu_querycap, .vidioc_enum_fmt_vid_cap = jpu_enum_fmt_cap, .vidioc_enum_fmt_vid_out = jpu_enum_fmt_out, .vidioc_g_fmt_vid_cap_mplane = jpu_g_fmt, .vidioc_g_fmt_vid_out_mplane = jpu_g_fmt, .vidioc_try_fmt_vid_cap_mplane = jpu_try_fmt, .vidioc_try_fmt_vid_out_mplane = jpu_try_fmt, .vidioc_s_fmt_vid_cap_mplane = jpu_s_fmt, .vidioc_s_fmt_vid_out_mplane = jpu_s_fmt, .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, .vidioc_streamon = jpu_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe }; static int jpu_controls_create(struct jpu_ctx *ctx) { struct v4l2_ctrl *ctrl; int ret; v4l2_ctrl_handler_init(&ctx->ctrl_handler, 1); ctrl = v4l2_ctrl_new_std(&ctx->ctrl_handler, &jpu_ctrl_ops, V4L2_CID_JPEG_COMPRESSION_QUALITY, 0, JPU_MAX_QUALITY - 1, 1, 0); if (ctx->ctrl_handler.error) { ret = ctx->ctrl_handler.error; goto error_free; } if (!ctx->encoder) ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY; ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler); if (ret < 0) goto error_free; return 0; error_free: v4l2_ctrl_handler_free(&ctx->ctrl_handler); return ret; } /* * ============================================================================ * Queue operations * ============================================================================ */ static int jpu_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct jpu_ctx *ctx = vb2_get_drv_priv(vq); struct jpu_q_data *q_data; unsigned int i; q_data = jpu_get_q_data(ctx, vq->type); if 
(*nplanes) {
		/* requested plane layout must match the negotiated format */
		if (*nplanes != q_data->format.num_planes)
			return -EINVAL;

		for (i = 0; i < *nplanes; i++) {
			unsigned int q_size = q_data->format.plane_fmt[i].sizeimage;

			if (sizes[i] < q_size)
				return -EINVAL;
		}
		return 0;
	}

	*nplanes = q_data->format.num_planes;

	for (i = 0; i < *nplanes; i++)
		sizes[i] = q_data->format.plane_fmt[i].sizeimage;

	return 0;
}

/* Validate a buffer before use: field order and per-plane sizes. */
static int jpu_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct jpu_q_data *q_data;
	unsigned int i;

	q_data = jpu_get_q_data(ctx, vb->vb2_queue->type);

	if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		/* only progressive input is supported */
		if (vbuf->field == V4L2_FIELD_ANY)
			vbuf->field = V4L2_FIELD_NONE;
		if (vbuf->field != V4L2_FIELD_NONE) {
			dev_err(ctx->jpu->dev, "%s field isn't supported\n",
				__func__);
			return -EINVAL;
		}
	}

	for (i = 0; i < q_data->format.num_planes; i++) {
		unsigned long size = q_data->format.plane_fmt[i].sizeimage;

		if (vb2_plane_size(vb, i) < size) {
			dev_err(ctx->jpu->dev,
				"%s: data will not fit into plane (%lu < %lu)\n",
			       __func__, vb2_plane_size(vb, i), size);
			return -EINVAL;
		}

		/* decoder capture queue */
		if (!ctx->encoder && V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type))
			vb2_set_plane_payload(vb, i, size);
	}

	return 0;
}

/*
 * Queue a buffer.  For the decoder's JPEG input, first parse the header
 * and verify its dimensions/subsampling match the negotiated output
 * format; reject the buffer with an error state otherwise.
 */
static void jpu_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	if (!ctx->encoder && V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
		struct jpu_q_data *q_data, adjust;
		void *buffer = vb2_plane_vaddr(vb, 0);
		unsigned long buf_size = vb2_get_plane_payload(vb, 0);
		unsigned int width, height;

		u8 subsampling = jpu_parse_hdr(buffer, buf_size, &width,
					       &height);

		/* check if JPEG data basic parsing was successful */
		if (subsampling != JPU_JPEG_422 && subsampling != JPU_JPEG_420)
			goto format_error;

		q_data = &ctx->out_q;

		adjust = *q_data;
		adjust.format.width = width;
		adjust.format.height = height;

		__jpu_try_fmt(ctx, &adjust.fmtinfo, &adjust.format,
			      V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

		if (adjust.format.width != q_data->format.width ||
		    adjust.format.height != q_data->format.height)
			goto format_error;

		/*
		 * keep subsampling in buffer to check it
		 * for compatibility in device_run
		 */
		jpu_buf->subsampling = subsampling;
	}

	if (ctx->fh.m2m_ctx)
		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);

	return;

format_error:
	dev_err(ctx->jpu->dev, "incompatible or corrupted JPEG data\n");
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}

/*
 * Finish a buffer: assign the sequence number and, for a successfully
 * encoded capture buffer, prepend the pre-generated JPEG header with
 * the real dimensions and subsampling patched in (big-endian fields).
 */
static void jpu_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct jpu_buffer *jpu_buf = vb2_to_jpu_buffer(vbuf);
	struct jpu_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct jpu_q_data *q_data = &ctx->out_q;
	enum v4l2_buf_type type = vb->vb2_queue->type;
	u8 *buffer;

	if (vb->state == VB2_BUF_STATE_DONE)
		vbuf->sequence = jpu_get_q_data(ctx, type)->sequence++;

	if (!ctx->encoder || vb->state != VB2_BUF_STATE_DONE ||
	    V4L2_TYPE_IS_OUTPUT(type))
		return;

	buffer = vb2_plane_vaddr(vb, 0);

	memcpy(buffer, jpeg_hdrs[jpu_buf->compr_quality], JPU_JPEG_HDR_SIZE);
	*(__be16 *)(buffer + JPU_JPEG_HEIGHT_OFFSET) =
		cpu_to_be16(q_data->format.height);
	*(__be16 *)(buffer + JPU_JPEG_WIDTH_OFFSET) =
		cpu_to_be16(q_data->format.width);
	*(buffer + JPU_JPEG_SUBS_OFFSET) = q_data->fmtinfo->subsampling;
}

/* Reset the frame sequence counter when streaming starts. */
static int jpu_start_streaming(struct vb2_queue *vq, unsigned count)
{
	struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
	struct jpu_q_data *q_data = jpu_get_q_data(ctx, vq->type);

	q_data->sequence = 0;
	return 0;
}

/* Drain all pending buffers of this queue, completing them with an error. */
static void jpu_stop_streaming(struct vb2_queue *vq)
{
	struct jpu_ctx *ctx = vb2_get_drv_priv(vq);
	struct vb2_v4l2_buffer *vb;
	unsigned long flags;

	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(vq->type))
			vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

		if (vb == NULL)
			return;

		spin_lock_irqsave(&ctx->jpu->lock, flags);
		v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
spin_unlock_irqrestore(&ctx->jpu->lock, flags); } } static const struct vb2_ops jpu_qops = { .queue_setup = jpu_queue_setup, .buf_prepare = jpu_buf_prepare, .buf_queue = jpu_buf_queue, .buf_finish = jpu_buf_finish, .start_streaming = jpu_start_streaming, .stop_streaming = jpu_stop_streaming, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; static int jpu_queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct jpu_ctx *ctx = priv; int ret; memset(src_vq, 0, sizeof(*src_vq)); src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; src_vq->io_modes = VB2_MMAP | VB2_DMABUF; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct jpu_buffer); src_vq->ops = &jpu_qops; src_vq->mem_ops = &vb2_dma_contig_memops; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; src_vq->lock = &ctx->jpu->mutex; src_vq->dev = ctx->jpu->v4l2_dev.dev; ret = vb2_queue_init(src_vq); if (ret) return ret; memset(dst_vq, 0, sizeof(*dst_vq)); dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; dst_vq->io_modes = VB2_MMAP | VB2_DMABUF; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct jpu_buffer); dst_vq->ops = &jpu_qops; dst_vq->mem_ops = &vb2_dma_contig_memops; dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; dst_vq->lock = &ctx->jpu->mutex; dst_vq->dev = ctx->jpu->v4l2_dev.dev; return vb2_queue_init(dst_vq); } /* * ============================================================================ * Device file operations * ============================================================================ */ static int jpu_open(struct file *file) { struct jpu *jpu = video_drvdata(file); struct video_device *vfd = video_devdata(file); struct jpu_ctx *ctx; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; v4l2_fh_init(&ctx->fh, vfd); ctx->fh.ctrl_handler = &ctx->ctrl_handler; file->private_data = &ctx->fh; v4l2_fh_add(&ctx->fh); ctx->jpu = jpu; ctx->encoder = vfd == &jpu->vfd_encoder; __jpu_try_fmt(ctx, 
&ctx->out_q.fmtinfo, &ctx->out_q.format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); __jpu_try_fmt(ctx, &ctx->cap_q.fmtinfo, &ctx->cap_q.format, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpu->m2m_dev, ctx, jpu_queue_init); if (IS_ERR(ctx->fh.m2m_ctx)) { ret = PTR_ERR(ctx->fh.m2m_ctx); goto v4l_prepare_rollback; } ret = jpu_controls_create(ctx); if (ret < 0) goto v4l_prepare_rollback; if (mutex_lock_interruptible(&jpu->mutex)) { ret = -ERESTARTSYS; goto v4l_prepare_rollback; } if (jpu->ref_count == 0) { ret = clk_prepare_enable(jpu->clk); if (ret < 0) goto device_prepare_rollback; /* ...issue software reset */ ret = jpu_reset(jpu); if (ret) goto jpu_reset_rollback; } jpu->ref_count++; mutex_unlock(&jpu->mutex); return 0; jpu_reset_rollback: clk_disable_unprepare(jpu->clk); device_prepare_rollback: mutex_unlock(&jpu->mutex); v4l_prepare_rollback: v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); kfree(ctx); return ret; } static int jpu_release(struct file *file) { struct jpu *jpu = video_drvdata(file); struct jpu_ctx *ctx = fh_to_ctx(file->private_data); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); v4l2_ctrl_handler_free(&ctx->ctrl_handler); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); kfree(ctx); mutex_lock(&jpu->mutex); if (--jpu->ref_count == 0) clk_disable_unprepare(jpu->clk); mutex_unlock(&jpu->mutex); return 0; } static const struct v4l2_file_operations jpu_fops = { .owner = THIS_MODULE, .open = jpu_open, .release = jpu_release, .unlocked_ioctl = video_ioctl2, .poll = v4l2_m2m_fop_poll, .mmap = v4l2_m2m_fop_mmap, }; /* * ============================================================================ * mem2mem callbacks * ============================================================================ */ static void jpu_cleanup(struct jpu_ctx *ctx, bool reset) { /* remove current buffers and finish job */ struct vb2_v4l2_buffer *src_buf, *dst_buf; unsigned long flags; spin_lock_irqsave(&ctx->jpu->lock, flags); src_buf = 
v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); /* ...and give it a chance on next run */ if (reset) jpu_write(ctx->jpu, JCCMD_SRST, JCCMD); spin_unlock_irqrestore(&ctx->jpu->lock, flags); v4l2_m2m_job_finish(ctx->jpu->m2m_dev, ctx->fh.m2m_ctx); } static void jpu_device_run(void *priv) { struct jpu_ctx *ctx = priv; struct jpu *jpu = ctx->jpu; struct jpu_buffer *jpu_buf; struct jpu_q_data *q_data; struct vb2_v4l2_buffer *src_buf, *dst_buf; unsigned int w, h, bpl; unsigned char num_planes, subsampling; unsigned long flags; /* ...wait until module reset completes; we have mutex locked here */ if (jpu_wait_reset(jpu)) { jpu_cleanup(ctx, true); return; } spin_lock_irqsave(&ctx->jpu->lock, flags); jpu->curr = ctx; src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); if (ctx->encoder) { jpu_buf = vb2_to_jpu_buffer(dst_buf); q_data = &ctx->out_q; } else { jpu_buf = vb2_to_jpu_buffer(src_buf); q_data = &ctx->cap_q; } w = q_data->format.width; h = q_data->format.height; bpl = q_data->format.plane_fmt[0].bytesperline; num_planes = q_data->fmtinfo->num_planes; subsampling = q_data->fmtinfo->subsampling; if (ctx->encoder) { unsigned long src_1_addr, src_2_addr, dst_addr; unsigned int redu, inft; dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); src_1_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); if (num_planes > 1) src_2_addr = vb2_dma_contig_plane_dma_addr( &src_buf->vb2_buf, 1); else src_2_addr = src_1_addr + w * h; jpu_buf->compr_quality = ctx->compr_quality; if (subsampling == JPU_JPEG_420) { redu = JCMOD_REDU_420; inft = JIFECNT_INFT_420; } else { redu = JCMOD_REDU_422; inft = JIFECNT_INFT_422; } /* only no marker mode works for encoding */ jpu_write(jpu, JCMOD_DSP_ENC | JCMOD_PCTR | redu | JCMOD_MSKIP_ENABLE, JCMOD); jpu_write(jpu, JIFECNT_SWAP_WB | inft, 
JIFECNT); jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT); jpu_write(jpu, JINTE_TRANSF_COMPL, JINTE); /* Y and C components source addresses */ jpu_write(jpu, src_1_addr, JIFESYA1); jpu_write(jpu, src_2_addr, JIFESCA1); /* memory width */ jpu_write(jpu, bpl, JIFESMW); jpu_write(jpu, (w >> 8) & JCSZ_MASK, JCHSZU); jpu_write(jpu, w & JCSZ_MASK, JCHSZD); jpu_write(jpu, (h >> 8) & JCSZ_MASK, JCVSZU); jpu_write(jpu, h & JCSZ_MASK, JCVSZD); jpu_write(jpu, w, JIFESHSZ); jpu_write(jpu, h, JIFESVSZ); jpu_write(jpu, dst_addr + JPU_JPEG_HDR_SIZE, JIFEDA1); jpu_write(jpu, 0 << JCQTN_SHIFT(1) | 1 << JCQTN_SHIFT(2) | 1 << JCQTN_SHIFT(3), JCQTN); jpu_write(jpu, 0 << JCHTN_AC_SHIFT(1) | 0 << JCHTN_DC_SHIFT(1) | 1 << JCHTN_AC_SHIFT(2) | 1 << JCHTN_DC_SHIFT(2) | 1 << JCHTN_AC_SHIFT(3) | 1 << JCHTN_DC_SHIFT(3), JCHTN); jpu_set_qtbl(jpu, ctx->compr_quality); jpu_set_htbl(jpu); } else { unsigned long src_addr, dst_1_addr, dst_2_addr; if (jpu_buf->subsampling != subsampling) { dev_err(ctx->jpu->dev, "src and dst formats do not match.\n"); spin_unlock_irqrestore(&ctx->jpu->lock, flags); jpu_cleanup(ctx, false); return; } src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0); dst_1_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0); if (q_data->fmtinfo->num_planes > 1) dst_2_addr = vb2_dma_contig_plane_dma_addr( &dst_buf->vb2_buf, 1); else dst_2_addr = dst_1_addr + w * h; /* ...set up decoder operation */ jpu_write(jpu, JCMOD_DSP_DEC | JCMOD_PCTR, JCMOD); jpu_write(jpu, JIFECNT_SWAP_WB, JIFECNT); jpu_write(jpu, JIFDCNT_SWAP_WB, JIFDCNT); /* ...enable interrupts on transfer completion and d-g error */ jpu_write(jpu, JINTE_TRANSF_COMPL | JINTE_ERR, JINTE); /* ...set source/destination addresses of encoded data */ jpu_write(jpu, src_addr, JIFDSA1); jpu_write(jpu, dst_1_addr, JIFDDYA1); jpu_write(jpu, dst_2_addr, JIFDDCA1); jpu_write(jpu, bpl, JIFDDMW); } /* ...start encoder/decoder operation */ jpu_write(jpu, JCCMD_JSRT, JCCMD); spin_unlock_irqrestore(&ctx->jpu->lock, flags); } 
static const struct v4l2_m2m_ops jpu_m2m_ops = { .device_run = jpu_device_run, }; /* * ============================================================================ * IRQ handler * ============================================================================ */ static irqreturn_t jpu_irq_handler(int irq, void *dev_id) { struct jpu *jpu = dev_id; struct jpu_ctx *curr_ctx; struct vb2_v4l2_buffer *src_buf, *dst_buf; unsigned int int_status; int_status = jpu_read(jpu, JINTS); /* ...spurious interrupt */ if (!((JINTS_TRANSF_COMPL | JINTS_PROCESS_COMPL | JINTS_ERR) & int_status)) return IRQ_NONE; /* ...clear interrupts */ jpu_write(jpu, ~(int_status & JINTS_MASK), JINTS); if (int_status & (JINTS_ERR | JINTS_PROCESS_COMPL)) jpu_write(jpu, JCCMD_JEND, JCCMD); spin_lock(&jpu->lock); if ((int_status & JINTS_PROCESS_COMPL) && !(int_status & JINTS_TRANSF_COMPL)) goto handled; curr_ctx = v4l2_m2m_get_curr_priv(jpu->m2m_dev); if (!curr_ctx) { /* ...instance is not running */ dev_err(jpu->dev, "no active context for m2m\n"); goto handled; } src_buf = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx); dst_buf = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx); if (int_status & JINTS_TRANSF_COMPL) { if (curr_ctx->encoder) { unsigned long payload_size = jpu_read(jpu, JCDTCU) << 16 | jpu_read(jpu, JCDTCM) << 8 | jpu_read(jpu, JCDTCD); vb2_set_plane_payload(&dst_buf->vb2_buf, 0, payload_size + JPU_JPEG_HDR_SIZE); } dst_buf->field = src_buf->field; dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp; if (src_buf->flags & V4L2_BUF_FLAG_TIMECODE) dst_buf->timecode = src_buf->timecode; dst_buf->flags = src_buf->flags & (V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | V4L2_BUF_FLAG_TSTAMP_SRC_MASK); v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE); } else if (int_status & JINTS_ERR) { unsigned char error = jpu_read(jpu, JCDERR) & JCDERR_MASK; dev_dbg(jpu->dev, "processing error: %#X: %s\n", error, 
error_to_text[error]); v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR); v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR); } jpu->curr = NULL; /* ...reset JPU after completion */ jpu_write(jpu, JCCMD_SRST, JCCMD); spin_unlock(&jpu->lock); v4l2_m2m_job_finish(jpu->m2m_dev, curr_ctx->fh.m2m_ctx); return IRQ_HANDLED; handled: spin_unlock(&jpu->lock); return IRQ_HANDLED; } /* * ============================================================================ * Driver basic infrastructure * ============================================================================ */ static const struct of_device_id jpu_dt_ids[] = { { .compatible = "renesas,jpu-r8a7790" }, /* H2 */ { .compatible = "renesas,jpu-r8a7791" }, /* M2-W */ { .compatible = "renesas,jpu-r8a7792" }, /* V2H */ { .compatible = "renesas,jpu-r8a7793" }, /* M2-N */ { .compatible = "renesas,rcar-gen2-jpu" }, { }, }; MODULE_DEVICE_TABLE(of, jpu_dt_ids); static int jpu_probe(struct platform_device *pdev) { struct jpu *jpu; int ret; unsigned int i; jpu = devm_kzalloc(&pdev->dev, sizeof(*jpu), GFP_KERNEL); if (!jpu) return -ENOMEM; mutex_init(&jpu->mutex); spin_lock_init(&jpu->lock); jpu->dev = &pdev->dev; /* memory-mapped registers */ jpu->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(jpu->regs)) return PTR_ERR(jpu->regs); /* interrupt service routine registration */ jpu->irq = ret = platform_get_irq(pdev, 0); if (ret < 0) return ret; ret = devm_request_irq(&pdev->dev, jpu->irq, jpu_irq_handler, 0, dev_name(&pdev->dev), jpu); if (ret) { dev_err(&pdev->dev, "cannot claim IRQ %d\n", jpu->irq); return ret; } /* clocks */ jpu->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(jpu->clk)) { dev_err(&pdev->dev, "cannot get clock\n"); return PTR_ERR(jpu->clk); } /* v4l2 device */ ret = v4l2_device_register(&pdev->dev, &jpu->v4l2_dev); if (ret) { dev_err(&pdev->dev, "Failed to register v4l2 device\n"); return ret; } /* mem2mem device */ jpu->m2m_dev = v4l2_m2m_init(&jpu_m2m_ops); if (IS_ERR(jpu->m2m_dev)) { 
v4l2_err(&jpu->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(jpu->m2m_dev); goto device_register_rollback; } /* fill in quantization and Huffman tables for encoder */ for (i = 0; i < JPU_MAX_QUALITY; i++) jpu_generate_hdr(i, (unsigned char *)jpeg_hdrs[i]); strscpy(jpu->vfd_encoder.name, DRV_NAME, sizeof(jpu->vfd_encoder.name)); jpu->vfd_encoder.fops = &jpu_fops; jpu->vfd_encoder.ioctl_ops = &jpu_ioctl_ops; jpu->vfd_encoder.minor = -1; jpu->vfd_encoder.release = video_device_release_empty; jpu->vfd_encoder.lock = &jpu->mutex; jpu->vfd_encoder.v4l2_dev = &jpu->v4l2_dev; jpu->vfd_encoder.vfl_dir = VFL_DIR_M2M; jpu->vfd_encoder.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE; ret = video_register_device(&jpu->vfd_encoder, VFL_TYPE_VIDEO, -1); if (ret) { v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n"); goto m2m_init_rollback; } video_set_drvdata(&jpu->vfd_encoder, jpu); strscpy(jpu->vfd_decoder.name, DRV_NAME, sizeof(jpu->vfd_decoder.name)); jpu->vfd_decoder.fops = &jpu_fops; jpu->vfd_decoder.ioctl_ops = &jpu_ioctl_ops; jpu->vfd_decoder.minor = -1; jpu->vfd_decoder.release = video_device_release_empty; jpu->vfd_decoder.lock = &jpu->mutex; jpu->vfd_decoder.v4l2_dev = &jpu->v4l2_dev; jpu->vfd_decoder.vfl_dir = VFL_DIR_M2M; jpu->vfd_decoder.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE; ret = video_register_device(&jpu->vfd_decoder, VFL_TYPE_VIDEO, -1); if (ret) { v4l2_err(&jpu->v4l2_dev, "Failed to register video device\n"); goto enc_vdev_register_rollback; } video_set_drvdata(&jpu->vfd_decoder, jpu); platform_set_drvdata(pdev, jpu); v4l2_info(&jpu->v4l2_dev, "encoder device registered as /dev/video%d\n", jpu->vfd_encoder.num); v4l2_info(&jpu->v4l2_dev, "decoder device registered as /dev/video%d\n", jpu->vfd_decoder.num); return 0; enc_vdev_register_rollback: video_unregister_device(&jpu->vfd_encoder); m2m_init_rollback: v4l2_m2m_release(jpu->m2m_dev); device_register_rollback: v4l2_device_unregister(&jpu->v4l2_dev); 
return ret; } static void jpu_remove(struct platform_device *pdev) { struct jpu *jpu = platform_get_drvdata(pdev); video_unregister_device(&jpu->vfd_decoder); video_unregister_device(&jpu->vfd_encoder); v4l2_m2m_release(jpu->m2m_dev); v4l2_device_unregister(&jpu->v4l2_dev); } #ifdef CONFIG_PM_SLEEP static int jpu_suspend(struct device *dev) { struct jpu *jpu = dev_get_drvdata(dev); if (jpu->ref_count == 0) return 0; clk_disable_unprepare(jpu->clk); return 0; } static int jpu_resume(struct device *dev) { struct jpu *jpu = dev_get_drvdata(dev); if (jpu->ref_count == 0) return 0; clk_prepare_enable(jpu->clk); return 0; } #endif static const struct dev_pm_ops jpu_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(jpu_suspend, jpu_resume) }; static struct platform_driver jpu_driver = { .probe = jpu_probe, .remove_new = jpu_remove, .driver = { .of_match_table = jpu_dt_ids, .name = DRV_NAME, .pm = &jpu_pm_ops, }, }; module_platform_driver(jpu_driver); MODULE_ALIAS("platform:" DRV_NAME); MODULE_AUTHOR("Mikhail Ulianov <[email protected]>"); MODULE_DESCRIPTION("Renesas R-Car JPEG processing unit driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/media/platform/renesas/rcar_jpu.c
// SPDX-License-Identifier: GPL-2.0+ /* * rcar-fcp.c -- R-Car Frame Compression Processor Driver * * Copyright (C) 2016 Renesas Electronics Corporation * * Contact: Laurent Pinchart ([email protected]) */ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <media/rcar-fcp.h> struct rcar_fcp_device { struct list_head list; struct device *dev; }; static LIST_HEAD(fcp_devices); static DEFINE_MUTEX(fcp_lock); /* ----------------------------------------------------------------------------- * Public API */ /** * rcar_fcp_get - Find and acquire a reference to an FCP instance * @np: Device node of the FCP instance * * Search the list of registered FCP instances for the instance corresponding to * the given device node. * * Return a pointer to the FCP instance, or an ERR_PTR if the instance can't be * found. */ struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np) { struct rcar_fcp_device *fcp; mutex_lock(&fcp_lock); list_for_each_entry(fcp, &fcp_devices, list) { if (fcp->dev->of_node != np) continue; get_device(fcp->dev); goto done; } fcp = ERR_PTR(-EPROBE_DEFER); done: mutex_unlock(&fcp_lock); return fcp; } EXPORT_SYMBOL_GPL(rcar_fcp_get); /** * rcar_fcp_put - Release a reference to an FCP instance * @fcp: The FCP instance * * Release the FCP instance acquired by a call to rcar_fcp_get(). */ void rcar_fcp_put(struct rcar_fcp_device *fcp) { if (fcp) put_device(fcp->dev); } EXPORT_SYMBOL_GPL(rcar_fcp_put); struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp) { return fcp->dev; } EXPORT_SYMBOL_GPL(rcar_fcp_get_device); /** * rcar_fcp_enable - Enable an FCP * @fcp: The FCP instance * * Before any memory access through an FCP is performed by a module, the FCP * must be enabled by a call to this function. 
The enable calls are reference * counted, each successful call must be followed by one rcar_fcp_disable() * call when no more memory transfer can occur through the FCP. * * Return 0 on success or a negative error code if an error occurs. The enable * reference count isn't increased when this function returns an error. */ int rcar_fcp_enable(struct rcar_fcp_device *fcp) { if (!fcp) return 0; return pm_runtime_resume_and_get(fcp->dev); } EXPORT_SYMBOL_GPL(rcar_fcp_enable); /** * rcar_fcp_disable - Disable an FCP * @fcp: The FCP instance * * This function is the counterpart of rcar_fcp_enable(). As enable calls are * reference counted a disable call may not disable the FCP synchronously. */ void rcar_fcp_disable(struct rcar_fcp_device *fcp) { if (fcp) pm_runtime_put(fcp->dev); } EXPORT_SYMBOL_GPL(rcar_fcp_disable); /* ----------------------------------------------------------------------------- * Platform Driver */ static int rcar_fcp_probe(struct platform_device *pdev) { struct rcar_fcp_device *fcp; fcp = devm_kzalloc(&pdev->dev, sizeof(*fcp), GFP_KERNEL); if (fcp == NULL) return -ENOMEM; fcp->dev = &pdev->dev; dma_set_max_seg_size(fcp->dev, UINT_MAX); pm_runtime_enable(&pdev->dev); mutex_lock(&fcp_lock); list_add_tail(&fcp->list, &fcp_devices); mutex_unlock(&fcp_lock); platform_set_drvdata(pdev, fcp); return 0; } static void rcar_fcp_remove(struct platform_device *pdev) { struct rcar_fcp_device *fcp = platform_get_drvdata(pdev); mutex_lock(&fcp_lock); list_del(&fcp->list); mutex_unlock(&fcp_lock); pm_runtime_disable(&pdev->dev); } static const struct of_device_id rcar_fcp_of_match[] = { { .compatible = "renesas,fcpf" }, { .compatible = "renesas,fcpv" }, { }, }; MODULE_DEVICE_TABLE(of, rcar_fcp_of_match); static struct platform_driver rcar_fcp_platform_driver = { .probe = rcar_fcp_probe, .remove_new = rcar_fcp_remove, .driver = { .name = "rcar-fcp", .of_match_table = rcar_fcp_of_match, .suppress_bind_attrs = true, }, }; 
module_platform_driver(rcar_fcp_platform_driver); MODULE_ALIAS("rcar-fcp"); MODULE_AUTHOR("Laurent Pinchart <[email protected]>"); MODULE_DESCRIPTION("Renesas FCP Driver"); MODULE_LICENSE("GPL");
linux-master
drivers/media/platform/renesas/rcar-fcp.c
// SPDX-License-Identifier: GPL-2.0 /* * V4L2 Driver for Renesas Capture Engine Unit (CEU) interface * Copyright (C) 2017-2018 Jacopo Mondi <[email protected]> * * Based on soc-camera driver "soc_camera/sh_mobile_ceu_camera.c" * Copyright (C) 2008 Magnus Damm * * Based on V4L2 Driver for PXA camera host - "pxa_camera.c", * Copyright (C) 2006, Sascha Hauer, Pengutronix * Copyright (C) 2008, Guennadi Liakhovetski <[email protected]> */ #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/videodev2.h> #include <media/v4l2-async.h> #include <media/v4l2-common.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-image-sizes.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mediabus.h> #include <media/videobuf2-dma-contig.h> #include <media/drv-intf/renesas-ceu.h> #define DRIVER_NAME "renesas-ceu" /* CEU registers offsets and masks. 
*/ #define CEU_CAPSR 0x00 /* Capture start register */ #define CEU_CAPCR 0x04 /* Capture control register */ #define CEU_CAMCR 0x08 /* Capture interface control register */ #define CEU_CAMOR 0x10 /* Capture interface offset register */ #define CEU_CAPWR 0x14 /* Capture interface width register */ #define CEU_CAIFR 0x18 /* Capture interface input format register */ #define CEU_CRCNTR 0x28 /* CEU register control register */ #define CEU_CRCMPR 0x2c /* CEU register forcible control register */ #define CEU_CFLCR 0x30 /* Capture filter control register */ #define CEU_CFSZR 0x34 /* Capture filter size clip register */ #define CEU_CDWDR 0x38 /* Capture destination width register */ #define CEU_CDAYR 0x3c /* Capture data address Y register */ #define CEU_CDACR 0x40 /* Capture data address C register */ #define CEU_CFWCR 0x5c /* Firewall operation control register */ #define CEU_CDOCR 0x64 /* Capture data output control register */ #define CEU_CEIER 0x70 /* Capture event interrupt enable register */ #define CEU_CETCR 0x74 /* Capture event flag clear register */ #define CEU_CSTSR 0x7c /* Capture status register */ #define CEU_CSRTR 0x80 /* Capture software reset register */ /* Data synchronous fetch mode. */ #define CEU_CAMCR_JPEG BIT(4) /* Input components ordering: CEU_CAMCR.DTARY field. */ #define CEU_CAMCR_DTARY_8_UYVY (0x00 << 8) #define CEU_CAMCR_DTARY_8_VYUY (0x01 << 8) #define CEU_CAMCR_DTARY_8_YUYV (0x02 << 8) #define CEU_CAMCR_DTARY_8_YVYU (0x03 << 8) /* TODO: input components ordering for 16 bits input. */ /* Bus transfer MTU. */ #define CEU_CAPCR_BUS_WIDTH256 (0x3 << 20) /* Bus width configuration. */ #define CEU_CAMCR_DTIF_16BITS BIT(12) /* No downsampling to planar YUV420 in image fetch mode. */ #define CEU_CDOCR_NO_DOWSAMPLE BIT(4) /* Swap all input data in 8-bit, 16-bits and 32-bits units (Figure 46.45). */ #define CEU_CDOCR_SWAP_ENDIANNESS (7) /* Capture reset and enable bits. 
*/ #define CEU_CAPSR_CPKIL BIT(16) #define CEU_CAPSR_CE BIT(0) /* CEU operating flag bit. */ #define CEU_CAPCR_CTNCP BIT(16) #define CEU_CSTRST_CPTON BIT(0) /* Platform specific IRQ source flags. */ #define CEU_CETCR_ALL_IRQS_RZ 0x397f313 #define CEU_CETCR_ALL_IRQS_SH4 0x3d7f313 /* Prohibited register access interrupt bit. */ #define CEU_CETCR_IGRW BIT(4) /* One-frame capture end interrupt. */ #define CEU_CEIER_CPE BIT(0) /* VBP error. */ #define CEU_CEIER_VBP BIT(20) #define CEU_CEIER_MASK (CEU_CEIER_CPE | CEU_CEIER_VBP) #define CEU_MAX_WIDTH 2560 #define CEU_MAX_HEIGHT 1920 #define CEU_MAX_BPL 8188 #define CEU_W_MAX(w) ((w) < CEU_MAX_WIDTH ? (w) : CEU_MAX_WIDTH) #define CEU_H_MAX(h) ((h) < CEU_MAX_HEIGHT ? (h) : CEU_MAX_HEIGHT) /* * ceu_bus_fmt - describe a 8-bits yuyv format the sensor can produce * * @mbus_code: bus format code * @fmt_order: CEU_CAMCR.DTARY ordering of input components (Y, Cb, Cr) * @fmt_order_swap: swapped CEU_CAMCR.DTARY ordering of input components * (Y, Cr, Cb) * @swapped: does Cr appear before Cb? * @bps: number of bits sent over bus for each sample * @bpp: number of bits per pixels unit */ struct ceu_mbus_fmt { u32 mbus_code; u32 fmt_order; u32 fmt_order_swap; bool swapped; u8 bps; u8 bpp; }; /* * ceu_buffer - Link vb2 buffer to the list of available buffers. */ struct ceu_buffer { struct vb2_v4l2_buffer vb; struct list_head queue; }; static inline struct ceu_buffer *vb2_to_ceu(struct vb2_v4l2_buffer *vbuf) { return container_of(vbuf, struct ceu_buffer, vb); } /* * ceu_subdev - Wraps v4l2 sub-device and provides async subdevice. 
*/ struct ceu_subdev { struct v4l2_async_connection asd; struct v4l2_subdev *v4l2_sd; /* per-subdevice mbus configuration options */ unsigned int mbus_flags; struct ceu_mbus_fmt mbus_fmt; }; static struct ceu_subdev *to_ceu_subdev(struct v4l2_async_connection *asd) { return container_of(asd, struct ceu_subdev, asd); } /* * ceu_device - CEU device instance */ struct ceu_device { struct device *dev; struct video_device vdev; struct v4l2_device v4l2_dev; /* subdevices descriptors */ struct ceu_subdev **subdevs; /* the subdevice currently in use */ struct ceu_subdev *sd; unsigned int sd_index; unsigned int num_sd; /* platform specific mask with all IRQ sources flagged */ u32 irq_mask; /* currently configured field and pixel format */ enum v4l2_field field; struct v4l2_pix_format_mplane v4l2_pix; /* async subdev notification helpers */ struct v4l2_async_notifier notifier; /* vb2 queue, capture buffer list and active buffer pointer */ struct vb2_queue vb2_vq; struct list_head capture; struct vb2_v4l2_buffer *active; unsigned int sequence; /* mlock - lock access to interface reset and vb2 queue */ struct mutex mlock; /* lock - lock access to capture buffer queue and active buffer */ spinlock_t lock; /* base - CEU memory base address */ void __iomem *base; }; static inline struct ceu_device *v4l2_to_ceu(struct v4l2_device *v4l2_dev) { return container_of(v4l2_dev, struct ceu_device, v4l2_dev); } /* --- CEU memory output formats --- */ /* * ceu_fmt - describe a memory output format supported by CEU interface. * * @fourcc: memory layout fourcc format code * @bpp: number of bits for each pixel stored in memory */ struct ceu_fmt { u32 fourcc; u32 bpp; }; /* * ceu_format_list - List of supported memory output formats * * If sensor provides any YUYV bus format, all the following planar memory * formats are available thanks to CEU re-ordering and sub-sampling * capabilities. 
*/ static const struct ceu_fmt ceu_fmt_list[] = { { .fourcc = V4L2_PIX_FMT_NV16, .bpp = 16, }, { .fourcc = V4L2_PIX_FMT_NV61, .bpp = 16, }, { .fourcc = V4L2_PIX_FMT_NV12, .bpp = 12, }, { .fourcc = V4L2_PIX_FMT_NV21, .bpp = 12, }, { .fourcc = V4L2_PIX_FMT_YUYV, .bpp = 16, }, { .fourcc = V4L2_PIX_FMT_UYVY, .bpp = 16, }, { .fourcc = V4L2_PIX_FMT_YVYU, .bpp = 16, }, { .fourcc = V4L2_PIX_FMT_VYUY, .bpp = 16, }, }; static const struct ceu_fmt *get_ceu_fmt_from_fourcc(unsigned int fourcc) { const struct ceu_fmt *fmt = &ceu_fmt_list[0]; unsigned int i; for (i = 0; i < ARRAY_SIZE(ceu_fmt_list); i++, fmt++) if (fmt->fourcc == fourcc) return fmt; return NULL; } static bool ceu_fmt_mplane(struct v4l2_pix_format_mplane *pix) { switch (pix->pixelformat) { case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_YVYU: case V4L2_PIX_FMT_VYUY: return false; case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: return true; default: return false; } } /* --- CEU HW operations --- */ static void ceu_write(struct ceu_device *priv, unsigned int reg_offs, u32 data) { iowrite32(data, priv->base + reg_offs); } static u32 ceu_read(struct ceu_device *priv, unsigned int reg_offs) { return ioread32(priv->base + reg_offs); } /* * ceu_soft_reset() - Software reset the CEU interface. * @ceu_device: CEU device. * * Returns 0 for success, -EIO for error. */ static int ceu_soft_reset(struct ceu_device *ceudev) { unsigned int i; ceu_write(ceudev, CEU_CAPSR, CEU_CAPSR_CPKIL); for (i = 0; i < 100; i++) { if (!(ceu_read(ceudev, CEU_CSTSR) & CEU_CSTRST_CPTON)) break; udelay(1); } if (i == 100) { dev_err(ceudev->dev, "soft reset time out\n"); return -EIO; } for (i = 0; i < 100; i++) { if (!(ceu_read(ceudev, CEU_CAPSR) & CEU_CAPSR_CPKIL)) return 0; udelay(1); } /* If we get here, CEU has not reset properly. */ return -EIO; } /* --- CEU Capture Operations --- */ /* * ceu_hw_config() - Configure CEU interface registers. 
*/ static int ceu_hw_config(struct ceu_device *ceudev) { u32 camcr, cdocr, cfzsr, cdwdr, capwr; struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix; struct ceu_subdev *ceu_sd = ceudev->sd; struct ceu_mbus_fmt *mbus_fmt = &ceu_sd->mbus_fmt; unsigned int mbus_flags = ceu_sd->mbus_flags; /* Start configuring CEU registers */ ceu_write(ceudev, CEU_CAIFR, 0); ceu_write(ceudev, CEU_CFWCR, 0); ceu_write(ceudev, CEU_CRCNTR, 0); ceu_write(ceudev, CEU_CRCMPR, 0); /* Set the frame capture period for both image capture and data sync. */ capwr = (pix->height << 16) | pix->width * mbus_fmt->bpp / 8; /* * Swap input data endianness by default. * In data fetch mode bytes are received in chunks of 8 bytes. * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first) * The data is however by default written to memory in reverse order: * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte) * * Use CEU_CDOCR[2:0] to swap data ordering. */ cdocr = CEU_CDOCR_SWAP_ENDIANNESS; /* * Configure CAMCR and CDOCR: * match input components ordering with memory output format and * handle downsampling to YUV420. * * If the memory output planar format is 'swapped' (Cr before Cb) and * input format is not, use the swapped version of CAMCR.DTARY. * * If the memory output planar format is not 'swapped' (Cb before Cr) * and input format is, use the swapped version of CAMCR.DTARY. * * CEU by default downsample to planar YUV420 (CDCOR[4] = 0). * If output is planar YUV422 set CDOCR[4] = 1 * * No downsample for data fetch sync mode. */ switch (pix->pixelformat) { /* Data fetch sync mode */ case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_YVYU: case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_VYUY: camcr = CEU_CAMCR_JPEG; cdocr |= CEU_CDOCR_NO_DOWSAMPLE; cfzsr = (pix->height << 16) | pix->width; cdwdr = pix->plane_fmt[0].bytesperline; break; /* Non-swapped planar image capture mode. 
*/ case V4L2_PIX_FMT_NV16: cdocr |= CEU_CDOCR_NO_DOWSAMPLE; fallthrough; case V4L2_PIX_FMT_NV12: if (mbus_fmt->swapped) camcr = mbus_fmt->fmt_order_swap; else camcr = mbus_fmt->fmt_order; cfzsr = (pix->height << 16) | pix->width; cdwdr = pix->width; break; /* Swapped planar image capture mode. */ case V4L2_PIX_FMT_NV61: cdocr |= CEU_CDOCR_NO_DOWSAMPLE; fallthrough; case V4L2_PIX_FMT_NV21: if (mbus_fmt->swapped) camcr = mbus_fmt->fmt_order; else camcr = mbus_fmt->fmt_order_swap; cfzsr = (pix->height << 16) | pix->width; cdwdr = pix->width; break; default: return -EINVAL; } camcr |= mbus_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0; camcr |= mbus_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0; /* TODO: handle 16 bit bus width with DTIF bit in CAMCR */ ceu_write(ceudev, CEU_CAMCR, camcr); ceu_write(ceudev, CEU_CDOCR, cdocr); ceu_write(ceudev, CEU_CAPCR, CEU_CAPCR_BUS_WIDTH256); /* * TODO: make CAMOR offsets configurable. * CAMOR wants to know the number of blanks between a VS/HS signal * and valid data. This value should actually come from the sensor... */ ceu_write(ceudev, CEU_CAMOR, 0); /* TODO: 16 bit bus width require re-calculation of cdwdr and cfzsr */ ceu_write(ceudev, CEU_CAPWR, capwr); ceu_write(ceudev, CEU_CFSZR, cfzsr); ceu_write(ceudev, CEU_CDWDR, cdwdr); return 0; } /* * ceu_capture() - Trigger start of a capture sequence. * * Program the CEU DMA registers with addresses where to transfer image data. */ static int ceu_capture(struct ceu_device *ceudev) { struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix; dma_addr_t phys_addr_top; phys_addr_top = vb2_dma_contig_plane_dma_addr(&ceudev->active->vb2_buf, 0); ceu_write(ceudev, CEU_CDAYR, phys_addr_top); /* Ignore CbCr plane for non multi-planar image formats. */ if (ceu_fmt_mplane(pix)) { phys_addr_top = vb2_dma_contig_plane_dma_addr(&ceudev->active->vb2_buf, 1); ceu_write(ceudev, CEU_CDACR, phys_addr_top); } /* * Trigger new capture start: once for each frame, as we work in * one-frame capture mode. 
*/ ceu_write(ceudev, CEU_CAPSR, CEU_CAPSR_CE); return 0; } static irqreturn_t ceu_irq(int irq, void *data) { struct ceu_device *ceudev = data; struct vb2_v4l2_buffer *vbuf; struct ceu_buffer *buf; u32 status; /* Clean interrupt status. */ status = ceu_read(ceudev, CEU_CETCR); ceu_write(ceudev, CEU_CETCR, ~ceudev->irq_mask); /* Unexpected interrupt. */ if (!(status & CEU_CEIER_MASK)) return IRQ_NONE; spin_lock(&ceudev->lock); /* Stale interrupt from a released buffer, ignore it. */ vbuf = ceudev->active; if (!vbuf) { spin_unlock(&ceudev->lock); return IRQ_HANDLED; } /* * When a VBP interrupt occurs, no capture end interrupt will occur * and the image of that frame is not captured correctly. */ if (status & CEU_CEIER_VBP) { dev_err(ceudev->dev, "VBP interrupt: abort capture\n"); goto error_irq_out; } /* Prepare to return the 'previous' buffer. */ vbuf->vb2_buf.timestamp = ktime_get_ns(); vbuf->sequence = ceudev->sequence++; vbuf->field = ceudev->field; /* Prepare a new 'active' buffer and trigger a new capture. */ if (!list_empty(&ceudev->capture)) { buf = list_first_entry(&ceudev->capture, struct ceu_buffer, queue); list_del(&buf->queue); ceudev->active = &buf->vb; ceu_capture(ceudev); } /* Return the 'previous' buffer. */ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); spin_unlock(&ceudev->lock); return IRQ_HANDLED; error_irq_out: /* Return the 'previous' buffer and all queued ones. 
*/ vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_ERROR); list_for_each_entry(buf, &ceudev->capture, queue) vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); spin_unlock(&ceudev->lock); return IRQ_HANDLED; } /* --- CEU Videobuf2 operations --- */ static void ceu_update_plane_sizes(struct v4l2_plane_pix_format *plane, unsigned int bpl, unsigned int szimage) { memset(plane, 0, sizeof(*plane)); plane->sizeimage = szimage; if (plane->bytesperline < bpl || plane->bytesperline > CEU_MAX_BPL) plane->bytesperline = bpl; } /* * ceu_calc_plane_sizes() - Fill per-plane 'struct v4l2_plane_pix_format' * information according to the currently configured * pixel format. * @ceu_device: CEU device. * @ceu_fmt: Active image format. * @pix: Pixel format information (store line width and image sizes) */ static void ceu_calc_plane_sizes(struct ceu_device *ceudev, const struct ceu_fmt *ceu_fmt, struct v4l2_pix_format_mplane *pix) { unsigned int bpl, szimage; switch (pix->pixelformat) { case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_YVYU: case V4L2_PIX_FMT_VYUY: pix->num_planes = 1; bpl = pix->width * ceu_fmt->bpp / 8; szimage = pix->height * bpl; ceu_update_plane_sizes(&pix->plane_fmt[0], bpl, szimage); break; case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: pix->num_planes = 2; bpl = pix->width; szimage = pix->height * pix->width; ceu_update_plane_sizes(&pix->plane_fmt[0], bpl, szimage); ceu_update_plane_sizes(&pix->plane_fmt[1], bpl, szimage / 2); break; case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: default: pix->num_planes = 2; bpl = pix->width; szimage = pix->height * pix->width; ceu_update_plane_sizes(&pix->plane_fmt[0], bpl, szimage); ceu_update_plane_sizes(&pix->plane_fmt[1], bpl, szimage); break; } } /* * ceu_vb2_setup() - is called to check whether the driver can accept the * requested number of buffers and to fill in plane sizes * for the current frame format, if required. 
*/ static int ceu_vb2_setup(struct vb2_queue *vq, unsigned int *count, unsigned int *num_planes, unsigned int sizes[], struct device *alloc_devs[]) { struct ceu_device *ceudev = vb2_get_drv_priv(vq); struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix; unsigned int i; /* num_planes is set: just check plane sizes. */ if (*num_planes) { for (i = 0; i < pix->num_planes; i++) if (sizes[i] < pix->plane_fmt[i].sizeimage) return -EINVAL; return 0; } /* num_planes not set: called from REQBUFS, just set plane sizes. */ *num_planes = pix->num_planes; for (i = 0; i < pix->num_planes; i++) sizes[i] = pix->plane_fmt[i].sizeimage; return 0; } static void ceu_vb2_queue(struct vb2_buffer *vb) { struct ceu_device *ceudev = vb2_get_drv_priv(vb->vb2_queue); struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct ceu_buffer *buf = vb2_to_ceu(vbuf); unsigned long irqflags; spin_lock_irqsave(&ceudev->lock, irqflags); list_add_tail(&buf->queue, &ceudev->capture); spin_unlock_irqrestore(&ceudev->lock, irqflags); } static int ceu_vb2_prepare(struct vb2_buffer *vb) { struct ceu_device *ceudev = vb2_get_drv_priv(vb->vb2_queue); struct v4l2_pix_format_mplane *pix = &ceudev->v4l2_pix; unsigned int i; for (i = 0; i < pix->num_planes; i++) { if (vb2_plane_size(vb, i) < pix->plane_fmt[i].sizeimage) { dev_err(ceudev->dev, "Plane size too small (%lu < %u)\n", vb2_plane_size(vb, i), pix->plane_fmt[i].sizeimage); return -EINVAL; } vb2_set_plane_payload(vb, i, pix->plane_fmt[i].sizeimage); } return 0; } static int ceu_start_streaming(struct vb2_queue *vq, unsigned int count) { struct ceu_device *ceudev = vb2_get_drv_priv(vq); struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd; struct ceu_buffer *buf; unsigned long irqflags; int ret; /* Program the CEU interface according to the CEU image format. 
*/ ret = ceu_hw_config(ceudev); if (ret) goto error_return_bufs; ret = v4l2_subdev_call(v4l2_sd, video, s_stream, 1); if (ret && ret != -ENOIOCTLCMD) { dev_dbg(ceudev->dev, "Subdevice failed to start streaming: %d\n", ret); goto error_return_bufs; } spin_lock_irqsave(&ceudev->lock, irqflags); ceudev->sequence = 0; /* Grab the first available buffer and trigger the first capture. */ buf = list_first_entry(&ceudev->capture, struct ceu_buffer, queue); list_del(&buf->queue); ceudev->active = &buf->vb; /* Clean and program interrupts for first capture. */ ceu_write(ceudev, CEU_CETCR, ~ceudev->irq_mask); ceu_write(ceudev, CEU_CEIER, CEU_CEIER_MASK); ceu_capture(ceudev); spin_unlock_irqrestore(&ceudev->lock, irqflags); return 0; error_return_bufs: spin_lock_irqsave(&ceudev->lock, irqflags); list_for_each_entry(buf, &ceudev->capture, queue) vb2_buffer_done(&ceudev->active->vb2_buf, VB2_BUF_STATE_QUEUED); ceudev->active = NULL; spin_unlock_irqrestore(&ceudev->lock, irqflags); return ret; } static void ceu_stop_streaming(struct vb2_queue *vq) { struct ceu_device *ceudev = vb2_get_drv_priv(vq); struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd; struct ceu_buffer *buf; unsigned long irqflags; /* Clean and disable interrupt sources. */ ceu_write(ceudev, CEU_CETCR, ceu_read(ceudev, CEU_CETCR) & ceudev->irq_mask); ceu_write(ceudev, CEU_CEIER, CEU_CEIER_MASK); v4l2_subdev_call(v4l2_sd, video, s_stream, 0); spin_lock_irqsave(&ceudev->lock, irqflags); if (ceudev->active) { vb2_buffer_done(&ceudev->active->vb2_buf, VB2_BUF_STATE_ERROR); ceudev->active = NULL; } /* Release all queued buffers. 
 */
	list_for_each_entry(buf, &ceudev->capture, queue)
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&ceudev->capture);
	spin_unlock_irqrestore(&ceudev->lock, irqflags);

	ceu_soft_reset(ceudev);
}

static const struct vb2_ops ceu_vb2_ops = {
	.queue_setup		= ceu_vb2_setup,
	.buf_queue		= ceu_vb2_queue,
	.buf_prepare		= ceu_vb2_prepare,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
	.start_streaming	= ceu_start_streaming,
	.stop_streaming		= ceu_stop_streaming,
};

/* --- CEU image formats handling --- */

/*
 * __ceu_try_fmt() - test format on CEU and sensor
 * @ceudev: The CEU device.
 * @v4l2_fmt: format to test.
 * @sd_mbus_code: the media bus code accepted by the subdevice; output param.
 *
 * Returns 0 for success, < 0 for errors.
 */
static int __ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt,
			 u32 *sd_mbus_code)
{
	struct ceu_subdev *ceu_sd = ceudev->sd;
	struct v4l2_pix_format_mplane *pix = &v4l2_fmt->fmt.pix_mp;
	struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
	struct v4l2_subdev_pad_config pad_cfg;
	struct v4l2_subdev_state pad_state = {
		.pads = &pad_cfg,
	};
	const struct ceu_fmt *ceu_fmt;
	u32 mbus_code_old;
	u32 mbus_code;
	int ret;

	/*
	 * Set format on sensor sub device: bus format used to produce memory
	 * format is selected depending on YUV component ordering or
	 * at initialization time.
	 */
	struct v4l2_subdev_format sd_format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};

	mbus_code_old = ceu_sd->mbus_fmt.mbus_code;

	/* Map the requested memory format to a matching bus format. */
	switch (pix->pixelformat) {
	case V4L2_PIX_FMT_YUYV:
		mbus_code = MEDIA_BUS_FMT_YUYV8_2X8;
		break;
	case V4L2_PIX_FMT_UYVY:
		mbus_code = MEDIA_BUS_FMT_UYVY8_2X8;
		break;
	case V4L2_PIX_FMT_YVYU:
		mbus_code = MEDIA_BUS_FMT_YVYU8_2X8;
		break;
	case V4L2_PIX_FMT_VYUY:
		mbus_code = MEDIA_BUS_FMT_VYUY8_2X8;
		break;
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
		/* NV* formats work with whatever YUYV code was negotiated. */
		mbus_code = ceu_sd->mbus_fmt.mbus_code;
		break;

	default:
		/* Unknown fourcc: force the default NV16 memory format. */
		pix->pixelformat = V4L2_PIX_FMT_NV16;
		mbus_code = ceu_sd->mbus_fmt.mbus_code;
		break;
	}

	ceu_fmt = get_ceu_fmt_from_fourcc(pix->pixelformat);

	/* CFSZR requires height and width to be 4-pixel aligned. */
	v4l_bound_align_image(&pix->width, 2, CEU_MAX_WIDTH, 4,
			      &pix->height, 4, CEU_MAX_HEIGHT, 4, 0);

	v4l2_fill_mbus_format_mplane(&sd_format.format, pix);

	/*
	 * Try with the mbus_code matching YUYV components ordering first,
	 * if that one fails, fallback to default selected at initialization
	 * time.
	 */
	sd_format.format.code = mbus_code;
	ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, &pad_state, &sd_format);
	if (ret) {
		if (ret == -EINVAL) {
			/* fallback */
			sd_format.format.code = mbus_code_old;
			ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt,
					       &pad_state, &sd_format);
		}

		if (ret)
			return ret;
	}

	/* Apply size returned by sensor as the CEU can't scale. */
	v4l2_fill_pix_format_mplane(pix, &sd_format.format);

	/* Calculate per-plane sizes based on image format. */
	ceu_calc_plane_sizes(ceudev, ceu_fmt, pix);

	/* Report to caller the configured mbus format.
 */
	*sd_mbus_code = sd_format.format.code;

	return 0;
}

/*
 * ceu_try_fmt() - Wrapper for __ceu_try_fmt; discard configured mbus_fmt
 */
static int ceu_try_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
{
	u32 mbus_code;

	return __ceu_try_fmt(ceudev, v4l2_fmt, &mbus_code);
}

/*
 * ceu_set_fmt() - Apply the supplied format to both sensor and CEU
 */
static int ceu_set_fmt(struct ceu_device *ceudev, struct v4l2_format *v4l2_fmt)
{
	struct ceu_subdev *ceu_sd = ceudev->sd;
	struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
	u32 mbus_code;
	int ret;

	/*
	 * Set format on sensor sub device: bus format used to produce memory
	 * format is selected at initialization time.
	 */
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};

	/* Negotiate the format first; this also fills plane sizes. */
	ret = __ceu_try_fmt(ceudev, v4l2_fmt, &mbus_code);
	if (ret)
		return ret;

	format.format.code = mbus_code;
	v4l2_fill_mbus_format_mplane(&format.format, &v4l2_fmt->fmt.pix_mp);
	ret = v4l2_subdev_call(v4l2_sd, pad, set_fmt, NULL, &format);
	if (ret)
		return ret;

	ceudev->v4l2_pix = v4l2_fmt->fmt.pix_mp;
	ceudev->field = V4L2_FIELD_NONE;

	return 0;
}

/*
 * ceu_set_default_fmt() - Apply default NV16 memory output format with VGA
 *			   sizes.
 */
static int ceu_set_default_fmt(struct ceu_device *ceudev)
{
	int ret;

	struct v4l2_format v4l2_fmt = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE,
		.fmt.pix_mp = {
			.width		= VGA_WIDTH,
			.height		= VGA_HEIGHT,
			.field		= V4L2_FIELD_NONE,
			.pixelformat	= V4L2_PIX_FMT_NV16,
			.num_planes	= 2,
			.plane_fmt	= {
				[0]	= {
					.sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
					.bytesperline = VGA_WIDTH * 2,
				},
				[1]	= {
					.sizeimage = VGA_WIDTH * VGA_HEIGHT * 2,
					.bytesperline = VGA_WIDTH * 2,
				},
			},
		},
	};

	ret = ceu_try_fmt(ceudev, &v4l2_fmt);
	if (ret)
		return ret;

	ceudev->v4l2_pix = v4l2_fmt.fmt.pix_mp;
	ceudev->field = V4L2_FIELD_NONE;

	return 0;
}

/*
 * ceu_init_mbus_fmt() - Query sensor for supported formats and initialize
 *			 CEU media bus format used to produce memory formats.
 *
 * Find out if sensor can produce a permutation of 8-bits YUYV bus format.
 * From a single 8-bits YUYV bus format the CEU can produce several memory
 * output formats:
 * - NV[12|21|16|61] through image fetch mode;
 * - YUYV422 if sensor provides YUYV422
 *
 * TODO: Other YUYV422 permutations through data fetch sync mode and DTARY
 * TODO: Binary data (eg. JPEG) and raw formats through data fetch sync mode
 */
static int ceu_init_mbus_fmt(struct ceu_device *ceudev)
{
	struct ceu_subdev *ceu_sd = ceudev->sd;
	struct ceu_mbus_fmt *mbus_fmt = &ceu_sd->mbus_fmt;
	struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
	bool yuyv_bus_fmt = false;

	struct v4l2_subdev_mbus_code_enum sd_mbus_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.index = 0,
	};

	/* Find out if sensor can produce any permutation of 8-bits YUYV422. */
	while (!yuyv_bus_fmt &&
	       !v4l2_subdev_call(v4l2_sd, pad, enum_mbus_code,
				 NULL, &sd_mbus_fmt)) {
		switch (sd_mbus_fmt.code) {
		case MEDIA_BUS_FMT_YUYV8_2X8:
		case MEDIA_BUS_FMT_YVYU8_2X8:
		case MEDIA_BUS_FMT_UYVY8_2X8:
		case MEDIA_BUS_FMT_VYUY8_2X8:
			yuyv_bus_fmt = true;
			break;
		default:
			/*
			 * Only support 8-bits YUYV bus formats at the moment;
			 *
			 * TODO: add support for binary formats (data sync
			 * fetch mode).
			 */
			break;
		}

		sd_mbus_fmt.index++;
	}

	if (!yuyv_bus_fmt)
		return -ENXIO;

	/*
	 * Save the first encountered YUYV format as "mbus_fmt" and use it
	 * to output all planar YUV422 and YUV420 (NV*) formats to memory as
	 * well as for data synch fetch mode (YUYV - YVYU etc. ).
	 */
	mbus_fmt->mbus_code = sd_mbus_fmt.code;
	mbus_fmt->bps = 8;

	/* Annotate the selected bus format components ordering. */
	switch (sd_mbus_fmt.code) {
	case MEDIA_BUS_FMT_YUYV8_2X8:
		mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_YUYV;
		mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_YVYU;
		mbus_fmt->swapped = false;
		mbus_fmt->bpp = 16;
		break;

	case MEDIA_BUS_FMT_YVYU8_2X8:
		mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_YVYU;
		mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_YUYV;
		mbus_fmt->swapped = true;
		mbus_fmt->bpp = 16;
		break;

	case MEDIA_BUS_FMT_UYVY8_2X8:
		mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_UYVY;
		mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_VYUY;
		mbus_fmt->swapped = false;
		mbus_fmt->bpp = 16;
		break;

	case MEDIA_BUS_FMT_VYUY8_2X8:
		mbus_fmt->fmt_order = CEU_CAMCR_DTARY_8_VYUY;
		mbus_fmt->fmt_order_swap = CEU_CAMCR_DTARY_8_UYVY;
		mbus_fmt->swapped = true;
		mbus_fmt->bpp = 16;
		break;
	}

	return 0;
}

/* --- Runtime PM Handlers --- */

/*
 * ceu_runtime_resume() - soft-reset the interface and turn sensor power on.
 */
static int __maybe_unused ceu_runtime_resume(struct device *dev)
{
	struct ceu_device *ceudev = dev_get_drvdata(dev);
	struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd;

	v4l2_subdev_call(v4l2_sd, core, s_power, 1);

	ceu_soft_reset(ceudev);

	return 0;
}

/*
 * ceu_runtime_suspend() - disable capture and interrupts and soft-reset.
 *			   Turn sensor power off.
*/ static int __maybe_unused ceu_runtime_suspend(struct device *dev) { struct ceu_device *ceudev = dev_get_drvdata(dev); struct v4l2_subdev *v4l2_sd = ceudev->sd->v4l2_sd; v4l2_subdev_call(v4l2_sd, core, s_power, 0); ceu_write(ceudev, CEU_CEIER, 0); ceu_soft_reset(ceudev); return 0; } /* --- File Operations --- */ static int ceu_open(struct file *file) { struct ceu_device *ceudev = video_drvdata(file); int ret; ret = v4l2_fh_open(file); if (ret) return ret; mutex_lock(&ceudev->mlock); /* Causes soft-reset and sensor power on on first open */ ret = pm_runtime_resume_and_get(ceudev->dev); mutex_unlock(&ceudev->mlock); return ret; } static int ceu_release(struct file *file) { struct ceu_device *ceudev = video_drvdata(file); vb2_fop_release(file); mutex_lock(&ceudev->mlock); /* Causes soft-reset and sensor power down on last close */ pm_runtime_put(ceudev->dev); mutex_unlock(&ceudev->mlock); return 0; } static const struct v4l2_file_operations ceu_fops = { .owner = THIS_MODULE, .open = ceu_open, .release = ceu_release, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, .poll = vb2_fop_poll, }; /* --- Video Device IOCTLs --- */ static int ceu_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct ceu_device *ceudev = video_drvdata(file); strscpy(cap->card, "Renesas CEU", sizeof(cap->card)); strscpy(cap->driver, DRIVER_NAME, sizeof(cap->driver)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:renesas-ceu-%s", dev_name(ceudev->dev)); return 0; } static int ceu_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { const struct ceu_fmt *fmt; if (f->index >= ARRAY_SIZE(ceu_fmt_list)) return -EINVAL; fmt = &ceu_fmt_list[f->index]; f->pixelformat = fmt->fourcc; return 0; } static int ceu_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct ceu_device *ceudev = video_drvdata(file); return ceu_try_fmt(ceudev, f); } static int ceu_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { 
	struct ceu_device *ceudev = video_drvdata(file);

	/* The format cannot change while the queue is streaming. */
	if (vb2_is_streaming(&ceudev->vb2_vq))
		return -EBUSY;

	return ceu_set_fmt(ceudev, f);
}

static int ceu_g_fmt_vid_cap(struct file *file, void *priv,
			     struct v4l2_format *f)
{
	struct ceu_device *ceudev = video_drvdata(file);

	f->fmt.pix_mp = ceudev->v4l2_pix;

	return 0;
}

static int ceu_enum_input(struct file *file, void *priv,
			  struct v4l2_input *inp)
{
	struct ceu_device *ceudev = video_drvdata(file);
	struct ceu_subdev *ceusd;

	if (inp->index >= ceudev->num_sd)
		return -EINVAL;

	ceusd = ceudev->subdevs[inp->index];

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	inp->std = 0;
	snprintf(inp->name, sizeof(inp->name), "Camera%u: %s",
		 inp->index, ceusd->v4l2_sd->name);

	return 0;
}

static int ceu_g_input(struct file *file, void *priv, unsigned int *i)
{
	struct ceu_device *ceudev = video_drvdata(file);

	*i = ceudev->sd_index;

	return 0;
}

/*
 * ceu_s_input() - switch the active sensor; restores the previous one if the
 *		   newly selected sensor cannot produce a usable format.
 */
static int ceu_s_input(struct file *file, void *priv, unsigned int i)
{
	struct ceu_device *ceudev = video_drvdata(file);
	struct ceu_subdev *ceu_sd_old;
	int ret;

	if (i >= ceudev->num_sd)
		return -EINVAL;

	if (vb2_is_streaming(&ceudev->vb2_vq))
		return -EBUSY;

	if (i == ceudev->sd_index)
		return 0;

	ceu_sd_old = ceudev->sd;
	ceudev->sd = ceudev->subdevs[i];

	/*
	 * Make sure we can generate output image formats and apply
	 * default one.
	 */
	ret = ceu_init_mbus_fmt(ceudev);
	if (ret) {
		ceudev->sd = ceu_sd_old;
		return -EINVAL;
	}

	ret = ceu_set_default_fmt(ceudev);
	if (ret) {
		ceudev->sd = ceu_sd_old;
		return -EINVAL;
	}

	/* Now that we're sure we can use the sensor, power off the old one. */
	v4l2_subdev_call(ceu_sd_old->v4l2_sd, core, s_power, 0);
	v4l2_subdev_call(ceudev->sd->v4l2_sd, core, s_power, 1);

	ceudev->sd_index = i;

	return 0;
}

static int ceu_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct ceu_device *ceudev = video_drvdata(file);

	return v4l2_g_parm_cap(video_devdata(file), ceudev->sd->v4l2_sd, a);
}

static int ceu_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct ceu_device *ceudev = video_drvdata(file);

	return v4l2_s_parm_cap(video_devdata(file), ceudev->sd->v4l2_sd, a);
}

static int ceu_enum_framesizes(struct file *file, void *fh,
			       struct v4l2_frmsizeenum *fsize)
{
	struct ceu_device *ceudev = video_drvdata(file);
	struct ceu_subdev *ceu_sd = ceudev->sd;
	const struct ceu_fmt *ceu_fmt;
	struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
	int ret;

	struct v4l2_subdev_frame_size_enum fse = {
		.code	= ceu_sd->mbus_fmt.mbus_code,
		.index	= fsize->index,
		.which	= V4L2_SUBDEV_FORMAT_ACTIVE,
	};

	/* Just check if user supplied pixel format is supported. */
	ceu_fmt = get_ceu_fmt_from_fourcc(fsize->pixel_format);
	if (!ceu_fmt)
		return -EINVAL;

	ret = v4l2_subdev_call(v4l2_sd, pad, enum_frame_size, NULL, &fse);
	if (ret)
		return ret;

	/* Clamp the sensor-reported sizes to what the CEU can handle. */
	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
	fsize->discrete.width = CEU_W_MAX(fse.max_width);
	fsize->discrete.height = CEU_H_MAX(fse.max_height);

	return 0;
}

static int ceu_enum_frameintervals(struct file *file, void *fh,
				   struct v4l2_frmivalenum *fival)
{
	struct ceu_device *ceudev = video_drvdata(file);
	struct ceu_subdev *ceu_sd = ceudev->sd;
	const struct ceu_fmt *ceu_fmt;
	struct v4l2_subdev *v4l2_sd = ceu_sd->v4l2_sd;
	int ret;

	struct v4l2_subdev_frame_interval_enum fie = {
		.code	= ceu_sd->mbus_fmt.mbus_code,
		.index = fival->index,
		.width = fival->width,
		.height = fival->height,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};

	/* Just check if user supplied pixel format is supported. */
	ceu_fmt = get_ceu_fmt_from_fourcc(fival->pixel_format);
	if (!ceu_fmt)
		return -EINVAL;

	ret = v4l2_subdev_call(v4l2_sd, pad, enum_frame_interval, NULL,
			       &fie);
	if (ret)
		return ret;

	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = fie.interval;

	return 0;
}

static const struct v4l2_ioctl_ops ceu_ioctl_ops = {
	.vidioc_querycap		= ceu_querycap,

	.vidioc_enum_fmt_vid_cap	= ceu_enum_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap_mplane	= ceu_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap_mplane	= ceu_s_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap_mplane	= ceu_g_fmt_vid_cap,

	.vidioc_enum_input		= ceu_enum_input,
	.vidioc_g_input			= ceu_g_input,
	.vidioc_s_input			= ceu_s_input,

	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,

	.vidioc_g_parm			= ceu_g_parm,
	.vidioc_s_parm			= ceu_s_parm,
	.vidioc_enum_framesizes		= ceu_enum_framesizes,
	.vidioc_enum_frameintervals	= ceu_enum_frameintervals,

	.vidioc_log_status		= v4l2_ctrl_log_status,
	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};

/*
 * ceu_vdev_release() - release CEU video device memory when last reference
 *			to this driver is closed
 */
static void ceu_vdev_release(struct video_device *vdev)
{
	struct ceu_device *ceudev = video_get_drvdata(vdev);

	kfree(ceudev);
}

static int ceu_notify_bound(struct v4l2_async_notifier *notifier,
			    struct v4l2_subdev *v4l2_sd,
			    struct v4l2_async_connection *asd)
{
	struct v4l2_device *v4l2_dev = notifier->v4l2_dev;
	struct ceu_device *ceudev = v4l2_to_ceu(v4l2_dev);
	struct ceu_subdev *ceu_sd = to_ceu_subdev(asd);

	ceu_sd->v4l2_sd = v4l2_sd;
	ceudev->num_sd++;

	return 0;
}

static int ceu_notify_complete(struct v4l2_async_notifier *notifier)
{
	struct v4l2_device *v4l2_dev =
		notifier->v4l2_dev;
	struct ceu_device *ceudev = v4l2_to_ceu(v4l2_dev);
	struct video_device *vdev = &ceudev->vdev;
	struct vb2_queue *q = &ceudev->vb2_vq;
	struct v4l2_subdev *v4l2_sd;
	int ret;

	/* Initialize vb2 queue. */
	q->type			= V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	q->io_modes		= VB2_MMAP | VB2_DMABUF;
	q->drv_priv		= ceudev;
	q->ops			= &ceu_vb2_ops;
	q->mem_ops		= &vb2_dma_contig_memops;
	q->buf_struct_size	= sizeof(struct ceu_buffer);
	q->timestamp_flags	= V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_buffers_needed	= 2;
	q->lock			= &ceudev->mlock;
	q->dev			= ceudev->v4l2_dev.dev;

	ret = vb2_queue_init(q);
	if (ret)
		return ret;

	/*
	 * Make sure at least one sensor is primary and use it to initialize
	 * ceu formats.
	 */
	if (!ceudev->sd) {
		ceudev->sd = ceudev->subdevs[0];
		ceudev->sd_index = 0;
	}

	v4l2_sd = ceudev->sd->v4l2_sd;

	ret = ceu_init_mbus_fmt(ceudev);
	if (ret)
		return ret;

	ret = ceu_set_default_fmt(ceudev);
	if (ret)
		return ret;

	/* Register the video device. */
	strscpy(vdev->name, DRIVER_NAME, sizeof(vdev->name));
	vdev->v4l2_dev		= v4l2_dev;
	vdev->lock		= &ceudev->mlock;
	vdev->queue		= &ceudev->vb2_vq;
	vdev->ctrl_handler	= v4l2_sd->ctrl_handler;
	vdev->fops		= &ceu_fops;
	vdev->ioctl_ops		= &ceu_ioctl_ops;
	/* The release callback frees ceudev; see ceu_vdev_release(). */
	vdev->release		= ceu_vdev_release;
	vdev->device_caps	= V4L2_CAP_VIDEO_CAPTURE_MPLANE |
				  V4L2_CAP_STREAMING;
	video_set_drvdata(vdev, ceudev);

	ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		v4l2_err(vdev->v4l2_dev,
			 "video_register_device failed: %d\n", ret);
		return ret;
	}

	return 0;
}

static const struct v4l2_async_notifier_operations ceu_notify_ops = {
	.bound		= ceu_notify_bound,
	.complete	= ceu_notify_complete,
};

/*
 * ceu_init_async_subdevs() - Initialize CEU subdevices and async_subdevs in
 *			      ceu device. Both DT and platform data parsing use
 *			      this routine.
 *
 * Returns 0 for success, -ENOMEM for failure.
 */
static int ceu_init_async_subdevs(struct ceu_device *ceudev, unsigned int n_sd)
{
	/* Reserve memory for 'n_sd' ceu_subdev descriptors. */
	ceudev->subdevs = devm_kcalloc(ceudev->dev, n_sd,
				       sizeof(*ceudev->subdevs), GFP_KERNEL);
	if (!ceudev->subdevs)
		return -ENOMEM;

	ceudev->sd = NULL;
	ceudev->sd_index = 0;
	ceudev->num_sd = 0;

	return 0;
}

/*
 * ceu_parse_platform_data() - Initialize async_subdevices using platform
 *			       device provided data.
 */
static int ceu_parse_platform_data(struct ceu_device *ceudev,
				   const struct ceu_platform_data *pdata)
{
	const struct ceu_async_subdev *async_sd;
	struct ceu_subdev *ceu_sd;
	unsigned int i;
	int ret;

	if (pdata->num_subdevs == 0)
		return -ENODEV;

	ret = ceu_init_async_subdevs(ceudev, pdata->num_subdevs);
	if (ret)
		return ret;

	for (i = 0; i < pdata->num_subdevs; i++) {
		/* Setup the ceu subdevice and the async subdevice. */
		async_sd = &pdata->subdevs[i];

		ceu_sd = v4l2_async_nf_add_i2c(&ceudev->notifier,
					       async_sd->i2c_adapter_id,
					       async_sd->i2c_address,
					       struct ceu_subdev);
		if (IS_ERR(ceu_sd)) {
			v4l2_async_nf_cleanup(&ceudev->notifier);
			return PTR_ERR(ceu_sd);
		}
		ceu_sd->mbus_flags = async_sd->flags;
		ceudev->subdevs[i] = ceu_sd;
	}

	return pdata->num_subdevs;
}

/*
 * ceu_parse_dt() - Initialize async_subdevs parsing device tree graph.
 */
static int ceu_parse_dt(struct ceu_device *ceudev)
{
	struct device_node *of = ceudev->dev->of_node;
	struct device_node *ep;
	struct ceu_subdev *ceu_sd;
	unsigned int i;
	int num_ep;
	int ret;

	num_ep = of_graph_get_endpoint_count(of);
	if (!num_ep)
		return -ENODEV;

	ret = ceu_init_async_subdevs(ceudev, num_ep);
	if (ret)
		return ret;

	for (i = 0; i < num_ep; i++) {
		struct v4l2_fwnode_endpoint fw_ep = {
			.bus_type = V4L2_MBUS_PARALLEL,
			.bus = {
				.parallel = {
					.flags = V4L2_MBUS_HSYNC_ACTIVE_HIGH |
						 V4L2_MBUS_VSYNC_ACTIVE_HIGH,
					.bus_width = 8,
				},
			},
		};

		ep = of_graph_get_endpoint_by_regs(of, 0, i);
		if (!ep) {
			dev_err(ceudev->dev,
				"No subdevice connected on endpoint %u.\n", i);
			ret = -ENODEV;
			goto error_cleanup;
		}

		ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &fw_ep);
		if (ret) {
			dev_err(ceudev->dev,
				"Unable to parse endpoint #%u: %d.\n", i, ret);
			goto error_cleanup;
		}

		/* Setup the ceu subdevice and the async subdevice. */
		ceu_sd = v4l2_async_nf_add_fwnode_remote(&ceudev->notifier,
							 of_fwnode_handle(ep),
							 struct ceu_subdev);
		if (IS_ERR(ceu_sd)) {
			ret = PTR_ERR(ceu_sd);
			goto error_cleanup;
		}
		ceu_sd->mbus_flags = fw_ep.bus.parallel.flags;
		ceudev->subdevs[i] = ceu_sd;

		of_node_put(ep);
	}

	return num_ep;

error_cleanup:
	v4l2_async_nf_cleanup(&ceudev->notifier);
	/* of_node_put(NULL) is a no-op, so this is safe on the !ep path. */
	of_node_put(ep);
	return ret;
}

/*
 * struct ceu_data - Platform specific CEU data
 * @irq_mask: CETCR mask with all interrupt sources enabled. The mask differs
 *	      between SH4 and RZ platforms.
 */
struct ceu_data {
	u32 irq_mask;
};

static const struct ceu_data ceu_data_sh4 = {
	.irq_mask = CEU_CETCR_ALL_IRQS_SH4,
};

#if IS_ENABLED(CONFIG_OF)
static const struct ceu_data ceu_data_rz = {
	.irq_mask = CEU_CETCR_ALL_IRQS_RZ,
};

static const struct of_device_id ceu_of_match[] = {
	{ .compatible = "renesas,r7s72100-ceu", .data = &ceu_data_rz },
	{ .compatible = "renesas,r8a7740-ceu", .data = &ceu_data_rz },
	{ }
};
MODULE_DEVICE_TABLE(of, ceu_of_match);
#endif

static int ceu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct ceu_data *ceu_data;
	struct ceu_device *ceudev;
	unsigned int irq;
	int num_subdevs;
	int ret;

	/*
	 * Allocated with kzalloc (not devm) on purpose: lifetime is tied to
	 * the video device and the memory is freed in ceu_vdev_release().
	 */
	ceudev = kzalloc(sizeof(*ceudev), GFP_KERNEL);
	if (!ceudev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ceudev);
	ceudev->dev = dev;

	INIT_LIST_HEAD(&ceudev->capture);
	spin_lock_init(&ceudev->lock);
	mutex_init(&ceudev->mlock);

	ceudev->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ceudev->base)) {
		ret = PTR_ERR(ceudev->base);
		goto error_free_ceudev;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto error_free_ceudev;
	irq = ret;

	ret = devm_request_irq(dev, irq, ceu_irq,
			       0, dev_name(dev), ceudev);
	if (ret) {
		dev_err(&pdev->dev, "Unable to request CEU interrupt.\n");
		goto error_free_ceudev;
	}

	pm_runtime_enable(dev);

	ret = v4l2_device_register(dev, &ceudev->v4l2_dev);
	if (ret)
		goto error_pm_disable;

	v4l2_async_nf_init(&ceudev->notifier, &ceudev->v4l2_dev);

	if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
		ceu_data = of_device_get_match_data(dev);
		num_subdevs = ceu_parse_dt(ceudev);
	} else if (dev->platform_data) {
		/* Assume SH4 if booting with platform data. */
		ceu_data = &ceu_data_sh4;
		num_subdevs = ceu_parse_platform_data(ceudev,
						      dev->platform_data);
	} else {
		num_subdevs = -EINVAL;
	}

	if (num_subdevs < 0) {
		ret = num_subdevs;
		goto error_v4l2_unregister;
	}
	ceudev->irq_mask = ceu_data->irq_mask;

	ceudev->notifier.v4l2_dev	= &ceudev->v4l2_dev;
	ceudev->notifier.ops		= &ceu_notify_ops;
	ret = v4l2_async_nf_register(&ceudev->notifier);
	if (ret)
		goto error_cleanup;

	dev_info(dev, "Renesas Capture Engine Unit %s\n", dev_name(dev));

	return 0;

error_cleanup:
	v4l2_async_nf_cleanup(&ceudev->notifier);
error_v4l2_unregister:
	v4l2_device_unregister(&ceudev->v4l2_dev);
error_pm_disable:
	pm_runtime_disable(dev);
error_free_ceudev:
	kfree(ceudev);

	return ret;
}

static void ceu_remove(struct platform_device *pdev)
{
	struct ceu_device *ceudev = platform_get_drvdata(pdev);

	pm_runtime_disable(ceudev->dev);

	v4l2_async_nf_unregister(&ceudev->notifier);

	v4l2_async_nf_cleanup(&ceudev->notifier);

	v4l2_device_unregister(&ceudev->v4l2_dev);

	/* ceudev itself is freed by the video device release callback. */
	video_unregister_device(&ceudev->vdev);
}

static const struct dev_pm_ops ceu_pm_ops = {
	SET_RUNTIME_PM_OPS(ceu_runtime_suspend,
			   ceu_runtime_resume,
			   NULL)
};

static struct platform_driver ceu_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &ceu_pm_ops,
		.of_match_table	= of_match_ptr(ceu_of_match),
	},
	.probe		= ceu_probe,
	.remove_new	= ceu_remove,
};

module_platform_driver(ceu_driver);

MODULE_DESCRIPTION("Renesas CEU camera driver");
MODULE_AUTHOR("Jacopo Mondi <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/media/platform/renesas/renesas-ceu.c
// SPDX-License-Identifier: GPL-2.0+ /* * Renesas R-Car Fine Display Processor * * Video format converter and frame deinterlacer device. * * Author: Kieran Bingham, <[email protected]> * Copyright (c) 2016 Renesas Electronics Corporation. * * This code is developed and inspired from the vim2m, rcar_jpu, * m2m-deinterlace, and vsp1 drivers. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/timer.h> #include <media/rcar-fcp.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mem2mem.h> #include <media/videobuf2-dma-contig.h> static unsigned int debug; module_param(debug, uint, 0644); MODULE_PARM_DESC(debug, "activate debug info"); /* Minimum and maximum frame width/height */ #define FDP1_MIN_W 80U #define FDP1_MIN_H 80U #define FDP1_MAX_W 3840U #define FDP1_MAX_H 2160U #define FDP1_MAX_PLANES 3U #define FDP1_MAX_STRIDE 8190U /* Flags that indicate a format can be used for capture/output */ #define FDP1_CAPTURE BIT(0) #define FDP1_OUTPUT BIT(1) #define DRIVER_NAME "rcar_fdp1" /* Number of Job's to have available on the processing queue */ #define FDP1_NUMBER_JOBS 8 #define dprintk(fdp1, fmt, arg...) 
\ v4l2_dbg(1, debug, &fdp1->v4l2_dev, "%s: " fmt, __func__, ## arg) /* * FDP1 registers and bits */ /* FDP1 start register - Imm */ #define FD1_CTL_CMD 0x0000 #define FD1_CTL_CMD_STRCMD BIT(0) /* Sync generator register - Imm */ #define FD1_CTL_SGCMD 0x0004 #define FD1_CTL_SGCMD_SGEN BIT(0) /* Register set end register - Imm */ #define FD1_CTL_REGEND 0x0008 #define FD1_CTL_REGEND_REGEND BIT(0) /* Channel activation register - Vupdt */ #define FD1_CTL_CHACT 0x000c #define FD1_CTL_CHACT_SMW BIT(9) #define FD1_CTL_CHACT_WR BIT(8) #define FD1_CTL_CHACT_SMR BIT(3) #define FD1_CTL_CHACT_RD2 BIT(2) #define FD1_CTL_CHACT_RD1 BIT(1) #define FD1_CTL_CHACT_RD0 BIT(0) /* Operation Mode Register - Vupdt */ #define FD1_CTL_OPMODE 0x0010 #define FD1_CTL_OPMODE_PRG BIT(4) #define FD1_CTL_OPMODE_VIMD_INTERRUPT (0 << 0) #define FD1_CTL_OPMODE_VIMD_BESTEFFORT (1 << 0) #define FD1_CTL_OPMODE_VIMD_NOINTERRUPT (2 << 0) #define FD1_CTL_VPERIOD 0x0014 #define FD1_CTL_CLKCTRL 0x0018 #define FD1_CTL_CLKCTRL_CSTP_N BIT(0) /* Software reset register */ #define FD1_CTL_SRESET 0x001c #define FD1_CTL_SRESET_SRST BIT(0) /* Control status register (V-update-status) */ #define FD1_CTL_STATUS 0x0024 #define FD1_CTL_STATUS_VINT_CNT_MASK GENMASK(31, 16) #define FD1_CTL_STATUS_VINT_CNT_SHIFT 16 #define FD1_CTL_STATUS_SGREGSET BIT(10) #define FD1_CTL_STATUS_SGVERR BIT(9) #define FD1_CTL_STATUS_SGFREND BIT(8) #define FD1_CTL_STATUS_BSY BIT(0) #define FD1_CTL_VCYCLE_STAT 0x0028 /* Interrupt enable register */ #define FD1_CTL_IRQENB 0x0038 /* Interrupt status register */ #define FD1_CTL_IRQSTA 0x003c /* Interrupt control register */ #define FD1_CTL_IRQFSET 0x0040 /* Common IRQ Bit settings */ #define FD1_CTL_IRQ_VERE BIT(16) #define FD1_CTL_IRQ_VINTE BIT(4) #define FD1_CTL_IRQ_FREE BIT(0) #define FD1_CTL_IRQ_MASK (FD1_CTL_IRQ_VERE | \ FD1_CTL_IRQ_VINTE | \ FD1_CTL_IRQ_FREE) /* RPF */ #define FD1_RPF_SIZE 0x0060 #define FD1_RPF_SIZE_MASK GENMASK(12, 0) #define FD1_RPF_SIZE_H_SHIFT 16 #define 
FD1_RPF_SIZE_V_SHIFT 0 #define FD1_RPF_FORMAT 0x0064 #define FD1_RPF_FORMAT_CIPM BIT(16) #define FD1_RPF_FORMAT_RSPYCS BIT(13) #define FD1_RPF_FORMAT_RSPUVS BIT(12) #define FD1_RPF_FORMAT_CF BIT(8) #define FD1_RPF_PSTRIDE 0x0068 #define FD1_RPF_PSTRIDE_Y_SHIFT 16 #define FD1_RPF_PSTRIDE_C_SHIFT 0 /* RPF0 Source Component Y Address register */ #define FD1_RPF0_ADDR_Y 0x006c /* RPF1 Current Picture Registers */ #define FD1_RPF1_ADDR_Y 0x0078 #define FD1_RPF1_ADDR_C0 0x007c #define FD1_RPF1_ADDR_C1 0x0080 /* RPF2 next picture register */ #define FD1_RPF2_ADDR_Y 0x0084 #define FD1_RPF_SMSK_ADDR 0x0090 #define FD1_RPF_SWAP 0x0094 /* WPF */ #define FD1_WPF_FORMAT 0x00c0 #define FD1_WPF_FORMAT_PDV_SHIFT 24 #define FD1_WPF_FORMAT_FCNL BIT(20) #define FD1_WPF_FORMAT_WSPYCS BIT(15) #define FD1_WPF_FORMAT_WSPUVS BIT(14) #define FD1_WPF_FORMAT_WRTM_601_16 (0 << 9) #define FD1_WPF_FORMAT_WRTM_601_0 (1 << 9) #define FD1_WPF_FORMAT_WRTM_709_16 (2 << 9) #define FD1_WPF_FORMAT_CSC BIT(8) #define FD1_WPF_RNDCTL 0x00c4 #define FD1_WPF_RNDCTL_CBRM BIT(28) #define FD1_WPF_RNDCTL_CLMD_NOCLIP (0 << 12) #define FD1_WPF_RNDCTL_CLMD_CLIP_16_235 (1 << 12) #define FD1_WPF_RNDCTL_CLMD_CLIP_1_254 (2 << 12) #define FD1_WPF_PSTRIDE 0x00c8 #define FD1_WPF_PSTRIDE_Y_SHIFT 16 #define FD1_WPF_PSTRIDE_C_SHIFT 0 /* WPF Destination picture */ #define FD1_WPF_ADDR_Y 0x00cc #define FD1_WPF_ADDR_C0 0x00d0 #define FD1_WPF_ADDR_C1 0x00d4 #define FD1_WPF_SWAP 0x00d8 #define FD1_WPF_SWAP_OSWAP_SHIFT 0 #define FD1_WPF_SWAP_SSWAP_SHIFT 4 /* WPF/RPF Common */ #define FD1_RWPF_SWAP_BYTE BIT(0) #define FD1_RWPF_SWAP_WORD BIT(1) #define FD1_RWPF_SWAP_LWRD BIT(2) #define FD1_RWPF_SWAP_LLWD BIT(3) /* IPC */ #define FD1_IPC_MODE 0x0100 #define FD1_IPC_MODE_DLI BIT(8) #define FD1_IPC_MODE_DIM_ADAPT2D3D (0 << 0) #define FD1_IPC_MODE_DIM_FIXED2D (1 << 0) #define FD1_IPC_MODE_DIM_FIXED3D (2 << 0) #define FD1_IPC_MODE_DIM_PREVFIELD (3 << 0) #define FD1_IPC_MODE_DIM_NEXTFIELD (4 << 0) #define FD1_IPC_SMSK_THRESH 0x0104 
#define FD1_IPC_SMSK_THRESH_CONST 0x00010002 #define FD1_IPC_COMB_DET 0x0108 #define FD1_IPC_COMB_DET_CONST 0x00200040 #define FD1_IPC_MOTDEC 0x010c #define FD1_IPC_MOTDEC_CONST 0x00008020 /* DLI registers */ #define FD1_IPC_DLI_BLEND 0x0120 #define FD1_IPC_DLI_BLEND_CONST 0x0080ff02 #define FD1_IPC_DLI_HGAIN 0x0124 #define FD1_IPC_DLI_HGAIN_CONST 0x001000ff #define FD1_IPC_DLI_SPRS 0x0128 #define FD1_IPC_DLI_SPRS_CONST 0x009004ff #define FD1_IPC_DLI_ANGLE 0x012c #define FD1_IPC_DLI_ANGLE_CONST 0x0004080c #define FD1_IPC_DLI_ISOPIX0 0x0130 #define FD1_IPC_DLI_ISOPIX0_CONST 0xff10ff10 #define FD1_IPC_DLI_ISOPIX1 0x0134 #define FD1_IPC_DLI_ISOPIX1_CONST 0x0000ff10 /* Sensor registers */ #define FD1_IPC_SENSOR_TH0 0x0140 #define FD1_IPC_SENSOR_TH0_CONST 0x20208080 #define FD1_IPC_SENSOR_TH1 0x0144 #define FD1_IPC_SENSOR_TH1_CONST 0 #define FD1_IPC_SENSOR_CTL0 0x0170 #define FD1_IPC_SENSOR_CTL0_CONST 0x00002201 #define FD1_IPC_SENSOR_CTL1 0x0174 #define FD1_IPC_SENSOR_CTL1_CONST 0 #define FD1_IPC_SENSOR_CTL2 0x0178 #define FD1_IPC_SENSOR_CTL2_X_SHIFT 16 #define FD1_IPC_SENSOR_CTL2_Y_SHIFT 0 #define FD1_IPC_SENSOR_CTL3 0x017c #define FD1_IPC_SENSOR_CTL3_0_SHIFT 16 #define FD1_IPC_SENSOR_CTL3_1_SHIFT 0 /* Line memory pixel number register */ #define FD1_IPC_LMEM 0x01e0 #define FD1_IPC_LMEM_LINEAR 1024 #define FD1_IPC_LMEM_TILE 960 /* Internal Data (HW Version) */ #define FD1_IP_INTDATA 0x0800 /* R-Car Gen2 HW manual says zero, but actual value matches R-Car H3 ES1.x */ #define FD1_IP_GEN2 0x02010101 #define FD1_IP_M3W 0x02010202 #define FD1_IP_H3 0x02010203 #define FD1_IP_M3N 0x02010204 #define FD1_IP_E3 0x02010205 /* LUTs */ #define FD1_LUT_DIF_ADJ 0x1000 #define FD1_LUT_SAD_ADJ 0x1400 #define FD1_LUT_BLD_GAIN 0x1800 #define FD1_LUT_DIF_GAIN 0x1c00 #define FD1_LUT_MDET 0x2000 /** * struct fdp1_fmt - The FDP1 internal format data * @fourcc: the fourcc code, to match the V4L2 API * @bpp: bits per pixel per plane * @num_planes: number of planes * @hsub: horizontal 
subsampling factor * @vsub: vertical subsampling factor * @fmt: 7-bit format code for the fdp1 hardware * @swap_yc: the Y and C components are swapped (Y comes before C) * @swap_uv: the U and V components are swapped (V comes before U) * @swap: swap register control * @types: types of queue this format is applicable to */ struct fdp1_fmt { u32 fourcc; u8 bpp[3]; u8 num_planes; u8 hsub; u8 vsub; u8 fmt; bool swap_yc; bool swap_uv; u8 swap; u8 types; }; static const struct fdp1_fmt fdp1_formats[] = { /* RGB formats are only supported by the Write Pixel Formatter */ { V4L2_PIX_FMT_RGB332, { 8, 0, 0 }, 1, 1, 1, 0x00, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE }, { V4L2_PIX_FMT_XRGB444, { 16, 0, 0 }, 1, 1, 1, 0x01, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD, FDP1_CAPTURE }, { V4L2_PIX_FMT_XRGB555, { 16, 0, 0 }, 1, 1, 1, 0x04, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD, FDP1_CAPTURE }, { V4L2_PIX_FMT_RGB565, { 16, 0, 0 }, 1, 1, 1, 0x06, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD, FDP1_CAPTURE }, { V4L2_PIX_FMT_ABGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD, FDP1_CAPTURE }, { V4L2_PIX_FMT_XBGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD, FDP1_CAPTURE }, { V4L2_PIX_FMT_ARGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE }, { V4L2_PIX_FMT_XRGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE }, { V4L2_PIX_FMT_RGB24, { 24, 0, 0 }, 1, 1, 1, 0x15, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE }, { V4L2_PIX_FMT_BGR24, { 24, 0, 0 }, 1, 1, 1, 0x18, false, false, FD1_RWPF_SWAP_LLWD | 
FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE }, { V4L2_PIX_FMT_ARGB444, { 16, 0, 0 }, 1, 1, 1, 0x19, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD, FDP1_CAPTURE }, { V4L2_PIX_FMT_ARGB555, { 16, 0, 0 }, 1, 1, 1, 0x1b, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD, FDP1_CAPTURE }, /* YUV Formats are supported by Read and Write Pixel Formatters */ { V4L2_PIX_FMT_NV16M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_NV61M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, true, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_NV12M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_NV21M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, true, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_UYVY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_VYUY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, true, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_YUYV, { 16, 0, 0 }, 1, 2, 1, 0x47, true, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_YVYU, { 16, 0, 0 }, 1, 2, 1, 0x47, true, true, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_YUV444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, 
FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_YVU444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, true, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_YUV422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_YVU422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, true, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_YUV420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, false, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, { V4L2_PIX_FMT_YVU420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, true, FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD | FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE, FDP1_CAPTURE | FDP1_OUTPUT }, }; static int fdp1_fmt_is_rgb(const struct fdp1_fmt *fmt) { return fmt->fmt <= 0x1b; /* Last RGB code */ } /* * FDP1 Lookup tables range from 0...255 only * * Each table must be less than 256 entries, and all tables * are padded out to 256 entries by duplicating the last value. 
*/ static const u8 fdp1_diff_adj[] = { 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf, 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3, 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff, }; static const u8 fdp1_sad_adj[] = { 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf, 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3, 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff, }; static const u8 fdp1_bld_gain[] = { 0x80, }; static const u8 fdp1_dif_gain[] = { 0x80, }; static const u8 fdp1_mdet[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 
0xfb, 0xfc, 0xfd, 0xfe, 0xff }; /* Per-queue, driver-specific private data */ struct fdp1_q_data { const struct fdp1_fmt *fmt; struct v4l2_pix_format_mplane format; unsigned int vsize; unsigned int stride_y; unsigned int stride_c; }; static const struct fdp1_fmt *fdp1_find_format(u32 pixelformat) { const struct fdp1_fmt *fmt; unsigned int i; for (i = 0; i < ARRAY_SIZE(fdp1_formats); i++) { fmt = &fdp1_formats[i]; if (fmt->fourcc == pixelformat) return fmt; } return NULL; } enum fdp1_deint_mode { FDP1_PROGRESSIVE = 0, /* Must be zero when !deinterlacing */ FDP1_ADAPT2D3D, FDP1_FIXED2D, FDP1_FIXED3D, FDP1_PREVFIELD, FDP1_NEXTFIELD, }; #define FDP1_DEINT_MODE_USES_NEXT(mode) \ (mode == FDP1_ADAPT2D3D || \ mode == FDP1_FIXED3D || \ mode == FDP1_NEXTFIELD) #define FDP1_DEINT_MODE_USES_PREV(mode) \ (mode == FDP1_ADAPT2D3D || \ mode == FDP1_FIXED3D || \ mode == FDP1_PREVFIELD) /* * FDP1 operates on potentially 3 fields, which are tracked * from the VB buffers using this context structure. * Will always be a field or a full frame, never two fields. 
*/ struct fdp1_field_buffer { struct vb2_v4l2_buffer *vb; dma_addr_t addrs[3]; /* Should be NONE:TOP:BOTTOM only */ enum v4l2_field field; /* Flag to indicate this is the last field in the vb */ bool last_field; /* Buffer queue lists */ struct list_head list; }; struct fdp1_buffer { struct v4l2_m2m_buffer m2m_buf; struct fdp1_field_buffer fields[2]; unsigned int num_fields; }; static inline struct fdp1_buffer *to_fdp1_buffer(struct vb2_v4l2_buffer *vb) { return container_of(vb, struct fdp1_buffer, m2m_buf.vb); } struct fdp1_job { struct fdp1_field_buffer *previous; struct fdp1_field_buffer *active; struct fdp1_field_buffer *next; struct fdp1_field_buffer *dst; /* A job can only be on one list at a time */ struct list_head list; }; struct fdp1_dev { struct v4l2_device v4l2_dev; struct video_device vfd; struct mutex dev_mutex; spinlock_t irqlock; spinlock_t device_process_lock; void __iomem *regs; unsigned int irq; struct device *dev; /* Job Queues */ struct fdp1_job jobs[FDP1_NUMBER_JOBS]; struct list_head free_job_list; struct list_head queued_job_list; struct list_head hw_job_list; unsigned int clk_rate; struct rcar_fcp_device *fcp; struct v4l2_m2m_dev *m2m_dev; }; struct fdp1_ctx { struct v4l2_fh fh; struct fdp1_dev *fdp1; struct v4l2_ctrl_handler hdl; unsigned int sequence; /* Processed buffers in this transaction */ u8 num_processed; /* Transaction length (i.e. how many buffers per transaction) */ u32 translen; /* Abort requested by m2m */ int aborting; /* Deinterlace processing mode */ enum fdp1_deint_mode deint_mode; /* * Adaptive 2D/3D mode uses a shared mask * This is allocated at streamon, if the ADAPT2D3D mode * is requested */ unsigned int smsk_size; dma_addr_t smsk_addr[2]; void *smsk_cpu; /* Capture pipeline, can specify an alpha value * for supported formats. 
0-255 only */
	unsigned char alpha;

	/* Source and destination queue data */
	struct fdp1_q_data out_q;	/* HW Source */
	struct fdp1_q_data cap_q;	/* HW Destination */

	/*
	 * Field Queues
	 * Interlaced fields are used on 3 occasions, and tracked in this list.
	 *
	 * V4L2 Buffers are tracked inside the fdp1_buffer
	 * and released when the last 'field' completes
	 */
	struct list_head fields_queue;
	unsigned int buffers_queued;

	/*
	 * For de-interlacing we need to track our previous buffer
	 * while preparing our job lists.
	 */
	struct fdp1_field_buffer *previous;
};

/* Translate a v4l2 file handle into its enclosing driver context. */
static inline struct fdp1_ctx *fh_to_ctx(struct v4l2_fh *fh)
{
	return container_of(fh, struct fdp1_ctx, fh);
}

/*
 * Map a v4l2 buffer type to the matching per-queue data:
 * OUTPUT types are the hardware source, everything else the destination.
 */
static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
				      enum v4l2_buf_type type)
{
	if (V4L2_TYPE_IS_OUTPUT(type))
		return &ctx->out_q;
	else
		return &ctx->cap_q;
}

/*
 * list_remove_job: Take the first item off the specified job list
 *
 * The list is protected by fdp1->irqlock.
 *
 * Returns: pointer to a job, or NULL if the list is empty.
 */
static struct fdp1_job *list_remove_job(struct fdp1_dev *fdp1,
					struct list_head *list)
{
	struct fdp1_job *job;
	unsigned long flags;

	spin_lock_irqsave(&fdp1->irqlock, flags);
	job = list_first_entry_or_null(list, struct fdp1_job, list);
	if (job)
		list_del(&job->list);
	spin_unlock_irqrestore(&fdp1->irqlock, flags);

	return job;
}

/*
 * list_add_job: Add a job to the specified job list
 *
 * The list is protected by fdp1->irqlock.
 *
 * Returns: void - always succeeds
 */
static void list_add_job(struct fdp1_dev *fdp1,
			 struct list_head *list,
			 struct fdp1_job *job)
{
	unsigned long flags;

	spin_lock_irqsave(&fdp1->irqlock, flags);
	list_add_tail(&job->list, list);
	spin_unlock_irqrestore(&fdp1->irqlock, flags);
}

/* Take an unused job descriptor from the free list; NULL if none left. */
static struct fdp1_job *fdp1_job_alloc(struct fdp1_dev *fdp1)
{
	return list_remove_job(fdp1, &fdp1->free_job_list);
}

/* Scrub a completed job and return it to the free list for reuse. */
static void fdp1_job_free(struct fdp1_dev *fdp1, struct fdp1_job *job)
{
	/* Ensure that all residue from previous jobs is gone */
	memset(job, 0, sizeof(struct fdp1_job));

	list_add_job(fdp1, &fdp1->free_job_list, job);
}

/* Append a fully-prepared job to the queue awaiting processing. */
static void queue_job(struct fdp1_dev
*fdp1, struct fdp1_job *job) { list_add_job(fdp1, &fdp1->queued_job_list, job); } static struct fdp1_job *get_queued_job(struct fdp1_dev *fdp1) { return list_remove_job(fdp1, &fdp1->queued_job_list); } static void queue_hw_job(struct fdp1_dev *fdp1, struct fdp1_job *job) { list_add_job(fdp1, &fdp1->hw_job_list, job); } static struct fdp1_job *get_hw_queued_job(struct fdp1_dev *fdp1) { return list_remove_job(fdp1, &fdp1->hw_job_list); } /* * Buffer lists handling */ static void fdp1_field_complete(struct fdp1_ctx *ctx, struct fdp1_field_buffer *fbuf) { /* job->previous may be on the first field */ if (!fbuf) return; if (fbuf->last_field) v4l2_m2m_buf_done(fbuf->vb, VB2_BUF_STATE_DONE); } static void fdp1_queue_field(struct fdp1_ctx *ctx, struct fdp1_field_buffer *fbuf) { unsigned long flags; spin_lock_irqsave(&ctx->fdp1->irqlock, flags); list_add_tail(&fbuf->list, &ctx->fields_queue); spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags); ctx->buffers_queued++; } static struct fdp1_field_buffer *fdp1_dequeue_field(struct fdp1_ctx *ctx) { struct fdp1_field_buffer *fbuf; unsigned long flags; ctx->buffers_queued--; spin_lock_irqsave(&ctx->fdp1->irqlock, flags); fbuf = list_first_entry_or_null(&ctx->fields_queue, struct fdp1_field_buffer, list); if (fbuf) list_del(&fbuf->list); spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags); return fbuf; } /* * Return the next field in the queue - or NULL, * without removing the item from the list */ static struct fdp1_field_buffer *fdp1_peek_queued_field(struct fdp1_ctx *ctx) { struct fdp1_field_buffer *fbuf; unsigned long flags; spin_lock_irqsave(&ctx->fdp1->irqlock, flags); fbuf = list_first_entry_or_null(&ctx->fields_queue, struct fdp1_field_buffer, list); spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags); return fbuf; } static u32 fdp1_read(struct fdp1_dev *fdp1, unsigned int reg) { u32 value = ioread32(fdp1->regs + reg); if (debug >= 2) dprintk(fdp1, "Read 0x%08x from 0x%04x\n", value, reg); return value; } static void 
fdp1_write(struct fdp1_dev *fdp1, u32 val, unsigned int reg) { if (debug >= 2) dprintk(fdp1, "Write 0x%08x to 0x%04x\n", val, reg); iowrite32(val, fdp1->regs + reg); } /* IPC registers are to be programmed with constant values */ static void fdp1_set_ipc_dli(struct fdp1_ctx *ctx) { struct fdp1_dev *fdp1 = ctx->fdp1; fdp1_write(fdp1, FD1_IPC_SMSK_THRESH_CONST, FD1_IPC_SMSK_THRESH); fdp1_write(fdp1, FD1_IPC_COMB_DET_CONST, FD1_IPC_COMB_DET); fdp1_write(fdp1, FD1_IPC_MOTDEC_CONST, FD1_IPC_MOTDEC); fdp1_write(fdp1, FD1_IPC_DLI_BLEND_CONST, FD1_IPC_DLI_BLEND); fdp1_write(fdp1, FD1_IPC_DLI_HGAIN_CONST, FD1_IPC_DLI_HGAIN); fdp1_write(fdp1, FD1_IPC_DLI_SPRS_CONST, FD1_IPC_DLI_SPRS); fdp1_write(fdp1, FD1_IPC_DLI_ANGLE_CONST, FD1_IPC_DLI_ANGLE); fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX0_CONST, FD1_IPC_DLI_ISOPIX0); fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX1_CONST, FD1_IPC_DLI_ISOPIX1); } static void fdp1_set_ipc_sensor(struct fdp1_ctx *ctx) { struct fdp1_dev *fdp1 = ctx->fdp1; struct fdp1_q_data *src_q_data = &ctx->out_q; unsigned int x0, x1; unsigned int hsize = src_q_data->format.width; unsigned int vsize = src_q_data->format.height; x0 = hsize / 3; x1 = 2 * hsize / 3; fdp1_write(fdp1, FD1_IPC_SENSOR_TH0_CONST, FD1_IPC_SENSOR_TH0); fdp1_write(fdp1, FD1_IPC_SENSOR_TH1_CONST, FD1_IPC_SENSOR_TH1); fdp1_write(fdp1, FD1_IPC_SENSOR_CTL0_CONST, FD1_IPC_SENSOR_CTL0); fdp1_write(fdp1, FD1_IPC_SENSOR_CTL1_CONST, FD1_IPC_SENSOR_CTL1); fdp1_write(fdp1, ((hsize - 1) << FD1_IPC_SENSOR_CTL2_X_SHIFT) | ((vsize - 1) << FD1_IPC_SENSOR_CTL2_Y_SHIFT), FD1_IPC_SENSOR_CTL2); fdp1_write(fdp1, (x0 << FD1_IPC_SENSOR_CTL3_0_SHIFT) | (x1 << FD1_IPC_SENSOR_CTL3_1_SHIFT), FD1_IPC_SENSOR_CTL3); } /* * fdp1_write_lut: Write a padded LUT to the hw * * FDP1 uses constant data for de-interlacing processing, * with large tables. These hardware tables are all 256 bytes * long, however they often contain repeated data at the end. * * The last byte of the table is written to all remaining entries. 
*/ static void fdp1_write_lut(struct fdp1_dev *fdp1, const u8 *lut, unsigned int len, unsigned int base) { unsigned int i; u8 pad; /* Tables larger than the hw are clipped */ len = min(len, 256u); for (i = 0; i < len; i++) fdp1_write(fdp1, lut[i], base + (i*4)); /* Tables are padded with the last entry */ pad = lut[i-1]; for (; i < 256; i++) fdp1_write(fdp1, pad, base + (i*4)); } static void fdp1_set_lut(struct fdp1_dev *fdp1) { fdp1_write_lut(fdp1, fdp1_diff_adj, ARRAY_SIZE(fdp1_diff_adj), FD1_LUT_DIF_ADJ); fdp1_write_lut(fdp1, fdp1_sad_adj, ARRAY_SIZE(fdp1_sad_adj), FD1_LUT_SAD_ADJ); fdp1_write_lut(fdp1, fdp1_bld_gain, ARRAY_SIZE(fdp1_bld_gain), FD1_LUT_BLD_GAIN); fdp1_write_lut(fdp1, fdp1_dif_gain, ARRAY_SIZE(fdp1_dif_gain), FD1_LUT_DIF_GAIN); fdp1_write_lut(fdp1, fdp1_mdet, ARRAY_SIZE(fdp1_mdet), FD1_LUT_MDET); } static void fdp1_configure_rpf(struct fdp1_ctx *ctx, struct fdp1_job *job) { struct fdp1_dev *fdp1 = ctx->fdp1; u32 picture_size; u32 pstride; u32 format; u32 smsk_addr; struct fdp1_q_data *q_data = &ctx->out_q; /* Picture size is common to Source and Destination frames */ picture_size = (q_data->format.width << FD1_RPF_SIZE_H_SHIFT) | (q_data->vsize << FD1_RPF_SIZE_V_SHIFT); /* Strides */ pstride = q_data->stride_y << FD1_RPF_PSTRIDE_Y_SHIFT; if (q_data->format.num_planes > 1) pstride |= q_data->stride_c << FD1_RPF_PSTRIDE_C_SHIFT; /* Format control */ format = q_data->fmt->fmt; if (q_data->fmt->swap_yc) format |= FD1_RPF_FORMAT_RSPYCS; if (q_data->fmt->swap_uv) format |= FD1_RPF_FORMAT_RSPUVS; if (job->active->field == V4L2_FIELD_BOTTOM) { format |= FD1_RPF_FORMAT_CF; /* Set for Bottom field */ smsk_addr = ctx->smsk_addr[0]; } else { smsk_addr = ctx->smsk_addr[1]; } /* Deint mode is non-zero when deinterlacing */ if (ctx->deint_mode) format |= FD1_RPF_FORMAT_CIPM; fdp1_write(fdp1, format, FD1_RPF_FORMAT); fdp1_write(fdp1, q_data->fmt->swap, FD1_RPF_SWAP); fdp1_write(fdp1, picture_size, FD1_RPF_SIZE); fdp1_write(fdp1, pstride, FD1_RPF_PSTRIDE); 
fdp1_write(fdp1, smsk_addr, FD1_RPF_SMSK_ADDR);

	/* Previous Field Channel (CH0) */
	if (job->previous)
		fdp1_write(fdp1, job->previous->addrs[0], FD1_RPF0_ADDR_Y);

	/* Current Field Channel (CH1) */
	fdp1_write(fdp1, job->active->addrs[0], FD1_RPF1_ADDR_Y);
	fdp1_write(fdp1, job->active->addrs[1], FD1_RPF1_ADDR_C0);
	fdp1_write(fdp1, job->active->addrs[2], FD1_RPF1_ADDR_C1);

	/* Next Field Channel (CH2) */
	if (job->next)
		fdp1_write(fdp1, job->next->addrs[0], FD1_RPF2_ADDR_Y);
}

/*
 * Configure the Write Pixel Formatter (destination picture) for a job:
 * strides, output format, optional YCbCr->RGB conversion matrix, alpha
 * pad value, rounding/clipping and byte swapping.
 */
static void fdp1_configure_wpf(struct fdp1_ctx *ctx,
			       struct fdp1_job *job)
{
	struct fdp1_dev *fdp1 = ctx->fdp1;
	struct fdp1_q_data *src_q_data = &ctx->out_q;
	struct fdp1_q_data *q_data = &ctx->cap_q;
	u32 pstride;
	u32 format;
	u32 swap;
	u32 rndctl;

	pstride = q_data->format.plane_fmt[0].bytesperline
		<< FD1_WPF_PSTRIDE_Y_SHIFT;

	if (q_data->format.num_planes > 1)
		pstride |= q_data->format.plane_fmt[1].bytesperline
			<< FD1_WPF_PSTRIDE_C_SHIFT;

	format = q_data->fmt->fmt; /* Output Format Code */

	if (q_data->fmt->swap_yc)
		format |= FD1_WPF_FORMAT_WSPYCS;
	if (q_data->fmt->swap_uv)
		format |= FD1_WPF_FORMAT_WSPUVS;

	if (fdp1_fmt_is_rgb(q_data->fmt)) {
		/* Enable Colour Space conversion */
		format |= FD1_WPF_FORMAT_CSC;

		/* Set WRTM: conversion matrix chosen from the source
		 * encoding and quantization (709 limited, 601 full,
		 * or 601 limited as the fallback).
		 */
		if (src_q_data->format.ycbcr_enc == V4L2_YCBCR_ENC_709)
			format |= FD1_WPF_FORMAT_WRTM_709_16;
		else if (src_q_data->format.quantization ==
				V4L2_QUANTIZATION_FULL_RANGE)
			format |= FD1_WPF_FORMAT_WRTM_601_0;
		else
			format |= FD1_WPF_FORMAT_WRTM_601_16;
	}

	/* Set an alpha value into the Pad Value */
	format |= ctx->alpha << FD1_WPF_FORMAT_PDV_SHIFT;

	/* Determine picture rounding and clipping */
	rndctl = FD1_WPF_RNDCTL_CBRM; /* Rounding Off */
	rndctl |= FD1_WPF_RNDCTL_CLMD_NOCLIP;

	/* WPF Swap needs both ISWAP and OSWAP setting */
	swap = q_data->fmt->swap << FD1_WPF_SWAP_OSWAP_SHIFT;
	swap |= src_q_data->fmt->swap << FD1_WPF_SWAP_SSWAP_SHIFT;

	fdp1_write(fdp1, format, FD1_WPF_FORMAT);
	fdp1_write(fdp1, rndctl, FD1_WPF_RNDCTL);
	fdp1_write(fdp1, swap, FD1_WPF_SWAP);
	fdp1_write(fdp1, pstride,
FD1_WPF_PSTRIDE); fdp1_write(fdp1, job->dst->addrs[0], FD1_WPF_ADDR_Y); fdp1_write(fdp1, job->dst->addrs[1], FD1_WPF_ADDR_C0); fdp1_write(fdp1, job->dst->addrs[2], FD1_WPF_ADDR_C1); } static void fdp1_configure_deint_mode(struct fdp1_ctx *ctx, struct fdp1_job *job) { struct fdp1_dev *fdp1 = ctx->fdp1; u32 opmode = FD1_CTL_OPMODE_VIMD_NOINTERRUPT; u32 ipcmode = FD1_IPC_MODE_DLI; /* Always set */ u32 channels = FD1_CTL_CHACT_WR | FD1_CTL_CHACT_RD1; /* Always on */ /* De-interlacing Mode */ switch (ctx->deint_mode) { default: case FDP1_PROGRESSIVE: dprintk(fdp1, "Progressive Mode\n"); opmode |= FD1_CTL_OPMODE_PRG; ipcmode |= FD1_IPC_MODE_DIM_FIXED2D; break; case FDP1_ADAPT2D3D: dprintk(fdp1, "Adapt2D3D Mode\n"); if (ctx->sequence == 0 || ctx->aborting) ipcmode |= FD1_IPC_MODE_DIM_FIXED2D; else ipcmode |= FD1_IPC_MODE_DIM_ADAPT2D3D; if (ctx->sequence > 1) { channels |= FD1_CTL_CHACT_SMW; channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2; } if (ctx->sequence > 2) channels |= FD1_CTL_CHACT_SMR; break; case FDP1_FIXED3D: dprintk(fdp1, "Fixed 3D Mode\n"); ipcmode |= FD1_IPC_MODE_DIM_FIXED3D; /* Except for first and last frame, enable all channels */ if (!(ctx->sequence == 0 || ctx->aborting)) channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2; break; case FDP1_FIXED2D: dprintk(fdp1, "Fixed 2D Mode\n"); ipcmode |= FD1_IPC_MODE_DIM_FIXED2D; /* No extra channels enabled */ break; case FDP1_PREVFIELD: dprintk(fdp1, "Previous Field Mode\n"); ipcmode |= FD1_IPC_MODE_DIM_PREVFIELD; channels |= FD1_CTL_CHACT_RD0; /* Previous */ break; case FDP1_NEXTFIELD: dprintk(fdp1, "Next Field Mode\n"); ipcmode |= FD1_IPC_MODE_DIM_NEXTFIELD; channels |= FD1_CTL_CHACT_RD2; /* Next */ break; } fdp1_write(fdp1, channels, FD1_CTL_CHACT); fdp1_write(fdp1, opmode, FD1_CTL_OPMODE); fdp1_write(fdp1, ipcmode, FD1_IPC_MODE); } /* * fdp1_device_process() - Run the hardware * * Configure and start the hardware to generate a single frame * of output given our input parameters. 
*/ static int fdp1_device_process(struct fdp1_ctx *ctx) { struct fdp1_dev *fdp1 = ctx->fdp1; struct fdp1_job *job; unsigned long flags; spin_lock_irqsave(&fdp1->device_process_lock, flags); /* Get a job to process */ job = get_queued_job(fdp1); if (!job) { /* * VINT can call us to see if we can queue another job. * If we have no work to do, we simply return. */ spin_unlock_irqrestore(&fdp1->device_process_lock, flags); return 0; } /* First Frame only? ... */ fdp1_write(fdp1, FD1_CTL_CLKCTRL_CSTP_N, FD1_CTL_CLKCTRL); /* Set the mode, and configuration */ fdp1_configure_deint_mode(ctx, job); /* DLI Static Configuration */ fdp1_set_ipc_dli(ctx); /* Sensor Configuration */ fdp1_set_ipc_sensor(ctx); /* Setup the source picture */ fdp1_configure_rpf(ctx, job); /* Setup the destination picture */ fdp1_configure_wpf(ctx, job); /* Line Memory Pixel Number Register for linear access */ fdp1_write(fdp1, FD1_IPC_LMEM_LINEAR, FD1_IPC_LMEM); /* Enable Interrupts */ fdp1_write(fdp1, FD1_CTL_IRQ_MASK, FD1_CTL_IRQENB); /* Finally, the Immediate Registers */ /* This job is now in the HW queue */ queue_hw_job(fdp1, job); /* Start the command */ fdp1_write(fdp1, FD1_CTL_CMD_STRCMD, FD1_CTL_CMD); /* Registers will update to HW at next VINT */ fdp1_write(fdp1, FD1_CTL_REGEND_REGEND, FD1_CTL_REGEND); /* Enable VINT Generator */ fdp1_write(fdp1, FD1_CTL_SGCMD_SGEN, FD1_CTL_SGCMD); spin_unlock_irqrestore(&fdp1->device_process_lock, flags); return 0; } /* * mem2mem callbacks */ /* * job_ready() - check whether an instance is ready to be scheduled to run */ static int fdp1_m2m_job_ready(void *priv) { struct fdp1_ctx *ctx = priv; struct fdp1_q_data *src_q_data = &ctx->out_q; int srcbufs = 1; int dstbufs = 1; dprintk(ctx->fdp1, "+ Src: %d : Dst: %d\n", v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx), v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)); /* One output buffer is required for each field */ if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field)) dstbufs = 2; if 
(v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < srcbufs ||
	    v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < dstbufs) {
		dprintk(ctx->fdp1, "Not enough buffers available\n");
		return 0;
	}

	return 1;
}

/*
 * job_abort() - m2m callback: stop the hardware immediately.
 *
 * Flags the context so the transaction is cancelled on the next
 * interrupt, then forces an immediate stop by disabling the sync
 * generator and issuing a software reset.
 */
static void fdp1_m2m_job_abort(void *priv)
{
	struct fdp1_ctx *ctx = priv;

	dprintk(ctx->fdp1, "+\n");

	/* Will cancel the transaction in the next interrupt handler */
	ctx->aborting = 1;

	/* Immediate abort sequence */
	fdp1_write(ctx->fdp1, 0, FD1_CTL_SGCMD);
	fdp1_write(ctx->fdp1, FD1_CTL_SRESET_SRST, FD1_CTL_SRESET);
}

/*
 * fdp1_prepare_job: Prepare and queue a new job for a single action of work
 *
 * Prepare the next field, (or frame in progressive) and an output
 * buffer for the hardware to perform a single operation.
 *
 * Returns: the queued job, or NULL when not enough input fields or job
 * descriptors are available.
 */
static struct fdp1_job *fdp1_prepare_job(struct fdp1_ctx *ctx)
{
	struct vb2_v4l2_buffer *vbuf;
	struct fdp1_buffer *fbuf;
	struct fdp1_dev *fdp1 = ctx->fdp1;
	struct fdp1_job *job;
	unsigned int buffers_required = 1;

	dprintk(fdp1, "+\n");

	/* Modes reading the 'next' field need one extra queued field */
	if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode))
		buffers_required = 2;

	if (ctx->buffers_queued < buffers_required)
		return NULL;

	job = fdp1_job_alloc(fdp1);
	if (!job) {
		dprintk(fdp1, "No free jobs currently available\n");
		return NULL;
	}

	job->active = fdp1_dequeue_field(ctx);
	if (!job->active) {
		/* Buffer check should prevent this ever happening */
		dprintk(fdp1, "No input buffers currently available\n");

		fdp1_job_free(fdp1, job);
		return NULL;
	}

	dprintk(fdp1, "+ Buffer en-route...\n");

	/* Source buffers have been prepared on our buffer_queue
	 * Prepare our Output buffer
	 */
	vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
	fbuf = to_fdp1_buffer(vbuf);
	job->dst = &fbuf->fields[0];

	job->active->vb->sequence = ctx->sequence;
	job->dst->vb->sequence = ctx->sequence;
	ctx->sequence++;

	if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) {
		job->previous = ctx->previous;

		/* Active buffer becomes the next job's previous buffer */
		ctx->previous = job->active;
	}

	if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode)) {
		/* Must be called after 'active' is
dequeued */ job->next = fdp1_peek_queued_field(ctx); } /* Transfer timestamps and flags from src->dst */ job->dst->vb->vb2_buf.timestamp = job->active->vb->vb2_buf.timestamp; job->dst->vb->flags = job->active->vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; /* Ideally, the frame-end function will just 'check' to see * if there are more jobs instead */ ctx->translen++; /* Finally, Put this job on the processing queue */ queue_job(fdp1, job); dprintk(fdp1, "Job Queued translen = %d\n", ctx->translen); return job; } /* fdp1_m2m_device_run() - prepares and starts the device for an M2M task * * A single input buffer is taken and serialised into our fdp1_buffer * queue. The queue is then processed to create as many jobs as possible * from our available input. */ static void fdp1_m2m_device_run(void *priv) { struct fdp1_ctx *ctx = priv; struct fdp1_dev *fdp1 = ctx->fdp1; struct vb2_v4l2_buffer *src_vb; struct fdp1_buffer *buf; unsigned int i; dprintk(fdp1, "+\n"); ctx->translen = 0; /* Get our incoming buffer of either one or two fields, or one frame */ src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); buf = to_fdp1_buffer(src_vb); for (i = 0; i < buf->num_fields; i++) { struct fdp1_field_buffer *fbuf = &buf->fields[i]; fdp1_queue_field(ctx, fbuf); dprintk(fdp1, "Queued Buffer [%d] last_field:%d\n", i, fbuf->last_field); } /* Queue as many jobs as our data provides for */ while (fdp1_prepare_job(ctx)) ; if (ctx->translen == 0) { dprintk(fdp1, "No jobs were processed. M2M action complete\n"); v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx); return; } /* Kick the job processing action */ fdp1_device_process(ctx); } /* * device_frame_end: * * Handles the M2M level after a buffer completion event. 
*/ static void device_frame_end(struct fdp1_dev *fdp1, enum vb2_buffer_state state) { struct fdp1_ctx *ctx; unsigned long flags; struct fdp1_job *job = get_hw_queued_job(fdp1); dprintk(fdp1, "+\n"); ctx = v4l2_m2m_get_curr_priv(fdp1->m2m_dev); if (ctx == NULL) { v4l2_err(&fdp1->v4l2_dev, "Instance released before the end of transaction\n"); return; } ctx->num_processed++; /* * fdp1_field_complete will call buf_done only when the last vb2_buffer * reference is complete */ if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) fdp1_field_complete(ctx, job->previous); else fdp1_field_complete(ctx, job->active); spin_lock_irqsave(&fdp1->irqlock, flags); v4l2_m2m_buf_done(job->dst->vb, state); job->dst = NULL; spin_unlock_irqrestore(&fdp1->irqlock, flags); /* Move this job back to the free job list */ fdp1_job_free(fdp1, job); dprintk(fdp1, "curr_ctx->num_processed %d curr_ctx->translen %d\n", ctx->num_processed, ctx->translen); if (ctx->num_processed == ctx->translen || ctx->aborting) { dprintk(ctx->fdp1, "Finishing transaction\n"); ctx->num_processed = 0; v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx); } else { /* * For pipelined performance support, this would * be called from a VINT handler */ fdp1_device_process(ctx); } } /* * video ioctls */ static int fdp1_vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strscpy(cap->driver, DRIVER_NAME, sizeof(cap->driver)); strscpy(cap->card, DRIVER_NAME, sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", DRIVER_NAME); return 0; } static int fdp1_enum_fmt(struct v4l2_fmtdesc *f, u32 type) { unsigned int i, num; num = 0; for (i = 0; i < ARRAY_SIZE(fdp1_formats); ++i) { if (fdp1_formats[i].types & type) { if (num == f->index) break; ++num; } } /* Format not found */ if (i >= ARRAY_SIZE(fdp1_formats)) return -EINVAL; /* Format found */ f->pixelformat = fdp1_formats[i].fourcc; return 0; } static int fdp1_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) 
{ return fdp1_enum_fmt(f, FDP1_CAPTURE); } static int fdp1_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return fdp1_enum_fmt(f, FDP1_OUTPUT); } static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct fdp1_q_data *q_data; struct fdp1_ctx *ctx = fh_to_ctx(priv); if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type)) return -EINVAL; q_data = get_q_data(ctx, f->type); f->fmt.pix_mp = q_data->format; return 0; } static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix, const struct fdp1_fmt *fmt) { unsigned int i; /* Compute and clamp the stride and image size. */ for (i = 0; i < min_t(unsigned int, fmt->num_planes, 2U); ++i) { unsigned int hsub = i > 0 ? fmt->hsub : 1; unsigned int vsub = i > 0 ? fmt->vsub : 1; /* From VSP : TODO: Confirm alignment limits for FDP1 */ unsigned int align = 128; unsigned int bpl; bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline, pix->width / hsub * fmt->bpp[i] / 8, round_down(FDP1_MAX_STRIDE, align)); pix->plane_fmt[i].bytesperline = round_up(bpl, align); pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline * pix->height / vsub; } if (fmt->num_planes == 3) { /* The two chroma planes must have the same stride. */ pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline; pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage; } } static void fdp1_try_fmt_output(struct fdp1_ctx *ctx, const struct fdp1_fmt **fmtinfo, struct v4l2_pix_format_mplane *pix) { const struct fdp1_fmt *fmt; unsigned int width; unsigned int height; /* Validate the pixel format to ensure the output queue supports it. */ fmt = fdp1_find_format(pix->pixelformat); if (!fmt || !(fmt->types & FDP1_OUTPUT)) fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV); if (fmtinfo) *fmtinfo = fmt; pix->pixelformat = fmt->fourcc; pix->num_planes = fmt->num_planes; /* * Progressive video and all interlaced field orders are acceptable. * Default to V4L2_FIELD_INTERLACED. 
*/ if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE && !V4L2_FIELD_HAS_BOTH(pix->field)) pix->field = V4L2_FIELD_INTERLACED; /* * The deinterlacer doesn't care about the colorspace, accept all values * and default to V4L2_COLORSPACE_SMPTE170M. The YUV to RGB conversion * at the output of the deinterlacer supports a subset of encodings and * quantization methods and will only be available when the colorspace * allows it. */ if (pix->colorspace == V4L2_COLORSPACE_DEFAULT) pix->colorspace = V4L2_COLORSPACE_SMPTE170M; /* * Align the width and height for YUV 4:2:2 and 4:2:0 formats and clamp * them to the supported frame size range. The height boundary are * related to the full frame, divide them by two when the format passes * fields in separate buffers. */ width = round_down(pix->width, fmt->hsub); pix->width = clamp(width, FDP1_MIN_W, FDP1_MAX_W); height = round_down(pix->height, fmt->vsub); if (pix->field == V4L2_FIELD_ALTERNATE) pix->height = clamp(height, FDP1_MIN_H / 2, FDP1_MAX_H / 2); else pix->height = clamp(height, FDP1_MIN_H, FDP1_MAX_H); fdp1_compute_stride(pix, fmt); } static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx, const struct fdp1_fmt **fmtinfo, struct v4l2_pix_format_mplane *pix) { struct fdp1_q_data *src_data = &ctx->out_q; enum v4l2_colorspace colorspace; enum v4l2_ycbcr_encoding ycbcr_enc; enum v4l2_quantization quantization; const struct fdp1_fmt *fmt; bool allow_rgb; /* * Validate the pixel format. We can only accept RGB output formats if * the input encoding and quantization are compatible with the format * conversions supported by the hardware. 
The supported combinations are * * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_LIM_RANGE * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_FULL_RANGE * V4L2_YCBCR_ENC_709 + V4L2_QUANTIZATION_LIM_RANGE */ colorspace = src_data->format.colorspace; ycbcr_enc = src_data->format.ycbcr_enc; if (ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT) ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace); quantization = src_data->format.quantization; if (quantization == V4L2_QUANTIZATION_DEFAULT) quantization = V4L2_MAP_QUANTIZATION_DEFAULT(false, colorspace, ycbcr_enc); allow_rgb = ycbcr_enc == V4L2_YCBCR_ENC_601 || (ycbcr_enc == V4L2_YCBCR_ENC_709 && quantization == V4L2_QUANTIZATION_LIM_RANGE); fmt = fdp1_find_format(pix->pixelformat); if (!fmt || (!allow_rgb && fdp1_fmt_is_rgb(fmt))) fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV); if (fmtinfo) *fmtinfo = fmt; pix->pixelformat = fmt->fourcc; pix->num_planes = fmt->num_planes; pix->field = V4L2_FIELD_NONE; /* * The colorspace on the capture queue is copied from the output queue * as the hardware can't change the colorspace. It can convert YCbCr to * RGB though, in which case the encoding and quantization are set to * default values as anything else wouldn't make sense. */ pix->colorspace = src_data->format.colorspace; pix->xfer_func = src_data->format.xfer_func; if (fdp1_fmt_is_rgb(fmt)) { pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; pix->quantization = V4L2_QUANTIZATION_DEFAULT; } else { pix->ycbcr_enc = src_data->format.ycbcr_enc; pix->quantization = src_data->format.quantization; } /* * The frame width is identical to the output queue, and the height is * either doubled or identical depending on whether the output queue * field order contains one or two fields per frame. 
*/ pix->width = src_data->format.width; if (src_data->format.field == V4L2_FIELD_ALTERNATE) pix->height = 2 * src_data->format.height; else pix->height = src_data->format.height; fdp1_compute_stride(pix, fmt); } static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct fdp1_ctx *ctx = fh_to_ctx(priv); if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp); else fdp1_try_fmt_capture(ctx, NULL, &f->fmt.pix_mp); dprintk(ctx->fdp1, "Try %s format: %4.4s (0x%08x) %ux%u field %u\n", V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture", (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field); return 0; } static void fdp1_set_format(struct fdp1_ctx *ctx, struct v4l2_pix_format_mplane *pix, enum v4l2_buf_type type) { struct fdp1_q_data *q_data = get_q_data(ctx, type); const struct fdp1_fmt *fmtinfo; if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) fdp1_try_fmt_output(ctx, &fmtinfo, pix); else fdp1_try_fmt_capture(ctx, &fmtinfo, pix); q_data->fmt = fmtinfo; q_data->format = *pix; q_data->vsize = pix->height; if (pix->field != V4L2_FIELD_NONE) q_data->vsize /= 2; q_data->stride_y = pix->plane_fmt[0].bytesperline; q_data->stride_c = pix->plane_fmt[1].bytesperline; /* Adjust strides for interleaved buffers */ if (pix->field == V4L2_FIELD_INTERLACED || pix->field == V4L2_FIELD_INTERLACED_TB || pix->field == V4L2_FIELD_INTERLACED_BT) { q_data->stride_y *= 2; q_data->stride_c *= 2; } /* Propagate the format from the output node to the capture node. */ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { struct fdp1_q_data *dst_data = &ctx->cap_q; /* * Copy the format, clear the per-plane bytes per line and image * size, override the field and double the height if needed. 
*/ dst_data->format = q_data->format; memset(dst_data->format.plane_fmt, 0, sizeof(dst_data->format.plane_fmt)); dst_data->format.field = V4L2_FIELD_NONE; if (pix->field == V4L2_FIELD_ALTERNATE) dst_data->format.height *= 2; fdp1_try_fmt_capture(ctx, &dst_data->fmt, &dst_data->format); dst_data->vsize = dst_data->format.height; dst_data->stride_y = dst_data->format.plane_fmt[0].bytesperline; dst_data->stride_c = dst_data->format.plane_fmt[1].bytesperline; } } static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct fdp1_ctx *ctx = fh_to_ctx(priv); struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx; struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type); if (vb2_is_busy(vq)) { v4l2_err(&ctx->fdp1->v4l2_dev, "%s queue busy\n", __func__); return -EBUSY; } fdp1_set_format(ctx, &f->fmt.pix_mp, f->type); dprintk(ctx->fdp1, "Set %s format: %4.4s (0x%08x) %ux%u field %u\n", V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture", (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field); return 0; } static int fdp1_g_ctrl(struct v4l2_ctrl *ctrl) { struct fdp1_ctx *ctx = container_of(ctrl->handler, struct fdp1_ctx, hdl); struct fdp1_q_data *src_q_data = &ctx->out_q; switch (ctrl->id) { case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE: if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field)) ctrl->val = 2; else ctrl->val = 1; return 0; } return 1; } static int fdp1_s_ctrl(struct v4l2_ctrl *ctrl) { struct fdp1_ctx *ctx = container_of(ctrl->handler, struct fdp1_ctx, hdl); switch (ctrl->id) { case V4L2_CID_ALPHA_COMPONENT: ctx->alpha = ctrl->val; break; case V4L2_CID_DEINTERLACING_MODE: ctx->deint_mode = ctrl->val; break; } return 0; } static const struct v4l2_ctrl_ops fdp1_ctrl_ops = { .s_ctrl = fdp1_s_ctrl, .g_volatile_ctrl = fdp1_g_ctrl, }; static const char * const fdp1_ctrl_deint_menu[] = { "Progressive", "Adaptive 2D/3D", "Fixed 2D", "Fixed 3D", "Previous field", "Next field", NULL }; static const 
struct v4l2_ioctl_ops fdp1_ioctl_ops = { .vidioc_querycap = fdp1_vidioc_querycap, .vidioc_enum_fmt_vid_cap = fdp1_enum_fmt_vid_cap, .vidioc_enum_fmt_vid_out = fdp1_enum_fmt_vid_out, .vidioc_g_fmt_vid_cap_mplane = fdp1_g_fmt, .vidioc_g_fmt_vid_out_mplane = fdp1_g_fmt, .vidioc_try_fmt_vid_cap_mplane = fdp1_try_fmt, .vidioc_try_fmt_vid_out_mplane = fdp1_try_fmt, .vidioc_s_fmt_vid_cap_mplane = fdp1_s_fmt, .vidioc_s_fmt_vid_out_mplane = fdp1_s_fmt, .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf, .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs, .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; /* * Queue operations */ static int fdp1_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_ctxs[]) { struct fdp1_ctx *ctx = vb2_get_drv_priv(vq); struct fdp1_q_data *q_data; unsigned int i; q_data = get_q_data(ctx, vq->type); if (*nplanes) { if (*nplanes > FDP1_MAX_PLANES) return -EINVAL; return 0; } *nplanes = q_data->format.num_planes; for (i = 0; i < *nplanes; i++) sizes[i] = q_data->format.plane_fmt[i].sizeimage; return 0; } static void fdp1_buf_prepare_field(struct fdp1_q_data *q_data, struct vb2_v4l2_buffer *vbuf, unsigned int field_num) { struct fdp1_buffer *buf = to_fdp1_buffer(vbuf); struct fdp1_field_buffer *fbuf = &buf->fields[field_num]; unsigned int num_fields; unsigned int i; num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 
2 : 1; fbuf->vb = vbuf; fbuf->last_field = (field_num + 1) == num_fields; for (i = 0; i < vbuf->vb2_buf.num_planes; ++i) fbuf->addrs[i] = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, i); switch (vbuf->field) { case V4L2_FIELD_INTERLACED: /* * Interlaced means bottom-top for 60Hz TV standards (NTSC) and * top-bottom for 50Hz. As TV standards are not applicable to * the mem-to-mem API, use the height as a heuristic. */ fbuf->field = (q_data->format.height < 576) == field_num ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM; break; case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_SEQ_TB: fbuf->field = field_num ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP; break; case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_SEQ_BT: fbuf->field = field_num ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM; break; default: fbuf->field = vbuf->field; break; } /* Buffer is completed */ if (!field_num) return; /* Adjust buffer addresses for second field */ switch (vbuf->field) { case V4L2_FIELD_INTERLACED: case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: for (i = 0; i < vbuf->vb2_buf.num_planes; i++) fbuf->addrs[i] += (i == 0 ? q_data->stride_y : q_data->stride_c); break; case V4L2_FIELD_SEQ_TB: case V4L2_FIELD_SEQ_BT: for (i = 0; i < vbuf->vb2_buf.num_planes; i++) fbuf->addrs[i] += q_data->vsize * (i == 0 ? q_data->stride_y : q_data->stride_c); break; } } static int fdp1_buf_prepare(struct vb2_buffer *vb) { struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct fdp1_q_data *q_data = get_q_data(ctx, vb->vb2_queue->type); struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct fdp1_buffer *buf = to_fdp1_buffer(vbuf); unsigned int i; if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) { bool field_valid = true; /* Validate the buffer field. 
*/ switch (q_data->format.field) { case V4L2_FIELD_NONE: if (vbuf->field != V4L2_FIELD_NONE) field_valid = false; break; case V4L2_FIELD_ALTERNATE: if (vbuf->field != V4L2_FIELD_TOP && vbuf->field != V4L2_FIELD_BOTTOM) field_valid = false; break; case V4L2_FIELD_INTERLACED: case V4L2_FIELD_SEQ_TB: case V4L2_FIELD_SEQ_BT: case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: if (vbuf->field != q_data->format.field) field_valid = false; break; } if (!field_valid) { dprintk(ctx->fdp1, "buffer field %u invalid for format field %u\n", vbuf->field, q_data->format.field); return -EINVAL; } } else { vbuf->field = V4L2_FIELD_NONE; } /* Validate the planes sizes. */ for (i = 0; i < q_data->format.num_planes; i++) { unsigned long size = q_data->format.plane_fmt[i].sizeimage; if (vb2_plane_size(vb, i) < size) { dprintk(ctx->fdp1, "data will not fit into plane [%u/%u] (%lu < %lu)\n", i, q_data->format.num_planes, vb2_plane_size(vb, i), size); return -EINVAL; } /* We have known size formats all around */ vb2_set_plane_payload(vb, i, size); } buf->num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1; for (i = 0; i < buf->num_fields; ++i) fdp1_buf_prepare_field(q_data, vbuf, i); return 0; } static void fdp1_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf); } static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count) { struct fdp1_ctx *ctx = vb2_get_drv_priv(q); struct fdp1_q_data *q_data = get_q_data(ctx, q->type); if (V4L2_TYPE_IS_OUTPUT(q->type)) { /* * Force our deint_mode when we are progressive, * ignoring any setting on the device from the user, * Otherwise, lock in the requested de-interlace mode. 
*/ if (q_data->format.field == V4L2_FIELD_NONE) ctx->deint_mode = FDP1_PROGRESSIVE; if (ctx->deint_mode == FDP1_ADAPT2D3D) { u32 stride; dma_addr_t smsk_base; const u32 bpp = 2; /* bytes per pixel */ stride = round_up(q_data->format.width, 8); ctx->smsk_size = bpp * stride * q_data->vsize; ctx->smsk_cpu = dma_alloc_coherent(ctx->fdp1->dev, ctx->smsk_size, &smsk_base, GFP_KERNEL); if (ctx->smsk_cpu == NULL) { dprintk(ctx->fdp1, "Failed to alloc smsk\n"); return -ENOMEM; } ctx->smsk_addr[0] = smsk_base; ctx->smsk_addr[1] = smsk_base + (ctx->smsk_size/2); } } return 0; } static void fdp1_stop_streaming(struct vb2_queue *q) { struct fdp1_ctx *ctx = vb2_get_drv_priv(q); struct vb2_v4l2_buffer *vbuf; unsigned long flags; while (1) { if (V4L2_TYPE_IS_OUTPUT(q->type)) vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); else vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); if (vbuf == NULL) break; spin_lock_irqsave(&ctx->fdp1->irqlock, flags); v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR); spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags); } /* Empty Output queues */ if (V4L2_TYPE_IS_OUTPUT(q->type)) { /* Empty our internal queues */ struct fdp1_field_buffer *fbuf; /* Free any queued buffers */ fbuf = fdp1_dequeue_field(ctx); while (fbuf != NULL) { fdp1_field_complete(ctx, fbuf); fbuf = fdp1_dequeue_field(ctx); } /* Free smsk_data */ if (ctx->smsk_cpu) { dma_free_coherent(ctx->fdp1->dev, ctx->smsk_size, ctx->smsk_cpu, ctx->smsk_addr[0]); ctx->smsk_addr[0] = ctx->smsk_addr[1] = 0; ctx->smsk_cpu = NULL; } WARN(!list_empty(&ctx->fields_queue), "Buffer queue not empty"); } else { /* Empty Capture queues (Jobs) */ struct fdp1_job *job; job = get_queued_job(ctx->fdp1); while (job) { if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) fdp1_field_complete(ctx, job->previous); else fdp1_field_complete(ctx, job->active); v4l2_m2m_buf_done(job->dst->vb, VB2_BUF_STATE_ERROR); job->dst = NULL; job = get_queued_job(ctx->fdp1); } /* Free any held buffer in the ctx */ fdp1_field_complete(ctx, 
ctx->previous); WARN(!list_empty(&ctx->fdp1->queued_job_list), "Queued Job List not empty"); WARN(!list_empty(&ctx->fdp1->hw_job_list), "HW Job list not empty"); } } static const struct vb2_ops fdp1_qops = { .queue_setup = fdp1_queue_setup, .buf_prepare = fdp1_buf_prepare, .buf_queue = fdp1_buf_queue, .start_streaming = fdp1_start_streaming, .stop_streaming = fdp1_stop_streaming, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct fdp1_ctx *ctx = priv; int ret; src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct fdp1_buffer); src_vq->ops = &fdp1_qops; src_vq->mem_ops = &vb2_dma_contig_memops; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; src_vq->lock = &ctx->fdp1->dev_mutex; src_vq->dev = ctx->fdp1->dev; ret = vb2_queue_init(src_vq); if (ret) return ret; dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct fdp1_buffer); dst_vq->ops = &fdp1_qops; dst_vq->mem_ops = &vb2_dma_contig_memops; dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; dst_vq->lock = &ctx->fdp1->dev_mutex; dst_vq->dev = ctx->fdp1->dev; return vb2_queue_init(dst_vq); } /* * File operations */ static int fdp1_open(struct file *file) { struct fdp1_dev *fdp1 = video_drvdata(file); struct v4l2_pix_format_mplane format; struct fdp1_ctx *ctx = NULL; struct v4l2_ctrl *ctrl; int ret = 0; if (mutex_lock_interruptible(&fdp1->dev_mutex)) return -ERESTARTSYS; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) { ret = -ENOMEM; goto done; } v4l2_fh_init(&ctx->fh, video_devdata(file)); file->private_data = &ctx->fh; ctx->fdp1 = fdp1; /* Initialise Queues */ INIT_LIST_HEAD(&ctx->fields_queue); ctx->translen = 1; ctx->sequence = 0; /* Initialise 
controls */ v4l2_ctrl_handler_init(&ctx->hdl, 3); v4l2_ctrl_new_std_menu_items(&ctx->hdl, &fdp1_ctrl_ops, V4L2_CID_DEINTERLACING_MODE, FDP1_NEXTFIELD, BIT(0), FDP1_FIXED3D, fdp1_ctrl_deint_menu); ctrl = v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops, V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1); if (ctrl) ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE; v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255); if (ctx->hdl.error) { ret = ctx->hdl.error; goto error_ctx; } ctx->fh.ctrl_handler = &ctx->hdl; v4l2_ctrl_handler_setup(&ctx->hdl); /* Configure default parameters. */ memset(&format, 0, sizeof(format)); fdp1_set_format(ctx, &format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fdp1->m2m_dev, ctx, &queue_init); if (IS_ERR(ctx->fh.m2m_ctx)) { ret = PTR_ERR(ctx->fh.m2m_ctx); goto error_ctx; } /* Perform any power management required */ ret = pm_runtime_resume_and_get(fdp1->dev); if (ret < 0) goto error_pm; v4l2_fh_add(&ctx->fh); dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n", ctx, ctx->fh.m2m_ctx); mutex_unlock(&fdp1->dev_mutex); return 0; error_pm: v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); error_ctx: v4l2_ctrl_handler_free(&ctx->hdl); kfree(ctx); done: mutex_unlock(&fdp1->dev_mutex); return ret; } static int fdp1_release(struct file *file) { struct fdp1_dev *fdp1 = video_drvdata(file); struct fdp1_ctx *ctx = fh_to_ctx(file->private_data); dprintk(fdp1, "Releasing instance %p\n", ctx); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); v4l2_ctrl_handler_free(&ctx->hdl); mutex_lock(&fdp1->dev_mutex); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); mutex_unlock(&fdp1->dev_mutex); kfree(ctx); pm_runtime_put(fdp1->dev); return 0; } static const struct v4l2_file_operations fdp1_fops = { .owner = THIS_MODULE, .open = fdp1_open, .release = fdp1_release, .poll = v4l2_m2m_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = v4l2_m2m_fop_mmap, }; static const struct video_device fdp1_videodev = { .name = DRIVER_NAME, .vfl_dir = 
VFL_DIR_M2M, .fops = &fdp1_fops, .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING, .ioctl_ops = &fdp1_ioctl_ops, .minor = -1, .release = video_device_release_empty, }; static const struct v4l2_m2m_ops m2m_ops = { .device_run = fdp1_m2m_device_run, .job_ready = fdp1_m2m_job_ready, .job_abort = fdp1_m2m_job_abort, }; static irqreturn_t fdp1_irq_handler(int irq, void *dev_id) { struct fdp1_dev *fdp1 = dev_id; u32 int_status; u32 ctl_status; u32 vint_cnt; u32 cycles; int_status = fdp1_read(fdp1, FD1_CTL_IRQSTA); cycles = fdp1_read(fdp1, FD1_CTL_VCYCLE_STAT); ctl_status = fdp1_read(fdp1, FD1_CTL_STATUS); vint_cnt = (ctl_status & FD1_CTL_STATUS_VINT_CNT_MASK) >> FD1_CTL_STATUS_VINT_CNT_SHIFT; /* Clear interrupts */ fdp1_write(fdp1, ~(int_status) & FD1_CTL_IRQ_MASK, FD1_CTL_IRQSTA); if (debug >= 2) { dprintk(fdp1, "IRQ: 0x%x %s%s%s\n", int_status, int_status & FD1_CTL_IRQ_VERE ? "[Error]" : "[!E]", int_status & FD1_CTL_IRQ_VINTE ? "[VSync]" : "[!V]", int_status & FD1_CTL_IRQ_FREE ? "[FrameEnd]" : "[!F]"); dprintk(fdp1, "CycleStatus = %d (%dms)\n", cycles, cycles/(fdp1->clk_rate/1000)); dprintk(fdp1, "Control Status = 0x%08x : VINT_CNT = %d %s:%s:%s:%s\n", ctl_status, vint_cnt, ctl_status & FD1_CTL_STATUS_SGREGSET ? "RegSet" : "", ctl_status & FD1_CTL_STATUS_SGVERR ? "Vsync Error" : "", ctl_status & FD1_CTL_STATUS_SGFREND ? "FrameEnd" : "", ctl_status & FD1_CTL_STATUS_BSY ? 
"Busy" : ""); dprintk(fdp1, "***********************************\n"); } /* Spurious interrupt */ if (!(FD1_CTL_IRQ_MASK & int_status)) return IRQ_NONE; /* Work completed, release the frame */ if (FD1_CTL_IRQ_VERE & int_status) device_frame_end(fdp1, VB2_BUF_STATE_ERROR); else if (FD1_CTL_IRQ_FREE & int_status) device_frame_end(fdp1, VB2_BUF_STATE_DONE); return IRQ_HANDLED; } static int fdp1_probe(struct platform_device *pdev) { struct fdp1_dev *fdp1; struct video_device *vfd; struct device_node *fcp_node; struct clk *clk; unsigned int i; int ret; int hw_version; fdp1 = devm_kzalloc(&pdev->dev, sizeof(*fdp1), GFP_KERNEL); if (!fdp1) return -ENOMEM; INIT_LIST_HEAD(&fdp1->free_job_list); INIT_LIST_HEAD(&fdp1->queued_job_list); INIT_LIST_HEAD(&fdp1->hw_job_list); /* Initialise the jobs on the free list */ for (i = 0; i < ARRAY_SIZE(fdp1->jobs); i++) list_add(&fdp1->jobs[i].list, &fdp1->free_job_list); mutex_init(&fdp1->dev_mutex); spin_lock_init(&fdp1->irqlock); spin_lock_init(&fdp1->device_process_lock); fdp1->dev = &pdev->dev; platform_set_drvdata(pdev, fdp1); /* Memory-mapped registers */ fdp1->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(fdp1->regs)) return PTR_ERR(fdp1->regs); /* Interrupt service routine registration */ ret = platform_get_irq(pdev, 0); if (ret < 0) return ret; fdp1->irq = ret; ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0, dev_name(&pdev->dev), fdp1); if (ret) { dev_err(&pdev->dev, "cannot claim IRQ %d\n", fdp1->irq); return ret; } /* FCP */ fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0); if (fcp_node) { fdp1->fcp = rcar_fcp_get(fcp_node); of_node_put(fcp_node); if (IS_ERR(fdp1->fcp)) { dev_dbg(&pdev->dev, "FCP not found (%ld)\n", PTR_ERR(fdp1->fcp)); return PTR_ERR(fdp1->fcp); } } /* Determine our clock rate */ clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { ret = PTR_ERR(clk); goto put_dev; } fdp1->clk_rate = clk_get_rate(clk); clk_put(clk); /* V4L2 device registration */ ret = 
v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev); if (ret) { v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n"); goto put_dev; } /* M2M registration */ fdp1->m2m_dev = v4l2_m2m_init(&m2m_ops); if (IS_ERR(fdp1->m2m_dev)) { v4l2_err(&fdp1->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(fdp1->m2m_dev); goto unreg_dev; } /* Video registration */ fdp1->vfd = fdp1_videodev; vfd = &fdp1->vfd; vfd->lock = &fdp1->dev_mutex; vfd->v4l2_dev = &fdp1->v4l2_dev; video_set_drvdata(vfd, fdp1); strscpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name)); ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n"); goto release_m2m; } v4l2_info(&fdp1->v4l2_dev, "Device registered as /dev/video%d\n", vfd->num); /* Power up the cells to read HW */ pm_runtime_enable(&pdev->dev); ret = pm_runtime_resume_and_get(fdp1->dev); if (ret < 0) goto disable_pm; hw_version = fdp1_read(fdp1, FD1_IP_INTDATA); switch (hw_version) { case FD1_IP_GEN2: dprintk(fdp1, "FDP1 Version R-Car Gen2\n"); break; case FD1_IP_M3W: dprintk(fdp1, "FDP1 Version R-Car M3-W\n"); break; case FD1_IP_H3: dprintk(fdp1, "FDP1 Version R-Car H3\n"); break; case FD1_IP_M3N: dprintk(fdp1, "FDP1 Version R-Car M3-N\n"); break; case FD1_IP_E3: dprintk(fdp1, "FDP1 Version R-Car E3\n"); break; default: dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n", hw_version); } /* Allow the hw to sleep until an open call puts it to use */ pm_runtime_put(fdp1->dev); return 0; disable_pm: pm_runtime_disable(fdp1->dev); release_m2m: v4l2_m2m_release(fdp1->m2m_dev); unreg_dev: v4l2_device_unregister(&fdp1->v4l2_dev); put_dev: rcar_fcp_put(fdp1->fcp); return ret; } static void fdp1_remove(struct platform_device *pdev) { struct fdp1_dev *fdp1 = platform_get_drvdata(pdev); v4l2_m2m_release(fdp1->m2m_dev); video_unregister_device(&fdp1->vfd); v4l2_device_unregister(&fdp1->v4l2_dev); pm_runtime_disable(&pdev->dev); rcar_fcp_put(fdp1->fcp); } static int 
__maybe_unused fdp1_pm_runtime_suspend(struct device *dev) { struct fdp1_dev *fdp1 = dev_get_drvdata(dev); rcar_fcp_disable(fdp1->fcp); return 0; } static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev) { struct fdp1_dev *fdp1 = dev_get_drvdata(dev); /* Program in the static LUTs */ fdp1_set_lut(fdp1); return rcar_fcp_enable(fdp1->fcp); } static const struct dev_pm_ops fdp1_pm_ops = { SET_RUNTIME_PM_OPS(fdp1_pm_runtime_suspend, fdp1_pm_runtime_resume, NULL) }; static const struct of_device_id fdp1_dt_ids[] = { { .compatible = "renesas,fdp1" }, { }, }; MODULE_DEVICE_TABLE(of, fdp1_dt_ids); static struct platform_driver fdp1_pdrv = { .probe = fdp1_probe, .remove_new = fdp1_remove, .driver = { .name = DRIVER_NAME, .of_match_table = fdp1_dt_ids, .pm = &fdp1_pm_ops, }, }; module_platform_driver(fdp1_pdrv); MODULE_DESCRIPTION("Renesas R-Car Fine Display Processor Driver"); MODULE_AUTHOR("Kieran Bingham <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME);
linux-master
drivers/media/platform/renesas/rcar_fdp1.c
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Renesas R-Car VIN * * Copyright (C) 2016 Renesas Electronics Corp. * Copyright (C) 2011-2013 Renesas Solutions Corp. * Copyright (C) 2013 Cogent Embedded, Inc., <[email protected]> * Copyright (C) 2008 Magnus Damm * * Based on the soc-camera rcar_vin driver */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> #include <media/videobuf2-dma-contig.h> #include "rcar-vin.h" /* ----------------------------------------------------------------------------- * HW Functions */ /* Register offsets for R-Car VIN */ #define VNMC_REG 0x00 /* Video n Main Control Register */ #define VNMS_REG 0x04 /* Video n Module Status Register */ #define VNFC_REG 0x08 /* Video n Frame Capture Register */ #define VNSLPRC_REG 0x0C /* Video n Start Line Pre-Clip Register */ #define VNELPRC_REG 0x10 /* Video n End Line Pre-Clip Register */ #define VNSPPRC_REG 0x14 /* Video n Start Pixel Pre-Clip Register */ #define VNEPPRC_REG 0x18 /* Video n End Pixel Pre-Clip Register */ #define VNIS_REG 0x2C /* Video n Image Stride Register */ #define VNMB_REG(m) (0x30 + ((m) << 2)) /* Video n Memory Base m Register */ #define VNIE_REG 0x40 /* Video n Interrupt Enable Register */ #define VNINTS_REG 0x44 /* Video n Interrupt Status Register */ #define VNSI_REG 0x48 /* Video n Scanline Interrupt Register */ #define VNMTC_REG 0x4C /* Video n Memory Transfer Control Register */ #define VNDMR_REG 0x58 /* Video n Data Mode Register */ #define VNDMR2_REG 0x5C /* Video n Data Mode Register 2 */ #define VNUVAOF_REG 0x60 /* Video n UV Address Offset Register */ /* Register offsets specific for Gen2 */ #define VNSLPOC_REG 0x1C /* Video n Start Line Post-Clip Register */ #define VNELPOC_REG 0x20 /* Video n End Line Post-Clip Register */ #define VNSPPOC_REG 0x24 /* Video n Start Pixel Post-Clip Register */ #define VNEPPOC_REG 0x28 /* Video n End Pixel Post-Clip Register */ #define VNYS_REG 0x50 /* Video n Y Scale Register */ #define 
VNXS_REG 0x54 /* Video n X Scale Register */ #define VNC1A_REG 0x80 /* Video n Coefficient Set C1A Register */ #define VNC1B_REG 0x84 /* Video n Coefficient Set C1B Register */ #define VNC1C_REG 0x88 /* Video n Coefficient Set C1C Register */ #define VNC2A_REG 0x90 /* Video n Coefficient Set C2A Register */ #define VNC2B_REG 0x94 /* Video n Coefficient Set C2B Register */ #define VNC2C_REG 0x98 /* Video n Coefficient Set C2C Register */ #define VNC3A_REG 0xA0 /* Video n Coefficient Set C3A Register */ #define VNC3B_REG 0xA4 /* Video n Coefficient Set C3B Register */ #define VNC3C_REG 0xA8 /* Video n Coefficient Set C3C Register */ #define VNC4A_REG 0xB0 /* Video n Coefficient Set C4A Register */ #define VNC4B_REG 0xB4 /* Video n Coefficient Set C4B Register */ #define VNC4C_REG 0xB8 /* Video n Coefficient Set C4C Register */ #define VNC5A_REG 0xC0 /* Video n Coefficient Set C5A Register */ #define VNC5B_REG 0xC4 /* Video n Coefficient Set C5B Register */ #define VNC5C_REG 0xC8 /* Video n Coefficient Set C5C Register */ #define VNC6A_REG 0xD0 /* Video n Coefficient Set C6A Register */ #define VNC6B_REG 0xD4 /* Video n Coefficient Set C6B Register */ #define VNC6C_REG 0xD8 /* Video n Coefficient Set C6C Register */ #define VNC7A_REG 0xE0 /* Video n Coefficient Set C7A Register */ #define VNC7B_REG 0xE4 /* Video n Coefficient Set C7B Register */ #define VNC7C_REG 0xE8 /* Video n Coefficient Set C7C Register */ #define VNC8A_REG 0xF0 /* Video n Coefficient Set C8A Register */ #define VNC8B_REG 0xF4 /* Video n Coefficient Set C8B Register */ #define VNC8C_REG 0xF8 /* Video n Coefficient Set C8C Register */ /* Register offsets specific for Gen3 */ #define VNCSI_IFMD_REG 0x20 /* Video n CSI2 Interface Mode Register */ #define VNUDS_CTRL_REG 0x80 /* Video n scaling control register */ #define VNUDS_SCALE_REG 0x84 /* Video n scaling factor register */ #define VNUDS_PASS_BWIDTH_REG 0x90 /* Video n passband register */ #define VNUDS_CLIP_SIZE_REG 0xa4 /* Video n UDS output 
size clipping reg */ /* Register bit fields for R-Car VIN */ /* Video n Main Control Register bits */ #define VNMC_INF_MASK (7 << 16) #define VNMC_DPINE (1 << 27) /* Gen3 specific */ #define VNMC_SCLE (1 << 26) /* Gen3 specific */ #define VNMC_FOC (1 << 21) #define VNMC_YCAL (1 << 19) #define VNMC_INF_YUV8_BT656 (0 << 16) #define VNMC_INF_YUV8_BT601 (1 << 16) #define VNMC_INF_YUV10_BT656 (2 << 16) #define VNMC_INF_YUV10_BT601 (3 << 16) #define VNMC_INF_RAW8 (4 << 16) #define VNMC_INF_YUV16 (5 << 16) #define VNMC_INF_RGB888 (6 << 16) #define VNMC_INF_RGB666 (7 << 16) #define VNMC_VUP (1 << 10) #define VNMC_IM_ODD (0 << 3) #define VNMC_IM_ODD_EVEN (1 << 3) #define VNMC_IM_EVEN (2 << 3) #define VNMC_IM_FULL (3 << 3) #define VNMC_BPS (1 << 1) #define VNMC_ME (1 << 0) /* Video n Module Status Register bits */ #define VNMS_FBS_MASK (3 << 3) #define VNMS_FBS_SHIFT 3 #define VNMS_FS (1 << 2) #define VNMS_AV (1 << 1) #define VNMS_CA (1 << 0) /* Video n Frame Capture Register bits */ #define VNFC_C_FRAME (1 << 1) #define VNFC_S_FRAME (1 << 0) /* Video n Interrupt Enable Register bits */ #define VNIE_FIE (1 << 4) #define VNIE_EFE (1 << 1) /* Video n Interrupt Status Register bits */ #define VNINTS_FIS (1 << 4) /* Video n Data Mode Register bits */ #define VNDMR_A8BIT(n) (((n) & 0xff) << 24) #define VNDMR_A8BIT_MASK (0xff << 24) #define VNDMR_YMODE_Y8 (1 << 12) #define VNDMR_EXRGB (1 << 8) #define VNDMR_BPSM (1 << 4) #define VNDMR_ABIT (1 << 2) #define VNDMR_DTMD_YCSEP (1 << 1) #define VNDMR_DTMD_ARGB (1 << 0) #define VNDMR_DTMD_YCSEP_420 (3 << 0) /* Video n Data Mode Register 2 bits */ #define VNDMR2_VPS (1 << 30) #define VNDMR2_HPS (1 << 29) #define VNDMR2_CES (1 << 28) #define VNDMR2_YDS (1 << 22) #define VNDMR2_FTEV (1 << 17) #define VNDMR2_VLV(n) ((n & 0xf) << 12) /* Video n CSI2 Interface Mode Register (Gen3) */ #define VNCSI_IFMD_DES1 (1 << 26) #define VNCSI_IFMD_DES0 (1 << 25) #define VNCSI_IFMD_CSI_CHSEL(n) (((n) & 0xf) << 0) /* Video n scaling control register (Gen3) 
*/ #define VNUDS_CTRL_AMD (1 << 30) struct rvin_buffer { struct vb2_v4l2_buffer vb; struct list_head list; }; #define to_buf_list(vb2_buffer) (&container_of(vb2_buffer, \ struct rvin_buffer, \ vb)->list) static void rvin_write(struct rvin_dev *vin, u32 value, u32 offset) { iowrite32(value, vin->base + offset); } static u32 rvin_read(struct rvin_dev *vin, u32 offset) { return ioread32(vin->base + offset); } /* ----------------------------------------------------------------------------- * Crop and Scaling */ static bool rvin_scaler_needed(const struct rvin_dev *vin) { return !(vin->crop.width == vin->format.width && vin->compose.width == vin->format.width && vin->crop.height == vin->format.height && vin->compose.height == vin->format.height); } struct vin_coeff { unsigned short xs_value; u32 coeff_set[24]; }; static const struct vin_coeff vin_coeff_set[] = { { 0x0000, { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, }, { 0x1000, { 0x000fa400, 0x000fa400, 0x09625902, 0x000003f8, 0x00000403, 0x3de0d9f0, 0x001fffed, 0x00000804, 0x3cc1f9c3, 0x001003de, 0x00000c01, 0x3cb34d7f, 0x002003d2, 0x00000c00, 0x3d24a92d, 0x00200bca, 0x00000bff, 0x3df600d2, 0x002013cc, 0x000007ff, 0x3ed70c7e, 0x00100fde, 0x00000000, 0x3f87c036 }, }, { 0x1200, { 0x002ffff1, 0x002ffff1, 0x02a0a9c8, 0x002003e7, 0x001ffffa, 0x000185bc, 0x002007dc, 0x000003ff, 0x3e52859c, 0x00200bd4, 0x00000002, 0x3d53996b, 0x00100fd0, 0x00000403, 0x3d04ad2d, 0x00000bd5, 0x00000403, 0x3d35ace7, 0x3ff003e4, 0x00000801, 0x3dc674a1, 0x3fffe800, 0x00000800, 0x3e76f461 }, }, { 0x1400, { 0x00100be3, 0x00100be3, 0x04d1359a, 0x00000fdb, 0x002003ed, 0x0211fd93, 0x00000fd6, 0x002003f4, 0x0002d97b, 0x000007d6, 0x002ffffb, 0x3e93b956, 0x3ff003da, 0x001003ff, 0x3db49926, 0x3fffefe9, 
0x00100001, 0x3d655cee, 0x3fffd400, 0x00000003, 0x3d65f4b6, 0x000fb421, 0x00000402, 0x3dc6547e }, }, { 0x1600, { 0x00000bdd, 0x00000bdd, 0x06519578, 0x3ff007da, 0x00000be3, 0x03c24973, 0x3ff003d9, 0x00000be9, 0x01b30d5f, 0x3ffff7df, 0x001003f1, 0x0003c542, 0x000fdfec, 0x001003f7, 0x3ec4711d, 0x000fc400, 0x002ffffd, 0x3df504f1, 0x001fa81a, 0x002ffc00, 0x3d957cc2, 0x002f8c3c, 0x00100000, 0x3db5c891 }, }, { 0x1800, { 0x3ff003dc, 0x3ff003dc, 0x0791e558, 0x000ff7dd, 0x3ff007de, 0x05328554, 0x000fe7e3, 0x3ff00be2, 0x03232546, 0x000fd7ee, 0x000007e9, 0x0143bd30, 0x001fb800, 0x000007ee, 0x00044511, 0x002fa015, 0x000007f4, 0x3ef4bcee, 0x002f8832, 0x001003f9, 0x3e4514c7, 0x001f7853, 0x001003fd, 0x3de54c9f }, }, { 0x1a00, { 0x000fefe0, 0x000fefe0, 0x08721d3c, 0x001fdbe7, 0x000ffbde, 0x0652a139, 0x001fcbf0, 0x000003df, 0x0463292e, 0x002fb3ff, 0x3ff007e3, 0x0293a91d, 0x002f9c12, 0x3ff00be7, 0x01241905, 0x001f8c29, 0x000007ed, 0x3fe470eb, 0x000f7c46, 0x000007f2, 0x3f04b8ca, 0x3fef7865, 0x000007f6, 0x3e74e4a8 }, }, { 0x1c00, { 0x001fd3e9, 0x001fd3e9, 0x08f23d26, 0x002fbff3, 0x001fe3e4, 0x0712ad23, 0x002fa800, 0x000ff3e0, 0x05631d1b, 0x001f9810, 0x000ffbe1, 0x03b3890d, 0x000f8c23, 0x000003e3, 0x0233e8fa, 0x3fef843b, 0x000003e7, 0x00f430e4, 0x3fbf8456, 0x3ff00bea, 0x00046cc8, 0x3f8f8c72, 0x3ff00bef, 0x3f3490ac }, }, { 0x1e00, { 0x001fbbf4, 0x001fbbf4, 0x09425112, 0x001fa800, 0x002fc7ed, 0x0792b110, 0x000f980e, 0x001fdbe6, 0x0613110a, 0x3fff8c20, 0x001fe7e3, 0x04a368fd, 0x3fcf8c33, 0x000ff7e2, 0x0343b8ed, 0x3f9f8c4a, 0x000fffe3, 0x0203f8da, 0x3f5f9c61, 0x000003e6, 0x00e428c5, 0x3f1fb07b, 0x000003eb, 0x3fe440af }, }, { 0x2000, { 0x000fa400, 0x000fa400, 0x09625902, 0x3fff980c, 0x001fb7f5, 0x0812b0ff, 0x3fdf901c, 0x001fc7ed, 0x06b2fcfa, 0x3faf902d, 0x001fd3e8, 0x055348f1, 0x3f7f983f, 0x001fe3e5, 0x04038ce3, 0x3f3fa454, 0x001fefe3, 0x02e3c8d1, 0x3f0fb86a, 0x001ff7e4, 0x01c3e8c0, 0x3ecfd880, 0x000fffe6, 0x00c404ac }, }, { 0x2200, { 0x3fdf9c0b, 0x3fdf9c0b, 0x09725cf4, 0x3fbf9818, 
0x3fffa400, 0x0842a8f1, 0x3f8f9827, 0x000fb3f7, 0x0702f0ec, 0x3f5fa037, 0x000fc3ef, 0x05d330e4, 0x3f2fac49, 0x001fcfea, 0x04a364d9, 0x3effc05c, 0x001fdbe7, 0x038394ca, 0x3ecfdc6f, 0x001fe7e6, 0x0273b0bb, 0x3ea00083, 0x001fefe6, 0x0183c0a9 }, }, { 0x2400, { 0x3f9fa014, 0x3f9fa014, 0x098260e6, 0x3f7f9c23, 0x3fcf9c0a, 0x08629ce5, 0x3f4fa431, 0x3fefa400, 0x0742d8e1, 0x3f1fb440, 0x3fffb3f8, 0x062310d9, 0x3eefc850, 0x000fbbf2, 0x050340d0, 0x3ecfe062, 0x000fcbec, 0x041364c2, 0x3ea00073, 0x001fd3ea, 0x03037cb5, 0x3e902086, 0x001fdfe8, 0x022388a5 }, }, { 0x2600, { 0x3f5fa81e, 0x3f5fa81e, 0x096258da, 0x3f3fac2b, 0x3f8fa412, 0x088290d8, 0x3f0fbc38, 0x3fafa408, 0x0772c8d5, 0x3eefcc47, 0x3fcfa800, 0x0672f4ce, 0x3ecfe456, 0x3fefaffa, 0x05531cc6, 0x3eb00066, 0x3fffbbf3, 0x047334bb, 0x3ea01c77, 0x000fc7ee, 0x039348ae, 0x3ea04486, 0x000fd3eb, 0x02b350a1 }, }, { 0x2800, { 0x3f2fb426, 0x3f2fb426, 0x094250ce, 0x3f0fc032, 0x3f4fac1b, 0x086284cd, 0x3eefd040, 0x3f7fa811, 0x0782acc9, 0x3ecfe84c, 0x3f9fa807, 0x06a2d8c4, 0x3eb0005b, 0x3fbfac00, 0x05b2f4bc, 0x3eb0186a, 0x3fdfb3fa, 0x04c308b4, 0x3eb04077, 0x3fefbbf4, 0x03f31ca8, 0x3ec06884, 0x000fbff2, 0x03031c9e }, }, { 0x2a00, { 0x3f0fc42d, 0x3f0fc42d, 0x090240c4, 0x3eefd439, 0x3f2fb822, 0x08526cc2, 0x3edfe845, 0x3f4fb018, 0x078294bf, 0x3ec00051, 0x3f6fac0f, 0x06b2b4bb, 0x3ec0185f, 0x3f8fac07, 0x05e2ccb4, 0x3ec0386b, 0x3fafac00, 0x0502e8ac, 0x3ed05c77, 0x3fcfb3fb, 0x0432f0a3, 0x3ef08482, 0x3fdfbbf6, 0x0372f898 }, }, { 0x2c00, { 0x3eefdc31, 0x3eefdc31, 0x08e238b8, 0x3edfec3d, 0x3f0fc828, 0x082258b9, 0x3ed00049, 0x3f1fc01e, 0x077278b6, 0x3ed01455, 0x3f3fb815, 0x06c294b2, 0x3ed03460, 0x3f5fb40d, 0x0602acac, 0x3ef0506c, 0x3f7fb006, 0x0542c0a4, 0x3f107476, 0x3f9fb400, 0x0472c89d, 0x3f309c80, 0x3fbfb7fc, 0x03b2cc94 }, }, { 0x2e00, { 0x3eefec37, 0x3eefec37, 0x088220b0, 0x3ee00041, 0x3effdc2d, 0x07f244ae, 0x3ee0144c, 0x3f0fd023, 0x07625cad, 0x3ef02c57, 0x3f1fc81a, 0x06c274a9, 0x3f004861, 0x3f3fbc13, 0x060288a6, 0x3f20686b, 0x3f5fb80c, 0x05529c9e, 
0x3f408c74, 0x3f6fb805, 0x04b2ac96, 0x3f80ac7e, 0x3f8fb800, 0x0402ac8e }, }, { 0x3000, { 0x3ef0003a, 0x3ef0003a, 0x084210a6, 0x3ef01045, 0x3effec32, 0x07b228a7, 0x3f00284e, 0x3f0fdc29, 0x073244a4, 0x3f104058, 0x3f0fd420, 0x06a258a2, 0x3f305c62, 0x3f2fc818, 0x0612689d, 0x3f508069, 0x3f3fc011, 0x05728496, 0x3f80a072, 0x3f4fc00a, 0x04d28c90, 0x3fc0c07b, 0x3f6fbc04, 0x04429088 }, }, { 0x3200, { 0x3f00103e, 0x3f00103e, 0x07f1fc9e, 0x3f102447, 0x3f000035, 0x0782149d, 0x3f203c4f, 0x3f0ff02c, 0x07122c9c, 0x3f405458, 0x3f0fe424, 0x06924099, 0x3f607061, 0x3f1fd41d, 0x06024c97, 0x3f909068, 0x3f2fcc16, 0x05726490, 0x3fc0b070, 0x3f3fc80f, 0x04f26c8a, 0x0000d077, 0x3f4fc409, 0x04627484 }, }, { 0x3400, { 0x3f202040, 0x3f202040, 0x07a1e898, 0x3f303449, 0x3f100c38, 0x0741fc98, 0x3f504c50, 0x3f10002f, 0x06e21495, 0x3f706459, 0x3f1ff028, 0x06722492, 0x3fa08060, 0x3f1fe421, 0x05f2348f, 0x3fd09c67, 0x3f1fdc19, 0x05824c89, 0x0000bc6e, 0x3f2fd014, 0x04f25086, 0x0040dc74, 0x3f3fcc0d, 0x04825c7f }, }, { 0x3600, { 0x3f403042, 0x3f403042, 0x0761d890, 0x3f504848, 0x3f301c3b, 0x0701f090, 0x3f805c50, 0x3f200c33, 0x06a2008f, 0x3fa07458, 0x3f10002b, 0x06520c8d, 0x3fd0905e, 0x3f1ff424, 0x05e22089, 0x0000ac65, 0x3f1fe81d, 0x05823483, 0x0030cc6a, 0x3f2fdc18, 0x04f23c81, 0x0080e871, 0x3f2fd412, 0x0482407c }, }, { 0x3800, { 0x3f604043, 0x3f604043, 0x0721c88a, 0x3f80544a, 0x3f502c3c, 0x06d1d88a, 0x3fb06851, 0x3f301c35, 0x0681e889, 0x3fd08456, 0x3f30082f, 0x0611fc88, 0x00009c5d, 0x3f200027, 0x05d20884, 0x0030b863, 0x3f2ff421, 0x05621880, 0x0070d468, 0x3f2fe81b, 0x0502247c, 0x00c0ec6f, 0x3f2fe015, 0x04a22877 }, }, { 0x3a00, { 0x3f904c44, 0x3f904c44, 0x06e1b884, 0x3fb0604a, 0x3f70383e, 0x0691c885, 0x3fe07451, 0x3f502c36, 0x0661d483, 0x00009055, 0x3f401831, 0x0601ec81, 0x0030a85b, 0x3f300c2a, 0x05b1f480, 0x0070c061, 0x3f300024, 0x0562047a, 0x00b0d867, 0x3f3ff41e, 0x05020c77, 0x00f0f46b, 0x3f2fec19, 0x04a21474 }, }, { 0x3c00, { 0x3fb05c43, 0x3fb05c43, 0x06c1b07e, 0x3fe06c4b, 0x3f902c3f, 0x0681c081, 
0x0000844f, 0x3f703838, 0x0631cc7d, 0x00309855, 0x3f602433, 0x05d1d47e, 0x0060b459, 0x3f50142e, 0x0581e47b, 0x00a0c85f, 0x3f400828, 0x0531f078, 0x00e0e064, 0x3f300021, 0x0501fc73, 0x00b0fc6a, 0x3f3ff41d, 0x04a20873 }, }, { 0x3e00, { 0x3fe06444, 0x3fe06444, 0x0681a07a, 0x00007849, 0x3fc0503f, 0x0641b07a, 0x0020904d, 0x3fa0403a, 0x05f1c07a, 0x0060a453, 0x3f803034, 0x05c1c878, 0x0090b858, 0x3f70202f, 0x0571d477, 0x00d0d05d, 0x3f501829, 0x0531e073, 0x0110e462, 0x3f500825, 0x04e1e471, 0x01510065, 0x3f40001f, 0x04a1f06d }, }, { 0x4000, { 0x00007044, 0x00007044, 0x06519476, 0x00208448, 0x3fe05c3f, 0x0621a476, 0x0050984d, 0x3fc04c3a, 0x05e1b075, 0x0080ac52, 0x3fa03c35, 0x05a1b875, 0x00c0c056, 0x3f803030, 0x0561c473, 0x0100d45b, 0x3f70202b, 0x0521d46f, 0x0140e860, 0x3f601427, 0x04d1d46e, 0x01810064, 0x3f500822, 0x0491dc6b }, }, { 0x5000, { 0x0110a442, 0x0110a442, 0x0551545e, 0x0140b045, 0x00e0983f, 0x0531585f, 0x0160c047, 0x00c08c3c, 0x0511645e, 0x0190cc4a, 0x00908039, 0x04f1685f, 0x01c0dc4c, 0x00707436, 0x04d1705e, 0x0200e850, 0x00506833, 0x04b1785b, 0x0230f453, 0x00305c30, 0x0491805a, 0x02710056, 0x0010542d, 0x04718059 }, }, { 0x6000, { 0x01c0bc40, 0x01c0bc40, 0x04c13052, 0x01e0c841, 0x01a0b43d, 0x04c13851, 0x0210cc44, 0x0180a83c, 0x04a13453, 0x0230d845, 0x0160a03a, 0x04913c52, 0x0260e047, 0x01409838, 0x04714052, 0x0280ec49, 0x01208c37, 0x04514c50, 0x02b0f44b, 0x01008435, 0x04414c50, 0x02d1004c, 0x00e07c33, 0x0431544f }, }, { 0x7000, { 0x0230c83e, 0x0230c83e, 0x04711c4c, 0x0250d03f, 0x0210c43c, 0x0471204b, 0x0270d840, 0x0200b83c, 0x0451244b, 0x0290dc42, 0x01e0b43a, 0x0441244c, 0x02b0e443, 0x01c0b038, 0x0441284b, 0x02d0ec44, 0x01b0a438, 0x0421304a, 0x02f0f445, 0x0190a036, 0x04213449, 0x0310f847, 0x01709c34, 0x04213848 }, }, { 0x8000, { 0x0280d03d, 0x0280d03d, 0x04310c48, 0x02a0d43e, 0x0270c83c, 0x04311047, 0x02b0dc3e, 0x0250c83a, 0x04311447, 0x02d0e040, 0x0240c03a, 0x04211446, 0x02e0e840, 0x0220bc39, 0x04111847, 0x0300e842, 0x0210b438, 0x04012445, 0x0310f043, 0x0200b037, 
0x04012045, 0x0330f444, 0x01e0ac36, 0x03f12445 }, }, { 0xefff, { 0x0340dc3a, 0x0340dc3a, 0x03b0ec40, 0x0340e03a, 0x0330e039, 0x03c0f03e, 0x0350e03b, 0x0330dc39, 0x03c0ec3e, 0x0350e43a, 0x0320dc38, 0x03c0f43e, 0x0360e43b, 0x0320d839, 0x03b0f03e, 0x0360e83b, 0x0310d838, 0x03c0fc3b, 0x0370e83b, 0x0310d439, 0x03a0f83d, 0x0370e83c, 0x0300d438, 0x03b0fc3c }, } }; static void rvin_set_coeff(struct rvin_dev *vin, unsigned short xs) { int i; const struct vin_coeff *p_prev_set = NULL; const struct vin_coeff *p_set = NULL; /* Look for suitable coefficient values */ for (i = 0; i < ARRAY_SIZE(vin_coeff_set); i++) { p_prev_set = p_set; p_set = &vin_coeff_set[i]; if (xs < p_set->xs_value) break; } /* Use previous value if its XS value is closer */ if (p_prev_set && xs - p_prev_set->xs_value < p_set->xs_value - xs) p_set = p_prev_set; /* Set coefficient registers */ rvin_write(vin, p_set->coeff_set[0], VNC1A_REG); rvin_write(vin, p_set->coeff_set[1], VNC1B_REG); rvin_write(vin, p_set->coeff_set[2], VNC1C_REG); rvin_write(vin, p_set->coeff_set[3], VNC2A_REG); rvin_write(vin, p_set->coeff_set[4], VNC2B_REG); rvin_write(vin, p_set->coeff_set[5], VNC2C_REG); rvin_write(vin, p_set->coeff_set[6], VNC3A_REG); rvin_write(vin, p_set->coeff_set[7], VNC3B_REG); rvin_write(vin, p_set->coeff_set[8], VNC3C_REG); rvin_write(vin, p_set->coeff_set[9], VNC4A_REG); rvin_write(vin, p_set->coeff_set[10], VNC4B_REG); rvin_write(vin, p_set->coeff_set[11], VNC4C_REG); rvin_write(vin, p_set->coeff_set[12], VNC5A_REG); rvin_write(vin, p_set->coeff_set[13], VNC5B_REG); rvin_write(vin, p_set->coeff_set[14], VNC5C_REG); rvin_write(vin, p_set->coeff_set[15], VNC6A_REG); rvin_write(vin, p_set->coeff_set[16], VNC6B_REG); rvin_write(vin, p_set->coeff_set[17], VNC6C_REG); rvin_write(vin, p_set->coeff_set[18], VNC7A_REG); rvin_write(vin, p_set->coeff_set[19], VNC7B_REG); rvin_write(vin, p_set->coeff_set[20], VNC7C_REG); rvin_write(vin, p_set->coeff_set[21], VNC8A_REG); rvin_write(vin, p_set->coeff_set[22], 
/*
 * NOTE(review): flattened-source artifact -- the two statements below are
 * the tail of rvin_set_coeff(), whose body begins on the previous line.
 */
VNC8B_REG);
	rvin_write(vin, p_set->coeff_set[23], VNC8C_REG);
}

/*
 * Program the Gen2 scaler: derive the X/Y scale ratios from the crop
 * (input) and compose (output) rectangles, load the matching filter
 * coefficients and set the post-clip window to the output format size.
 */
void rvin_scaler_gen2(struct rvin_dev *vin)
{
	unsigned int crop_height;
	u32 xs, ys;

	/* Set scaling coefficient */
	crop_height = vin->crop.height;
	/* Both fields of an interlaced frame contribute to the height. */
	if (V4L2_FIELD_HAS_BOTH(vin->format.field))
		crop_height *= 2;

	/* Ratios are in 1/4096 units; 0 means scaling disabled. */
	ys = 0;
	if (crop_height != vin->compose.height)
		ys = (4096 * crop_height) / vin->compose.height;
	rvin_write(vin, ys, VNYS_REG);

	xs = 0;
	if (vin->crop.width != vin->compose.width)
		xs = (4096 * vin->crop.width) / vin->compose.width;

	/* Horizontal upscaling is up to double size */
	if (xs > 0 && xs < 2048)
		xs = 2048;

	rvin_write(vin, xs, VNXS_REG);

	/* Horizontal upscaling is done by scaling down from double size */
	if (xs < 4096)
		xs *= 2;

	rvin_set_coeff(vin, xs);

	/* Set Start/End Pixel/Line Post-Clip */
	rvin_write(vin, 0, VNSPPOC_REG);
	rvin_write(vin, 0, VNSLPOC_REG);
	rvin_write(vin, vin->format.width - 1, VNEPPOC_REG);

	if (V4L2_FIELD_HAS_BOTH(vin->format.field))
		rvin_write(vin, vin->format.height / 2 - 1, VNELPOC_REG);
	else
		rvin_write(vin, vin->format.height - 1, VNELPOC_REG);

	vin_dbg(vin,
		"Pre-Clip: %ux%u@%u:%u YS: %d XS: %d Post-Clip: %ux%u@%u:%u\n",
		vin->crop.width, vin->crop.height, vin->crop.left,
		vin->crop.top, ys, xs, vin->format.width,
		vin->format.height, 0, 0);
}

/*
 * Fixed-point (1/4096) in/out ratio for the Gen3 UDS, saturated to the
 * 16-bit register field.
 */
static unsigned int rvin_uds_scale_ratio(unsigned int in, unsigned int out)
{
	unsigned int ratio;

	ratio = in * 4096 / out;
	return ratio >= 0x10000 ? 0xffff : ratio;
}

/* Passband filter width for a given UDS ratio (64 taps when not scaling down). */
static unsigned int rvin_uds_filter_width(unsigned int ratio)
{
	if (ratio >= 0x1000)
		return 64 * (ratio & 0xf000) / ratio;

	return 64;
}

/*
 * Program the Gen3 up/down scaler (UDS): ratios, passband widths and the
 * output clip size, halving the clip height for interlaced/sequential
 * field formats.
 */
void rvin_scaler_gen3(struct rvin_dev *vin)
{
	unsigned int ratio_h, ratio_v;
	unsigned int bwidth_h, bwidth_v;
	u32 vnmc, clip_size;

	vnmc = rvin_read(vin, VNMC_REG);

	/* Disable scaler if not needed.
 */
	if (!rvin_scaler_needed(vin)) {
		rvin_write(vin, vnmc & ~VNMC_SCLE, VNMC_REG);
		return;
	}

	ratio_h = rvin_uds_scale_ratio(vin->crop.width, vin->compose.width);
	bwidth_h = rvin_uds_filter_width(ratio_h);

	ratio_v = rvin_uds_scale_ratio(vin->crop.height, vin->compose.height);
	bwidth_v = rvin_uds_filter_width(ratio_v);

	/* Clip size packs width in the high half-word, height in the low. */
	clip_size = vin->compose.width << 16;
	switch (vin->format.field) {
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
	case V4L2_FIELD_INTERLACED:
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
		/* Each field is half the frame height. */
		clip_size |= vin->compose.height / 2;
		break;
	default:
		clip_size |= vin->compose.height;
		break;
	}

	rvin_write(vin, vnmc | VNMC_SCLE, VNMC_REG);
	rvin_write(vin, VNUDS_CTRL_AMD, VNUDS_CTRL_REG);
	rvin_write(vin, (ratio_h << 16) | ratio_v, VNUDS_SCALE_REG);
	rvin_write(vin, (bwidth_h << 16) | bwidth_v, VNUDS_PASS_BWIDTH_REG);
	rvin_write(vin, clip_size, VNUDS_CLIP_SIZE_REG);

	vin_dbg(vin, "Pre-Clip: %ux%u@%u:%u Post-Clip: %ux%u@%u:%u\n",
		vin->crop.width, vin->crop.height, vin->crop.left,
		vin->crop.top, vin->compose.width, vin->compose.height,
		vin->compose.left, vin->compose.top);
}

/*
 * Program pre-clip, scaling (via the per-generation scaler hook) and the
 * memory stride for the current crop/compose/format configuration.
 */
void rvin_crop_scale_comp(struct rvin_dev *vin)
{
	const struct rvin_video_format *fmt;
	u32 stride;

	/* Set Start/End Pixel/Line Pre-Clip */
	rvin_write(vin, vin->crop.left, VNSPPRC_REG);
	rvin_write(vin, vin->crop.left + vin->crop.width - 1, VNEPPRC_REG);
	rvin_write(vin, vin->crop.top, VNSLPRC_REG);
	rvin_write(vin, vin->crop.top + vin->crop.height - 1, VNELPRC_REG);

	if (vin->scaler)
		vin->scaler(vin);

	fmt = rvin_format_from_pixel(vin, vin->format.pixelformat);
	stride = vin->format.bytesperline / fmt->bpp;

	/* For RAW8 format bpp is 1, but the hardware process RAW8
	 * format in 2 pixel unit hence configure VNIS_REG as stride / 2.
 */
	switch (vin->format.pixelformat) {
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_GREY:
		stride /= 2;
		break;
	default:
		break;
	}
	rvin_write(vin, stride, VNIS_REG);
}

/* -----------------------------------------------------------------------------
 * Hardware setup
 */

/*
 * Translate the negotiated field order, media-bus code and output pixel
 * format into the VNMC/VNDMR/VNDMR2 register values, then enable
 * interrupts and the capture module.
 *
 * Returns 0 on success or -EINVAL for an input/output combination the
 * hardware cannot produce.
 */
static int rvin_setup(struct rvin_dev *vin)
{
	u32 vnmc, dmr, dmr2, interrupts;
	bool progressive = false, output_is_yuv = false, input_is_yuv = false;

	/* Interlace handling: pick the capture mode for the field order. */
	switch (vin->format.field) {
	case V4L2_FIELD_TOP:
		vnmc = VNMC_IM_ODD;
		break;
	case V4L2_FIELD_BOTTOM:
		vnmc = VNMC_IM_EVEN;
		break;
	case V4L2_FIELD_INTERLACED:
		/* Default to TB */
		vnmc = VNMC_IM_FULL;
		/* Use BT if video standard can be read and is 60 Hz format */
		if (!vin->info->use_mc && vin->std & V4L2_STD_525_60)
			vnmc = VNMC_IM_FULL | VNMC_FOC;
		break;
	case V4L2_FIELD_INTERLACED_TB:
		vnmc = VNMC_IM_FULL;
		break;
	case V4L2_FIELD_INTERLACED_BT:
		vnmc = VNMC_IM_FULL | VNMC_FOC;
		break;
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
	case V4L2_FIELD_NONE:
	case V4L2_FIELD_ALTERNATE:
		vnmc = VNMC_IM_ODD_EVEN;
		progressive = true;
		break;
	default:
		vnmc = VNMC_IM_ODD;
		break;
	}

	/*
	 * Input interface
	 */
	switch (vin->mbus_code) {
	case MEDIA_BUS_FMT_YUYV8_1X16:
		/* BT.601/BT.1358 16bit YCbCr422 */
		vnmc |= VNMC_INF_YUV16;
		input_is_yuv = true;
		break;
	case MEDIA_BUS_FMT_UYVY8_1X16:
		vnmc |= VNMC_INF_YUV16 | VNMC_YCAL;
		input_is_yuv = true;
		break;
	case MEDIA_BUS_FMT_UYVY8_2X8:
		/* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
		if (!vin->is_csi &&
		    vin->parallel.mbus_type == V4L2_MBUS_BT656)
			vnmc |= VNMC_INF_YUV8_BT656;
		else
			vnmc |= VNMC_INF_YUV8_BT601;
		input_is_yuv = true;
		break;
	case MEDIA_BUS_FMT_RGB888_1X24:
		vnmc |= VNMC_INF_RGB888;
		break;
	case MEDIA_BUS_FMT_UYVY10_2X10:
		/* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
		if (!vin->is_csi &&
		    vin->parallel.mbus_type == V4L2_MBUS_BT656)
			vnmc |= VNMC_INF_YUV10_BT656;
		else
			vnmc |= VNMC_INF_YUV10_BT601;
		input_is_yuv = true;
		break;
	case MEDIA_BUS_FMT_SBGGR8_1X8:
	case MEDIA_BUS_FMT_SGBRG8_1X8:
	case MEDIA_BUS_FMT_SGRBG8_1X8:
	case MEDIA_BUS_FMT_SRGGB8_1X8:
	case MEDIA_BUS_FMT_Y8_1X8:
		vnmc |= VNMC_INF_RAW8;
		break;
	default:
		break;
	}

	/* Make sure input interface and input format is valid. */
	if (vin->info->model == RCAR_GEN3) {
		switch (vnmc & VNMC_INF_MASK) {
		case VNMC_INF_YUV8_BT656:
		case VNMC_INF_YUV10_BT656:
		case VNMC_INF_YUV16:
		case VNMC_INF_RGB666:
			if (vin->is_csi) {
				vin_err(vin, "Invalid setting in MIPI CSI2\n");
				return -EINVAL;
			}
			break;
		case VNMC_INF_RAW8:
			if (!vin->is_csi) {
				vin_err(vin, "Invalid setting in Digital Pins\n");
				return -EINVAL;
			}
			break;
		default:
			break;
		}
	}

	/* Enable VSYNC Field Toggle mode after one VSYNC input */
	if (vin->info->model == RCAR_GEN3)
		dmr2 = VNDMR2_FTEV;
	else
		dmr2 = VNDMR2_FTEV | VNDMR2_VLV(1);

	if (!vin->is_csi) {
		/* Hsync Signal Polarity Select */
		if (!(vin->parallel.bus.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
			dmr2 |= VNDMR2_HPS;

		/* Vsync Signal Polarity Select */
		if (!(vin->parallel.bus.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
			dmr2 |= VNDMR2_VPS;

		/* Data Enable Polarity Select */
		if (vin->parallel.bus.flags & V4L2_MBUS_DATA_ENABLE_LOW)
			dmr2 |= VNDMR2_CES;

		switch (vin->mbus_code) {
		case MEDIA_BUS_FMT_UYVY8_2X8:
			/* 8-bit bus with data on the upper lanes. */
			if (vin->parallel.bus.bus_width == 8 &&
			    vin->parallel.bus.data_shift == 8)
				dmr2 |= VNDMR2_YDS;
			break;
		default:
			break;
		}
	}

	/*
	 * Output format
	 */
	switch (vin->format.pixelformat) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
		/* Semi-planar: program the UV plane offset (128-byte aligned). */
		rvin_write(vin,
			   ALIGN(vin->format.bytesperline * vin->format.height,
				 0x80), VNUVAOF_REG);
		dmr = vin->format.pixelformat == V4L2_PIX_FMT_NV12 ?
			VNDMR_DTMD_YCSEP_420 : VNDMR_DTMD_YCSEP;
		output_is_yuv = true;
		break;
	case V4L2_PIX_FMT_YUYV:
		dmr = VNDMR_BPSM;
		output_is_yuv = true;
		break;
	case V4L2_PIX_FMT_UYVY:
		dmr = 0;
		output_is_yuv = true;
		break;
	case V4L2_PIX_FMT_XRGB555:
		dmr = VNDMR_DTMD_ARGB;
		break;
	case V4L2_PIX_FMT_RGB565:
		dmr = 0;
		break;
	case V4L2_PIX_FMT_XBGR32:
		/* Note: not supported on M1 */
		dmr = VNDMR_EXRGB;
		break;
	case V4L2_PIX_FMT_ARGB555:
		dmr = (vin->alpha ? VNDMR_ABIT : 0) | VNDMR_DTMD_ARGB;
		break;
	case V4L2_PIX_FMT_ABGR32:
		dmr = VNDMR_A8BIT(vin->alpha) | VNDMR_EXRGB | VNDMR_DTMD_ARGB;
		break;
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
		dmr = 0;
		break;
	case V4L2_PIX_FMT_GREY:
		if (input_is_yuv) {
			/* Extract only the Y component from YUV input. */
			dmr = VNDMR_DTMD_YCSEP | VNDMR_YMODE_Y8;
			output_is_yuv = true;
		} else {
			dmr = 0;
		}
		break;
	default:
		vin_err(vin, "Invalid pixelformat (0x%x)\n",
			vin->format.pixelformat);
		return -EINVAL;
	}

	/* Always update on field change */
	vnmc |= VNMC_VUP;

	if (!vin->info->use_isp) {
		/* If input and output use the same colorspace, use bypass mode */
		if (input_is_yuv == output_is_yuv)
			vnmc |= VNMC_BPS;

		if (vin->info->model == RCAR_GEN3) {
			/* Select between CSI-2 and parallel input */
			if (vin->is_csi)
				vnmc &= ~VNMC_DPINE;
			else
				vnmc |= VNMC_DPINE;
		}
	}

	/* Progressive or interlaced mode */
	interrupts = progressive ? VNIE_FIE : VNIE_EFE;

	/* Ack interrupts */
	rvin_write(vin, interrupts, VNINTS_REG);
	/* Enable interrupts */
	rvin_write(vin, interrupts, VNIE_REG);
	/* Start capturing */
	rvin_write(vin, dmr, VNDMR_REG);
	rvin_write(vin, dmr2, VNDMR2_REG);

	/* Enable module */
	rvin_write(vin, vnmc | VNMC_ME, VNMC_REG);

	return 0;
}

/* Mask all VIN interrupt sources. */
static void rvin_disable_interrupts(struct rvin_dev *vin)
{
	rvin_write(vin, 0, VNIE_REG);
}

/* Read the raw interrupt status register. */
static u32 rvin_get_interrupt_status(struct rvin_dev *vin)
{
	return rvin_read(vin, VNINTS_REG);
}

/* Acknowledge all pending interrupts by writing the status back. */
static void rvin_ack_interrupt(struct rvin_dev *vin)
{
	rvin_write(vin, rvin_read(vin, VNINTS_REG), VNINTS_REG);
}

/* True while a capture transfer is in progress (VNMS CA bit). */
static bool rvin_capture_active(struct rvin_dev *vin)
{
	return rvin_read(vin, VNMS_REG) & VNMS_CA;
}

/*
 * Return the V4L2 field of the frame just captured; for ALTERNATE the
 * field is derived from the module status bits.
 */
static enum v4l2_field rvin_get_active_field(struct rvin_dev *vin, u32 vnms)
{
	if (vin->format.field == V4L2_FIELD_ALTERNATE) {
		/* If FS is set it is an Even field.
 */
		if (vnms & VNMS_FS)
			return V4L2_FIELD_BOTTOM;

		return V4L2_FIELD_TOP;
	}

	return vin->format.field;
}

/*
 * Program the DMA base address for a hardware buffer slot, offset by the
 * compose rectangle since the hardware has no composition support.
 */
static void rvin_set_slot_addr(struct rvin_dev *vin, int slot, dma_addr_t addr)
{
	const struct rvin_video_format *fmt;
	int offsetx, offsety;
	dma_addr_t offset;

	fmt = rvin_format_from_pixel(vin, vin->format.pixelformat);

	/*
	 * There is no HW support for composition do the best we can
	 * by modifying the buffer offset
	 */
	offsetx = vin->compose.left * fmt->bpp;
	offsety = vin->compose.top * vin->format.bytesperline;
	offset = addr + offsetx + offsety;

	/*
	 * The address needs to be 128 bytes aligned. Driver should never accept
	 * settings that do not satisfy this in the first place...
	 */
	if (WARN_ON((offsetx | offsety | offset) & HW_BUFFER_MASK))
		return;

	rvin_write(vin, offset, VNMB_REG(slot));
}

/*
 * Moves a buffer from the queue to the HW slot. If no buffer is
 * available use the scratch buffer. The scratch buffer is never
 * returned to userspace, its only function is to enable the capture
 * loop to keep running.
 */
static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
{
	struct rvin_buffer *buf;
	struct vb2_v4l2_buffer *vbuf;
	dma_addr_t phys_addr;
	int prev;

	/* An already populated slot shall never be overwritten. */
	if (WARN_ON(vin->buf_hw[slot].buffer))
		return;

	/* Index of the previous slot, wrapping around at slot 0. */
	prev = (slot == 0 ? HW_BUFFER_NUM : slot) - 1;

	if (vin->buf_hw[prev].type == HALF_TOP) {
		/*
		 * The previous slot captured the top half of a sequential
		 * frame; aim this slot at the bottom half of the same buffer.
		 */
		vbuf = vin->buf_hw[prev].buffer;
		vin->buf_hw[slot].buffer = vbuf;
		vin->buf_hw[slot].type = HALF_BOTTOM;
		switch (vin->format.pixelformat) {
		case V4L2_PIX_FMT_NV12:
		case V4L2_PIX_FMT_NV16:
			/* Semi-planar: the Y plane is half the image. */
			phys_addr = vin->buf_hw[prev].phys +
				vin->format.sizeimage / 4;
			break;
		default:
			phys_addr = vin->buf_hw[prev].phys +
				vin->format.sizeimage / 2;
			break;
		}
	} else if ((vin->state != STOPPED && vin->state != RUNNING) ||
		   list_empty(&vin->buf_list)) {
		/* No user buffer available, capture into the scratch buffer. */
		vin->buf_hw[slot].buffer = NULL;
		vin->buf_hw[slot].type = FULL;
		phys_addr = vin->scratch_phys;
	} else {
		/* Keep track of buffer we give to HW */
		buf = list_entry(vin->buf_list.next, struct rvin_buffer, list);
		vbuf = &buf->vb;
		list_del_init(to_buf_list(vbuf));
		vin->buf_hw[slot].buffer = vbuf;
		vin->buf_hw[slot].type =
			V4L2_FIELD_IS_SEQUENTIAL(vin->format.field) ?
			HALF_TOP : FULL;

		/* Setup DMA */
		phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0);
	}

	vin_dbg(vin, "Filling HW slot: %d type: %d buffer: %p\n",
		slot, vin->buf_hw[slot].type, vin->buf_hw[slot].buffer);

	vin->buf_hw[slot].phys = phys_addr;
	rvin_set_slot_addr(vin, slot, phys_addr);
}

/*
 * Arm all hardware buffer slots, program the capture configuration and
 * start continuous frame capture. Returns 0 or a negative errno from
 * rvin_setup().
 */
static int rvin_capture_start(struct rvin_dev *vin)
{
	int slot, ret;

	for (slot = 0; slot < HW_BUFFER_NUM; slot++) {
		vin->buf_hw[slot].buffer = NULL;
		vin->buf_hw[slot].type = FULL;
	}

	for (slot = 0; slot < HW_BUFFER_NUM; slot++)
		rvin_fill_hw_slot(vin, slot);

	ret = rvin_setup(vin);
	if (ret)
		return ret;

	rvin_crop_scale_comp(vin);

	vin_dbg(vin, "Starting to capture\n");

	/* Continuous Frame Capture Mode */
	rvin_write(vin, VNFC_C_FRAME, VNFC_REG);

	vin->state = STARTING;

	return 0;
}

/* Stop frame capture and disable the VIN module. */
static void rvin_capture_stop(struct rvin_dev *vin)
{
	/* Set continuous & single transfer off */
	rvin_write(vin, 0, VNFC_REG);

	/* Disable module */
	rvin_write(vin, rvin_read(vin, VNMC_REG) & ~VNMC_ME, VNMC_REG);
}

/* -----------------------------------------------------------------------------
 * DMA Functions
 */

#define RVIN_TIMEOUT_MS 100
/*
 * NOTE(review): flattened-source artifact -- the value of RVIN_RETRIES
 * ("10") was split onto the next source line by the extraction.
 */
#define RVIN_RETRIES
10 /* NOTE(review): flattened-source artifact -- this "10" is the value of
    * RVIN_RETRIES, whose "#define" sits at the end of the previous line. */

/*
 * Capture interrupt handler: acknowledge the interrupt, hand the
 * completed frame (if any) back to vb2 and re-arm the hardware slot.
 * Runs under vin->qlock.
 */
static irqreturn_t rvin_irq(int irq, void *data)
{
	struct rvin_dev *vin = data;
	u32 int_status, vnms;
	int slot;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&vin->qlock, flags);

	int_status = rvin_get_interrupt_status(vin);
	if (!int_status)
		goto done;

	rvin_ack_interrupt(vin);
	handled = 1;

	/* Nothing to do if nothing was captured. */
	if (!(int_status & VNINTS_FIS))
		goto done;

	/* Nothing to do if capture status is 'STOPPED' */
	if (vin->state == STOPPED) {
		vin_dbg(vin, "IRQ while state stopped\n");
		goto done;
	}

	/* Prepare for capture and update state */
	vnms = rvin_read(vin, VNMS_REG);
	slot = (vnms & VNMS_FBS_MASK) >> VNMS_FBS_SHIFT;

	/*
	 * To hand buffers back in a known order to userspace start
	 * to capture first from slot 0.
	 */
	if (vin->state == STARTING) {
		if (slot != 0) {
			vin_dbg(vin, "Starting sync slot: %d\n", slot);
			goto done;
		}

		vin_dbg(vin, "Capture start synced!\n");
		vin->state = RUNNING;
	}

	/* Capture frame */
	if (vin->buf_hw[slot].buffer) {
		/*
		 * Nothing to do but refill the hardware slot if
		 * capture only filled first half of vb2 buffer.
		 */
		if (vin->buf_hw[slot].type == HALF_TOP) {
			vin->buf_hw[slot].buffer = NULL;
			rvin_fill_hw_slot(vin, slot);
			goto done;
		}

		vin->buf_hw[slot].buffer->field =
			rvin_get_active_field(vin, vnms);
		vin->buf_hw[slot].buffer->sequence = vin->sequence;
		vin->buf_hw[slot].buffer->vb2_buf.timestamp = ktime_get_ns();
		vb2_buffer_done(&vin->buf_hw[slot].buffer->vb2_buf,
				VB2_BUF_STATE_DONE);
		vin->buf_hw[slot].buffer = NULL;
	} else {
		/* Scratch buffer was used, dropping frame.
 */
		vin_dbg(vin, "Dropping frame %u\n", vin->sequence);
	}

	vin->sequence++;

	/* Prepare for next frame */
	rvin_fill_hw_slot(vin, slot);
done:
	spin_unlock_irqrestore(&vin->qlock, flags);

	return IRQ_RETVAL(handled);
}

/* Give every buffer still queued to the driver back to vb2 in @state. */
static void return_unused_buffers(struct rvin_dev *vin,
				  enum vb2_buffer_state state)
{
	struct rvin_buffer *buf, *node;
	unsigned long flags;

	spin_lock_irqsave(&vin->qlock, flags);

	list_for_each_entry_safe(buf, node, &vin->buf_list, list) {
		vb2_buffer_done(&buf->vb.vb2_buf, state);
		list_del(&buf->list);
	}

	spin_unlock_irqrestore(&vin->qlock, flags);
}

/* vb2 .queue_setup: single plane sized to the current format. */
static int rvin_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
			    unsigned int *nplanes, unsigned int sizes[],
			    struct device *alloc_devs[])
{
	struct rvin_dev *vin = vb2_get_drv_priv(vq);

	/* Make sure the image size is large enough. */
	if (*nplanes)
		return sizes[0] < vin->format.sizeimage ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = vin->format.sizeimage;

	return 0;
}; /* NOTE(review): stray ';' after function body, kept from the original. */

/* vb2 .buf_prepare: reject buffers smaller than the image size. */
static int rvin_buffer_prepare(struct vb2_buffer *vb)
{
	struct rvin_dev *vin = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size = vin->format.sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		vin_err(vin, "buffer too small (%lu < %lu)\n",
			vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);

	return 0;
}

/* vb2 .buf_queue: append the buffer to the driver's pending list. */
static void rvin_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct rvin_dev *vin = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long flags;

	spin_lock_irqsave(&vin->qlock, flags);

	list_add_tail(to_buf_list(vbuf), &vin->buf_list);

	spin_unlock_irqrestore(&vin->qlock, flags);
}

/*
 * Validate that the subdevice format on @pad is compatible with the
 * VIN's configured pixel format, field order and frame size.
 * Returns 0 on success or -EPIPE on any mismatch.
 */
static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
				   struct media_pad *pad)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};

	fmt.pad = pad->index;
	if (v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt))
		return -EPIPE;

	switch (fmt.format.code) {
	case MEDIA_BUS_FMT_YUYV8_1X16:
	case MEDIA_BUS_FMT_UYVY8_1X16:
	case MEDIA_BUS_FMT_UYVY8_2X8:
	case
MEDIA_BUS_FMT_UYVY10_2X10: case MEDIA_BUS_FMT_RGB888_1X24: break; case MEDIA_BUS_FMT_SBGGR8_1X8: if (vin->format.pixelformat != V4L2_PIX_FMT_SBGGR8) return -EPIPE; break; case MEDIA_BUS_FMT_SGBRG8_1X8: if (vin->format.pixelformat != V4L2_PIX_FMT_SGBRG8) return -EPIPE; break; case MEDIA_BUS_FMT_SGRBG8_1X8: if (vin->format.pixelformat != V4L2_PIX_FMT_SGRBG8) return -EPIPE; break; case MEDIA_BUS_FMT_SRGGB8_1X8: if (vin->format.pixelformat != V4L2_PIX_FMT_SRGGB8) return -EPIPE; break; case MEDIA_BUS_FMT_Y8_1X8: if (vin->format.pixelformat != V4L2_PIX_FMT_GREY) return -EPIPE; break; default: return -EPIPE; } vin->mbus_code = fmt.format.code; switch (fmt.format.field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_NONE: case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_INTERLACED: case V4L2_FIELD_SEQ_TB: case V4L2_FIELD_SEQ_BT: /* Supported natively */ break; case V4L2_FIELD_ALTERNATE: switch (vin->format.field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_NONE: case V4L2_FIELD_ALTERNATE: break; case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_INTERLACED: case V4L2_FIELD_SEQ_TB: case V4L2_FIELD_SEQ_BT: /* Use VIN hardware to combine the two fields */ fmt.format.height *= 2; break; default: return -EPIPE; } break; default: return -EPIPE; } if (rvin_scaler_needed(vin)) { /* Gen3 can't scale NV12 */ if (vin->info->model == RCAR_GEN3 && vin->format.pixelformat == V4L2_PIX_FMT_NV12) return -EPIPE; if (!vin->scaler) return -EPIPE; } else { if (vin->format.pixelformat == V4L2_PIX_FMT_NV12) { if (ALIGN(fmt.format.width, 32) != vin->format.width || ALIGN(fmt.format.height, 32) != vin->format.height) return -EPIPE; } else { if (fmt.format.width != vin->format.width || fmt.format.height != vin->format.height) return -EPIPE; } } if (fmt.format.code != vin->mbus_code) return -EPIPE; return 0; } static int rvin_set_stream(struct rvin_dev *vin, int on) { struct v4l2_subdev *sd; struct media_pad 
*pad; int ret; /* No media controller used, simply pass operation to subdevice. */ if (!vin->info->use_mc) { ret = v4l2_subdev_call(vin->parallel.subdev, video, s_stream, on); return ret == -ENOIOCTLCMD ? 0 : ret; } pad = media_pad_remote_pad_first(&vin->pad); if (!pad) return -EPIPE; sd = media_entity_to_v4l2_subdev(pad->entity); if (!on) { video_device_pipeline_stop(&vin->vdev); return v4l2_subdev_call(sd, video, s_stream, 0); } ret = rvin_mc_validate_format(vin, sd, pad); if (ret) return ret; ret = video_device_pipeline_alloc_start(&vin->vdev); if (ret) return ret; ret = v4l2_subdev_call(sd, video, s_stream, 1); if (ret == -ENOIOCTLCMD) ret = 0; if (ret) video_device_pipeline_stop(&vin->vdev); return ret; } int rvin_start_streaming(struct rvin_dev *vin) { unsigned long flags; int ret; ret = rvin_set_stream(vin, 1); if (ret) return ret; spin_lock_irqsave(&vin->qlock, flags); vin->sequence = 0; ret = rvin_capture_start(vin); if (ret) rvin_set_stream(vin, 0); spin_unlock_irqrestore(&vin->qlock, flags); return ret; } static int rvin_start_streaming_vq(struct vb2_queue *vq, unsigned int count) { struct rvin_dev *vin = vb2_get_drv_priv(vq); int ret = -ENOMEM; /* Allocate scratch buffer. */ vin->scratch = dma_alloc_coherent(vin->dev, vin->format.sizeimage, &vin->scratch_phys, GFP_KERNEL); if (!vin->scratch) goto err_scratch; ret = rvin_start_streaming(vin); if (ret) goto err_start; return 0; err_start: dma_free_coherent(vin->dev, vin->format.sizeimage, vin->scratch, vin->scratch_phys); err_scratch: return_unused_buffers(vin, VB2_BUF_STATE_QUEUED); return ret; } void rvin_stop_streaming(struct rvin_dev *vin) { unsigned int i, retries; unsigned long flags; bool buffersFreed; spin_lock_irqsave(&vin->qlock, flags); if (vin->state == STOPPED) { spin_unlock_irqrestore(&vin->qlock, flags); return; } vin->state = STOPPING; /* Wait until only scratch buffer is used, max 3 interrupts. 
*/ retries = 0; while (retries++ < RVIN_RETRIES) { buffersFreed = true; for (i = 0; i < HW_BUFFER_NUM; i++) if (vin->buf_hw[i].buffer) buffersFreed = false; if (buffersFreed) break; spin_unlock_irqrestore(&vin->qlock, flags); msleep(RVIN_TIMEOUT_MS); spin_lock_irqsave(&vin->qlock, flags); } /* Wait for streaming to stop */ retries = 0; while (retries++ < RVIN_RETRIES) { rvin_capture_stop(vin); /* Check if HW is stopped */ if (!rvin_capture_active(vin)) { vin->state = STOPPED; break; } spin_unlock_irqrestore(&vin->qlock, flags); msleep(RVIN_TIMEOUT_MS); spin_lock_irqsave(&vin->qlock, flags); } if (!buffersFreed || vin->state != STOPPED) { /* * If this happens something have gone horribly wrong. * Set state to stopped to prevent the interrupt handler * to make things worse... */ vin_err(vin, "Failed stop HW, something is seriously broken\n"); vin->state = STOPPED; } spin_unlock_irqrestore(&vin->qlock, flags); /* If something went wrong, free buffers with an error. */ if (!buffersFreed) { return_unused_buffers(vin, VB2_BUF_STATE_ERROR); for (i = 0; i < HW_BUFFER_NUM; i++) { if (vin->buf_hw[i].buffer) vb2_buffer_done(&vin->buf_hw[i].buffer->vb2_buf, VB2_BUF_STATE_ERROR); } } rvin_set_stream(vin, 0); /* disable interrupts */ rvin_disable_interrupts(vin); } static void rvin_stop_streaming_vq(struct vb2_queue *vq) { struct rvin_dev *vin = vb2_get_drv_priv(vq); rvin_stop_streaming(vin); /* Free scratch buffer. 
*/ dma_free_coherent(vin->dev, vin->format.sizeimage, vin->scratch, vin->scratch_phys); return_unused_buffers(vin, VB2_BUF_STATE_ERROR); } static const struct vb2_ops rvin_qops = { .queue_setup = rvin_queue_setup, .buf_prepare = rvin_buffer_prepare, .buf_queue = rvin_buffer_queue, .start_streaming = rvin_start_streaming_vq, .stop_streaming = rvin_stop_streaming_vq, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; void rvin_dma_unregister(struct rvin_dev *vin) { mutex_destroy(&vin->lock); v4l2_device_unregister(&vin->v4l2_dev); } int rvin_dma_register(struct rvin_dev *vin, int irq) { struct vb2_queue *q = &vin->queue; int i, ret; /* Initialize the top-level structure */ ret = v4l2_device_register(vin->dev, &vin->v4l2_dev); if (ret) return ret; mutex_init(&vin->lock); INIT_LIST_HEAD(&vin->buf_list); spin_lock_init(&vin->qlock); vin->state = STOPPED; for (i = 0; i < HW_BUFFER_NUM; i++) vin->buf_hw[i].buffer = NULL; /* buffer queue */ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF; q->lock = &vin->lock; q->drv_priv = vin; q->buf_struct_size = sizeof(struct rvin_buffer); q->ops = &rvin_qops; q->mem_ops = &vb2_dma_contig_memops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->min_buffers_needed = 4; q->dev = vin->dev; ret = vb2_queue_init(q); if (ret < 0) { vin_err(vin, "failed to initialize VB2 queue\n"); goto error; } /* irq */ ret = devm_request_irq(vin->dev, irq, rvin_irq, IRQF_SHARED, KBUILD_MODNAME, vin); if (ret) { vin_err(vin, "failed to request irq\n"); goto error; } return 0; error: rvin_dma_unregister(vin); return ret; } /* ----------------------------------------------------------------------------- * Gen3 CHSEL manipulation */ /* * There is no need to have locking around changing the routing * as it's only possible to do so when no VIN in the group is * streaming so nothing can race with the VNMC register. 
*/ int rvin_set_channel_routing(struct rvin_dev *vin, u8 chsel) { const struct rvin_group_route *route; u32 ifmd = 0; u32 vnmc; int ret; ret = pm_runtime_resume_and_get(vin->dev); if (ret < 0) return ret; /* Make register writes take effect immediately. */ vnmc = rvin_read(vin, VNMC_REG); rvin_write(vin, vnmc & ~VNMC_VUP, VNMC_REG); /* * Set data expansion mode to "pad with 0s" by inspecting the routes * table to find out which bit fields are available in the IFMD * register. IFMD_DES1 controls data expansion mode for CSI20/21, * IFMD_DES0 controls data expansion mode for CSI40/41. */ for (route = vin->info->routes; route->chsel; route++) { if (route->csi == RVIN_CSI20 || route->csi == RVIN_CSI21) ifmd |= VNCSI_IFMD_DES1; else ifmd |= VNCSI_IFMD_DES0; if (ifmd == (VNCSI_IFMD_DES0 | VNCSI_IFMD_DES1)) break; } if (ifmd) { ifmd |= VNCSI_IFMD_CSI_CHSEL(chsel); rvin_write(vin, ifmd, VNCSI_IFMD_REG); } vin_dbg(vin, "Set IFMD 0x%x\n", ifmd); vin->chsel = chsel; /* Restore VNMC. */ rvin_write(vin, vnmc, VNMC_REG); pm_runtime_put(vin->dev); return 0; } void rvin_set_alpha(struct rvin_dev *vin, unsigned int alpha) { unsigned long flags; u32 dmr; spin_lock_irqsave(&vin->qlock, flags); vin->alpha = alpha; if (vin->state == STOPPED) goto out; switch (vin->format.pixelformat) { case V4L2_PIX_FMT_ARGB555: dmr = rvin_read(vin, VNDMR_REG) & ~VNDMR_ABIT; if (vin->alpha) dmr |= VNDMR_ABIT; break; case V4L2_PIX_FMT_ABGR32: dmr = rvin_read(vin, VNDMR_REG) & ~VNDMR_A8BIT_MASK; dmr |= VNDMR_A8BIT(vin->alpha); break; default: goto out; } rvin_write(vin, dmr, VNDMR_REG); out: spin_unlock_irqrestore(&vin->qlock, flags); }
/*
 * End of drivers/media/platform/renesas/rcar-vin/rcar-dma.c (linux-master).
 * The code below belongs to a different source file (rcar-csi2.c).
 */
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Renesas R-Car MIPI CSI-2 Receiver
 *
 * Copyright (C) 2018 Renesas Electronics Corp.
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sys_soc.h>

#include <media/mipi-csi2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>

struct rcar_csi2;

/* Register offsets and bits */

/* Control Timing Select */
#define TREF_REG			0x00
#define TREF_TREF			BIT(0)

/* Software Reset */
#define SRST_REG			0x04
#define SRST_SRST			BIT(0)

/* PHY Operation Control */
#define PHYCNT_REG			0x08
#define PHYCNT_SHUTDOWNZ		BIT(17)
#define PHYCNT_RSTZ			BIT(16)
#define PHYCNT_ENABLECLK		BIT(4)
#define PHYCNT_ENABLE_3			BIT(3)
#define PHYCNT_ENABLE_2			BIT(2)
#define PHYCNT_ENABLE_1			BIT(1)
#define PHYCNT_ENABLE_0			BIT(0)

/* Checksum Control */
#define CHKSUM_REG			0x0c
#define CHKSUM_ECC_EN			BIT(1)
#define CHKSUM_CRC_EN			BIT(0)

/*
 * Channel Data Type Select
 * VCDT[0-15]:  Channel 0 VCDT[16-31]:  Channel 1
 * VCDT2[0-15]: Channel 2 VCDT2[16-31]: Channel 3
 */
#define VCDT_REG			0x10
#define VCDT2_REG			0x14
#define VCDT_VCDTN_EN			BIT(15)
#define VCDT_SEL_VC(n)			(((n) & 0x3) << 8)
#define VCDT_SEL_DTN_ON			BIT(6)
#define VCDT_SEL_DT(n)			(((n) & 0x3f) << 0)

/* Frame Data Type Select */
#define FRDT_REG			0x18

/* Field Detection Control */
#define FLD_REG				0x1c
#define FLD_FLD_NUM(n)			(((n) & 0xff) << 16)
#define FLD_DET_SEL(n)			(((n) & 0x3) << 4)
#define FLD_FLD_EN4			BIT(3)
#define FLD_FLD_EN3			BIT(2)
#define FLD_FLD_EN2			BIT(1)
#define FLD_FLD_EN			BIT(0)

/* Automatic Standby Control */
#define ASTBY_REG			0x20

/* Long Data Type Setting 0 */
#define LNGDT0_REG			0x28

/* Long Data Type Setting 1 */
#define LNGDT1_REG			0x2c

/* Interrupt Enable */
#define INTEN_REG			0x30
#define INTEN_INT_AFIFO_OF		BIT(27)
#define INTEN_INT_ERRSOTHS		BIT(4)
#define INTEN_INT_ERRSOTSYNCHS		BIT(3)

/* Interrupt Source Mask */
#define INTCLOSE_REG			0x34

/* Interrupt Status Monitor */
#define INTSTATE_REG			0x38
#define INTSTATE_INT_ULPS_START		BIT(7)
#define INTSTATE_INT_ULPS_END		BIT(6)

/* Interrupt Error Status Monitor */
#define INTERRSTATE_REG			0x3c

/* Short Packet Data */
#define SHPDAT_REG			0x40

/* Short Packet Count */
#define SHPCNT_REG			0x44

/* LINK Operation Control */
#define LINKCNT_REG			0x48
#define LINKCNT_MONITOR_EN		BIT(31)
#define LINKCNT_REG_MONI_PACT_EN	BIT(25)
#define LINKCNT_ICLK_NONSTOP		BIT(24)

/* Lane Swap */
#define LSWAP_REG			0x4c
#define LSWAP_L3SEL(n)			(((n) & 0x3) << 6)
#define LSWAP_L2SEL(n)			(((n) & 0x3) << 4)
#define LSWAP_L1SEL(n)			(((n) & 0x3) << 2)
#define LSWAP_L0SEL(n)			(((n) & 0x3) << 0)

/* PHY Test Interface Write Register */
#define PHTW_REG			0x50
#define PHTW_DWEN			BIT(24)
#define PHTW_TESTDIN_DATA(n)		(((n & 0xff)) << 16)
#define PHTW_CWEN			BIT(8)
#define PHTW_TESTDIN_CODE(n)		((n & 0xff))

#define PHYFRX_REG			0x64
#define PHYFRX_FORCERX_MODE_3		BIT(3)
#define PHYFRX_FORCERX_MODE_2		BIT(2)
#define PHYFRX_FORCERX_MODE_1		BIT(1)
#define PHYFRX_FORCERX_MODE_0		BIT(0)

/* V4H BASE registers */
#define V4H_N_LANES_REG				0x0004
#define V4H_CSI2_RESETN_REG			0x0008
#define V4H_PHY_MODE_REG			0x001c
#define V4H_PHY_SHUTDOWNZ_REG			0x0040
#define V4H_DPHY_RSTZ_REG			0x0044
#define V4H_FLDC_REG				0x0804
#define V4H_FLDD_REG				0x0808
#define V4H_IDIC_REG				0x0810
#define V4H_PHY_EN_REG				0x2000
#define V4H_ST_PHYST_REG			0x2814
#define V4H_ST_PHYST_ST_PHY_READY		BIT(31)
#define V4H_ST_PHYST_ST_STOPSTATE_3		BIT(3)
#define V4H_ST_PHYST_ST_STOPSTATE_2		BIT(2)
#define V4H_ST_PHYST_ST_STOPSTATE_1		BIT(1)
#define V4H_ST_PHYST_ST_STOPSTATE_0		BIT(0)

/* V4H PPI registers */
#define V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(n)		(0x21800 + ((n) * 2))	/* n = 0 - 9 */
#define V4H_PPI_STARTUP_RW_COMMON_STARTUP_1_1_REG	0x21822
#define V4H_PPI_CALIBCTRL_RW_COMMON_BG_0_REG		0x2184c
#define V4H_PPI_RW_LPDCOCAL_TIMEBASE_REG		0x21c02
#define V4H_PPI_RW_LPDCOCAL_NREF_REG			0x21c04
#define V4H_PPI_RW_LPDCOCAL_NREF_RANGE_REG		0x21c06
#define V4H_PPI_RW_LPDCOCAL_TWAIT_CONFIG_REG		0x21c0a
#define V4H_PPI_RW_LPDCOCAL_VT_CONFIG_REG		0x21c0c
#define V4H_PPI_RW_LPDCOCAL_COARSE_CFG_REG		0x21c10
#define V4H_PPI_RW_COMMON_CFG_REG			0x21c6c
#define V4H_PPI_RW_TERMCAL_CFG_0_REG			0x21c80
#define V4H_PPI_RW_OFFSETCAL_CFG_0_REG			0x21ca0

/* V4H CORE registers */
#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(n)	(0x22040 + ((n) * 2))	/* n = 0 - 15 */
#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_REG(n)	(0x22440 + ((n) * 2))	/* n = 0 - 15 */
#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_REG(n)	(0x22840 + ((n) * 2))	/* n = 0 - 15 */
#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_REG(n)	(0x22c40 + ((n) * 2))	/* n = 0 - 15 */
#define V4H_CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_REG(n)	(0x23040 + ((n) * 2))	/* n = 0 - 15 */
#define V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(n)	(0x23840 + ((n) * 2))	/* n = 0 - 11 */
#define V4H_CORE_DIG_RW_COMMON_REG(n)			(0x23880 + ((n) * 2))	/* n = 0 - 15 */
#define V4H_CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_REG(n)	(0x239e0 + ((n) * 2))	/* n = 0 - 3 */
#define V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG		0x2a400
#define V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG		0x2a60c

/* V4H C-PHY */
#define V4H_CORE_DIG_RW_TRIO0_REG(n)			(0x22100 + ((n) * 2))	/* n = 0 - 3 */
#define V4H_CORE_DIG_RW_TRIO1_REG(n)			(0x22500 + ((n) * 2))	/* n = 0 - 3 */
#define V4H_CORE_DIG_RW_TRIO2_REG(n)			(0x22900 + ((n) * 2))	/* n = 0 - 3 */
#define V4H_CORE_DIG_CLANE_0_RW_LP_0_REG		0x2a080
#define V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(n)		(0x2a100 + ((n) * 2))	/* n = 0 - 6 */
#define V4H_CORE_DIG_CLANE_1_RW_LP_0_REG		0x2a480
#define V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(n)		(0x2a500 + ((n) * 2))	/* n = 0 - 6 */
#define V4H_CORE_DIG_CLANE_2_RW_LP_0_REG		0x2a880
#define V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(n)		(0x2a900 + ((n) * 2))	/* n = 0 - 6 */

/* Per-symbol-rate C-PHY calibration values, indexed by msps (Msymbols/s). */
struct rcsi2_cphy_setting {
	u16 msps;
	u16 rx2;
	u16 trio0;
	u16 trio1;
	u16 trio2;
	u16 lane27;
	u16 lane29;
};

/* C-PHY settings table for R8A779G0 (V4H); hardware-provided constants. */
static const struct rcsi2_cphy_setting cphy_setting_table_r8a779g0[] = {
	{ .msps =   80, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0134, .trio2 = 0x6a, .lane27 = 0x0000, .lane29 = 0x0a24 },
	{ .msps =  100, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x00f5, .trio2 = 0x55, .lane27 = 0x0000, .lane29 = 0x0a24 },
	{ .msps =  200, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0077, .trio2 = 0x2b, .lane27 = 0x0000, .lane29 = 0x0a44 },
	{ .msps =  300, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x004d, .trio2 = 0x1d, .lane27 = 0x0000, .lane29 = 0x0a44 },
	{ .msps =  400, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0038, .trio2 = 0x16, .lane27 = 0x0000, .lane29 = 0x0a64 },
	{ .msps =  500, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x002b, .trio2 = 0x12, .lane27 = 0x0000, .lane29 = 0x0a64 },
	{ .msps =  600, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0023, .trio2 = 0x0f, .lane27 = 0x0000, .lane29 = 0x0a64 },
	{ .msps =  700, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x001d, .trio2 = 0x0d, .lane27 = 0x0000, .lane29 = 0x0a84 },
	{ .msps =  800, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0018, .trio2 = 0x0c, .lane27 = 0x0000, .lane29 = 0x0a84 },
	{ .msps =  900, .rx2 = 0x38, .trio0 = 0x024a, .trio1 = 0x0015, .trio2 = 0x0b, .lane27 = 0x0000, .lane29 = 0x0a84 },
	{ .msps = 1000, .rx2 = 0x3e, .trio0 = 0x024a, .trio1 = 0x0012, .trio2 = 0x0a, .lane27 = 0x0400, .lane29 = 0x0a84 },
	{ .msps = 1100, .rx2 = 0x44, .trio0 = 0x024a, .trio1 = 0x000f, .trio2 = 0x09, .lane27 = 0x0800, .lane29 = 0x0a84 },
	{ .msps = 1200, .rx2 = 0x4a, .trio0 = 0x024a, .trio1 = 0x000e, .trio2 = 0x08, .lane27 = 0x0c00, .lane29 = 0x0a84 },
	{ .msps = 1300, .rx2 = 0x51, .trio0 = 0x024a, .trio1 = 0x000c, .trio2 = 0x08, .lane27 = 0x0c00, .lane29 = 0x0aa4 },
	{ .msps = 1400, .rx2 = 0x57, .trio0 = 0x024a, .trio1 = 0x000b, .trio2 = 0x07, .lane27 = 0x1000, .lane29 = 0x0aa4 },
	{ .msps = 1500, .rx2 = 0x5d, .trio0 = 0x044a, .trio1 = 0x0009, .trio2 = 0x07, .lane27 = 0x1000, .lane29 = 0x0aa4 },
	{ .msps = 1600, .rx2 = 0x63, .trio0 = 0x044a, .trio1 = 0x0008, .trio2 = 0x07, .lane27 = 0x1400, .lane29 = 0x0aa4 },
	{ .msps = 1700, .rx2 = 0x6a, .trio0 = 0x044a, .trio1 = 0x0007, .trio2 = 0x06, .lane27 = 0x1400, .lane29 = 0x0aa4 },
	{ .msps = 1800, .rx2 = 0x70, .trio0 = 0x044a, .trio1 = 0x0007, .trio2 = 0x06, .lane27 = 0x1400, .lane29 = 0x0aa4 },
	{ .msps = 1900, .rx2 = 0x76, .trio0 = 0x044a, .trio1 = 0x0006, .trio2 = 0x06, .lane27 = 0x1400, .lane29 = 0x0aa4 },
	{ .msps = 2000, .rx2 = 0x7c, .trio0 = 0x044a, .trio1 = 0x0005, .trio2 = 0x06, .lane27 = 0x1800, .lane29 = 0x0aa4 },
	{ .msps = 2100, .rx2 = 0x83, .trio0 = 0x044a, .trio1 = 0x0005, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
	{ .msps = 2200, .rx2 = 0x89, .trio0 = 0x064a, .trio1 = 0x0004, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
	{ .msps = 2300, .rx2 = 0x8f, .trio0 = 0x064a, .trio1 = 0x0003, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
	{ .msps = 2400, .rx2 = 0x95, .trio0 = 0x064a, .trio1 = 0x0003, .trio2 = 0x05, .lane27 = 0x1800, .lane29 = 0x0aa4 },
	{ .msps = 2500, .rx2 = 0x9c, .trio0 = 0x064a, .trio1 = 0x0003, .trio2 = 0x05, .lane27 = 0x1c00, .lane29 = 0x0aa4 },
	{ .msps = 2600, .rx2 = 0xa2, .trio0 = 0x064a, .trio1 = 0x0002, .trio2 = 0x05, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
	{ .msps = 2700, .rx2 = 0xa8, .trio0 = 0x064a, .trio1 = 0x0002, .trio2 = 0x05, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
	{ .msps = 2800, .rx2 = 0xae, .trio0 = 0x064a, .trio1 = 0x0002, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
	{ .msps = 2900, .rx2 = 0xb5, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
	{ .msps = 3000, .rx2 = 0xbb, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
	{ .msps = 3100, .rx2 = 0xc1, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
	{ .msps = 3200, .rx2 = 0xc7, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
	{ .msps = 3300, .rx2 = 0xce, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
	{ .msps = 3400, .rx2 = 0xd4, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
	{ .msps = 3500, .rx2 = 0xda, .trio0 = 0x084a, .trio1 = 0x0001, .trio2 = 0x04, .lane27 = 0x1c00, .lane29 = 0x0ad4 },
	{ /* sentinel */ },
};

/* A single PHY test-interface write: data byte + code byte (see PHTW_REG). */
struct phtw_value {
	u16 data;
	u16 code;
};

/* Maps a link rate in Mbps to a register value; tables end with a sentinel. */
struct rcsi2_mbps_reg {
	u16 mbps;
	u16 reg;
};

static const struct rcsi2_mbps_reg phtw_mbps_v3u[] = {
	{ .mbps = 1500, .reg = 0xcc },
	{ .mbps = 1550, .reg = 0x1d },
	{ .mbps = 1600, .reg = 0x27 },
	{ .mbps = 1650, .reg = 0x30 },
	{ .mbps = 1700, .reg = 0x39 },
	{ .mbps = 1750, .reg = 0x42 },
	{ .mbps = 1800, .reg = 0x4b },
	{ .mbps = 1850, .reg = 0x55 },
	{ .mbps = 1900, .reg = 0x5e },
	{ .mbps = 1950, .reg = 0x67 },
	{ .mbps = 2000, .reg = 0x71 },
	{ .mbps = 2050, .reg = 0x79 },
	{ .mbps = 2100, .reg = 0x83 },
	{ .mbps = 2150, .reg = 0x8c },
	{ .mbps = 2200, .reg = 0x95 },
	{ .mbps = 2250, .reg = 0x9e },
	{ .mbps = 2300, .reg = 0xa7 },
	{ .mbps = 2350, .reg = 0xb0 },
	{ .mbps = 2400, .reg = 0xba },
	{ .mbps = 2450, .reg = 0xc3 },
	{ .mbps = 2500, .reg = 0xcc },
	{ /* sentinel */ },
};

static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = {
	{ .mbps =  80, .reg = 0x86 },
	{ .mbps =  90, .reg = 0x86 },
	{ .mbps = 100, .reg = 0x87 },
	{ .mbps = 110, .reg = 0x87 },
	{ .mbps = 120, .reg = 0x88 },
	{ .mbps = 130, .reg = 0x88 },
	{ .mbps = 140, .reg = 0x89 },
	{ .mbps = 150, .reg = 0x89 },
	{ .mbps = 160, .reg = 0x8a },
	{ .mbps = 170, .reg = 0x8a },
	{ .mbps = 180, .reg = 0x8b },
	{ .mbps = 190, .reg = 0x8b },
	{ .mbps = 205, .reg = 0x8c },
	{ .mbps = 220, .reg = 0x8d },
	{ .mbps = 235, .reg = 0x8e },
	{ .mbps = 250, .reg = 0x8e },
	{ /* sentinel */ },
};

static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = {
	{ .mbps =   80, .reg = 0x00 },
	{ .mbps =   90, .reg = 0x20 },
	{ .mbps =  100, .reg = 0x40 },
	{ .mbps =  110, .reg = 0x02 },
	{ .mbps =  130, .reg = 0x22 },
	{ .mbps =  140, .reg = 0x42 },
	{ .mbps =  150, .reg = 0x04 },
	{ .mbps =  170, .reg = 0x24 },
	{ .mbps =  180, .reg = 0x44 },
	{ .mbps =  200, .reg = 0x06 },
	{ .mbps =  220, .reg = 0x26 },
	{ .mbps =  240, .reg = 0x46 },
	{ .mbps =  250, .reg = 0x08 },
	{ .mbps =  270, .reg = 0x28 },
	{ .mbps =  300, .reg = 0x0a },
	{ .mbps =  330, .reg = 0x2a },
	{ .mbps =  360, .reg = 0x4a },
	{ .mbps =  400, .reg = 0x0c },
	{ .mbps =  450, .reg = 0x2c },
	{ .mbps =  500, .reg = 0x0e },
	{ .mbps =  550, .reg = 0x2e },
	{ .mbps =  600, .reg = 0x10 },
	{ .mbps =  650, .reg = 0x30 },
	{ .mbps =  700, .reg = 0x12 },
	{ .mbps =  750, .reg = 0x32 },
	{ .mbps =  800, .reg = 0x52 },
	{ .mbps =  850, .reg = 0x72 },
	{ .mbps =  900, .reg = 0x14 },
	{ .mbps =  950, .reg = 0x34 },
	{ .mbps = 1000, .reg = 0x54 },
	{ .mbps = 1050, .reg = 0x74 },
	{ .mbps = 1125, .reg = 0x16 },
	{ /* sentinel */ },
};

/* PHY Test Interface Clear */
#define PHTC_REG			0x58
#define PHTC_TESTCLR			BIT(0)

/* PHY Frequency Control */
#define PHYPLL_REG			0x68
#define PHYPLL_HSFREQRANGE(n)		((n) << 16)

static const struct rcsi2_mbps_reg hsfreqrange_v3u[] = {
	{ .mbps =   80, .reg = 0x00 },
	{ .mbps =   90, .reg = 0x10 },
	{ .mbps =  100, .reg = 0x20 },
	{ .mbps =  110, .reg = 0x30 },
	{ .mbps =  120, .reg = 0x01 },
	{ .mbps =  130, .reg = 0x11 },
	{ .mbps =  140, .reg = 0x21 },
	{ .mbps =  150, .reg = 0x31 },
	{ .mbps =  160, .reg = 0x02 },
	{ .mbps =  170, .reg = 0x12 },
	{ .mbps =  180, .reg = 0x22 },
	{ .mbps =  190, .reg = 0x32 },
	{ .mbps =  205, .reg = 0x03 },
	{ .mbps =  220, .reg = 0x13 },
	{ .mbps =  235, .reg = 0x23 },
	{ .mbps =  250, .reg = 0x33 },
	{ .mbps =  275, .reg = 0x04 },
	{ .mbps =  300, .reg = 0x14 },
	{ .mbps =  325, .reg = 0x25 },
	{ .mbps =  350, .reg = 0x35 },
	{ .mbps =  400, .reg = 0x05 },
	{ .mbps =  450, .reg = 0x16 },
	{ .mbps =  500, .reg = 0x26 },
	{ .mbps =  550, .reg = 0x37 },
	{ .mbps =  600, .reg = 0x07 },
	{ .mbps =  650, .reg = 0x18 },
	{ .mbps =  700, .reg = 0x28 },
	{ .mbps =  750, .reg = 0x39 },
	{ .mbps =  800, .reg = 0x09 },
	{ .mbps =  850, .reg = 0x19 },
	{ .mbps =  900, .reg = 0x29 },
	{ .mbps =  950, .reg = 0x3a },
	{ .mbps = 1000, .reg = 0x0a },
	{ .mbps = 1050, .reg = 0x1a },
	{ .mbps = 1100, .reg = 0x2a },
	{ .mbps = 1150, .reg = 0x3b },
	{ .mbps = 1200, .reg = 0x0b },
	{ .mbps = 1250, .reg = 0x1b },
	{ .mbps = 1300, .reg = 0x2b },
	{ .mbps = 1350, .reg = 0x3c },
	{ .mbps = 1400, .reg = 0x0c },
	{ .mbps = 1450, .reg = 0x1c },
	{ .mbps = 1500, .reg = 0x2c },
	{ .mbps = 1550, .reg = 0x3d },
	{ .mbps = 1600, .reg = 0x0d },
	{ .mbps = 1650, .reg = 0x1d },
	{ .mbps = 1700, .reg = 0x2e },
	{ .mbps = 1750, .reg = 0x3e },
	{ .mbps = 1800, .reg = 0x0e },
	{ .mbps = 1850, .reg = 0x1e },
	{ .mbps = 1900, .reg = 0x2f },
	{ .mbps = 1950, .reg = 0x3f },
	{ .mbps = 2000, .reg = 0x0f },
	{ .mbps = 2050, .reg = 0x40 },
	{ .mbps = 2100, .reg = 0x41 },
	{ .mbps = 2150, .reg = 0x42 },
	{ .mbps = 2200, .reg = 0x43 },
	{ .mbps = 2300, .reg = 0x45 },
	{ .mbps = 2350, .reg = 0x46 },
	{ .mbps = 2400, .reg = 0x47 },
	{ .mbps = 2450, .reg = 0x48 },
	{ .mbps = 2500, .reg = 0x49 },
	{ /* sentinel */ },
};

static const struct rcsi2_mbps_reg hsfreqrange_h3_v3h_m3n[] = {
	{ .mbps =   80, .reg = 0x00 },
	{ .mbps =   90, .reg = 0x10 },
	{ .mbps =  100, .reg = 0x20 },
	{ .mbps =  110, .reg = 0x30 },
	{ .mbps =  120, .reg = 0x01 },
	{ .mbps =  130, .reg = 0x11 },
	{ .mbps =  140, .reg = 0x21 },
	{ .mbps =  150, .reg = 0x31 },
	{ .mbps =  160, .reg = 0x02 },
	{ .mbps =  170, .reg = 0x12 },
	{ .mbps =  180, .reg = 0x22 },
	{ .mbps =  190, .reg = 0x32 },
	{ .mbps =  205, .reg = 0x03 },
	{ .mbps =  220, .reg = 0x13 },
	{ .mbps =  235, .reg = 0x23 },
	{ .mbps =  250, .reg = 0x33 },
	{ .mbps =  275, .reg = 0x04 },
	{ .mbps =  300, .reg = 0x14 },
	{ .mbps =  325, .reg = 0x25 },
	{ .mbps =  350, .reg = 0x35 },
	{ .mbps =  400, .reg = 0x05 },
	{ .mbps =  450, .reg = 0x16 },
	{ .mbps =  500, .reg = 0x26 },
	{ .mbps =  550, .reg = 0x37 },
	{ .mbps =  600, .reg = 0x07 },
	{ .mbps =  650, .reg = 0x18 },
	{ .mbps =  700, .reg = 0x28 },
	{ .mbps =  750, .reg = 0x39 },
	{ .mbps =  800, .reg = 0x09 },
	{ .mbps =  850, .reg = 0x19 },
	{ .mbps =  900, .reg = 0x29 },
	{ .mbps =  950, .reg = 0x3a },
	{ .mbps = 1000, .reg = 0x0a },
	{ .mbps = 1050, .reg = 0x1a },
	{ .mbps = 1100, .reg = 0x2a },
	{ .mbps = 1150, .reg = 0x3b },
	{ .mbps = 1200, .reg = 0x0b },
	{ .mbps = 1250, .reg = 0x1b },
	{ .mbps = 1300, .reg = 0x2b },
	{ .mbps = 1350, .reg = 0x3c },
	{ .mbps = 1400, .reg = 0x0c },
	{ .mbps = 1450, .reg = 0x1c },
	{ .mbps = 1500, .reg = 0x2c },
	{ /* sentinel */ },
};

static const struct rcsi2_mbps_reg hsfreqrange_m3w[] = {
	{ .mbps =   80,	.reg = 0x00 },
	{ .mbps =   90,	.reg = 0x10 },
	{ .mbps =  100,	.reg = 0x20 },
	{ .mbps =  110,	.reg = 0x30 },
	{ .mbps =  120,	.reg = 0x01 },
	{ .mbps =  130,	.reg = 0x11 },
	{ .mbps =  140,	.reg = 0x21 },
	{ .mbps =  150,	.reg = 0x31 },
	{ .mbps =  160,	.reg = 0x02 },
	{ .mbps =  170,	.reg = 0x12 },
	{ .mbps =  180,	.reg = 0x22 },
	{ .mbps =  190,	.reg = 0x32 },
	{ .mbps =  205,	.reg = 0x03 },
	{ .mbps =  220,	.reg = 0x13 },
	{ .mbps =  235,	.reg = 0x23 },
	{ .mbps =  250,	.reg = 0x33 },
	{ .mbps =  275,	.reg = 0x04 },
	{ .mbps =  300,	.reg = 0x14 },
	{ .mbps =  325,	.reg = 0x05 },
	{ .mbps =  350,	.reg = 0x15 },
	{ .mbps =  400,	.reg = 0x25 },
	{ .mbps =  450,	.reg = 0x06 },
	{ .mbps =  500,	.reg = 0x16 },
	{ .mbps =  550,	.reg = 0x07 },
	{ .mbps =  600,	.reg = 0x17 },
	{ .mbps =  650,	.reg = 0x08 },
	{ .mbps =  700,	.reg = 0x18 },
	{ .mbps =  750,	.reg = 0x09 },
	{ .mbps =  800,	.reg = 0x19 },
	{ .mbps =  850,	.reg = 0x29 },
	{ .mbps =  900,	.reg = 0x39 },
	{ .mbps =  950,	.reg = 0x0a },
	{ .mbps = 1000,	.reg = 0x1a },
	{ .mbps = 1050,	.reg = 0x2a },
	{ .mbps = 1100,	.reg = 0x3a },
	{ .mbps = 1150,	.reg = 0x0b },
	{ .mbps = 1200,	.reg = 0x1b },
	{ .mbps = 1250,	.reg = 0x2b },
	{ .mbps = 1300,	.reg = 0x3b },
	{ .mbps = 1350,	.reg = 0x0c },
	{ .mbps = 1400,	.reg = 0x1c },
	{ .mbps = 1450,	.reg = 0x2c },
	{ .mbps = 1500,	.reg = 0x3c },
	{ /* sentinel */ },
};

/* PHY ESC Error Monitor */
#define PHEERM_REG			0x74

/* PHY Clock Lane Monitor */
#define PHCLM_REG			0x78
#define PHCLM_STOPSTATECKL		BIT(0)

/* PHY Data Lane Monitor */
#define PHDLM_REG			0x7c

/* CSI0CLK Frequency Configuration Preset Register */
#define CSI0CLKFCPR_REG			0x260
#define CSI0CLKFREQRANGE(n)		((n & 0x3f) << 16)

/* Maps a media-bus code to its CSI-2 data type and bits per pixel. */
struct rcar_csi2_format {
	u32 code;
	unsigned int datatype;
	unsigned int bpp;
};

static const struct rcar_csi2_format rcar_csi2_formats[] = {
	{
		.code = MEDIA_BUS_FMT_RGB888_1X24,
		.datatype = MIPI_CSI2_DT_RGB888,
		.bpp = 24,
	}, {
		.code = MEDIA_BUS_FMT_UYVY8_1X16,
		.datatype = MIPI_CSI2_DT_YUV422_8B,
		.bpp = 16,
	}, {
		.code = MEDIA_BUS_FMT_YUYV8_1X16,
		.datatype = MIPI_CSI2_DT_YUV422_8B,
		.bpp = 16,
	}, {
		.code = MEDIA_BUS_FMT_UYVY8_2X8,
		.datatype = MIPI_CSI2_DT_YUV422_8B,
		.bpp = 16,
	}, {
		.code = MEDIA_BUS_FMT_YUYV10_2X10,
		.datatype = MIPI_CSI2_DT_YUV422_8B,
		.bpp = 20,
	}, {
		.code = MEDIA_BUS_FMT_Y10_1X10,
		.datatype = MIPI_CSI2_DT_RAW10,
		.bpp = 10,
	}, {
		.code = MEDIA_BUS_FMT_SBGGR8_1X8,
		.datatype = MIPI_CSI2_DT_RAW8,
		.bpp = 8,
	}, {
		.code = MEDIA_BUS_FMT_SGBRG8_1X8,
		.datatype = MIPI_CSI2_DT_RAW8,
		.bpp = 8,
	}, {
		.code = MEDIA_BUS_FMT_SGRBG8_1X8,
		.datatype = MIPI_CSI2_DT_RAW8,
		.bpp = 8,
	}, {
		.code = MEDIA_BUS_FMT_SRGGB8_1X8,
		.datatype = MIPI_CSI2_DT_RAW8,
		.bpp = 8,
	}, {
		.code = MEDIA_BUS_FMT_Y8_1X8,
		.datatype = MIPI_CSI2_DT_RAW8,
		.bpp = 8,
	},
};

/* Look up the format entry for @code; NULL when the code is unsupported. */
static const struct rcar_csi2_format *rcsi2_code_to_fmt(unsigned int code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rcar_csi2_formats); i++)
		if (rcar_csi2_formats[i].code == code)
			return &rcar_csi2_formats[i];

	return NULL;
}

enum rcar_csi2_pads {
	RCAR_CSI2_SINK,
	RCAR_CSI2_SOURCE_VC0,
	RCAR_CSI2_SOURCE_VC1,
	RCAR_CSI2_SOURCE_VC2,
	RCAR_CSI2_SOURCE_VC3,
	NR_OF_RCAR_CSI2_PAD,
};

/* Per-SoC operations and quirks for the CSI-2 receiver. */
struct rcar_csi2_info {
	int (*init_phtw)(struct rcar_csi2 *priv, unsigned int mbps);
	int (*phy_post_init)(struct rcar_csi2 *priv);
	int (*start_receiver)(struct rcar_csi2 *priv);
	void (*enter_standby)(struct rcar_csi2 *priv);
	const struct rcsi2_mbps_reg *hsfreqrange;
	unsigned int csi0clkfreqrange;
	unsigned int num_channels;
	bool clear_ulps;
	bool use_isp;
	bool support_dphy;
	bool support_cphy;
};

struct rcar_csi2 {
	struct device *dev;
	void __iomem *base;
	const struct rcar_csi2_info *info;
	struct reset_control *rstc;

	struct v4l2_subdev subdev;
	struct media_pad pads[NR_OF_RCAR_CSI2_PAD];

	struct v4l2_async_notifier notifier;
	struct v4l2_subdev *remote;
	unsigned int remote_pad;

	int channel_vc[4];

	struct mutex lock; /* Protects mf and stream_count. */
	struct v4l2_mbus_framefmt mf;
	int stream_count;

	bool cphy;
	unsigned short lanes;
	unsigned char lane_swap[4];
};

static inline struct rcar_csi2 *sd_to_csi2(struct v4l2_subdev *sd)
{
	return container_of(sd, struct rcar_csi2, subdev);
}

static inline struct rcar_csi2 *notifier_to_csi2(struct v4l2_async_notifier *n)
{
	return container_of(n, struct rcar_csi2, notifier);
}

/* MMIO accessors for the CSI-2 register block. */
static u32 rcsi2_read(struct rcar_csi2 *priv, unsigned int reg)
{
	return ioread32(priv->base + reg);
}

static void rcsi2_write(struct rcar_csi2 *priv, unsigned int reg, u32 data)
{
	iowrite32(data, priv->base + reg);
}

static void rcsi2_write16(struct rcar_csi2 *priv, unsigned int reg, u16 data)
{
	iowrite16(data, priv->base + reg);
}

/* Gen3 standby: disable the PHY and clear the PHY test interface. */
static void rcsi2_enter_standby_gen3(struct rcar_csi2 *priv)
{
	rcsi2_write(priv, PHYCNT_REG, 0);
	rcsi2_write(priv, PHTC_REG, PHTC_TESTCLR);
}

/* Put the receiver into standby: SoC hook, reset assert, runtime PM put. */
static void rcsi2_enter_standby(struct rcar_csi2 *priv)
{
	if (priv->info->enter_standby)
		priv->info->enter_standby(priv);

	reset_control_assert(priv->rstc);
	usleep_range(100, 150);
	pm_runtime_put(priv->dev);
}

/* Leave standby: runtime PM get and reset deassert. */
static int rcsi2_exit_standby(struct rcar_csi2 *priv)
{
	int ret;

	ret = pm_runtime_resume_and_get(priv->dev);
	if (ret < 0)
		return ret;

	reset_control_deassert(priv->rstc);

	return 0;
}

/*
 * Poll until the clock lane and all @lanes data lanes report stop state
 * (LP-11).  Returns 0 on success, -ETIMEDOUT after ~20 polls.
 */
static int rcsi2_wait_phy_start(struct rcar_csi2 *priv,
				unsigned int lanes)
{
	unsigned int timeout;

	/* Wait for the clock and data lanes to enter LP-11 state. */
	for (timeout = 0; timeout <= 20; timeout++) {
		const u32 lane_mask = (1 << lanes) - 1;

		if ((rcsi2_read(priv, PHCLM_REG) & PHCLM_STOPSTATECKL)  &&
		    (rcsi2_read(priv, PHDLM_REG) & lane_mask) == lane_mask)
			return 0;

		usleep_range(1000, 2000);
	}

	dev_err(priv->dev, "Timeout waiting for LP-11 state\n");

	return -ETIMEDOUT;
}

/*
 * Program PHYPLL with the hsfreqrange entry nearest to @mbps, picking the
 * closer of the two surrounding table entries.  Returns -ERANGE when @mbps
 * exceeds the table maximum.
 */
static int rcsi2_set_phypll(struct rcar_csi2 *priv, unsigned int mbps)
{
	const struct rcsi2_mbps_reg *hsfreq;
	const struct rcsi2_mbps_reg *hsfreq_prev = NULL;

	if (mbps < priv->info->hsfreqrange->mbps)
		dev_warn(priv->dev, "%u Mbps less than min PHY speed %u Mbps",
			 mbps, priv->info->hsfreqrange->mbps);

	for (hsfreq = priv->info->hsfreqrange; hsfreq->mbps != 0; hsfreq++) {
		if (hsfreq->mbps >= mbps)
			break;
		hsfreq_prev = hsfreq;
	}

	if (!hsfreq->mbps) {
		dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps);
		return -ERANGE;
	}

	if (hsfreq_prev &&
	    ((mbps - hsfreq_prev->mbps) <= (hsfreq->mbps - mbps)))
		hsfreq = hsfreq_prev;

	rcsi2_write(priv, PHYPLL_REG, PHYPLL_HSFREQRANGE(hsfreq->reg));

	return 0;
}

/*
 * Derive the per-lane link rate in Mbps from the remote subdevice's
 * V4L2_CID_PIXEL_RATE control.  Returns the rate or a negative errno.
 */
static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp,
			   unsigned int lanes)
{
	struct v4l2_subdev *source;
	struct v4l2_ctrl *ctrl;
	u64 mbps;

	if (!priv->remote)
		return -ENODEV;

	source = priv->remote;

	/* Read the pixel rate control from remote. */
	ctrl = v4l2_ctrl_find(source->ctrl_handler, V4L2_CID_PIXEL_RATE);
	if (!ctrl) {
		dev_err(priv->dev, "no pixel rate control in subdev %s\n",
			source->name);
		return -EINVAL;
	}

	/*
	 * Calculate the phypll in mbps.
	 * link_freq = (pixel_rate * bits_per_sample) / (2 * nr_of_lanes)
	 * bps = link_freq * 2
	 */
	mbps = v4l2_ctrl_g_ctrl_int64(ctrl) * bpp;
	do_div(mbps, lanes * 1000000);

	/* Adjust for C-PHY, divide by 2.8. */
	if (priv->cphy)
		mbps = div_u64(mbps * 5, 14);

	return mbps;
}

/*
 * Query the remote subdevice for the number of active data lanes and
 * validate it against this receiver's configuration.  Falls back to the
 * locally configured lane count when the remote has no get_mbus_config.
 */
static int rcsi2_get_active_lanes(struct rcar_csi2 *priv,
				  unsigned int *lanes)
{
	struct v4l2_mbus_config mbus_config = { 0 };
	int ret;

	*lanes = priv->lanes;

	ret = v4l2_subdev_call(priv->remote, pad, get_mbus_config,
			       priv->remote_pad, &mbus_config);
	if (ret == -ENOIOCTLCMD) {
		dev_dbg(priv->dev, "No remote mbus configuration available\n");
		return 0;
	}

	if (ret) {
		dev_err(priv->dev, "Failed to get remote mbus configuration\n");
		return ret;
	}

	switch (mbus_config.type) {
	case V4L2_MBUS_CSI2_CPHY:
		if (!priv->cphy)
			return -EINVAL;
		break;
	case V4L2_MBUS_CSI2_DPHY:
		if (priv->cphy)
			return -EINVAL;
		break;
	default:
		dev_err(priv->dev, "Unsupported media bus type %u\n",
			mbus_config.type);
		return -EINVAL;
	}

	if (mbus_config.bus.mipi_csi2.num_data_lanes > priv->lanes) {
		dev_err(priv->dev,
			"Unsupported mbus config: too many data lanes %u\n",
			mbus_config.bus.mipi_csi2.num_data_lanes);
		return -EINVAL;
	}

	*lanes = mbus_config.bus.mipi_csi2.num_data_lanes;

	return 0;
}

/*
 * Gen3 receiver start.  NOTE(review): this function continues beyond the
 * end of the visible chunk; only its head is documented here.
 */
static int rcsi2_start_receiver_gen3(struct rcar_csi2 *priv)
{
	const struct rcar_csi2_format *format;
	u32 phycnt, vcdt = 0, vcdt2 = 0, fld = 0;
	unsigned int lanes;
	unsigned int i;
	int mbps, ret;

	dev_dbg(priv->dev, "Input size (%ux%u%c)\n",
		priv->mf.width, priv->mf.height,
		priv->mf.field == V4L2_FIELD_NONE ? 'p' : 'i');

	/* Code is validated in set_fmt. */
	format = rcsi2_code_to_fmt(priv->mf.code);
	if (!format)
		return -EINVAL;

	/*
	 * Enable all supported CSI-2 channels with virtual channel and
	 * data type matching.
	 *
	 * NOTE: It's not possible to get individual datatype for each
	 *       source virtual channel. Once this is possible in V4L2
	 *       it should be used here.
	 */
	for (i = 0; i < priv->info->num_channels; i++) {
		u32 vcdt_part;

		if (priv->channel_vc[i] < 0)
			continue;

		vcdt_part = VCDT_SEL_VC(priv->channel_vc[i]) | VCDT_VCDTN_EN |
			VCDT_SEL_DTN_ON | VCDT_SEL_DT(format->datatype);

		/* Store in correct reg and offset.
*/ if (i < 2) vcdt |= vcdt_part << ((i % 2) * 16); else vcdt2 |= vcdt_part << ((i % 2) * 16); } if (priv->mf.field == V4L2_FIELD_ALTERNATE) { fld = FLD_DET_SEL(1) | FLD_FLD_EN4 | FLD_FLD_EN3 | FLD_FLD_EN2 | FLD_FLD_EN; if (priv->mf.height == 240) fld |= FLD_FLD_NUM(0); else fld |= FLD_FLD_NUM(1); } /* * Get the number of active data lanes inspecting the remote mbus * configuration. */ ret = rcsi2_get_active_lanes(priv, &lanes); if (ret) return ret; phycnt = PHYCNT_ENABLECLK; phycnt |= (1 << lanes) - 1; mbps = rcsi2_calc_mbps(priv, format->bpp, lanes); if (mbps < 0) return mbps; /* Enable interrupts. */ rcsi2_write(priv, INTEN_REG, INTEN_INT_AFIFO_OF | INTEN_INT_ERRSOTHS | INTEN_INT_ERRSOTSYNCHS); /* Init */ rcsi2_write(priv, TREF_REG, TREF_TREF); rcsi2_write(priv, PHTC_REG, 0); /* Configure */ if (!priv->info->use_isp) { rcsi2_write(priv, VCDT_REG, vcdt); if (vcdt2) rcsi2_write(priv, VCDT2_REG, vcdt2); } /* Lanes are zero indexed. */ rcsi2_write(priv, LSWAP_REG, LSWAP_L0SEL(priv->lane_swap[0] - 1) | LSWAP_L1SEL(priv->lane_swap[1] - 1) | LSWAP_L2SEL(priv->lane_swap[2] - 1) | LSWAP_L3SEL(priv->lane_swap[3] - 1)); /* Start */ if (priv->info->init_phtw) { ret = priv->info->init_phtw(priv, mbps); if (ret) return ret; } if (priv->info->hsfreqrange) { ret = rcsi2_set_phypll(priv, mbps); if (ret) return ret; } if (priv->info->csi0clkfreqrange) rcsi2_write(priv, CSI0CLKFCPR_REG, CSI0CLKFREQRANGE(priv->info->csi0clkfreqrange)); if (priv->info->use_isp) rcsi2_write(priv, PHYFRX_REG, PHYFRX_FORCERX_MODE_3 | PHYFRX_FORCERX_MODE_2 | PHYFRX_FORCERX_MODE_1 | PHYFRX_FORCERX_MODE_0); rcsi2_write(priv, PHYCNT_REG, phycnt); rcsi2_write(priv, LINKCNT_REG, LINKCNT_MONITOR_EN | LINKCNT_REG_MONI_PACT_EN | LINKCNT_ICLK_NONSTOP); rcsi2_write(priv, FLD_REG, fld); rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ); rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ | PHYCNT_RSTZ); ret = rcsi2_wait_phy_start(priv, lanes); if (ret) return ret; if (priv->info->use_isp) 
rcsi2_write(priv, PHYFRX_REG, 0); /* Run post PHY start initialization, if needed. */ if (priv->info->phy_post_init) { ret = priv->info->phy_post_init(priv); if (ret) return ret; } /* Clear Ultra Low Power interrupt. */ if (priv->info->clear_ulps) rcsi2_write(priv, INTSTATE_REG, INTSTATE_INT_ULPS_START | INTSTATE_INT_ULPS_END); return 0; } static int rcsi2_wait_phy_start_v4h(struct rcar_csi2 *priv, u32 match) { unsigned int timeout; u32 status; for (timeout = 0; timeout <= 10; timeout++) { status = rcsi2_read(priv, V4H_ST_PHYST_REG); if ((status & match) == match) return 0; usleep_range(1000, 2000); } return -ETIMEDOUT; } static int rcsi2_c_phy_setting_v4h(struct rcar_csi2 *priv, int msps) { const struct rcsi2_cphy_setting *conf; for (conf = cphy_setting_table_r8a779g0; conf->msps != 0; conf++) { if (conf->msps > msps) break; } if (!conf->msps) { dev_err(priv->dev, "Unsupported PHY speed for msps setting (%u Msps)", msps); return -ERANGE; } /* C-PHY specific */ rcsi2_write16(priv, V4H_CORE_DIG_RW_COMMON_REG(7), 0x0155); rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(7), 0x0068); rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(8), 0x0010); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_LP_0_REG, 0x463c); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_LP_0_REG, 0x463c); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_LP_0_REG, 0x463c); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(0), 0x00d5); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(0), 0x00d5); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(0), 0x00d5); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(1), 0x0013); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(1), 0x0013); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(1), 0x0013); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(5), 0x0013); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(5), 0x0013); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(5), 0x0013); rcsi2_write16(priv, 
V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(6), 0x000a); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(6), 0x000a); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(6), 0x000a); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_0_RW_HS_RX_REG(2), conf->rx2); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_RX_REG(2), conf->rx2); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_2_RW_HS_RX_REG(2), conf->rx2); rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(2), 0x0001); rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE1_CTRL_2_REG(2), 0); rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE2_CTRL_2_REG(2), 0x0001); rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE3_CTRL_2_REG(2), 0x0001); rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE4_CTRL_2_REG(2), 0); rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO0_REG(0), conf->trio0); rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(0), conf->trio0); rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO2_REG(0), conf->trio0); rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO0_REG(2), conf->trio2); rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(2), conf->trio2); rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO2_REG(2), conf->trio2); rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO0_REG(1), conf->trio1); rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO1_REG(1), conf->trio1); rcsi2_write16(priv, V4H_CORE_DIG_RW_TRIO2_REG(1), conf->trio1); /* * Configure pin-swap. * TODO: This registers is not documented yet, the values should depend * on the 'clock-lanes' and 'data-lanes' devicetree properties. 
*/ rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_CFG_0_REG, 0xf5); rcsi2_write16(priv, V4H_CORE_DIG_CLANE_1_RW_HS_TX_6_REG, 0x5000); /* Leave Shutdown mode */ rcsi2_write(priv, V4H_DPHY_RSTZ_REG, BIT(0)); rcsi2_write(priv, V4H_PHY_SHUTDOWNZ_REG, BIT(0)); /* Wait for calibration */ if (rcsi2_wait_phy_start_v4h(priv, V4H_ST_PHYST_ST_PHY_READY)) { dev_err(priv->dev, "PHY calibration failed\n"); return -ETIMEDOUT; } /* C-PHY setting - analog programing*/ rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(9), conf->lane29); rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_LANE0_CTRL_2_REG(7), conf->lane27); return 0; } static int rcsi2_start_receiver_v4h(struct rcar_csi2 *priv) { const struct rcar_csi2_format *format; unsigned int lanes; int msps; int ret; /* Calculate parameters */ format = rcsi2_code_to_fmt(priv->mf.code); if (!format) return -EINVAL; ret = rcsi2_get_active_lanes(priv, &lanes); if (ret) return ret; msps = rcsi2_calc_mbps(priv, format->bpp, lanes); if (msps < 0) return msps; /* Reset LINK and PHY*/ rcsi2_write(priv, V4H_CSI2_RESETN_REG, 0); rcsi2_write(priv, V4H_DPHY_RSTZ_REG, 0); rcsi2_write(priv, V4H_PHY_SHUTDOWNZ_REG, 0); /* PHY static setting */ rcsi2_write(priv, V4H_PHY_EN_REG, BIT(0)); rcsi2_write(priv, V4H_FLDC_REG, 0); rcsi2_write(priv, V4H_FLDD_REG, 0); rcsi2_write(priv, V4H_IDIC_REG, 0); rcsi2_write(priv, V4H_PHY_MODE_REG, BIT(0)); rcsi2_write(priv, V4H_N_LANES_REG, lanes - 1); /* Reset CSI2 */ rcsi2_write(priv, V4H_CSI2_RESETN_REG, BIT(0)); /* Registers static setting through APB */ /* Common setting */ rcsi2_write16(priv, V4H_CORE_DIG_ANACTRL_RW_COMMON_ANACTRL_REG(0), 0x1bfd); rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_STARTUP_1_1_REG, 0x0233); rcsi2_write16(priv, V4H_PPI_STARTUP_RW_COMMON_DPHY_REG(6), 0x0027); rcsi2_write16(priv, V4H_PPI_CALIBCTRL_RW_COMMON_BG_0_REG, 0x01f4); rcsi2_write16(priv, V4H_PPI_RW_TERMCAL_CFG_0_REG, 0x0013); rcsi2_write16(priv, V4H_PPI_RW_OFFSETCAL_CFG_0_REG, 0x0003); rcsi2_write16(priv, 
V4H_PPI_RW_LPDCOCAL_TIMEBASE_REG, 0x004f); rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_NREF_REG, 0x0320); rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_NREF_RANGE_REG, 0x000f); rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_TWAIT_CONFIG_REG, 0xfe18); rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_VT_CONFIG_REG, 0x0c3c); rcsi2_write16(priv, V4H_PPI_RW_LPDCOCAL_COARSE_CFG_REG, 0x0105); rcsi2_write16(priv, V4H_CORE_DIG_IOCTRL_RW_AFE_CB_CTRL_2_REG(6), 0x1000); rcsi2_write16(priv, V4H_PPI_RW_COMMON_CFG_REG, 0x0003); /* C-PHY settings */ ret = rcsi2_c_phy_setting_v4h(priv, msps); if (ret) return ret; rcsi2_wait_phy_start_v4h(priv, V4H_ST_PHYST_ST_STOPSTATE_0 | V4H_ST_PHYST_ST_STOPSTATE_1 | V4H_ST_PHYST_ST_STOPSTATE_2); return 0; } static int rcsi2_start(struct rcar_csi2 *priv) { int ret; ret = rcsi2_exit_standby(priv); if (ret < 0) return ret; ret = priv->info->start_receiver(priv); if (ret) { rcsi2_enter_standby(priv); return ret; } ret = v4l2_subdev_call(priv->remote, video, s_stream, 1); if (ret) { rcsi2_enter_standby(priv); return ret; } return 0; } static void rcsi2_stop(struct rcar_csi2 *priv) { rcsi2_enter_standby(priv); v4l2_subdev_call(priv->remote, video, s_stream, 0); } static int rcsi2_s_stream(struct v4l2_subdev *sd, int enable) { struct rcar_csi2 *priv = sd_to_csi2(sd); int ret = 0; mutex_lock(&priv->lock); if (!priv->remote) { ret = -ENODEV; goto out; } if (enable && priv->stream_count == 0) { ret = rcsi2_start(priv); if (ret) goto out; } else if (!enable && priv->stream_count == 1) { rcsi2_stop(priv); } priv->stream_count += enable ? 
1 : -1; out: mutex_unlock(&priv->lock); return ret; } static int rcsi2_set_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct rcar_csi2 *priv = sd_to_csi2(sd); struct v4l2_mbus_framefmt *framefmt; mutex_lock(&priv->lock); if (!rcsi2_code_to_fmt(format->format.code)) format->format.code = rcar_csi2_formats[0].code; if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) { priv->mf = format->format; } else { framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0); *framefmt = format->format; } mutex_unlock(&priv->lock); return 0; } static int rcsi2_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct rcar_csi2 *priv = sd_to_csi2(sd); mutex_lock(&priv->lock); if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) format->format = priv->mf; else format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0); mutex_unlock(&priv->lock); return 0; } static const struct v4l2_subdev_video_ops rcar_csi2_video_ops = { .s_stream = rcsi2_s_stream, }; static const struct v4l2_subdev_pad_ops rcar_csi2_pad_ops = { .set_fmt = rcsi2_set_pad_format, .get_fmt = rcsi2_get_pad_format, }; static const struct v4l2_subdev_ops rcar_csi2_subdev_ops = { .video = &rcar_csi2_video_ops, .pad = &rcar_csi2_pad_ops, }; static irqreturn_t rcsi2_irq(int irq, void *data) { struct rcar_csi2 *priv = data; u32 status, err_status; status = rcsi2_read(priv, INTSTATE_REG); err_status = rcsi2_read(priv, INTERRSTATE_REG); if (!status) return IRQ_HANDLED; rcsi2_write(priv, INTSTATE_REG, status); if (!err_status) return IRQ_HANDLED; rcsi2_write(priv, INTERRSTATE_REG, err_status); dev_info(priv->dev, "Transfer error, restarting CSI-2 receiver\n"); return IRQ_WAKE_THREAD; } static irqreturn_t rcsi2_irq_thread(int irq, void *data) { struct rcar_csi2 *priv = data; mutex_lock(&priv->lock); rcsi2_stop(priv); usleep_range(1000, 2000); if (rcsi2_start(priv)) dev_warn(priv->dev, "Failed to restart CSI-2 
receiver\n"); mutex_unlock(&priv->lock); return IRQ_HANDLED; } /* ----------------------------------------------------------------------------- * Async handling and registration of subdevices and links. */ static int rcsi2_notify_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asc) { struct rcar_csi2 *priv = notifier_to_csi2(notifier); int pad; pad = media_entity_get_fwnode_pad(&subdev->entity, asc->match.fwnode, MEDIA_PAD_FL_SOURCE); if (pad < 0) { dev_err(priv->dev, "Failed to find pad for %s\n", subdev->name); return pad; } priv->remote = subdev; priv->remote_pad = pad; dev_dbg(priv->dev, "Bound %s pad: %d\n", subdev->name, pad); return media_create_pad_link(&subdev->entity, pad, &priv->subdev.entity, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); } static void rcsi2_notify_unbind(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asc) { struct rcar_csi2 *priv = notifier_to_csi2(notifier); priv->remote = NULL; dev_dbg(priv->dev, "Unbind %s\n", subdev->name); } static const struct v4l2_async_notifier_operations rcar_csi2_notify_ops = { .bound = rcsi2_notify_bound, .unbind = rcsi2_notify_unbind, }; static int rcsi2_parse_v4l2(struct rcar_csi2 *priv, struct v4l2_fwnode_endpoint *vep) { unsigned int i; /* Only port 0 endpoint 0 is valid. 
*/ if (vep->base.port || vep->base.id) return -ENOTCONN; priv->lanes = vep->bus.mipi_csi2.num_data_lanes; switch (vep->bus_type) { case V4L2_MBUS_CSI2_DPHY: if (!priv->info->support_dphy) { dev_err(priv->dev, "D-PHY not supported\n"); return -EINVAL; } if (priv->lanes != 1 && priv->lanes != 2 && priv->lanes != 4) { dev_err(priv->dev, "Unsupported number of data-lanes for D-PHY: %u\n", priv->lanes); return -EINVAL; } priv->cphy = false; break; case V4L2_MBUS_CSI2_CPHY: if (!priv->info->support_cphy) { dev_err(priv->dev, "C-PHY not supported\n"); return -EINVAL; } if (priv->lanes != 3) { dev_err(priv->dev, "Unsupported number of data-lanes for C-PHY: %u\n", priv->lanes); return -EINVAL; } priv->cphy = true; break; default: dev_err(priv->dev, "Unsupported bus: %u\n", vep->bus_type); return -EINVAL; } for (i = 0; i < ARRAY_SIZE(priv->lane_swap); i++) { priv->lane_swap[i] = i < priv->lanes ? vep->bus.mipi_csi2.data_lanes[i] : i; /* Check for valid lane number. */ if (priv->lane_swap[i] < 1 || priv->lane_swap[i] > 4) { dev_err(priv->dev, "data-lanes must be in 1-4 range\n"); return -EINVAL; } } return 0; } static int rcsi2_parse_dt(struct rcar_csi2 *priv) { struct v4l2_async_connection *asc; struct fwnode_handle *fwnode; struct fwnode_handle *ep; struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = V4L2_MBUS_UNKNOWN, }; int ret; ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(priv->dev), 0, 0, 0); if (!ep) { dev_err(priv->dev, "Not connected to subdevice\n"); return -EINVAL; } ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep); if (ret) { dev_err(priv->dev, "Could not parse v4l2 endpoint\n"); fwnode_handle_put(ep); return -EINVAL; } ret = rcsi2_parse_v4l2(priv, &v4l2_ep); if (ret) { fwnode_handle_put(ep); return ret; } fwnode = fwnode_graph_get_remote_endpoint(ep); fwnode_handle_put(ep); dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode)); v4l2_async_subdev_nf_init(&priv->notifier, &priv->subdev); priv->notifier.ops = &rcar_csi2_notify_ops; asc = 
v4l2_async_nf_add_fwnode(&priv->notifier, fwnode, struct v4l2_async_connection); fwnode_handle_put(fwnode); if (IS_ERR(asc)) return PTR_ERR(asc); ret = v4l2_async_nf_register(&priv->notifier); if (ret) v4l2_async_nf_cleanup(&priv->notifier); return ret; } /* ----------------------------------------------------------------------------- * PHTW initialization sequences. * * NOTE: Magic values are from the datasheet and lack documentation. */ static int rcsi2_phtw_write(struct rcar_csi2 *priv, u16 data, u16 code) { unsigned int timeout; rcsi2_write(priv, PHTW_REG, PHTW_DWEN | PHTW_TESTDIN_DATA(data) | PHTW_CWEN | PHTW_TESTDIN_CODE(code)); /* Wait for DWEN and CWEN to be cleared by hardware. */ for (timeout = 0; timeout <= 20; timeout++) { if (!(rcsi2_read(priv, PHTW_REG) & (PHTW_DWEN | PHTW_CWEN))) return 0; usleep_range(1000, 2000); } dev_err(priv->dev, "Timeout waiting for PHTW_DWEN and/or PHTW_CWEN\n"); return -ETIMEDOUT; } static int rcsi2_phtw_write_array(struct rcar_csi2 *priv, const struct phtw_value *values) { const struct phtw_value *value; int ret; for (value = values; value->data || value->code; value++) { ret = rcsi2_phtw_write(priv, value->data, value->code); if (ret) return ret; } return 0; } static int rcsi2_phtw_write_mbps(struct rcar_csi2 *priv, unsigned int mbps, const struct rcsi2_mbps_reg *values, u16 code) { const struct rcsi2_mbps_reg *value; const struct rcsi2_mbps_reg *prev_value = NULL; for (value = values; value->mbps; value++) { if (value->mbps >= mbps) break; prev_value = value; } if (prev_value && ((mbps - prev_value->mbps) <= (value->mbps - mbps))) value = prev_value; if (!value->mbps) { dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps); return -ERANGE; } return rcsi2_phtw_write(priv, value->reg, code); } static int __rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps) { static const struct phtw_value step1[] = { { .data = 0xcc, .code = 0xe2 }, { .data = 0x01, .code = 0xe3 }, { .data = 0x11, .code = 0xe4 }, { 
.data = 0x01, .code = 0xe5 }, { .data = 0x10, .code = 0x04 }, { /* sentinel */ }, }; static const struct phtw_value step2[] = { { .data = 0x38, .code = 0x08 }, { .data = 0x01, .code = 0x00 }, { .data = 0x4b, .code = 0xac }, { .data = 0x03, .code = 0x00 }, { .data = 0x80, .code = 0x07 }, { /* sentinel */ }, }; int ret; ret = rcsi2_phtw_write_array(priv, step1); if (ret) return ret; if (mbps != 0 && mbps <= 250) { ret = rcsi2_phtw_write(priv, 0x39, 0x05); if (ret) return ret; ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_h3_v3h_m3n, 0xf1); if (ret) return ret; } return rcsi2_phtw_write_array(priv, step2); } static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps) { return __rcsi2_init_phtw_h3_v3h_m3n(priv, mbps); } static int rcsi2_init_phtw_h3es2(struct rcar_csi2 *priv, unsigned int mbps) { return __rcsi2_init_phtw_h3_v3h_m3n(priv, 0); } static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps) { return rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44); } static int rcsi2_phy_post_init_v3m_e3(struct rcar_csi2 *priv) { static const struct phtw_value step1[] = { { .data = 0xee, .code = 0x34 }, { .data = 0xee, .code = 0x44 }, { .data = 0xee, .code = 0x54 }, { .data = 0xee, .code = 0x84 }, { .data = 0xee, .code = 0x94 }, { /* sentinel */ }, }; return rcsi2_phtw_write_array(priv, step1); } static int rcsi2_init_phtw_v3u(struct rcar_csi2 *priv, unsigned int mbps) { /* In case of 1500Mbps or less */ static const struct phtw_value step1[] = { { .data = 0xcc, .code = 0xe2 }, { /* sentinel */ }, }; static const struct phtw_value step2[] = { { .data = 0x01, .code = 0xe3 }, { .data = 0x11, .code = 0xe4 }, { .data = 0x01, .code = 0xe5 }, { /* sentinel */ }, }; /* In case of 1500Mbps or less */ static const struct phtw_value step3[] = { { .data = 0x38, .code = 0x08 }, { /* sentinel */ }, }; static const struct phtw_value step4[] = { { .data = 0x01, .code = 0x00 }, { .data = 0x4b, .code = 0xac }, { .data = 0x03, .code = 0x00 
}, { .data = 0x80, .code = 0x07 }, { /* sentinel */ }, }; int ret; if (mbps != 0 && mbps <= 1500) ret = rcsi2_phtw_write_array(priv, step1); else ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3u, 0xe2); if (ret) return ret; ret = rcsi2_phtw_write_array(priv, step2); if (ret) return ret; if (mbps != 0 && mbps <= 1500) { ret = rcsi2_phtw_write_array(priv, step3); if (ret) return ret; } ret = rcsi2_phtw_write_array(priv, step4); if (ret) return ret; return ret; } /* ----------------------------------------------------------------------------- * Platform Device Driver. */ static int rcsi2_link_setup(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct rcar_csi2 *priv = sd_to_csi2(sd); struct video_device *vdev; int channel, vc; u32 id; if (!is_media_entity_v4l2_video_device(remote->entity)) { dev_err(priv->dev, "Remote is not a video device\n"); return -EINVAL; } vdev = media_entity_to_video_device(remote->entity); if (of_property_read_u32(vdev->dev_parent->of_node, "renesas,id", &id)) { dev_err(priv->dev, "No renesas,id, can't configure routing\n"); return -EINVAL; } channel = id % 4; if (flags & MEDIA_LNK_FL_ENABLED) { if (media_pad_remote_pad_first(local)) { dev_dbg(priv->dev, "Each VC can only be routed to one output channel\n"); return -EINVAL; } vc = local->index - 1; dev_dbg(priv->dev, "Route VC%d to VIN%u on output channel %d\n", vc, id, channel); } else { vc = -1; } priv->channel_vc[channel] = vc; return 0; } static const struct media_entity_operations rcar_csi2_entity_ops = { .link_setup = rcsi2_link_setup, .link_validate = v4l2_subdev_link_validate, }; static int rcsi2_probe_resources(struct rcar_csi2 *priv, struct platform_device *pdev) { int irq, ret; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = 
devm_request_threaded_irq(&pdev->dev, irq, rcsi2_irq, rcsi2_irq_thread, IRQF_SHARED, KBUILD_MODNAME, priv); if (ret) return ret; priv->rstc = devm_reset_control_get(&pdev->dev, NULL); return PTR_ERR_OR_ZERO(priv->rstc); } static const struct rcar_csi2_info rcar_csi2_info_r8a7795 = { .init_phtw = rcsi2_init_phtw_h3_v3h_m3n, .start_receiver = rcsi2_start_receiver_gen3, .enter_standby = rcsi2_enter_standby_gen3, .hsfreqrange = hsfreqrange_h3_v3h_m3n, .csi0clkfreqrange = 0x20, .num_channels = 4, .clear_ulps = true, .support_dphy = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a7795es2 = { .init_phtw = rcsi2_init_phtw_h3es2, .start_receiver = rcsi2_start_receiver_gen3, .enter_standby = rcsi2_enter_standby_gen3, .hsfreqrange = hsfreqrange_h3_v3h_m3n, .csi0clkfreqrange = 0x20, .num_channels = 4, .clear_ulps = true, .support_dphy = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a7796 = { .start_receiver = rcsi2_start_receiver_gen3, .enter_standby = rcsi2_enter_standby_gen3, .hsfreqrange = hsfreqrange_m3w, .num_channels = 4, .support_dphy = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a77961 = { .start_receiver = rcsi2_start_receiver_gen3, .enter_standby = rcsi2_enter_standby_gen3, .hsfreqrange = hsfreqrange_m3w, .num_channels = 4, .support_dphy = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = { .init_phtw = rcsi2_init_phtw_h3_v3h_m3n, .start_receiver = rcsi2_start_receiver_gen3, .enter_standby = rcsi2_enter_standby_gen3, .hsfreqrange = hsfreqrange_h3_v3h_m3n, .csi0clkfreqrange = 0x20, .num_channels = 4, .clear_ulps = true, .support_dphy = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a77970 = { .init_phtw = rcsi2_init_phtw_v3m_e3, .phy_post_init = rcsi2_phy_post_init_v3m_e3, .start_receiver = rcsi2_start_receiver_gen3, .enter_standby = rcsi2_enter_standby_gen3, .num_channels = 4, .support_dphy = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a77980 = { .init_phtw = 
rcsi2_init_phtw_h3_v3h_m3n, .start_receiver = rcsi2_start_receiver_gen3, .enter_standby = rcsi2_enter_standby_gen3, .hsfreqrange = hsfreqrange_h3_v3h_m3n, .csi0clkfreqrange = 0x20, .clear_ulps = true, .support_dphy = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = { .init_phtw = rcsi2_init_phtw_v3m_e3, .phy_post_init = rcsi2_phy_post_init_v3m_e3, .start_receiver = rcsi2_start_receiver_gen3, .enter_standby = rcsi2_enter_standby_gen3, .num_channels = 2, .support_dphy = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a779a0 = { .init_phtw = rcsi2_init_phtw_v3u, .start_receiver = rcsi2_start_receiver_gen3, .enter_standby = rcsi2_enter_standby_gen3, .hsfreqrange = hsfreqrange_v3u, .csi0clkfreqrange = 0x20, .clear_ulps = true, .use_isp = true, .support_dphy = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a779g0 = { .start_receiver = rcsi2_start_receiver_v4h, .use_isp = true, .support_cphy = true, }; static const struct of_device_id rcar_csi2_of_table[] = { { .compatible = "renesas,r8a774a1-csi2", .data = &rcar_csi2_info_r8a7796, }, { .compatible = "renesas,r8a774b1-csi2", .data = &rcar_csi2_info_r8a77965, }, { .compatible = "renesas,r8a774c0-csi2", .data = &rcar_csi2_info_r8a77990, }, { .compatible = "renesas,r8a774e1-csi2", .data = &rcar_csi2_info_r8a7795, }, { .compatible = "renesas,r8a7795-csi2", .data = &rcar_csi2_info_r8a7795, }, { .compatible = "renesas,r8a7796-csi2", .data = &rcar_csi2_info_r8a7796, }, { .compatible = "renesas,r8a77961-csi2", .data = &rcar_csi2_info_r8a77961, }, { .compatible = "renesas,r8a77965-csi2", .data = &rcar_csi2_info_r8a77965, }, { .compatible = "renesas,r8a77970-csi2", .data = &rcar_csi2_info_r8a77970, }, { .compatible = "renesas,r8a77980-csi2", .data = &rcar_csi2_info_r8a77980, }, { .compatible = "renesas,r8a77990-csi2", .data = &rcar_csi2_info_r8a77990, }, { .compatible = "renesas,r8a779a0-csi2", .data = &rcar_csi2_info_r8a779a0, }, { .compatible = "renesas,r8a779g0-csi2", .data = 
&rcar_csi2_info_r8a779g0, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, rcar_csi2_of_table); static const struct soc_device_attribute r8a7795[] = { { .soc_id = "r8a7795", .revision = "ES2.*", .data = &rcar_csi2_info_r8a7795es2, }, { /* sentinel */ } }; static int rcsi2_probe(struct platform_device *pdev) { const struct soc_device_attribute *attr; struct rcar_csi2 *priv; unsigned int i, num_pads; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->info = of_device_get_match_data(&pdev->dev); /* * The different ES versions of r8a7795 (H3) behave differently but * share the same compatible string. */ attr = soc_device_match(r8a7795); if (attr) priv->info = attr->data; priv->dev = &pdev->dev; mutex_init(&priv->lock); priv->stream_count = 0; ret = rcsi2_probe_resources(priv, pdev); if (ret) { dev_err(priv->dev, "Failed to get resources\n"); goto error_mutex; } platform_set_drvdata(pdev, priv); ret = rcsi2_parse_dt(priv); if (ret) goto error_mutex; priv->subdev.owner = THIS_MODULE; priv->subdev.dev = &pdev->dev; v4l2_subdev_init(&priv->subdev, &rcar_csi2_subdev_ops); v4l2_set_subdevdata(&priv->subdev, &pdev->dev); snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s", KBUILD_MODNAME, dev_name(&pdev->dev)); priv->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE; priv->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; priv->subdev.entity.ops = &rcar_csi2_entity_ops; num_pads = priv->info->use_isp ? 
2 : NR_OF_RCAR_CSI2_PAD; priv->pads[RCAR_CSI2_SINK].flags = MEDIA_PAD_FL_SINK; for (i = RCAR_CSI2_SOURCE_VC0; i < num_pads; i++) priv->pads[i].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&priv->subdev.entity, num_pads, priv->pads); if (ret) goto error_async; for (i = 0; i < ARRAY_SIZE(priv->channel_vc); i++) priv->channel_vc[i] = -1; pm_runtime_enable(&pdev->dev); ret = v4l2_async_register_subdev(&priv->subdev); if (ret < 0) goto error_async; dev_info(priv->dev, "%d lanes found\n", priv->lanes); return 0; error_async: v4l2_async_nf_unregister(&priv->notifier); v4l2_async_nf_cleanup(&priv->notifier); error_mutex: mutex_destroy(&priv->lock); return ret; } static void rcsi2_remove(struct platform_device *pdev) { struct rcar_csi2 *priv = platform_get_drvdata(pdev); v4l2_async_nf_unregister(&priv->notifier); v4l2_async_nf_cleanup(&priv->notifier); v4l2_async_unregister_subdev(&priv->subdev); pm_runtime_disable(&pdev->dev); mutex_destroy(&priv->lock); } static struct platform_driver rcar_csi2_pdrv = { .remove_new = rcsi2_remove, .probe = rcsi2_probe, .driver = { .name = "rcar-csi2", .suppress_bind_attrs = true, .of_match_table = rcar_csi2_of_table, }, }; module_platform_driver(rcar_csi2_pdrv); MODULE_AUTHOR("Niklas Söderlund <[email protected]>"); MODULE_DESCRIPTION("Renesas R-Car MIPI CSI-2 receiver driver"); MODULE_LICENSE("GPL");
linux-master
drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Driver for Renesas R-Car VIN
 *
 * Copyright (C) 2016 Renesas Electronics Corp.
 * Copyright (C) 2011-2013 Renesas Solutions Corp.
 * Copyright (C) 2013 Cogent Embedded, Inc., <[email protected]>
 * Copyright (C) 2008 Magnus Damm
 *
 * Based on the soc-camera rcar_vin driver
 */

#include <linux/pm_runtime.h>

#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-rect.h>

#include "rcar-vin.h"

/* Defaults applied when userspace has not (yet) negotiated a format. */
#define RVIN_DEFAULT_FORMAT	V4L2_PIX_FMT_YUYV
#define RVIN_DEFAULT_WIDTH	800
#define RVIN_DEFAULT_HEIGHT	600
#define RVIN_DEFAULT_FIELD	V4L2_FIELD_NONE
#define RVIN_DEFAULT_COLORSPACE	V4L2_COLORSPACE_SRGB

/* -----------------------------------------------------------------------------
 * Format Conversions
 */

/*
 * Pixel formats the VIN can write to memory. .bpp is the byte cost per
 * horizontal pixel of the first plane, used by rvin_format_bytesperline()
 * (chroma planes of NV12/NV16 are accounted for in rvin_format_sizeimage()).
 */
static const struct rvin_video_format rvin_formats[] = {
	{
		.fourcc			= V4L2_PIX_FMT_NV12,
		.bpp			= 1,
	},
	{
		.fourcc			= V4L2_PIX_FMT_NV16,
		.bpp			= 1,
	},
	{
		.fourcc			= V4L2_PIX_FMT_YUYV,
		.bpp			= 2,
	},
	{
		.fourcc			= V4L2_PIX_FMT_UYVY,
		.bpp			= 2,
	},
	{
		.fourcc			= V4L2_PIX_FMT_RGB565,
		.bpp			= 2,
	},
	{
		.fourcc			= V4L2_PIX_FMT_XRGB555,
		.bpp			= 2,
	},
	{
		.fourcc			= V4L2_PIX_FMT_XBGR32,
		.bpp			= 4,
	},
	{
		.fourcc			= V4L2_PIX_FMT_ARGB555,
		.bpp			= 2,
	},
	{
		.fourcc			= V4L2_PIX_FMT_ABGR32,
		.bpp			= 4,
	},
	{
		.fourcc			= V4L2_PIX_FMT_SBGGR8,
		.bpp			= 1,
	},
	{
		.fourcc			= V4L2_PIX_FMT_SGBRG8,
		.bpp			= 1,
	},
	{
		.fourcc			= V4L2_PIX_FMT_SGRBG8,
		.bpp			= 1,
	},
	{
		.fourcc			= V4L2_PIX_FMT_SRGGB8,
		.bpp			= 1,
	},
	{
		.fourcc			= V4L2_PIX_FMT_GREY,
		.bpp			= 1,
	},
};

/*
 * Look up the format descriptor for @pixelformat, applying per-SoC and
 * per-channel restrictions. Returns NULL if the format is unknown or not
 * usable on this VIN instance.
 */
const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
						       u32 pixelformat)
{
	int i;

	switch (pixelformat) {
	case V4L2_PIX_FMT_XBGR32:
		/* 32-bit RGB is not available on R-Car M1. */
		if (vin->info->model == RCAR_M1)
			return NULL;
		break;
	case V4L2_PIX_FMT_NV12:
		/*
		 * If NV12 is supported it's only supported on channels 0, 1, 4,
		 * 5, 8, 9, 12 and 13.
		 */
		if (!vin->info->nv12 || !(BIT(vin->id) & 0x3333))
			return NULL;
		break;
	default:
		break;
	}

	for (i = 0; i < ARRAY_SIZE(rvin_formats); i++)
		if (rvin_formats[i].fourcc == pixelformat)
			return rvin_formats + i;

	return NULL;
}

/*
 * Compute the line stride for @pix, rounding the width up to the
 * hardware alignment required by the pixel format (and field order).
 *
 * NOTE(review): the return type is u32 but -EINVAL is returned when the
 * WARN_ON() fires, which a caller would see as a huge positive stride.
 * In practice the pixelformat has always been validated before this is
 * called, so the WARN_ON() path should be unreachable — confirm.
 */
static u32 rvin_format_bytesperline(struct rvin_dev *vin,
				    struct v4l2_pix_format *pix)
{
	const struct rvin_video_format *fmt;
	u32 align;

	fmt = rvin_format_from_pixel(vin, pix->pixelformat);

	if (WARN_ON(!fmt))
		return -EINVAL;

	switch (pix->pixelformat) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
		align = 0x20;
		break;
	default:
		align = 0x10;
		break;
	}

	/* Sequential field storage needs a stricter 128-byte alignment. */
	if (V4L2_FIELD_IS_SEQUENTIAL(pix->field))
		align = 0x80;

	return ALIGN(pix->width, align) * fmt->bpp;
}

/*
 * Total buffer size for @pix, accounting for the extra chroma plane of
 * the semi-planar formats (NV12: 4:2:0, NV16: 4:2:2).
 */
static u32 rvin_format_sizeimage(struct v4l2_pix_format *pix)
{
	switch (pix->pixelformat) {
	case V4L2_PIX_FMT_NV12:
		return pix->bytesperline * pix->height * 3 / 2;
	case V4L2_PIX_FMT_NV16:
		return pix->bytesperline * pix->height * 2;
	default:
		return pix->bytesperline * pix->height;
	}
}

/*
 * Clamp and align @pix to what the VIN hardware can capture: fall back to
 * defaults for unsupported pixelformat/field values, align width to the
 * per-format requirement, bound to the SoC limits and fill in the derived
 * bytesperline/sizeimage fields.
 */
static void rvin_format_align(struct rvin_dev *vin, struct v4l2_pix_format *pix)
{
	u32 walign;

	if (!rvin_format_from_pixel(vin, pix->pixelformat))
		pix->pixelformat = RVIN_DEFAULT_FORMAT;

	switch (pix->field) {
	case V4L2_FIELD_TOP:
	case V4L2_FIELD_BOTTOM:
	case V4L2_FIELD_NONE:
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
	case V4L2_FIELD_INTERLACED:
	case V4L2_FIELD_ALTERNATE:
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
		break;
	default:
		pix->field = RVIN_DEFAULT_FIELD;
		break;
	}

	/* Hardware limits width alignment based on format. */
	switch (pix->pixelformat) {
	/* Multiple of 32 (2^5) for NV12/16. */
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
		walign = 5;
		break;
	/* Multiple of 2 (2^1) for YUV. */
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_UYVY:
		walign = 1;
		break;
	/* No multiple for RGB. */
	default:
		walign = 0;
		break;
	}

	/* Limit to VIN capabilities */
	v4l_bound_align_image(&pix->width, 5, vin->info->max_width, walign,
			      &pix->height, 2, vin->info->max_height, 0, 0);

	pix->bytesperline = rvin_format_bytesperline(vin, pix);
	pix->sizeimage = rvin_format_sizeimage(pix);

	vin_dbg(vin, "Format %ux%u bpl: %u size: %u\n",
		pix->width, pix->height, pix->bytesperline, pix->sizeimage);
}

/* -----------------------------------------------------------------------------
 * V4L2
 */

/*
 * Re-derive the capture format from the parallel source subdevice's active
 * format and reset the crop/compose rectangles to full frame. Used after
 * events that change the source geometry (s_std, s_dv_timings).
 */
static int rvin_reset_format(struct rvin_dev *vin)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = vin->parallel.source_pad,
	};
	int ret;

	ret = v4l2_subdev_call(vin_to_source(vin), pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret;

	v4l2_fill_pix_format(&vin->format, &fmt.format);

	vin->crop.top = 0;
	vin->crop.left = 0;
	vin->crop.width = vin->format.width;
	vin->crop.height = vin->format.height;

	/* Make use of the hardware interlacer by default. */
	if (vin->format.field == V4L2_FIELD_ALTERNATE) {
		vin->format.field = V4L2_FIELD_INTERLACED;
		vin->format.height *= 2;
	}

	rvin_format_align(vin, &vin->format);

	vin->compose.top = 0;
	vin->compose.left = 0;
	vin->compose.width = vin->format.width;
	vin->compose.height = vin->format.height;

	return 0;
}

/*
 * Negotiate @pix with the parallel source subdevice (TRY or ACTIVE
 * depending on @which). On success @pix is aligned for the VIN and, if
 * @src_rect is non-NULL, it receives the rectangle the source will
 * actually produce (before the VIN's own field override and scaling).
 */
static int rvin_try_format(struct rvin_dev *vin, u32 which,
			   struct v4l2_pix_format *pix,
			   struct v4l2_rect *src_rect)
{
	struct v4l2_subdev *sd = vin_to_source(vin);
	struct v4l2_subdev_state *sd_state;
	static struct lock_class_key key;
	struct v4l2_subdev_format format = {
		.which = which,
		.pad = vin->parallel.source_pad,
	};
	enum v4l2_field field;
	u32 width, height;
	int ret;

	/*
	 * FIXME: Drop this call, drivers are not supposed to use
	 * __v4l2_subdev_state_alloc().
	 */
	sd_state = __v4l2_subdev_state_alloc(sd, "rvin:state->lock", &key);
	if (IS_ERR(sd_state))
		return PTR_ERR(sd_state);

	if (!rvin_format_from_pixel(vin, pix->pixelformat))
		pix->pixelformat = RVIN_DEFAULT_FORMAT;

	v4l2_fill_mbus_format(&format.format, pix, vin->mbus_code);

	/* Allow the video device to override field and to scale */
	field = pix->field;
	width = pix->width;
	height = pix->height;

	ret = v4l2_subdev_call(sd, pad, set_fmt, sd_state, &format);
	if (ret < 0 && ret != -ENOIOCTLCMD)
		goto done;
	ret = 0;

	v4l2_fill_pix_format(pix, &format.format);

	if (src_rect) {
		src_rect->top = 0;
		src_rect->left = 0;
		src_rect->width = pix->width;
		src_rect->height = pix->height;
	}

	/* Restore the user-requested field/size saved above. */
	if (field != V4L2_FIELD_ANY)
		pix->field = field;

	pix->width = width;
	pix->height = height;

	rvin_format_align(vin, pix);
done:
	__v4l2_subdev_state_free(sd_state);

	return ret;
}

/* VIDIOC_QUERYCAP: report driver and card identity. */
static int rvin_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
	strscpy(cap->card, "R_Car_VIN", sizeof(cap->card));
	return 0;
}

/* VIDIOC_TRY_FMT: negotiate with the source without committing anything. */
static int rvin_try_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct rvin_dev *vin = video_drvdata(file);

	return rvin_try_format(vin, V4L2_SUBDEV_FORMAT_TRY, &f->fmt.pix, NULL);
}

/*
 * VIDIOC_S_FMT: commit a new capture format and shrink the crop/compose
 * rectangles so they still fit the new source/output geometry.
 */
static int rvin_s_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *f)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_rect fmt_rect, src_rect;
	int ret;

	if (vb2_is_busy(&vin->queue))
		return -EBUSY;

	ret = rvin_try_format(vin, V4L2_SUBDEV_FORMAT_ACTIVE, &f->fmt.pix,
			      &src_rect);
	if (ret)
		return ret;

	vin->format = f->fmt.pix;

	fmt_rect.top = 0;
	fmt_rect.left = 0;
	fmt_rect.width = vin->format.width;
	fmt_rect.height = vin->format.height;

	v4l2_rect_map_inside(&vin->crop, &src_rect);
	v4l2_rect_map_inside(&vin->compose, &fmt_rect);

	return 0;
}

/* VIDIOC_G_FMT: return the currently active capture format. */
static int rvin_g_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *f)
{
	struct rvin_dev *vin = video_drvdata(file);

	f->fmt.pix = vin->format;

	return 0;
}

/*
 * VIDIOC_ENUM_FMT: enumerate pixel formats, optionally filtered by the
 * media bus code supplied by userspace.
 */
static int rvin_enum_fmt_vid_cap(struct file *file, void *priv,
				 struct v4l2_fmtdesc *f)
{
	struct rvin_dev *vin = video_drvdata(file);
	unsigned int i;
	int matched;

	/*
	 * If mbus_code is set only enumerate supported pixel formats for that
	 * bus code. Converting from YCbCr to RGB and RGB to YCbCr is possible
	 * with VIN, so all supported YCbCr and RGB media bus codes can produce
	 * all of the related pixel formats. If mbus_code is not set enumerate
	 * all possible pixelformats.
	 *
	 * TODO: Once raw MEDIA_BUS_FMT_SRGGB12_1X12 format is added to the
	 * driver this needs to be extended so raw media bus code only result in
	 * raw pixel format.
	 */
	switch (f->mbus_code) {
	case 0:
	case MEDIA_BUS_FMT_YUYV8_1X16:
	case MEDIA_BUS_FMT_UYVY8_1X16:
	case MEDIA_BUS_FMT_UYVY8_2X8:
	case MEDIA_BUS_FMT_UYVY10_2X10:
	case MEDIA_BUS_FMT_RGB888_1X24:
		break;
	case MEDIA_BUS_FMT_SBGGR8_1X8:
		/* Raw bus codes map to exactly one pixel format. */
		if (f->index)
			return -EINVAL;
		f->pixelformat = V4L2_PIX_FMT_SBGGR8;
		return 0;
	case MEDIA_BUS_FMT_SGBRG8_1X8:
		if (f->index)
			return -EINVAL;
		f->pixelformat = V4L2_PIX_FMT_SGBRG8;
		return 0;
	case MEDIA_BUS_FMT_SGRBG8_1X8:
		if (f->index)
			return -EINVAL;
		f->pixelformat = V4L2_PIX_FMT_SGRBG8;
		return 0;
	case MEDIA_BUS_FMT_SRGGB8_1X8:
		if (f->index)
			return -EINVAL;
		f->pixelformat = V4L2_PIX_FMT_SRGGB8;
		return 0;
	default:
		return -EINVAL;
	}

	/* Count only formats valid on this VIN until @index is reached. */
	matched = -1;
	for (i = 0; i < ARRAY_SIZE(rvin_formats); i++) {
		if (rvin_format_from_pixel(vin, rvin_formats[i].fourcc))
			matched++;

		if (matched == f->index) {
			f->pixelformat = rvin_formats[i].fourcc;
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Query the active format of whatever feeds this VIN (the connected
 * media-controller pad, or the parallel source) and return it as a
 * rectangle. The height is doubled when the source delivers alternating
 * fields that the VIN will weave into full frames.
 */
static int rvin_remote_rectangle(struct rvin_dev *vin, struct v4l2_rect *rect)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *sd;
	unsigned int index;
	int ret;

	if (vin->info->use_mc) {
		struct media_pad *pad = media_pad_remote_pad_first(&vin->pad);

		if (!pad)
			return -EINVAL;
		sd = media_entity_to_v4l2_subdev(pad->entity);
		index = pad->index;
	} else {
		sd = vin_to_source(vin);
		index = vin->parallel.source_pad;
	}

	fmt.pad = index;
	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret;

	rect->left = rect->top = 0;
	rect->width = fmt.format.width;
	rect->height = fmt.format.height;

	if (fmt.format.field == V4L2_FIELD_ALTERNATE) {
		switch (vin->format.field) {
		case V4L2_FIELD_INTERLACED_TB:
		case V4L2_FIELD_INTERLACED_BT:
		case V4L2_FIELD_INTERLACED:
		case V4L2_FIELD_SEQ_TB:
		case V4L2_FIELD_SEQ_BT:
			rect->height *= 2;
			break;
		}
	}

	return 0;
}

/* VIDIOC_G_SELECTION: only available on VIN instances with a scaler. */
static int rvin_g_selection(struct file *file, void *fh,
			    struct v4l2_selection *s)
{
	struct rvin_dev *vin = video_drvdata(file);
	int ret;

	if (!vin->scaler)
		return -ENOIOCTLCMD;

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		/* Crop bounds are the full source frame. */
		ret = rvin_remote_rectangle(vin, &s->r);
		if (ret)
			return ret;

		break;
	case V4L2_SEL_TGT_CROP:
		s->r = vin->crop;
		break;
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		/* Compose bounds are the full output format. */
		s->r.left = s->r.top = 0;
		s->r.width = vin->format.width;
		s->r.height = vin->format.height;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		s->r = vin->compose;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * VIDIOC_S_SELECTION: set the crop (within the source frame) or compose
 * (within the output buffer) rectangle. Takes effect immediately, even
 * while streaming.
 */
static int rvin_s_selection(struct file *file, void *fh,
			    struct v4l2_selection *s)
{
	struct rvin_dev *vin = video_drvdata(file);
	const struct rvin_video_format *fmt;
	struct v4l2_rect r = s->r;
	struct v4l2_rect max_rect;
	struct v4l2_rect min_rect = {
		.width = 6,
		.height = 2,
	};
	int ret;

	if (!vin->scaler)
		return -ENOIOCTLCMD;

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	v4l2_rect_set_min_size(&r, &min_rect);

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		/* Can't crop outside of source input */
		ret = rvin_remote_rectangle(vin, &max_rect);
		if (ret)
			return ret;

		v4l2_rect_map_inside(&r, &max_rect);

		v4l_bound_align_image(&r.width, 6, max_rect.width, 0,
				      &r.height, 2, max_rect.height, 0, 0);

		r.top = clamp_t(s32, r.top, 0, max_rect.height - r.height);
		r.left = clamp_t(s32, r.left, 0, max_rect.width - r.width);

		vin->crop = s->r = r;

		vin_dbg(vin, "Cropped %dx%d@%d:%d of %dx%d\n",
			r.width, r.height, r.left, r.top,
			max_rect.width, max_rect.height);
		break;
	case V4L2_SEL_TGT_COMPOSE:
		/* Make sure compose rect fits inside output format */
		max_rect.top = max_rect.left = 0;
		max_rect.width = vin->format.width;
		max_rect.height = vin->format.height;
		v4l2_rect_map_inside(&r, &max_rect);

		/*
		 * Composing is done by adding a offset to the buffer address,
		 * the HW wants this address to be aligned to HW_BUFFER_MASK.
		 * Make sure the top and left values meets this requirement.
		 */
		while ((r.top * vin->format.bytesperline) & HW_BUFFER_MASK)
			r.top--;

		/*
		 * NOTE(review): fmt cannot be NULL here since
		 * vin->format.pixelformat was validated when it was set;
		 * confirm there is no path that stores an unvalidated format.
		 */
		fmt = rvin_format_from_pixel(vin, vin->format.pixelformat);
		while ((r.left * fmt->bpp) & HW_BUFFER_MASK)
			r.left--;

		vin->compose = s->r = r;

		vin_dbg(vin, "Compose %dx%d@%d:%d in %dx%d\n",
			r.width, r.height, r.left, r.top,
			vin->format.width, vin->format.height);
		break;
	default:
		return -EINVAL;
	}

	/* HW supports modifying configuration while running */
	rvin_crop_scale_comp(vin);

	return 0;
}

/* VIDIOC_G_PARM: forwarded to the parallel source subdevice. */
static int rvin_g_parm(struct file *file, void *priv,
		       struct v4l2_streamparm *parm)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);

	return v4l2_g_parm_cap(&vin->vdev, sd, parm);
}

/* VIDIOC_S_PARM: forwarded to the parallel source subdevice. */
static int rvin_s_parm(struct file *file, void *priv,
		       struct v4l2_streamparm *parm)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);

	return v4l2_s_parm_cap(&vin->vdev, sd, parm);
}

/* VIDIOC_G_PIXELASPECT: forwarded to the parallel source subdevice. */
static int rvin_g_pixelaspect(struct file *file, void *priv,
			      int type, struct v4l2_fract *f)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);

	if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	return v4l2_subdev_call(sd, video, g_pixelaspect, f);
}

/*
 * VIDIOC_ENUM_INPUT: a single "Camera" input whose capabilities (DV
 * timings vs. analog standards) depend on what the source supports.
 */
static int rvin_enum_input(struct file *file, void *priv,
			   struct v4l2_input *i)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	if (i->index != 0)
		return -EINVAL;

	ret = v4l2_subdev_call(sd, video, g_input_status, &i->status);
	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
		return ret;

	i->type = V4L2_INPUT_TYPE_CAMERA;

	if (v4l2_subdev_has_op(sd, pad, dv_timings_cap)) {
		i->capabilities = V4L2_IN_CAP_DV_TIMINGS;
		i->std = 0;
	} else {
		i->capabilities = V4L2_IN_CAP_STD;
		i->std = vin->vdev.tvnorms;
	}

	strscpy(i->name, "Camera", sizeof(i->name));

	return 0;
}

/* VIDIOC_G_INPUT: only input 0 exists. */
static int rvin_g_input(struct file *file, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}

/* VIDIOC_S_INPUT: only input 0 is accepted. */
static int rvin_s_input(struct file *file, void *priv, unsigned int i)
{
	if (i > 0)
		return -EINVAL;
	return 0;
}

/* VIDIOC_QUERYSTD: forwarded to the parallel source subdevice. */
static int rvin_querystd(struct file *file, void *priv, v4l2_std_id *a)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);

	return v4l2_subdev_call(sd, video, querystd, a);
}

/* VIDIOC_S_STD: set the analog standard and re-derive the format. */
static int rvin_s_std(struct file *file, void *priv, v4l2_std_id a)
{
	struct rvin_dev *vin = video_drvdata(file);
	int ret;

	ret = v4l2_subdev_call(vin_to_source(vin), video, s_std, a);
	if (ret < 0)
		return ret;

	vin->std = a;

	/* Changing the standard will change the width/height */
	return rvin_reset_format(vin);
}

/* VIDIOC_G_STD: not available on DV-timings sources. */
static int rvin_g_std(struct file *file, void *priv, v4l2_std_id *a)
{
	struct rvin_dev *vin = video_drvdata(file);

	if (v4l2_subdev_has_op(vin_to_source(vin), pad, dv_timings_cap))
		return -ENOIOCTLCMD;

	*a = vin->std;

	return 0;
}

/* Subscribe to source-change events (depth 4) or control events. */
static int rvin_subscribe_event(struct v4l2_fh *fh,
				const struct v4l2_event_subscription *sub)
{
	switch (sub->type) {
	case V4L2_EVENT_SOURCE_CHANGE:
		return v4l2_event_subscribe(fh, sub, 4, NULL);
	}
	return v4l2_ctrl_subscribe_event(fh, sub);
}

/*
 * VIDIOC_ENUM_DV_TIMINGS: forwarded to the source. The pad number is
 * temporarily rewritten to the source's sink pad and restored before
 * returning to userspace; same pattern in the dv_timings_cap/edid calls
 * below.
 */
static int rvin_enum_dv_timings(struct file *file, void *priv_fh,
				struct v4l2_enum_dv_timings *timings)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	if (timings->pad)
		return -EINVAL;

	timings->pad = vin->parallel.sink_pad;

	ret = v4l2_subdev_call(sd, pad, enum_dv_timings, timings);

	timings->pad = 0;

	return ret;
}

/* VIDIOC_S_DV_TIMINGS: set timings on the source and re-derive the format. */
static int rvin_s_dv_timings(struct file *file, void *priv_fh,
			     struct v4l2_dv_timings *timings)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	ret = v4l2_subdev_call(sd, video, s_dv_timings, timings);
	if (ret)
		return ret;

	/* Changing the timings will change the width/height */
	return rvin_reset_format(vin);
}

/* VIDIOC_G_DV_TIMINGS: forwarded to the source. */
static int rvin_g_dv_timings(struct file *file, void *priv_fh,
			     struct v4l2_dv_timings *timings)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);

	return v4l2_subdev_call(sd, video, g_dv_timings, timings);
}

/* VIDIOC_QUERY_DV_TIMINGS: forwarded to the source. */
static int rvin_query_dv_timings(struct file *file, void *priv_fh,
				 struct v4l2_dv_timings *timings)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);

	return v4l2_subdev_call(sd, video, query_dv_timings, timings);
}

/* VIDIOC_DV_TIMINGS_CAP: forwarded to the source's sink pad. */
static int rvin_dv_timings_cap(struct file *file, void *priv_fh,
			       struct v4l2_dv_timings_cap *cap)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	if (cap->pad)
		return -EINVAL;

	cap->pad = vin->parallel.sink_pad;

	ret = v4l2_subdev_call(sd, pad, dv_timings_cap, cap);

	cap->pad = 0;

	return ret;
}

/* VIDIOC_G_EDID: forwarded to the source's sink pad. */
static int rvin_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	if (edid->pad)
		return -EINVAL;

	edid->pad = vin->parallel.sink_pad;

	ret = v4l2_subdev_call(sd, pad, get_edid, edid);

	edid->pad = 0;

	return ret;
}

/* VIDIOC_S_EDID: forwarded to the source's sink pad. */
static int rvin_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
{
	struct rvin_dev *vin = video_drvdata(file);
	struct v4l2_subdev *sd = vin_to_source(vin);
	int ret;

	if (edid->pad)
		return -EINVAL;

	edid->pad = vin->parallel.sink_pad;

	ret = v4l2_subdev_call(sd, pad, set_edid, edid);

	edid->pad = 0;

	return ret;
}

/* Ioctl table for the non-media-controller (parallel video) mode. */
static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
	.vidioc_querycap		= rvin_querycap,
	.vidioc_try_fmt_vid_cap		= rvin_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap		= rvin_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= rvin_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap	= rvin_enum_fmt_vid_cap,

	.vidioc_g_selection		= rvin_g_selection,
	.vidioc_s_selection		= rvin_s_selection,

	.vidioc_g_parm			= rvin_g_parm,
	.vidioc_s_parm			= rvin_s_parm,

	.vidioc_g_pixelaspect		= rvin_g_pixelaspect,

	.vidioc_enum_input		= rvin_enum_input,
	.vidioc_g_input			= rvin_g_input,
	.vidioc_s_input			= rvin_s_input,

	.vidioc_dv_timings_cap		= rvin_dv_timings_cap,
	.vidioc_enum_dv_timings		= rvin_enum_dv_timings,
	.vidioc_g_dv_timings		= rvin_g_dv_timings,
	.vidioc_s_dv_timings		= rvin_s_dv_timings,
	.vidioc_query_dv_timings	= rvin_query_dv_timings,

	.vidioc_g_edid			= rvin_g_edid,
	.vidioc_s_edid			= rvin_s_edid,

	.vidioc_querystd		= rvin_querystd,
	.vidioc_g_std			= rvin_g_std,
	.vidioc_s_std			= rvin_s_std,

	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,

	.vidioc_log_status		= v4l2_ctrl_log_status,
	.vidioc_subscribe_event		= rvin_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};

/* -----------------------------------------------------------------------------
 * V4L2 Media Controller
 */

/*
 * Media-controller variant of format negotiation: no subdevice is
 * consulted (routing is handled via the media graph); only align the
 * format and force the colorspace fields.
 */
static void rvin_mc_try_format(struct rvin_dev *vin,
			       struct v4l2_pix_format *pix)
{
	/*
	 * The V4L2 specification clearly documents the colorspace fields
	 * as being set by drivers for capture devices. Using the values
	 * supplied by userspace thus wouldn't comply with the API. Until
	 * the API is updated force fixed values.
	 */
	pix->colorspace = RVIN_DEFAULT_COLORSPACE;
	pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
	pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
	pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace,
							  pix->ycbcr_enc);

	rvin_format_align(vin, pix);
}

/* VIDIOC_TRY_FMT (media-controller mode). */
static int rvin_mc_try_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_format *f)
{
	struct rvin_dev *vin = video_drvdata(file);

	rvin_mc_try_format(vin, &f->fmt.pix);

	return 0;
}

/*
 * VIDIOC_S_FMT (media-controller mode): commit the format and reset
 * crop/compose to the full frame.
 */
static int rvin_mc_s_fmt_vid_cap(struct file *file, void *priv,
				 struct v4l2_format *f)
{
	struct rvin_dev *vin = video_drvdata(file);

	if (vb2_is_busy(&vin->queue))
		return -EBUSY;

	rvin_mc_try_format(vin, &f->fmt.pix);

	vin->format = f->fmt.pix;

	vin->crop.top = 0;
	vin->crop.left = 0;
	vin->crop.width = vin->format.width;
	vin->crop.height = vin->format.height;
	vin->compose = vin->crop;

	return 0;
}

/* Ioctl table for the media-controller (CSI-2) mode. */
static const struct v4l2_ioctl_ops rvin_mc_ioctl_ops = {
	.vidioc_querycap		= rvin_querycap,
	.vidioc_try_fmt_vid_cap		= rvin_mc_try_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap		= rvin_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= rvin_mc_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_cap	= rvin_enum_fmt_vid_cap,

	.vidioc_g_selection		= rvin_g_selection,
	.vidioc_s_selection		= rvin_s_selection,

	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,

	.vidioc_log_status		= v4l2_ctrl_log_status,
	.vidioc_subscribe_event		= rvin_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};

/* -----------------------------------------------------------------------------
 * File Operations
 */

/*
 * Power the parallel source subdevice up or down. -ENOIOCTLCMD/-ENODEV
 * are tolerated since s_power is optional.
 */
static int rvin_power_parallel(struct rvin_dev *vin, bool on)
{
	struct v4l2_subdev *sd = vin_to_source(vin);
	int power = on ? 1 : 0;
	int ret;

	ret = v4l2_subdev_call(sd, core, s_power, power);
	if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
		return ret;

	return 0;
}

/*
 * open(): take a runtime-PM reference, power the pipeline (MC mode) or
 * the parallel source (first open only), and set up the controls.
 * Unwinds everything in reverse on failure.
 */
static int rvin_open(struct file *file)
{
	struct rvin_dev *vin = video_drvdata(file);
	int ret;

	ret = pm_runtime_resume_and_get(vin->dev);
	if (ret < 0)
		return ret;

	ret = mutex_lock_interruptible(&vin->lock);
	if (ret)
		goto err_pm;

	file->private_data = vin;

	ret = v4l2_fh_open(file);
	if (ret)
		goto err_unlock;

	if (vin->info->use_mc)
		ret = v4l2_pipeline_pm_get(&vin->vdev.entity);
	else if (v4l2_fh_is_singular_file(file))
		ret = rvin_power_parallel(vin, true);

	if (ret < 0)
		goto err_open;

	ret = v4l2_ctrl_handler_setup(&vin->ctrl_handler);
	if (ret)
		goto err_power;

	mutex_unlock(&vin->lock);

	return 0;
err_power:
	if (vin->info->use_mc)
		v4l2_pipeline_pm_put(&vin->vdev.entity);
	else if (v4l2_fh_is_singular_file(file))
		rvin_power_parallel(vin, false);
err_open:
	v4l2_fh_release(file);
err_unlock:
	mutex_unlock(&vin->lock);
err_pm:
	pm_runtime_put(vin->dev);

	return ret;
}

/*
 * release(): stop any streaming via the vb2 helper, drop pipeline/source
 * power (mirroring rvin_open()) and release the runtime-PM reference.
 */
static int rvin_release(struct file *file)
{
	struct rvin_dev *vin = video_drvdata(file);
	bool fh_singular;
	int ret;

	mutex_lock(&vin->lock);

	/* Save the singular status before we call the clean-up helper */
	fh_singular = v4l2_fh_is_singular_file(file);

	/* the release helper will cleanup any on-going streaming */
	ret = _vb2_fop_release(file, NULL);

	if (vin->info->use_mc) {
		v4l2_pipeline_pm_put(&vin->vdev.entity);
	} else {
		if (fh_singular)
			rvin_power_parallel(vin, false);
	}

	mutex_unlock(&vin->lock);

	pm_runtime_put(vin->dev);

	return ret;
}

static const struct v4l2_file_operations rvin_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.open		= rvin_open,
	.release	= rvin_release,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.read		= vb2_fop_read,
};

/* Unregister the video device, if it was ever registered. */
void rvin_v4l2_unregister(struct rvin_dev *vin)
{
	if (!video_is_registered(&vin->vdev))
		return;

	v4l2_info(&vin->v4l2_dev, "Removing %s\n",
		  video_device_node_name(&vin->vdev));

	/* Checks internally if vdev have been init or not */
	video_unregister_device(&vin->vdev);
}

/* Deliver a subdevice notification as a V4L2 event on @vin's video node. */
static void rvin_notify_video_device(struct rvin_dev *vin,
				     unsigned int notification, void *arg)
{
	switch (notification) {
	case V4L2_DEVICE_NOTIFY_EVENT:
		v4l2_event_queue(&vin->vdev, arg);
		break;
	default:
		break;
	}
}

/*
 * v4l2_device notify callback. In media-controller mode a notification
 * from a subdevice is routed to every VIN in the group whose pad is
 * currently connected to that subdevice; otherwise it goes straight to
 * this VIN's video device.
 */
static void rvin_notify(struct v4l2_subdev *sd,
			unsigned int notification, void *arg)
{
	struct v4l2_subdev *remote;
	struct rvin_group *group;
	struct media_pad *pad;
	struct rvin_dev *vin =
		container_of(sd->v4l2_dev, struct rvin_dev, v4l2_dev);
	unsigned int i;

	/* If no media controller, no need to route the event. */
	if (!vin->info->use_mc) {
		rvin_notify_video_device(vin, notification, arg);
		return;
	}

	group = vin->group;

	for (i = 0; i < RCAR_VIN_NUM; i++) {
		vin = group->vin[i];
		if (!vin)
			continue;

		pad = media_pad_remote_pad_first(&vin->pad);
		if (!pad)
			continue;

		remote = media_entity_to_v4l2_subdev(pad->entity);
		if (remote != sd)
			continue;

		rvin_notify_video_device(vin, notification, arg);
	}
}

/*
 * Initialize and register @vin's video device: install the fops/ioctl
 * tables (MC or parallel variants), seed the default format and publish
 * the device node.
 */
int rvin_v4l2_register(struct rvin_dev *vin)
{
	struct video_device *vdev = &vin->vdev;
	int ret;

	vin->v4l2_dev.notify = rvin_notify;

	/* video node */
	vdev->v4l2_dev = &vin->v4l2_dev;
	vdev->queue = &vin->queue;
	snprintf(vdev->name, sizeof(vdev->name), "VIN%u output", vin->id);
	vdev->release = video_device_release_empty;
	vdev->lock = &vin->lock;
	vdev->fops = &rvin_fops;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
		V4L2_CAP_READWRITE;

	/* Set a default format */
	vin->format.pixelformat	= RVIN_DEFAULT_FORMAT;
	vin->format.width = RVIN_DEFAULT_WIDTH;
	vin->format.height = RVIN_DEFAULT_HEIGHT;
	vin->format.field = RVIN_DEFAULT_FIELD;
	vin->format.colorspace = RVIN_DEFAULT_COLORSPACE;

	if (vin->info->use_mc) {
		vdev->device_caps |= V4L2_CAP_IO_MC;
		vdev->ioctl_ops = &rvin_mc_ioctl_ops;
	} else {
		vdev->ioctl_ops = &rvin_ioctl_ops;
		rvin_reset_format(vin);
	}

	rvin_format_align(vin, &vin->format);

	ret = video_register_device(&vin->vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		vin_err(vin, "Failed to register video device\n");
		return ret;
	}

	video_set_drvdata(&vin->vdev, vin);

	v4l2_info(&vin->v4l2_dev, "Device registered as %s\n",
		  video_device_node_name(&vin->vdev));

	return ret;
}
linux-master
drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Renesas R-Car VIN * * Copyright (C) 2016 Renesas Electronics Corp. * Copyright (C) 2011-2013 Renesas Solutions Corp. * Copyright (C) 2013 Cogent Embedded, Inc., <[email protected]> * Copyright (C) 2008 Magnus Damm * * Based on the soc-camera rcar_vin driver */ #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <media/v4l2-async.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-mc.h> #include "rcar-vin.h" /* * The companion CSI-2 receiver driver (rcar-csi2) is known * and we know it has one source pad (pad 0) and four sink * pads (pad 1-4). So to translate a pad on the remote * CSI-2 receiver to/from the VIN internal channel number simply * subtract/add one from the pad/channel number. */ #define rvin_group_csi_pad_to_channel(pad) ((pad) - 1) #define rvin_group_csi_channel_to_pad(channel) ((channel) + 1) /* * Not all VINs are created equal, master VINs control the * routing for other VIN's. We can figure out which VIN is * master by looking at a VINs id. */ #define rvin_group_id_to_master(vin) ((vin) < 4 ? 0 : 4) #define v4l2_dev_to_vin(d) container_of(d, struct rvin_dev, v4l2_dev) /* ----------------------------------------------------------------------------- * Gen3 Group Allocator */ /* FIXME: This should if we find a system that supports more * than one group for the whole system be replaced with a linked * list of groups. And eventually all of this should be replaced * with a global device allocator API. * * But for now this works as on all supported systems there will * be only one group for all instances. 
*/ static DEFINE_MUTEX(rvin_group_lock); static struct rvin_group *rvin_group_data; static void rvin_group_cleanup(struct rvin_group *group) { media_device_cleanup(&group->mdev); mutex_destroy(&group->lock); } static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin, int (*link_setup)(struct rvin_dev *), const struct media_device_ops *ops) { struct media_device *mdev = &group->mdev; const struct of_device_id *match; struct device_node *np; mutex_init(&group->lock); /* Count number of VINs in the system */ group->count = 0; for_each_matching_node(np, vin->dev->driver->of_match_table) if (of_device_is_available(np)) group->count++; vin_dbg(vin, "found %u enabled VIN's in DT", group->count); group->link_setup = link_setup; mdev->dev = vin->dev; mdev->ops = ops; match = of_match_node(vin->dev->driver->of_match_table, vin->dev->of_node); strscpy(mdev->driver_name, KBUILD_MODNAME, sizeof(mdev->driver_name)); strscpy(mdev->model, match->compatible, sizeof(mdev->model)); media_device_init(mdev); return 0; } static void rvin_group_release(struct kref *kref) { struct rvin_group *group = container_of(kref, struct rvin_group, refcount); mutex_lock(&rvin_group_lock); rvin_group_data = NULL; rvin_group_cleanup(group); kfree(group); mutex_unlock(&rvin_group_lock); } static int rvin_group_get(struct rvin_dev *vin, int (*link_setup)(struct rvin_dev *), const struct media_device_ops *ops) { struct rvin_group *group; u32 id; int ret; /* Make sure VIN id is present and sane */ ret = of_property_read_u32(vin->dev->of_node, "renesas,id", &id); if (ret) { vin_err(vin, "%pOF: No renesas,id property found\n", vin->dev->of_node); return -EINVAL; } if (id >= RCAR_VIN_NUM) { vin_err(vin, "%pOF: Invalid renesas,id '%u'\n", vin->dev->of_node, id); return -EINVAL; } /* Join or create a VIN group */ mutex_lock(&rvin_group_lock); if (rvin_group_data) { group = rvin_group_data; kref_get(&group->refcount); } else { group = kzalloc(sizeof(*group), GFP_KERNEL); if (!group) { ret = 
-ENOMEM; goto err_group; } ret = rvin_group_init(group, vin, link_setup, ops); if (ret) { kfree(group); vin_err(vin, "Failed to initialize group\n"); goto err_group; } kref_init(&group->refcount); rvin_group_data = group; } mutex_unlock(&rvin_group_lock); /* Add VIN to group */ mutex_lock(&group->lock); if (group->vin[id]) { vin_err(vin, "Duplicate renesas,id property value %u\n", id); mutex_unlock(&group->lock); kref_put(&group->refcount, rvin_group_release); return -EINVAL; } group->vin[id] = vin; vin->id = id; vin->group = group; vin->v4l2_dev.mdev = &group->mdev; mutex_unlock(&group->lock); return 0; err_group: mutex_unlock(&rvin_group_lock); return ret; } static void rvin_group_put(struct rvin_dev *vin) { struct rvin_group *group = vin->group; mutex_lock(&group->lock); vin->group = NULL; vin->v4l2_dev.mdev = NULL; if (WARN_ON(group->vin[vin->id] != vin)) goto out; group->vin[vin->id] = NULL; out: mutex_unlock(&group->lock); kref_put(&group->refcount, rvin_group_release); } /* group lock should be held when calling this function. */ static int rvin_group_entity_to_remote_id(struct rvin_group *group, struct media_entity *entity) { struct v4l2_subdev *sd; unsigned int i; sd = media_entity_to_v4l2_subdev(entity); for (i = 0; i < RVIN_REMOTES_MAX; i++) if (group->remotes[i].subdev == sd) return i; return -ENODEV; } static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier) { struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev); unsigned int i; int ret; ret = media_device_register(&vin->group->mdev); if (ret) return ret; ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev); if (ret) { vin_err(vin, "Failed to register subdev nodes\n"); return ret; } /* Register all video nodes for the group. 
*/ for (i = 0; i < RCAR_VIN_NUM; i++) { if (vin->group->vin[i] && !video_is_registered(&vin->group->vin[i]->vdev)) { ret = rvin_v4l2_register(vin->group->vin[i]); if (ret) return ret; } } return vin->group->link_setup(vin); } static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asc) { struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev); unsigned int i; for (i = 0; i < RCAR_VIN_NUM; i++) if (vin->group->vin[i]) rvin_v4l2_unregister(vin->group->vin[i]); mutex_lock(&vin->group->lock); for (i = 0; i < RVIN_CSI_MAX; i++) { if (vin->group->remotes[i].asc != asc) continue; vin->group->remotes[i].subdev = NULL; vin_dbg(vin, "Unbind %s from slot %u\n", subdev->name, i); break; } mutex_unlock(&vin->group->lock); media_device_unregister(&vin->group->mdev); } static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asc) { struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev); unsigned int i; mutex_lock(&vin->group->lock); for (i = 0; i < RVIN_CSI_MAX; i++) { if (vin->group->remotes[i].asc != asc) continue; vin->group->remotes[i].subdev = subdev; vin_dbg(vin, "Bound %s to slot %u\n", subdev->name, i); break; } mutex_unlock(&vin->group->lock); return 0; } static const struct v4l2_async_notifier_operations rvin_group_notify_ops = { .bound = rvin_group_notify_bound, .unbind = rvin_group_notify_unbind, .complete = rvin_group_notify_complete, }; static int rvin_group_parse_of(struct rvin_dev *vin, unsigned int port, unsigned int id) { struct fwnode_handle *ep, *fwnode; struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY, }; struct v4l2_async_connection *asc; int ret; ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), port, id, 0); if (!ep) return 0; fwnode = fwnode_graph_get_remote_endpoint(ep); ret = v4l2_fwnode_endpoint_parse(ep, &vep); fwnode_handle_put(ep); if (ret) { vin_err(vin, "Failed to 
parse %pOF\n", to_of_node(fwnode)); ret = -EINVAL; goto out; } asc = v4l2_async_nf_add_fwnode(&vin->group->notifier, fwnode, struct v4l2_async_connection); if (IS_ERR(asc)) { ret = PTR_ERR(asc); goto out; } vin->group->remotes[vep.base.id].asc = asc; vin_dbg(vin, "Add group OF device %pOF to slot %u\n", to_of_node(fwnode), vep.base.id); out: fwnode_handle_put(fwnode); return ret; } static void rvin_group_notifier_cleanup(struct rvin_dev *vin) { if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) { v4l2_async_nf_unregister(&vin->group->notifier); v4l2_async_nf_cleanup(&vin->group->notifier); } } static int rvin_group_notifier_init(struct rvin_dev *vin, unsigned int port, unsigned int max_id) { unsigned int count = 0, vin_mask = 0; unsigned int i, id; int ret; mutex_lock(&vin->group->lock); /* If not all VIN's are registered don't register the notifier. */ for (i = 0; i < RCAR_VIN_NUM; i++) { if (vin->group->vin[i]) { count++; vin_mask |= BIT(i); } } if (vin->group->count != count) { mutex_unlock(&vin->group->lock); return 0; } mutex_unlock(&vin->group->lock); v4l2_async_nf_init(&vin->group->notifier, &vin->v4l2_dev); /* * Some subdevices may overlap but the parser function can handle it and * each subdevice will only be registered once with the group notifier. 
*/ for (i = 0; i < RCAR_VIN_NUM; i++) { if (!(vin_mask & BIT(i))) continue; for (id = 0; id < max_id; id++) { if (vin->group->remotes[id].asc) continue; ret = rvin_group_parse_of(vin->group->vin[i], port, id); if (ret) return ret; } } if (list_empty(&vin->group->notifier.waiting_list)) return 0; vin->group->notifier.ops = &rvin_group_notify_ops; ret = v4l2_async_nf_register(&vin->group->notifier); if (ret < 0) { vin_err(vin, "Notifier registration failed\n"); v4l2_async_nf_cleanup(&vin->group->notifier); return ret; } return 0; } /* ----------------------------------------------------------------------------- * Controls */ static int rvin_s_ctrl(struct v4l2_ctrl *ctrl) { struct rvin_dev *vin = container_of(ctrl->handler, struct rvin_dev, ctrl_handler); switch (ctrl->id) { case V4L2_CID_ALPHA_COMPONENT: rvin_set_alpha(vin, ctrl->val); break; } return 0; } static const struct v4l2_ctrl_ops rvin_ctrl_ops = { .s_ctrl = rvin_s_ctrl, }; static void rvin_free_controls(struct rvin_dev *vin) { v4l2_ctrl_handler_free(&vin->ctrl_handler); vin->vdev.ctrl_handler = NULL; } static int rvin_create_controls(struct rvin_dev *vin, struct v4l2_subdev *subdev) { int ret; ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16); if (ret < 0) return ret; /* The VIN directly deals with alpha component. */ v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255); if (vin->ctrl_handler.error) { ret = vin->ctrl_handler.error; rvin_free_controls(vin); return ret; } /* For the non-MC mode add controls from the subdevice. 
*/ if (subdev) { ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, subdev->ctrl_handler, NULL, true); if (ret < 0) { rvin_free_controls(vin); return ret; } } vin->vdev.ctrl_handler = &vin->ctrl_handler; return 0; } /* ----------------------------------------------------------------------------- * Async notifier */ static int rvin_find_pad(struct v4l2_subdev *sd, int direction) { unsigned int pad; if (sd->entity.num_pads <= 1) return 0; for (pad = 0; pad < sd->entity.num_pads; pad++) if (sd->entity.pads[pad].flags & direction) return pad; return -EINVAL; } /* ----------------------------------------------------------------------------- * Parallel async notifier */ /* The vin lock should be held when calling the subdevice attach and detach */ static int rvin_parallel_subdevice_attach(struct rvin_dev *vin, struct v4l2_subdev *subdev) { struct v4l2_subdev_mbus_code_enum code = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; int ret; /* Find source and sink pad of remote subdevice */ ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SOURCE); if (ret < 0) return ret; vin->parallel.source_pad = ret; ret = rvin_find_pad(subdev, MEDIA_PAD_FL_SINK); vin->parallel.sink_pad = ret < 0 ? 
0 : ret; if (vin->info->use_mc) { vin->parallel.subdev = subdev; return 0; } /* Find compatible subdevices mbus format */ vin->mbus_code = 0; code.index = 0; code.pad = vin->parallel.source_pad; while (!vin->mbus_code && !v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &code)) { code.index++; switch (code.code) { case MEDIA_BUS_FMT_YUYV8_1X16: case MEDIA_BUS_FMT_UYVY8_1X16: case MEDIA_BUS_FMT_UYVY8_2X8: case MEDIA_BUS_FMT_UYVY10_2X10: case MEDIA_BUS_FMT_RGB888_1X24: vin->mbus_code = code.code; vin_dbg(vin, "Found media bus format for %s: %d\n", subdev->name, vin->mbus_code); break; default: break; } } if (!vin->mbus_code) { vin_err(vin, "Unsupported media bus format for %s\n", subdev->name); return -EINVAL; } /* Read tvnorms */ ret = v4l2_subdev_call(subdev, video, g_tvnorms, &vin->vdev.tvnorms); if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) return ret; /* Read standard */ vin->std = V4L2_STD_UNKNOWN; ret = v4l2_subdev_call(subdev, video, g_std, &vin->std); if (ret < 0 && ret != -ENOIOCTLCMD) return ret; /* Add the controls */ ret = rvin_create_controls(vin, subdev); if (ret < 0) return ret; vin->parallel.subdev = subdev; return 0; } static void rvin_parallel_subdevice_detach(struct rvin_dev *vin) { rvin_v4l2_unregister(vin); vin->parallel.subdev = NULL; if (!vin->info->use_mc) rvin_free_controls(vin); } static int rvin_parallel_notify_complete(struct v4l2_async_notifier *notifier) { struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev); struct media_entity *source; struct media_entity *sink; int ret; ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev); if (ret < 0) { vin_err(vin, "Failed to register subdev nodes\n"); return ret; } if (!video_is_registered(&vin->vdev)) { ret = rvin_v4l2_register(vin); if (ret < 0) return ret; } if (!vin->info->use_mc) return 0; /* If we're running with media-controller, link the subdevs. 
*/ source = &vin->parallel.subdev->entity; sink = &vin->vdev.entity; ret = media_create_pad_link(source, vin->parallel.source_pad, sink, vin->parallel.sink_pad, 0); if (ret) vin_err(vin, "Error adding link from %s to %s: %d\n", source->name, sink->name, ret); return ret; } static void rvin_parallel_notify_unbind(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asc) { struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev); vin_dbg(vin, "unbind parallel subdev %s\n", subdev->name); mutex_lock(&vin->lock); rvin_parallel_subdevice_detach(vin); mutex_unlock(&vin->lock); } static int rvin_parallel_notify_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asc) { struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev); int ret; mutex_lock(&vin->lock); ret = rvin_parallel_subdevice_attach(vin, subdev); mutex_unlock(&vin->lock); if (ret) return ret; v4l2_set_subdev_hostdata(subdev, vin); vin_dbg(vin, "bound subdev %s source pad: %u sink pad: %u\n", subdev->name, vin->parallel.source_pad, vin->parallel.sink_pad); return 0; } static const struct v4l2_async_notifier_operations rvin_parallel_notify_ops = { .bound = rvin_parallel_notify_bound, .unbind = rvin_parallel_notify_unbind, .complete = rvin_parallel_notify_complete, }; static int rvin_parallel_parse_of(struct rvin_dev *vin) { struct fwnode_handle *ep, *fwnode; struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_UNKNOWN, }; struct v4l2_async_connection *asc; int ret; ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), 0, 0, 0); if (!ep) return 0; fwnode = fwnode_graph_get_remote_endpoint(ep); ret = v4l2_fwnode_endpoint_parse(ep, &vep); fwnode_handle_put(ep); if (ret) { vin_err(vin, "Failed to parse %pOF\n", to_of_node(fwnode)); ret = -EINVAL; goto out; } switch (vep.bus_type) { case V4L2_MBUS_PARALLEL: case V4L2_MBUS_BT656: vin_dbg(vin, "Found %s media bus\n", vep.bus_type == V4L2_MBUS_PARALLEL ? 
"PARALLEL" : "BT656"); vin->parallel.mbus_type = vep.bus_type; vin->parallel.bus = vep.bus.parallel; break; default: vin_err(vin, "Unknown media bus type\n"); ret = -EINVAL; goto out; } asc = v4l2_async_nf_add_fwnode(&vin->notifier, fwnode, struct v4l2_async_connection); if (IS_ERR(asc)) { ret = PTR_ERR(asc); goto out; } vin->parallel.asc = asc; vin_dbg(vin, "Add parallel OF device %pOF\n", to_of_node(fwnode)); out: fwnode_handle_put(fwnode); return ret; } static void rvin_parallel_cleanup(struct rvin_dev *vin) { v4l2_async_nf_unregister(&vin->notifier); v4l2_async_nf_cleanup(&vin->notifier); } static int rvin_parallel_init(struct rvin_dev *vin) { int ret; v4l2_async_nf_init(&vin->notifier, &vin->v4l2_dev); ret = rvin_parallel_parse_of(vin); if (ret) return ret; if (!vin->parallel.asc) return -ENODEV; vin_dbg(vin, "Found parallel subdevice %pOF\n", to_of_node(vin->parallel.asc->match.fwnode)); vin->notifier.ops = &rvin_parallel_notify_ops; ret = v4l2_async_nf_register(&vin->notifier); if (ret < 0) { vin_err(vin, "Notifier registration failed\n"); v4l2_async_nf_cleanup(&vin->notifier); return ret; } return 0; } /* ----------------------------------------------------------------------------- * CSI-2 */ /* * Link setup for the links between a VIN and a CSI-2 receiver is a bit * complex. The reason for this is that the register controlling routing * is not present in each VIN instance. There are special VINs which * control routing for themselves and other VINs. There are not many * different possible links combinations that can be enabled at the same * time, therefor all already enabled links which are controlled by a * master VIN need to be taken into account when making the decision * if a new link can be enabled or not. * * 1. Find out which VIN the link the user tries to enable is connected to. * 2. Lookup which master VIN controls the links for this VIN. * 3. Start with a bitmask with all bits set. * 4. 
For each previously enabled link from the master VIN bitwise AND its * route mask (see documentation for mask in struct rvin_group_route) * with the bitmask. * 5. Bitwise AND the mask for the link the user tries to enable to the bitmask. * 6. If the bitmask is not empty at this point the new link can be enabled * while keeping all previous links enabled. Update the CHSEL value of the * master VIN and inform the user that the link could be enabled. * * Please note that no link can be enabled if any VIN in the group is * currently open. */ static int rvin_csi2_link_notify(struct media_link *link, u32 flags, unsigned int notification) { struct rvin_group *group = container_of(link->graph_obj.mdev, struct rvin_group, mdev); struct media_entity *entity; struct video_device *vdev; struct rvin_dev *vin; unsigned int i; int csi_id, ret; ret = v4l2_pipeline_link_notify(link, flags, notification); if (ret) return ret; /* Only care about link enablement for VIN nodes. */ if (!(flags & MEDIA_LNK_FL_ENABLED) || !is_media_entity_v4l2_video_device(link->sink->entity)) return 0; /* * Don't allow link changes if any stream in the graph is active as * modifying the CHSEL register fields can disrupt running streams. */ media_device_for_each_entity(entity, &group->mdev) if (media_entity_is_streaming(entity)) return -EBUSY; /* Find the master VIN that controls the routes. */ vdev = media_entity_to_video_device(link->sink->entity); vin = container_of(vdev, struct rvin_dev, vdev); mutex_lock(&group->lock); csi_id = rvin_group_entity_to_remote_id(group, link->source->entity); if (csi_id == -ENODEV) { struct v4l2_subdev *sd; /* * Make sure the source entity subdevice is registered as * a parallel input of one of the enabled VINs if it is not * one of the CSI-2 subdevices. * * No hardware configuration required for parallel inputs, * we can return here. 
*/ sd = media_entity_to_v4l2_subdev(link->source->entity); for (i = 0; i < RCAR_VIN_NUM; i++) { if (group->vin[i] && group->vin[i]->parallel.subdev == sd) { group->vin[i]->is_csi = false; ret = 0; goto out; } } vin_err(vin, "Subdevice %s not registered to any VIN\n", link->source->entity->name); ret = -ENODEV; } else { const struct rvin_group_route *route; unsigned int chsel = UINT_MAX; unsigned int master_id; master_id = rvin_group_id_to_master(vin->id); if (WARN_ON(!group->vin[master_id])) { ret = -ENODEV; goto out; } /* Make sure group is connected to same CSI-2 */ for (i = master_id; i < master_id + 4; i++) { struct media_pad *csi_pad; if (!group->vin[i]) continue; /* Get remote CSI-2, if any. */ csi_pad = media_pad_remote_pad_first( &group->vin[i]->vdev.entity.pads[0]); if (!csi_pad) continue; if (csi_pad->entity != link->source->entity) { vin_dbg(vin, "Already attached to %s\n", csi_pad->entity->name); ret = -EBUSY; goto out; } } for (route = vin->info->routes; route->chsel; route++) { if (route->master == master_id && route->csi == csi_id) { chsel = route->chsel; break; } } if (chsel == UINT_MAX) { vin_err(vin, "No CHSEL value found\n"); ret = -EINVAL; goto out; } ret = rvin_set_channel_routing(group->vin[master_id], chsel); if (ret) goto out; vin->is_csi = true; } out: mutex_unlock(&group->lock); return ret; } static const struct media_device_ops rvin_csi2_media_ops = { .link_notify = rvin_csi2_link_notify, }; static int rvin_csi2_create_link(struct rvin_group *group, unsigned int id, const struct rvin_group_route *route) { struct media_entity *source = &group->remotes[route->csi].subdev->entity; struct media_entity *sink = &group->vin[id]->vdev.entity; struct media_pad *sink_pad = &sink->pads[0]; unsigned int channel; int ret; for (channel = 0; channel < 4; channel++) { unsigned int source_idx = rvin_group_csi_channel_to_pad(channel); struct media_pad *source_pad = &source->pads[source_idx]; /* Skip if link already exists. 
*/ if (media_entity_find_link(source_pad, sink_pad)) continue; ret = media_create_pad_link(source, source_idx, sink, 0, 0); if (ret) return ret; } return 0; } static int rvin_csi2_setup_links(struct rvin_dev *vin) { const struct rvin_group_route *route; unsigned int id; int ret = -EINVAL; /* Create all media device links between VINs and CSI-2's. */ mutex_lock(&vin->group->lock); for (route = vin->info->routes; route->chsel; route++) { /* Check that VIN' master is part of the group. */ if (!vin->group->vin[route->master]) continue; /* Check that CSI-2 is part of the group. */ if (!vin->group->remotes[route->csi].subdev) continue; for (id = route->master; id < route->master + 4; id++) { /* Check that VIN is part of the group. */ if (!vin->group->vin[id]) continue; ret = rvin_csi2_create_link(vin->group, id, route); if (ret) goto out; } } out: mutex_unlock(&vin->group->lock); return ret; } static void rvin_csi2_cleanup(struct rvin_dev *vin) { rvin_parallel_cleanup(vin); rvin_group_notifier_cleanup(vin); rvin_group_put(vin); rvin_free_controls(vin); } static int rvin_csi2_init(struct rvin_dev *vin) { int ret; vin->pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad); if (ret) return ret; ret = rvin_create_controls(vin, NULL); if (ret < 0) return ret; ret = rvin_group_get(vin, rvin_csi2_setup_links, &rvin_csi2_media_ops); if (ret) goto err_controls; /* It's OK to not have a parallel subdevice. */ ret = rvin_parallel_init(vin); if (ret && ret != -ENODEV) goto err_group; ret = rvin_group_notifier_init(vin, 1, RVIN_CSI_MAX); if (ret) goto err_parallel; return 0; err_parallel: rvin_parallel_cleanup(vin); err_group: rvin_group_put(vin); err_controls: rvin_free_controls(vin); return ret; } /* ----------------------------------------------------------------------------- * ISP */ static int rvin_isp_setup_links(struct rvin_dev *vin) { unsigned int i; int ret = -EINVAL; /* Create all media device links between VINs and ISP's. 
*/ mutex_lock(&vin->group->lock); for (i = 0; i < RCAR_VIN_NUM; i++) { struct media_pad *source_pad, *sink_pad; struct media_entity *source, *sink; unsigned int source_slot = i / 8; unsigned int source_idx = i % 8 + 1; if (!vin->group->vin[i]) continue; /* Check that ISP is part of the group. */ if (!vin->group->remotes[source_slot].subdev) continue; source = &vin->group->remotes[source_slot].subdev->entity; source_pad = &source->pads[source_idx]; sink = &vin->group->vin[i]->vdev.entity; sink_pad = &sink->pads[0]; /* Skip if link already exists. */ if (media_entity_find_link(source_pad, sink_pad)) continue; ret = media_create_pad_link(source, source_idx, sink, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret) { vin_err(vin, "Error adding link from %s to %s\n", source->name, sink->name); break; } } mutex_unlock(&vin->group->lock); return ret; } static void rvin_isp_cleanup(struct rvin_dev *vin) { rvin_group_notifier_cleanup(vin); rvin_group_put(vin); rvin_free_controls(vin); } static int rvin_isp_init(struct rvin_dev *vin) { int ret; vin->pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad); if (ret) return ret; ret = rvin_create_controls(vin, NULL); if (ret < 0) return ret; ret = rvin_group_get(vin, rvin_isp_setup_links, NULL); if (ret) goto err_controls; ret = rvin_group_notifier_init(vin, 2, RVIN_ISP_MAX); if (ret) goto err_group; return 0; err_group: rvin_group_put(vin); err_controls: rvin_free_controls(vin); return ret; } /* ----------------------------------------------------------------------------- * Suspend / Resume */ static int __maybe_unused rvin_suspend(struct device *dev) { struct rvin_dev *vin = dev_get_drvdata(dev); if (vin->state != RUNNING) return 0; rvin_stop_streaming(vin); vin->state = SUSPENDED; return 0; } static int __maybe_unused rvin_resume(struct device *dev) { struct rvin_dev *vin = dev_get_drvdata(dev); if (vin->state != SUSPENDED) return 0; /* * Restore group master CHSEL setting. 
* * This needs to be done by every VIN resuming not only the master * as we don't know if and in which order the master VINs will * be resumed. */ if (vin->info->use_mc) { unsigned int master_id = rvin_group_id_to_master(vin->id); struct rvin_dev *master = vin->group->vin[master_id]; int ret; if (WARN_ON(!master)) return -ENODEV; ret = rvin_set_channel_routing(master, master->chsel); if (ret) return ret; } return rvin_start_streaming(vin); } /* ----------------------------------------------------------------------------- * Platform Device Driver */ static const struct rvin_info rcar_info_h1 = { .model = RCAR_H1, .use_mc = false, .max_width = 2048, .max_height = 2048, .scaler = rvin_scaler_gen2, }; static const struct rvin_info rcar_info_m1 = { .model = RCAR_M1, .use_mc = false, .max_width = 2048, .max_height = 2048, .scaler = rvin_scaler_gen2, }; static const struct rvin_info rcar_info_gen2 = { .model = RCAR_GEN2, .use_mc = false, .max_width = 2048, .max_height = 2048, .scaler = rvin_scaler_gen2, }; static const struct rvin_group_route rcar_info_r8a774e1_routes[] = { { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 }, { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 }, { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 }, { /* Sentinel */ } }; static const struct rvin_info rcar_info_r8a774e1 = { .model = RCAR_GEN3, .use_mc = true, .max_width = 4096, .max_height = 4096, .routes = rcar_info_r8a774e1_routes, }; static const struct rvin_group_route rcar_info_r8a7795_routes[] = { { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 }, { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 }, { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 }, { .master = 4, .csi = RVIN_CSI41, .chsel = 0x03 }, { /* Sentinel */ } }; static const struct rvin_info rcar_info_r8a7795 = { .model = RCAR_GEN3, .use_mc = true, .nv12 = true, .max_width = 4096, .max_height = 4096, .routes = rcar_info_r8a7795_routes, .scaler = rvin_scaler_gen3, }; static const struct rvin_group_route rcar_info_r8a7796_routes[] = { { 
.master = 0, .csi = RVIN_CSI20, .chsel = 0x04 }, { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 }, { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 }, { .master = 4, .csi = RVIN_CSI40, .chsel = 0x03 }, { /* Sentinel */ } }; static const struct rvin_info rcar_info_r8a7796 = { .model = RCAR_GEN3, .use_mc = true, .nv12 = true, .max_width = 4096, .max_height = 4096, .routes = rcar_info_r8a7796_routes, .scaler = rvin_scaler_gen3, }; static const struct rvin_group_route rcar_info_r8a77965_routes[] = { { .master = 0, .csi = RVIN_CSI20, .chsel = 0x04 }, { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 }, { .master = 4, .csi = RVIN_CSI20, .chsel = 0x04 }, { .master = 4, .csi = RVIN_CSI40, .chsel = 0x03 }, { /* Sentinel */ } }; static const struct rvin_info rcar_info_r8a77965 = { .model = RCAR_GEN3, .use_mc = true, .nv12 = true, .max_width = 4096, .max_height = 4096, .routes = rcar_info_r8a77965_routes, .scaler = rvin_scaler_gen3, }; static const struct rvin_group_route rcar_info_r8a77970_routes[] = { { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 }, { /* Sentinel */ } }; static const struct rvin_info rcar_info_r8a77970 = { .model = RCAR_GEN3, .use_mc = true, .max_width = 4096, .max_height = 4096, .routes = rcar_info_r8a77970_routes, }; static const struct rvin_group_route rcar_info_r8a77980_routes[] = { { .master = 0, .csi = RVIN_CSI40, .chsel = 0x03 }, { .master = 4, .csi = RVIN_CSI41, .chsel = 0x03 }, { /* Sentinel */ } }; static const struct rvin_info rcar_info_r8a77980 = { .model = RCAR_GEN3, .use_mc = true, .nv12 = true, .max_width = 4096, .max_height = 4096, .routes = rcar_info_r8a77980_routes, }; static const struct rvin_group_route rcar_info_r8a77990_routes[] = { { .master = 4, .csi = RVIN_CSI40, .chsel = 0x03 }, { /* Sentinel */ } }; static const struct rvin_info rcar_info_r8a77990 = { .model = RCAR_GEN3, .use_mc = true, .nv12 = true, .max_width = 4096, .max_height = 4096, .routes = rcar_info_r8a77990_routes, .scaler = rvin_scaler_gen3, }; static const struct 
rvin_group_route rcar_info_r8a77995_routes[] = { { /* Sentinel */ } }; static const struct rvin_info rcar_info_r8a77995 = { .model = RCAR_GEN3, .use_mc = true, .nv12 = true, .max_width = 4096, .max_height = 4096, .routes = rcar_info_r8a77995_routes, .scaler = rvin_scaler_gen3, }; static const struct rvin_info rcar_info_r8a779a0 = { .model = RCAR_GEN3, .use_mc = true, .use_isp = true, .nv12 = true, .max_width = 4096, .max_height = 4096, }; static const struct rvin_info rcar_info_r8a779g0 = { .model = RCAR_GEN3, .use_mc = true, .use_isp = true, .nv12 = true, .max_width = 4096, .max_height = 4096, }; static const struct of_device_id rvin_of_id_table[] = { { .compatible = "renesas,vin-r8a774a1", .data = &rcar_info_r8a7796, }, { .compatible = "renesas,vin-r8a774b1", .data = &rcar_info_r8a77965, }, { .compatible = "renesas,vin-r8a774c0", .data = &rcar_info_r8a77990, }, { .compatible = "renesas,vin-r8a774e1", .data = &rcar_info_r8a774e1, }, { .compatible = "renesas,vin-r8a7778", .data = &rcar_info_m1, }, { .compatible = "renesas,vin-r8a7779", .data = &rcar_info_h1, }, { .compatible = "renesas,rcar-gen2-vin", .data = &rcar_info_gen2, }, { .compatible = "renesas,vin-r8a7795", .data = &rcar_info_r8a7795, }, { .compatible = "renesas,vin-r8a7796", .data = &rcar_info_r8a7796, }, { .compatible = "renesas,vin-r8a77961", .data = &rcar_info_r8a7796, }, { .compatible = "renesas,vin-r8a77965", .data = &rcar_info_r8a77965, }, { .compatible = "renesas,vin-r8a77970", .data = &rcar_info_r8a77970, }, { .compatible = "renesas,vin-r8a77980", .data = &rcar_info_r8a77980, }, { .compatible = "renesas,vin-r8a77990", .data = &rcar_info_r8a77990, }, { .compatible = "renesas,vin-r8a77995", .data = &rcar_info_r8a77995, }, { .compatible = "renesas,vin-r8a779a0", .data = &rcar_info_r8a779a0, }, { .compatible = "renesas,vin-r8a779g0", .data = &rcar_info_r8a779g0, }, { /* Sentinel */ }, }; MODULE_DEVICE_TABLE(of, rvin_of_id_table); static int rcar_vin_probe(struct platform_device *pdev) { struct 
rvin_dev *vin;
	int irq, ret;

	vin = devm_kzalloc(&pdev->dev, sizeof(*vin), GFP_KERNEL);
	if (!vin)
		return -ENOMEM;

	vin->dev = &pdev->dev;
	vin->info = of_device_get_match_data(&pdev->dev);
	/* Default alpha component: 0xff (fully opaque). */
	vin->alpha = 0xff;

	vin->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vin->base))
		return PTR_ERR(vin->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = rvin_dma_register(vin, irq);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, vin);

	/*
	 * Pick the topology flavour for this SoC: ISP in front of the VIN,
	 * CSI-2 via the media controller, or legacy parallel input.
	 */
	if (vin->info->use_isp) {
		ret = rvin_isp_init(vin);
	} else if (vin->info->use_mc) {
		ret = rvin_csi2_init(vin);

		/* In CSI-2 mode only a group-master VIN gets the scaler. */
		if (vin->info->scaler &&
		    rvin_group_id_to_master(vin->id) == vin->id)
			vin->scaler = vin->info->scaler;
	} else {
		ret = rvin_parallel_init(vin);

		if (vin->info->scaler)
			vin->scaler = vin->info->scaler;
	}

	if (ret) {
		/* Undo rvin_dma_register() before bailing out. */
		rvin_dma_unregister(vin);
		return ret;
	}

	pm_suspend_ignore_children(&pdev->dev, true);
	pm_runtime_enable(&pdev->dev);

	return 0;
}

/* Unwind everything rcar_vin_probe() set up, in reverse order. */
static void rcar_vin_remove(struct platform_device *pdev)
{
	struct rvin_dev *vin = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);

	rvin_v4l2_unregister(vin);

	if (vin->info->use_isp)
		rvin_isp_cleanup(vin);
	else if (vin->info->use_mc)
		rvin_csi2_cleanup(vin);
	else
		rvin_parallel_cleanup(vin);

	rvin_dma_unregister(vin);
}

static SIMPLE_DEV_PM_OPS(rvin_pm_ops, rvin_suspend, rvin_resume);

static struct platform_driver rcar_vin_driver = {
	.driver = {
		.name = "rcar-vin",
		.suppress_bind_attrs = true,
		.pm = &rvin_pm_ops,
		.of_match_table = rvin_of_id_table,
	},
	.probe = rcar_vin_probe,
	.remove_new = rcar_vin_remove,
};

module_platform_driver(rcar_vin_driver);

MODULE_AUTHOR("Niklas Söderlund <[email protected]>");
MODULE_DESCRIPTION("Renesas R-Car VIN camera host driver");
MODULE_LICENSE("GPL");
linux-master
drivers/media/platform/renesas/rcar-vin/rcar-core.c
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Renesas RZ/G2L CRU * * Copyright (C) 2022 Renesas Electronics Corp. * * Based on Renesas R-Car VIN * Copyright (C) 2016 Renesas Electronics Corp. * Copyright (C) 2011-2013 Renesas Solutions Corp. * Copyright (C) 2013 Cogent Embedded, Inc., <[email protected]> * Copyright (C) 2008 Magnus Damm */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/pm_runtime.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-dma-contig.h> #include "rzg2l-cru.h" /* HW CRU Registers Definition */ /* CRU Control Register */ #define CRUnCTRL 0x0 #define CRUnCTRL_VINSEL(x) ((x) << 0) /* CRU Interrupt Enable Register */ #define CRUnIE 0x4 #define CRUnIE_EFE BIT(17) /* CRU Interrupt Status Register */ #define CRUnINTS 0x8 #define CRUnINTS_SFS BIT(16) /* CRU Reset Register */ #define CRUnRST 0xc #define CRUnRST_VRESETN BIT(0) /* Memory Bank Base Address (Lower) Register for CRU Image Data */ #define AMnMBxADDRL(x) (0x100 + ((x) * 8)) /* Memory Bank Base Address (Higher) Register for CRU Image Data */ #define AMnMBxADDRH(x) (0x104 + ((x) * 8)) /* Memory Bank Enable Register for CRU Image Data */ #define AMnMBVALID 0x148 #define AMnMBVALID_MBVALID(x) GENMASK(x, 0) /* Memory Bank Status Register for CRU Image Data */ #define AMnMBS 0x14c #define AMnMBS_MBSTS 0x7 /* AXI Master FIFO Pointer Register for CRU Image Data */ #define AMnFIFOPNTR 0x168 #define AMnFIFOPNTR_FIFOWPNTR GENMASK(7, 0) #define AMnFIFOPNTR_FIFORPNTR_Y GENMASK(23, 16) /* AXI Master Transfer Stop Register for CRU Image Data */ #define AMnAXISTP 0x174 #define AMnAXISTP_AXI_STOP BIT(0) /* AXI Master Transfer Stop Status Register for CRU Image Data */ #define AMnAXISTPACK 0x178 #define AMnAXISTPACK_AXI_STOP_ACK BIT(0) /* CRU Image Processing Enable Register */ #define ICnEN 0x200 #define ICnEN_ICEN BIT(0) /* CRU Image Processing Main Control Register */ #define ICnMC 0x208 #define ICnMC_CSCTHR BIT(5) #define ICnMC_INF_YUV8_422 (0x1e << 16) #define 
ICnMC_INF_USER (0x30 << 16) #define ICnMC_VCSEL(x) ((x) << 22) #define ICnMC_INF_MASK GENMASK(21, 16) /* CRU Module Status Register */ #define ICnMS 0x254 #define ICnMS_IA BIT(2) /* CRU Data Output Mode Register */ #define ICnDMR 0x26c #define ICnDMR_YCMODE_UYVY (1 << 4) #define RZG2L_TIMEOUT_MS 100 #define RZG2L_RETRIES 10 #define RZG2L_CRU_DEFAULT_FORMAT V4L2_PIX_FMT_UYVY #define RZG2L_CRU_DEFAULT_WIDTH RZG2L_CRU_MIN_INPUT_WIDTH #define RZG2L_CRU_DEFAULT_HEIGHT RZG2L_CRU_MIN_INPUT_HEIGHT #define RZG2L_CRU_DEFAULT_FIELD V4L2_FIELD_NONE #define RZG2L_CRU_DEFAULT_COLORSPACE V4L2_COLORSPACE_SRGB struct rzg2l_cru_buffer { struct vb2_v4l2_buffer vb; struct list_head list; }; #define to_buf_list(vb2_buffer) \ (&container_of(vb2_buffer, struct rzg2l_cru_buffer, vb)->list) /* ----------------------------------------------------------------------------- * DMA operations */ static void rzg2l_cru_write(struct rzg2l_cru_dev *cru, u32 offset, u32 value) { iowrite32(value, cru->base + offset); } static u32 rzg2l_cru_read(struct rzg2l_cru_dev *cru, u32 offset) { return ioread32(cru->base + offset); } /* Need to hold qlock before calling */ static void return_unused_buffers(struct rzg2l_cru_dev *cru, enum vb2_buffer_state state) { struct rzg2l_cru_buffer *buf, *node; unsigned long flags; unsigned int i; spin_lock_irqsave(&cru->qlock, flags); for (i = 0; i < cru->num_buf; i++) { if (cru->queue_buf[i]) { vb2_buffer_done(&cru->queue_buf[i]->vb2_buf, state); cru->queue_buf[i] = NULL; } } list_for_each_entry_safe(buf, node, &cru->buf_list, list) { vb2_buffer_done(&buf->vb.vb2_buf, state); list_del(&buf->list); } spin_unlock_irqrestore(&cru->qlock, flags); } static int rzg2l_cru_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq); /* Make sure the image size is large enough. */ if (*nplanes) return sizes[0] < cru->format.sizeimage ? 
-EINVAL : 0; *nplanes = 1; sizes[0] = cru->format.sizeimage; return 0; }; static int rzg2l_cru_buffer_prepare(struct vb2_buffer *vb) { struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue); unsigned long size = cru->format.sizeimage; if (vb2_plane_size(vb, 0) < size) { dev_err(cru->dev, "buffer too small (%lu < %lu)\n", vb2_plane_size(vb, 0), size); return -EINVAL; } vb2_set_plane_payload(vb, 0, size); return 0; } static void rzg2l_cru_buffer_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vb->vb2_queue); unsigned long flags; spin_lock_irqsave(&cru->qlock, flags); list_add_tail(to_buf_list(vbuf), &cru->buf_list); spin_unlock_irqrestore(&cru->qlock, flags); } static int rzg2l_cru_mc_validate_format(struct rzg2l_cru_dev *cru, struct v4l2_subdev *sd, struct media_pad *pad) { struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; fmt.pad = pad->index; if (v4l2_subdev_call_state_active(sd, pad, get_fmt, &fmt)) return -EPIPE; switch (fmt.format.code) { case MEDIA_BUS_FMT_UYVY8_1X16: break; default: return -EPIPE; } switch (fmt.format.field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_NONE: case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_INTERLACED: case V4L2_FIELD_SEQ_TB: case V4L2_FIELD_SEQ_BT: break; default: return -EPIPE; } if (fmt.format.width != cru->format.width || fmt.format.height != cru->format.height) return -EPIPE; return 0; } static void rzg2l_cru_set_slot_addr(struct rzg2l_cru_dev *cru, int slot, dma_addr_t addr) { /* * The address needs to be 512 bytes aligned. Driver should never accept * settings that do not satisfy this in the first place... */ if (WARN_ON((addr) & RZG2L_CRU_HW_BUFFER_MASK)) return; /* Currently, we just use the buffer in 32 bits address */ rzg2l_cru_write(cru, AMnMBxADDRL(slot), addr); rzg2l_cru_write(cru, AMnMBxADDRH(slot), 0); } /* * Moves a buffer from the queue to the HW slot. 
If no buffer is * available use the scratch buffer. The scratch buffer is never * returned to userspace, its only function is to enable the capture * loop to keep running. */ static void rzg2l_cru_fill_hw_slot(struct rzg2l_cru_dev *cru, int slot) { struct vb2_v4l2_buffer *vbuf; struct rzg2l_cru_buffer *buf; dma_addr_t phys_addr; /* A already populated slot shall never be overwritten. */ if (WARN_ON(cru->queue_buf[slot])) return; dev_dbg(cru->dev, "Filling HW slot: %d\n", slot); if (list_empty(&cru->buf_list)) { cru->queue_buf[slot] = NULL; phys_addr = cru->scratch_phys; } else { /* Keep track of buffer we give to HW */ buf = list_entry(cru->buf_list.next, struct rzg2l_cru_buffer, list); vbuf = &buf->vb; list_del_init(to_buf_list(vbuf)); cru->queue_buf[slot] = vbuf; /* Setup DMA */ phys_addr = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, 0); } rzg2l_cru_set_slot_addr(cru, slot, phys_addr); } static void rzg2l_cru_initialize_axi(struct rzg2l_cru_dev *cru) { unsigned int slot; /* * Set image data memory banks. * Currently, we will use maximum address. 
*/ rzg2l_cru_write(cru, AMnMBVALID, AMnMBVALID_MBVALID(cru->num_buf - 1)); for (slot = 0; slot < cru->num_buf; slot++) rzg2l_cru_fill_hw_slot(cru, slot); } static void rzg2l_cru_csi2_setup(struct rzg2l_cru_dev *cru, bool *input_is_yuv, struct v4l2_mbus_framefmt *ip_sd_fmt) { u32 icnmc; switch (ip_sd_fmt->code) { case MEDIA_BUS_FMT_UYVY8_1X16: icnmc = ICnMC_INF_YUV8_422; *input_is_yuv = true; break; default: *input_is_yuv = false; icnmc = ICnMC_INF_USER; break; } icnmc |= (rzg2l_cru_read(cru, ICnMC) & ~ICnMC_INF_MASK); /* Set virtual channel CSI2 */ icnmc |= ICnMC_VCSEL(cru->csi.channel); rzg2l_cru_write(cru, ICnMC, icnmc); } static int rzg2l_cru_initialize_image_conv(struct rzg2l_cru_dev *cru, struct v4l2_mbus_framefmt *ip_sd_fmt) { bool output_is_yuv = false; bool input_is_yuv = false; u32 icndmr; rzg2l_cru_csi2_setup(cru, &input_is_yuv, ip_sd_fmt); /* Output format */ switch (cru->format.pixelformat) { case V4L2_PIX_FMT_UYVY: icndmr = ICnDMR_YCMODE_UYVY; output_is_yuv = true; break; default: dev_err(cru->dev, "Invalid pixelformat (0x%x)\n", cru->format.pixelformat); return -EINVAL; } /* If input and output use same colorspace, do bypass mode */ if (output_is_yuv == input_is_yuv) rzg2l_cru_write(cru, ICnMC, rzg2l_cru_read(cru, ICnMC) | ICnMC_CSCTHR); else rzg2l_cru_write(cru, ICnMC, rzg2l_cru_read(cru, ICnMC) & (~ICnMC_CSCTHR)); /* Set output data format */ rzg2l_cru_write(cru, ICnDMR, icndmr); return 0; } void rzg2l_cru_stop_image_processing(struct rzg2l_cru_dev *cru) { u32 amnfifopntr, amnfifopntr_w, amnfifopntr_r_y; unsigned int retries = 0; unsigned long flags; u32 icnms; spin_lock_irqsave(&cru->qlock, flags); /* Disable and clear the interrupt */ rzg2l_cru_write(cru, CRUnIE, 0); rzg2l_cru_write(cru, CRUnINTS, 0x001F0F0F); /* Stop the operation of image conversion */ rzg2l_cru_write(cru, ICnEN, 0); /* Wait for streaming to stop */ while ((rzg2l_cru_read(cru, ICnMS) & ICnMS_IA) && retries++ < RZG2L_RETRIES) { spin_unlock_irqrestore(&cru->qlock, flags); 
msleep(RZG2L_TIMEOUT_MS); spin_lock_irqsave(&cru->qlock, flags); } icnms = rzg2l_cru_read(cru, ICnMS) & ICnMS_IA; if (icnms) dev_err(cru->dev, "Failed stop HW, something is seriously broken\n"); cru->state = RZG2L_CRU_DMA_STOPPED; /* Wait until the FIFO becomes empty */ for (retries = 5; retries > 0; retries--) { amnfifopntr = rzg2l_cru_read(cru, AMnFIFOPNTR); amnfifopntr_w = amnfifopntr & AMnFIFOPNTR_FIFOWPNTR; amnfifopntr_r_y = (amnfifopntr & AMnFIFOPNTR_FIFORPNTR_Y) >> 16; if (amnfifopntr_w == amnfifopntr_r_y) break; usleep_range(10, 20); } /* Notify that FIFO is not empty here */ if (!retries) dev_err(cru->dev, "Failed to empty FIFO\n"); /* Stop AXI bus */ rzg2l_cru_write(cru, AMnAXISTP, AMnAXISTP_AXI_STOP); /* Wait until the AXI bus stop */ for (retries = 5; retries > 0; retries--) { if (rzg2l_cru_read(cru, AMnAXISTPACK) & AMnAXISTPACK_AXI_STOP_ACK) break; usleep_range(10, 20); } /* Notify that AXI bus can not stop here */ if (!retries) dev_err(cru->dev, "Failed to stop AXI bus\n"); /* Cancel the AXI bus stop request */ rzg2l_cru_write(cru, AMnAXISTP, 0); /* Reset the CRU (AXI-master) */ reset_control_assert(cru->aresetn); /* Resets the image processing module */ rzg2l_cru_write(cru, CRUnRST, 0); spin_unlock_irqrestore(&cru->qlock, flags); } int rzg2l_cru_start_image_processing(struct rzg2l_cru_dev *cru) { struct v4l2_mbus_framefmt *fmt = rzg2l_cru_ip_get_src_fmt(cru); unsigned long flags; int ret; spin_lock_irqsave(&cru->qlock, flags); /* Initialize image convert */ ret = rzg2l_cru_initialize_image_conv(cru, fmt); if (ret) { spin_unlock_irqrestore(&cru->qlock, flags); return ret; } /* Select a video input */ rzg2l_cru_write(cru, CRUnCTRL, CRUnCTRL_VINSEL(0)); /* Cancel the software reset for image processing block */ rzg2l_cru_write(cru, CRUnRST, CRUnRST_VRESETN); /* Disable and clear the interrupt before using */ rzg2l_cru_write(cru, CRUnIE, 0); rzg2l_cru_write(cru, CRUnINTS, 0x001f000f); /* Initialize the AXI master */ rzg2l_cru_initialize_axi(cru); /* 
Enable interrupt */ rzg2l_cru_write(cru, CRUnIE, CRUnIE_EFE); /* Enable image processing reception */ rzg2l_cru_write(cru, ICnEN, ICnEN_ICEN); spin_unlock_irqrestore(&cru->qlock, flags); return 0; } void rzg2l_cru_vclk_unprepare(struct rzg2l_cru_dev *cru) { clk_disable_unprepare(cru->vclk); } int rzg2l_cru_vclk_prepare(struct rzg2l_cru_dev *cru) { return clk_prepare_enable(cru->vclk); } static int rzg2l_cru_set_stream(struct rzg2l_cru_dev *cru, int on) { struct media_pipeline *pipe; struct v4l2_subdev *sd; struct media_pad *pad; int ret; pad = media_pad_remote_pad_first(&cru->pad); if (!pad) return -EPIPE; sd = media_entity_to_v4l2_subdev(pad->entity); if (!on) { int stream_off_ret = 0; ret = v4l2_subdev_call(sd, video, s_stream, 0); if (ret) stream_off_ret = ret; ret = v4l2_subdev_call(sd, video, post_streamoff); if (ret == -ENOIOCTLCMD) ret = 0; if (ret && !stream_off_ret) stream_off_ret = ret; video_device_pipeline_stop(&cru->vdev); pm_runtime_put_sync(cru->dev); clk_disable_unprepare(cru->vclk); return stream_off_ret; } ret = pm_runtime_resume_and_get(cru->dev); if (ret) return ret; ret = clk_prepare_enable(cru->vclk); if (ret) goto err_pm_put; ret = rzg2l_cru_mc_validate_format(cru, sd, pad); if (ret) goto err_vclk_disable; pipe = media_entity_pipeline(&sd->entity) ? 
: &cru->vdev.pipe; ret = video_device_pipeline_start(&cru->vdev, pipe); if (ret) goto err_vclk_disable; ret = v4l2_subdev_call(sd, video, pre_streamon, 0); if (ret == -ENOIOCTLCMD) ret = 0; if (ret) goto pipe_line_stop; ret = v4l2_subdev_call(sd, video, s_stream, 1); if (ret == -ENOIOCTLCMD) ret = 0; if (ret) goto err_s_stream; return 0; err_s_stream: v4l2_subdev_call(sd, video, post_streamoff); pipe_line_stop: video_device_pipeline_stop(&cru->vdev); err_vclk_disable: clk_disable_unprepare(cru->vclk); err_pm_put: pm_runtime_put_sync(cru->dev); return ret; } static void rzg2l_cru_stop_streaming(struct rzg2l_cru_dev *cru) { cru->state = RZG2L_CRU_DMA_STOPPING; rzg2l_cru_set_stream(cru, 0); } static irqreturn_t rzg2l_cru_irq(int irq, void *data) { struct rzg2l_cru_dev *cru = data; unsigned int handled = 0; unsigned long flags; u32 irq_status; u32 amnmbs; int slot; spin_lock_irqsave(&cru->qlock, flags); irq_status = rzg2l_cru_read(cru, CRUnINTS); if (!irq_status) goto done; handled = 1; rzg2l_cru_write(cru, CRUnINTS, rzg2l_cru_read(cru, CRUnINTS)); /* Nothing to do if capture status is 'RZG2L_CRU_DMA_STOPPED' */ if (cru->state == RZG2L_CRU_DMA_STOPPED) { dev_dbg(cru->dev, "IRQ while state stopped\n"); goto done; } /* Increase stop retries if capture status is 'RZG2L_CRU_DMA_STOPPING' */ if (cru->state == RZG2L_CRU_DMA_STOPPING) { if (irq_status & CRUnINTS_SFS) dev_dbg(cru->dev, "IRQ while state stopping\n"); goto done; } /* Prepare for capture and update state */ amnmbs = rzg2l_cru_read(cru, AMnMBS); slot = amnmbs & AMnMBS_MBSTS; /* * AMnMBS.MBSTS indicates the destination of Memory Bank (MB). * Recalculate to get the current transfer complete MB. */ if (slot == 0) slot = cru->num_buf - 1; else slot--; /* * To hand buffers back in a known order to userspace start * to capture first from slot 0. 
*/ if (cru->state == RZG2L_CRU_DMA_STARTING) { if (slot != 0) { dev_dbg(cru->dev, "Starting sync slot: %d\n", slot); goto done; } dev_dbg(cru->dev, "Capture start synced!\n"); cru->state = RZG2L_CRU_DMA_RUNNING; } /* Capture frame */ if (cru->queue_buf[slot]) { cru->queue_buf[slot]->field = cru->format.field; cru->queue_buf[slot]->sequence = cru->sequence; cru->queue_buf[slot]->vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&cru->queue_buf[slot]->vb2_buf, VB2_BUF_STATE_DONE); cru->queue_buf[slot] = NULL; } else { /* Scratch buffer was used, dropping frame. */ dev_dbg(cru->dev, "Dropping frame %u\n", cru->sequence); } cru->sequence++; /* Prepare for next frame */ rzg2l_cru_fill_hw_slot(cru, slot); done: spin_unlock_irqrestore(&cru->qlock, flags); return IRQ_RETVAL(handled); } static int rzg2l_cru_start_streaming_vq(struct vb2_queue *vq, unsigned int count) { struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq); int ret; /* Release reset state */ ret = reset_control_deassert(cru->aresetn); if (ret) { dev_err(cru->dev, "failed to deassert aresetn\n"); return ret; } ret = reset_control_deassert(cru->presetn); if (ret) { reset_control_assert(cru->aresetn); dev_err(cru->dev, "failed to deassert presetn\n"); return ret; } ret = request_irq(cru->image_conv_irq, rzg2l_cru_irq, IRQF_SHARED, KBUILD_MODNAME, cru); if (ret) { dev_err(cru->dev, "failed to request irq\n"); goto assert_resets; } /* Allocate scratch buffer. 
*/ cru->scratch = dma_alloc_coherent(cru->dev, cru->format.sizeimage, &cru->scratch_phys, GFP_KERNEL); if (!cru->scratch) { return_unused_buffers(cru, VB2_BUF_STATE_QUEUED); dev_err(cru->dev, "Failed to allocate scratch buffer\n"); ret = -ENOMEM; goto free_image_conv_irq; } cru->sequence = 0; ret = rzg2l_cru_set_stream(cru, 1); if (ret) { return_unused_buffers(cru, VB2_BUF_STATE_QUEUED); goto out; } cru->state = RZG2L_CRU_DMA_STARTING; dev_dbg(cru->dev, "Starting to capture\n"); return 0; out: if (ret) dma_free_coherent(cru->dev, cru->format.sizeimage, cru->scratch, cru->scratch_phys); free_image_conv_irq: free_irq(cru->image_conv_irq, cru); assert_resets: reset_control_assert(cru->presetn); reset_control_assert(cru->aresetn); return ret; } static void rzg2l_cru_stop_streaming_vq(struct vb2_queue *vq) { struct rzg2l_cru_dev *cru = vb2_get_drv_priv(vq); rzg2l_cru_stop_streaming(cru); /* Free scratch buffer */ dma_free_coherent(cru->dev, cru->format.sizeimage, cru->scratch, cru->scratch_phys); free_irq(cru->image_conv_irq, cru); reset_control_assert(cru->presetn); return_unused_buffers(cru, VB2_BUF_STATE_ERROR); } static const struct vb2_ops rzg2l_cru_qops = { .queue_setup = rzg2l_cru_queue_setup, .buf_prepare = rzg2l_cru_buffer_prepare, .buf_queue = rzg2l_cru_buffer_queue, .start_streaming = rzg2l_cru_start_streaming_vq, .stop_streaming = rzg2l_cru_stop_streaming_vq, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; void rzg2l_cru_dma_unregister(struct rzg2l_cru_dev *cru) { mutex_destroy(&cru->lock); v4l2_device_unregister(&cru->v4l2_dev); vb2_queue_release(&cru->queue); } int rzg2l_cru_dma_register(struct rzg2l_cru_dev *cru) { struct vb2_queue *q = &cru->queue; unsigned int i; int ret; /* Initialize the top-level structure */ ret = v4l2_device_register(cru->dev, &cru->v4l2_dev); if (ret) return ret; mutex_init(&cru->lock); INIT_LIST_HEAD(&cru->buf_list); spin_lock_init(&cru->qlock); cru->state = RZG2L_CRU_DMA_STOPPED; for (i = 0; i < 
RZG2L_CRU_HW_BUFFER_MAX; i++) cru->queue_buf[i] = NULL; /* buffer queue */ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_DMABUF; q->lock = &cru->lock; q->drv_priv = cru; q->buf_struct_size = sizeof(struct rzg2l_cru_buffer); q->ops = &rzg2l_cru_qops; q->mem_ops = &vb2_dma_contig_memops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->min_buffers_needed = 4; q->dev = cru->dev; ret = vb2_queue_init(q); if (ret < 0) { dev_err(cru->dev, "failed to initialize VB2 queue\n"); goto error; } return 0; error: mutex_destroy(&cru->lock); v4l2_device_unregister(&cru->v4l2_dev); return ret; } /* ----------------------------------------------------------------------------- * V4L2 stuff */ static const struct v4l2_format_info rzg2l_cru_formats[] = { { .format = V4L2_PIX_FMT_UYVY, .bpp[0] = 2, }, }; const struct v4l2_format_info *rzg2l_cru_format_from_pixel(u32 format) { unsigned int i; for (i = 0; i < ARRAY_SIZE(rzg2l_cru_formats); i++) if (rzg2l_cru_formats[i].format == format) return rzg2l_cru_formats + i; return NULL; } static u32 rzg2l_cru_format_bytesperline(struct v4l2_pix_format *pix) { const struct v4l2_format_info *fmt; fmt = rzg2l_cru_format_from_pixel(pix->pixelformat); if (WARN_ON(!fmt)) return -EINVAL; return pix->width * fmt->bpp[0]; } static u32 rzg2l_cru_format_sizeimage(struct v4l2_pix_format *pix) { return pix->bytesperline * pix->height; } static void rzg2l_cru_format_align(struct rzg2l_cru_dev *cru, struct v4l2_pix_format *pix) { if (!rzg2l_cru_format_from_pixel(pix->pixelformat)) pix->pixelformat = RZG2L_CRU_DEFAULT_FORMAT; switch (pix->field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_NONE: case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_INTERLACED: break; default: pix->field = RZG2L_CRU_DEFAULT_FIELD; break; } /* Limit to CRU capabilities */ v4l_bound_align_image(&pix->width, 320, RZG2L_CRU_MAX_INPUT_WIDTH, 1, &pix->height, 240, RZG2L_CRU_MAX_INPUT_HEIGHT, 2, 0); 
pix->bytesperline = rzg2l_cru_format_bytesperline(pix); pix->sizeimage = rzg2l_cru_format_sizeimage(pix); dev_dbg(cru->dev, "Format %ux%u bpl: %u size: %u\n", pix->width, pix->height, pix->bytesperline, pix->sizeimage); } static void rzg2l_cru_try_format(struct rzg2l_cru_dev *cru, struct v4l2_pix_format *pix) { /* * The V4L2 specification clearly documents the colorspace fields * as being set by drivers for capture devices. Using the values * supplied by userspace thus wouldn't comply with the API. Until * the API is updated force fixed values. */ pix->colorspace = RZG2L_CRU_DEFAULT_COLORSPACE; pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace); pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace); pix->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, pix->colorspace, pix->ycbcr_enc); rzg2l_cru_format_align(cru, pix); } static int rzg2l_cru_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver)); strscpy(cap->card, "RZG2L_CRU", sizeof(cap->card)); return 0; } static int rzg2l_cru_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct rzg2l_cru_dev *cru = video_drvdata(file); rzg2l_cru_try_format(cru, &f->fmt.pix); return 0; } static int rzg2l_cru_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct rzg2l_cru_dev *cru = video_drvdata(file); if (vb2_is_busy(&cru->queue)) return -EBUSY; rzg2l_cru_try_format(cru, &f->fmt.pix); cru->format = f->fmt.pix; return 0; } static int rzg2l_cru_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct rzg2l_cru_dev *cru = video_drvdata(file); f->fmt.pix = cru->format; return 0; } static int rzg2l_cru_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (f->index >= ARRAY_SIZE(rzg2l_cru_formats)) return -EINVAL; f->pixelformat = rzg2l_cru_formats[f->index].format; return 0; } static const struct v4l2_ioctl_ops rzg2l_cru_ioctl_ops = { 
.vidioc_querycap = rzg2l_cru_querycap, .vidioc_try_fmt_vid_cap = rzg2l_cru_try_fmt_vid_cap, .vidioc_g_fmt_vid_cap = rzg2l_cru_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = rzg2l_cru_s_fmt_vid_cap, .vidioc_enum_fmt_vid_cap = rzg2l_cru_enum_fmt_vid_cap, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, }; /* ----------------------------------------------------------------------------- * Media controller file operations */ static int rzg2l_cru_open(struct file *file) { struct rzg2l_cru_dev *cru = video_drvdata(file); int ret; ret = mutex_lock_interruptible(&cru->lock); if (ret) return ret; file->private_data = cru; ret = v4l2_fh_open(file); if (ret) goto err_unlock; mutex_unlock(&cru->lock); return 0; err_unlock: mutex_unlock(&cru->lock); return ret; } static int rzg2l_cru_release(struct file *file) { struct rzg2l_cru_dev *cru = video_drvdata(file); int ret; mutex_lock(&cru->lock); /* the release helper will cleanup any on-going streaming. 
*/ ret = _vb2_fop_release(file, NULL); mutex_unlock(&cru->lock); return ret; } static const struct v4l2_file_operations rzg2l_cru_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, .open = rzg2l_cru_open, .release = rzg2l_cru_release, .poll = vb2_fop_poll, .mmap = vb2_fop_mmap, .read = vb2_fop_read, }; static void rzg2l_cru_v4l2_init(struct rzg2l_cru_dev *cru) { struct video_device *vdev = &cru->vdev; vdev->v4l2_dev = &cru->v4l2_dev; vdev->queue = &cru->queue; snprintf(vdev->name, sizeof(vdev->name), "CRU output"); vdev->release = video_device_release_empty; vdev->lock = &cru->lock; vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; vdev->device_caps |= V4L2_CAP_IO_MC; vdev->fops = &rzg2l_cru_fops; vdev->ioctl_ops = &rzg2l_cru_ioctl_ops; /* Set a default format */ cru->format.pixelformat = RZG2L_CRU_DEFAULT_FORMAT; cru->format.width = RZG2L_CRU_DEFAULT_WIDTH; cru->format.height = RZG2L_CRU_DEFAULT_HEIGHT; cru->format.field = RZG2L_CRU_DEFAULT_FIELD; cru->format.colorspace = RZG2L_CRU_DEFAULT_COLORSPACE; rzg2l_cru_format_align(cru, &cru->format); } void rzg2l_cru_video_unregister(struct rzg2l_cru_dev *cru) { media_device_unregister(&cru->mdev); video_unregister_device(&cru->vdev); } int rzg2l_cru_video_register(struct rzg2l_cru_dev *cru) { struct video_device *vdev = &cru->vdev; int ret; if (video_is_registered(&cru->vdev)) { struct media_entity *entity; entity = &cru->vdev.entity; if (!entity->graph_obj.mdev) entity->graph_obj.mdev = &cru->mdev; return 0; } rzg2l_cru_v4l2_init(cru); video_set_drvdata(vdev, cru); ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(cru->dev, "Failed to register video device\n"); return ret; } ret = media_device_register(&cru->mdev); if (ret) { video_unregister_device(&cru->vdev); return ret; } return 0; }
linux-master
drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Renesas RZ/G2L CRU
 *
 * Copyright (C) 2022 Renesas Electronics Corp.
 */

#include "rzg2l-cru.h"

/* One supported media-bus format of the CRU image-processing subdev. */
struct rzg2l_cru_ip_format {
	u32 code;
	unsigned int datatype;	/* CSI-2 data type identifier */
	unsigned int bpp;	/* bits per pixel on the bus */
};

static const struct rzg2l_cru_ip_format rzg2l_cru_ip_formats[] = {
	{ .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = 0x1e, .bpp = 16 },
};

enum rzg2l_csi2_pads {
	RZG2L_CRU_IP_SINK = 0,
	RZG2L_CRU_IP_SOURCE,
};

/* Look up the format descriptor for @code, or NULL if unsupported. */
static const struct rzg2l_cru_ip_format *rzg2l_cru_ip_code_to_fmt(unsigned int code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rzg2l_cru_ip_formats); i++)
		if (rzg2l_cru_ip_formats[i].code == code)
			return &rzg2l_cru_ip_formats[i];

	return NULL;
}

/* Return the active format on the IP subdev's source pad (pad 1). */
struct v4l2_mbus_framefmt *rzg2l_cru_ip_get_src_fmt(struct rzg2l_cru_dev *cru)
{
	struct v4l2_subdev_state *state;
	struct v4l2_mbus_framefmt *fmt;

	state = v4l2_subdev_lock_and_get_active_state(&cru->ip.subdev);
	fmt = v4l2_subdev_get_pad_format(&cru->ip.subdev, state, 1);
	v4l2_subdev_unlock_state(state);

	return fmt;
}

/*
 * Start/stop the image-processing subdev and its remote (CSI-2).
 * On stop, all teardown calls are attempted and the first error is
 * kept. On start, note the vclk unprepare/re-prepare dance around the
 * remote s_stream call; the error path re-enables vclk so a later
 * s_stream(0) can disable it symmetrically.
 */
static int rzg2l_cru_ip_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct rzg2l_cru_dev *cru;
	int s_stream_ret = 0;
	int ret;

	cru = v4l2_get_subdevdata(sd);

	if (!enable) {
		ret = v4l2_subdev_call(cru->ip.remote, video, s_stream,
				       enable);
		if (ret)
			s_stream_ret = ret;

		ret = v4l2_subdev_call(cru->ip.remote, video, post_streamoff);
		if (ret == -ENOIOCTLCMD)
			ret = 0;
		if (ret && !s_stream_ret)
			s_stream_ret = ret;
		rzg2l_cru_stop_image_processing(cru);
	} else {
		ret = v4l2_subdev_call(cru->ip.remote, video, pre_streamon, 0);
		if (ret == -ENOIOCTLCMD)
			ret = 0;
		if (ret)
			return ret;

		ret = rzg2l_cru_start_image_processing(cru);
		if (ret) {
			v4l2_subdev_call(cru->ip.remote, video,
					 post_streamoff);
			return ret;
		}

		rzg2l_cru_vclk_unprepare(cru);

		ret = v4l2_subdev_call(cru->ip.remote, video, s_stream,
				       enable);
		if (ret == -ENOIOCTLCMD)
			ret = 0;
		if (!ret) {
			ret = rzg2l_cru_vclk_prepare(cru);
			if (!ret)
				return 0;
		} else {
			/* enable back vclk so that s_stream in error path disables it */
			if (rzg2l_cru_vclk_prepare(cru))
				dev_err(cru->dev, "Failed to enable vclk\n");
		}

		s_stream_ret = ret;

		v4l2_subdev_call(cru->ip.remote, video, post_streamoff);
		rzg2l_cru_stop_image_processing(cru);
	}

	return s_stream_ret;
}

/*
 * .set_fmt: the source pad is read-only (mirrors the sink); sink-pad
 * formats are validated, clamped to CRU limits and propagated to the
 * source pad.
 */
static int rzg2l_cru_ip_set_format(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_format *fmt)
{
	struct v4l2_mbus_framefmt *src_format;
	struct v4l2_mbus_framefmt *sink_format;

	src_format = v4l2_subdev_get_pad_format(sd, state,
						RZG2L_CRU_IP_SOURCE);
	if (fmt->pad == RZG2L_CRU_IP_SOURCE) {
		fmt->format = *src_format;
		return 0;
	}

	sink_format = v4l2_subdev_get_pad_format(sd, state, fmt->pad);

	/* Fall back to the first supported code if the request is unknown. */
	if (!rzg2l_cru_ip_code_to_fmt(fmt->format.code))
		sink_format->code = rzg2l_cru_ip_formats[0].code;
	else
		sink_format->code = fmt->format.code;

	sink_format->field = V4L2_FIELD_NONE;
	sink_format->colorspace = fmt->format.colorspace;
	sink_format->xfer_func = fmt->format.xfer_func;
	sink_format->ycbcr_enc = fmt->format.ycbcr_enc;
	sink_format->quantization = fmt->format.quantization;
	sink_format->width = clamp_t(u32, fmt->format.width,
				     RZG2L_CRU_MIN_INPUT_WIDTH,
				     RZG2L_CRU_MAX_INPUT_WIDTH);
	sink_format->height = clamp_t(u32, fmt->format.height,
				      RZG2L_CRU_MIN_INPUT_HEIGHT,
				      RZG2L_CRU_MAX_INPUT_HEIGHT);

	fmt->format = *sink_format;

	/* propagate format to source pad */
	*src_format = *sink_format;

	return 0;
}

static int rzg2l_cru_ip_enum_mbus_code(struct v4l2_subdev *sd,
				       struct v4l2_subdev_state *state,
				       struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->index >= ARRAY_SIZE(rzg2l_cru_ip_formats))
		return -EINVAL;

	code->code = rzg2l_cru_ip_formats[code->index].code;
	return 0;
}

static int rzg2l_cru_ip_enum_frame_size(struct v4l2_subdev *sd,
					struct v4l2_subdev_state *state,
					struct v4l2_subdev_frame_size_enum *fse)
{
	if (fse->index != 0)
		return -EINVAL;

	if (fse->code != MEDIA_BUS_FMT_UYVY8_1X16)
		return -EINVAL;

	fse->min_width = RZG2L_CRU_MIN_INPUT_WIDTH;
	fse->min_height = RZG2L_CRU_MIN_INPUT_HEIGHT;
	fse->max_width = RZG2L_CRU_MAX_INPUT_WIDTH;
	fse->max_height = RZG2L_CRU_MAX_INPUT_HEIGHT;

	return 0;
}

/* .init_cfg: seed the pads with the minimum supported UYVY frame. */
static int rzg2l_cru_ip_init_config(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *sd_state)
{
	struct v4l2_subdev_format fmt = { .pad = RZG2L_CRU_IP_SINK, };

	fmt.format.width = RZG2L_CRU_MIN_INPUT_WIDTH;
	fmt.format.height = RZG2L_CRU_MIN_INPUT_HEIGHT;
	fmt.format.field = V4L2_FIELD_NONE;
	fmt.format.code = MEDIA_BUS_FMT_UYVY8_1X16;
	fmt.format.colorspace = V4L2_COLORSPACE_SRGB;
	fmt.format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	fmt.format.quantization = V4L2_QUANTIZATION_DEFAULT;
	fmt.format.xfer_func = V4L2_XFER_FUNC_DEFAULT;

	return rzg2l_cru_ip_set_format(sd, sd_state, &fmt);
}

static const struct v4l2_subdev_video_ops rzg2l_cru_ip_video_ops = {
	.s_stream = rzg2l_cru_ip_s_stream,
};

static const struct v4l2_subdev_pad_ops rzg2l_cru_ip_pad_ops = {
	.enum_mbus_code = rzg2l_cru_ip_enum_mbus_code,
	.enum_frame_size = rzg2l_cru_ip_enum_frame_size,
	.init_cfg = rzg2l_cru_ip_init_config,
	.get_fmt = v4l2_subdev_get_fmt,
	.set_fmt = rzg2l_cru_ip_set_format,
};

static const struct v4l2_subdev_ops rzg2l_cru_ip_subdev_ops = {
	.video = &rzg2l_cru_ip_video_ops,
	.pad = &rzg2l_cru_ip_pad_ops,
};

static const struct media_entity_operations rzg2l_cru_ip_entity_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

/*
 * Create and register the image-processing subdev with one sink and
 * one source pad. Cleanup on failure unwinds in reverse order.
 */
int rzg2l_cru_ip_subdev_register(struct rzg2l_cru_dev *cru)
{
	struct rzg2l_cru_ip *ip = &cru->ip;
	int ret;

	ip->subdev.dev = cru->dev;
	v4l2_subdev_init(&ip->subdev, &rzg2l_cru_ip_subdev_ops);
	v4l2_set_subdevdata(&ip->subdev, cru);
	snprintf(ip->subdev.name, sizeof(ip->subdev.name),
		 "cru-ip-%s", dev_name(cru->dev));
	ip->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;

	ip->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
	ip->subdev.entity.ops = &rzg2l_cru_ip_entity_ops;

	ip->pads[0].flags = MEDIA_PAD_FL_SINK;
	ip->pads[1].flags = MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&ip->subdev.entity, 2, ip->pads);
	if (ret)
		return ret;

	ret = v4l2_subdev_init_finalize(&ip->subdev);
	if (ret < 0)
		goto entity_cleanup;

	ret = v4l2_device_register_subdev(&cru->v4l2_dev, &ip->subdev);
	if (ret < 0)
		goto error_subdev;

	return 0;

error_subdev:
	v4l2_subdev_cleanup(&ip->subdev);
entity_cleanup:
	media_entity_cleanup(&ip->subdev.entity);

	return ret;
}

void rzg2l_cru_ip_subdev_unregister(struct rzg2l_cru_dev *cru)
{
	struct rzg2l_cru_ip *ip = &cru->ip;

	media_entity_cleanup(&ip->subdev.entity);
	v4l2_subdev_cleanup(&ip->subdev);
	v4l2_device_unregister_subdev(&ip->subdev);
}
linux-master
drivers/media/platform/renesas/rzg2l-cru/rzg2l-ip.c
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Renesas RZ/G2L CRU * * Copyright (C) 2022 Renesas Electronics Corp. * * Based on Renesas R-Car VIN * Copyright (C) 2011-2013 Renesas Solutions Corp. * Copyright (C) 2013 Cogent Embedded, Inc., <[email protected]> * Copyright (C) 2008 Magnus Damm */ #include <linux/clk.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-mc.h> #include "rzg2l-cru.h" static inline struct rzg2l_cru_dev *notifier_to_cru(struct v4l2_async_notifier *n) { return container_of(n, struct rzg2l_cru_dev, notifier); } static const struct media_device_ops rzg2l_cru_media_ops = { .link_notify = v4l2_pipeline_link_notify, }; /* ----------------------------------------------------------------------------- * Group async notifier */ static int rzg2l_cru_group_notify_complete(struct v4l2_async_notifier *notifier) { struct rzg2l_cru_dev *cru = notifier_to_cru(notifier); struct media_entity *source, *sink; int ret; ret = rzg2l_cru_ip_subdev_register(cru); if (ret) return ret; ret = v4l2_device_register_subdev_nodes(&cru->v4l2_dev); if (ret) { dev_err(cru->dev, "Failed to register subdev nodes\n"); return ret; } ret = rzg2l_cru_video_register(cru); if (ret) return ret; /* * CRU can be connected either to CSI2 or PARALLEL device * For now we are only supporting CSI2 * * Create media device link between CSI-2 <-> CRU IP */ source = &cru->csi.subdev->entity; sink = &cru->ip.subdev.entity; ret = media_create_pad_link(source, 1, sink, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret) { dev_err(cru->dev, "Error creating link from %s to %s\n", source->name, sink->name); return ret; } cru->csi.channel = 0; cru->ip.remote = cru->csi.subdev; /* Create media device link between CRU IP <-> CRU OUTPUT */ source = &cru->ip.subdev.entity; sink = &cru->vdev.entity; ret = 
media_create_pad_link(source, 1, sink, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret) { dev_err(cru->dev, "Error creating link from %s to %s\n", source->name, sink->name); return ret; } return 0; } static void rzg2l_cru_group_notify_unbind(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asd) { struct rzg2l_cru_dev *cru = notifier_to_cru(notifier); rzg2l_cru_ip_subdev_unregister(cru); mutex_lock(&cru->mdev_lock); if (cru->csi.asd == asd) { cru->csi.subdev = NULL; dev_dbg(cru->dev, "Unbind CSI-2 %s\n", subdev->name); } mutex_unlock(&cru->mdev_lock); } static int rzg2l_cru_group_notify_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asd) { struct rzg2l_cru_dev *cru = notifier_to_cru(notifier); mutex_lock(&cru->mdev_lock); if (cru->csi.asd == asd) { cru->csi.subdev = subdev; dev_dbg(cru->dev, "Bound CSI-2 %s\n", subdev->name); } mutex_unlock(&cru->mdev_lock); return 0; } static const struct v4l2_async_notifier_operations rzg2l_cru_async_ops = { .bound = rzg2l_cru_group_notify_bound, .unbind = rzg2l_cru_group_notify_unbind, .complete = rzg2l_cru_group_notify_complete, }; static int rzg2l_cru_mc_parse_of(struct rzg2l_cru_dev *cru) { struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY, }; struct fwnode_handle *ep, *fwnode; struct v4l2_async_connection *asd; int ret; ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(cru->dev), 1, 0, 0); if (!ep) return 0; fwnode = fwnode_graph_get_remote_endpoint(ep); ret = v4l2_fwnode_endpoint_parse(ep, &vep); fwnode_handle_put(ep); if (ret) { dev_err(cru->dev, "Failed to parse %pOF\n", to_of_node(fwnode)); ret = -EINVAL; goto out; } if (!of_device_is_available(to_of_node(fwnode))) { dev_dbg(cru->dev, "OF device %pOF disabled, ignoring\n", to_of_node(fwnode)); ret = -ENOTCONN; goto out; } asd = v4l2_async_nf_add_fwnode(&cru->notifier, fwnode, struct v4l2_async_connection); if (IS_ERR(asd)) { ret = PTR_ERR(asd); 
goto out; } cru->csi.asd = asd; dev_dbg(cru->dev, "Added OF device %pOF to slot %u\n", to_of_node(fwnode), vep.base.id); out: fwnode_handle_put(fwnode); return ret; } static int rzg2l_cru_mc_parse_of_graph(struct rzg2l_cru_dev *cru) { int ret; v4l2_async_nf_init(&cru->notifier, &cru->v4l2_dev); ret = rzg2l_cru_mc_parse_of(cru); if (ret) return ret; cru->notifier.ops = &rzg2l_cru_async_ops; if (list_empty(&cru->notifier.waiting_list)) return 0; ret = v4l2_async_nf_register(&cru->notifier); if (ret < 0) { dev_err(cru->dev, "Notifier registration failed\n"); v4l2_async_nf_cleanup(&cru->notifier); return ret; } return 0; } static int rzg2l_cru_media_init(struct rzg2l_cru_dev *cru) { struct media_device *mdev = NULL; const struct of_device_id *match; int ret; cru->pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_pads_init(&cru->vdev.entity, 1, &cru->pad); if (ret) return ret; mutex_init(&cru->mdev_lock); mdev = &cru->mdev; mdev->dev = cru->dev; mdev->ops = &rzg2l_cru_media_ops; match = of_match_node(cru->dev->driver->of_match_table, cru->dev->of_node); strscpy(mdev->driver_name, KBUILD_MODNAME, sizeof(mdev->driver_name)); strscpy(mdev->model, match->compatible, sizeof(mdev->model)); cru->v4l2_dev.mdev = &cru->mdev; media_device_init(mdev); ret = rzg2l_cru_mc_parse_of_graph(cru); if (ret) { mutex_lock(&cru->mdev_lock); cru->v4l2_dev.mdev = NULL; mutex_unlock(&cru->mdev_lock); } return 0; } static int rzg2l_cru_probe(struct platform_device *pdev) { struct rzg2l_cru_dev *cru; int ret; cru = devm_kzalloc(&pdev->dev, sizeof(*cru), GFP_KERNEL); if (!cru) return -ENOMEM; cru->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(cru->base)) return PTR_ERR(cru->base); cru->presetn = devm_reset_control_get_shared(&pdev->dev, "presetn"); if (IS_ERR(cru->presetn)) return dev_err_probe(&pdev->dev, PTR_ERR(cru->presetn), "Failed to get cpg presetn\n"); cru->aresetn = devm_reset_control_get_exclusive(&pdev->dev, "aresetn"); if (IS_ERR(cru->aresetn)) return 
dev_err_probe(&pdev->dev, PTR_ERR(cru->aresetn), "Failed to get cpg aresetn\n"); cru->vclk = devm_clk_get(&pdev->dev, "video"); if (IS_ERR(cru->vclk)) return dev_err_probe(&pdev->dev, PTR_ERR(cru->vclk), "Failed to get video clock\n"); cru->dev = &pdev->dev; cru->info = of_device_get_match_data(&pdev->dev); cru->image_conv_irq = platform_get_irq(pdev, 0); if (cru->image_conv_irq < 0) return cru->image_conv_irq; platform_set_drvdata(pdev, cru); ret = rzg2l_cru_dma_register(cru); if (ret) return ret; cru->num_buf = RZG2L_CRU_HW_BUFFER_DEFAULT; pm_suspend_ignore_children(&pdev->dev, true); pm_runtime_enable(&pdev->dev); ret = rzg2l_cru_media_init(cru); if (ret) goto error_dma_unregister; return 0; error_dma_unregister: rzg2l_cru_dma_unregister(cru); pm_runtime_disable(&pdev->dev); return ret; } static void rzg2l_cru_remove(struct platform_device *pdev) { struct rzg2l_cru_dev *cru = platform_get_drvdata(pdev); pm_runtime_disable(&pdev->dev); v4l2_async_nf_unregister(&cru->notifier); v4l2_async_nf_cleanup(&cru->notifier); rzg2l_cru_video_unregister(cru); media_device_cleanup(&cru->mdev); mutex_destroy(&cru->mdev_lock); rzg2l_cru_dma_unregister(cru); } static const struct of_device_id rzg2l_cru_of_id_table[] = { { .compatible = "renesas,rzg2l-cru", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, rzg2l_cru_of_id_table); static struct platform_driver rzg2l_cru_driver = { .driver = { .name = "rzg2l-cru", .of_match_table = rzg2l_cru_of_id_table, }, .probe = rzg2l_cru_probe, .remove_new = rzg2l_cru_remove, }; module_platform_driver(rzg2l_cru_driver); MODULE_AUTHOR("Lad Prabhakar <[email protected]>"); MODULE_DESCRIPTION("Renesas RZ/G2L CRU driver"); MODULE_LICENSE("GPL");
/* linux-master: drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c */
// SPDX-License-Identifier: GPL-2.0 /* * Driver for Renesas RZ/G2L MIPI CSI-2 Receiver * * Copyright (C) 2022 Renesas Electronics Corp. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/sys_soc.h> #include <linux/units.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-mc.h> #include <media/v4l2-subdev.h> /* LINK registers */ /* Module Configuration Register */ #define CSI2nMCG 0x0 #define CSI2nMCG_SDLN GENMASK(11, 8) /* Module Control Register 0 */ #define CSI2nMCT0 0x10 #define CSI2nMCT0_VDLN(x) ((x) << 0) /* Module Control Register 2 */ #define CSI2nMCT2 0x18 #define CSI2nMCT2_FRRSKW(x) ((x) << 16) #define CSI2nMCT2_FRRCLK(x) ((x) << 0) /* Module Control Register 3 */ #define CSI2nMCT3 0x1c #define CSI2nMCT3_RXEN BIT(0) /* Reset Control Register */ #define CSI2nRTCT 0x28 #define CSI2nRTCT_VSRST BIT(0) /* Reset Status Register */ #define CSI2nRTST 0x2c #define CSI2nRTST_VSRSTS BIT(0) /* Receive Data Type Enable Low Register */ #define CSI2nDTEL 0x60 /* Receive Data Type Enable High Register */ #define CSI2nDTEH 0x64 /* DPHY registers */ /* D-PHY Control Register 0 */ #define CSIDPHYCTRL0 0x400 #define CSIDPHYCTRL0_EN_LDO1200 BIT(1) #define CSIDPHYCTRL0_EN_BGR BIT(0) /* D-PHY Timing Register 0 */ #define CSIDPHYTIM0 0x404 #define CSIDPHYTIM0_TCLK_MISS(x) ((x) << 24) #define CSIDPHYTIM0_T_INIT(x) ((x) << 0) /* D-PHY Timing Register 1 */ #define CSIDPHYTIM1 0x408 #define CSIDPHYTIM1_THS_PREPARE(x) ((x) << 24) #define CSIDPHYTIM1_TCLK_PREPARE(x) ((x) << 16) #define CSIDPHYTIM1_THS_SETTLE(x) ((x) << 8) #define CSIDPHYTIM1_TCLK_SETTLE(x) ((x) << 0) /* D-PHY Skew Adjustment Function */ #define CSIDPHYSKW0 0x460 #define CSIDPHYSKW0_UTIL_DL0_SKW_ADJ(x) ((x) & 0x3) #define 
CSIDPHYSKW0_UTIL_DL1_SKW_ADJ(x) (((x) & 0x3) << 4) #define CSIDPHYSKW0_UTIL_DL2_SKW_ADJ(x) (((x) & 0x3) << 8) #define CSIDPHYSKW0_UTIL_DL3_SKW_ADJ(x) (((x) & 0x3) << 12) #define CSIDPHYSKW0_DEFAULT_SKW (CSIDPHYSKW0_UTIL_DL0_SKW_ADJ(1) | \ CSIDPHYSKW0_UTIL_DL1_SKW_ADJ(1) | \ CSIDPHYSKW0_UTIL_DL2_SKW_ADJ(1) | \ CSIDPHYSKW0_UTIL_DL3_SKW_ADJ(1)) #define VSRSTS_RETRIES 20 #define RZG2L_CSI2_MIN_WIDTH 320 #define RZG2L_CSI2_MIN_HEIGHT 240 #define RZG2L_CSI2_MAX_WIDTH 2800 #define RZG2L_CSI2_MAX_HEIGHT 4095 #define RZG2L_CSI2_DEFAULT_WIDTH RZG2L_CSI2_MIN_WIDTH #define RZG2L_CSI2_DEFAULT_HEIGHT RZG2L_CSI2_MIN_HEIGHT #define RZG2L_CSI2_DEFAULT_FMT MEDIA_BUS_FMT_UYVY8_1X16 enum rzg2l_csi2_pads { RZG2L_CSI2_SINK = 0, RZG2L_CSI2_SOURCE, NR_OF_RZG2L_CSI2_PAD, }; struct rzg2l_csi2 { struct device *dev; void __iomem *base; struct reset_control *presetn; struct reset_control *cmn_rstb; struct clk *sysclk; unsigned long vclk_rate; struct v4l2_subdev subdev; struct media_pad pads[NR_OF_RZG2L_CSI2_PAD]; struct v4l2_async_notifier notifier; struct v4l2_subdev *remote_source; unsigned short lanes; unsigned long hsfreq; bool dphy_enabled; }; struct rzg2l_csi2_timings { u32 t_init; u32 tclk_miss; u32 tclk_settle; u32 ths_settle; u32 tclk_prepare; u32 ths_prepare; u32 max_hsfreq; }; static const struct rzg2l_csi2_timings rzg2l_csi2_global_timings[] = { { .max_hsfreq = 80, .t_init = 79801, .tclk_miss = 4, .tclk_settle = 23, .ths_settle = 31, .tclk_prepare = 10, .ths_prepare = 19, }, { .max_hsfreq = 125, .t_init = 79801, .tclk_miss = 4, .tclk_settle = 23, .ths_settle = 28, .tclk_prepare = 10, .ths_prepare = 19, }, { .max_hsfreq = 250, .t_init = 79801, .tclk_miss = 4, .tclk_settle = 23, .ths_settle = 22, .tclk_prepare = 10, .ths_prepare = 16, }, { .max_hsfreq = 360, .t_init = 79801, .tclk_miss = 4, .tclk_settle = 18, .ths_settle = 19, .tclk_prepare = 10, .ths_prepare = 10, }, { .max_hsfreq = 1500, .t_init = 79801, .tclk_miss = 4, .tclk_settle = 18, .ths_settle = 18, .tclk_prepare = 10, 
.ths_prepare = 10, }, }; struct rzg2l_csi2_format { u32 code; unsigned int datatype; unsigned int bpp; }; static const struct rzg2l_csi2_format rzg2l_csi2_formats[] = { { .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = 0x1e, .bpp = 16 }, }; static inline struct rzg2l_csi2 *sd_to_csi2(struct v4l2_subdev *sd) { return container_of(sd, struct rzg2l_csi2, subdev); } static const struct rzg2l_csi2_format *rzg2l_csi2_code_to_fmt(unsigned int code) { unsigned int i; for (i = 0; i < ARRAY_SIZE(rzg2l_csi2_formats); i++) if (rzg2l_csi2_formats[i].code == code) return &rzg2l_csi2_formats[i]; return NULL; } static inline struct rzg2l_csi2 *notifier_to_csi2(struct v4l2_async_notifier *n) { return container_of(n, struct rzg2l_csi2, notifier); } static u32 rzg2l_csi2_read(struct rzg2l_csi2 *csi2, unsigned int reg) { return ioread32(csi2->base + reg); } static void rzg2l_csi2_write(struct rzg2l_csi2 *csi2, unsigned int reg, u32 data) { iowrite32(data, csi2->base + reg); } static void rzg2l_csi2_set(struct rzg2l_csi2 *csi2, unsigned int reg, u32 set) { rzg2l_csi2_write(csi2, reg, rzg2l_csi2_read(csi2, reg) | set); } static void rzg2l_csi2_clr(struct rzg2l_csi2 *csi2, unsigned int reg, u32 clr) { rzg2l_csi2_write(csi2, reg, rzg2l_csi2_read(csi2, reg) & ~clr); } static int rzg2l_csi2_calc_mbps(struct rzg2l_csi2 *csi2) { struct v4l2_subdev *source = csi2->remote_source; const struct rzg2l_csi2_format *format; const struct v4l2_mbus_framefmt *fmt; struct v4l2_subdev_state *state; struct v4l2_ctrl *ctrl; u64 mbps; /* Read the pixel rate control from remote. 
*/ ctrl = v4l2_ctrl_find(source->ctrl_handler, V4L2_CID_PIXEL_RATE); if (!ctrl) { dev_err(csi2->dev, "no pixel rate control in subdev %s\n", source->name); return -EINVAL; } state = v4l2_subdev_lock_and_get_active_state(&csi2->subdev); fmt = v4l2_subdev_get_pad_format(&csi2->subdev, state, RZG2L_CSI2_SINK); format = rzg2l_csi2_code_to_fmt(fmt->code); v4l2_subdev_unlock_state(state); /* * Calculate hsfreq in Mbps * hsfreq = (pixel_rate * bits_per_sample) / number_of_lanes */ mbps = v4l2_ctrl_g_ctrl_int64(ctrl) * format->bpp; do_div(mbps, csi2->lanes * 1000000); return mbps; } /* ----------------------------------------------------------------------------- * DPHY setting */ static int rzg2l_csi2_dphy_disable(struct rzg2l_csi2 *csi2) { int ret; /* Reset the CRU (D-PHY) */ ret = reset_control_assert(csi2->cmn_rstb); if (ret) return ret; /* Stop the D-PHY clock */ clk_disable_unprepare(csi2->sysclk); /* Cancel the EN_LDO1200 register setting */ rzg2l_csi2_clr(csi2, CSIDPHYCTRL0, CSIDPHYCTRL0_EN_LDO1200); /* Cancel the EN_BGR register setting */ rzg2l_csi2_clr(csi2, CSIDPHYCTRL0, CSIDPHYCTRL0_EN_BGR); csi2->dphy_enabled = false; return 0; } static int rzg2l_csi2_dphy_enable(struct rzg2l_csi2 *csi2) { const struct rzg2l_csi2_timings *dphy_timing; u32 dphytim0, dphytim1; unsigned int i; int mbps; int ret; mbps = rzg2l_csi2_calc_mbps(csi2); if (mbps < 0) return mbps; csi2->hsfreq = mbps; /* Set DPHY timing parameters */ for (i = 0; i < ARRAY_SIZE(rzg2l_csi2_global_timings); ++i) { dphy_timing = &rzg2l_csi2_global_timings[i]; if (csi2->hsfreq <= dphy_timing->max_hsfreq) break; } if (i >= ARRAY_SIZE(rzg2l_csi2_global_timings)) return -EINVAL; /* Set D-PHY timing parameters */ dphytim0 = CSIDPHYTIM0_TCLK_MISS(dphy_timing->tclk_miss) | CSIDPHYTIM0_T_INIT(dphy_timing->t_init); dphytim1 = CSIDPHYTIM1_THS_PREPARE(dphy_timing->ths_prepare) | CSIDPHYTIM1_TCLK_PREPARE(dphy_timing->tclk_prepare) | CSIDPHYTIM1_THS_SETTLE(dphy_timing->ths_settle) | 
CSIDPHYTIM1_TCLK_SETTLE(dphy_timing->tclk_settle); rzg2l_csi2_write(csi2, CSIDPHYTIM0, dphytim0); rzg2l_csi2_write(csi2, CSIDPHYTIM1, dphytim1); /* Enable D-PHY power control 0 */ rzg2l_csi2_write(csi2, CSIDPHYSKW0, CSIDPHYSKW0_DEFAULT_SKW); /* Set the EN_BGR bit */ rzg2l_csi2_set(csi2, CSIDPHYCTRL0, CSIDPHYCTRL0_EN_BGR); /* Delay 20us to be stable */ usleep_range(20, 40); /* Enable D-PHY power control 1 */ rzg2l_csi2_set(csi2, CSIDPHYCTRL0, CSIDPHYCTRL0_EN_LDO1200); /* Delay 10us to be stable */ usleep_range(10, 20); /* Start supplying the internal clock for the D-PHY block */ ret = clk_prepare_enable(csi2->sysclk); if (ret) rzg2l_csi2_dphy_disable(csi2); csi2->dphy_enabled = true; return ret; } static int rzg2l_csi2_dphy_setting(struct v4l2_subdev *sd, bool on) { struct rzg2l_csi2 *csi2 = sd_to_csi2(sd); if (on) return rzg2l_csi2_dphy_enable(csi2); return rzg2l_csi2_dphy_disable(csi2); } static void rzg2l_csi2_mipi_link_enable(struct rzg2l_csi2 *csi2) { unsigned long vclk_rate = csi2->vclk_rate / HZ_PER_MHZ; u32 frrskw, frrclk, frrskw_coeff, frrclk_coeff; /* Select data lanes */ rzg2l_csi2_write(csi2, CSI2nMCT0, CSI2nMCT0_VDLN(csi2->lanes)); frrskw_coeff = 3 * vclk_rate * 8; frrclk_coeff = frrskw_coeff / 2; frrskw = DIV_ROUND_UP(frrskw_coeff, csi2->hsfreq); frrclk = DIV_ROUND_UP(frrclk_coeff, csi2->hsfreq); rzg2l_csi2_write(csi2, CSI2nMCT2, CSI2nMCT2_FRRSKW(frrskw) | CSI2nMCT2_FRRCLK(frrclk)); /* * Select data type. 
* FS, FE, LS, LE, Generic Short Packet Codes 1 to 8, * Generic Long Packet Data Types 1 to 4 YUV422 8-bit, * RGB565, RGB888, RAW8 to RAW20, User-defined 8-bit * data types 1 to 8 */ rzg2l_csi2_write(csi2, CSI2nDTEL, 0xf778ff0f); rzg2l_csi2_write(csi2, CSI2nDTEH, 0x00ffff1f); /* Enable LINK reception */ rzg2l_csi2_write(csi2, CSI2nMCT3, CSI2nMCT3_RXEN); } static void rzg2l_csi2_mipi_link_disable(struct rzg2l_csi2 *csi2) { unsigned int timeout = VSRSTS_RETRIES; /* Stop LINK reception */ rzg2l_csi2_clr(csi2, CSI2nMCT3, CSI2nMCT3_RXEN); /* Request a software reset of the LINK Video Pixel Interface */ rzg2l_csi2_write(csi2, CSI2nRTCT, CSI2nRTCT_VSRST); /* Make sure CSI2nRTST.VSRSTS bit is cleared */ while (--timeout) { if (!(rzg2l_csi2_read(csi2, CSI2nRTST) & CSI2nRTST_VSRSTS)) break; usleep_range(100, 200); } if (!timeout) dev_err(csi2->dev, "Clearing CSI2nRTST.VSRSTS timed out\n"); } static int rzg2l_csi2_mipi_link_setting(struct v4l2_subdev *sd, bool on) { struct rzg2l_csi2 *csi2 = sd_to_csi2(sd); if (on) rzg2l_csi2_mipi_link_enable(csi2); else rzg2l_csi2_mipi_link_disable(csi2); return 0; } static int rzg2l_csi2_s_stream(struct v4l2_subdev *sd, int enable) { struct rzg2l_csi2 *csi2 = sd_to_csi2(sd); int s_stream_ret = 0; int ret; if (enable) { ret = pm_runtime_resume_and_get(csi2->dev); if (ret) return ret; ret = rzg2l_csi2_mipi_link_setting(sd, 1); if (ret) goto err_pm_put; ret = reset_control_deassert(csi2->cmn_rstb); if (ret) goto err_mipi_link_disable; } ret = v4l2_subdev_call(csi2->remote_source, video, s_stream, enable); if (ret) s_stream_ret = ret; if (enable && ret) goto err_assert_rstb; if (!enable) { ret = rzg2l_csi2_dphy_setting(sd, 0); if (ret && !s_stream_ret) s_stream_ret = ret; ret = rzg2l_csi2_mipi_link_setting(sd, 0); if (ret && !s_stream_ret) s_stream_ret = ret; pm_runtime_put_sync(csi2->dev); } return s_stream_ret; err_assert_rstb: reset_control_assert(csi2->cmn_rstb); err_mipi_link_disable: rzg2l_csi2_mipi_link_setting(sd, 0); err_pm_put: 
pm_runtime_put_sync(csi2->dev); return ret; } static int rzg2l_csi2_pre_streamon(struct v4l2_subdev *sd, u32 flags) { return rzg2l_csi2_dphy_setting(sd, 1); } static int rzg2l_csi2_post_streamoff(struct v4l2_subdev *sd) { struct rzg2l_csi2 *csi2 = sd_to_csi2(sd); /* * In ideal case D-PHY will be disabled in s_stream(0) callback * as mentioned in the HW manual. The below will only happen when * pre_streamon succeeds and further down the line s_stream(1) * fails so we need to undo things in post_streamoff. */ if (csi2->dphy_enabled) return rzg2l_csi2_dphy_setting(sd, 0); return 0; } static int rzg2l_csi2_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *fmt) { struct v4l2_mbus_framefmt *src_format; struct v4l2_mbus_framefmt *sink_format; src_format = v4l2_subdev_get_pad_format(sd, state, RZG2L_CSI2_SOURCE); if (fmt->pad == RZG2L_CSI2_SOURCE) { fmt->format = *src_format; return 0; } sink_format = v4l2_subdev_get_pad_format(sd, state, RZG2L_CSI2_SINK); if (!rzg2l_csi2_code_to_fmt(fmt->format.code)) sink_format->code = rzg2l_csi2_formats[0].code; else sink_format->code = fmt->format.code; sink_format->field = V4L2_FIELD_NONE; sink_format->colorspace = fmt->format.colorspace; sink_format->xfer_func = fmt->format.xfer_func; sink_format->ycbcr_enc = fmt->format.ycbcr_enc; sink_format->quantization = fmt->format.quantization; sink_format->width = clamp_t(u32, fmt->format.width, RZG2L_CSI2_MIN_WIDTH, RZG2L_CSI2_MAX_WIDTH); sink_format->height = clamp_t(u32, fmt->format.height, RZG2L_CSI2_MIN_HEIGHT, RZG2L_CSI2_MAX_HEIGHT); fmt->format = *sink_format; /* propagate format to source pad */ *src_format = *sink_format; return 0; } static int rzg2l_csi2_init_config(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state) { struct v4l2_subdev_format fmt = { .pad = RZG2L_CSI2_SINK, }; fmt.format.width = RZG2L_CSI2_DEFAULT_WIDTH; fmt.format.height = RZG2L_CSI2_DEFAULT_HEIGHT; fmt.format.field = V4L2_FIELD_NONE; fmt.format.code = 
RZG2L_CSI2_DEFAULT_FMT; fmt.format.colorspace = V4L2_COLORSPACE_SRGB; fmt.format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; fmt.format.quantization = V4L2_QUANTIZATION_DEFAULT; fmt.format.xfer_func = V4L2_XFER_FUNC_DEFAULT; return rzg2l_csi2_set_format(sd, sd_state, &fmt); } static int rzg2l_csi2_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index >= ARRAY_SIZE(rzg2l_csi2_formats)) return -EINVAL; code->code = rzg2l_csi2_formats[code->index].code; return 0; } static int rzg2l_csi2_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { if (fse->index != 0) return -EINVAL; fse->min_width = RZG2L_CSI2_MIN_WIDTH; fse->min_height = RZG2L_CSI2_MIN_HEIGHT; fse->max_width = RZG2L_CSI2_MAX_WIDTH; fse->max_height = RZG2L_CSI2_MAX_HEIGHT; return 0; } static const struct v4l2_subdev_video_ops rzg2l_csi2_video_ops = { .s_stream = rzg2l_csi2_s_stream, .pre_streamon = rzg2l_csi2_pre_streamon, .post_streamoff = rzg2l_csi2_post_streamoff, }; static const struct v4l2_subdev_pad_ops rzg2l_csi2_pad_ops = { .enum_mbus_code = rzg2l_csi2_enum_mbus_code, .init_cfg = rzg2l_csi2_init_config, .enum_frame_size = rzg2l_csi2_enum_frame_size, .set_fmt = rzg2l_csi2_set_format, .get_fmt = v4l2_subdev_get_fmt, }; static const struct v4l2_subdev_ops rzg2l_csi2_subdev_ops = { .video = &rzg2l_csi2_video_ops, .pad = &rzg2l_csi2_pad_ops, }; /* ----------------------------------------------------------------------------- * Async handling and registration of subdevices and links. 
*/ static int rzg2l_csi2_notify_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asd) { struct rzg2l_csi2 *csi2 = notifier_to_csi2(notifier); csi2->remote_source = subdev; dev_dbg(csi2->dev, "Bound subdev: %s pad\n", subdev->name); return media_create_pad_link(&subdev->entity, RZG2L_CSI2_SINK, &csi2->subdev.entity, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); } static void rzg2l_csi2_notify_unbind(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asd) { struct rzg2l_csi2 *csi2 = notifier_to_csi2(notifier); csi2->remote_source = NULL; dev_dbg(csi2->dev, "Unbind subdev %s\n", subdev->name); } static const struct v4l2_async_notifier_operations rzg2l_csi2_notify_ops = { .bound = rzg2l_csi2_notify_bound, .unbind = rzg2l_csi2_notify_unbind, }; static int rzg2l_csi2_parse_v4l2(struct rzg2l_csi2 *csi2, struct v4l2_fwnode_endpoint *vep) { /* Only port 0 endpoint 0 is valid. */ if (vep->base.port || vep->base.id) return -ENOTCONN; csi2->lanes = vep->bus.mipi_csi2.num_data_lanes; return 0; } static int rzg2l_csi2_parse_dt(struct rzg2l_csi2 *csi2) { struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = V4L2_MBUS_CSI2_DPHY }; struct v4l2_async_connection *asd; struct fwnode_handle *fwnode; struct fwnode_handle *ep; int ret; ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi2->dev), 0, 0, 0); if (!ep) { dev_err(csi2->dev, "Not connected to subdevice\n"); return -EINVAL; } ret = v4l2_fwnode_endpoint_parse(ep, &v4l2_ep); if (ret) { dev_err(csi2->dev, "Could not parse v4l2 endpoint\n"); fwnode_handle_put(ep); return -EINVAL; } ret = rzg2l_csi2_parse_v4l2(csi2, &v4l2_ep); if (ret) { fwnode_handle_put(ep); return ret; } fwnode = fwnode_graph_get_remote_endpoint(ep); fwnode_handle_put(ep); v4l2_async_subdev_nf_init(&csi2->notifier, &csi2->subdev); csi2->notifier.ops = &rzg2l_csi2_notify_ops; asd = v4l2_async_nf_add_fwnode(&csi2->notifier, fwnode, struct v4l2_async_connection); 
fwnode_handle_put(fwnode); if (IS_ERR(asd)) return PTR_ERR(asd); ret = v4l2_async_nf_register(&csi2->notifier); if (ret) v4l2_async_nf_cleanup(&csi2->notifier); return ret; } static int rzg2l_validate_csi2_lanes(struct rzg2l_csi2 *csi2) { int lanes; int ret; if (csi2->lanes != 1 && csi2->lanes != 2 && csi2->lanes != 4) { dev_err(csi2->dev, "Unsupported number of data-lanes: %u\n", csi2->lanes); return -EINVAL; } ret = pm_runtime_resume_and_get(csi2->dev); if (ret) return ret; /* Checking the maximum lanes support for CSI-2 module */ lanes = (rzg2l_csi2_read(csi2, CSI2nMCG) & CSI2nMCG_SDLN) >> 8; if (lanes < csi2->lanes) { dev_err(csi2->dev, "Failed to support %d data lanes\n", csi2->lanes); ret = -EINVAL; } pm_runtime_put_sync(csi2->dev); return ret; } /* ----------------------------------------------------------------------------- * Platform Device Driver. */ static const struct media_entity_operations rzg2l_csi2_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static int rzg2l_csi2_probe(struct platform_device *pdev) { struct rzg2l_csi2 *csi2; struct clk *vclk; int ret; csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL); if (!csi2) return -ENOMEM; csi2->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(csi2->base)) return PTR_ERR(csi2->base); csi2->cmn_rstb = devm_reset_control_get_exclusive(&pdev->dev, "cmn-rstb"); if (IS_ERR(csi2->cmn_rstb)) return dev_err_probe(&pdev->dev, PTR_ERR(csi2->cmn_rstb), "Failed to get cpg cmn-rstb\n"); csi2->presetn = devm_reset_control_get_shared(&pdev->dev, "presetn"); if (IS_ERR(csi2->presetn)) return dev_err_probe(&pdev->dev, PTR_ERR(csi2->presetn), "Failed to get cpg presetn\n"); csi2->sysclk = devm_clk_get(&pdev->dev, "system"); if (IS_ERR(csi2->sysclk)) return dev_err_probe(&pdev->dev, PTR_ERR(csi2->sysclk), "Failed to get system clk\n"); vclk = clk_get(&pdev->dev, "video"); if (IS_ERR(vclk)) return dev_err_probe(&pdev->dev, PTR_ERR(vclk), "Failed to get video clock\n"); csi2->vclk_rate = 
clk_get_rate(vclk); clk_put(vclk); csi2->dev = &pdev->dev; platform_set_drvdata(pdev, csi2); ret = rzg2l_csi2_parse_dt(csi2); if (ret) return ret; pm_runtime_enable(&pdev->dev); ret = rzg2l_validate_csi2_lanes(csi2); if (ret) goto error_pm; csi2->subdev.dev = &pdev->dev; v4l2_subdev_init(&csi2->subdev, &rzg2l_csi2_subdev_ops); v4l2_set_subdevdata(&csi2->subdev, &pdev->dev); snprintf(csi2->subdev.name, sizeof(csi2->subdev.name), "csi-%s", dev_name(&pdev->dev)); csi2->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE; csi2->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; csi2->subdev.entity.ops = &rzg2l_csi2_entity_ops; csi2->pads[RZG2L_CSI2_SINK].flags = MEDIA_PAD_FL_SINK; /* * TODO: RZ/G2L CSI2 supports 4 virtual channels, as virtual * channels should be implemented by streams API which is under * development lets hardcode to VC0 for now. */ csi2->pads[RZG2L_CSI2_SOURCE].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&csi2->subdev.entity, 2, csi2->pads); if (ret) goto error_pm; ret = v4l2_subdev_init_finalize(&csi2->subdev); if (ret < 0) goto error_async; ret = v4l2_async_register_subdev(&csi2->subdev); if (ret < 0) goto error_subdev; return 0; error_subdev: v4l2_subdev_cleanup(&csi2->subdev); error_async: v4l2_async_nf_unregister(&csi2->notifier); v4l2_async_nf_cleanup(&csi2->notifier); media_entity_cleanup(&csi2->subdev.entity); error_pm: pm_runtime_disable(&pdev->dev); return ret; } static void rzg2l_csi2_remove(struct platform_device *pdev) { struct rzg2l_csi2 *csi2 = platform_get_drvdata(pdev); v4l2_async_nf_unregister(&csi2->notifier); v4l2_async_nf_cleanup(&csi2->notifier); v4l2_async_unregister_subdev(&csi2->subdev); v4l2_subdev_cleanup(&csi2->subdev); media_entity_cleanup(&csi2->subdev.entity); pm_runtime_disable(&pdev->dev); } static int __maybe_unused rzg2l_csi2_pm_runtime_suspend(struct device *dev) { struct rzg2l_csi2 *csi2 = dev_get_drvdata(dev); reset_control_assert(csi2->presetn); return 0; } static int __maybe_unused 
rzg2l_csi2_pm_runtime_resume(struct device *dev) { struct rzg2l_csi2 *csi2 = dev_get_drvdata(dev); return reset_control_deassert(csi2->presetn); } static const struct dev_pm_ops rzg2l_csi2_pm_ops = { SET_RUNTIME_PM_OPS(rzg2l_csi2_pm_runtime_suspend, rzg2l_csi2_pm_runtime_resume, NULL) }; static const struct of_device_id rzg2l_csi2_of_table[] = { { .compatible = "renesas,rzg2l-csi2", }, { /* sentinel */ } }; static struct platform_driver rzg2l_csi2_pdrv = { .remove_new = rzg2l_csi2_remove, .probe = rzg2l_csi2_probe, .driver = { .name = "rzg2l-csi2", .of_match_table = rzg2l_csi2_of_table, .pm = &rzg2l_csi2_pm_ops, }, }; module_platform_driver(rzg2l_csi2_pdrv); MODULE_AUTHOR("Lad Prabhakar <[email protected]>"); MODULE_DESCRIPTION("Renesas RZ/G2L MIPI CSI2 receiver driver"); MODULE_LICENSE("GPL");
/* linux-master: drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c */
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_lut.c -- R-Car VSP1 Look-Up Table * * Copyright (C) 2013 Renesas Corporation * * Contact: Laurent Pinchart ([email protected]) */ #include <linux/device.h> #include <linux/gfp.h> #include <media/v4l2-subdev.h> #include "vsp1.h" #include "vsp1_dl.h" #include "vsp1_lut.h" #define LUT_MIN_SIZE 4U #define LUT_MAX_SIZE 8190U #define LUT_SIZE 256 /* ----------------------------------------------------------------------------- * Device Access */ static inline void vsp1_lut_write(struct vsp1_lut *lut, struct vsp1_dl_body *dlb, u32 reg, u32 data) { vsp1_dl_body_write(dlb, reg, data); } /* ----------------------------------------------------------------------------- * Controls */ #define V4L2_CID_VSP1_LUT_TABLE (V4L2_CID_USER_BASE | 0x1001) static int lut_set_table(struct vsp1_lut *lut, struct v4l2_ctrl *ctrl) { struct vsp1_dl_body *dlb; unsigned int i; dlb = vsp1_dl_body_get(lut->pool); if (!dlb) return -ENOMEM; for (i = 0; i < LUT_SIZE; ++i) vsp1_dl_body_write(dlb, VI6_LUT_TABLE + 4 * i, ctrl->p_new.p_u32[i]); spin_lock_irq(&lut->lock); swap(lut->lut, dlb); spin_unlock_irq(&lut->lock); vsp1_dl_body_put(dlb); return 0; } static int lut_s_ctrl(struct v4l2_ctrl *ctrl) { struct vsp1_lut *lut = container_of(ctrl->handler, struct vsp1_lut, ctrls); switch (ctrl->id) { case V4L2_CID_VSP1_LUT_TABLE: lut_set_table(lut, ctrl); break; } return 0; } static const struct v4l2_ctrl_ops lut_ctrl_ops = { .s_ctrl = lut_s_ctrl, }; static const struct v4l2_ctrl_config lut_table_control = { .ops = &lut_ctrl_ops, .id = V4L2_CID_VSP1_LUT_TABLE, .name = "Look-Up Table", .type = V4L2_CTRL_TYPE_U32, .min = 0x00000000, .max = 0x00ffffff, .step = 1, .def = 0, .dims = { LUT_SIZE }, }; /* ----------------------------------------------------------------------------- * V4L2 Subdevice Pad Operations */ static const unsigned int lut_codes[] = { MEDIA_BUS_FMT_ARGB8888_1X32, MEDIA_BUS_FMT_AHSV8888_1X32, MEDIA_BUS_FMT_AYUV8_1X32, }; static int 
lut_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, lut_codes, ARRAY_SIZE(lut_codes)); } static int lut_enum_frame_size(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { return vsp1_subdev_enum_frame_size(subdev, sd_state, fse, LUT_MIN_SIZE, LUT_MIN_SIZE, LUT_MAX_SIZE, LUT_MAX_SIZE); } static int lut_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, lut_codes, ARRAY_SIZE(lut_codes), LUT_MIN_SIZE, LUT_MIN_SIZE, LUT_MAX_SIZE, LUT_MAX_SIZE); } /* ----------------------------------------------------------------------------- * V4L2 Subdevice Operations */ static const struct v4l2_subdev_pad_ops lut_pad_ops = { .init_cfg = vsp1_entity_init_cfg, .enum_mbus_code = lut_enum_mbus_code, .enum_frame_size = lut_enum_frame_size, .get_fmt = vsp1_subdev_get_pad_format, .set_fmt = lut_set_format, }; static const struct v4l2_subdev_ops lut_ops = { .pad = &lut_pad_ops, }; /* ----------------------------------------------------------------------------- * VSP1 Entity Operations */ static void lut_configure_stream(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { struct vsp1_lut *lut = to_lut(&entity->subdev); vsp1_lut_write(lut, dlb, VI6_LUT_CTRL, VI6_LUT_CTRL_EN); } static void lut_configure_frame(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { struct vsp1_lut *lut = to_lut(&entity->subdev); struct vsp1_dl_body *lut_dlb; unsigned long flags; spin_lock_irqsave(&lut->lock, flags); lut_dlb = lut->lut; lut->lut = NULL; spin_unlock_irqrestore(&lut->lock, flags); if (lut_dlb) { vsp1_dl_list_add_body(dl, lut_dlb); /* Release our local reference. 
*/ vsp1_dl_body_put(lut_dlb); } } static void lut_destroy(struct vsp1_entity *entity) { struct vsp1_lut *lut = to_lut(&entity->subdev); vsp1_dl_body_pool_destroy(lut->pool); } static const struct vsp1_entity_operations lut_entity_ops = { .configure_stream = lut_configure_stream, .configure_frame = lut_configure_frame, .destroy = lut_destroy, }; /* ----------------------------------------------------------------------------- * Initialization and Cleanup */ struct vsp1_lut *vsp1_lut_create(struct vsp1_device *vsp1) { struct vsp1_lut *lut; int ret; lut = devm_kzalloc(vsp1->dev, sizeof(*lut), GFP_KERNEL); if (lut == NULL) return ERR_PTR(-ENOMEM); spin_lock_init(&lut->lock); lut->entity.ops = &lut_entity_ops; lut->entity.type = VSP1_ENTITY_LUT; ret = vsp1_entity_init(vsp1, &lut->entity, "lut", 2, &lut_ops, MEDIA_ENT_F_PROC_VIDEO_LUT); if (ret < 0) return ERR_PTR(ret); /* * Pre-allocate a body pool, with 3 bodies allowing a userspace update * before the hardware has committed a previous set of tables, handling * both the queued and pending dl entries. */ lut->pool = vsp1_dl_body_pool_create(vsp1, 3, LUT_SIZE, 0); if (!lut->pool) return ERR_PTR(-ENOMEM); /* Initialize the control handler. */ v4l2_ctrl_handler_init(&lut->ctrls, 1); v4l2_ctrl_new_custom(&lut->ctrls, &lut_table_control, NULL); lut->entity.subdev.ctrl_handler = &lut->ctrls; if (lut->ctrls.error) { dev_err(vsp1->dev, "lut: failed to initialize controls\n"); ret = lut->ctrls.error; vsp1_entity_destroy(&lut->entity); return ERR_PTR(ret); } v4l2_ctrl_handler_setup(&lut->ctrls); return lut; }
linux-master
drivers/media/platform/renesas/vsp1/vsp1_lut.c
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_hsit.c -- R-Car VSP1 Hue Saturation value (Inverse) Transform * * Copyright (C) 2013 Renesas Corporation * * Contact: Laurent Pinchart ([email protected]) */ #include <linux/device.h> #include <linux/gfp.h> #include <media/v4l2-subdev.h> #include "vsp1.h" #include "vsp1_dl.h" #include "vsp1_hsit.h" #define HSIT_MIN_SIZE 4U #define HSIT_MAX_SIZE 8190U /* ----------------------------------------------------------------------------- * Device Access */ static inline void vsp1_hsit_write(struct vsp1_hsit *hsit, struct vsp1_dl_body *dlb, u32 reg, u32 data) { vsp1_dl_body_write(dlb, reg, data); } /* ----------------------------------------------------------------------------- * V4L2 Subdevice Operations */ static int hsit_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { struct vsp1_hsit *hsit = to_hsit(subdev); if (code->index > 0) return -EINVAL; if ((code->pad == HSIT_PAD_SINK && !hsit->inverse) | (code->pad == HSIT_PAD_SOURCE && hsit->inverse)) code->code = MEDIA_BUS_FMT_ARGB8888_1X32; else code->code = MEDIA_BUS_FMT_AHSV8888_1X32; return 0; } static int hsit_enum_frame_size(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { return vsp1_subdev_enum_frame_size(subdev, sd_state, fse, HSIT_MIN_SIZE, HSIT_MIN_SIZE, HSIT_MAX_SIZE, HSIT_MAX_SIZE); } static int hsit_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct vsp1_hsit *hsit = to_hsit(subdev); struct v4l2_subdev_state *config; struct v4l2_mbus_framefmt *format; int ret = 0; mutex_lock(&hsit->entity.lock); config = vsp1_entity_get_pad_config(&hsit->entity, sd_state, fmt->which); if (!config) { ret = -EINVAL; goto done; } format = vsp1_entity_get_pad_format(&hsit->entity, config, fmt->pad); if (fmt->pad == HSIT_PAD_SOURCE) { /* * The HST and HSI output format code and resolution can't 
be * modified. */ fmt->format = *format; goto done; } format->code = hsit->inverse ? MEDIA_BUS_FMT_AHSV8888_1X32 : MEDIA_BUS_FMT_ARGB8888_1X32; format->width = clamp_t(unsigned int, fmt->format.width, HSIT_MIN_SIZE, HSIT_MAX_SIZE); format->height = clamp_t(unsigned int, fmt->format.height, HSIT_MIN_SIZE, HSIT_MAX_SIZE); format->field = V4L2_FIELD_NONE; format->colorspace = V4L2_COLORSPACE_SRGB; fmt->format = *format; /* Propagate the format to the source pad. */ format = vsp1_entity_get_pad_format(&hsit->entity, config, HSIT_PAD_SOURCE); *format = fmt->format; format->code = hsit->inverse ? MEDIA_BUS_FMT_ARGB8888_1X32 : MEDIA_BUS_FMT_AHSV8888_1X32; done: mutex_unlock(&hsit->entity.lock); return ret; } static const struct v4l2_subdev_pad_ops hsit_pad_ops = { .init_cfg = vsp1_entity_init_cfg, .enum_mbus_code = hsit_enum_mbus_code, .enum_frame_size = hsit_enum_frame_size, .get_fmt = vsp1_subdev_get_pad_format, .set_fmt = hsit_set_format, }; static const struct v4l2_subdev_ops hsit_ops = { .pad = &hsit_pad_ops, }; /* ----------------------------------------------------------------------------- * VSP1 Entity Operations */ static void hsit_configure_stream(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { struct vsp1_hsit *hsit = to_hsit(&entity->subdev); if (hsit->inverse) vsp1_hsit_write(hsit, dlb, VI6_HSI_CTRL, VI6_HSI_CTRL_EN); else vsp1_hsit_write(hsit, dlb, VI6_HST_CTRL, VI6_HST_CTRL_EN); } static const struct vsp1_entity_operations hsit_entity_ops = { .configure_stream = hsit_configure_stream, }; /* ----------------------------------------------------------------------------- * Initialization and Cleanup */ struct vsp1_hsit *vsp1_hsit_create(struct vsp1_device *vsp1, bool inverse) { struct vsp1_hsit *hsit; int ret; hsit = devm_kzalloc(vsp1->dev, sizeof(*hsit), GFP_KERNEL); if (hsit == NULL) return ERR_PTR(-ENOMEM); hsit->inverse = inverse; hsit->entity.ops = &hsit_entity_ops; if (inverse) hsit->entity.type 
= VSP1_ENTITY_HSI; else hsit->entity.type = VSP1_ENTITY_HST; ret = vsp1_entity_init(vsp1, &hsit->entity, inverse ? "hsi" : "hst", 2, &hsit_ops, MEDIA_ENT_F_PROC_VIDEO_PIXEL_ENC_CONV); if (ret < 0) return ERR_PTR(ret); return hsit; }
linux-master
drivers/media/platform/renesas/vsp1/vsp1_hsit.c
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_uds.c -- R-Car VSP1 Up and Down Scaler * * Copyright (C) 2013-2014 Renesas Electronics Corporation * * Contact: Laurent Pinchart ([email protected]) */ #include <linux/device.h> #include <linux/gfp.h> #include <media/v4l2-subdev.h> #include "vsp1.h" #include "vsp1_dl.h" #include "vsp1_pipe.h" #include "vsp1_uds.h" #define UDS_MIN_SIZE 4U #define UDS_MAX_SIZE 8190U #define UDS_MIN_FACTOR 0x0100 #define UDS_MAX_FACTOR 0xffff /* ----------------------------------------------------------------------------- * Device Access */ static inline void vsp1_uds_write(struct vsp1_uds *uds, struct vsp1_dl_body *dlb, u32 reg, u32 data) { vsp1_dl_body_write(dlb, reg + uds->entity.index * VI6_UDS_OFFSET, data); } /* ----------------------------------------------------------------------------- * Scaling Computation */ void vsp1_uds_set_alpha(struct vsp1_entity *entity, struct vsp1_dl_body *dlb, unsigned int alpha) { struct vsp1_uds *uds = to_uds(&entity->subdev); vsp1_uds_write(uds, dlb, VI6_UDS_ALPVAL, alpha << VI6_UDS_ALPVAL_VAL0_SHIFT); } /* * uds_output_size - Return the output size for an input size and scaling ratio * @input: input size in pixels * @ratio: scaling ratio in U4.12 fixed-point format */ static unsigned int uds_output_size(unsigned int input, unsigned int ratio) { if (ratio > 4096) { /* Down-scaling */ unsigned int mp; mp = ratio / 4096; mp = mp < 4 ? 1 : (mp < 8 ? 
2 : 4); return (input - 1) / mp * mp * 4096 / ratio + 1; } else { /* Up-scaling */ return (input - 1) * 4096 / ratio + 1; } } /* * uds_output_limits - Return the min and max output sizes for an input size * @input: input size in pixels * @minimum: minimum output size (returned) * @maximum: maximum output size (returned) */ static void uds_output_limits(unsigned int input, unsigned int *minimum, unsigned int *maximum) { *minimum = max(uds_output_size(input, UDS_MAX_FACTOR), UDS_MIN_SIZE); *maximum = min(uds_output_size(input, UDS_MIN_FACTOR), UDS_MAX_SIZE); } /* * uds_passband_width - Return the passband filter width for a scaling ratio * @ratio: scaling ratio in U4.12 fixed-point format */ static unsigned int uds_passband_width(unsigned int ratio) { if (ratio >= 4096) { /* Down-scaling */ unsigned int mp; mp = ratio / 4096; mp = mp < 4 ? 1 : (mp < 8 ? 2 : 4); return 64 * 4096 * mp / ratio; } else { /* Up-scaling */ return 64; } } static unsigned int uds_compute_ratio(unsigned int input, unsigned int output) { /* TODO: This is an approximation that will need to be refined. 
*/ return (input - 1) * 4096 / (output - 1); } /* ----------------------------------------------------------------------------- * V4L2 Subdevice Pad Operations */ static int uds_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { static const unsigned int codes[] = { MEDIA_BUS_FMT_ARGB8888_1X32, MEDIA_BUS_FMT_AYUV8_1X32, }; return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes, ARRAY_SIZE(codes)); } static int uds_enum_frame_size(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { struct vsp1_uds *uds = to_uds(subdev); struct v4l2_subdev_state *config; struct v4l2_mbus_framefmt *format; int ret = 0; config = vsp1_entity_get_pad_config(&uds->entity, sd_state, fse->which); if (!config) return -EINVAL; format = vsp1_entity_get_pad_format(&uds->entity, config, UDS_PAD_SINK); mutex_lock(&uds->entity.lock); if (fse->index || fse->code != format->code) { ret = -EINVAL; goto done; } if (fse->pad == UDS_PAD_SINK) { fse->min_width = UDS_MIN_SIZE; fse->max_width = UDS_MAX_SIZE; fse->min_height = UDS_MIN_SIZE; fse->max_height = UDS_MAX_SIZE; } else { uds_output_limits(format->width, &fse->min_width, &fse->max_width); uds_output_limits(format->height, &fse->min_height, &fse->max_height); } done: mutex_unlock(&uds->entity.lock); return ret; } static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_state *sd_state, unsigned int pad, struct v4l2_mbus_framefmt *fmt) { struct v4l2_mbus_framefmt *format; unsigned int minimum; unsigned int maximum; switch (pad) { case UDS_PAD_SINK: /* Default to YUV if the requested format is not supported. 
*/ if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 && fmt->code != MEDIA_BUS_FMT_AYUV8_1X32) fmt->code = MEDIA_BUS_FMT_AYUV8_1X32; fmt->width = clamp(fmt->width, UDS_MIN_SIZE, UDS_MAX_SIZE); fmt->height = clamp(fmt->height, UDS_MIN_SIZE, UDS_MAX_SIZE); break; case UDS_PAD_SOURCE: /* The UDS scales but can't perform format conversion. */ format = vsp1_entity_get_pad_format(&uds->entity, sd_state, UDS_PAD_SINK); fmt->code = format->code; uds_output_limits(format->width, &minimum, &maximum); fmt->width = clamp(fmt->width, minimum, maximum); uds_output_limits(format->height, &minimum, &maximum); fmt->height = clamp(fmt->height, minimum, maximum); break; } fmt->field = V4L2_FIELD_NONE; fmt->colorspace = V4L2_COLORSPACE_SRGB; } static int uds_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct vsp1_uds *uds = to_uds(subdev); struct v4l2_subdev_state *config; struct v4l2_mbus_framefmt *format; int ret = 0; mutex_lock(&uds->entity.lock); config = vsp1_entity_get_pad_config(&uds->entity, sd_state, fmt->which); if (!config) { ret = -EINVAL; goto done; } uds_try_format(uds, config, fmt->pad, &fmt->format); format = vsp1_entity_get_pad_format(&uds->entity, config, fmt->pad); *format = fmt->format; if (fmt->pad == UDS_PAD_SINK) { /* Propagate the format to the source pad. 
*/ format = vsp1_entity_get_pad_format(&uds->entity, config, UDS_PAD_SOURCE); *format = fmt->format; uds_try_format(uds, config, UDS_PAD_SOURCE, format); } done: mutex_unlock(&uds->entity.lock); return ret; } /* ----------------------------------------------------------------------------- * V4L2 Subdevice Operations */ static const struct v4l2_subdev_pad_ops uds_pad_ops = { .init_cfg = vsp1_entity_init_cfg, .enum_mbus_code = uds_enum_mbus_code, .enum_frame_size = uds_enum_frame_size, .get_fmt = vsp1_subdev_get_pad_format, .set_fmt = uds_set_format, }; static const struct v4l2_subdev_ops uds_ops = { .pad = &uds_pad_ops, }; /* ----------------------------------------------------------------------------- * VSP1 Entity Operations */ static void uds_configure_stream(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { struct vsp1_uds *uds = to_uds(&entity->subdev); const struct v4l2_mbus_framefmt *output; const struct v4l2_mbus_framefmt *input; unsigned int hscale; unsigned int vscale; bool multitap; input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, UDS_PAD_SINK); output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, UDS_PAD_SOURCE); hscale = uds_compute_ratio(input->width, output->width); vscale = uds_compute_ratio(input->height, output->height); dev_dbg(uds->entity.vsp1->dev, "hscale %u vscale %u\n", hscale, vscale); /* * Multi-tap scaling can't be enabled along with alpha scaling when * scaling down with a factor lower than or equal to 1/2 in either * direction. */ if (uds->scale_alpha && (hscale >= 8192 || vscale >= 8192)) multitap = false; else multitap = true; vsp1_uds_write(uds, dlb, VI6_UDS_CTRL, (uds->scale_alpha ? VI6_UDS_CTRL_AON : 0) | (multitap ? VI6_UDS_CTRL_BC : 0)); vsp1_uds_write(uds, dlb, VI6_UDS_PASS_BWIDTH, (uds_passband_width(hscale) << VI6_UDS_PASS_BWIDTH_H_SHIFT) | (uds_passband_width(vscale) << VI6_UDS_PASS_BWIDTH_V_SHIFT)); /* Set the scaling ratios. 
*/ vsp1_uds_write(uds, dlb, VI6_UDS_SCALE, (hscale << VI6_UDS_SCALE_HFRAC_SHIFT) | (vscale << VI6_UDS_SCALE_VFRAC_SHIFT)); } static void uds_configure_partition(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { struct vsp1_uds *uds = to_uds(&entity->subdev); struct vsp1_partition *partition = pipe->partition; const struct v4l2_mbus_framefmt *output; output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, UDS_PAD_SOURCE); /* Input size clipping. */ vsp1_uds_write(uds, dlb, VI6_UDS_HSZCLIP, VI6_UDS_HSZCLIP_HCEN | (0 << VI6_UDS_HSZCLIP_HCL_OFST_SHIFT) | (partition->uds_sink.width << VI6_UDS_HSZCLIP_HCL_SIZE_SHIFT)); /* Output size clipping. */ vsp1_uds_write(uds, dlb, VI6_UDS_CLIP_SIZE, (partition->uds_source.width << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) | (output->height << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT)); } static unsigned int uds_max_width(struct vsp1_entity *entity, struct vsp1_pipeline *pipe) { struct vsp1_uds *uds = to_uds(&entity->subdev); const struct v4l2_mbus_framefmt *output; const struct v4l2_mbus_framefmt *input; unsigned int hscale; input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, UDS_PAD_SINK); output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, UDS_PAD_SOURCE); hscale = output->width / input->width; /* * The maximum width of the UDS is 304 pixels. These are input pixels * in the event of up-scaling, and output pixels in the event of * downscaling. * * To support overlapping partition windows we clamp at units of 256 and * the remaining pixels are reserved. 
*/ if (hscale <= 2) return 256; else if (hscale <= 4) return 512; else if (hscale <= 8) return 1024; else return 2048; } /* ----------------------------------------------------------------------------- * Partition Algorithm Support */ static void uds_partition(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_partition *partition, unsigned int partition_idx, struct vsp1_partition_window *window) { struct vsp1_uds *uds = to_uds(&entity->subdev); const struct v4l2_mbus_framefmt *output; const struct v4l2_mbus_framefmt *input; /* Initialise the partition state. */ partition->uds_sink = *window; partition->uds_source = *window; input = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, UDS_PAD_SINK); output = vsp1_entity_get_pad_format(&uds->entity, uds->entity.config, UDS_PAD_SOURCE); partition->uds_sink.width = window->width * input->width / output->width; partition->uds_sink.left = window->left * input->width / output->width; *window = partition->uds_sink; } static const struct vsp1_entity_operations uds_entity_ops = { .configure_stream = uds_configure_stream, .configure_partition = uds_configure_partition, .max_width = uds_max_width, .partition = uds_partition, }; /* ----------------------------------------------------------------------------- * Initialization and Cleanup */ struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index) { struct vsp1_uds *uds; char name[6]; int ret; uds = devm_kzalloc(vsp1->dev, sizeof(*uds), GFP_KERNEL); if (uds == NULL) return ERR_PTR(-ENOMEM); uds->entity.ops = &uds_entity_ops; uds->entity.type = VSP1_ENTITY_UDS; uds->entity.index = index; sprintf(name, "uds.%u", index); ret = vsp1_entity_init(vsp1, &uds->entity, name, 2, &uds_ops, MEDIA_ENT_F_PROC_VIDEO_SCALER); if (ret < 0) return ERR_PTR(ret); return uds; }
linux-master
drivers/media/platform/renesas/vsp1/vsp1_uds.c
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_hgo.c -- R-Car VSP1 Histogram Generator 1D * * Copyright (C) 2016 Renesas Electronics Corporation * * Contact: Laurent Pinchart ([email protected]) */ #include <linux/device.h> #include <linux/gfp.h> #include <media/v4l2-subdev.h> #include <media/videobuf2-vmalloc.h> #include "vsp1.h" #include "vsp1_dl.h" #include "vsp1_hgo.h" #define HGO_DATA_SIZE ((2 + 256) * 4) /* ----------------------------------------------------------------------------- * Device Access */ static inline u32 vsp1_hgo_read(struct vsp1_hgo *hgo, u32 reg) { return vsp1_read(hgo->histo.entity.vsp1, reg); } static inline void vsp1_hgo_write(struct vsp1_hgo *hgo, struct vsp1_dl_body *dlb, u32 reg, u32 data) { vsp1_dl_body_write(dlb, reg, data); } /* ----------------------------------------------------------------------------- * Frame End Handler */ void vsp1_hgo_frame_end(struct vsp1_entity *entity) { struct vsp1_hgo *hgo = to_hgo(&entity->subdev); struct vsp1_histogram_buffer *buf; unsigned int i; size_t size; u32 *data; buf = vsp1_histogram_buffer_get(&hgo->histo); if (!buf) return; data = buf->addr; if (hgo->num_bins == 256) { *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN); *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM); for (i = 0; i < 256; ++i) { vsp1_write(hgo->histo.entity.vsp1, VI6_HGO_EXT_HIST_ADDR, i); *data++ = vsp1_hgo_read(hgo, VI6_HGO_EXT_HIST_DATA); } size = (2 + 256) * sizeof(u32); } else if (hgo->max_rgb) { *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN); *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM); for (i = 0; i < 64; ++i) *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_HISTO(i)); size = (2 + 64) * sizeof(u32); } else { *data++ = vsp1_hgo_read(hgo, VI6_HGO_R_MAXMIN); *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_MAXMIN); *data++ = vsp1_hgo_read(hgo, VI6_HGO_B_MAXMIN); *data++ = vsp1_hgo_read(hgo, VI6_HGO_R_SUM); *data++ = vsp1_hgo_read(hgo, VI6_HGO_G_SUM); *data++ = vsp1_hgo_read(hgo, VI6_HGO_B_SUM); for (i = 0; i < 64; ++i) { data[i] = vsp1_hgo_read(hgo, 
VI6_HGO_R_HISTO(i)); data[i+64] = vsp1_hgo_read(hgo, VI6_HGO_G_HISTO(i)); data[i+128] = vsp1_hgo_read(hgo, VI6_HGO_B_HISTO(i)); } size = (6 + 64 * 3) * sizeof(u32); } vsp1_histogram_buffer_complete(&hgo->histo, buf, size); } /* ----------------------------------------------------------------------------- * Controls */ #define V4L2_CID_VSP1_HGO_MAX_RGB (V4L2_CID_USER_BASE | 0x1001) #define V4L2_CID_VSP1_HGO_NUM_BINS (V4L2_CID_USER_BASE | 0x1002) static const struct v4l2_ctrl_config hgo_max_rgb_control = { .id = V4L2_CID_VSP1_HGO_MAX_RGB, .name = "Maximum RGB Mode", .type = V4L2_CTRL_TYPE_BOOLEAN, .min = 0, .max = 1, .def = 0, .step = 1, .flags = V4L2_CTRL_FLAG_MODIFY_LAYOUT, }; static const s64 hgo_num_bins[] = { 64, 256, }; static const struct v4l2_ctrl_config hgo_num_bins_control = { .id = V4L2_CID_VSP1_HGO_NUM_BINS, .name = "Number of Bins", .type = V4L2_CTRL_TYPE_INTEGER_MENU, .min = 0, .max = 1, .def = 0, .qmenu_int = hgo_num_bins, .flags = V4L2_CTRL_FLAG_MODIFY_LAYOUT, }; /* ----------------------------------------------------------------------------- * VSP1 Entity Operations */ static void hgo_configure_stream(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { struct vsp1_hgo *hgo = to_hgo(&entity->subdev); struct v4l2_rect *compose; struct v4l2_rect *crop; unsigned int hratio; unsigned int vratio; crop = vsp1_entity_get_pad_selection(entity, entity->config, HISTO_PAD_SINK, V4L2_SEL_TGT_CROP); compose = vsp1_entity_get_pad_selection(entity, entity->config, HISTO_PAD_SINK, V4L2_SEL_TGT_COMPOSE); vsp1_hgo_write(hgo, dlb, VI6_HGO_REGRST, VI6_HGO_REGRST_RCLEA); vsp1_hgo_write(hgo, dlb, VI6_HGO_OFFSET, (crop->left << VI6_HGO_OFFSET_HOFFSET_SHIFT) | (crop->top << VI6_HGO_OFFSET_VOFFSET_SHIFT)); vsp1_hgo_write(hgo, dlb, VI6_HGO_SIZE, (crop->width << VI6_HGO_SIZE_HSIZE_SHIFT) | (crop->height << VI6_HGO_SIZE_VSIZE_SHIFT)); mutex_lock(hgo->ctrls.handler.lock); hgo->max_rgb = hgo->ctrls.max_rgb->cur.val; if 
(hgo->ctrls.num_bins) hgo->num_bins = hgo_num_bins[hgo->ctrls.num_bins->cur.val]; mutex_unlock(hgo->ctrls.handler.lock); hratio = crop->width * 2 / compose->width / 3; vratio = crop->height * 2 / compose->height / 3; vsp1_hgo_write(hgo, dlb, VI6_HGO_MODE, (hgo->num_bins == 256 ? VI6_HGO_MODE_STEP : 0) | (hgo->max_rgb ? VI6_HGO_MODE_MAXRGB : 0) | (hratio << VI6_HGO_MODE_HRATIO_SHIFT) | (vratio << VI6_HGO_MODE_VRATIO_SHIFT)); } static const struct vsp1_entity_operations hgo_entity_ops = { .configure_stream = hgo_configure_stream, .destroy = vsp1_histogram_destroy, }; /* ----------------------------------------------------------------------------- * Initialization and Cleanup */ static const unsigned int hgo_mbus_formats[] = { MEDIA_BUS_FMT_AYUV8_1X32, MEDIA_BUS_FMT_ARGB8888_1X32, MEDIA_BUS_FMT_AHSV8888_1X32, }; struct vsp1_hgo *vsp1_hgo_create(struct vsp1_device *vsp1) { struct vsp1_hgo *hgo; int ret; hgo = devm_kzalloc(vsp1->dev, sizeof(*hgo), GFP_KERNEL); if (hgo == NULL) return ERR_PTR(-ENOMEM); /* Initialize the control handler. */ v4l2_ctrl_handler_init(&hgo->ctrls.handler, vsp1->info->gen >= 3 ? 2 : 1); hgo->ctrls.max_rgb = v4l2_ctrl_new_custom(&hgo->ctrls.handler, &hgo_max_rgb_control, NULL); if (vsp1->info->gen >= 3) hgo->ctrls.num_bins = v4l2_ctrl_new_custom(&hgo->ctrls.handler, &hgo_num_bins_control, NULL); hgo->max_rgb = false; hgo->num_bins = 64; hgo->histo.entity.subdev.ctrl_handler = &hgo->ctrls.handler; /* Initialize the video device and queue for statistics data. */ ret = vsp1_histogram_init(vsp1, &hgo->histo, VSP1_ENTITY_HGO, "hgo", &hgo_entity_ops, hgo_mbus_formats, ARRAY_SIZE(hgo_mbus_formats), HGO_DATA_SIZE, V4L2_META_FMT_VSP1_HGO); if (ret < 0) { vsp1_entity_destroy(&hgo->histo.entity); return ERR_PTR(ret); } return hgo; }
linux-master
drivers/media/platform/renesas/vsp1/vsp1_hgo.c
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_clu.c -- R-Car VSP1 Cubic Look-Up Table * * Copyright (C) 2015-2016 Renesas Electronics Corporation * * Contact: Laurent Pinchart ([email protected]) */ #include <linux/device.h> #include <linux/slab.h> #include <media/v4l2-subdev.h> #include "vsp1.h" #include "vsp1_clu.h" #include "vsp1_dl.h" #define CLU_MIN_SIZE 4U #define CLU_MAX_SIZE 8190U #define CLU_SIZE (17 * 17 * 17) /* ----------------------------------------------------------------------------- * Device Access */ static inline void vsp1_clu_write(struct vsp1_clu *clu, struct vsp1_dl_body *dlb, u32 reg, u32 data) { vsp1_dl_body_write(dlb, reg, data); } /* ----------------------------------------------------------------------------- * Controls */ #define V4L2_CID_VSP1_CLU_TABLE (V4L2_CID_USER_BASE | 0x1001) #define V4L2_CID_VSP1_CLU_MODE (V4L2_CID_USER_BASE | 0x1002) #define V4L2_CID_VSP1_CLU_MODE_2D 0 #define V4L2_CID_VSP1_CLU_MODE_3D 1 static int clu_set_table(struct vsp1_clu *clu, struct v4l2_ctrl *ctrl) { struct vsp1_dl_body *dlb; unsigned int i; dlb = vsp1_dl_body_get(clu->pool); if (!dlb) return -ENOMEM; vsp1_dl_body_write(dlb, VI6_CLU_ADDR, 0); for (i = 0; i < CLU_SIZE; ++i) vsp1_dl_body_write(dlb, VI6_CLU_DATA, ctrl->p_new.p_u32[i]); spin_lock_irq(&clu->lock); swap(clu->clu, dlb); spin_unlock_irq(&clu->lock); vsp1_dl_body_put(dlb); return 0; } static int clu_s_ctrl(struct v4l2_ctrl *ctrl) { struct vsp1_clu *clu = container_of(ctrl->handler, struct vsp1_clu, ctrls); switch (ctrl->id) { case V4L2_CID_VSP1_CLU_TABLE: clu_set_table(clu, ctrl); break; case V4L2_CID_VSP1_CLU_MODE: clu->mode = ctrl->val; break; } return 0; } static const struct v4l2_ctrl_ops clu_ctrl_ops = { .s_ctrl = clu_s_ctrl, }; static const struct v4l2_ctrl_config clu_table_control = { .ops = &clu_ctrl_ops, .id = V4L2_CID_VSP1_CLU_TABLE, .name = "Look-Up Table", .type = V4L2_CTRL_TYPE_U32, .min = 0x00000000, .max = 0x00ffffff, .step = 1, .def = 0, .dims = { 17, 17, 17 }, }; static const 
char * const clu_mode_menu[] = { "2D", "3D", NULL, }; static const struct v4l2_ctrl_config clu_mode_control = { .ops = &clu_ctrl_ops, .id = V4L2_CID_VSP1_CLU_MODE, .name = "Mode", .type = V4L2_CTRL_TYPE_MENU, .min = 0, .max = 1, .def = 1, .qmenu = clu_mode_menu, }; /* ----------------------------------------------------------------------------- * V4L2 Subdevice Pad Operations */ static const unsigned int clu_codes[] = { MEDIA_BUS_FMT_ARGB8888_1X32, MEDIA_BUS_FMT_AHSV8888_1X32, MEDIA_BUS_FMT_AYUV8_1X32, }; static int clu_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, clu_codes, ARRAY_SIZE(clu_codes)); } static int clu_enum_frame_size(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { return vsp1_subdev_enum_frame_size(subdev, sd_state, fse, CLU_MIN_SIZE, CLU_MIN_SIZE, CLU_MAX_SIZE, CLU_MAX_SIZE); } static int clu_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, clu_codes, ARRAY_SIZE(clu_codes), CLU_MIN_SIZE, CLU_MIN_SIZE, CLU_MAX_SIZE, CLU_MAX_SIZE); } /* ----------------------------------------------------------------------------- * V4L2 Subdevice Operations */ static const struct v4l2_subdev_pad_ops clu_pad_ops = { .init_cfg = vsp1_entity_init_cfg, .enum_mbus_code = clu_enum_mbus_code, .enum_frame_size = clu_enum_frame_size, .get_fmt = vsp1_subdev_get_pad_format, .set_fmt = clu_set_format, }; static const struct v4l2_subdev_ops clu_ops = { .pad = &clu_pad_ops, }; /* ----------------------------------------------------------------------------- * VSP1 Entity Operations */ static void clu_configure_stream(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { struct vsp1_clu *clu = to_clu(&entity->subdev); struct 
v4l2_mbus_framefmt *format; /* * The yuv_mode can't be changed during streaming. Cache it internally * for future runtime configuration calls. */ format = vsp1_entity_get_pad_format(&clu->entity, clu->entity.config, CLU_PAD_SINK); clu->yuv_mode = format->code == MEDIA_BUS_FMT_AYUV8_1X32; } static void clu_configure_frame(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { struct vsp1_clu *clu = to_clu(&entity->subdev); struct vsp1_dl_body *clu_dlb; unsigned long flags; u32 ctrl = VI6_CLU_CTRL_AAI | VI6_CLU_CTRL_MVS | VI6_CLU_CTRL_EN; /* 2D mode can only be used with the YCbCr pixel encoding. */ if (clu->mode == V4L2_CID_VSP1_CLU_MODE_2D && clu->yuv_mode) ctrl |= VI6_CLU_CTRL_AX1I_2D | VI6_CLU_CTRL_AX2I_2D | VI6_CLU_CTRL_OS0_2D | VI6_CLU_CTRL_OS1_2D | VI6_CLU_CTRL_OS2_2D | VI6_CLU_CTRL_M2D; vsp1_clu_write(clu, dlb, VI6_CLU_CTRL, ctrl); spin_lock_irqsave(&clu->lock, flags); clu_dlb = clu->clu; clu->clu = NULL; spin_unlock_irqrestore(&clu->lock, flags); if (clu_dlb) { vsp1_dl_list_add_body(dl, clu_dlb); /* Release our local reference. 
*/ vsp1_dl_body_put(clu_dlb); } } static void clu_destroy(struct vsp1_entity *entity) { struct vsp1_clu *clu = to_clu(&entity->subdev); vsp1_dl_body_pool_destroy(clu->pool); } static const struct vsp1_entity_operations clu_entity_ops = { .configure_stream = clu_configure_stream, .configure_frame = clu_configure_frame, .destroy = clu_destroy, }; /* ----------------------------------------------------------------------------- * Initialization and Cleanup */ struct vsp1_clu *vsp1_clu_create(struct vsp1_device *vsp1) { struct vsp1_clu *clu; int ret; clu = devm_kzalloc(vsp1->dev, sizeof(*clu), GFP_KERNEL); if (clu == NULL) return ERR_PTR(-ENOMEM); spin_lock_init(&clu->lock); clu->entity.ops = &clu_entity_ops; clu->entity.type = VSP1_ENTITY_CLU; ret = vsp1_entity_init(vsp1, &clu->entity, "clu", 2, &clu_ops, MEDIA_ENT_F_PROC_VIDEO_LUT); if (ret < 0) return ERR_PTR(ret); /* * Pre-allocate a body pool, with 3 bodies allowing a userspace update * before the hardware has committed a previous set of tables, handling * both the queued and pending dl entries. One extra entry is added to * the CLU_SIZE to allow for the VI6_CLU_ADDR header. */ clu->pool = vsp1_dl_body_pool_create(clu->entity.vsp1, 3, CLU_SIZE + 1, 0); if (!clu->pool) return ERR_PTR(-ENOMEM); /* Initialize the control handler. */ v4l2_ctrl_handler_init(&clu->ctrls, 2); v4l2_ctrl_new_custom(&clu->ctrls, &clu_table_control, NULL); v4l2_ctrl_new_custom(&clu->ctrls, &clu_mode_control, NULL); clu->entity.subdev.ctrl_handler = &clu->ctrls; if (clu->ctrls.error) { dev_err(vsp1->dev, "clu: failed to initialize controls\n"); ret = clu->ctrls.error; vsp1_entity_destroy(&clu->entity); return ERR_PTR(ret); } v4l2_ctrl_handler_setup(&clu->ctrls); return clu; }
linux-master
drivers/media/platform/renesas/vsp1/vsp1_clu.c
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_dl.c -- R-Car VSP1 Display List * * Copyright (C) 2015 Renesas Corporation * * Contact: Laurent Pinchart ([email protected]) */ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> #include <linux/refcount.h> #include <linux/slab.h> #include <linux/workqueue.h> #include "vsp1.h" #include "vsp1_dl.h" #define VSP1_DL_NUM_ENTRIES 256 #define VSP1_DLH_INT_ENABLE (1 << 1) #define VSP1_DLH_AUTO_START (1 << 0) #define VSP1_DLH_EXT_PRE_CMD_EXEC (1 << 9) #define VSP1_DLH_EXT_POST_CMD_EXEC (1 << 8) struct vsp1_dl_header_list { u32 num_bytes; u32 addr; } __packed; struct vsp1_dl_header { u32 num_lists; struct vsp1_dl_header_list lists[8]; u32 next_header; u32 flags; } __packed; /** * struct vsp1_dl_ext_header - Extended display list header * @padding: padding zero bytes for alignment * @pre_ext_dl_num_cmd: number of pre-extended command bodies to parse * @flags: enables or disables execution of the pre and post command * @pre_ext_dl_plist: start address of pre-extended display list bodies * @post_ext_dl_num_cmd: number of post-extended command bodies to parse * @post_ext_dl_plist: start address of post-extended display list bodies */ struct vsp1_dl_ext_header { u32 padding; /* * The datasheet represents flags as stored before pre_ext_dl_num_cmd, * expecting 32-bit accesses. The flags are appropriate to the whole * header, not just the pre_ext command, and thus warrant being * separated out. Due to byte ordering, and representing as 16 bit * values here, the flags must be positioned after the * pre_ext_dl_num_cmd. 
*/ u16 pre_ext_dl_num_cmd; u16 flags; u32 pre_ext_dl_plist; u32 post_ext_dl_num_cmd; u32 post_ext_dl_plist; } __packed; struct vsp1_dl_header_extended { struct vsp1_dl_header header; struct vsp1_dl_ext_header ext; } __packed; struct vsp1_dl_entry { u32 addr; u32 data; } __packed; /** * struct vsp1_pre_ext_dl_body - Pre Extended Display List Body * @opcode: Extended display list command operation code * @flags: Pre-extended command flags. These are specific to each command * @address_set: Source address set pointer. Must have 16-byte alignment * @reserved: Zero bits for alignment. */ struct vsp1_pre_ext_dl_body { u32 opcode; u32 flags; u32 address_set; u32 reserved; } __packed; /** * struct vsp1_dl_body - Display list body * @list: entry in the display list list of bodies * @free: entry in the pool free body list * @refcnt: reference tracking for the body * @pool: pool to which this body belongs * @entries: array of entries * @dma: DMA address of the entries * @size: size of the DMA memory in bytes * @num_entries: number of stored entries * @max_entries: number of entries available */ struct vsp1_dl_body { struct list_head list; struct list_head free; refcount_t refcnt; struct vsp1_dl_body_pool *pool; struct vsp1_dl_entry *entries; dma_addr_t dma; size_t size; unsigned int num_entries; unsigned int max_entries; }; /** * struct vsp1_dl_body_pool - display list body pool * @dma: DMA address of the entries * @size: size of the full DMA memory pool in bytes * @mem: CPU memory pointer for the pool * @bodies: Array of DLB structures for the pool * @free: List of free DLB entries * @lock: Protects the free list * @vsp1: the VSP1 device */ struct vsp1_dl_body_pool { /* DMA allocation */ dma_addr_t dma; size_t size; void *mem; /* Body management */ struct vsp1_dl_body *bodies; struct list_head free; spinlock_t lock; struct vsp1_device *vsp1; }; /** * struct vsp1_dl_cmd_pool - Display List commands pool * @dma: DMA address of the entries * @size: size of the full DMA memory 
pool in bytes * @mem: CPU memory pointer for the pool * @cmds: Array of command structures for the pool * @free: Free pool entries * @lock: Protects the free list * @vsp1: the VSP1 device */ struct vsp1_dl_cmd_pool { /* DMA allocation */ dma_addr_t dma; size_t size; void *mem; struct vsp1_dl_ext_cmd *cmds; struct list_head free; spinlock_t lock; struct vsp1_device *vsp1; }; /** * struct vsp1_dl_list - Display list * @list: entry in the display list manager lists * @dlm: the display list manager * @header: display list header * @extension: extended display list header. NULL for normal lists * @dma: DMA address for the header * @body0: first display list body * @bodies: list of extra display list bodies * @pre_cmd: pre command to be issued through extended dl header * @post_cmd: post command to be issued through extended dl header * @has_chain: if true, indicates that there's a partition chain * @chain: entry in the display list partition chain * @flags: display list flags, a combination of VSP1_DL_FRAME_END_* */ struct vsp1_dl_list { struct list_head list; struct vsp1_dl_manager *dlm; struct vsp1_dl_header *header; struct vsp1_dl_ext_header *extension; dma_addr_t dma; struct vsp1_dl_body *body0; struct list_head bodies; struct vsp1_dl_ext_cmd *pre_cmd; struct vsp1_dl_ext_cmd *post_cmd; bool has_chain; struct list_head chain; unsigned int flags; }; /** * struct vsp1_dl_manager - Display List manager * @index: index of the related WPF * @singleshot: execute the display list in single-shot mode * @vsp1: the VSP1 device * @lock: protects the free, active, queued, and pending lists * @free: array of all free display lists * @active: list currently being processed (loaded) by hardware * @queued: list queued to the hardware (written to the DL registers) * @pending: list waiting to be queued to the hardware * @pool: body pool for the display list bodies * @cmdpool: commands pool for extended display list */ struct vsp1_dl_manager { unsigned int index; bool singleshot; 
struct vsp1_device *vsp1; spinlock_t lock; struct list_head free; struct vsp1_dl_list *active; struct vsp1_dl_list *queued; struct vsp1_dl_list *pending; struct vsp1_dl_body_pool *pool; struct vsp1_dl_cmd_pool *cmdpool; }; /* ----------------------------------------------------------------------------- * Display List Body Management */ /** * vsp1_dl_body_pool_create - Create a pool of bodies from a single allocation * @vsp1: The VSP1 device * @num_bodies: The number of bodies to allocate * @num_entries: The maximum number of entries that a body can contain * @extra_size: Extra allocation provided for the bodies * * Allocate a pool of display list bodies each with enough memory to contain the * requested number of entries plus the @extra_size. * * Return a pointer to a pool on success or NULL if memory can't be allocated. */ struct vsp1_dl_body_pool * vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies, unsigned int num_entries, size_t extra_size) { struct vsp1_dl_body_pool *pool; size_t dlb_size; unsigned int i; pool = kzalloc(sizeof(*pool), GFP_KERNEL); if (!pool) return NULL; pool->vsp1 = vsp1; /* * TODO: 'extra_size' is only used by vsp1_dlm_create(), to allocate * extra memory for the display list header. We need only one header per * display list, not per display list body, thus this allocation is * extraneous and should be reworked in the future. 
*/ dlb_size = num_entries * sizeof(struct vsp1_dl_entry) + extra_size; pool->size = dlb_size * num_bodies; pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL); if (!pool->bodies) { kfree(pool); return NULL; } pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma, GFP_KERNEL); if (!pool->mem) { kfree(pool->bodies); kfree(pool); return NULL; } spin_lock_init(&pool->lock); INIT_LIST_HEAD(&pool->free); for (i = 0; i < num_bodies; ++i) { struct vsp1_dl_body *dlb = &pool->bodies[i]; dlb->pool = pool; dlb->max_entries = num_entries; dlb->dma = pool->dma + i * dlb_size; dlb->entries = pool->mem + i * dlb_size; list_add_tail(&dlb->free, &pool->free); } return pool; } /** * vsp1_dl_body_pool_destroy - Release a body pool * @pool: The body pool * * Release all components of a pool allocation. */ void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool) { if (!pool) return; if (pool->mem) dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem, pool->dma); kfree(pool->bodies); kfree(pool); } /** * vsp1_dl_body_get - Obtain a body from a pool * @pool: The body pool * * Obtain a body from the pool without blocking. * * Returns a display list body or NULL if there are none available. */ struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool) { struct vsp1_dl_body *dlb = NULL; unsigned long flags; spin_lock_irqsave(&pool->lock, flags); if (!list_empty(&pool->free)) { dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free); list_del(&dlb->free); refcount_set(&dlb->refcnt, 1); } spin_unlock_irqrestore(&pool->lock, flags); return dlb; } /** * vsp1_dl_body_put - Return a body back to its pool * @dlb: The display list body * * Return a body back to the pool, and reset the num_entries to clear the list. 
 */
void vsp1_dl_body_put(struct vsp1_dl_body *dlb)
{
	unsigned long flags;

	/* NULL is accepted as a no-op, mirroring free(). */
	if (!dlb)
		return;

	/* Only the final reference returns the body to the free pool. */
	if (!refcount_dec_and_test(&dlb->refcnt))
		return;

	/* Clear the entry count so the body is reusable as-is. */
	dlb->num_entries = 0;

	spin_lock_irqsave(&dlb->pool->lock, flags);
	list_add_tail(&dlb->free, &dlb->pool->free);
	spin_unlock_irqrestore(&dlb->pool->lock, flags);
}

/**
 * vsp1_dl_body_write - Write a register to a display list body
 * @dlb: The body
 * @reg: The register address
 * @data: The register value
 *
 * Write the given register and value to the display list body. The maximum
 * number of entries that can be written in a body is specified when the body is
 * allocated by vsp1_dl_body_alloc().
 */
void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	/*
	 * Overflowing the body would corrupt adjacent pool memory; warn once
	 * and drop the write instead.
	 */
	if (WARN_ONCE(dlb->num_entries >= dlb->max_entries,
		      "DLB size exceeded (max %u)", dlb->max_entries))
		return;

	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}

/* -----------------------------------------------------------------------------
 * Display List Extended Command Management
 */

enum vsp1_extcmd_type {
	VSP1_EXTCMD_AUTODISP,
	VSP1_EXTCMD_AUTOFLD,
};

struct vsp1_extended_command_info {
	u16 opcode;		/* hardware opcode for the command */
	size_t body_size;	/* size of the command's private data, bytes */
};

/* Per-type opcode and private data size, indexed by enum vsp1_extcmd_type. */
static const struct vsp1_extended_command_info vsp1_extended_commands[] = {
	[VSP1_EXTCMD_AUTODISP] = { 0x02, 96 },
	[VSP1_EXTCMD_AUTOFLD] = { 0x03, 160 },
};

/**
 * vsp1_dl_cmd_pool_create - Create a pool of commands from a single allocation
 * @vsp1: The VSP1 device
 * @type: The command pool type
 * @num_cmds: The number of commands to allocate
 *
 * Allocate a pool of commands each with enough memory to contain the private
 * data of each command. The allocation sizes are dependent upon the command
 * type.
 *
 * Return a pointer to the pool on success or NULL if memory can't be allocated.
 */
static struct vsp1_dl_cmd_pool *
vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
			unsigned int num_cmds)
{
	struct vsp1_dl_cmd_pool *pool;
	unsigned int i;
	size_t cmd_size;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
	if (!pool->cmds) {
		kfree(pool);
		return NULL;
	}

	/*
	 * Each slot holds the command body followed by its private data;
	 * round up to 16 bytes so every slot (and thus every data area)
	 * keeps 16-byte DMA alignment.
	 */
	cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
		   vsp1_extended_commands[type].body_size;
	cmd_size = ALIGN(cmd_size, 16);

	pool->size = cmd_size * num_cmds;
	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem) {
		kfree(pool->cmds);
		kfree(pool);
		return NULL;
	}

	for (i = 0; i < num_cmds; ++i) {
		struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
		size_t cmd_offset = i * cmd_size;
		/* data_offset must be 16 byte aligned for DMA. */
		size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
				     cmd_offset;

		cmd->pool = pool;
		cmd->opcode = vsp1_extended_commands[type].opcode;

		/*
		 * TODO: Auto-disp can utilise more than one extended body
		 * command per cmd.
		 */
		cmd->num_cmds = 1;
		cmd->cmds = pool->mem + cmd_offset;
		cmd->cmd_dma = pool->dma + cmd_offset;

		cmd->data = pool->mem + data_offset;
		cmd->data_dma = pool->dma + data_offset;

		list_add_tail(&cmd->free, &pool->free);
	}

	return pool;
}

/* Take a free extended command from the pool, or return NULL if none left. */
static struct vsp1_dl_ext_cmd *vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
{
	struct vsp1_dl_ext_cmd *cmd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
				       free);
		list_del(&cmd->free);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return cmd;
}

/* Return an extended command to its pool. NULL is accepted as a no-op. */
static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
{
	unsigned long flags;

	if (!cmd)
		return;

	/* Reset flags, these mark data usage. */
	cmd->flags = 0;

	spin_lock_irqsave(&cmd->pool->lock, flags);
	list_add_tail(&cmd->free, &cmd->pool->free);
	spin_unlock_irqrestore(&cmd->pool->lock, flags);
}

/* Release all memory owned by an extended command pool. NULL is a no-op. */
static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
{
	if (!pool)
		return;

	if (pool->mem)
		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
			    pool->dma);

	kfree(pool->cmds);
	kfree(pool);
}

/*
 * Get (lazily allocating on first use) the pre command attached to a display
 * list. May return NULL if the command pool is exhausted.
 */
struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	if (dl->pre_cmd)
		return dl->pre_cmd;

	dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);

	return dl->pre_cmd;
}

/* ----------------------------------------------------------------------------
 * Display List Transaction Management
 */

/* Allocate one display list, backed by a default body taken from the pool. */
static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_offset;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->bodies);
	dl->dlm = dlm;

	/* Get a default body for our list. */
	dl->body0 = vsp1_dl_body_get(dlm->pool);
	if (!dl->body0) {
		kfree(dl);
		return NULL;
	}

	/*
	 * The header lives in the same DMA area as body0, right after its
	 * entries (the pool was created with extra_size for this purpose).
	 */
	header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);

	dl->header = ((void *)dl->body0->entries) + header_offset;
	dl->dma = dl->body0->dma + header_offset;

	memset(dl->header, 0, sizeof(*dl->header));
	dl->header->lists[0].addr = dl->body0->dma;

	return dl;
}

/* Drop the references to all extra bodies attached to the display list. */
static void vsp1_dl_list_bodies_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_body *dlb, *tmp;

	list_for_each_entry_safe(dlb, tmp, &dl->bodies, list) {
		list_del(&dlb->list);
		vsp1_dl_body_put(dlb);
	}
}

/* Free a display list and release its default and extra bodies. */
static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_put(dl->body0);
	vsp1_dl_list_bodies_put(dl);
	kfree(dl);
}

/**
 * vsp1_dl_list_get - Get a free display list
 * @dlm: The display list manager
 *
 * Get a display list from the pool of free lists and return it.
 *
 * This function must be called without the display list manager lock held.
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The display list chain must be initialised to ensure every
		 * display list can assert list_empty() if it is not in a chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}

/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_next;

	if (!dl)
		return;

	/*
	 * Release any linked display-lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_next, &dl->chain, chain)
			__vsp1_dl_list_put(dl_next);
	}

	dl->has_chain = false;

	vsp1_dl_list_bodies_put(dl);

	/* Extended commands are per-commit; return them to their pool. */
	vsp1_dl_ext_cmd_put(dl->pre_cmd);
	vsp1_dl_ext_cmd_put(dl->post_cmd);

	dl->pre_cmd = NULL;
	dl->post_cmd = NULL;

	/*
	 * body0 is reused as an optimisation as presently every display list
	 * has at least one body, thus we reinitialise the entries list.
	 */
	dl->body0->num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}

/**
 * vsp1_dl_list_put - Release a display list
 * @dl: The display list
 *
 * Release the display list and return it to the pool of free lists.
 *
 * Passing a NULL pointer to this function is safe, in that case no operation
 * will be performed.
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_get_body0 - Obtain the default body for the display list
 * @dl: The display list
 *
 * Obtain a pointer to the internal display list body allowing this to be passed
 * directly to configure operations.
 */
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
{
	return dl->body0;
}

/**
 * vsp1_dl_list_add_body - Add a body to the display list
 * @dl: The display list
 * @dlb: The body
 *
 * Add a display list body to a display list. Registers contained in bodies are
 * processed after registers contained in the main display list, in the order in
 * which bodies are added.
 *
 * Adding a body to a display list passes ownership of the body to the list. The
 * caller retains its reference to the body when adding it to the display list,
 * but is not allowed to add new entries to the body.
 *
 * The reference must be explicitly released by a call to vsp1_dl_body_put()
 * when the body isn't needed anymore.
 */
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
{
	/* Take a reference on behalf of the list; dropped in bodies_put(). */
	refcount_inc(&dlb->refcnt);

	list_add_tail(&dlb->list, &dl->bodies);

	return 0;
}

/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 * @head: The head display list
 * @dl: The new display list
 *
 * Add a display list to an existing display list chain. The chained lists
 * will be automatically processed by the hardware without intervention from
 * the CPU. A display list end interrupt will only complete after the last
 * display list in the chain has completed processing.
 *
 * Adding a display list to a chain passes ownership of the display list to
 * the head display list item. The chain is released when the head dl item is
 * put back with __vsp1_dl_list_put().
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head, struct vsp1_dl_list *dl)
{
	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);
	return 0;
}

/* Mirror the cached command parameters into its in-memory command body. */
static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
{
	cmd->cmds[0].opcode = cmd->opcode;
	cmd->cmds[0].flags = cmd->flags;
	cmd->cmds[0].address_set = cmd->data_dma;
	cmd->cmds[0].reserved = 0;
}

/*
 * Fill the (extended) header of a display list prior to committing it to
 * hardware. @is_last tells whether this is the last list of a chain, which
 * determines the interrupt-enable and auto-start flags.
 */
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies addresses and sizes. The
	 * address of the first body has already been filled when the display
	 * list was allocated.
	 */
	hdr->num_bytes = dl->body0->num_entries * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->bodies, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;
	dl->header->flags = 0;

	/*
	 * Enable the interrupt for the end of each frame. In continuous mode
	 * chained lists are used with one list per frame, so enable the
	 * interrupt for each list. In singleshot mode chained lists are used
	 * to partition a single frame, so enable the interrupt for the last
	 * list only.
	 */
	if (!dlm->singleshot || is_last)
		dl->header->flags |= VSP1_DLH_INT_ENABLE;

	/*
	 * In continuous mode enable auto-start for all lists, as the VSP must
	 * loop on the same list until a new one is queued. In singleshot mode
	 * enable auto-start for all lists but the last to chain processing of
	 * partitions without software intervention.
	 */
	if (!dlm->singleshot || !is_last)
		dl->header->flags |= VSP1_DLH_AUTO_START;

	if (!is_last) {
		/*
		 * If this is not the last display list in the chain, queue the
		 * next item for automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
	} else if (!dlm->singleshot) {
		/*
		 * if the display list manager works in continuous mode, the VSP
		 * should loop over the display list continuously until
		 * instructed to do otherwise.
		 */
		dl->header->next_header = dl->dma;
	}

	/* Nothing more to do for lists without an extended header. */
	if (!dl->extension)
		return;

	dl->extension->flags = 0;

	if (dl->pre_cmd) {
		dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
		dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
	}

	if (dl->post_cmd) {
		dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
		dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
	}
}

/* Tell whether the hardware has yet to take a previously queued update. */
static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (!dlm->queued)
		return false;

	/*
	 * Check whether the VSP1 has taken the update. The hardware indicates
	 * this by clearing the UPDHDR bit in the CMD register.
	 */
	return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}

static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;

	/*
	 * Program the display list header address. If the hardware is idle
	 * (single-shot mode or first frame in continuous mode) it will then be
	 * started independently. If the hardware is operating, the
	 * VI6_DL_HDR_REF_ADDR register will be updated with the display list
	 * address.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}

/* Continuous-mode commit. Called with the display list manager lock held. */
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list by the new one, as we could
	 * race with the hardware. We thus mark the update as pending, it will
	 * be queued up to the hardware by the frame end interrupt handler.
	 *
	 * If a display list is already pending we simply drop it as the new
	 * display list is assumed to contain a more recent configuration. It is
	 * an error if the already pending list has the
	 * VSP1_DL_FRAME_END_INTERNAL flag set, as there is then a process
	 * waiting for that list to complete. This shouldn't happen as the
	 * waiting process should perform proper locking, but warn just in
	 * case.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		WARN_ON(dlm->pending &&
			(dlm->pending->flags & VSP1_DL_FRAME_END_INTERNAL));
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}

/* Single-shot-mode commit. Called with the display list manager lock held. */
static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * When working in single-shot mode, the caller guarantees that the
	 * hardware is idle at this point. Just commit the head display list
	 * to hardware. Chained lists will be started automatically.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}

void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_next;
	unsigned long flags;

	/* Fill the header for the head and chained display lists.
 */
	vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

	list_for_each_entry(dl_next, &dl->chain, chain) {
		bool last = list_is_last(&dl_next->chain, &dl->chain);

		vsp1_dl_list_fill_header(dl_next, last);
	}

	/* COMPLETED is an output flag of the irq handler, never an input. */
	dl->flags = dl_flags & ~VSP1_DL_FRAME_END_COMPLETED;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}

/* -----------------------------------------------------------------------------
 * Display List Manager
 */

/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return a set of flags that indicates display list completion status.
 *
 * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display list
 * has completed at frame end. If the flag is not returned display list
 * completion has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns with the flag
 * set in single-shot mode as display list processing is then not continuous and
 * races never occur.
 *
 * The following flags are only supported for continuous mode.
 *
 * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the display list that just
 * became active had been queued with the internal notification flag.
 *
 * The VSP1_DL_FRAME_END_WRITEBACK flag indicates that the previously active
 * display list had been queued with the writeback flag.
 */
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;
	u32 status = vsp1_read(vsp1, VI6_STATUS);
	unsigned int flags = 0;

	spin_lock(&dlm->lock);

	/*
	 * The mem-to-mem pipelines work in single-shot mode. No new display
	 * list can be queued, we don't have to do anything.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
		goto done;
	}

	/*
	 * If the commit operation raced with the interrupt and occurred after
	 * the frame end event but before interrupt processing, the hardware
	 * hasn't taken the update into account yet. We have to skip one frame
	 * and retry.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * Progressive streams report only TOP fields. If we have a BOTTOM
	 * field, we are interlaced, and expect the frame to complete on the
	 * next frame end interrupt.
	 */
	if (status & VI6_STATUS_FLD_STD(dlm->index))
		goto done;

	/*
	 * If the active display list has the writeback flag set, the frame
	 * completion marks the end of the writeback capture. Return the
	 * VSP1_DL_FRAME_END_WRITEBACK flag and reset the display list's
	 * writeback flag.
	 */
	if (dlm->active && (dlm->active->flags & VSP1_DL_FRAME_END_WRITEBACK)) {
		flags |= VSP1_DL_FRAME_END_WRITEBACK;
		dlm->active->flags &= ~VSP1_DL_FRAME_END_WRITEBACK;
	}

	/*
	 * The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)
			flags |= VSP1_DL_FRAME_END_INTERNAL;
		dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;

		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
	}

	/*
	 * Now that the VSP has started processing the queued display list, we
	 * can queue the pending display list to the hardware if one has been
	 * prepared.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return flags;
}

/* Hardware Setup */

void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	unsigned int i;
	u32 ctrl = (256 << VI6_DL_CTRL_AR_WAIT_SHIFT) |
		   VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0 |
		   VI6_DL_CTRL_DLE;
	u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT) |
		     VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;

	/* Extended display list control only exists on devices with EXT_DL. */
	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		for (i = 0; i < vsp1->info->wpf_count; ++i)
			vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
	}

	vsp1_write(vsp1, VI6_DL_CTRL, ctrl);
	vsp1_write(vsp1, VI6_DL_SWAP, VI6_DL_SWAP_LWS);
}

/* Return all in-flight display lists to the free pool. */
void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm)
{
	return vsp1_dl_body_get(dlm->pool);
}

struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	size_t header_size;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->singleshot = vsp1->info->uapi;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);

	/*
	 * Initialize the display list body and allocate DMA memory for the body
	 * and the header. Both are allocated together to avoid memory
	 * fragmentation, with the header located right after the body in
	 * memory. An extra body is allocated on top of the prealloc to account
	 * for the cached body used by the vsp1_pipeline object.
	 */
	header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL) ?
			sizeof(struct vsp1_dl_header_extended) :
			sizeof(struct vsp1_dl_header);

	header_size = ALIGN(header_size, 8);

	dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
					     VSP1_DL_NUM_ENTRIES, header_size);
	if (!dlm->pool)
		return NULL;

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}

		/* The extended header immediately follows the header. */
		if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
			dl->extension = (void *)dl->header
				      + sizeof(*dl->header);

		list_add_tail(&dl->list, &dlm->free);
	}

	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
					VSP1_EXTCMD_AUTOFLD, prealloc);
		if (!dlm->cmdpool) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}
	}

	return dlm;
}

void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	/* NULL is accepted so error paths may call this unconditionally. */
	if (!dlm)
		return;

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dl_body_pool_destroy(dlm->pool);
	vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
}
linux-master
drivers/media/platform/renesas/vsp1/vsp1_dl.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_lif.c -- R-Car VSP1 LCD Controller Interface
 *
 * Copyright (C) 2013-2014 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart ([email protected])
 */

#include <linux/device.h>
#include <linux/gfp.h>

#include <media/v4l2-subdev.h>

#include "vsp1.h"
#include "vsp1_dl.h"
#include "vsp1_lif.h"

/* Frame size limits accepted on the LIF pads, in pixels. */
#define LIF_MIN_SIZE				2U
#define LIF_MAX_SIZE				8190U

/* -----------------------------------------------------------------------------
 * Device Access
 */

/*
 * Write a LIF register through the display list body. The register address is
 * offset by the entity index to address the correct LIF instance.
 */
static inline void vsp1_lif_write(struct vsp1_lif *lif,
				  struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	vsp1_dl_body_write(dlb, reg + lif->entity.index * VI6_LIF_OFFSET,
			   data);
}

/* -----------------------------------------------------------------------------
 * V4L2 Subdevice Operations
 */

/* Media bus codes supported by the LIF. */
static const unsigned int lif_codes[] = {
	MEDIA_BUS_FMT_ARGB8888_1X32,
	MEDIA_BUS_FMT_AYUV8_1X32,
};

/* Enumerate the media bus codes supported on the LIF pads. */
static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_mbus_code_enum *code)
{
	return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, lif_codes,
					  ARRAY_SIZE(lif_codes));
}

/* Enumerate frame sizes within the LIF hardware limits. */
static int lif_enum_frame_size(struct v4l2_subdev *subdev,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_frame_size_enum *fse)
{
	return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
					   LIF_MIN_SIZE, LIF_MIN_SIZE,
					   LIF_MAX_SIZE, LIF_MAX_SIZE);
}

/* Set the pad format, clamped to the supported codes and size limits. */
static int lif_set_format(struct v4l2_subdev *subdev,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_format *fmt)
{
	return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, lif_codes,
					  ARRAY_SIZE(lif_codes),
					  LIF_MIN_SIZE, LIF_MIN_SIZE,
					  LIF_MAX_SIZE, LIF_MAX_SIZE);
}

static const struct v4l2_subdev_pad_ops lif_pad_ops = {
	.init_cfg = vsp1_entity_init_cfg,
	.enum_mbus_code = lif_enum_mbus_code,
	.enum_frame_size = lif_enum_frame_size,
	.get_fmt = vsp1_subdev_get_pad_format,
	.set_fmt = lif_set_format,
};

static const struct v4l2_subdev_ops lif_ops = {
	.pad = &lif_pad_ops,
};

/* -----------------------------------------------------------------------------
 * VSP1 Entity Operations
 */

/*
 * Program the LIF for streaming. The CSBTH high/low thresholds (hbth/lbth)
 * and the outstanding request threshold (obth) depend on the IP model.
 */
static void lif_configure_stream(struct vsp1_entity *entity,
				 struct vsp1_pipeline *pipe,
				 struct vsp1_dl_list *dl,
				 struct vsp1_dl_body *dlb)
{
	const struct v4l2_mbus_framefmt *format;
	struct vsp1_lif *lif = to_lif(&entity->subdev);
	unsigned int hbth;
	unsigned int obth;
	unsigned int lbth;

	format = vsp1_entity_get_pad_format(&lif->entity, lif->entity.config,
					    LIF_PAD_SOURCE);

	switch (entity->vsp1->version & VI6_IP_VERSION_MODEL_MASK) {
	case VI6_IP_VERSION_MODEL_VSPD_GEN2:
	case VI6_IP_VERSION_MODEL_VSPD_V2H:
		hbth = 1536;
		/* Cap obth by the (half-)frame size on Gen2 models. */
		obth = min(128U, (format->width + 1) / 2 * format->height - 4);
		lbth = 1520;
		break;

	case VI6_IP_VERSION_MODEL_VSPDL_GEN3:
	case VI6_IP_VERSION_MODEL_VSPD_V3:
	case VI6_IP_VERSION_MODEL_VSPD_RZG2L:
		hbth = 0;
		obth = 1500;
		lbth = 0;
		break;

	case VI6_IP_VERSION_MODEL_VSPD_GEN3:
	case VI6_IP_VERSION_MODEL_VSPD_GEN4:
	default:
		hbth = 0;
		obth = 3000;
		lbth = 0;
		break;
	}

	vsp1_lif_write(lif, dlb, VI6_LIF_CSBTH,
		       (hbth << VI6_LIF_CSBTH_HBTH_SHIFT) |
		       (lbth << VI6_LIF_CSBTH_LBTH_SHIFT));

	/* CFMT is set only when no media bus code has been configured. */
	vsp1_lif_write(lif, dlb, VI6_LIF_CTRL,
		       (obth << VI6_LIF_CTRL_OBTH_SHIFT) |
		       (format->code == 0 ? VI6_LIF_CTRL_CFMT : 0) |
		       VI6_LIF_CTRL_REQSEL | VI6_LIF_CTRL_LIF_EN);

	/*
	 * On R-Car V3M and RZ/G2L the LIF0 buffer attribute register has to be
	 * set to a non-default value to guarantee proper operation (otherwise
	 * artifacts may appear on the output). The value required by the
	 * manual is not explained but is likely a buffer size or threshold.
	 */
	if (vsp1_feature(entity->vsp1, VSP1_HAS_NON_ZERO_LBA))
		vsp1_lif_write(lif, dlb, VI6_LIF_LBA,
			       VI6_LIF_LBA_LBA0 |
			       (1536 << VI6_LIF_LBA_LBA1_SHIFT));
}

static const struct vsp1_entity_operations lif_entity_ops = {
	.configure_stream = lif_configure_stream,
};

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */

/*
 * vsp1_lif_create - Allocate and initialise a LIF entity
 * @vsp1: the VSP1 device
 * @index: index of the LIF instance
 *
 * Return the LIF entity or an ERR_PTR() on failure. The entity is
 * devm-allocated and released together with the device.
 */
struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1, unsigned int index)
{
	struct vsp1_lif *lif;
	int ret;

	lif = devm_kzalloc(vsp1->dev, sizeof(*lif), GFP_KERNEL);
	if (lif == NULL)
		return ERR_PTR(-ENOMEM);

	lif->entity.ops = &lif_entity_ops;
	lif->entity.type = VSP1_ENTITY_LIF;
	lif->entity.index = index;

	/*
	 * The LIF is never exposed to userspace, but media entity registration
	 * requires a function to be set. Use PROC_VIDEO_PIXEL_FORMATTER just to
	 * avoid triggering a WARN_ON(), the value won't be seen anywhere.
	 */
	ret = vsp1_entity_init(vsp1, &lif->entity, "lif", 2, &lif_ops,
			       MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
	if (ret < 0)
		return ERR_PTR(ret);

	return lif;
}
linux-master
drivers/media/platform/renesas/vsp1/vsp1_lif.c
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_video.c -- R-Car VSP1 Video Node * * Copyright (C) 2013-2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart ([email protected]) */ #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/v4l2-mediabus.h> #include <linux/videodev2.h> #include <linux/wait.h> #include <media/media-entity.h> #include <media/v4l2-dev.h> #include <media/v4l2-fh.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-subdev.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-contig.h> #include "vsp1.h" #include "vsp1_brx.h" #include "vsp1_dl.h" #include "vsp1_entity.h" #include "vsp1_hgo.h" #include "vsp1_hgt.h" #include "vsp1_pipe.h" #include "vsp1_rwpf.h" #include "vsp1_uds.h" #include "vsp1_video.h" #define VSP1_VIDEO_DEF_FORMAT V4L2_PIX_FMT_YUYV #define VSP1_VIDEO_DEF_WIDTH 1024 #define VSP1_VIDEO_DEF_HEIGHT 768 #define VSP1_VIDEO_MAX_WIDTH 8190U #define VSP1_VIDEO_MAX_HEIGHT 8190U /* ----------------------------------------------------------------------------- * Helper functions */ static struct v4l2_subdev * vsp1_video_remote_subdev(struct media_pad *local, u32 *pad) { struct media_pad *remote; remote = media_pad_remote_pad_first(local); if (!remote || !is_media_entity_v4l2_subdev(remote->entity)) return NULL; if (pad) *pad = remote->index; return media_entity_to_v4l2_subdev(remote->entity); } static int vsp1_video_verify_format(struct vsp1_video *video) { struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct v4l2_subdev *subdev; int ret; subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad); if (subdev == NULL) return -EINVAL; ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); if (ret < 0) return ret == -ENOIOCTLCMD ? 
-EINVAL : ret; if (video->rwpf->fmtinfo->mbus != fmt.format.code || video->rwpf->format.height != fmt.format.height || video->rwpf->format.width != fmt.format.width) return -EINVAL; return 0; } static int __vsp1_video_try_format(struct vsp1_video *video, struct v4l2_pix_format_mplane *pix, const struct vsp1_format_info **fmtinfo) { static const u32 xrgb_formats[][2] = { { V4L2_PIX_FMT_RGB444, V4L2_PIX_FMT_XRGB444 }, { V4L2_PIX_FMT_RGB555, V4L2_PIX_FMT_XRGB555 }, { V4L2_PIX_FMT_BGR32, V4L2_PIX_FMT_XBGR32 }, { V4L2_PIX_FMT_RGB32, V4L2_PIX_FMT_XRGB32 }, }; const struct vsp1_format_info *info; unsigned int width = pix->width; unsigned int height = pix->height; unsigned int i; /* * Backward compatibility: replace deprecated RGB formats by their XRGB * equivalent. This selects the format older userspace applications want * while still exposing the new format. */ for (i = 0; i < ARRAY_SIZE(xrgb_formats); ++i) { if (xrgb_formats[i][0] == pix->pixelformat) { pix->pixelformat = xrgb_formats[i][1]; break; } } /* * Retrieve format information and select the default format if the * requested format isn't supported. */ info = vsp1_get_format_info(video->vsp1, pix->pixelformat); if (info == NULL) info = vsp1_get_format_info(video->vsp1, VSP1_VIDEO_DEF_FORMAT); pix->pixelformat = info->fourcc; pix->colorspace = V4L2_COLORSPACE_SRGB; pix->field = V4L2_FIELD_NONE; if (info->fourcc == V4L2_PIX_FMT_HSV24 || info->fourcc == V4L2_PIX_FMT_HSV32) pix->hsv_enc = V4L2_HSV_ENC_256; memset(pix->reserved, 0, sizeof(pix->reserved)); /* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */ width = round_down(width, info->hsub); height = round_down(height, info->vsub); /* Clamp the width and height. */ pix->width = clamp(width, info->hsub, VSP1_VIDEO_MAX_WIDTH); pix->height = clamp(height, info->vsub, VSP1_VIDEO_MAX_HEIGHT); /* * Compute and clamp the stride and image size. 
While not documented in * the datasheet, strides not aligned to a multiple of 128 bytes result * in image corruption. */ for (i = 0; i < min(info->planes, 2U); ++i) { unsigned int hsub = i > 0 ? info->hsub : 1; unsigned int vsub = i > 0 ? info->vsub : 1; unsigned int align = 128; unsigned int bpl; bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline, pix->width / hsub * info->bpp[i] / 8, round_down(65535U, align)); pix->plane_fmt[i].bytesperline = round_up(bpl, align); pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline * pix->height / vsub; } if (info->planes == 3) { /* The second and third planes must have the same stride. */ pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline; pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage; } pix->num_planes = info->planes; if (fmtinfo) *fmtinfo = info; return 0; } /* ----------------------------------------------------------------------------- * VSP1 Partition Algorithm support */ /** * vsp1_video_calculate_partition - Calculate the active partition output window * * @pipe: the pipeline * @partition: partition that will hold the calculated values * @div_size: pre-determined maximum partition division size * @index: partition index */ static void vsp1_video_calculate_partition(struct vsp1_pipeline *pipe, struct vsp1_partition *partition, unsigned int div_size, unsigned int index) { const struct v4l2_mbus_framefmt *format; struct vsp1_partition_window window; unsigned int modulus; /* * Partitions are computed on the size before rotation, use the format * at the WPF sink. */ format = vsp1_entity_get_pad_format(&pipe->output->entity, pipe->output->entity.config, RWPF_PAD_SINK); /* A single partition simply processes the output size in full. */ if (pipe->partitions <= 1) { window.left = 0; window.width = format->width; vsp1_pipeline_propagate_partition(pipe, partition, index, &window); return; } /* Initialise the partition with sane starting conditions. 
*/ window.left = index * div_size; window.width = div_size; modulus = format->width % div_size; /* * We need to prevent the last partition from being smaller than the * *minimum* width of the hardware capabilities. * * If the modulus is less than half of the partition size, * the penultimate partition is reduced to half, which is added * to the final partition: |1234|1234|1234|12|341| * to prevent this: |1234|1234|1234|1234|1|. */ if (modulus) { /* * pipe->partitions is 1 based, whilst index is a 0 based index. * Normalise this locally. */ unsigned int partitions = pipe->partitions - 1; if (modulus < div_size / 2) { if (index == partitions - 1) { /* Halve the penultimate partition. */ window.width = div_size / 2; } else if (index == partitions) { /* Increase the final partition. */ window.width = (div_size / 2) + modulus; window.left -= div_size / 2; } } else if (index == partitions) { window.width = modulus; } } vsp1_pipeline_propagate_partition(pipe, partition, index, &window); } static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe) { struct vsp1_device *vsp1 = pipe->output->entity.vsp1; const struct v4l2_mbus_framefmt *format; struct vsp1_entity *entity; unsigned int div_size; unsigned int i; /* * Partitions are computed on the size before rotation, use the format * at the WPF sink. */ format = vsp1_entity_get_pad_format(&pipe->output->entity, pipe->output->entity.config, RWPF_PAD_SINK); div_size = format->width; /* * Only Gen3+ hardware requires image partitioning, Gen2 will operate * with a single partition that covers the whole output. 
*/ if (vsp1->info->gen >= 3) { list_for_each_entry(entity, &pipe->entities, list_pipe) { unsigned int entity_max; if (!entity->ops->max_width) continue; entity_max = entity->ops->max_width(entity, pipe); if (entity_max) div_size = min(div_size, entity_max); } } pipe->partitions = DIV_ROUND_UP(format->width, div_size); pipe->part_table = kcalloc(pipe->partitions, sizeof(*pipe->part_table), GFP_KERNEL); if (!pipe->part_table) return -ENOMEM; for (i = 0; i < pipe->partitions; ++i) vsp1_video_calculate_partition(pipe, &pipe->part_table[i], div_size, i); return 0; } /* ----------------------------------------------------------------------------- * Pipeline Management */ /* * vsp1_video_complete_buffer - Complete the current buffer * @video: the video node * * This function completes the current buffer by filling its sequence number, * time stamp and payload size, and hands it back to the vb2 core. * * Return the next queued buffer or NULL if the queue is empty. */ static struct vsp1_vb2_buffer * vsp1_video_complete_buffer(struct vsp1_video *video) { struct vsp1_pipeline *pipe = video->rwpf->entity.pipe; struct vsp1_vb2_buffer *next = NULL; struct vsp1_vb2_buffer *done; unsigned long flags; unsigned int i; spin_lock_irqsave(&video->irqlock, flags); if (list_empty(&video->irqqueue)) { spin_unlock_irqrestore(&video->irqlock, flags); return NULL; } done = list_first_entry(&video->irqqueue, struct vsp1_vb2_buffer, queue); list_del(&done->queue); if (!list_empty(&video->irqqueue)) next = list_first_entry(&video->irqqueue, struct vsp1_vb2_buffer, queue); spin_unlock_irqrestore(&video->irqlock, flags); done->buf.sequence = pipe->sequence; done->buf.vb2_buf.timestamp = ktime_get_ns(); for (i = 0; i < done->buf.vb2_buf.num_planes; ++i) vb2_set_plane_payload(&done->buf.vb2_buf, i, vb2_plane_size(&done->buf.vb2_buf, i)); vb2_buffer_done(&done->buf.vb2_buf, VB2_BUF_STATE_DONE); return next; } static void vsp1_video_frame_end(struct vsp1_pipeline *pipe, struct vsp1_rwpf *rwpf) { 
struct vsp1_video *video = rwpf->video; struct vsp1_vb2_buffer *buf; buf = vsp1_video_complete_buffer(video); if (buf == NULL) return; video->rwpf->mem = buf->mem; pipe->buffers_ready |= 1 << video->pipe_index; } static void vsp1_video_pipeline_run_partition(struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, unsigned int partition) { struct vsp1_dl_body *dlb = vsp1_dl_list_get_body0(dl); struct vsp1_entity *entity; pipe->partition = &pipe->part_table[partition]; list_for_each_entry(entity, &pipe->entities, list_pipe) vsp1_entity_configure_partition(entity, pipe, dl, dlb); } static void vsp1_video_pipeline_run(struct vsp1_pipeline *pipe) { struct vsp1_device *vsp1 = pipe->output->entity.vsp1; struct vsp1_entity *entity; struct vsp1_dl_body *dlb; struct vsp1_dl_list *dl; unsigned int partition; dl = vsp1_dl_list_get(pipe->output->dlm); /* * If the VSP hardware isn't configured yet (which occurs either when * processing the first frame or after a system suspend/resume), add the * cached stream configuration to the display list to perform a full * initialisation. */ if (!pipe->configured) vsp1_dl_list_add_body(dl, pipe->stream_config); dlb = vsp1_dl_list_get_body0(dl); list_for_each_entry(entity, &pipe->entities, list_pipe) vsp1_entity_configure_frame(entity, pipe, dl, dlb); /* Run the first partition. */ vsp1_video_pipeline_run_partition(pipe, dl, 0); /* Process consecutive partitions as necessary. */ for (partition = 1; partition < pipe->partitions; ++partition) { struct vsp1_dl_list *dl_next; dl_next = vsp1_dl_list_get(pipe->output->dlm); /* * An incomplete chain will still function, but output only * the partitions that had a dl available. The frame end * interrupt will be marked on the last dl in the chain. */ if (!dl_next) { dev_err(vsp1->dev, "Failed to obtain a dl list. Frame will be incomplete\n"); break; } vsp1_video_pipeline_run_partition(pipe, dl_next, partition); vsp1_dl_list_add_chain(dl, dl_next); } /* Complete, and commit the head display list. 
*/ vsp1_dl_list_commit(dl, 0); pipe->configured = true; vsp1_pipeline_run(pipe); } static void vsp1_video_pipeline_frame_end(struct vsp1_pipeline *pipe, unsigned int completion) { struct vsp1_device *vsp1 = pipe->output->entity.vsp1; enum vsp1_pipeline_state state; unsigned long flags; unsigned int i; /* M2M Pipelines should never call here with an incomplete frame. */ WARN_ON_ONCE(!(completion & VSP1_DL_FRAME_END_COMPLETED)); spin_lock_irqsave(&pipe->irqlock, flags); /* Complete buffers on all video nodes. */ for (i = 0; i < vsp1->info->rpf_count; ++i) { if (!pipe->inputs[i]) continue; vsp1_video_frame_end(pipe, pipe->inputs[i]); } vsp1_video_frame_end(pipe, pipe->output); state = pipe->state; pipe->state = VSP1_PIPELINE_STOPPED; /* * If a stop has been requested, mark the pipeline as stopped and * return. Otherwise restart the pipeline if ready. */ if (state == VSP1_PIPELINE_STOPPING) wake_up(&pipe->wq); else if (vsp1_pipeline_ready(pipe)) vsp1_video_pipeline_run(pipe); spin_unlock_irqrestore(&pipe->irqlock, flags); } static int vsp1_video_pipeline_build_branch(struct vsp1_pipeline *pipe, struct vsp1_rwpf *input, struct vsp1_rwpf *output) { struct media_entity_enum ent_enum; struct vsp1_entity *entity; struct media_pad *pad; struct vsp1_brx *brx = NULL; int ret; ret = media_entity_enum_init(&ent_enum, &input->entity.vsp1->media_dev); if (ret < 0) return ret; /* * The main data path doesn't include the HGO or HGT, use * vsp1_entity_remote_pad() to traverse the graph. */ pad = vsp1_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]); while (1) { if (pad == NULL) { ret = -EPIPE; goto out; } /* We've reached a video node, that shouldn't have happened. */ if (!is_media_entity_v4l2_subdev(pad->entity)) { ret = -EPIPE; goto out; } entity = to_vsp1_entity( media_entity_to_v4l2_subdev(pad->entity)); /* * A BRU or BRS is present in the pipeline, store its input pad * number in the input RPF for use when configuring the RPF. 
*/ if (entity->type == VSP1_ENTITY_BRU || entity->type == VSP1_ENTITY_BRS) { /* BRU and BRS can't be chained. */ if (brx) { ret = -EPIPE; goto out; } brx = to_brx(&entity->subdev); brx->inputs[pad->index].rpf = input; input->brx_input = pad->index; } /* We've reached the WPF, we're done. */ if (entity->type == VSP1_ENTITY_WPF) break; /* Ensure the branch has no loop. */ if (media_entity_enum_test_and_set(&ent_enum, &entity->subdev.entity)) { ret = -EPIPE; goto out; } /* UDS can't be chained. */ if (entity->type == VSP1_ENTITY_UDS) { if (pipe->uds) { ret = -EPIPE; goto out; } pipe->uds = entity; pipe->uds_input = brx ? &brx->entity : &input->entity; } /* Follow the source link, ignoring any HGO or HGT. */ pad = &entity->pads[entity->source_pad]; pad = vsp1_entity_remote_pad(pad); } /* The last entity must be the output WPF. */ if (entity != &output->entity) ret = -EPIPE; out: media_entity_enum_cleanup(&ent_enum); return ret; } static int vsp1_video_pipeline_build(struct vsp1_pipeline *pipe, struct vsp1_video *video) { struct media_graph graph; struct media_entity *entity = &video->video.entity; struct media_device *mdev = entity->graph_obj.mdev; unsigned int i; int ret; /* Walk the graph to locate the entities and video nodes. 
*/ ret = media_graph_walk_init(&graph, mdev); if (ret) return ret; media_graph_walk_start(&graph, entity); while ((entity = media_graph_walk_next(&graph))) { struct v4l2_subdev *subdev; struct vsp1_rwpf *rwpf; struct vsp1_entity *e; if (!is_media_entity_v4l2_subdev(entity)) continue; subdev = media_entity_to_v4l2_subdev(entity); e = to_vsp1_entity(subdev); list_add_tail(&e->list_pipe, &pipe->entities); e->pipe = pipe; switch (e->type) { case VSP1_ENTITY_RPF: rwpf = to_rwpf(subdev); pipe->inputs[rwpf->entity.index] = rwpf; rwpf->video->pipe_index = ++pipe->num_inputs; break; case VSP1_ENTITY_WPF: rwpf = to_rwpf(subdev); pipe->output = rwpf; rwpf->video->pipe_index = 0; break; case VSP1_ENTITY_LIF: pipe->lif = e; break; case VSP1_ENTITY_BRU: case VSP1_ENTITY_BRS: pipe->brx = e; break; case VSP1_ENTITY_HGO: pipe->hgo = e; break; case VSP1_ENTITY_HGT: pipe->hgt = e; break; default: break; } } media_graph_walk_cleanup(&graph); /* We need one output and at least one input. */ if (pipe->num_inputs == 0 || !pipe->output) return -EPIPE; /* * Follow links downstream for each input and make sure the graph * contains no loop and that all branches end at the output WPF. */ for (i = 0; i < video->vsp1->info->rpf_count; ++i) { if (!pipe->inputs[i]) continue; ret = vsp1_video_pipeline_build_branch(pipe, pipe->inputs[i], pipe->output); if (ret < 0) return ret; } return 0; } static int vsp1_video_pipeline_init(struct vsp1_pipeline *pipe, struct vsp1_video *video) { vsp1_pipeline_init(pipe); pipe->frame_end = vsp1_video_pipeline_frame_end; return vsp1_video_pipeline_build(pipe, video); } static struct vsp1_pipeline *vsp1_video_pipeline_get(struct vsp1_video *video) { struct vsp1_pipeline *pipe; int ret; /* * Get a pipeline object for the video node. If a pipeline has already * been allocated just increment its reference count and return it. * Otherwise allocate a new pipeline and initialize it, it will be freed * when the last reference is released. 
*/ if (!video->rwpf->entity.pipe) { pipe = kzalloc(sizeof(*pipe), GFP_KERNEL); if (!pipe) return ERR_PTR(-ENOMEM); ret = vsp1_video_pipeline_init(pipe, video); if (ret < 0) { vsp1_pipeline_reset(pipe); kfree(pipe); return ERR_PTR(ret); } } else { pipe = video->rwpf->entity.pipe; kref_get(&pipe->kref); } return pipe; } static void vsp1_video_pipeline_release(struct kref *kref) { struct vsp1_pipeline *pipe = container_of(kref, typeof(*pipe), kref); vsp1_pipeline_reset(pipe); kfree(pipe); } static void vsp1_video_pipeline_put(struct vsp1_pipeline *pipe) { struct media_device *mdev = &pipe->output->entity.vsp1->media_dev; mutex_lock(&mdev->graph_mutex); kref_put(&pipe->kref, vsp1_video_pipeline_release); mutex_unlock(&mdev->graph_mutex); } /* ----------------------------------------------------------------------------- * videobuf2 Queue Operations */ static int vsp1_video_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct vsp1_video *video = vb2_get_drv_priv(vq); const struct v4l2_pix_format_mplane *format = &video->rwpf->format; unsigned int i; if (*nplanes) { if (*nplanes != format->num_planes) return -EINVAL; for (i = 0; i < *nplanes; i++) if (sizes[i] < format->plane_fmt[i].sizeimage) return -EINVAL; return 0; } *nplanes = format->num_planes; for (i = 0; i < format->num_planes; ++i) sizes[i] = format->plane_fmt[i].sizeimage; return 0; } static int vsp1_video_buffer_prepare(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue); struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf); const struct v4l2_pix_format_mplane *format = &video->rwpf->format; unsigned int i; if (vb->num_planes < format->num_planes) return -EINVAL; for (i = 0; i < vb->num_planes; ++i) { buf->mem.addr[i] = vb2_dma_contig_plane_dma_addr(vb, i); if (vb2_plane_size(vb, i) < format->plane_fmt[i].sizeimage) return -EINVAL; } 
for ( ; i < 3; ++i) buf->mem.addr[i] = 0; return 0; } static void vsp1_video_buffer_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue); struct vsp1_pipeline *pipe = video->rwpf->entity.pipe; struct vsp1_vb2_buffer *buf = to_vsp1_vb2_buffer(vbuf); unsigned long flags; bool empty; spin_lock_irqsave(&video->irqlock, flags); empty = list_empty(&video->irqqueue); list_add_tail(&buf->queue, &video->irqqueue); spin_unlock_irqrestore(&video->irqlock, flags); if (!empty) return; spin_lock_irqsave(&pipe->irqlock, flags); video->rwpf->mem = buf->mem; pipe->buffers_ready |= 1 << video->pipe_index; if (vb2_start_streaming_called(&video->queue) && vsp1_pipeline_ready(pipe)) vsp1_video_pipeline_run(pipe); spin_unlock_irqrestore(&pipe->irqlock, flags); } static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe) { struct vsp1_entity *entity; int ret; /* Determine this pipelines sizes for image partitioning support. */ ret = vsp1_video_pipeline_setup_partitions(pipe); if (ret < 0) return ret; if (pipe->uds) { struct vsp1_uds *uds = to_uds(&pipe->uds->subdev); /* * If a BRU or BRS is present in the pipeline before the UDS, * the alpha component doesn't need to be scaled as the BRU and * BRS output alpha value is fixed to 255. Otherwise we need to * scale the alpha component only when available at the input * RPF. */ if (pipe->uds_input->type == VSP1_ENTITY_BRU || pipe->uds_input->type == VSP1_ENTITY_BRS) { uds->scale_alpha = false; } else { struct vsp1_rwpf *rpf = to_rwpf(&pipe->uds_input->subdev); uds->scale_alpha = rpf->fmtinfo->alpha; } } /* * Compute and cache the stream configuration into a body. The cached * body will be added to the display list by vsp1_video_pipeline_run() * whenever the pipeline needs to be fully reconfigured. 
*/ pipe->stream_config = vsp1_dlm_dl_body_get(pipe->output->dlm); if (!pipe->stream_config) return -ENOMEM; list_for_each_entry(entity, &pipe->entities, list_pipe) { vsp1_entity_route_setup(entity, pipe, pipe->stream_config); vsp1_entity_configure_stream(entity, pipe, NULL, pipe->stream_config); } return 0; } static void vsp1_video_release_buffers(struct vsp1_video *video) { struct vsp1_vb2_buffer *buffer; unsigned long flags; /* Remove all buffers from the IRQ queue. */ spin_lock_irqsave(&video->irqlock, flags); list_for_each_entry(buffer, &video->irqqueue, queue) vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR); INIT_LIST_HEAD(&video->irqqueue); spin_unlock_irqrestore(&video->irqlock, flags); } static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe) { lockdep_assert_held(&pipe->lock); /* Release any cached configuration from our output video. */ vsp1_dl_body_put(pipe->stream_config); pipe->stream_config = NULL; pipe->configured = false; /* Release our partition table allocation. */ kfree(pipe->part_table); pipe->part_table = NULL; } static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count) { struct vsp1_video *video = vb2_get_drv_priv(vq); struct vsp1_pipeline *pipe = video->rwpf->entity.pipe; bool start_pipeline = false; unsigned long flags; int ret; mutex_lock(&pipe->lock); if (pipe->stream_count == pipe->num_inputs) { ret = vsp1_video_setup_pipeline(pipe); if (ret < 0) { vsp1_video_release_buffers(video); vsp1_video_cleanup_pipeline(pipe); mutex_unlock(&pipe->lock); return ret; } start_pipeline = true; } pipe->stream_count++; mutex_unlock(&pipe->lock); /* * vsp1_pipeline_ready() is not sufficient to establish that all streams * are prepared and the pipeline is configured, as multiple streams * can race through streamon with buffers already queued; Therefore we * don't even attempt to start the pipeline until the last stream has * called through here. 
*/ if (!start_pipeline) return 0; spin_lock_irqsave(&pipe->irqlock, flags); if (vsp1_pipeline_ready(pipe)) vsp1_video_pipeline_run(pipe); spin_unlock_irqrestore(&pipe->irqlock, flags); return 0; } static void vsp1_video_stop_streaming(struct vb2_queue *vq) { struct vsp1_video *video = vb2_get_drv_priv(vq); struct vsp1_pipeline *pipe = video->rwpf->entity.pipe; unsigned long flags; int ret; /* * Clear the buffers ready flag to make sure the device won't be started * by a QBUF on the video node on the other side of the pipeline. */ spin_lock_irqsave(&video->irqlock, flags); pipe->buffers_ready &= ~(1 << video->pipe_index); spin_unlock_irqrestore(&video->irqlock, flags); mutex_lock(&pipe->lock); if (--pipe->stream_count == pipe->num_inputs) { /* Stop the pipeline. */ ret = vsp1_pipeline_stop(pipe); if (ret == -ETIMEDOUT) dev_err(video->vsp1->dev, "pipeline stop timeout\n"); vsp1_video_cleanup_pipeline(pipe); } mutex_unlock(&pipe->lock); video_device_pipeline_stop(&video->video); vsp1_video_release_buffers(video); vsp1_video_pipeline_put(pipe); } static const struct vb2_ops vsp1_video_queue_qops = { .queue_setup = vsp1_video_queue_setup, .buf_prepare = vsp1_video_buffer_prepare, .buf_queue = vsp1_video_buffer_queue, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, .start_streaming = vsp1_video_start_streaming, .stop_streaming = vsp1_video_stop_streaming, }; /* ----------------------------------------------------------------------------- * V4L2 ioctls */ static int vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct v4l2_fh *vfh = file->private_data; struct vsp1_video *video = to_vsp1_video(vfh->vdev); cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE; strscpy(cap->driver, "vsp1", sizeof(cap->driver)); strscpy(cap->card, video->video.name, sizeof(cap->card)); return 0; } static int vsp1_video_get_format(struct file *file, void *fh, 
struct v4l2_format *format) { struct v4l2_fh *vfh = file->private_data; struct vsp1_video *video = to_vsp1_video(vfh->vdev); if (format->type != video->queue.type) return -EINVAL; mutex_lock(&video->lock); format->fmt.pix_mp = video->rwpf->format; mutex_unlock(&video->lock); return 0; } static int vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format) { struct v4l2_fh *vfh = file->private_data; struct vsp1_video *video = to_vsp1_video(vfh->vdev); if (format->type != video->queue.type) return -EINVAL; return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL); } static int vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format) { struct v4l2_fh *vfh = file->private_data; struct vsp1_video *video = to_vsp1_video(vfh->vdev); const struct vsp1_format_info *info; int ret; if (format->type != video->queue.type) return -EINVAL; ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info); if (ret < 0) return ret; mutex_lock(&video->lock); if (vb2_is_busy(&video->queue)) { ret = -EBUSY; goto done; } video->rwpf->format = format->fmt.pix_mp; video->rwpf->fmtinfo = info; done: mutex_unlock(&video->lock); return ret; } static int vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) { struct v4l2_fh *vfh = file->private_data; struct vsp1_video *video = to_vsp1_video(vfh->vdev); struct media_device *mdev = &video->vsp1->media_dev; struct vsp1_pipeline *pipe; int ret; if (vb2_queue_is_busy(&video->queue, file)) return -EBUSY; /* * Get a pipeline for the video node and start streaming on it. No link * touching an entity in the pipeline can be activated or deactivated * once streaming is started. 
*/ mutex_lock(&mdev->graph_mutex); pipe = vsp1_video_pipeline_get(video); if (IS_ERR(pipe)) { mutex_unlock(&mdev->graph_mutex); return PTR_ERR(pipe); } ret = __video_device_pipeline_start(&video->video, &pipe->pipe); if (ret < 0) { mutex_unlock(&mdev->graph_mutex); goto err_pipe; } mutex_unlock(&mdev->graph_mutex); /* * Verify that the configured format matches the output of the connected * subdev. */ ret = vsp1_video_verify_format(video); if (ret < 0) goto err_stop; /* Start the queue. */ ret = vb2_streamon(&video->queue, type); if (ret < 0) goto err_stop; return 0; err_stop: video_device_pipeline_stop(&video->video); err_pipe: vsp1_video_pipeline_put(pipe); return ret; } static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = { .vidioc_querycap = vsp1_video_querycap, .vidioc_g_fmt_vid_cap_mplane = vsp1_video_get_format, .vidioc_s_fmt_vid_cap_mplane = vsp1_video_set_format, .vidioc_try_fmt_vid_cap_mplane = vsp1_video_try_format, .vidioc_g_fmt_vid_out_mplane = vsp1_video_get_format, .vidioc_s_fmt_vid_out_mplane = vsp1_video_set_format, .vidioc_try_fmt_vid_out_mplane = vsp1_video_try_format, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_streamon = vsp1_video_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, }; /* ----------------------------------------------------------------------------- * V4L2 File Operations */ static int vsp1_video_open(struct file *file) { struct vsp1_video *video = video_drvdata(file); struct v4l2_fh *vfh; int ret = 0; vfh = kzalloc(sizeof(*vfh), GFP_KERNEL); if (vfh == NULL) return -ENOMEM; v4l2_fh_init(vfh, &video->video); v4l2_fh_add(vfh); file->private_data = vfh; ret = vsp1_device_get(video->vsp1); if (ret < 0) { v4l2_fh_del(vfh); v4l2_fh_exit(vfh); kfree(vfh); } return ret; } static int 
vsp1_video_release(struct file *file)
{
	struct vsp1_video *video = video_drvdata(file);

	/* Let videobuf2 tear down the file handle and any owned queue state. */
	vb2_fop_release(file);

	/* Balances the vsp1_device_get() call made in vsp1_video_open(). */
	vsp1_device_put(video->vsp1);

	return 0;
}

static const struct v4l2_file_operations vsp1_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = vsp1_video_open,
	.release = vsp1_video_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

/* -----------------------------------------------------------------------------
 * Suspend and Resume
 */

/*
 * Stop all running pipelines for system suspend. Pipelines are only flagged
 * as stopping here; the actual stop completes asynchronously and is waited
 * for with a 500ms timeout per pipeline.
 */
void vsp1_video_suspend(struct vsp1_device *vsp1)
{
	unsigned long flags;
	unsigned int i;
	int ret;

	/*
	 * To avoid increasing the system suspend time needlessly, loop over the
	 * pipelines twice, first to set them all to the stopping state, and
	 * then to wait for the stop to complete.
	 */
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf = vsp1->wpf[i];
		struct vsp1_pipeline *pipe;

		if (wpf == NULL)
			continue;

		/* WPFs without an active pipeline have nothing to stop. */
		pipe = wpf->entity.pipe;
		if (pipe == NULL)
			continue;

		spin_lock_irqsave(&pipe->irqlock, flags);
		if (pipe->state == VSP1_PIPELINE_RUNNING)
			pipe->state = VSP1_PIPELINE_STOPPING;
		spin_unlock_irqrestore(&pipe->irqlock, flags);
	}

	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf = vsp1->wpf[i];
		struct vsp1_pipeline *pipe;

		if (wpf == NULL)
			continue;

		pipe = wpf->entity.pipe;
		if (pipe == NULL)
			continue;

		/* Wait up to 500ms for each pipeline to signal it has stopped. */
		ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
					 msecs_to_jiffies(500));
		if (ret == 0)
			dev_warn(vsp1->dev, "pipeline %u stop timeout\n",
				 wpf->entity.index);
	}
}

/*
 * Restart the pipelines stopped by vsp1_video_suspend() once the system
 * resumes, forcing a full hardware reconfiguration first.
 */
void vsp1_video_resume(struct vsp1_device *vsp1)
{
	unsigned long flags;
	unsigned int i;

	/* Resume all running pipelines. */
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf = vsp1->wpf[i];
		struct vsp1_pipeline *pipe;

		if (wpf == NULL)
			continue;

		pipe = wpf->entity.pipe;
		if (pipe == NULL)
			continue;

		/*
		 * The hardware may have been reset during a suspend and will
		 * need a full reconfiguration.
*/ pipe->configured = false; spin_lock_irqsave(&pipe->irqlock, flags); if (vsp1_pipeline_ready(pipe)) vsp1_video_pipeline_run(pipe); spin_unlock_irqrestore(&pipe->irqlock, flags); } } /* ----------------------------------------------------------------------------- * Initialization and Cleanup */ struct vsp1_video *vsp1_video_create(struct vsp1_device *vsp1, struct vsp1_rwpf *rwpf) { struct vsp1_video *video; const char *direction; int ret; video = devm_kzalloc(vsp1->dev, sizeof(*video), GFP_KERNEL); if (!video) return ERR_PTR(-ENOMEM); rwpf->video = video; video->vsp1 = vsp1; video->rwpf = rwpf; if (rwpf->entity.type == VSP1_ENTITY_RPF) { direction = "input"; video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; video->pad.flags = MEDIA_PAD_FL_SOURCE; video->video.vfl_dir = VFL_DIR_TX; video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING; } else { direction = "output"; video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; video->pad.flags = MEDIA_PAD_FL_SINK; video->video.vfl_dir = VFL_DIR_RX; video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING; } mutex_init(&video->lock); spin_lock_init(&video->irqlock); INIT_LIST_HEAD(&video->irqqueue); /* Initialize the media entity... */ ret = media_entity_pads_init(&video->video.entity, 1, &video->pad); if (ret < 0) return ERR_PTR(ret); /* ... and the format ... */ rwpf->format.pixelformat = VSP1_VIDEO_DEF_FORMAT; rwpf->format.width = VSP1_VIDEO_DEF_WIDTH; rwpf->format.height = VSP1_VIDEO_DEF_HEIGHT; __vsp1_video_try_format(video, &rwpf->format, &rwpf->fmtinfo); /* ... and the video node... 
*/ video->video.v4l2_dev = &video->vsp1->v4l2_dev; video->video.fops = &vsp1_video_fops; snprintf(video->video.name, sizeof(video->video.name), "%s %s", rwpf->entity.subdev.name, direction); video->video.vfl_type = VFL_TYPE_VIDEO; video->video.release = video_device_release_empty; video->video.ioctl_ops = &vsp1_video_ioctl_ops; video_set_drvdata(&video->video, video); video->queue.type = video->type; video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; video->queue.lock = &video->lock; video->queue.drv_priv = video; video->queue.buf_struct_size = sizeof(struct vsp1_vb2_buffer); video->queue.ops = &vsp1_video_queue_qops; video->queue.mem_ops = &vb2_dma_contig_memops; video->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; video->queue.dev = video->vsp1->bus_master; ret = vb2_queue_init(&video->queue); if (ret < 0) { dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n"); goto error; } /* ... and register the video device. */ video->video.queue = &video->queue; ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1); if (ret < 0) { dev_err(video->vsp1->dev, "failed to register video device\n"); goto error; } return video; error: vsp1_video_cleanup(video); return ERR_PTR(ret); } void vsp1_video_cleanup(struct vsp1_video *video) { if (video_is_registered(&video->video)) video_unregister_device(&video->video); media_entity_cleanup(&video->video.entity); }
linux-master
drivers/media/platform/renesas/vsp1/vsp1_video.c
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_entity.c -- R-Car VSP1 Base Entity * * Copyright (C) 2013-2014 Renesas Electronics Corporation * * Contact: Laurent Pinchart ([email protected]) */ #include <linux/device.h> #include <linux/gfp.h> #include <media/media-entity.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-subdev.h> #include "vsp1.h" #include "vsp1_dl.h" #include "vsp1_entity.h" #include "vsp1_pipe.h" #include "vsp1_rwpf.h" void vsp1_entity_route_setup(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_body *dlb) { struct vsp1_entity *source; u32 route; if (entity->type == VSP1_ENTITY_HGO) { u32 smppt; /* * The HGO is a special case, its routing is configured on the * sink pad. */ source = entity->sources[0]; smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT) | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT); vsp1_dl_body_write(dlb, VI6_DPR_HGO_SMPPT, smppt); return; } else if (entity->type == VSP1_ENTITY_HGT) { u32 smppt; /* * The HGT is a special case, its routing is configured on the * sink pad. */ source = entity->sources[0]; smppt = (pipe->output->entity.index << VI6_DPR_SMPPT_TGW_SHIFT) | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT); vsp1_dl_body_write(dlb, VI6_DPR_HGT_SMPPT, smppt); return; } source = entity; if (source->route->reg == 0) return; route = source->sink->route->inputs[source->sink_pad]; /* * The ILV and BRS share the same data path route. The extra BRSSEL bit * selects between the ILV and BRS. 
*/ if (source->type == VSP1_ENTITY_BRS) route |= VI6_DPR_ROUTE_BRSSEL; vsp1_dl_body_write(dlb, source->route->reg, route); } void vsp1_entity_configure_stream(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { if (entity->ops->configure_stream) entity->ops->configure_stream(entity, pipe, dl, dlb); } void vsp1_entity_configure_frame(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { if (entity->ops->configure_frame) entity->ops->configure_frame(entity, pipe, dl, dlb); } void vsp1_entity_configure_partition(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { if (entity->ops->configure_partition) entity->ops->configure_partition(entity, pipe, dl, dlb); } /* ----------------------------------------------------------------------------- * V4L2 Subdevice Operations */ /** * vsp1_entity_get_pad_config - Get the pad configuration for an entity * @entity: the entity * @sd_state: the TRY state * @which: configuration selector (ACTIVE or TRY) * * When called with which set to V4L2_SUBDEV_FORMAT_ACTIVE the caller must hold * the entity lock to access the returned configuration. * * Return the pad configuration requested by the which argument. The TRY * configuration is passed explicitly to the function through the cfg argument * and simply returned when requested. The ACTIVE configuration comes from the * entity structure. 
*/ struct v4l2_subdev_state * vsp1_entity_get_pad_config(struct vsp1_entity *entity, struct v4l2_subdev_state *sd_state, enum v4l2_subdev_format_whence which) { switch (which) { case V4L2_SUBDEV_FORMAT_ACTIVE: return entity->config; case V4L2_SUBDEV_FORMAT_TRY: default: return sd_state; } } /** * vsp1_entity_get_pad_format - Get a pad format from storage for an entity * @entity: the entity * @sd_state: the state storage * @pad: the pad number * * Return the format stored in the given configuration for an entity's pad. The * configuration can be an ACTIVE or TRY configuration. */ struct v4l2_mbus_framefmt * vsp1_entity_get_pad_format(struct vsp1_entity *entity, struct v4l2_subdev_state *sd_state, unsigned int pad) { return v4l2_subdev_get_try_format(&entity->subdev, sd_state, pad); } /** * vsp1_entity_get_pad_selection - Get a pad selection from storage for entity * @entity: the entity * @sd_state: the state storage * @pad: the pad number * @target: the selection target * * Return the selection rectangle stored in the given configuration for an * entity's pad. The configuration can be an ACTIVE or TRY configuration. The * selection target can be COMPOSE or CROP. */ struct v4l2_rect * vsp1_entity_get_pad_selection(struct vsp1_entity *entity, struct v4l2_subdev_state *sd_state, unsigned int pad, unsigned int target) { switch (target) { case V4L2_SEL_TGT_COMPOSE: return v4l2_subdev_get_try_compose(&entity->subdev, sd_state, pad); case V4L2_SEL_TGT_CROP: return v4l2_subdev_get_try_crop(&entity->subdev, sd_state, pad); default: return NULL; } } /* * vsp1_entity_init_cfg - Initialize formats on all pads * @subdev: V4L2 subdevice * @cfg: V4L2 subdev pad configuration * * Initialize all pad formats with default values in the given pad config. This * function can be used as a handler for the subdev pad::init_cfg operation. 
*/ int vsp1_entity_init_cfg(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state) { unsigned int pad; for (pad = 0; pad < subdev->entity.num_pads - 1; ++pad) { struct v4l2_subdev_format format = { .pad = pad, .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE, }; v4l2_subdev_call(subdev, pad, set_fmt, sd_state, &format); } return 0; } /* * vsp1_subdev_get_pad_format - Subdev pad get_fmt handler * @subdev: V4L2 subdevice * @cfg: V4L2 subdev pad configuration * @fmt: V4L2 subdev format * * This function implements the subdev get_fmt pad operation. It can be used as * a direct drop-in for the operation handler. */ int vsp1_subdev_get_pad_format(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct vsp1_entity *entity = to_vsp1_entity(subdev); struct v4l2_subdev_state *config; config = vsp1_entity_get_pad_config(entity, sd_state, fmt->which); if (!config) return -EINVAL; mutex_lock(&entity->lock); fmt->format = *vsp1_entity_get_pad_format(entity, config, fmt->pad); mutex_unlock(&entity->lock); return 0; } /* * vsp1_subdev_enum_mbus_code - Subdev pad enum_mbus_code handler * @subdev: V4L2 subdevice * @cfg: V4L2 subdev pad configuration * @code: Media bus code enumeration * @codes: Array of supported media bus codes * @ncodes: Number of supported media bus codes * * This function implements the subdev enum_mbus_code pad operation for entities * that do not support format conversion. It enumerates the given supported * media bus codes on the sink pad and reports a source pad format identical to * the sink pad. 
 */
int vsp1_subdev_enum_mbus_code(struct v4l2_subdev *subdev,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_mbus_code_enum *code,
			       const unsigned int *codes, unsigned int ncodes)
{
	struct vsp1_entity *entity = to_vsp1_entity(subdev);

	if (code->pad == 0) {
		/* Sink pad: enumerate the supplied table of supported codes. */
		if (code->index >= ncodes)
			return -EINVAL;

		code->code = codes[code->index];
	} else {
		struct v4l2_subdev_state *config;
		struct v4l2_mbus_framefmt *format;

		/*
		 * The entity can't perform format conversion, the sink format
		 * is always identical to the source format.
		 */
		if (code->index)
			return -EINVAL;

		config = vsp1_entity_get_pad_config(entity, sd_state,
						    code->which);
		if (!config)
			return -EINVAL;

		/* Lock protects the ACTIVE configuration against set_fmt. */
		mutex_lock(&entity->lock);
		format = vsp1_entity_get_pad_format(entity, config, 0);
		code->code = format->code;
		mutex_unlock(&entity->lock);
	}

	return 0;
}

/*
 * vsp1_subdev_enum_frame_size - Subdev pad enum_frame_size handler
 * @subdev: V4L2 subdevice
 * @sd_state: V4L2 subdev state
 * @fse: Frame size enumeration
 * @min_width: Minimum image width
 * @min_height: Minimum image height
 * @max_width: Maximum image width
 * @max_height: Maximum image height
 *
 * This function implements the subdev enum_frame_size pad operation for
 * entities that do not support scaling or cropping. It reports the given
 * minimum and maximum frame width and height on the sink pad, and a fixed
 * source pad size identical to the sink pad.
*/ int vsp1_subdev_enum_frame_size(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse, unsigned int min_width, unsigned int min_height, unsigned int max_width, unsigned int max_height) { struct vsp1_entity *entity = to_vsp1_entity(subdev); struct v4l2_subdev_state *config; struct v4l2_mbus_framefmt *format; int ret = 0; config = vsp1_entity_get_pad_config(entity, sd_state, fse->which); if (!config) return -EINVAL; format = vsp1_entity_get_pad_format(entity, config, fse->pad); mutex_lock(&entity->lock); if (fse->index || fse->code != format->code) { ret = -EINVAL; goto done; } if (fse->pad == 0) { fse->min_width = min_width; fse->max_width = max_width; fse->min_height = min_height; fse->max_height = max_height; } else { /* * The size on the source pad are fixed and always identical to * the size on the sink pad. */ fse->min_width = format->width; fse->max_width = format->width; fse->min_height = format->height; fse->max_height = format->height; } done: mutex_unlock(&entity->lock); return ret; } /* * vsp1_subdev_set_pad_format - Subdev pad set_fmt handler * @subdev: V4L2 subdevice * @cfg: V4L2 subdev pad configuration * @fmt: V4L2 subdev format * @codes: Array of supported media bus codes * @ncodes: Number of supported media bus codes * @min_width: Minimum image width * @min_height: Minimum image height * @max_width: Maximum image width * @max_height: Maximum image height * * This function implements the subdev set_fmt pad operation for entities that * do not support scaling or cropping. It defaults to the first supplied media * bus code if the requested code isn't supported, clamps the size to the * supplied minimum and maximum, and propagates the sink pad format to the * source pad. 
*/ int vsp1_subdev_set_pad_format(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt, const unsigned int *codes, unsigned int ncodes, unsigned int min_width, unsigned int min_height, unsigned int max_width, unsigned int max_height) { struct vsp1_entity *entity = to_vsp1_entity(subdev); struct v4l2_subdev_state *config; struct v4l2_mbus_framefmt *format; struct v4l2_rect *selection; unsigned int i; int ret = 0; mutex_lock(&entity->lock); config = vsp1_entity_get_pad_config(entity, sd_state, fmt->which); if (!config) { ret = -EINVAL; goto done; } format = vsp1_entity_get_pad_format(entity, config, fmt->pad); if (fmt->pad == entity->source_pad) { /* The output format can't be modified. */ fmt->format = *format; goto done; } /* * Default to the first media bus code if the requested format is not * supported. */ for (i = 0; i < ncodes; ++i) { if (fmt->format.code == codes[i]) break; } format->code = i < ncodes ? codes[i] : codes[0]; format->width = clamp_t(unsigned int, fmt->format.width, min_width, max_width); format->height = clamp_t(unsigned int, fmt->format.height, min_height, max_height); format->field = V4L2_FIELD_NONE; format->colorspace = V4L2_COLORSPACE_SRGB; fmt->format = *format; /* Propagate the format to the source pad. */ format = vsp1_entity_get_pad_format(entity, config, entity->source_pad); *format = fmt->format; /* Reset the crop and compose rectangles. 
*/ selection = vsp1_entity_get_pad_selection(entity, config, fmt->pad, V4L2_SEL_TGT_CROP); selection->left = 0; selection->top = 0; selection->width = format->width; selection->height = format->height; selection = vsp1_entity_get_pad_selection(entity, config, fmt->pad, V4L2_SEL_TGT_COMPOSE); selection->left = 0; selection->top = 0; selection->width = format->width; selection->height = format->height; done: mutex_unlock(&entity->lock); return ret; } /* ----------------------------------------------------------------------------- * Media Operations */ static inline struct vsp1_entity * media_entity_to_vsp1_entity(struct media_entity *entity) { return container_of(entity, struct vsp1_entity, subdev.entity); } static int vsp1_entity_link_setup_source(const struct media_pad *source_pad, const struct media_pad *sink_pad, u32 flags) { struct vsp1_entity *source; source = media_entity_to_vsp1_entity(source_pad->entity); if (!source->route) return 0; if (flags & MEDIA_LNK_FL_ENABLED) { struct vsp1_entity *sink = media_entity_to_vsp1_entity(sink_pad->entity); /* * Fan-out is limited to one for the normal data path plus * optional HGO and HGT. We ignore the HGO and HGT here. */ if (sink->type != VSP1_ENTITY_HGO && sink->type != VSP1_ENTITY_HGT) { if (source->sink) return -EBUSY; source->sink = sink; source->sink_pad = sink_pad->index; } } else { source->sink = NULL; source->sink_pad = 0; } return 0; } static int vsp1_entity_link_setup_sink(const struct media_pad *source_pad, const struct media_pad *sink_pad, u32 flags) { struct vsp1_entity *sink; struct vsp1_entity *source; sink = media_entity_to_vsp1_entity(sink_pad->entity); source = media_entity_to_vsp1_entity(source_pad->entity); if (flags & MEDIA_LNK_FL_ENABLED) { /* Fan-in is limited to one. 
*/ if (sink->sources[sink_pad->index]) return -EBUSY; sink->sources[sink_pad->index] = source; } else { sink->sources[sink_pad->index] = NULL; } return 0; } int vsp1_entity_link_setup(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags) { if (local->flags & MEDIA_PAD_FL_SOURCE) return vsp1_entity_link_setup_source(local, remote, flags); else return vsp1_entity_link_setup_sink(remote, local, flags); } /** * vsp1_entity_remote_pad - Find the pad at the remote end of a link * @pad: Pad at the local end of the link * * Search for a remote pad connected to the given pad by iterating over all * links originating or terminating at that pad until an enabled link is found. * * Our link setup implementation guarantees that the output fan-out will not be * higher than one for the data pipelines, except for the links to the HGO and * HGT that can be enabled in addition to a regular data link. When traversing * outgoing links this function ignores HGO and HGT entities and should thus be * used in place of the generic media_pad_remote_pad_first() function to * traverse data pipelines. * * Return a pointer to the pad at the remote end of the first found enabled * link, or NULL if no enabled link has been found. */ struct media_pad *vsp1_entity_remote_pad(struct media_pad *pad) { struct media_link *link; list_for_each_entry(link, &pad->entity->links, list) { struct vsp1_entity *entity; if (!(link->flags & MEDIA_LNK_FL_ENABLED)) continue; /* If we're the sink the source will never be an HGO or HGT. */ if (link->sink == pad) return link->source; if (link->source != pad) continue; /* If the sink isn't a subdevice it can't be an HGO or HGT. 
*/ if (!is_media_entity_v4l2_subdev(link->sink->entity)) return link->sink; entity = media_entity_to_vsp1_entity(link->sink->entity); if (entity->type != VSP1_ENTITY_HGO && entity->type != VSP1_ENTITY_HGT) return link->sink; } return NULL; } /* ----------------------------------------------------------------------------- * Initialization */ #define VSP1_ENTITY_ROUTE(ent) \ { VSP1_ENTITY_##ent, 0, VI6_DPR_##ent##_ROUTE, \ { VI6_DPR_NODE_##ent }, VI6_DPR_NODE_##ent } #define VSP1_ENTITY_ROUTE_RPF(idx) \ { VSP1_ENTITY_RPF, idx, VI6_DPR_RPF_ROUTE(idx), \ { 0, }, VI6_DPR_NODE_RPF(idx) } #define VSP1_ENTITY_ROUTE_UDS(idx) \ { VSP1_ENTITY_UDS, idx, VI6_DPR_UDS_ROUTE(idx), \ { VI6_DPR_NODE_UDS(idx) }, VI6_DPR_NODE_UDS(idx) } #define VSP1_ENTITY_ROUTE_UIF(idx) \ { VSP1_ENTITY_UIF, idx, VI6_DPR_UIF_ROUTE(idx), \ { VI6_DPR_NODE_UIF(idx) }, VI6_DPR_NODE_UIF(idx) } #define VSP1_ENTITY_ROUTE_WPF(idx) \ { VSP1_ENTITY_WPF, idx, 0, \ { VI6_DPR_NODE_WPF(idx) }, VI6_DPR_NODE_WPF(idx) } static const struct vsp1_route vsp1_routes[] = { { VSP1_ENTITY_BRS, 0, VI6_DPR_ILV_BRS_ROUTE, { VI6_DPR_NODE_BRS_IN(0), VI6_DPR_NODE_BRS_IN(1) }, 0 }, { VSP1_ENTITY_BRU, 0, VI6_DPR_BRU_ROUTE, { VI6_DPR_NODE_BRU_IN(0), VI6_DPR_NODE_BRU_IN(1), VI6_DPR_NODE_BRU_IN(2), VI6_DPR_NODE_BRU_IN(3), VI6_DPR_NODE_BRU_IN(4) }, VI6_DPR_NODE_BRU_OUT }, VSP1_ENTITY_ROUTE(CLU), { VSP1_ENTITY_HGO, 0, 0, { 0, }, 0 }, { VSP1_ENTITY_HGT, 0, 0, { 0, }, 0 }, VSP1_ENTITY_ROUTE(HSI), VSP1_ENTITY_ROUTE(HST), { VSP1_ENTITY_LIF, 0, 0, { 0, }, 0 }, { VSP1_ENTITY_LIF, 1, 0, { 0, }, 0 }, VSP1_ENTITY_ROUTE(LUT), VSP1_ENTITY_ROUTE_RPF(0), VSP1_ENTITY_ROUTE_RPF(1), VSP1_ENTITY_ROUTE_RPF(2), VSP1_ENTITY_ROUTE_RPF(3), VSP1_ENTITY_ROUTE_RPF(4), VSP1_ENTITY_ROUTE(SRU), VSP1_ENTITY_ROUTE_UDS(0), VSP1_ENTITY_ROUTE_UDS(1), VSP1_ENTITY_ROUTE_UDS(2), VSP1_ENTITY_ROUTE_UIF(0), /* Named UIF4 in the documentation */ VSP1_ENTITY_ROUTE_UIF(1), /* Named UIF5 in the documentation */ VSP1_ENTITY_ROUTE_WPF(0), VSP1_ENTITY_ROUTE_WPF(1), 
VSP1_ENTITY_ROUTE_WPF(2), VSP1_ENTITY_ROUTE_WPF(3), }; int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity, const char *name, unsigned int num_pads, const struct v4l2_subdev_ops *ops, u32 function) { static struct lock_class_key key; struct v4l2_subdev *subdev; unsigned int i; int ret; for (i = 0; i < ARRAY_SIZE(vsp1_routes); ++i) { if (vsp1_routes[i].type == entity->type && vsp1_routes[i].index == entity->index) { entity->route = &vsp1_routes[i]; break; } } if (i == ARRAY_SIZE(vsp1_routes)) return -EINVAL; mutex_init(&entity->lock); entity->vsp1 = vsp1; entity->source_pad = num_pads - 1; /* Allocate and initialize pads. */ entity->pads = devm_kcalloc(vsp1->dev, num_pads, sizeof(*entity->pads), GFP_KERNEL); if (entity->pads == NULL) return -ENOMEM; for (i = 0; i < num_pads - 1; ++i) entity->pads[i].flags = MEDIA_PAD_FL_SINK; entity->sources = devm_kcalloc(vsp1->dev, max(num_pads - 1, 1U), sizeof(*entity->sources), GFP_KERNEL); if (entity->sources == NULL) return -ENOMEM; /* Single-pad entities only have a sink. */ entity->pads[num_pads - 1].flags = num_pads > 1 ? MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK; /* Initialize the media entity. */ ret = media_entity_pads_init(&entity->subdev.entity, num_pads, entity->pads); if (ret < 0) return ret; /* Initialize the V4L2 subdev. */ subdev = &entity->subdev; v4l2_subdev_init(subdev, ops); subdev->entity.function = function; subdev->entity.ops = &vsp1->media_ops; subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(subdev->name, sizeof(subdev->name), "%s %s", dev_name(vsp1->dev), name); vsp1_entity_init_cfg(subdev, NULL); /* * Allocate the pad configuration to store formats and selection * rectangles. */ /* * FIXME: Drop this call, drivers are not supposed to use * __v4l2_subdev_state_alloc(). 
	 */
	entity->config = __v4l2_subdev_state_alloc(&entity->subdev,
						   "vsp1:config->lock", &key);
	if (IS_ERR(entity->config)) {
		/* Undo the media_entity_pads_init() done above. */
		media_entity_cleanup(&entity->subdev.entity);
		return PTR_ERR(entity->config);
	}

	return 0;
}

/*
 * vsp1_entity_destroy - Release all resources allocated by vsp1_entity_init()
 * and by the entity type's own initialization.
 */
void vsp1_entity_destroy(struct vsp1_entity *entity)
{
	/* Give the entity type a chance to release its own resources first. */
	if (entity->ops && entity->ops->destroy)
		entity->ops->destroy(entity);
	if (entity->subdev.ctrl_handler)
		v4l2_ctrl_handler_free(entity->subdev.ctrl_handler);
	__v4l2_subdev_state_free(entity->config);
	media_entity_cleanup(&entity->subdev.entity);
}
linux-master
drivers/media/platform/renesas/vsp1/vsp1_entity.c
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_hgt.c -- R-Car VSP1 Histogram Generator 2D * * Copyright (C) 2016 Renesas Electronics Corporation * * Contact: Niklas Söderlund ([email protected]) */ #include <linux/device.h> #include <linux/gfp.h> #include <media/v4l2-subdev.h> #include <media/videobuf2-vmalloc.h> #include "vsp1.h" #include "vsp1_dl.h" #include "vsp1_hgt.h" #define HGT_DATA_SIZE ((2 + 6 * 32) * 4) /* ----------------------------------------------------------------------------- * Device Access */ static inline u32 vsp1_hgt_read(struct vsp1_hgt *hgt, u32 reg) { return vsp1_read(hgt->histo.entity.vsp1, reg); } static inline void vsp1_hgt_write(struct vsp1_hgt *hgt, struct vsp1_dl_body *dlb, u32 reg, u32 data) { vsp1_dl_body_write(dlb, reg, data); } /* ----------------------------------------------------------------------------- * Frame End Handler */ void vsp1_hgt_frame_end(struct vsp1_entity *entity) { struct vsp1_hgt *hgt = to_hgt(&entity->subdev); struct vsp1_histogram_buffer *buf; unsigned int m; unsigned int n; u32 *data; buf = vsp1_histogram_buffer_get(&hgt->histo); if (!buf) return; data = buf->addr; *data++ = vsp1_hgt_read(hgt, VI6_HGT_MAXMIN); *data++ = vsp1_hgt_read(hgt, VI6_HGT_SUM); for (m = 0; m < 6; ++m) for (n = 0; n < 32; ++n) *data++ = vsp1_hgt_read(hgt, VI6_HGT_HISTO(m, n)); vsp1_histogram_buffer_complete(&hgt->histo, buf, HGT_DATA_SIZE); } /* ----------------------------------------------------------------------------- * Controls */ #define V4L2_CID_VSP1_HGT_HUE_AREAS (V4L2_CID_USER_BASE | 0x1001) static int hgt_hue_areas_try_ctrl(struct v4l2_ctrl *ctrl) { const u8 *values = ctrl->p_new.p_u8; unsigned int i; /* * The hardware has constraints on the hue area boundaries beyond the * control min, max and step. The values must match one of the following * expressions. 
* * 0L <= 0U <= 1L <= 1U <= 2L <= 2U <= 3L <= 3U <= 4L <= 4U <= 5L <= 5U * 0U <= 1L <= 1U <= 2L <= 2U <= 3L <= 3U <= 4L <= 4U <= 5L <= 5U <= 0L * * Start by verifying the common part... */ for (i = 1; i < (HGT_NUM_HUE_AREAS * 2) - 1; ++i) { if (values[i] > values[i+1]) return -EINVAL; } /* ... and handle 0L separately. */ if (values[0] > values[1] && values[11] > values[0]) return -EINVAL; return 0; } static int hgt_hue_areas_s_ctrl(struct v4l2_ctrl *ctrl) { struct vsp1_hgt *hgt = container_of(ctrl->handler, struct vsp1_hgt, ctrls); memcpy(hgt->hue_areas, ctrl->p_new.p_u8, sizeof(hgt->hue_areas)); return 0; } static const struct v4l2_ctrl_ops hgt_hue_areas_ctrl_ops = { .try_ctrl = hgt_hue_areas_try_ctrl, .s_ctrl = hgt_hue_areas_s_ctrl, }; static const struct v4l2_ctrl_config hgt_hue_areas = { .ops = &hgt_hue_areas_ctrl_ops, .id = V4L2_CID_VSP1_HGT_HUE_AREAS, .name = "Boundary Values for Hue Area", .type = V4L2_CTRL_TYPE_U8, .min = 0, .max = 255, .def = 0, .step = 1, .dims = { 12 }, }; /* ----------------------------------------------------------------------------- * VSP1 Entity Operations */ static void hgt_configure_stream(struct vsp1_entity *entity, struct vsp1_pipeline *pipe, struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb) { struct vsp1_hgt *hgt = to_hgt(&entity->subdev); struct v4l2_rect *compose; struct v4l2_rect *crop; unsigned int hratio; unsigned int vratio; u8 lower; u8 upper; unsigned int i; crop = vsp1_entity_get_pad_selection(entity, entity->config, HISTO_PAD_SINK, V4L2_SEL_TGT_CROP); compose = vsp1_entity_get_pad_selection(entity, entity->config, HISTO_PAD_SINK, V4L2_SEL_TGT_COMPOSE); vsp1_hgt_write(hgt, dlb, VI6_HGT_REGRST, VI6_HGT_REGRST_RCLEA); vsp1_hgt_write(hgt, dlb, VI6_HGT_OFFSET, (crop->left << VI6_HGT_OFFSET_HOFFSET_SHIFT) | (crop->top << VI6_HGT_OFFSET_VOFFSET_SHIFT)); vsp1_hgt_write(hgt, dlb, VI6_HGT_SIZE, (crop->width << VI6_HGT_SIZE_HSIZE_SHIFT) | (crop->height << VI6_HGT_SIZE_VSIZE_SHIFT)); mutex_lock(hgt->ctrls.lock); for (i = 0; i < 
HGT_NUM_HUE_AREAS; ++i) { lower = hgt->hue_areas[i*2 + 0]; upper = hgt->hue_areas[i*2 + 1]; vsp1_hgt_write(hgt, dlb, VI6_HGT_HUE_AREA(i), (lower << VI6_HGT_HUE_AREA_LOWER_SHIFT) | (upper << VI6_HGT_HUE_AREA_UPPER_SHIFT)); } mutex_unlock(hgt->ctrls.lock); hratio = crop->width * 2 / compose->width / 3; vratio = crop->height * 2 / compose->height / 3; vsp1_hgt_write(hgt, dlb, VI6_HGT_MODE, (hratio << VI6_HGT_MODE_HRATIO_SHIFT) | (vratio << VI6_HGT_MODE_VRATIO_SHIFT)); } static const struct vsp1_entity_operations hgt_entity_ops = { .configure_stream = hgt_configure_stream, .destroy = vsp1_histogram_destroy, }; /* ----------------------------------------------------------------------------- * Initialization and Cleanup */ static const unsigned int hgt_mbus_formats[] = { MEDIA_BUS_FMT_AHSV8888_1X32, }; struct vsp1_hgt *vsp1_hgt_create(struct vsp1_device *vsp1) { struct vsp1_hgt *hgt; int ret; hgt = devm_kzalloc(vsp1->dev, sizeof(*hgt), GFP_KERNEL); if (hgt == NULL) return ERR_PTR(-ENOMEM); /* Initialize the control handler. */ v4l2_ctrl_handler_init(&hgt->ctrls, 1); v4l2_ctrl_new_custom(&hgt->ctrls, &hgt_hue_areas, NULL); hgt->histo.entity.subdev.ctrl_handler = &hgt->ctrls; /* Initialize the video device and queue for statistics data. */ ret = vsp1_histogram_init(vsp1, &hgt->histo, VSP1_ENTITY_HGT, "hgt", &hgt_entity_ops, hgt_mbus_formats, ARRAY_SIZE(hgt_mbus_formats), HGT_DATA_SIZE, V4L2_META_FMT_VSP1_HGT); if (ret < 0) { vsp1_entity_destroy(&hgt->histo.entity); return ERR_PTR(ret); } v4l2_ctrl_handler_setup(&hgt->ctrls); return hgt; }
linux-master
drivers/media/platform/renesas/vsp1/vsp1_hgt.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_brx.c -- R-Car VSP1 Blend ROP Unit (BRU and BRS)
 *
 * Copyright (C) 2013 Renesas Corporation
 *
 * Contact: Laurent Pinchart ([email protected])
 */

#include <linux/device.h>
#include <linux/gfp.h>

#include <media/v4l2-subdev.h>

#include "vsp1.h"
#include "vsp1_brx.h"
#include "vsp1_dl.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_video.h"

#define BRX_MIN_SIZE 1U
#define BRX_MAX_SIZE 8190U

/* -----------------------------------------------------------------------------
 * Device Access
 */

/* Queue a register write in the display list body, offset by the BRU/BRS base. */
static inline void vsp1_brx_write(struct vsp1_brx *brx,
				  struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	vsp1_dl_body_write(dlb, brx->base + reg, data);
}

/* -----------------------------------------------------------------------------
 * Controls
 */

static int brx_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct vsp1_brx *brx =
		container_of(ctrl->handler, struct vsp1_brx, ctrls);

	switch (ctrl->id) {
	case V4L2_CID_BG_COLOR:
		brx->bgcolor = ctrl->val;
		break;
	}

	return 0;
}

static const struct v4l2_ctrl_ops brx_ctrl_ops = {
	.s_ctrl = brx_s_ctrl,
};

/* -----------------------------------------------------------------------------
 * V4L2 Subdevice Operations
 */

/*
 * The BRx can't perform format conversion, all sink and source formats must be
 * identical. We pick the format on the first sink pad (pad 0) and propagate it
 * to all other pads.
 */

static int brx_enum_mbus_code(struct v4l2_subdev *subdev,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_mbus_code_enum *code)
{
	static const unsigned int codes[] = {
		MEDIA_BUS_FMT_ARGB8888_1X32,
		MEDIA_BUS_FMT_AYUV8_1X32,
	};

	return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes,
					  ARRAY_SIZE(codes));
}

static int brx_enum_frame_size(struct v4l2_subdev *subdev,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_frame_size_enum *fse)
{
	if (fse->index)
		return -EINVAL;

	if (fse->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
	    fse->code != MEDIA_BUS_FMT_AYUV8_1X32)
		return -EINVAL;

	fse->min_width = BRX_MIN_SIZE;
	fse->max_width = BRX_MAX_SIZE;
	fse->min_height = BRX_MIN_SIZE;
	fse->max_height = BRX_MAX_SIZE;

	return 0;
}

/* Return the try-state compose rectangle for the given pad. */
static struct v4l2_rect *brx_get_compose(struct vsp1_brx *brx,
					 struct v4l2_subdev_state *sd_state,
					 unsigned int pad)
{
	return v4l2_subdev_get_try_compose(&brx->entity.subdev, sd_state, pad);
}

/* Adjust the requested format to the BRx constraints (see comment above). */
static void brx_try_format(struct vsp1_brx *brx,
			   struct v4l2_subdev_state *sd_state,
			   unsigned int pad, struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_mbus_framefmt *format;

	switch (pad) {
	case BRX_PAD_SINK(0):
		/* Default to YUV if the requested format is not supported. */
		if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
		    fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
			fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;
		break;

	default:
		/* The BRx can't perform format conversion. */
		format = vsp1_entity_get_pad_format(&brx->entity, sd_state,
						    BRX_PAD_SINK(0));
		fmt->code = format->code;
		break;
	}

	fmt->width = clamp(fmt->width, BRX_MIN_SIZE, BRX_MAX_SIZE);
	fmt->height = clamp(fmt->height, BRX_MIN_SIZE, BRX_MAX_SIZE);
	fmt->field = V4L2_FIELD_NONE;
	fmt->colorspace = V4L2_COLORSPACE_SRGB;
}

static int brx_set_format(struct v4l2_subdev *subdev,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_format *fmt)
{
	struct vsp1_brx *brx = to_brx(subdev);
	struct v4l2_subdev_state *config;
	struct v4l2_mbus_framefmt *format;
	int ret = 0;

	mutex_lock(&brx->entity.lock);

	config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
					    fmt->which);
	if (!config) {
		ret = -EINVAL;
		goto done;
	}

	brx_try_format(brx, config, fmt->pad, &fmt->format);

	format = vsp1_entity_get_pad_format(&brx->entity, config, fmt->pad);
	*format = fmt->format;

	/* Reset the compose rectangle. */
	if (fmt->pad != brx->entity.source_pad) {
		struct v4l2_rect *compose;

		compose = brx_get_compose(brx, config, fmt->pad);
		compose->left = 0;
		compose->top = 0;
		compose->width = format->width;
		compose->height = format->height;
	}

	/* Propagate the format code to all pads. */
	if (fmt->pad == BRX_PAD_SINK(0)) {
		unsigned int i;

		for (i = 0; i <= brx->entity.source_pad; ++i) {
			format = vsp1_entity_get_pad_format(&brx->entity,
							    config, i);
			format->code = fmt->format.code;
		}
	}

done:
	mutex_unlock(&brx->entity.lock);
	return ret;
}

static int brx_get_selection(struct v4l2_subdev *subdev,
			     struct v4l2_subdev_state *sd_state,
			     struct v4l2_subdev_selection *sel)
{
	struct vsp1_brx *brx = to_brx(subdev);
	struct v4l2_subdev_state *config;

	/* Compose rectangles only exist on the sink pads. */
	if (sel->pad == brx->entity.source_pad)
		return -EINVAL;

	switch (sel->target) {
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = BRX_MAX_SIZE;
		sel->r.height = BRX_MAX_SIZE;
		return 0;

	case V4L2_SEL_TGT_COMPOSE:
		config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
						    sel->which);
		if (!config)
			return -EINVAL;

		mutex_lock(&brx->entity.lock);
		sel->r = *brx_get_compose(brx, config, sel->pad);
		mutex_unlock(&brx->entity.lock);
		return 0;

	default:
		return -EINVAL;
	}
}

static int brx_set_selection(struct v4l2_subdev *subdev,
			     struct v4l2_subdev_state *sd_state,
			     struct v4l2_subdev_selection *sel)
{
	struct vsp1_brx *brx = to_brx(subdev);
	struct v4l2_subdev_state *config;
	struct v4l2_mbus_framefmt *format;
	struct v4l2_rect *compose;
	int ret = 0;

	if (sel->pad == brx->entity.source_pad)
		return -EINVAL;

	if (sel->target != V4L2_SEL_TGT_COMPOSE)
		return -EINVAL;

	mutex_lock(&brx->entity.lock);

	config = vsp1_entity_get_pad_config(&brx->entity, sd_state,
					    sel->which);
	if (!config) {
		ret = -EINVAL;
		goto done;
	}

	/*
	 * The compose rectangle top left corner must be inside the output
	 * frame.
	 */
	format = vsp1_entity_get_pad_format(&brx->entity, config,
					    brx->entity.source_pad);
	sel->r.left = clamp_t(unsigned int, sel->r.left, 0,
			      format->width - 1);
	sel->r.top = clamp_t(unsigned int, sel->r.top, 0,
			     format->height - 1);

	/*
	 * Scaling isn't supported, the compose rectangle size must be identical
	 * to the sink format size.
	 */
	format = vsp1_entity_get_pad_format(&brx->entity, config, sel->pad);
	sel->r.width = format->width;
	sel->r.height = format->height;

	compose = brx_get_compose(brx, config, sel->pad);
	*compose = sel->r;

done:
	mutex_unlock(&brx->entity.lock);
	return ret;
}

static const struct v4l2_subdev_pad_ops brx_pad_ops = {
	.init_cfg = vsp1_entity_init_cfg,
	.enum_mbus_code = brx_enum_mbus_code,
	.enum_frame_size = brx_enum_frame_size,
	.get_fmt = vsp1_subdev_get_pad_format,
	.set_fmt = brx_set_format,
	.get_selection = brx_get_selection,
	.set_selection = brx_set_selection,
};

static const struct v4l2_subdev_ops brx_ops = {
	.pad = &brx_pad_ops,
};

/* -----------------------------------------------------------------------------
 * VSP1 Entity Operations
 */

static void brx_configure_stream(struct vsp1_entity *entity,
				 struct vsp1_pipeline *pipe,
				 struct vsp1_dl_list *dl,
				 struct vsp1_dl_body *dlb)
{
	struct vsp1_brx *brx = to_brx(&entity->subdev);
	struct v4l2_mbus_framefmt *format;
	unsigned int flags;
	unsigned int i;

	format = vsp1_entity_get_pad_format(&brx->entity, brx->entity.config,
					    brx->entity.source_pad);

	/*
	 * The hardware is extremely flexible but we have no userspace API to
	 * expose all the parameters, nor is it clear whether we would have use
	 * cases for all the supported modes. Let's just hardcode the parameters
	 * to sane default values for now.
	 */

	/*
	 * Disable dithering and enable color data normalization unless the
	 * format at the pipeline output is premultiplied.
	 */
	flags = pipe->output ? pipe->output->format.flags : 0;
	vsp1_brx_write(brx, dlb, VI6_BRU_INCTRL,
		       flags & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA ?
		       0 : VI6_BRU_INCTRL_NRM);

	/*
	 * Set the background position to cover the whole output image and
	 * configure its color.
	 */
	vsp1_brx_write(brx, dlb, VI6_BRU_VIRRPF_SIZE,
		       (format->width << VI6_BRU_VIRRPF_SIZE_HSIZE_SHIFT) |
		       (format->height << VI6_BRU_VIRRPF_SIZE_VSIZE_SHIFT));
	vsp1_brx_write(brx, dlb, VI6_BRU_VIRRPF_LOC, 0);

	vsp1_brx_write(brx, dlb, VI6_BRU_VIRRPF_COL, brx->bgcolor |
		       (0xff << VI6_BRU_VIRRPF_COL_A_SHIFT));

	/*
	 * Route BRU input 1 as SRC input to the ROP unit and configure the ROP
	 * unit with a NOP operation to make BRU input 1 available as the
	 * Blend/ROP unit B SRC input. Only needed for BRU, the BRS has no ROP
	 * unit.
	 */
	if (entity->type == VSP1_ENTITY_BRU)
		vsp1_brx_write(brx, dlb, VI6_BRU_ROP,
			       VI6_BRU_ROP_DSTSEL_BRUIN(1) |
			       VI6_BRU_ROP_CROP(VI6_ROP_NOP) |
			       VI6_BRU_ROP_AROP(VI6_ROP_NOP));

	for (i = 0; i < brx->entity.source_pad; ++i) {
		bool premultiplied = false;
		u32 ctrl = 0;

		/*
		 * Configure all Blend/ROP units corresponding to an enabled BRx
		 * input for alpha blending. Blend/ROP units corresponding to
		 * disabled BRx inputs are used in ROP NOP mode to ignore the
		 * SRC input.
		 */
		if (brx->inputs[i].rpf) {
			ctrl |= VI6_BRU_CTRL_RBC;

			premultiplied = brx->inputs[i].rpf->format.flags &
					V4L2_PIX_FMT_FLAG_PREMUL_ALPHA;
		} else {
			ctrl |= VI6_BRU_CTRL_CROP(VI6_ROP_NOP) |
				VI6_BRU_CTRL_AROP(VI6_ROP_NOP);
		}

		/*
		 * Select the virtual RPF as the Blend/ROP unit A DST input to
		 * serve as a background color.
		 */
		if (i == 0)
			ctrl |= VI6_BRU_CTRL_DSTSEL_VRPF;

		/*
		 * Route inputs 0 to 3 as SRC inputs to Blend/ROP units A to D
		 * in that order. In the BRU the Blend/ROP unit B SRC is
		 * hardwired to the ROP unit output, the corresponding register
		 * bits must be set to 0. The BRS has no ROP unit and doesn't
		 * need any special processing.
		 */
		if (!(entity->type == VSP1_ENTITY_BRU && i == 1))
			ctrl |= VI6_BRU_CTRL_SRCSEL_BRUIN(i);

		vsp1_brx_write(brx, dlb, VI6_BRU_CTRL(i), ctrl);

		/*
		 * Hardcode the blending formula to
		 *
		 *	DSTc = DSTc * (1 - SRCa) + SRCc * SRCa
		 *	DSTa = DSTa * (1 - SRCa) + SRCa
		 *
		 * when the SRC input isn't premultiplied, and to
		 *
		 *	DSTc = DSTc * (1 - SRCa) + SRCc
		 *	DSTa = DSTa * (1 - SRCa) + SRCa
		 *
		 * otherwise.
		 */
		vsp1_brx_write(brx, dlb, VI6_BRU_BLD(i),
			       VI6_BRU_BLD_CCMDX_255_SRC_A |
			       (premultiplied ? VI6_BRU_BLD_CCMDY_COEFY :
						VI6_BRU_BLD_CCMDY_SRC_A) |
			       VI6_BRU_BLD_ACMDX_255_SRC_A |
			       VI6_BRU_BLD_ACMDY_COEFY |
			       (0xff << VI6_BRU_BLD_COEFY_SHIFT));
	}
}

static const struct vsp1_entity_operations brx_entity_ops = {
	.configure_stream = brx_configure_stream,
};

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */

/*
 * Allocate and initialize a BRU or BRS instance, register its media entity
 * (BRU: num_bru_inputs + 1 pads, BRS: 3 pads) and create the background
 * color control. Returns the BRx or an ERR_PTR() on failure.
 */
struct vsp1_brx *vsp1_brx_create(struct vsp1_device *vsp1,
				 enum vsp1_entity_type type)
{
	struct vsp1_brx *brx;
	unsigned int num_pads;
	const char *name;
	int ret;

	brx = devm_kzalloc(vsp1->dev, sizeof(*brx), GFP_KERNEL);
	if (brx == NULL)
		return ERR_PTR(-ENOMEM);

	brx->base = type == VSP1_ENTITY_BRU ? VI6_BRU_BASE : VI6_BRS_BASE;
	brx->entity.ops = &brx_entity_ops;
	brx->entity.type = type;

	if (type == VSP1_ENTITY_BRU) {
		num_pads = vsp1->info->num_bru_inputs + 1;
		name = "bru";
	} else {
		num_pads = 3;
		name = "brs";
	}

	ret = vsp1_entity_init(vsp1, &brx->entity, name, num_pads, &brx_ops,
			       MEDIA_ENT_F_PROC_VIDEO_COMPOSER);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Initialize the control handler. */
	v4l2_ctrl_handler_init(&brx->ctrls, 1);
	v4l2_ctrl_new_std(&brx->ctrls, &brx_ctrl_ops, V4L2_CID_BG_COLOR,
			  0, 0xffffff, 1, 0);

	brx->bgcolor = 0;

	brx->entity.subdev.ctrl_handler = &brx->ctrls;

	if (brx->ctrls.error) {
		dev_err(vsp1->dev, "%s: failed to initialize controls\n",
			name);
		ret = brx->ctrls.error;
		vsp1_entity_destroy(&brx->entity);
		return ERR_PTR(ret);
	}

	return brx;
}
linux-master
drivers/media/platform/renesas/vsp1/vsp1_brx.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_pipe.c -- R-Car VSP1 Pipeline
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart ([email protected])
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/wait.h>

#include <media/media-entity.h>
#include <media/v4l2-subdev.h>

#include "vsp1.h"
#include "vsp1_brx.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
#include "vsp1_hgo.h"
#include "vsp1_hgt.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_uds.h"

/* -----------------------------------------------------------------------------
 * Helper Functions
 */

/*
 * Table of supported memory formats. Field order follows struct
 * vsp1_format_info (declared elsewhere): 4CC, media bus code, hardware
 * format code, data swap flags, plane count, per-plane bpp, two component
 * swap flags, horizontal/vertical subsampling, alpha support.
 */
static const struct vsp1_format_info vsp1_video_formats[] = {
	{ V4L2_PIX_FMT_RGB332, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGB_332, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 8, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_ARGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_ARGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_XRGB444, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_RGBA444, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGBA_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_RGBX444, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGBX_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_ABGR444, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_ABGR_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_XBGR444, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_ABGR_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_BGRA444, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_BGRA_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_BGRX444, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_BGRA_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_ARGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_ARGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_XRGB555, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_RGBA555, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGBA_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_RGBX555, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGBX_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_ABGR555, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_ABGR_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_XBGR555, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_ABGR_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_BGRA555, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_BGRA_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_BGRX555, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_BGRA_5551, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_RGB565, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS,
	  1, { 16, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_BGR24, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_BGR_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 24, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_RGB24, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 24, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_ABGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
	  1, { 32, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_XBGR32, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
	  1, { 32, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_BGRA32, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
	  1, { 32, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_BGRX32, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
	  1, { 32, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_RGBA32, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 32, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_RGBX32, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGBA_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 32, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_ARGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 32, 0, 0 }, false, false, 1, 1, true },
	{ V4L2_PIX_FMT_XRGB32, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 32, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_HSV24, MEDIA_BUS_FMT_AHSV8888_1X32,
	  VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 24, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_HSV32, MEDIA_BUS_FMT_AHSV8888_1X32,
	  VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 32, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_RGBX1010102, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGB10_RGB10A2_A2RGB10, VI6_RPF_DSWAP_P_LLS |
	  VI6_RPF_DSWAP_P_LWS,
	  1, { 32, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_RGBA1010102, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGB10_RGB10A2_A2RGB10, VI6_RPF_DSWAP_P_LLS |
	  VI6_RPF_DSWAP_P_LWS,
	  1, { 32, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_ARGB2101010, MEDIA_BUS_FMT_ARGB8888_1X32,
	  VI6_FMT_RGB10_RGB10A2_A2RGB10, VI6_RPF_DSWAP_P_LLS |
	  VI6_RPF_DSWAP_P_LWS,
	  1, { 32, 0, 0 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 16, 0, 0 }, false, false, 2, 1, false },
	{ V4L2_PIX_FMT_VYUY, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 16, 0, 0 }, false, true, 2, 1, false },
	{ V4L2_PIX_FMT_YUYV, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 16, 0, 0 }, true, false, 2, 1, false },
	{ V4L2_PIX_FMT_YVYU, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  1, { 16, 0, 0 }, true, true, 2, 1, false },
	{ V4L2_PIX_FMT_NV12M, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  2, { 8, 16, 0 }, false, false, 2, 2, false },
	{ V4L2_PIX_FMT_NV21M, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  2, { 8, 16, 0 }, false, true, 2, 2, false },
	{ V4L2_PIX_FMT_NV16M, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  2, { 8, 16, 0 }, false, false, 2, 1, false },
	{ V4L2_PIX_FMT_NV61M, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  2, { 8, 16, 0 }, false, true, 2, 1, false },
	{ V4L2_PIX_FMT_YUV420M, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  3, { 8, 8, 8 }, false, false, 2, 2, false },
	{ V4L2_PIX_FMT_YVU420M, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  3, { 8, 8, 8 }, false, true, 2, 2, false },
	{ V4L2_PIX_FMT_YUV422M, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_Y_U_V_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  3, { 8, 8, 8 }, false, false, 2, 1, false },
	{ V4L2_PIX_FMT_YVU422M, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_Y_U_V_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  3, { 8, 8, 8 }, false, true, 2, 1, false },
	{ V4L2_PIX_FMT_YUV444M, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_Y_U_V_444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  3, { 8, 8, 8 }, false, false, 1, 1, false },
	{ V4L2_PIX_FMT_YVU444M, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_Y_U_V_444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
	  VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
	  3, { 8, 8, 8 }, false, true, 1, 1, false },
	{ V4L2_PIX_FMT_Y210, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
	  1, { 32, 0, 0 }, false, false, 2, 1, false },
	{ V4L2_PIX_FMT_Y212, MEDIA_BUS_FMT_AYUV8_1X32,
	  VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
	  1, { 32, 0, 0 }, false, false, 2, 1, false },
};

/**
 * vsp1_get_format_info - Retrieve format information for a 4CC
 * @vsp1: the VSP1 device
 * @fourcc: the format 4CC
 *
 * Return a pointer to the format information structure corresponding to the
 * given V4L2 format 4CC, or NULL if no corresponding format can be found.
 */
const struct vsp1_format_info *vsp1_get_format_info(struct vsp1_device *vsp1,
						    u32 fourcc)
{
	unsigned int i;

	/* Special case, the VYUY and HSV formats are supported on Gen2 only. */
	if (vsp1->info->gen != 2) {
		switch (fourcc) {
		case V4L2_PIX_FMT_VYUY:
		case V4L2_PIX_FMT_HSV24:
		case V4L2_PIX_FMT_HSV32:
			return NULL;
		}
	}

	for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
		const struct vsp1_format_info *info = &vsp1_video_formats[i];

		if (info->fourcc == fourcc)
			return info;
	}

	return NULL;
}

/* -----------------------------------------------------------------------------
 * Pipeline Management
 */

/*
 * Detach all entities from the pipeline and reset its bookkeeping to the
 * stopped, empty state.
 */
void vsp1_pipeline_reset(struct vsp1_pipeline *pipe)
{
	struct vsp1_entity *entity;
	unsigned int i;

	if (pipe->brx) {
		struct vsp1_brx *brx = to_brx(&pipe->brx->subdev);

		for (i = 0; i < ARRAY_SIZE(brx->inputs); ++i)
			brx->inputs[i].rpf = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i)
		pipe->inputs[i] = NULL;

	pipe->output = NULL;

	list_for_each_entry(entity, &pipe->entities, list_pipe)
		entity->pipe = NULL;

	INIT_LIST_HEAD(&pipe->entities);
	pipe->state = VSP1_PIPELINE_STOPPED;
	pipe->buffers_ready = 0;
	pipe->num_inputs = 0;
	pipe->brx = NULL;
	pipe->hgo = NULL;
	pipe->hgt = NULL;
	pipe->lif = NULL;
	pipe->uds = NULL;
}

void vsp1_pipeline_init(struct vsp1_pipeline *pipe)
{
	mutex_init(&pipe->lock);
	spin_lock_init(&pipe->irqlock);
	init_waitqueue_head(&pipe->wq);
	kref_init(&pipe->kref);

	INIT_LIST_HEAD(&pipe->entities);
	pipe->state = VSP1_PIPELINE_STOPPED;
}

/* Must be called with the pipe irqlock held. */
void vsp1_pipeline_run(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;

	if (pipe->state == VSP1_PIPELINE_STOPPED) {
		vsp1_write(vsp1, VI6_CMD(pipe->output->entity.index),
			   VI6_CMD_STRCMD);
		pipe->state = VSP1_PIPELINE_RUNNING;
	}

	pipe->buffers_ready = 0;
}

/* Return true if the pipeline state is STOPPED (checked under the irqlock). */
bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe)
{
	unsigned long flags;
	bool stopped;

	spin_lock_irqsave(&pipe->irqlock, flags);
	stopped = pipe->state == VSP1_PIPELINE_STOPPED;
	spin_unlock_irqrestore(&pipe->irqlock, flags);

	return stopped;
}

/*
 * Stop the pipeline, disconnect its DPR routing and stop the WPF stream.
 * Returns 0 on success or -ETIMEDOUT if the pipeline didn't stop in time.
 */
int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
{
	struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
	struct vsp1_entity *entity;
	unsigned long flags;
	int ret;

	if (pipe->lif) {
		/*
		 * When using display lists in continuous frame mode the only
		 * way to stop the pipeline is to reset the hardware.
		 */
		ret = vsp1_reset_wpf(vsp1, pipe->output->entity.index);
		if (ret == 0) {
			spin_lock_irqsave(&pipe->irqlock, flags);
			pipe->state = VSP1_PIPELINE_STOPPED;
			spin_unlock_irqrestore(&pipe->irqlock, flags);
		}
	} else {
		/* Otherwise just request a stop and wait. */
		spin_lock_irqsave(&pipe->irqlock, flags);
		if (pipe->state == VSP1_PIPELINE_RUNNING)
			pipe->state = VSP1_PIPELINE_STOPPING;
		spin_unlock_irqrestore(&pipe->irqlock, flags);

		ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
					 msecs_to_jiffies(500));
		ret = ret == 0 ? -ETIMEDOUT : 0;
	}

	list_for_each_entry(entity, &pipe->entities, list_pipe) {
		if (entity->route && entity->route->reg)
			vsp1_write(vsp1, entity->route->reg,
				   VI6_DPR_NODE_UNUSED);
	}

	if (pipe->hgo)
		vsp1_write(vsp1, VI6_DPR_HGO_SMPPT,
			   (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
			   (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));

	if (pipe->hgt)
		vsp1_write(vsp1, VI6_DPR_HGT_SMPPT,
			   (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
			   (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));

	v4l2_subdev_call(&pipe->output->entity.subdev, video, s_stream, 0);

	return ret;
}

/*
 * Return true when buffers are queued on all RPF inputs and, for mem-to-mem
 * pipelines (no LIF), on the WPF output as well.
 */
bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
{
	unsigned int mask;

	mask = ((1 << pipe->num_inputs) - 1) << 1;
	if (!pipe->lif)
		mask |= 1 << 0;

	return pipe->buffers_ready == mask;
}

void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
{
	unsigned int flags;

	if (pipe == NULL)
		return;

	/*
	 * If the DL commit raced with the frame end interrupt, the commit ends
	 * up being postponed by one frame. The returned flags tell whether the
	 * active frame was finished or postponed.
	 */
	flags = vsp1_dlm_irq_frame_end(pipe->output->dlm);

	if (pipe->hgo)
		vsp1_hgo_frame_end(pipe->hgo);

	if (pipe->hgt)
		vsp1_hgt_frame_end(pipe->hgt);

	/*
	 * Regardless of frame completion we still need to notify the pipe
	 * frame_end to account for vblank events.
	 */
	if (pipe->frame_end)
		pipe->frame_end(pipe, flags);

	pipe->sequence++;
}

/*
 * Propagate the alpha value through the pipeline.
 *
 * As the UDS has restricted scaling capabilities when the alpha component needs
 * to be scaled, we disable alpha scaling when the UDS input has a fixed alpha
 * value. The UDS then outputs a fixed alpha value which needs to be programmed
 * from the input RPF alpha.
 */
void vsp1_pipeline_propagate_alpha(struct vsp1_pipeline *pipe,
				   struct vsp1_dl_body *dlb,
				   unsigned int alpha)
{
	if (!pipe->uds)
		return;

	/*
	 * The BRU and BRS background color has a fixed alpha value set to 255,
	 * the output alpha value is thus always equal to 255.
	 */
	if (pipe->uds_input->type == VSP1_ENTITY_BRU ||
	    pipe->uds_input->type == VSP1_ENTITY_BRS)
		alpha = 255;

	vsp1_uds_set_alpha(pipe->uds, dlb, alpha);
}

/*
 * Propagate the partition calculations through the pipeline
 *
 * Work backwards through the pipe, allowing each entity to update the partition
 * parameters based on its configuration, and the entity connected to its
 * source. Each entity must produce the partition required for the previous
 * entity in the pipeline.
 */
void vsp1_pipeline_propagate_partition(struct vsp1_pipeline *pipe,
				       struct vsp1_partition *partition,
				       unsigned int index,
				       struct vsp1_partition_window *window)
{
	struct vsp1_entity *entity;

	list_for_each_entry_reverse(entity, &pipe->entities, list_pipe) {
		if (entity->ops->partition)
			entity->ops->partition(entity, pipe, partition,
					       index, window);
	}
}
linux-master
drivers/media/platform/renesas/vsp1/vsp1_pipe.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_drv.c -- R-Car VSP1 Driver
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart ([email protected])
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/videodev2.h>

#include <media/rcar-fcp.h>
#include <media/v4l2-subdev.h>

#include "vsp1.h"
#include "vsp1_brx.h"
#include "vsp1_clu.h"
#include "vsp1_dl.h"
#include "vsp1_drm.h"
#include "vsp1_hgo.h"
#include "vsp1_hgt.h"
#include "vsp1_hsit.h"
#include "vsp1_lif.h"
#include "vsp1_lut.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_sru.h"
#include "vsp1_uds.h"
#include "vsp1_uif.h"
#include "vsp1_video.h"

/* -----------------------------------------------------------------------------
 * Interrupt Handling
 */

/*
 * Shared interrupt handler: scan every WPF, handle underrun and frame end
 * status bits, and report whether any interrupt was ours.
 */
static irqreturn_t vsp1_irq_handler(int irq, void *data)
{
	u32 mask = VI6_WPF_IRQ_STA_DFE | VI6_WPF_IRQ_STA_FRE |
		   VI6_WPF_IRQ_STA_UND;
	struct vsp1_device *vsp1 = data;
	irqreturn_t ret = IRQ_NONE;
	unsigned int i;
	u32 status;

	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf = vsp1->wpf[i];

		if (wpf == NULL)
			continue;

		status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
		/*
		 * NOTE(review): this writes ~status & mask back, which looks
		 * like a write-0-to-clear acknowledge of the handled status
		 * bits — confirm against the VI6_WPF_IRQ_STA documentation.
		 */
		vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);

		if ((status & VI6_WPF_IRQ_STA_UND) && wpf->entity.pipe) {
			wpf->entity.pipe->underrun_count++;

			dev_warn_ratelimited(vsp1->dev,
					     "Underrun occurred at WPF%u (total underruns %u)\n",
					     i, wpf->entity.pipe->underrun_count);
		}

		if (status & VI6_WPF_IRQ_STA_DFE) {
			vsp1_pipeline_frame_end(wpf->entity.pipe);
			ret = IRQ_HANDLED;
		}
	}

	return ret;
}

/* -----------------------------------------------------------------------------
 * Entities
 */

/*
 * vsp1_create_sink_links - Create links from all sources to the given sink
 *
 * This function creates media links from all valid sources to the given sink
 * pad. Links that would be invalid according to the VSP1 hardware capabilities
 * are skipped. Those include all links
 *
 * - from a UDS to a UDS (UDS entities can't be chained)
 * - from an entity to itself (no loops are allowed)
 *
 * Furthermore, the BRS can't be connected to histogram generators, but no
 * special check is currently needed as all VSP instances that include a BRS
 * have no histogram generator.
 */
static int vsp1_create_sink_links(struct vsp1_device *vsp1,
				  struct vsp1_entity *sink)
{
	struct media_entity *entity = &sink->subdev.entity;
	struct vsp1_entity *source;
	unsigned int pad;
	int ret;

	list_for_each_entry(source, &vsp1->entities, list_dev) {
		u32 flags;

		if (source->type == sink->type)
			continue;

		/* These entity types are never link sources. */
		if (source->type == VSP1_ENTITY_HGO ||
		    source->type == VSP1_ENTITY_HGT ||
		    source->type == VSP1_ENTITY_LIF ||
		    source->type == VSP1_ENTITY_WPF)
			continue;

		/* Enable the RPF[i] -> WPF[i] links by default. */
		flags = source->type == VSP1_ENTITY_RPF &&
			sink->type == VSP1_ENTITY_WPF &&
			source->index == sink->index
		      ? MEDIA_LNK_FL_ENABLED : 0;

		for (pad = 0; pad < entity->num_pads; ++pad) {
			if (!(entity->pads[pad].flags & MEDIA_PAD_FL_SINK))
				continue;

			ret = media_create_pad_link(&source->subdev.entity,
						    source->source_pad,
						    entity, pad, flags);
			if (ret < 0)
				return ret;

			if (flags & MEDIA_LNK_FL_ENABLED)
				source->sink = sink;
		}
	}

	return 0;
}

/*
 * Create all the media links exposed through the userspace API: inter-entity
 * links, histogram capture links, WPF->LIF links and the RPF/WPF video node
 * links.
 */
static int vsp1_uapi_create_links(struct vsp1_device *vsp1)
{
	struct vsp1_entity *entity;
	unsigned int i;
	int ret;

	list_for_each_entry(entity, &vsp1->entities, list_dev) {
		if (entity->type == VSP1_ENTITY_LIF ||
		    entity->type == VSP1_ENTITY_RPF)
			continue;

		ret = vsp1_create_sink_links(vsp1, entity);
		if (ret < 0)
			return ret;
	}

	if (vsp1->hgo) {
		ret = media_create_pad_link(&vsp1->hgo->histo.entity.subdev.entity,
					    HISTO_PAD_SOURCE,
					    &vsp1->hgo->histo.video.entity, 0,
					    MEDIA_LNK_FL_ENABLED |
					    MEDIA_LNK_FL_IMMUTABLE);
		if (ret < 0)
			return ret;
	}

	if (vsp1->hgt) {
		ret = media_create_pad_link(&vsp1->hgt->histo.entity.subdev.entity,
					    HISTO_PAD_SOURCE,
					    &vsp1->hgt->histo.video.entity, 0,
					    MEDIA_LNK_FL_ENABLED |
					    MEDIA_LNK_FL_IMMUTABLE);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < vsp1->info->lif_count; ++i) {
		if (!vsp1->lif[i])
			continue;

		ret = media_create_pad_link(&vsp1->wpf[i]->entity.subdev.entity,
					    RWPF_PAD_SOURCE,
					    &vsp1->lif[i]->entity.subdev.entity,
					    LIF_PAD_SINK, 0);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		struct vsp1_rwpf *rpf = vsp1->rpf[i];

		ret = media_create_pad_link(&rpf->video->video.entity, 0,
					    &rpf->entity.subdev.entity,
					    RWPF_PAD_SINK,
					    MEDIA_LNK_FL_ENABLED |
					    MEDIA_LNK_FL_IMMUTABLE);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		/*
		 * Connect the video device to the WPF. All connections are
		 * immutable.
		 */
		struct vsp1_rwpf *wpf = vsp1->wpf[i];

		ret = media_create_pad_link(&wpf->entity.subdev.entity,
					    RWPF_PAD_SOURCE,
					    &wpf->video->video.entity, 0,
					    MEDIA_LNK_FL_IMMUTABLE |
					    MEDIA_LNK_FL_ENABLED);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * Tear down all entities and video nodes, unregister the V4L2 and media
 * devices, and clean up the DRM pipeline when the userspace API is disabled.
 * Safe to call on a partially created device (used as the error path of
 * vsp1_create_entities()).
 */
static void vsp1_destroy_entities(struct vsp1_device *vsp1)
{
	struct vsp1_entity *entity, *_entity;
	struct vsp1_video *video, *_video;

	list_for_each_entry_safe(entity, _entity, &vsp1->entities, list_dev) {
		list_del(&entity->list_dev);
		vsp1_entity_destroy(entity);
	}

	list_for_each_entry_safe(video, _video, &vsp1->videos, list) {
		list_del(&video->list);
		vsp1_video_cleanup(video);
	}

	v4l2_device_unregister(&vsp1->v4l2_dev);
	if (vsp1->info->uapi)
		media_device_unregister(&vsp1->media_dev);
	media_device_cleanup(&vsp1->media_dev);

	if (!vsp1->info->uapi)
		vsp1_drm_cleanup(vsp1);
}

/*
 * Instantiate all entities present on this VSP instance (according to the
 * device info features and counts), register their subdevs, and either
 * create the userspace API links and device nodes or initialize the DRM
 * pipeline. On any failure everything created so far is destroyed.
 */
static int vsp1_create_entities(struct vsp1_device *vsp1)
{
	struct media_device *mdev = &vsp1->media_dev;
	struct v4l2_device *vdev = &vsp1->v4l2_dev;
	struct vsp1_entity *entity;
	unsigned int i;
	int ret;

	mdev->dev = vsp1->dev;
	mdev->hw_revision = vsp1->version;
	strscpy(mdev->model, vsp1->info->model, sizeof(mdev->model));
	media_device_init(mdev);

	vsp1->media_ops.link_setup = vsp1_entity_link_setup;
	/*
	 * Don't perform link validation when the userspace API is disabled as
	 * the pipeline is configured internally by the driver in that case,
	 * and its configuration can thus be trusted.
	 */
	if (vsp1->info->uapi)
		vsp1->media_ops.link_validate = v4l2_subdev_link_validate;

	vdev->mdev = mdev;
	ret = v4l2_device_register(vsp1->dev, vdev);
	if (ret < 0) {
		dev_err(vsp1->dev, "V4L2 device registration failed (%d)\n",
			ret);
		goto done;
	}

	/* Instantiate all the entities. */
	if (vsp1_feature(vsp1, VSP1_HAS_BRS)) {
		vsp1->brs = vsp1_brx_create(vsp1, VSP1_ENTITY_BRS);
		if (IS_ERR(vsp1->brs)) {
			ret = PTR_ERR(vsp1->brs);
			goto done;
		}

		list_add_tail(&vsp1->brs->entity.list_dev, &vsp1->entities);
	}

	if (vsp1_feature(vsp1, VSP1_HAS_BRU)) {
		vsp1->bru = vsp1_brx_create(vsp1, VSP1_ENTITY_BRU);
		if (IS_ERR(vsp1->bru)) {
			ret = PTR_ERR(vsp1->bru);
			goto done;
		}

		list_add_tail(&vsp1->bru->entity.list_dev, &vsp1->entities);
	}

	if (vsp1_feature(vsp1, VSP1_HAS_CLU)) {
		vsp1->clu = vsp1_clu_create(vsp1);
		if (IS_ERR(vsp1->clu)) {
			ret = PTR_ERR(vsp1->clu);
			goto done;
		}

		list_add_tail(&vsp1->clu->entity.list_dev, &vsp1->entities);
	}

	vsp1->hsi = vsp1_hsit_create(vsp1, true);
	if (IS_ERR(vsp1->hsi)) {
		ret = PTR_ERR(vsp1->hsi);
		goto done;
	}

	list_add_tail(&vsp1->hsi->entity.list_dev, &vsp1->entities);

	vsp1->hst = vsp1_hsit_create(vsp1, false);
	if (IS_ERR(vsp1->hst)) {
		ret = PTR_ERR(vsp1->hst);
		goto done;
	}

	list_add_tail(&vsp1->hst->entity.list_dev, &vsp1->entities);

	/* Histogram generators are only exposed through the userspace API. */
	if (vsp1_feature(vsp1, VSP1_HAS_HGO) && vsp1->info->uapi) {
		vsp1->hgo = vsp1_hgo_create(vsp1);
		if (IS_ERR(vsp1->hgo)) {
			ret = PTR_ERR(vsp1->hgo);
			goto done;
		}

		list_add_tail(&vsp1->hgo->histo.entity.list_dev,
			      &vsp1->entities);
	}

	if (vsp1_feature(vsp1, VSP1_HAS_HGT) && vsp1->info->uapi) {
		vsp1->hgt = vsp1_hgt_create(vsp1);
		if (IS_ERR(vsp1->hgt)) {
			ret = PTR_ERR(vsp1->hgt);
			goto done;
		}

		list_add_tail(&vsp1->hgt->histo.entity.list_dev,
			      &vsp1->entities);
	}

	/*
	 * The LIFs are only supported when used in conjunction with the DU, in
	 * which case the userspace API is disabled. If the userspace API is
	 * enabled skip the LIFs, even when present.
	 */
	if (!vsp1->info->uapi) {
		for (i = 0; i < vsp1->info->lif_count; ++i) {
			struct vsp1_lif *lif;

			lif = vsp1_lif_create(vsp1, i);
			if (IS_ERR(lif)) {
				ret = PTR_ERR(lif);
				goto done;
			}

			vsp1->lif[i] = lif;
			list_add_tail(&lif->entity.list_dev, &vsp1->entities);
		}
	}

	if (vsp1_feature(vsp1, VSP1_HAS_LUT)) {
		vsp1->lut = vsp1_lut_create(vsp1);
		if (IS_ERR(vsp1->lut)) {
			ret = PTR_ERR(vsp1->lut);
			goto done;
		}

		list_add_tail(&vsp1->lut->entity.list_dev, &vsp1->entities);
	}

	for (i = 0; i < vsp1->info->rpf_count; ++i) {
		struct vsp1_rwpf *rpf;

		rpf = vsp1_rpf_create(vsp1, i);
		if (IS_ERR(rpf)) {
			ret = PTR_ERR(rpf);
			goto done;
		}

		vsp1->rpf[i] = rpf;
		list_add_tail(&rpf->entity.list_dev, &vsp1->entities);

		if (vsp1->info->uapi) {
			struct vsp1_video *video = vsp1_video_create(vsp1, rpf);

			if (IS_ERR(video)) {
				ret = PTR_ERR(video);
				goto done;
			}

			list_add_tail(&video->list, &vsp1->videos);
		}
	}

	if (vsp1_feature(vsp1, VSP1_HAS_SRU)) {
		vsp1->sru = vsp1_sru_create(vsp1);
		if (IS_ERR(vsp1->sru)) {
			ret = PTR_ERR(vsp1->sru);
			goto done;
		}

		list_add_tail(&vsp1->sru->entity.list_dev, &vsp1->entities);
	}

	for (i = 0; i < vsp1->info->uds_count; ++i) {
		struct vsp1_uds *uds;

		uds = vsp1_uds_create(vsp1, i);
		if (IS_ERR(uds)) {
			ret = PTR_ERR(uds);
			goto done;
		}

		vsp1->uds[i] = uds;
		list_add_tail(&uds->entity.list_dev, &vsp1->entities);
	}

	for (i = 0; i < vsp1->info->uif_count; ++i) {
		struct vsp1_uif *uif;

		uif = vsp1_uif_create(vsp1, i);
		if (IS_ERR(uif)) {
			ret = PTR_ERR(uif);
			goto done;
		}

		vsp1->uif[i] = uif;
		list_add_tail(&uif->entity.list_dev, &vsp1->entities);
	}

	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		struct vsp1_rwpf *wpf;

		wpf = vsp1_wpf_create(vsp1, i);
		if (IS_ERR(wpf)) {
			ret = PTR_ERR(wpf);
			goto done;
		}

		vsp1->wpf[i] = wpf;
		list_add_tail(&wpf->entity.list_dev, &vsp1->entities);

		if (vsp1->info->uapi) {
			struct vsp1_video *video = vsp1_video_create(vsp1, wpf);

			if (IS_ERR(video)) {
				ret = PTR_ERR(video);
				goto done;
			}

			list_add_tail(&video->list, &vsp1->videos);
		}
	}

	/* Register all subdevs. */
	list_for_each_entry(entity, &vsp1->entities, list_dev) {
		ret = v4l2_device_register_subdev(&vsp1->v4l2_dev,
						  &entity->subdev);
		if (ret < 0)
			goto done;
	}

	/*
	 * Create links and register subdev nodes if the userspace API is
	 * enabled or initialize the DRM pipeline otherwise.
	 */
	if (vsp1->info->uapi) {
		ret = vsp1_uapi_create_links(vsp1);
		if (ret < 0)
			goto done;

		ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev);
		if (ret < 0)
			goto done;

		ret = media_device_register(mdev);
	} else {
		ret = vsp1_drm_init(vsp1);
	}

done:
	if (ret < 0)
		vsp1_destroy_entities(vsp1);

	return ret;
}

/*
 * vsp1_reset_wpf - Software-reset one WPF channel if it is active
 * @vsp1: the VSP device
 * @index: WPF index
 *
 * Returns 0 if the channel was idle or the reset completed, -ETIMEDOUT if
 * the channel is still active after polling for roughly 10-20 ms.
 */
int vsp1_reset_wpf(struct vsp1_device *vsp1, unsigned int index)
{
	unsigned int timeout;
	u32 status;

	status = vsp1_read(vsp1, VI6_STATUS);
	if (!(status & VI6_STATUS_SYS_ACT(index)))
		return 0;

	vsp1_write(vsp1, VI6_SRESET, VI6_SRESET_SRTS(index));
	for (timeout = 10; timeout > 0; --timeout) {
		status = vsp1_read(vsp1, VI6_STATUS);
		if (!(status & VI6_STATUS_SYS_ACT(index)))
			break;

		usleep_range(1000, 2000);
	}

	if (!timeout) {
		dev_err(vsp1->dev, "failed to reset wpf.%u\n", index);
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * Bring the hardware to a known state: reset running WPF channels, program
 * the clock control register, mark all data path router nodes unused,
 * detach the histogram sampling points and set up the display list manager.
 */
static int vsp1_device_init(struct vsp1_device *vsp1)
{
	unsigned int i;
	int ret;

	/* Reset any channel that might be running. */
	for (i = 0; i < vsp1->info->wpf_count; ++i) {
		ret = vsp1_reset_wpf(vsp1, i);
		if (ret < 0)
			return ret;
	}

	vsp1_write(vsp1, VI6_CLK_DCSWT, (8 << VI6_CLK_DCSWT_CSTPW_SHIFT) |
		   (8 << VI6_CLK_DCSWT_CSTRW_SHIFT));

	for (i = 0; i < vsp1->info->rpf_count; ++i)
		vsp1_write(vsp1, VI6_DPR_RPF_ROUTE(i), VI6_DPR_NODE_UNUSED);

	for (i = 0; i < vsp1->info->uds_count; ++i)
		vsp1_write(vsp1, VI6_DPR_UDS_ROUTE(i), VI6_DPR_NODE_UNUSED);

	for (i = 0; i < vsp1->info->uif_count; ++i)
		vsp1_write(vsp1, VI6_DPR_UIF_ROUTE(i), VI6_DPR_NODE_UNUSED);

	vsp1_write(vsp1, VI6_DPR_SRU_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_LUT_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_CLU_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_HST_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_HSI_ROUTE, VI6_DPR_NODE_UNUSED);
	vsp1_write(vsp1, VI6_DPR_BRU_ROUTE, VI6_DPR_NODE_UNUSED);

	if (vsp1_feature(vsp1, VSP1_HAS_BRS))
		vsp1_write(vsp1, VI6_DPR_ILV_BRS_ROUTE, VI6_DPR_NODE_UNUSED);

	vsp1_write(vsp1, VI6_DPR_HGO_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
		   (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
	vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
		   (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));

	vsp1_dlm_setup(vsp1);

	return 0;
}

/* Disable the display and WPF interrupt enable registers. */
static void vsp1_mask_all_interrupts(struct vsp1_device *vsp1)
{
	unsigned int i;

	for (i = 0; i < vsp1->info->lif_count; ++i)
		vsp1_write(vsp1, VI6_DISP_IRQ_ENB(i), 0);
	for (i = 0; i < vsp1->info->wpf_count; ++i)
		vsp1_write(vsp1, VI6_WPF_IRQ_ENB(i), 0);
}

/*
 * vsp1_device_get - Acquire the VSP1 device
 *
 * Make sure the device is not suspended and initialize it if needed.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int vsp1_device_get(struct vsp1_device *vsp1)
{
	return pm_runtime_resume_and_get(vsp1->dev);
}

/*
 * vsp1_device_put - Release the VSP1 device
 *
 * Decrement the VSP1 reference count and cleanup the device if the last
 * reference is released.
 */
void vsp1_device_put(struct vsp1_device *vsp1)
{
	pm_runtime_put_sync(vsp1->dev);
}

/* -----------------------------------------------------------------------------
 * Power Management
 */

static int __maybe_unused vsp1_pm_suspend(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);

	/*
	 * When used as part of a display pipeline, the VSP is stopped and
	 * restarted explicitly by the DU.
	 */
	if (!vsp1->drm)
		vsp1_video_suspend(vsp1);

	pm_runtime_force_suspend(vsp1->dev);

	return 0;
}

static int __maybe_unused vsp1_pm_resume(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);

	pm_runtime_force_resume(vsp1->dev);

	/*
	 * When used as part of a display pipeline, the VSP is stopped and
	 * restarted explicitly by the DU.
	 */
	if (!vsp1->drm)
		vsp1_video_resume(vsp1);

	return 0;
}

static int __maybe_unused vsp1_pm_runtime_suspend(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);

	rcar_fcp_disable(vsp1->fcp);
	reset_control_assert(vsp1->rstc);

	return 0;
}

static int __maybe_unused vsp1_pm_runtime_resume(struct device *dev)
{
	struct vsp1_device *vsp1 = dev_get_drvdata(dev);
	int ret;

	ret = reset_control_deassert(vsp1->rstc);
	if (ret < 0)
		return ret;

	/* vsp1->info is NULL during the first resume from probe. */
	if (vsp1->info) {
		/*
		 * On R-Car Gen2 and RZ/G1, vsp1 register access after deassert
		 * can cause lock-up. It is a special case and needs some delay
		 * to avoid this lock-up.
		 */
		if (vsp1->info->gen == 2)
			udelay(1);

		ret = vsp1_device_init(vsp1);
		if (ret < 0)
			goto done;
	}

	ret = rcar_fcp_enable(vsp1->fcp);

done:
	if (ret < 0)
		reset_control_assert(vsp1->rstc);

	return ret;
}

static const struct dev_pm_ops vsp1_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(vsp1_pm_suspend, vsp1_pm_resume)
	SET_RUNTIME_PM_OPS(vsp1_pm_runtime_suspend, vsp1_pm_runtime_resume, NULL)
};

/* -----------------------------------------------------------------------------
 * Platform Driver
 */

static const struct vsp1_device_info vsp1_device_infos[] = {
	{
		.version = VI6_IP_VERSION_MODEL_VSPS_H2,
		.model = "VSP1-S",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
			  | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
			  | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 5,
		.uds_count = 3,
		.wpf_count = 4,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPR_H2,
		.model = "VSP1-R",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 5,
		.uds_count = 3,
		.wpf_count = 4,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPD_GEN2,
		.model = "VSP1-D",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_HGO | VSP1_HAS_LUT,
		.lif_count = 1,
		.rpf_count = 4,
		.uds_count = 1,
		.wpf_count = 1,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPS_M2,
		.model = "VSP1-S",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
			  | VSP1_HAS_HGT | VSP1_HAS_LUT | VSP1_HAS_SRU
			  | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 5,
		.uds_count = 1,
		.wpf_count = 4,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPS_V2H,
		.model = "VSP1V-S",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT
			  | VSP1_HAS_SRU | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 4,
		.uds_count = 1,
		.wpf_count = 4,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPD_V2H,
		.model = "VSP1V-D",
		.gen = 2,
		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_LUT,
		.lif_count = 1,
		.rpf_count = 4,
		.uds_count = 1,
		.wpf_count = 1,
		.num_bru_inputs = 4,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPI_GEN3,
		.model = "VSP2-I",
		.gen = 3,
		.features = VSP1_HAS_CLU | VSP1_HAS_HGO | VSP1_HAS_HGT
			  | VSP1_HAS_LUT | VSP1_HAS_SRU | VSP1_HAS_WPF_HFLIP
			  | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 1,
		.uds_count = 1,
		.wpf_count = 1,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPBD_GEN3,
		.model = "VSP2-BD",
		.gen = 3,
		.features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 5,
		.wpf_count = 1,
		.num_bru_inputs = 5,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPBC_GEN3,
		.model = "VSP2-BC",
		.gen = 3,
		.features = VSP1_HAS_BRU | VSP1_HAS_CLU | VSP1_HAS_HGO
			  | VSP1_HAS_LUT | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 5,
		.wpf_count = 1,
		.num_bru_inputs = 5,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPBS_GEN3,
		.model = "VSP2-BS",
		.gen = 3,
		.features = VSP1_HAS_BRS | VSP1_HAS_WPF_VFLIP,
		.rpf_count = 2,
		.wpf_count = 1,
		.uapi = true,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPD_GEN3,
		.model = "VSP2-D",
		.gen = 3,
		.features = VSP1_HAS_BRU | VSP1_HAS_WPF_VFLIP | VSP1_HAS_EXT_DL,
		.lif_count = 1,
		.rpf_count = 5,
		.uif_count = 1,
		.wpf_count = 2,
		.num_bru_inputs = 5,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPD_V3,
		.model = "VSP2-D",
		.soc = VI6_IP_VERSION_SOC_V3H,
		.gen = 3,
		.features = VSP1_HAS_BRS | VSP1_HAS_BRU,
		.lif_count = 1,
		.rpf_count = 5,
		.uif_count = 1,
		.wpf_count = 1,
		.num_bru_inputs = 5,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPD_V3,
		.model = "VSP2-D",
		.soc = VI6_IP_VERSION_SOC_V3M,
		.gen = 3,
		.features = VSP1_HAS_BRS | VSP1_HAS_BRU | VSP1_HAS_NON_ZERO_LBA,
		.lif_count = 1,
		.rpf_count = 5,
		.uif_count = 1,
		.wpf_count = 1,
		.num_bru_inputs = 5,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPDL_GEN3,
		.model = "VSP2-DL",
		.gen = 3,
		.features = VSP1_HAS_BRS | VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
		.lif_count = 2,
		.rpf_count = 5,
		.uif_count = 2,
		.wpf_count = 2,
		.num_bru_inputs = 5,
	}, {
		.version = VI6_IP_VERSION_MODEL_VSPD_GEN4,
		.model = "VSP2-D",
		.gen = 4,
		.features = VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
		.lif_count = 1,
		.rpf_count = 5,
		.uif_count = 2,
		.wpf_count = 1,
		.num_bru_inputs = 5,
	},
};

/* Match data for devices without a readable version register. */
static const struct vsp1_device_info rzg2l_vsp2_device_info = {
	.version = VI6_IP_VERSION_MODEL_VSPD_RZG2L,
	.model = "VSP2-D",
	.soc = VI6_IP_VERSION_SOC_RZG2L,
	.gen = 3,
	.features = VSP1_HAS_BRS | VSP1_HAS_WPF_VFLIP | VSP1_HAS_EXT_DL
		  | VSP1_HAS_NON_ZERO_LBA,
	.lif_count = 1,
	.rpf_count = 2,
	.wpf_count = 1,
};

/*
 * Identify the device: prefer OF match data, otherwise read the version
 * register and search the static table. Also records vsp1->version.
 */
static const struct vsp1_device_info *vsp1_lookup_info(struct vsp1_device *vsp1)
{
	const struct vsp1_device_info *info;
	unsigned int i;
	u32 model;
	u32 soc;

	/*
	 * Try the info stored in match data first for devices that don't have
	 * a version register.
	 */
	info = of_device_get_match_data(vsp1->dev);
	if (info) {
		vsp1->version = VI6_IP_VERSION_VSP_SW | info->version |
				info->soc;
		return info;
	}

	vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
	model = vsp1->version & VI6_IP_VERSION_MODEL_MASK;
	soc = vsp1->version & VI6_IP_VERSION_SOC_MASK;

	for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
		info = &vsp1_device_infos[i];

		if (model == info->version && (!info->soc || soc == info->soc))
			return info;
	}

	dev_err(vsp1->dev, "unsupported IP version 0x%08x\n", vsp1->version);

	return NULL;
}

static int vsp1_probe(struct platform_device *pdev)
{
	struct vsp1_device *vsp1;
	struct device_node *fcp_node;
	int ret;
	int irq;

	vsp1 = devm_kzalloc(&pdev->dev, sizeof(*vsp1), GFP_KERNEL);
	if (vsp1 == NULL)
		return -ENOMEM;

	vsp1->dev = &pdev->dev;
	INIT_LIST_HEAD(&vsp1->entities);
	INIT_LIST_HEAD(&vsp1->videos);

	platform_set_drvdata(pdev, vsp1);

	/* I/O and IRQ resources (clock managed by the clock PM domain). */
	vsp1->mmio = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vsp1->mmio))
		return PTR_ERR(vsp1->mmio);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	vsp1->rstc = devm_reset_control_get_shared(&pdev->dev, NULL);
	if (IS_ERR(vsp1->rstc))
		return dev_err_probe(&pdev->dev, PTR_ERR(vsp1->rstc),
				     "failed to get reset control\n");

	/* FCP (optional). */
	fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
	if (fcp_node) {
		vsp1->fcp = rcar_fcp_get(fcp_node);
		of_node_put(fcp_node);
		if (IS_ERR(vsp1->fcp)) {
			dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
				PTR_ERR(vsp1->fcp));
			return PTR_ERR(vsp1->fcp);
		}

		/*
		 * When the FCP is present, it handles all bus master accesses
		 * for the VSP and must thus be used in place of the VSP device
		 * to map DMA buffers.
		 */
		vsp1->bus_master = rcar_fcp_get_device(vsp1->fcp);
	} else {
		vsp1->bus_master = vsp1->dev;
	}

	/* Configure device parameters based on the version register. */
	pm_runtime_enable(&pdev->dev);

	ret = vsp1_device_get(vsp1);
	if (ret < 0)
		goto done;

	vsp1->info = vsp1_lookup_info(vsp1);
	if (!vsp1->info) {
		vsp1_device_put(vsp1);
		ret = -ENXIO;
		goto done;
	}

	dev_dbg(&pdev->dev, "IP version 0x%08x\n", vsp1->version);

	/*
	 * Previous use of the hardware (e.g. by the bootloader) could leave
	 * some interrupts enabled and pending.
	 *
	 * TODO: Investigate if this shouldn't be better handled by using the
	 * device reset provided by the CPG.
	 */
	vsp1_mask_all_interrupts(vsp1);

	vsp1_device_put(vsp1);

	ret = devm_request_irq(&pdev->dev, irq, vsp1_irq_handler,
			       IRQF_SHARED, dev_name(&pdev->dev), vsp1);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto done;
	}

	/* Instantiate entities. */
	ret = vsp1_create_entities(vsp1);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to create entities\n");
		goto done;
	}

done:
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		rcar_fcp_put(vsp1->fcp);
	}

	return ret;
}

static void vsp1_remove(struct platform_device *pdev)
{
	struct vsp1_device *vsp1 = platform_get_drvdata(pdev);

	vsp1_destroy_entities(vsp1);
	rcar_fcp_put(vsp1->fcp);

	pm_runtime_disable(&pdev->dev);
}

static const struct of_device_id vsp1_of_match[] = {
	{ .compatible = "renesas,vsp1" },
	{ .compatible = "renesas,vsp2" },
	{ .compatible = "renesas,r9a07g044-vsp2",
	  .data = &rzg2l_vsp2_device_info },
	{ },
};
MODULE_DEVICE_TABLE(of, vsp1_of_match);

static struct platform_driver vsp1_platform_driver = {
	.probe		= vsp1_probe,
	.remove_new	= vsp1_remove,
	.driver		= {
		.name	= "vsp1",
		.pm	= &vsp1_pm_ops,
		.of_match_table = vsp1_of_match,
	},
};

module_platform_driver(vsp1_platform_driver);

MODULE_ALIAS("vsp1");
MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_DESCRIPTION("Renesas VSP1 Driver");
MODULE_LICENSE("GPL");
/* linux-master */
/* drivers/media/platform/renesas/vsp1/vsp1_drv.c */
// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_rpf.c -- R-Car VSP1 Read Pixel Formatter
 *
 * Copyright (C) 2013-2014 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart ([email protected])
 */

#include <linux/device.h>

#include <media/v4l2-subdev.h>

#include "vsp1.h"
#include "vsp1_dl.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_video.h"

#define RPF_MAX_WIDTH	8190
#define RPF_MAX_HEIGHT	8190

/* Pre extended display list command data structure. */
struct vsp1_extcmd_auto_fld_body {
	u32 top_y0;
	u32 bottom_y0;
	u32 top_c0;
	u32 bottom_c0;
	u32 top_c1;
	u32 bottom_c1;
	u32 reserved0;
	u32 reserved1;
} __packed;

/* -----------------------------------------------------------------------------
 * Device Access
 */

/* Queue a write to a register of this RPF instance in the display list body. */
static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf,
				  struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	vsp1_dl_body_write(dlb, reg + rpf->entity.index * VI6_RPF_OFFSET,
			   data);
}

/* -----------------------------------------------------------------------------
 * V4L2 Subdevice Operations
 */

static const struct v4l2_subdev_ops rpf_ops = {
	.pad = &vsp1_rwpf_pad_ops,
};

/* -----------------------------------------------------------------------------
 * VSP1 Entity Operations
 */

/*
 * Program the per-stream RPF setup: strides, input format, byte swapping,
 * Gen4 extended formats, output location on the BRx, and the alpha
 * selection/multiplication configuration.
 */
static void rpf_configure_stream(struct vsp1_entity *entity,
				 struct vsp1_pipeline *pipe,
				 struct vsp1_dl_list *dl,
				 struct vsp1_dl_body *dlb)
{
	struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
	const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
	const struct v4l2_pix_format_mplane *format = &rpf->format;
	const struct v4l2_mbus_framefmt *source_format;
	const struct v4l2_mbus_framefmt *sink_format;
	unsigned int left = 0;
	unsigned int top = 0;
	u32 pstride;
	u32 infmt;

	/* Stride */
	pstride = format->plane_fmt[0].bytesperline
		<< VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
	if (format->num_planes > 1)
		pstride |= format->plane_fmt[1].bytesperline
			<< VI6_RPF_SRCM_PSTRIDE_C_SHIFT;

	/*
	 * pstride has both STRIDE_Y and STRIDE_C, but multiplying the whole
	 * of pstride by 2 is conveniently OK here as we are multiplying both
	 * values.
	 */
	if (pipe->interlaced)
		pstride *= 2;

	vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_PSTRIDE, pstride);

	/* Format */
	sink_format = vsp1_entity_get_pad_format(&rpf->entity,
						 rpf->entity.config,
						 RWPF_PAD_SINK);
	source_format = vsp1_entity_get_pad_format(&rpf->entity,
						   rpf->entity.config,
						   RWPF_PAD_SOURCE);

	infmt = VI6_RPF_INFMT_CIPM
	      | (fmtinfo->hwfmt << VI6_RPF_INFMT_RDFMT_SHIFT);

	if (fmtinfo->swap_yc)
		infmt |= VI6_RPF_INFMT_SPYCS;
	if (fmtinfo->swap_uv)
		infmt |= VI6_RPF_INFMT_SPUVS;

	/* Enable color space conversion when sink and source codes differ. */
	if (sink_format->code != source_format->code)
		infmt |= VI6_RPF_INFMT_CSC;

	vsp1_rpf_write(rpf, dlb, VI6_RPF_INFMT, infmt);
	vsp1_rpf_write(rpf, dlb, VI6_RPF_DSWAP, fmtinfo->swap);

	/* Gen4-only extended input format registers for 10/12-bit formats. */
	if (entity->vsp1->info->gen == 4) {
		u32 ext_infmt0;
		u32 ext_infmt1;
		u32 ext_infmt2;

		switch (fmtinfo->fourcc) {
		case V4L2_PIX_FMT_RGBX1010102:
			ext_infmt0 = VI6_RPF_EXT_INFMT0_BYPP_M1_RGB10;
			ext_infmt1 = VI6_RPF_EXT_INFMT1_PACK_CPOS(0, 10, 20, 0);
			ext_infmt2 = VI6_RPF_EXT_INFMT2_PACK_CLEN(10, 10, 10, 0);
			break;

		case V4L2_PIX_FMT_RGBA1010102:
			ext_infmt0 = VI6_RPF_EXT_INFMT0_BYPP_M1_RGB10;
			ext_infmt1 = VI6_RPF_EXT_INFMT1_PACK_CPOS(0, 10, 20, 30);
			ext_infmt2 = VI6_RPF_EXT_INFMT2_PACK_CLEN(10, 10, 10, 2);
			break;

		case V4L2_PIX_FMT_ARGB2101010:
			ext_infmt0 = VI6_RPF_EXT_INFMT0_BYPP_M1_RGB10;
			ext_infmt1 = VI6_RPF_EXT_INFMT1_PACK_CPOS(2, 12, 22, 0);
			ext_infmt2 = VI6_RPF_EXT_INFMT2_PACK_CLEN(10, 10, 10, 2);
			break;

		case V4L2_PIX_FMT_Y210:
			ext_infmt0 = VI6_RPF_EXT_INFMT0_F2B |
				     VI6_RPF_EXT_INFMT0_IPBD_Y_10 |
				     VI6_RPF_EXT_INFMT0_IPBD_C_10;
			ext_infmt1 = 0x0;
			ext_infmt2 = 0x0;
			break;

		case V4L2_PIX_FMT_Y212:
			ext_infmt0 = VI6_RPF_EXT_INFMT0_F2B |
				     VI6_RPF_EXT_INFMT0_IPBD_Y_12 |
				     VI6_RPF_EXT_INFMT0_IPBD_C_12;
			ext_infmt1 = 0x0;
			ext_infmt2 = 0x0;
			break;

		default:
			ext_infmt0 = 0;
			ext_infmt1 = 0;
			ext_infmt2 = 0;
			break;
		}

		vsp1_rpf_write(rpf, dlb, VI6_RPF_EXT_INFMT0, ext_infmt0);
		vsp1_rpf_write(rpf, dlb, VI6_RPF_EXT_INFMT1, ext_infmt1);
		vsp1_rpf_write(rpf, dlb, VI6_RPF_EXT_INFMT2, ext_infmt2);
	}

	/* Output location. */
	if (pipe->brx) {
		const struct v4l2_rect *compose;

		compose = vsp1_entity_get_pad_selection(pipe->brx,
							pipe->brx->config,
							rpf->brx_input,
							V4L2_SEL_TGT_COMPOSE);
		left = compose->left;
		top = compose->top;
	}

	if (pipe->interlaced)
		top /= 2;

	vsp1_rpf_write(rpf, dlb, VI6_RPF_LOC,
		       (left << VI6_RPF_LOC_HCOORD_SHIFT) |
		       (top << VI6_RPF_LOC_VCOORD_SHIFT));

	/*
	 * On Gen2 use the alpha channel (extended to 8 bits) when available or
	 * a fixed alpha value set through the V4L2_CID_ALPHA_COMPONENT control
	 * otherwise.
	 *
	 * The Gen3+ RPF has extended alpha capability and can both multiply the
	 * alpha channel by a fixed global alpha value, and multiply the pixel
	 * components to convert the input to premultiplied alpha.
	 *
	 * As alpha premultiplication is available in the BRx for both Gen2 and
	 * Gen3+ we handle it there and use the Gen3 alpha multiplier for global
	 * alpha multiplication only. This however prevents conversion to
	 * premultiplied alpha if no BRx is present in the pipeline. If that use
	 * case turns out to be useful we will revisit the implementation (for
	 * Gen3 only).
	 *
	 * We enable alpha multiplication on Gen3+ using the fixed alpha value
	 * set through the V4L2_CID_ALPHA_COMPONENT control when the input
	 * contains an alpha channel. On Gen2 the global alpha is ignored in
	 * that case.
	 *
	 * In all cases, disable color keying.
	 */
	vsp1_rpf_write(rpf, dlb, VI6_RPF_ALPH_SEL, VI6_RPF_ALPH_SEL_AEXT_EXT |
		       (fmtinfo->alpha ? VI6_RPF_ALPH_SEL_ASEL_PACKED
				       : VI6_RPF_ALPH_SEL_ASEL_FIXED));

	if (entity->vsp1->info->gen >= 3) {
		u32 mult;

		if (fmtinfo->alpha) {
			/*
			 * When the input contains an alpha channel enable the
			 * alpha multiplier. If the input is premultiplied we
			 * need to multiply both the alpha channel and the pixel
			 * components by the global alpha value to keep them
			 * premultiplied. Otherwise multiply the alpha channel
			 * only.
			 */
			bool premultiplied = format->flags
					   & V4L2_PIX_FMT_FLAG_PREMUL_ALPHA;

			mult = VI6_RPF_MULT_ALPHA_A_MMD_RATIO
			     | (premultiplied ?
				VI6_RPF_MULT_ALPHA_P_MMD_RATIO :
				VI6_RPF_MULT_ALPHA_P_MMD_NONE);
		} else {
			/*
			 * When the input doesn't contain an alpha channel the
			 * global alpha value is applied in the unpacking unit,
			 * the alpha multiplier isn't needed and must be
			 * disabled.
			 */
			mult = VI6_RPF_MULT_ALPHA_A_MMD_NONE
			     | VI6_RPF_MULT_ALPHA_P_MMD_NONE;
		}

		/* Ratio is applied per frame in rpf_configure_frame(). */
		rpf->mult_alpha = mult;
	}

	vsp1_rpf_write(rpf, dlb, VI6_RPF_MSK_CTRL, 0);
	vsp1_rpf_write(rpf, dlb, VI6_RPF_CKEY_CTRL, 0);
}

/*
 * Fill the auto-fld extended display list pre-command with the top and
 * bottom field plane addresses for interlaced operation.
 */
static void vsp1_rpf_configure_autofld(struct vsp1_rwpf *rpf,
				       struct vsp1_dl_list *dl)
{
	const struct v4l2_pix_format_mplane *format = &rpf->format;
	struct vsp1_dl_ext_cmd *cmd;
	struct vsp1_extcmd_auto_fld_body *auto_fld;
	u32 offset_y, offset_c;

	cmd = vsp1_dl_get_pre_cmd(dl);
	if (WARN_ONCE(!cmd, "Failed to obtain an autofld cmd"))
		return;

	/* Re-index our auto_fld to match the current RPF. */
	auto_fld = cmd->data;
	auto_fld = &auto_fld[rpf->entity.index];

	auto_fld->top_y0 = rpf->mem.addr[0];
	auto_fld->top_c0 = rpf->mem.addr[1];
	auto_fld->top_c1 = rpf->mem.addr[2];

	/* The bottom field starts one line below the top field. */
	offset_y = format->plane_fmt[0].bytesperline;
	offset_c = format->plane_fmt[1].bytesperline;

	auto_fld->bottom_y0 = rpf->mem.addr[0] + offset_y;
	auto_fld->bottom_c0 = rpf->mem.addr[1] + offset_c;
	auto_fld->bottom_c1 = rpf->mem.addr[2] + offset_c;

	cmd->flags |= VI6_DL_EXT_AUTOFLD_INT | BIT(16 + rpf->entity.index);
}

/* Per-frame setup: program the current alpha value and propagate it. */
static void rpf_configure_frame(struct vsp1_entity *entity,
				struct vsp1_pipeline *pipe,
				struct vsp1_dl_list *dl,
				struct vsp1_dl_body *dlb)
{
	struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);

	vsp1_rpf_write(rpf, dlb, VI6_RPF_VRTCOL_SET,
		       rpf->alpha << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
	vsp1_rpf_write(rpf, dlb, VI6_RPF_MULT_ALPHA, rpf->mult_alpha |
		       (rpf->alpha << VI6_RPF_MULT_ALPHA_RATIO_SHIFT));

	vsp1_pipeline_propagate_alpha(pipe, dlb, rpf->alpha);
}

/*
 * Per-partition setup: compute the (sub-)crop for the current partition and
 * program the source sizes and plane memory addresses.
 */
static void rpf_configure_partition(struct vsp1_entity *entity,
				    struct vsp1_pipeline *pipe,
				    struct vsp1_dl_list *dl,
				    struct vsp1_dl_body *dlb)
{
	struct vsp1_rwpf *rpf = to_rwpf(&entity->subdev);
	struct vsp1_rwpf_memory mem = rpf->mem;
	struct vsp1_device *vsp1 = rpf->entity.vsp1;
	const struct vsp1_format_info *fmtinfo = rpf->fmtinfo;
	const struct v4l2_pix_format_mplane *format = &rpf->format;
	struct v4l2_rect crop;

	/*
	 * Source size and crop offsets.
	 *
	 * The crop offsets correspond to the location of the crop
	 * rectangle top left corner in the plane buffer. Only two
	 * offsets are needed, as planes 2 and 3 always have identical
	 * strides.
	 */
	crop = *vsp1_rwpf_get_crop(rpf, rpf->entity.config);

	/*
	 * Partition Algorithm Control
	 *
	 * The partition algorithm can split this frame into multiple
	 * slices. We must scale our partition window based on the pipe
	 * configuration to match the destination partition window.
	 * To achieve this, we adjust our crop to provide a 'sub-crop'
	 * matching the expected partition window. Only 'left' and
	 * 'width' need to be adjusted.
	 */
	if (pipe->partitions > 1) {
		crop.width = pipe->partition->rpf.width;
		crop.left += pipe->partition->rpf.left;
	}

	if (pipe->interlaced) {
		crop.height = round_down(crop.height / 2, fmtinfo->vsub);
		crop.top = round_down(crop.top / 2, fmtinfo->vsub);
	}

	vsp1_rpf_write(rpf, dlb, VI6_RPF_SRC_BSIZE,
		       (crop.width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
		       (crop.height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
	vsp1_rpf_write(rpf, dlb, VI6_RPF_SRC_ESIZE,
		       (crop.width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
		       (crop.height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));

	/* Offset the plane addresses to the crop top-left corner. */
	mem.addr[0] += crop.top * format->plane_fmt[0].bytesperline
		     + crop.left * fmtinfo->bpp[0] / 8;

	if (format->num_planes > 1) {
		unsigned int bpl = format->plane_fmt[1].bytesperline;
		unsigned int offset;

		offset = crop.top / fmtinfo->vsub * bpl
		       + crop.left / fmtinfo->hsub * fmtinfo->bpp[1] / 8;
		mem.addr[1] += offset;
		mem.addr[2] += offset;
	}

	/*
	 * On Gen3+ hardware the SPUVS bit has no effect on 3-planar
	 * formats. Swap the U and V planes manually in that case.
	 */
	if (vsp1->info->gen >= 3 && format->num_planes == 3 &&
	    fmtinfo->swap_uv)
		swap(mem.addr[1], mem.addr[2]);

	/*
	 * Interlaced pipelines will use the extended pre-cmd to process
	 * SRCM_ADDR_{Y,C0,C1}.
	 */
	if (pipe->interlaced) {
		vsp1_rpf_configure_autofld(rpf, dl);
	} else {
		vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_Y, mem.addr[0]);
		vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C0, mem.addr[1]);
		vsp1_rpf_write(rpf, dlb, VI6_RPF_SRCM_ADDR_C1, mem.addr[2]);
	}
}

/* The RPF simply records the partition window computed upstream. */
static void rpf_partition(struct vsp1_entity *entity,
			  struct vsp1_pipeline *pipe,
			  struct vsp1_partition *partition,
			  unsigned int partition_idx,
			  struct vsp1_partition_window *window)
{
	partition->rpf = *window;
}

static const struct vsp1_entity_operations rpf_entity_ops = {
	.configure_stream = rpf_configure_stream,
	.configure_frame = rpf_configure_frame,
	.configure_partition = rpf_configure_partition,
	.partition = rpf_partition,
};

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */

/*
 * vsp1_rpf_create - Allocate and initialize an RPF entity
 * @vsp1: the VSP device
 * @index: RPF index
 *
 * Returns the new entity or an ERR_PTR() value on failure.
 */
struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
{
	struct vsp1_rwpf *rpf;
	char name[6];
	int ret;

	rpf = devm_kzalloc(vsp1->dev, sizeof(*rpf), GFP_KERNEL);
	if (rpf == NULL)
		return ERR_PTR(-ENOMEM);

	rpf->max_width = RPF_MAX_WIDTH;
	rpf->max_height = RPF_MAX_HEIGHT;

	rpf->entity.ops = &rpf_entity_ops;
	rpf->entity.type = VSP1_ENTITY_RPF;
	rpf->entity.index = index;

	sprintf(name, "rpf.%u", index);
	ret = vsp1_entity_init(vsp1, &rpf->entity, name, 2, &rpf_ops,
			       MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Initialize the control handler. */
	ret = vsp1_rwpf_init_ctrls(rpf, 0);
	if (ret < 0) {
		dev_err(vsp1->dev, "rpf%u: failed to initialize controls\n",
			index);
		goto error;
	}

	v4l2_ctrl_handler_setup(&rpf->ctrls);

	return rpf;

error:
	vsp1_entity_destroy(&rpf->entity);
	return ERR_PTR(ret);
}
/* linux-master */
/* drivers/media/platform/renesas/vsp1/vsp1_rpf.c */
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_drm.c -- R-Car VSP1 DRM/KMS Interface * * Copyright (C) 2015 Renesas Electronics Corporation * * Contact: Laurent Pinchart ([email protected]) */ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <media/media-entity.h> #include <media/v4l2-subdev.h> #include <media/vsp1.h> #include "vsp1.h" #include "vsp1_brx.h" #include "vsp1_dl.h" #include "vsp1_drm.h" #include "vsp1_lif.h" #include "vsp1_pipe.h" #include "vsp1_rwpf.h" #include "vsp1_uif.h" #define BRX_NAME(e) (e)->type == VSP1_ENTITY_BRU ? "BRU" : "BRS" /* ----------------------------------------------------------------------------- * Interrupt Handling */ static void vsp1_du_pipeline_frame_end(struct vsp1_pipeline *pipe, unsigned int completion) { struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe); if (drm_pipe->du_complete) { struct vsp1_entity *uif = drm_pipe->uif; unsigned int status = completion & (VSP1_DU_STATUS_COMPLETE | VSP1_DU_STATUS_WRITEBACK); u32 crc; crc = uif ? vsp1_uif_get_crc(to_uif(&uif->subdev)) : 0; drm_pipe->du_complete(drm_pipe->du_private, status, crc); } if (completion & VSP1_DL_FRAME_END_INTERNAL) { drm_pipe->force_brx_release = false; wake_up(&drm_pipe->wait_queue); } } /* ----------------------------------------------------------------------------- * Pipeline Configuration */ /* * Insert the UIF in the pipeline between the prev and next entities. If no UIF * is available connect the two entities directly. */ static int vsp1_du_insert_uif(struct vsp1_device *vsp1, struct vsp1_pipeline *pipe, struct vsp1_entity *uif, struct vsp1_entity *prev, unsigned int prev_pad, struct vsp1_entity *next, unsigned int next_pad) { struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; int ret; if (!uif) { /* * If there's no UIF to be inserted, connect the previous and * next entities directly. 
*/ prev->sink = next; prev->sink_pad = next_pad; return 0; } prev->sink = uif; prev->sink_pad = UIF_PAD_SINK; format.pad = prev_pad; ret = v4l2_subdev_call(&prev->subdev, pad, get_fmt, NULL, &format); if (ret < 0) return ret; format.pad = UIF_PAD_SINK; ret = v4l2_subdev_call(&uif->subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on UIF sink\n", __func__, format.format.width, format.format.height, format.format.code); /* * The UIF doesn't mangle the format between its sink and source pads, * so there is no need to retrieve the format on its source pad. */ uif->sink = next; uif->sink_pad = next_pad; return 0; } /* Setup one RPF and the connected BRx sink pad. */ static int vsp1_du_pipeline_setup_rpf(struct vsp1_device *vsp1, struct vsp1_pipeline *pipe, struct vsp1_rwpf *rpf, struct vsp1_entity *uif, unsigned int brx_input) { struct v4l2_subdev_selection sel = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; const struct v4l2_rect *crop; int ret; /* * Configure the format on the RPF sink pad and propagate it up to the * BRx sink pad. 
*/ crop = &vsp1->drm->inputs[rpf->entity.index].crop; format.pad = RWPF_PAD_SINK; format.format.width = crop->width + crop->left; format.format.height = crop->height + crop->top; format.format.code = rpf->fmtinfo->mbus; format.format.field = V4L2_FIELD_NONE; ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on RPF%u sink\n", __func__, format.format.width, format.format.height, format.format.code, rpf->entity.index); sel.pad = RWPF_PAD_SINK; sel.target = V4L2_SEL_TGT_CROP; sel.r = *crop; ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_selection, NULL, &sel); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: set selection (%u,%u)/%ux%u on RPF%u sink\n", __func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height, rpf->entity.index); /* * RPF source, hardcode the format to ARGB8888 to turn on format * conversion if needed. */ format.pad = RWPF_PAD_SOURCE; ret = v4l2_subdev_call(&rpf->entity.subdev, pad, get_fmt, NULL, &format); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on RPF%u source\n", __func__, format.format.width, format.format.height, format.format.code, rpf->entity.index); format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; ret = v4l2_subdev_call(&rpf->entity.subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; /* Insert and configure the UIF if available. */ ret = vsp1_du_insert_uif(vsp1, pipe, uif, &rpf->entity, RWPF_PAD_SOURCE, pipe->brx, brx_input); if (ret < 0) return ret; /* BRx sink, propagate the format from the RPF source. 
*/ format.pad = brx_input; ret = v4l2_subdev_call(&pipe->brx->subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n", __func__, format.format.width, format.format.height, format.format.code, BRX_NAME(pipe->brx), format.pad); sel.pad = brx_input; sel.target = V4L2_SEL_TGT_COMPOSE; sel.r = vsp1->drm->inputs[rpf->entity.index].compose; ret = v4l2_subdev_call(&pipe->brx->subdev, pad, set_selection, NULL, &sel); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: set selection (%u,%u)/%ux%u on %s pad %u\n", __func__, sel.r.left, sel.r.top, sel.r.width, sel.r.height, BRX_NAME(pipe->brx), sel.pad); return 0; } /* Setup the BRx source pad. */ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1, struct vsp1_pipeline *pipe); static void vsp1_du_pipeline_configure(struct vsp1_pipeline *pipe); static int vsp1_du_pipeline_setup_brx(struct vsp1_device *vsp1, struct vsp1_pipeline *pipe) { struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe); struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct vsp1_entity *brx; int ret; /* * Pick a BRx: * - If we need more than two inputs, use the BRU. * - Otherwise, if we are not forced to release our BRx, keep it. * - Else, use any free BRx (randomly starting with the BRU). */ if (pipe->num_inputs > 2) brx = &vsp1->bru->entity; else if (pipe->brx && !drm_pipe->force_brx_release) brx = pipe->brx; else if (vsp1_feature(vsp1, VSP1_HAS_BRU) && !vsp1->bru->entity.pipe) brx = &vsp1->bru->entity; else brx = &vsp1->brs->entity; /* Switch BRx if needed. */ if (brx != pipe->brx) { struct vsp1_entity *released_brx = NULL; /* Release our BRx if we have one. */ if (pipe->brx) { dev_dbg(vsp1->dev, "%s: pipe %u: releasing %s\n", __func__, pipe->lif->index, BRX_NAME(pipe->brx)); /* * The BRx might be acquired by the other pipeline in * the next step. We must thus remove it from the list * of entities for this pipeline. 
The other pipeline's * hardware configuration will reconfigure the BRx * routing. * * However, if the other pipeline doesn't acquire our * BRx, we need to keep it in the list, otherwise the * hardware configuration step won't disconnect it from * the pipeline. To solve this, store the released BRx * pointer to add it back to the list of entities later * if it isn't acquired by the other pipeline. */ released_brx = pipe->brx; list_del(&pipe->brx->list_pipe); pipe->brx->sink = NULL; pipe->brx->pipe = NULL; pipe->brx = NULL; } /* * If the BRx we need is in use, force the owner pipeline to * switch to the other BRx and wait until the switch completes. */ if (brx->pipe) { struct vsp1_drm_pipeline *owner_pipe; dev_dbg(vsp1->dev, "%s: pipe %u: waiting for %s\n", __func__, pipe->lif->index, BRX_NAME(brx)); owner_pipe = to_vsp1_drm_pipeline(brx->pipe); owner_pipe->force_brx_release = true; vsp1_du_pipeline_setup_inputs(vsp1, &owner_pipe->pipe); vsp1_du_pipeline_configure(&owner_pipe->pipe); ret = wait_event_timeout(owner_pipe->wait_queue, !owner_pipe->force_brx_release, msecs_to_jiffies(500)); if (ret == 0) dev_warn(vsp1->dev, "DRM pipeline %u reconfiguration timeout\n", owner_pipe->pipe.lif->index); } /* * If the BRx we have released previously hasn't been acquired * by the other pipeline, add it back to the entities list (with * the pipe pointer NULL) to let vsp1_du_pipeline_configure() * disconnect it from the hardware pipeline. */ if (released_brx && !released_brx->pipe) list_add_tail(&released_brx->list_pipe, &pipe->entities); /* Add the BRx to the pipeline. */ dev_dbg(vsp1->dev, "%s: pipe %u: acquired %s\n", __func__, pipe->lif->index, BRX_NAME(brx)); pipe->brx = brx; pipe->brx->pipe = pipe; pipe->brx->sink = &pipe->output->entity; pipe->brx->sink_pad = 0; list_add_tail(&pipe->brx->list_pipe, &pipe->entities); } /* * Configure the format on the BRx source and verify that it matches the * requested format. 
We don't set the media bus code as it is configured * on the BRx sink pad 0 and propagated inside the entity, not on the * source pad. */ format.pad = brx->source_pad; format.format.width = drm_pipe->width; format.format.height = drm_pipe->height; format.format.field = V4L2_FIELD_NONE; ret = v4l2_subdev_call(&brx->subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on %s pad %u\n", __func__, format.format.width, format.format.height, format.format.code, BRX_NAME(brx), brx->source_pad); if (format.format.width != drm_pipe->width || format.format.height != drm_pipe->height) { dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__); return -EPIPE; } return 0; } static unsigned int rpf_zpos(struct vsp1_device *vsp1, struct vsp1_rwpf *rpf) { return vsp1->drm->inputs[rpf->entity.index].zpos; } /* Setup the input side of the pipeline (RPFs and BRx). */ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1, struct vsp1_pipeline *pipe) { struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe); struct vsp1_rwpf *inputs[VSP1_MAX_RPF] = { NULL, }; struct vsp1_entity *uif; bool use_uif = false; struct vsp1_brx *brx; unsigned int i; int ret; /* Count the number of enabled inputs and sort them by Z-order. */ pipe->num_inputs = 0; for (i = 0; i < vsp1->info->rpf_count; ++i) { struct vsp1_rwpf *rpf = vsp1->rpf[i]; unsigned int j; if (!pipe->inputs[i]) continue; /* Insert the RPF in the sorted RPFs array. */ for (j = pipe->num_inputs++; j > 0; --j) { if (rpf_zpos(vsp1, inputs[j-1]) <= rpf_zpos(vsp1, rpf)) break; inputs[j] = inputs[j-1]; } inputs[j] = rpf; } /* * Setup the BRx. This must be done before setting up the RPF input * pipelines as the BRx sink compose rectangles depend on the BRx source * format. 
*/ ret = vsp1_du_pipeline_setup_brx(vsp1, pipe); if (ret < 0) { dev_err(vsp1->dev, "%s: failed to setup %s source\n", __func__, BRX_NAME(pipe->brx)); return ret; } brx = to_brx(&pipe->brx->subdev); /* Setup the RPF input pipeline for every enabled input. */ for (i = 0; i < pipe->brx->source_pad; ++i) { struct vsp1_rwpf *rpf = inputs[i]; if (!rpf) { brx->inputs[i].rpf = NULL; continue; } if (!rpf->entity.pipe) { rpf->entity.pipe = pipe; list_add_tail(&rpf->entity.list_pipe, &pipe->entities); } brx->inputs[i].rpf = rpf; rpf->brx_input = i; rpf->entity.sink = pipe->brx; rpf->entity.sink_pad = i; dev_dbg(vsp1->dev, "%s: connecting RPF.%u to %s:%u\n", __func__, rpf->entity.index, BRX_NAME(pipe->brx), i); uif = drm_pipe->crc.source == VSP1_DU_CRC_PLANE && drm_pipe->crc.index == i ? drm_pipe->uif : NULL; if (uif) use_uif = true; ret = vsp1_du_pipeline_setup_rpf(vsp1, pipe, rpf, uif, i); if (ret < 0) { dev_err(vsp1->dev, "%s: failed to setup RPF.%u\n", __func__, rpf->entity.index); return ret; } } /* Insert and configure the UIF at the BRx output if available. */ uif = drm_pipe->crc.source == VSP1_DU_CRC_OUTPUT ? drm_pipe->uif : NULL; if (uif) use_uif = true; ret = vsp1_du_insert_uif(vsp1, pipe, uif, pipe->brx, pipe->brx->source_pad, &pipe->output->entity, 0); if (ret < 0) dev_err(vsp1->dev, "%s: failed to setup UIF after %s\n", __func__, BRX_NAME(pipe->brx)); /* If the DRM pipe does not have a UIF there is nothing we can update. */ if (!drm_pipe->uif) return 0; /* * If the UIF is not in use schedule it for removal by setting its pipe * pointer to NULL, vsp1_du_pipeline_configure() will remove it from the * hardware pipeline and from the pipeline's list of entities. Otherwise * make sure it is present in the pipeline's list of entities if it * wasn't already. 
*/ if (!use_uif) { drm_pipe->uif->pipe = NULL; } else if (!drm_pipe->uif->pipe) { drm_pipe->uif->pipe = pipe; list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities); } return 0; } /* Setup the output side of the pipeline (WPF and LIF). */ static int vsp1_du_pipeline_setup_output(struct vsp1_device *vsp1, struct vsp1_pipeline *pipe) { struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe); struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; int ret; format.pad = RWPF_PAD_SINK; format.format.width = drm_pipe->width; format.format.height = drm_pipe->height; format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32; format.format.field = V4L2_FIELD_NONE; ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on WPF%u sink\n", __func__, format.format.width, format.format.height, format.format.code, pipe->output->entity.index); format.pad = RWPF_PAD_SOURCE; ret = v4l2_subdev_call(&pipe->output->entity.subdev, pad, get_fmt, NULL, &format); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: got format %ux%u (%x) on WPF%u source\n", __func__, format.format.width, format.format.height, format.format.code, pipe->output->entity.index); format.pad = LIF_PAD_SINK; ret = v4l2_subdev_call(&pipe->lif->subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; dev_dbg(vsp1->dev, "%s: set format %ux%u (%x) on LIF%u sink\n", __func__, format.format.width, format.format.height, format.format.code, pipe->lif->index); /* * Verify that the format at the output of the pipeline matches the * requested frame size and media bus code. */ if (format.format.width != drm_pipe->width || format.format.height != drm_pipe->height || format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) { dev_dbg(vsp1->dev, "%s: format mismatch on LIF%u\n", __func__, pipe->lif->index); return -EPIPE; } return 0; } /* Configure all entities in the pipeline. 
*/ static void vsp1_du_pipeline_configure(struct vsp1_pipeline *pipe) { struct vsp1_drm_pipeline *drm_pipe = to_vsp1_drm_pipeline(pipe); struct vsp1_entity *entity; struct vsp1_entity *next; struct vsp1_dl_list *dl; struct vsp1_dl_body *dlb; unsigned int dl_flags = 0; if (drm_pipe->force_brx_release) dl_flags |= VSP1_DL_FRAME_END_INTERNAL; if (pipe->output->writeback) dl_flags |= VSP1_DL_FRAME_END_WRITEBACK; dl = vsp1_dl_list_get(pipe->output->dlm); dlb = vsp1_dl_list_get_body0(dl); list_for_each_entry_safe(entity, next, &pipe->entities, list_pipe) { /* Disconnect unused entities from the pipeline. */ if (!entity->pipe) { vsp1_dl_body_write(dlb, entity->route->reg, VI6_DPR_NODE_UNUSED); entity->sink = NULL; list_del(&entity->list_pipe); continue; } vsp1_entity_route_setup(entity, pipe, dlb); vsp1_entity_configure_stream(entity, pipe, dl, dlb); vsp1_entity_configure_frame(entity, pipe, dl, dlb); vsp1_entity_configure_partition(entity, pipe, dl, dlb); } vsp1_dl_list_commit(dl, dl_flags); } static int vsp1_du_pipeline_set_rwpf_format(struct vsp1_device *vsp1, struct vsp1_rwpf *rwpf, u32 pixelformat, unsigned int pitch) { const struct vsp1_format_info *fmtinfo; unsigned int chroma_hsub; fmtinfo = vsp1_get_format_info(vsp1, pixelformat); if (!fmtinfo) { dev_dbg(vsp1->dev, "Unsupported pixel format %08x\n", pixelformat); return -EINVAL; } /* * Only formats with three planes can affect the chroma planes pitch. * All formats with two planes have a horizontal subsampling value of 2, * but combine U and V in a single chroma plane, which thus results in * the luma plane and chroma plane having the same pitch. */ chroma_hsub = (fmtinfo->planes == 3) ? 
fmtinfo->hsub : 1; rwpf->fmtinfo = fmtinfo; rwpf->format.num_planes = fmtinfo->planes; rwpf->format.plane_fmt[0].bytesperline = pitch; rwpf->format.plane_fmt[1].bytesperline = pitch / chroma_hsub; return 0; } /* ----------------------------------------------------------------------------- * DU Driver API */ int vsp1_du_init(struct device *dev) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); if (!vsp1) return -EPROBE_DEFER; return 0; } EXPORT_SYMBOL_GPL(vsp1_du_init); /** * vsp1_du_setup_lif - Setup the output part of the VSP pipeline * @dev: the VSP device * @pipe_index: the DRM pipeline index * @cfg: the LIF configuration * * Configure the output part of VSP DRM pipeline for the given frame @cfg.width * and @cfg.height. This sets up formats on the BRx source pad, the WPF sink and * source pads, and the LIF sink pad. * * The @pipe_index argument selects which DRM pipeline to setup. The number of * available pipelines depend on the VSP instance. * * As the media bus code on the blend unit source pad is conditioned by the * configuration of its sink 0 pad, we also set up the formats on all blend unit * sinks, even if the configuration will be overwritten later by * vsp1_du_setup_rpf(). This ensures that the blend unit configuration is set to * a well defined state. * * Return 0 on success or a negative error code on failure. */ int vsp1_du_setup_lif(struct device *dev, unsigned int pipe_index, const struct vsp1_du_lif_config *cfg) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); struct vsp1_drm_pipeline *drm_pipe; struct vsp1_pipeline *pipe; unsigned long flags; unsigned int i; int ret; if (pipe_index >= vsp1->info->lif_count) return -EINVAL; drm_pipe = &vsp1->drm->pipe[pipe_index]; pipe = &drm_pipe->pipe; if (!cfg) { struct vsp1_brx *brx; mutex_lock(&vsp1->drm->lock); brx = to_brx(&pipe->brx->subdev); /* * NULL configuration means the CRTC is being disabled, stop * the pipeline and turn the light off. 
*/ ret = vsp1_pipeline_stop(pipe); if (ret == -ETIMEDOUT) dev_err(vsp1->dev, "DRM pipeline stop timeout\n"); for (i = 0; i < ARRAY_SIZE(pipe->inputs); ++i) { struct vsp1_rwpf *rpf = pipe->inputs[i]; if (!rpf) continue; /* * Remove the RPF from the pipe and the list of BRx * inputs. */ WARN_ON(!rpf->entity.pipe); rpf->entity.pipe = NULL; list_del(&rpf->entity.list_pipe); pipe->inputs[i] = NULL; brx->inputs[rpf->brx_input].rpf = NULL; } drm_pipe->du_complete = NULL; pipe->num_inputs = 0; dev_dbg(vsp1->dev, "%s: pipe %u: releasing %s\n", __func__, pipe->lif->index, BRX_NAME(pipe->brx)); list_del(&pipe->brx->list_pipe); pipe->brx->pipe = NULL; pipe->brx = NULL; mutex_unlock(&vsp1->drm->lock); vsp1_dlm_reset(pipe->output->dlm); vsp1_device_put(vsp1); dev_dbg(vsp1->dev, "%s: pipeline disabled\n", __func__); return 0; } /* Reset the underrun counter */ pipe->underrun_count = 0; drm_pipe->width = cfg->width; drm_pipe->height = cfg->height; pipe->interlaced = cfg->interlaced; dev_dbg(vsp1->dev, "%s: configuring LIF%u with format %ux%u%s\n", __func__, pipe_index, cfg->width, cfg->height, pipe->interlaced ? "i" : ""); mutex_lock(&vsp1->drm->lock); /* Setup formats through the pipeline. */ ret = vsp1_du_pipeline_setup_inputs(vsp1, pipe); if (ret < 0) goto unlock; ret = vsp1_du_pipeline_setup_output(vsp1, pipe); if (ret < 0) goto unlock; /* Enable the VSP1. */ ret = vsp1_device_get(vsp1); if (ret < 0) goto unlock; /* * Register a callback to allow us to notify the DRM driver of frame * completion events. */ drm_pipe->du_complete = cfg->callback; drm_pipe->du_private = cfg->callback_data; /* Disable the display interrupts. */ vsp1_write(vsp1, VI6_DISP_IRQ_STA(pipe_index), 0); vsp1_write(vsp1, VI6_DISP_IRQ_ENB(pipe_index), 0); /* Configure all entities in the pipeline. */ vsp1_du_pipeline_configure(pipe); unlock: mutex_unlock(&vsp1->drm->lock); if (ret < 0) return ret; /* Start the pipeline. 
*/ spin_lock_irqsave(&pipe->irqlock, flags); vsp1_pipeline_run(pipe); spin_unlock_irqrestore(&pipe->irqlock, flags); dev_dbg(vsp1->dev, "%s: pipeline enabled\n", __func__); return 0; } EXPORT_SYMBOL_GPL(vsp1_du_setup_lif); /** * vsp1_du_atomic_begin - Prepare for an atomic update * @dev: the VSP device * @pipe_index: the DRM pipeline index */ void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index) { } EXPORT_SYMBOL_GPL(vsp1_du_atomic_begin); /** * vsp1_du_atomic_update - Setup one RPF input of the VSP pipeline * @dev: the VSP device * @pipe_index: the DRM pipeline index * @rpf_index: index of the RPF to setup (0-based) * @cfg: the RPF configuration * * Configure the VSP to perform image composition through RPF @rpf_index as * described by the @cfg configuration. The image to compose is referenced by * @cfg.mem and composed using the @cfg.src crop rectangle and the @cfg.dst * composition rectangle. The Z-order is configurable with higher @zpos values * displayed on top. * * If the @cfg configuration is NULL, the RPF will be disabled. Calling the * function on a disabled RPF is allowed. * * Image format as stored in memory is expressed as a V4L2 @cfg.pixelformat * value. The memory pitch is configurable to allow for padding at end of lines, * or simply for images that extend beyond the crop rectangle boundaries. The * @cfg.pitch value is expressed in bytes and applies to all planes for * multiplanar formats. * * The source memory buffer is referenced by the DMA address of its planes in * the @cfg.mem array. Up to two planes are supported. The second plane DMA * address is ignored for formats using a single plane. * * This function isn't reentrant, the caller needs to serialize calls. * * Return 0 on success or a negative error code on failure. 
*/ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index, unsigned int rpf_index, const struct vsp1_du_atomic_config *cfg) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index]; struct vsp1_rwpf *rpf; int ret; if (rpf_index >= vsp1->info->rpf_count) return -EINVAL; rpf = vsp1->rpf[rpf_index]; if (!cfg) { dev_dbg(vsp1->dev, "%s: RPF%u: disable requested\n", __func__, rpf_index); /* * Remove the RPF from the pipeline's inputs. Keep it in the * pipeline's entity list to let vsp1_du_pipeline_configure() * remove it from the hardware pipeline. */ rpf->entity.pipe = NULL; drm_pipe->pipe.inputs[rpf_index] = NULL; return 0; } dev_dbg(vsp1->dev, "%s: RPF%u: (%u,%u)/%ux%u -> (%u,%u)/%ux%u (%08x), pitch %u dma { %pad, %pad, %pad } zpos %u\n", __func__, rpf_index, cfg->src.left, cfg->src.top, cfg->src.width, cfg->src.height, cfg->dst.left, cfg->dst.top, cfg->dst.width, cfg->dst.height, cfg->pixelformat, cfg->pitch, &cfg->mem[0], &cfg->mem[1], &cfg->mem[2], cfg->zpos); /* * Store the format, stride, memory buffer address, crop and compose * rectangles and Z-order position and for the input. */ ret = vsp1_du_pipeline_set_rwpf_format(vsp1, rpf, cfg->pixelformat, cfg->pitch); if (ret < 0) return ret; rpf->alpha = cfg->alpha; rpf->mem.addr[0] = cfg->mem[0]; rpf->mem.addr[1] = cfg->mem[1]; rpf->mem.addr[2] = cfg->mem[2]; rpf->format.flags = cfg->premult ? 
V4L2_PIX_FMT_FLAG_PREMUL_ALPHA : 0; vsp1->drm->inputs[rpf_index].crop = cfg->src; vsp1->drm->inputs[rpf_index].compose = cfg->dst; vsp1->drm->inputs[rpf_index].zpos = cfg->zpos; drm_pipe->pipe.inputs[rpf_index] = rpf; return 0; } EXPORT_SYMBOL_GPL(vsp1_du_atomic_update); /** * vsp1_du_atomic_flush - Commit an atomic update * @dev: the VSP device * @pipe_index: the DRM pipeline index * @cfg: atomic pipe configuration */ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index, const struct vsp1_du_atomic_pipe_config *cfg) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[pipe_index]; struct vsp1_pipeline *pipe = &drm_pipe->pipe; int ret; drm_pipe->crc = cfg->crc; mutex_lock(&vsp1->drm->lock); if (cfg->writeback.pixelformat) { const struct vsp1_du_writeback_config *wb_cfg = &cfg->writeback; ret = vsp1_du_pipeline_set_rwpf_format(vsp1, pipe->output, wb_cfg->pixelformat, wb_cfg->pitch); if (WARN_ON(ret < 0)) goto done; pipe->output->mem.addr[0] = wb_cfg->mem[0]; pipe->output->mem.addr[1] = wb_cfg->mem[1]; pipe->output->mem.addr[2] = wb_cfg->mem[2]; pipe->output->writeback = true; } vsp1_du_pipeline_setup_inputs(vsp1, pipe); vsp1_du_pipeline_configure(pipe); done: mutex_unlock(&vsp1->drm->lock); } EXPORT_SYMBOL_GPL(vsp1_du_atomic_flush); int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); /* * As all the buffers allocated by the DU driver are coherent, we can * skip cache sync. This will need to be revisited when support for * non-coherent buffers will be added to the DU driver. 
*/ return dma_map_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); } EXPORT_SYMBOL_GPL(vsp1_du_map_sg); void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt) { struct vsp1_device *vsp1 = dev_get_drvdata(dev); dma_unmap_sgtable(vsp1->bus_master, sgt, DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); } EXPORT_SYMBOL_GPL(vsp1_du_unmap_sg); /* ----------------------------------------------------------------------------- * Initialization */ int vsp1_drm_init(struct vsp1_device *vsp1) { unsigned int i; vsp1->drm = devm_kzalloc(vsp1->dev, sizeof(*vsp1->drm), GFP_KERNEL); if (!vsp1->drm) return -ENOMEM; mutex_init(&vsp1->drm->lock); /* Create one DRM pipeline per LIF. */ for (i = 0; i < vsp1->info->lif_count; ++i) { struct vsp1_drm_pipeline *drm_pipe = &vsp1->drm->pipe[i]; struct vsp1_pipeline *pipe = &drm_pipe->pipe; init_waitqueue_head(&drm_pipe->wait_queue); vsp1_pipeline_init(pipe); pipe->frame_end = vsp1_du_pipeline_frame_end; /* * The output side of the DRM pipeline is static, add the * corresponding entities manually. */ pipe->output = vsp1->wpf[i]; pipe->lif = &vsp1->lif[i]->entity; pipe->output->entity.pipe = pipe; pipe->output->entity.sink = pipe->lif; pipe->output->entity.sink_pad = 0; list_add_tail(&pipe->output->entity.list_pipe, &pipe->entities); pipe->lif->pipe = pipe; list_add_tail(&pipe->lif->list_pipe, &pipe->entities); /* * CRC computation is initially disabled, don't add the UIF to * the pipeline. */ if (i < vsp1->info->uif_count) drm_pipe->uif = &vsp1->uif[i]->entity; } /* Disable all RPFs initially. */ for (i = 0; i < vsp1->info->rpf_count; ++i) { struct vsp1_rwpf *input = vsp1->rpf[i]; INIT_LIST_HEAD(&input->entity.list_pipe); } return 0; } void vsp1_drm_cleanup(struct vsp1_device *vsp1) { mutex_destroy(&vsp1->drm->lock); }
linux-master
drivers/media/platform/renesas/vsp1/vsp1_drm.c
// SPDX-License-Identifier: GPL-2.0+ /* * vsp1_rwpf.c -- R-Car VSP1 Read and Write Pixel Formatters * * Copyright (C) 2013-2014 Renesas Electronics Corporation * * Contact: Laurent Pinchart ([email protected]) */ #include <media/v4l2-subdev.h> #include "vsp1.h" #include "vsp1_rwpf.h" #include "vsp1_video.h" #define RWPF_MIN_WIDTH 1 #define RWPF_MIN_HEIGHT 1 struct v4l2_rect *vsp1_rwpf_get_crop(struct vsp1_rwpf *rwpf, struct v4l2_subdev_state *sd_state) { return v4l2_subdev_get_try_crop(&rwpf->entity.subdev, sd_state, RWPF_PAD_SINK); } /* ----------------------------------------------------------------------------- * V4L2 Subdevice Pad Operations */ static int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { static const unsigned int codes[] = { MEDIA_BUS_FMT_ARGB8888_1X32, MEDIA_BUS_FMT_AHSV8888_1X32, MEDIA_BUS_FMT_AYUV8_1X32, }; if (code->index >= ARRAY_SIZE(codes)) return -EINVAL; code->code = codes[code->index]; return 0; } static int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { struct vsp1_rwpf *rwpf = to_rwpf(subdev); return vsp1_subdev_enum_frame_size(subdev, sd_state, fse, RWPF_MIN_WIDTH, RWPF_MIN_HEIGHT, rwpf->max_width, rwpf->max_height); } static int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct vsp1_rwpf *rwpf = to_rwpf(subdev); struct v4l2_subdev_state *config; struct v4l2_mbus_framefmt *format; int ret = 0; mutex_lock(&rwpf->entity.lock); config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state, fmt->which); if (!config) { ret = -EINVAL; goto done; } /* Default to YUV if the requested format is not supported. 
*/ if (fmt->format.code != MEDIA_BUS_FMT_ARGB8888_1X32 && fmt->format.code != MEDIA_BUS_FMT_AHSV8888_1X32 && fmt->format.code != MEDIA_BUS_FMT_AYUV8_1X32) fmt->format.code = MEDIA_BUS_FMT_AYUV8_1X32; format = vsp1_entity_get_pad_format(&rwpf->entity, config, fmt->pad); if (fmt->pad == RWPF_PAD_SOURCE) { /* * The RWPF performs format conversion but can't scale, only the * format code can be changed on the source pad. */ format->code = fmt->format.code; fmt->format = *format; goto done; } format->code = fmt->format.code; format->width = clamp_t(unsigned int, fmt->format.width, RWPF_MIN_WIDTH, rwpf->max_width); format->height = clamp_t(unsigned int, fmt->format.height, RWPF_MIN_HEIGHT, rwpf->max_height); format->field = V4L2_FIELD_NONE; format->colorspace = V4L2_COLORSPACE_SRGB; fmt->format = *format; if (rwpf->entity.type == VSP1_ENTITY_RPF) { struct v4l2_rect *crop; /* Update the sink crop rectangle. */ crop = vsp1_rwpf_get_crop(rwpf, config); crop->left = 0; crop->top = 0; crop->width = fmt->format.width; crop->height = fmt->format.height; } /* Propagate the format to the source pad. */ format = vsp1_entity_get_pad_format(&rwpf->entity, config, RWPF_PAD_SOURCE); *format = fmt->format; if (rwpf->flip.rotate) { format->width = fmt->format.height; format->height = fmt->format.width; } done: mutex_unlock(&rwpf->entity.lock); return ret; } static int vsp1_rwpf_get_selection(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) { struct vsp1_rwpf *rwpf = to_rwpf(subdev); struct v4l2_subdev_state *config; struct v4l2_mbus_framefmt *format; int ret = 0; /* * Cropping is only supported on the RPF and is implemented on the sink * pad. 
*/ if (rwpf->entity.type == VSP1_ENTITY_WPF || sel->pad != RWPF_PAD_SINK) return -EINVAL; mutex_lock(&rwpf->entity.lock); config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state, sel->which); if (!config) { ret = -EINVAL; goto done; } switch (sel->target) { case V4L2_SEL_TGT_CROP: sel->r = *vsp1_rwpf_get_crop(rwpf, config); break; case V4L2_SEL_TGT_CROP_BOUNDS: format = vsp1_entity_get_pad_format(&rwpf->entity, config, RWPF_PAD_SINK); sel->r.left = 0; sel->r.top = 0; sel->r.width = format->width; sel->r.height = format->height; break; default: ret = -EINVAL; break; } done: mutex_unlock(&rwpf->entity.lock); return ret; } static int vsp1_rwpf_set_selection(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) { struct vsp1_rwpf *rwpf = to_rwpf(subdev); struct v4l2_subdev_state *config; struct v4l2_mbus_framefmt *format; struct v4l2_rect *crop; int ret = 0; /* * Cropping is only supported on the RPF and is implemented on the sink * pad. */ if (rwpf->entity.type == VSP1_ENTITY_WPF || sel->pad != RWPF_PAD_SINK) return -EINVAL; if (sel->target != V4L2_SEL_TGT_CROP) return -EINVAL; mutex_lock(&rwpf->entity.lock); config = vsp1_entity_get_pad_config(&rwpf->entity, sd_state, sel->which); if (!config) { ret = -EINVAL; goto done; } /* Make sure the crop rectangle is entirely contained in the image. */ format = vsp1_entity_get_pad_format(&rwpf->entity, config, RWPF_PAD_SINK); /* * Restrict the crop rectangle coordinates to multiples of 2 to avoid * shifting the color plane. 
*/ if (format->code == MEDIA_BUS_FMT_AYUV8_1X32) { sel->r.left = ALIGN(sel->r.left, 2); sel->r.top = ALIGN(sel->r.top, 2); sel->r.width = round_down(sel->r.width, 2); sel->r.height = round_down(sel->r.height, 2); } sel->r.left = min_t(unsigned int, sel->r.left, format->width - 2); sel->r.top = min_t(unsigned int, sel->r.top, format->height - 2); sel->r.width = min_t(unsigned int, sel->r.width, format->width - sel->r.left); sel->r.height = min_t(unsigned int, sel->r.height, format->height - sel->r.top); crop = vsp1_rwpf_get_crop(rwpf, config); *crop = sel->r; /* Propagate the format to the source pad. */ format = vsp1_entity_get_pad_format(&rwpf->entity, config, RWPF_PAD_SOURCE); format->width = crop->width; format->height = crop->height; done: mutex_unlock(&rwpf->entity.lock); return ret; } const struct v4l2_subdev_pad_ops vsp1_rwpf_pad_ops = { .init_cfg = vsp1_entity_init_cfg, .enum_mbus_code = vsp1_rwpf_enum_mbus_code, .enum_frame_size = vsp1_rwpf_enum_frame_size, .get_fmt = vsp1_subdev_get_pad_format, .set_fmt = vsp1_rwpf_set_format, .get_selection = vsp1_rwpf_get_selection, .set_selection = vsp1_rwpf_set_selection, }; /* ----------------------------------------------------------------------------- * Controls */ static int vsp1_rwpf_s_ctrl(struct v4l2_ctrl *ctrl) { struct vsp1_rwpf *rwpf = container_of(ctrl->handler, struct vsp1_rwpf, ctrls); switch (ctrl->id) { case V4L2_CID_ALPHA_COMPONENT: rwpf->alpha = ctrl->val; break; } return 0; } static const struct v4l2_ctrl_ops vsp1_rwpf_ctrl_ops = { .s_ctrl = vsp1_rwpf_s_ctrl, }; int vsp1_rwpf_init_ctrls(struct vsp1_rwpf *rwpf, unsigned int ncontrols) { v4l2_ctrl_handler_init(&rwpf->ctrls, ncontrols + 1); v4l2_ctrl_new_std(&rwpf->ctrls, &vsp1_rwpf_ctrl_ops, V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255); rwpf->entity.subdev.ctrl_handler = &rwpf->ctrls; return rwpf->ctrls.error; }
linux-master
drivers/media/platform/renesas/vsp1/vsp1_rwpf.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_uif.c -- R-Car VSP1 User Logic Interface
 *
 * Copyright (C) 2017-2018 Laurent Pinchart
 *
 * Contact: Laurent Pinchart ([email protected])
 */

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/sys_soc.h>

#include <media/media-entity.h>
#include <media/v4l2-subdev.h>

#include "vsp1.h"
#include "vsp1_dl.h"
#include "vsp1_entity.h"
#include "vsp1_uif.h"

/* Frame size limits accepted on the UIF pads, in pixels. */
#define UIF_MIN_SIZE 4U
#define UIF_MAX_SIZE 8190U

/* -----------------------------------------------------------------------------
 * Device Access
 */

/* Read a UIF register, offset into this instance's register window. */
static inline u32 vsp1_uif_read(struct vsp1_uif *uif, u32 reg)
{
	return vsp1_read(uif->entity.vsp1,
			 uif->entity.index * VI6_UIF_OFFSET + reg);
}

/* Queue a UIF register write to the display list body. */
static inline void vsp1_uif_write(struct vsp1_uif *uif,
				  struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	vsp1_dl_body_write(dlb, reg + uif->entity.index * VI6_UIF_OFFSET, data);
}

/* Read back the CRC result register of the DISCOM unit. */
u32 vsp1_uif_get_crc(struct vsp1_uif *uif)
{
	return vsp1_uif_read(uif, VI6_UIF_DISCOM_DOCMCCRCR);
}

/* -----------------------------------------------------------------------------
 * V4L2 Subdevice Pad Operations
 */

/* Media bus codes supported on both UIF pads. */
static const unsigned int uif_codes[] = {
	MEDIA_BUS_FMT_ARGB8888_1X32,
	MEDIA_BUS_FMT_AHSV8888_1X32,
	MEDIA_BUS_FMT_AYUV8_1X32,
};

static int uif_enum_mbus_code(struct v4l2_subdev *subdev,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_mbus_code_enum *code)
{
	return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, uif_codes,
					  ARRAY_SIZE(uif_codes));
}

static int uif_enum_frame_size(struct v4l2_subdev *subdev,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_frame_size_enum *fse)
{
	return vsp1_subdev_enum_frame_size(subdev, sd_state, fse, UIF_MIN_SIZE,
					   UIF_MIN_SIZE, UIF_MAX_SIZE,
					   UIF_MAX_SIZE);
}

static int uif_set_format(struct v4l2_subdev *subdev,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_format *fmt)
{
	return vsp1_subdev_set_pad_format(subdev, sd_state, fmt, uif_codes,
					  ARRAY_SIZE(uif_codes),
					  UIF_MIN_SIZE, UIF_MIN_SIZE,
					  UIF_MAX_SIZE, UIF_MAX_SIZE);
}

/*
 * Report the crop rectangle (or its bounds/default) on the sink pad. The
 * crop bounds and default both span the full sink format.
 */
static int uif_get_selection(struct v4l2_subdev *subdev,
			     struct v4l2_subdev_state *sd_state,
			     struct v4l2_subdev_selection *sel)
{
	struct vsp1_uif *uif = to_uif(subdev);
	struct v4l2_subdev_state *config;
	struct v4l2_mbus_framefmt *format;
	int ret = 0;

	if (sel->pad != UIF_PAD_SINK)
		return -EINVAL;

	mutex_lock(&uif->entity.lock);

	config = vsp1_entity_get_pad_config(&uif->entity, sd_state,
					    sel->which);
	if (!config) {
		ret = -EINVAL;
		goto done;
	}

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		format = vsp1_entity_get_pad_format(&uif->entity, config,
						    UIF_PAD_SINK);
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = format->width;
		sel->r.height = format->height;
		break;

	case V4L2_SEL_TGT_CROP:
		sel->r = *vsp1_entity_get_pad_selection(&uif->entity, config,
							sel->pad, sel->target);
		break;

	default:
		ret = -EINVAL;
		break;
	}

done:
	mutex_unlock(&uif->entity.lock);
	return ret;
}

/*
 * Set the DISCOM window on the sink pad, clamping it inside the sink
 * format. Only V4L2_SEL_TGT_CROP is supported.
 */
static int uif_set_selection(struct v4l2_subdev *subdev,
			     struct v4l2_subdev_state *sd_state,
			     struct v4l2_subdev_selection *sel)
{
	struct vsp1_uif *uif = to_uif(subdev);
	struct v4l2_subdev_state *config;
	struct v4l2_mbus_framefmt *format;
	struct v4l2_rect *selection;
	int ret = 0;

	if (sel->pad != UIF_PAD_SINK ||
	    sel->target != V4L2_SEL_TGT_CROP)
		return -EINVAL;

	mutex_lock(&uif->entity.lock);

	config = vsp1_entity_get_pad_config(&uif->entity, sd_state,
					    sel->which);
	if (!config) {
		ret = -EINVAL;
		goto done;
	}

	/* The crop rectangle must be inside the input frame. */
	format = vsp1_entity_get_pad_format(&uif->entity, config,
					    UIF_PAD_SINK);

	/* Clamp the position first, then the size against the remainder. */
	sel->r.left = clamp_t(unsigned int, sel->r.left, 0,
			      format->width - 1);
	sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
	sel->r.width = clamp_t(unsigned int, sel->r.width, UIF_MIN_SIZE,
			       format->width - sel->r.left);
	sel->r.height = clamp_t(unsigned int, sel->r.height, UIF_MIN_SIZE,
				format->height - sel->r.top);

	/* Store the crop rectangle. */
	selection = vsp1_entity_get_pad_selection(&uif->entity, config,
						  sel->pad, V4L2_SEL_TGT_CROP);
	*selection = sel->r;

done:
	mutex_unlock(&uif->entity.lock);
	return ret;
}

/* -----------------------------------------------------------------------------
 * V4L2 Subdevice Operations
 */

static const struct v4l2_subdev_pad_ops uif_pad_ops = {
	.init_cfg = vsp1_entity_init_cfg,
	.enum_mbus_code = uif_enum_mbus_code,
	.enum_frame_size = uif_enum_frame_size,
	.get_fmt = vsp1_subdev_get_pad_format,
	.set_fmt = uif_set_format,
	.get_selection = uif_get_selection,
	.set_selection = uif_set_selection,
};

static const struct v4l2_subdev_ops uif_ops = {
	.pad    = &uif_pad_ops,
};

/* -----------------------------------------------------------------------------
 * VSP1 Entity Operations
 */

/*
 * Program the DISCOM CRC window from the stored crop rectangle and enable
 * CRC computation for the stream.
 */
static void uif_configure_stream(struct vsp1_entity *entity,
				 struct vsp1_pipeline *pipe,
				 struct vsp1_dl_list *dl,
				 struct vsp1_dl_body *dlb)
{
	struct vsp1_uif *uif = to_uif(&entity->subdev);
	const struct v4l2_rect *crop;
	unsigned int left;
	unsigned int width;

	vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMPMR,
		       VI6_UIF_DISCOM_DOCMPMR_SEL(9));

	crop = vsp1_entity_get_pad_selection(entity, entity->config,
					     UIF_PAD_SINK, V4L2_SEL_TGT_CROP);

	left = crop->left;
	width = crop->width;

	/* On M3-W the horizontal coordinates are twice the register value. */
	if (uif->m3w_quirk) {
		left /= 2;
		width /= 2;
	}

	vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSPXR, left);
	vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSPYR, crop->top);
	vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSZXR, width);
	vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMSZYR, crop->height);

	vsp1_uif_write(uif, dlb, VI6_UIF_DISCOM_DOCMCR,
		       VI6_UIF_DISCOM_DOCMCR_CMPR);
}

static const struct vsp1_entity_operations uif_entity_ops = {
	.configure_stream = uif_configure_stream,
};

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */

/* SoC list for the M3-W horizontal-coordinate quirk (see above). */
static const struct soc_device_attribute vsp1_r8a7796[] = {
	{ .soc_id = "r8a7796" },
	{ /* sentinel */ }
};

/*
 * Allocate and initialize a UIF entity. @index selects the instance; the
 * hardware names them UIF4 and UIF5, hence the "index + 4" in the name.
 */
struct vsp1_uif *vsp1_uif_create(struct vsp1_device *vsp1, unsigned int index)
{
	struct vsp1_uif *uif;
	char name[6];	/* fits "uif.4"/"uif.5" plus the terminating NUL */
	int ret;

	uif = devm_kzalloc(vsp1->dev, sizeof(*uif), GFP_KERNEL);
	if (!uif)
		return ERR_PTR(-ENOMEM);

	if (soc_device_match(vsp1_r8a7796))
		uif->m3w_quirk = true;

	uif->entity.ops = &uif_entity_ops;
	uif->entity.type = VSP1_ENTITY_UIF;
	uif->entity.index = index;

	/* The datasheet names the two UIF instances UIF4 and UIF5. */
	sprintf(name, "uif.%u", index + 4);
	ret = vsp1_entity_init(vsp1, &uif->entity, name, 2, &uif_ops,
			       MEDIA_ENT_F_PROC_VIDEO_STATISTICS);
	if (ret < 0)
		return ERR_PTR(ret);

	return uif;
}
linux-master
drivers/media/platform/renesas/vsp1/vsp1_uif.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_histo.c -- R-Car VSP1 Histogram API
 *
 * Copyright (C) 2016 Renesas Electronics Corporation
 * Copyright (C) 2016 Laurent Pinchart
 *
 * Contact: Laurent Pinchart ([email protected])
 */

#include <linux/device.h>
#include <linux/gfp.h>

#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-vmalloc.h>

#include "vsp1.h"
#include "vsp1_histo.h"
#include "vsp1_pipe.h"

/* Frame size limits accepted on the histogram sink pad, in pixels. */
#define HISTO_MIN_SIZE				4U
#define HISTO_MAX_SIZE				8192U

/* -----------------------------------------------------------------------------
 * Buffer Operations
 */

static inline struct vsp1_histogram_buffer *
to_vsp1_histogram_buffer(struct vb2_v4l2_buffer *vbuf)
{
	return container_of(vbuf, struct vsp1_histogram_buffer, buf);
}

/*
 * Pop the next queued buffer for readout, or return NULL if none is
 * available. Marks readout in progress so stop_streaming can wait for it.
 */
struct vsp1_histogram_buffer *
vsp1_histogram_buffer_get(struct vsp1_histogram *histo)
{
	struct vsp1_histogram_buffer *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&histo->irqlock, flags);

	if (list_empty(&histo->irqqueue))
		goto done;

	buf = list_first_entry(&histo->irqqueue, struct vsp1_histogram_buffer,
			       queue);
	list_del(&buf->queue);
	histo->readout = true;

done:
	spin_unlock_irqrestore(&histo->irqlock, flags);
	return buf;
}

/*
 * Complete a histogram buffer after readout: stamp it, hand it back to
 * vb2, clear the readout flag and wake up any waiter in stop_streaming.
 */
void vsp1_histogram_buffer_complete(struct vsp1_histogram *histo,
				    struct vsp1_histogram_buffer *buf,
				    size_t size)
{
	struct vsp1_pipeline *pipe = histo->entity.pipe;
	unsigned long flags;

	/*
	 * The pipeline pointer is guaranteed to be valid as this function is
	 * called from the frame completion interrupt handler, which can only
	 * occur when video streaming is active.
	 */
	buf->buf.sequence = pipe->sequence;
	buf->buf.vb2_buf.timestamp = ktime_get_ns();
	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, size);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);

	spin_lock_irqsave(&histo->irqlock, flags);
	histo->readout = false;
	wake_up(&histo->wait_queue);
	spin_unlock_irqrestore(&histo->irqlock, flags);
}

/* -----------------------------------------------------------------------------
 * videobuf2 Queue Operations
 */

/* Validate or provide the single-plane buffer size for the meta queue. */
static int histo_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
			     unsigned int *nplanes, unsigned int sizes[],
			     struct device *alloc_devs[])
{
	struct vsp1_histogram *histo = vb2_get_drv_priv(vq);

	if (*nplanes) {
		if (*nplanes != 1)
			return -EINVAL;

		if (sizes[0] < histo->data_size)
			return -EINVAL;

		return 0;
	}

	*nplanes = 1;
	sizes[0] = histo->data_size;

	return 0;
}

/* Check plane count/size and cache the vmalloc'ed plane address. */
static int histo_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);

	if (vb->num_planes != 1)
		return -EINVAL;

	if (vb2_plane_size(vb, 0) < histo->data_size)
		return -EINVAL;

	buf->addr = vb2_plane_vaddr(vb, 0);

	return 0;
}

/* Append the buffer to the IRQ queue for consumption at frame completion. */
static void histo_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_histogram_buffer *buf = to_vsp1_histogram_buffer(vbuf);
	unsigned long flags;

	spin_lock_irqsave(&histo->irqlock, flags);
	list_add_tail(&buf->queue, &histo->irqqueue);
	spin_unlock_irqrestore(&histo->irqlock, flags);
}

/* Nothing to do: the pipeline is started through the video nodes. */
static int histo_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	return 0;
}

/* Return all queued buffers as errored and wait for in-flight readout. */
static void histo_stop_streaming(struct vb2_queue *vq)
{
	struct vsp1_histogram *histo = vb2_get_drv_priv(vq);
	struct vsp1_histogram_buffer *buffer;
	unsigned long flags;

	spin_lock_irqsave(&histo->irqlock, flags);

	/* Remove all buffers from the IRQ queue. */
	list_for_each_entry(buffer, &histo->irqqueue, queue)
		vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	INIT_LIST_HEAD(&histo->irqqueue);

	/* Wait for the buffer being read out (if any) to complete. */
	wait_event_lock_irq(histo->wait_queue, !histo->readout,
			    histo->irqlock);

	spin_unlock_irqrestore(&histo->irqlock, flags);
}

static const struct vb2_ops histo_video_queue_qops = {
	.queue_setup = histo_queue_setup,
	.buf_prepare = histo_buffer_prepare,
	.buf_queue = histo_buffer_queue,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
	.start_streaming = histo_start_streaming,
	.stop_streaming = histo_stop_streaming,
};

/* -----------------------------------------------------------------------------
 * V4L2 Subdevice Operations
 */

/* The source pad carries only the fixed (metadata) format. */
static int histo_enum_mbus_code(struct v4l2_subdev *subdev,
				struct v4l2_subdev_state *sd_state,
				struct v4l2_subdev_mbus_code_enum *code)
{
	struct vsp1_histogram *histo = subdev_to_histo(subdev);

	if (code->pad == HISTO_PAD_SOURCE) {
		code->code = MEDIA_BUS_FMT_FIXED;
		return 0;
	}

	return vsp1_subdev_enum_mbus_code(subdev, sd_state, code,
					  histo->formats, histo->num_formats);
}

static int histo_enum_frame_size(struct v4l2_subdev *subdev,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_frame_size_enum *fse)
{
	if (fse->pad != HISTO_PAD_SINK)
		return -EINVAL;

	return vsp1_subdev_enum_frame_size(subdev, sd_state, fse,
					   HISTO_MIN_SIZE, HISTO_MIN_SIZE,
					   HISTO_MAX_SIZE, HISTO_MAX_SIZE);
}

/*
 * Report crop/compose rectangles on the sink pad. Compose bounds and
 * default derive from the crop rectangle, crop bounds and default from
 * the sink format.
 */
static int histo_get_selection(struct v4l2_subdev *subdev,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_selection *sel)
{
	struct vsp1_histogram *histo = subdev_to_histo(subdev);
	struct v4l2_subdev_state *config;
	struct v4l2_mbus_framefmt *format;
	struct v4l2_rect *crop;
	int ret = 0;

	if (sel->pad != HISTO_PAD_SINK)
		return -EINVAL;

	mutex_lock(&histo->entity.lock);

	config = vsp1_entity_get_pad_config(&histo->entity, sd_state,
					    sel->which);
	if (!config) {
		ret = -EINVAL;
		goto done;
	}

	switch (sel->target) {
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		crop = vsp1_entity_get_pad_selection(&histo->entity, config,
						     HISTO_PAD_SINK,
						     V4L2_SEL_TGT_CROP);
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = crop->width;
		sel->r.height = crop->height;
		break;

	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		format = vsp1_entity_get_pad_format(&histo->entity, config,
						    HISTO_PAD_SINK);
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = format->width;
		sel->r.height = format->height;
		break;

	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_CROP:
		sel->r = *vsp1_entity_get_pad_selection(&histo->entity, config,
							sel->pad, sel->target);
		break;

	default:
		ret = -EINVAL;
		break;
	}

done:
	mutex_unlock(&histo->entity.lock);
	return ret;
}

/*
 * Set the crop rectangle, clamped inside the sink format, and reset the
 * compose rectangle to it. Caller holds the entity lock.
 */
static int histo_set_crop(struct v4l2_subdev *subdev,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_selection *sel)
{
	struct vsp1_histogram *histo = subdev_to_histo(subdev);
	struct v4l2_mbus_framefmt *format;
	struct v4l2_rect *selection;

	/* The crop rectangle must be inside the input frame. */
	format = vsp1_entity_get_pad_format(&histo->entity, sd_state,
					    HISTO_PAD_SINK);
	sel->r.left = clamp_t(unsigned int, sel->r.left, 0,
			      format->width - 1);
	sel->r.top = clamp_t(unsigned int, sel->r.top, 0, format->height - 1);
	sel->r.width = clamp_t(unsigned int, sel->r.width, HISTO_MIN_SIZE,
			       format->width - sel->r.left);
	sel->r.height = clamp_t(unsigned int, sel->r.height, HISTO_MIN_SIZE,
				format->height - sel->r.top);

	/* Set the crop rectangle and reset the compose rectangle. */
	selection = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
						  sel->pad,
						  V4L2_SEL_TGT_CROP);
	*selection = sel->r;
	selection = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
						  sel->pad,
						  V4L2_SEL_TGT_COMPOSE);
	*selection = sel->r;

	return 0;
}

/*
 * Set the compose rectangle that controls downscaling to 1/1, 1/2 or 1/4
 * of the crop rectangle. Caller holds the entity lock.
 */
static int histo_set_compose(struct v4l2_subdev *subdev,
			     struct v4l2_subdev_state *sd_state,
			     struct v4l2_subdev_selection *sel)
{
	struct vsp1_histogram *histo = subdev_to_histo(subdev);
	struct v4l2_rect *compose;
	struct v4l2_rect *crop;
	unsigned int ratio;

	/*
	 * The compose rectangle is used to configure downscaling, the top left
	 * corner is fixed to (0,0) and the size to 1/2 or 1/4 of the crop
	 * rectangle.
	 */
	sel->r.left = 0;
	sel->r.top = 0;

	crop = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
					     sel->pad,
					     V4L2_SEL_TGT_CROP);

	/*
	 * Clamp the width and height to acceptable values first and then
	 * compute the closest rounded dividing ratio.
	 *
	 * Ratio	Rounded ratio
	 * --------------------------
	 * [1.0 1.5[	1
	 * [1.5 3.0[	2
	 * [3.0 4.0]	4
	 *
	 * The rounded ratio can be computed using
	 *
	 * 1 << (ceil(ratio * 2) / 3)
	 */
	sel->r.width = clamp(sel->r.width, crop->width / 4, crop->width);
	ratio = 1 << (crop->width * 2 / sel->r.width / 3);
	sel->r.width = crop->width / ratio;

	sel->r.height = clamp(sel->r.height, crop->height / 4, crop->height);
	ratio = 1 << (crop->height * 2 / sel->r.height / 3);
	sel->r.height = crop->height / ratio;

	compose = vsp1_entity_get_pad_selection(&histo->entity, sd_state,
						sel->pad,
						V4L2_SEL_TGT_COMPOSE);
	*compose = sel->r;

	return 0;
}

/* Dispatch crop/compose selection under the entity lock. */
static int histo_set_selection(struct v4l2_subdev *subdev,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_selection *sel)
{
	struct vsp1_histogram *histo = subdev_to_histo(subdev);
	struct v4l2_subdev_state *config;
	int ret;

	if (sel->pad != HISTO_PAD_SINK)
		return -EINVAL;

	mutex_lock(&histo->entity.lock);

	config = vsp1_entity_get_pad_config(&histo->entity, sd_state,
					    sel->which);
	if (!config) {
		ret = -EINVAL;
		goto done;
	}

	if (sel->target == V4L2_SEL_TGT_CROP)
		ret = histo_set_crop(subdev, config, sel);
	else if (sel->target == V4L2_SEL_TGT_COMPOSE)
		ret = histo_set_compose(subdev, config, sel);
	else
		ret = -EINVAL;

done:
	mutex_unlock(&histo->entity.lock);
	return ret;
}

/* The source pad always reports the fixed metadata format. */
static int histo_get_format(struct v4l2_subdev *subdev,
			    struct v4l2_subdev_state *sd_state,
			    struct v4l2_subdev_format *fmt)
{
	if (fmt->pad == HISTO_PAD_SOURCE) {
		fmt->format.code = MEDIA_BUS_FMT_FIXED;
		fmt->format.width = 0;
		fmt->format.height = 0;
		fmt->format.field = V4L2_FIELD_NONE;
		fmt->format.colorspace = V4L2_COLORSPACE_RAW;
		return 0;
	}

	return vsp1_subdev_get_pad_format(subdev, sd_state, fmt);
}

/* Only the sink format is settable; the source format is fixed. */
static int histo_set_format(struct v4l2_subdev *subdev,
			    struct v4l2_subdev_state *sd_state,
			    struct v4l2_subdev_format *fmt)
{
	struct vsp1_histogram *histo = subdev_to_histo(subdev);

	if (fmt->pad != HISTO_PAD_SINK)
		return histo_get_format(subdev, sd_state, fmt);

	return vsp1_subdev_set_pad_format(subdev, sd_state, fmt,
					  histo->formats, histo->num_formats,
					  HISTO_MIN_SIZE, HISTO_MIN_SIZE,
					  HISTO_MAX_SIZE, HISTO_MAX_SIZE);
}

static const struct v4l2_subdev_pad_ops histo_pad_ops = {
	.enum_mbus_code = histo_enum_mbus_code,
	.enum_frame_size = histo_enum_frame_size,
	.get_fmt = histo_get_format,
	.set_fmt = histo_set_format,
	.get_selection = histo_get_selection,
	.set_selection = histo_set_selection,
};

static const struct v4l2_subdev_ops histo_ops = {
	.pad    = &histo_pad_ops,
};

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

static int histo_v4l2_querycap(struct file *file, void *fh,
			       struct v4l2_capability *cap)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);

	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | V4L2_CAP_VIDEO_CAPTURE_MPLANE
			  | V4L2_CAP_VIDEO_OUTPUT_MPLANE
			  | V4L2_CAP_META_CAPTURE;

	strscpy(cap->driver, "vsp1", sizeof(cap->driver));
	strscpy(cap->card, histo->video.name, sizeof(cap->card));

	return 0;
}

/* A single metadata format is exposed, chosen at init time. */
static int histo_v4l2_enum_format(struct file *file, void *fh,
				  struct v4l2_fmtdesc *f)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);

	if (f->index > 0 || f->type != histo->queue.type)
		return -EINVAL;

	f->pixelformat = histo->meta_format;

	return 0;
}

/* Get/set/try all report the same fixed metadata format. */
static int histo_v4l2_get_format(struct file *file, void *fh,
				 struct v4l2_format *format)
{
	struct v4l2_fh *vfh = file->private_data;
	struct vsp1_histogram *histo = vdev_to_histo(vfh->vdev);
	struct v4l2_meta_format *meta = &format->fmt.meta;

	if (format->type != histo->queue.type)
		return -EINVAL;

	memset(meta, 0, sizeof(*meta));

	meta->dataformat = histo->meta_format;
	meta->buffersize = histo->data_size;

	return 0;
}

static const struct v4l2_ioctl_ops histo_v4l2_ioctl_ops = {
	.vidioc_querycap = histo_v4l2_querycap,
	.vidioc_enum_fmt_meta_cap = histo_v4l2_enum_format,
	.vidioc_g_fmt_meta_cap = histo_v4l2_get_format,
	.vidioc_s_fmt_meta_cap = histo_v4l2_get_format,
	.vidioc_try_fmt_meta_cap = histo_v4l2_get_format,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
};

/* -----------------------------------------------------------------------------
 * V4L2 File Operations
 */

static const struct v4l2_file_operations histo_v4l2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

/* Unregister the video node and release the media entity resources. */
static void vsp1_histogram_cleanup(struct vsp1_histogram *histo)
{
	if (video_is_registered(&histo->video))
		video_unregister_device(&histo->video);

	media_entity_cleanup(&histo->video.entity);
}

void vsp1_histogram_destroy(struct vsp1_entity *entity)
{
	struct vsp1_histogram *histo = subdev_to_histo(&entity->subdev);

	vsp1_histogram_cleanup(histo);
}

/*
 * Initialize a histogram entity: the VSP entity, its media entity and
 * metadata capture video node, and the vb2 queue backing it.
 */
int vsp1_histogram_init(struct vsp1_device *vsp1, struct vsp1_histogram *histo,
			enum vsp1_entity_type type, const char *name,
			const struct vsp1_entity_operations *ops,
			const unsigned int *formats, unsigned int num_formats,
			size_t data_size, u32 meta_format)
{
	int ret;

	histo->formats = formats;
	histo->num_formats = num_formats;
	histo->data_size = data_size;
	histo->meta_format = meta_format;

	histo->pad.flags = MEDIA_PAD_FL_SINK;
	histo->video.vfl_dir = VFL_DIR_RX;

	mutex_init(&histo->lock);
	spin_lock_init(&histo->irqlock);
	INIT_LIST_HEAD(&histo->irqqueue);
	init_waitqueue_head(&histo->wait_queue);

	/* Initialize the VSP entity... */
	histo->entity.ops = ops;
	histo->entity.type = type;

	ret = vsp1_entity_init(vsp1, &histo->entity, name, 2, &histo_ops,
			       MEDIA_ENT_F_PROC_VIDEO_STATISTICS);
	if (ret < 0)
		return ret;

	/* ... and the media entity... */
	ret = media_entity_pads_init(&histo->video.entity, 1, &histo->pad);
	if (ret < 0)
		return ret;

	/* ... and the video node... */
	histo->video.v4l2_dev = &vsp1->v4l2_dev;
	histo->video.fops = &histo_v4l2_fops;
	snprintf(histo->video.name, sizeof(histo->video.name),
		 "%s histo", histo->entity.subdev.name);
	histo->video.vfl_type = VFL_TYPE_VIDEO;
	histo->video.release = video_device_release_empty;
	histo->video.ioctl_ops = &histo_v4l2_ioctl_ops;
	histo->video.device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;

	video_set_drvdata(&histo->video, histo);

	/* ... and the buffers queue... */
	histo->queue.type = V4L2_BUF_TYPE_META_CAPTURE;
	histo->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	histo->queue.lock = &histo->lock;
	histo->queue.drv_priv = histo;
	histo->queue.buf_struct_size = sizeof(struct vsp1_histogram_buffer);
	histo->queue.ops = &histo_video_queue_qops;
	histo->queue.mem_ops = &vb2_vmalloc_memops;
	histo->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	histo->queue.dev = vsp1->dev;
	ret = vb2_queue_init(&histo->queue);
	if (ret < 0) {
		dev_err(vsp1->dev, "failed to initialize vb2 queue\n");
		goto error;
	}

	/* ... and register the video device. */
	histo->video.queue = &histo->queue;
	ret = video_register_device(&histo->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0) {
		dev_err(vsp1->dev, "failed to register video device\n");
		goto error;
	}

	return 0;

error:
	vsp1_histogram_cleanup(histo);
	return ret;
}
linux-master
drivers/media/platform/renesas/vsp1/vsp1_histo.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_sru.c -- R-Car VSP1 Super Resolution Unit
 *
 * Copyright (C) 2013 Renesas Corporation
 *
 * Contact: Laurent Pinchart ([email protected])
 */

#include <linux/device.h>
#include <linux/gfp.h>

#include <media/v4l2-subdev.h>

#include "vsp1.h"
#include "vsp1_dl.h"
#include "vsp1_pipe.h"
#include "vsp1_sru.h"

/* Frame size limits accepted on the SRU pads, in pixels. */
#define SRU_MIN_SIZE 4U
#define SRU_MAX_SIZE 8190U

/* -----------------------------------------------------------------------------
 * Device Access
 */

/* Queue an SRU register write to the display list body. */
static inline void vsp1_sru_write(struct vsp1_sru *sru,
				  struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	vsp1_dl_body_write(dlb, reg, data);
}

/* -----------------------------------------------------------------------------
 * Controls
 */

#define V4L2_CID_VSP1_SRU_INTENSITY		(V4L2_CID_USER_BASE | 0x1001)

/* Hardware parameter pair for one intensity level (CTRL0/CTRL2 values). */
struct vsp1_sru_param {
	u32 ctrl0;
	u32 ctrl2;
};

#define VI6_SRU_CTRL0_PARAMS(p0, p1)			\
	(((p0) << VI6_SRU_CTRL0_PARAM0_SHIFT) |		\
	 ((p1) << VI6_SRU_CTRL0_PARAM1_SHIFT))

#define VI6_SRU_CTRL2_PARAMS(p6, p7, p8)		\
	(((p6) << VI6_SRU_CTRL2_PARAM6_SHIFT) |		\
	 ((p7) << VI6_SRU_CTRL2_PARAM7_SHIFT) |		\
	 ((p8) << VI6_SRU_CTRL2_PARAM8_SHIFT))

/* Indexed by (intensity - 1); intensity ranges from 1 to 6. */
static const struct vsp1_sru_param vsp1_sru_params[] = {
	{
		.ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
		.ctrl2 = VI6_SRU_CTRL2_PARAMS(24, 40, 255),
	}, {
		.ctrl0 = VI6_SRU_CTRL0_PARAMS(256, 4) | VI6_SRU_CTRL0_EN,
		.ctrl2 = VI6_SRU_CTRL2_PARAMS(8, 16, 255),
	}, {
		.ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
		.ctrl2 = VI6_SRU_CTRL2_PARAMS(36, 60, 255),
	}, {
		.ctrl0 = VI6_SRU_CTRL0_PARAMS(384, 5) | VI6_SRU_CTRL0_EN,
		.ctrl2 = VI6_SRU_CTRL2_PARAMS(12, 27, 255),
	}, {
		.ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
		.ctrl2 = VI6_SRU_CTRL2_PARAMS(48, 80, 255),
	}, {
		.ctrl0 = VI6_SRU_CTRL0_PARAMS(511, 6) | VI6_SRU_CTRL0_EN,
		.ctrl2 = VI6_SRU_CTRL2_PARAMS(16, 36, 255),
	},
};

/* Store the intensity value for use when configuring the stream. */
static int sru_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct vsp1_sru *sru =
		container_of(ctrl->handler, struct vsp1_sru, ctrls);

	switch (ctrl->id) {
	case V4L2_CID_VSP1_SRU_INTENSITY:
		sru->intensity = ctrl->val;
		break;
	}

	return 0;
}

static const struct v4l2_ctrl_ops sru_ctrl_ops = {
	.s_ctrl = sru_s_ctrl,
};

static const struct v4l2_ctrl_config sru_intensity_control = {
	.ops = &sru_ctrl_ops,
	.id = V4L2_CID_VSP1_SRU_INTENSITY,
	.name = "Intensity",
	.type = V4L2_CTRL_TYPE_INTEGER,
	.min = 1,
	.max = 6,
	.def = 1,
	.step = 1,
};

/* -----------------------------------------------------------------------------
 * V4L2 Subdevice Operations
 */

static int sru_enum_mbus_code(struct v4l2_subdev *subdev,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_mbus_code_enum *code)
{
	static const unsigned int codes[] = {
		MEDIA_BUS_FMT_ARGB8888_1X32,
		MEDIA_BUS_FMT_AYUV8_1X32,
	};

	return vsp1_subdev_enum_mbus_code(subdev, sd_state, code, codes,
					  ARRAY_SIZE(codes));
}

/*
 * Enumerate frame sizes: the sink accepts the full range, the source is
 * limited to 1x or 2x the sink size (upscaling by two at most).
 */
static int sru_enum_frame_size(struct v4l2_subdev *subdev,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_frame_size_enum *fse)
{
	struct vsp1_sru *sru = to_sru(subdev);
	struct v4l2_subdev_state *config;
	struct v4l2_mbus_framefmt *format;
	int ret = 0;

	config = vsp1_entity_get_pad_config(&sru->entity, sd_state,
					    fse->which);
	if (!config)
		return -EINVAL;

	format = vsp1_entity_get_pad_format(&sru->entity, config,
					    SRU_PAD_SINK);

	mutex_lock(&sru->entity.lock);

	if (fse->index || fse->code != format->code) {
		ret = -EINVAL;
		goto done;
	}

	if (fse->pad == SRU_PAD_SINK) {
		fse->min_width = SRU_MIN_SIZE;
		fse->max_width = SRU_MAX_SIZE;
		fse->min_height = SRU_MIN_SIZE;
		fse->max_height = SRU_MAX_SIZE;
	} else {
		fse->min_width = format->width;
		fse->min_height = format->height;
		if (format->width <= SRU_MAX_SIZE / 2 &&
		    format->height <= SRU_MAX_SIZE / 2) {
			fse->max_width = format->width * 2;
			fse->max_height = format->height * 2;
		} else {
			fse->max_width = format->width;
			fse->max_height = format->height;
		}
	}

done:
	mutex_unlock(&sru->entity.lock);
	return ret;
}

/*
 * Adjust a requested format to hardware constraints: clamp the sink size,
 * force the source code to match the sink, and snap the source size to
 * either 1x or 2x the sink size.
 */
static void sru_try_format(struct vsp1_sru *sru,
			   struct v4l2_subdev_state *sd_state,
			   unsigned int pad, struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_mbus_framefmt *format;
	unsigned int input_area;
	unsigned int output_area;

	switch (pad) {
	case SRU_PAD_SINK:
		/* Default to YUV if the requested format is not supported. */
		if (fmt->code != MEDIA_BUS_FMT_ARGB8888_1X32 &&
		    fmt->code != MEDIA_BUS_FMT_AYUV8_1X32)
			fmt->code = MEDIA_BUS_FMT_AYUV8_1X32;

		fmt->width = clamp(fmt->width, SRU_MIN_SIZE, SRU_MAX_SIZE);
		fmt->height = clamp(fmt->height, SRU_MIN_SIZE, SRU_MAX_SIZE);
		break;

	case SRU_PAD_SOURCE:
		/* The SRU can't perform format conversion. */
		format = vsp1_entity_get_pad_format(&sru->entity, sd_state,
						    SRU_PAD_SINK);
		fmt->code = format->code;

		/*
		 * We can upscale by 2 in both direction, but not independently.
		 * Compare the input and output rectangles areas (avoiding
		 * integer overflows on the output): if the requested output
		 * area is larger than 1.5^2 the input area upscale by two,
		 * otherwise don't scale.
		 */
		input_area = format->width * format->height;
		output_area = min(fmt->width, SRU_MAX_SIZE)
			    * min(fmt->height, SRU_MAX_SIZE);

		if (fmt->width <= SRU_MAX_SIZE / 2 &&
		    fmt->height <= SRU_MAX_SIZE / 2 &&
		    output_area > input_area * 9 / 4) {
			fmt->width = format->width * 2;
			fmt->height = format->height * 2;
		} else {
			fmt->width = format->width;
			fmt->height = format->height;
		}
		break;
	}

	fmt->field = V4L2_FIELD_NONE;
	fmt->colorspace = V4L2_COLORSPACE_SRGB;
}

/* Set a pad format; a sink format change is propagated to the source. */
static int sru_set_format(struct v4l2_subdev *subdev,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_format *fmt)
{
	struct vsp1_sru *sru = to_sru(subdev);
	struct v4l2_subdev_state *config;
	struct v4l2_mbus_framefmt *format;
	int ret = 0;

	mutex_lock(&sru->entity.lock);

	config = vsp1_entity_get_pad_config(&sru->entity, sd_state,
					    fmt->which);
	if (!config) {
		ret = -EINVAL;
		goto done;
	}

	sru_try_format(sru, config, fmt->pad, &fmt->format);

	format = vsp1_entity_get_pad_format(&sru->entity, config, fmt->pad);
	*format = fmt->format;

	if (fmt->pad == SRU_PAD_SINK) {
		/* Propagate the format to the source pad. */
		format = vsp1_entity_get_pad_format(&sru->entity, config,
						    SRU_PAD_SOURCE);
		*format = fmt->format;

		sru_try_format(sru, config, SRU_PAD_SOURCE, format);
	}

done:
	mutex_unlock(&sru->entity.lock);
	return ret;
}

static const struct v4l2_subdev_pad_ops sru_pad_ops = {
	.init_cfg = vsp1_entity_init_cfg,
	.enum_mbus_code = sru_enum_mbus_code,
	.enum_frame_size = sru_enum_frame_size,
	.get_fmt = vsp1_subdev_get_pad_format,
	.set_fmt = sru_set_format,
};

static const struct v4l2_subdev_ops sru_ops = {
	.pad    = &sru_pad_ops,
};

/* -----------------------------------------------------------------------------
 * VSP1 Entity Operations
 */

/* Program CTRL0/1/2 from the formats and the selected intensity level. */
static void sru_configure_stream(struct vsp1_entity *entity,
				 struct vsp1_pipeline *pipe,
				 struct vsp1_dl_list *dl,
				 struct vsp1_dl_body *dlb)
{
	const struct vsp1_sru_param *param;
	struct vsp1_sru *sru = to_sru(&entity->subdev);
	struct v4l2_mbus_framefmt *input;
	struct v4l2_mbus_framefmt *output;
	u32 ctrl0;

	input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
					   SRU_PAD_SINK);
	output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
					    SRU_PAD_SOURCE);

	if (input->code == MEDIA_BUS_FMT_ARGB8888_1X32)
		ctrl0 = VI6_SRU_CTRL0_PARAM2 | VI6_SRU_CTRL0_PARAM3
		      | VI6_SRU_CTRL0_PARAM4;
	else
		ctrl0 = VI6_SRU_CTRL0_PARAM3;

	/* Differing widths imply the 2x upscale mode is in effect. */
	if (input->width != output->width)
		ctrl0 |= VI6_SRU_CTRL0_MODE_UPSCALE;

	param = &vsp1_sru_params[sru->intensity - 1];

	ctrl0 |= param->ctrl0;

	vsp1_sru_write(sru, dlb, VI6_SRU_CTRL0, ctrl0);
	vsp1_sru_write(sru, dlb, VI6_SRU_CTRL1, VI6_SRU_CTRL1_PARAM5);
	vsp1_sru_write(sru, dlb, VI6_SRU_CTRL2, param->ctrl2);
}

/* Report the maximum partition width, doubled when upscaling is active. */
static unsigned int sru_max_width(struct vsp1_entity *entity,
				  struct vsp1_pipeline *pipe)
{
	struct vsp1_sru *sru = to_sru(&entity->subdev);
	struct v4l2_mbus_framefmt *input;
	struct v4l2_mbus_framefmt *output;

	input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
					   SRU_PAD_SINK);
	output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
					    SRU_PAD_SOURCE);

	/*
	 * The maximum input width of the SRU is 288 input pixels, but 32
	 * pixels are reserved to support overlapping partition windows when
	 * scaling.
	 */
	if (input->width != output->width)
		return 512;
	else
		return 256;
}

/* Translate the partition window to SRU input coordinates. */
static void sru_partition(struct vsp1_entity *entity,
			  struct vsp1_pipeline *pipe,
			  struct vsp1_partition *partition,
			  unsigned int partition_idx,
			  struct vsp1_partition_window *window)
{
	struct vsp1_sru *sru = to_sru(&entity->subdev);
	struct v4l2_mbus_framefmt *input;
	struct v4l2_mbus_framefmt *output;

	input = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
					   SRU_PAD_SINK);
	output = vsp1_entity_get_pad_format(&sru->entity, sru->entity.config,
					    SRU_PAD_SOURCE);

	/* Adapt if SRUx2 is enabled. */
	if (input->width != output->width) {
		window->width /= 2;
		window->left /= 2;
	}

	partition->sru = *window;
}

static const struct vsp1_entity_operations sru_entity_ops = {
	.configure_stream = sru_configure_stream,
	.max_width = sru_max_width,
	.partition = sru_partition,
};

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */

/* Allocate and initialize the SRU entity and its intensity control. */
struct vsp1_sru *vsp1_sru_create(struct vsp1_device *vsp1)
{
	struct vsp1_sru *sru;
	int ret;

	sru = devm_kzalloc(vsp1->dev, sizeof(*sru), GFP_KERNEL);
	if (sru == NULL)
		return ERR_PTR(-ENOMEM);

	sru->entity.ops = &sru_entity_ops;
	sru->entity.type = VSP1_ENTITY_SRU;

	ret = vsp1_entity_init(vsp1, &sru->entity, "sru", 2, &sru_ops,
			       MEDIA_ENT_F_PROC_VIDEO_SCALER);
	if (ret < 0)
		return ERR_PTR(ret);

	/* Initialize the control handler. */
	v4l2_ctrl_handler_init(&sru->ctrls, 1);
	v4l2_ctrl_new_custom(&sru->ctrls, &sru_intensity_control, NULL);

	sru->intensity = 1;

	sru->entity.subdev.ctrl_handler = &sru->ctrls;

	if (sru->ctrls.error) {
		dev_err(vsp1->dev, "sru: failed to initialize controls\n");
		ret = sru->ctrls.error;
		vsp1_entity_destroy(&sru->entity);
		return ERR_PTR(ret);
	}

	return sru;
}
linux-master
drivers/media/platform/renesas/vsp1/vsp1_sru.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_wpf.c -- R-Car VSP1 Write Pixel Formatter
 *
 * Copyright (C) 2013-2014 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart ([email protected])
 */

#include <linux/device.h>

#include <media/v4l2-subdev.h>

#include "vsp1.h"
#include "vsp1_dl.h"
#include "vsp1_pipe.h"
#include "vsp1_rwpf.h"
#include "vsp1_video.h"

#define WPF_GEN2_MAX_WIDTH 2048U
#define WPF_GEN2_MAX_HEIGHT 2048U
#define WPF_GEN3_MAX_WIDTH 8190U
#define WPF_GEN3_MAX_HEIGHT 8190U

/* -----------------------------------------------------------------------------
 * Device Access
 */

/* Write a WPF register into a display list body, offset by the WPF index. */
static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf,
                                  struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
        vsp1_dl_body_write(dlb, reg + wpf->entity.index * VI6_WPF_OFFSET, data);
}

/* -----------------------------------------------------------------------------
 * Controls
 */

/* Bit positions used in the flip.pending/flip.active bitmasks. */
enum wpf_flip_ctrl {
        WPF_CTRL_VFLIP = 0,
        WPF_CTRL_HFLIP = 1,
};

/*
 * Enable or disable 90°/270° rotation and swap the source pad width/height
 * accordingly. Rejected with -EBUSY while buffers are allocated on the video
 * node, as the format can't change at that point.
 */
static int vsp1_wpf_set_rotation(struct vsp1_rwpf *wpf, unsigned int rotation)
{
        struct vsp1_video *video = wpf->video;
        struct v4l2_mbus_framefmt *sink_format;
        struct v4l2_mbus_framefmt *source_format;
        bool rotate;
        int ret = 0;

        /*
         * Only consider the 0°/180° from/to 90°/270° modifications, the rest
         * is taken care of by the flipping configuration.
         */
        rotate = rotation == 90 || rotation == 270;
        if (rotate == wpf->flip.rotate)
                return 0;

        /* Changing rotation isn't allowed when buffers are allocated. */
        mutex_lock(&video->lock);

        if (vb2_is_busy(&video->queue)) {
                ret = -EBUSY;
                goto done;
        }

        sink_format = vsp1_entity_get_pad_format(&wpf->entity,
                                                 wpf->entity.config,
                                                 RWPF_PAD_SINK);
        source_format = vsp1_entity_get_pad_format(&wpf->entity,
                                                   wpf->entity.config,
                                                   RWPF_PAD_SOURCE);

        mutex_lock(&wpf->entity.lock);

        if (rotate) {
                source_format->width = sink_format->height;
                source_format->height = sink_format->width;
        } else {
                source_format->width = sink_format->width;
                source_format->height = sink_format->height;
        }

        wpf->flip.rotate = rotate;

        mutex_unlock(&wpf->entity.lock);

done:
        mutex_unlock(&video->lock);
        return ret;
}

/* Control handler: fold hflip/vflip/rotate into a pending flip bitmask. */
static int vsp1_wpf_s_ctrl(struct v4l2_ctrl *ctrl)
{
        struct vsp1_rwpf *wpf =
                container_of(ctrl->handler, struct vsp1_rwpf, ctrls);
        unsigned int rotation;
        u32 flip = 0;
        int ret;

        /* Update the rotation. */
        rotation = wpf->flip.ctrls.rotate ? wpf->flip.ctrls.rotate->val : 0;
        ret = vsp1_wpf_set_rotation(wpf, rotation);
        if (ret < 0)
                return ret;

        /*
         * Compute the flip value resulting from all three controls, with
         * rotation by 180° flipping the image in both directions. Store the
         * result in the pending flip field for the next frame that will be
         * processed.
         */
        if (wpf->flip.ctrls.vflip->val)
                flip |= BIT(WPF_CTRL_VFLIP);

        if (wpf->flip.ctrls.hflip && wpf->flip.ctrls.hflip->val)
                flip |= BIT(WPF_CTRL_HFLIP);

        if (rotation == 180 || rotation == 270)
                flip ^= BIT(WPF_CTRL_VFLIP) | BIT(WPF_CTRL_HFLIP);

        spin_lock_irq(&wpf->flip.lock);
        wpf->flip.pending = flip;
        spin_unlock_irq(&wpf->flip.lock);

        return 0;
}

static const struct v4l2_ctrl_ops vsp1_wpf_ctrl_ops = {
        .s_ctrl = vsp1_wpf_s_ctrl,
};

/*
 * Create the flipping/rotation controls supported by this WPF instance.
 * Only WPF0 supports flipping at all; the number of controls depends on the
 * device features (VFLIP only, or VFLIP+HFLIP+ROTATE as a cluster).
 */
static int wpf_init_controls(struct vsp1_rwpf *wpf)
{
        struct vsp1_device *vsp1 = wpf->entity.vsp1;
        unsigned int num_flip_ctrls;

        spin_lock_init(&wpf->flip.lock);

        if (wpf->entity.index != 0) {
                /* Only WPF0 supports flipping. */
                num_flip_ctrls = 0;
        } else if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP)) {
                /*
                 * When horizontal flip is supported the WPF implements three
                 * controls (horizontal flip, vertical flip and rotation).
                 */
                num_flip_ctrls = 3;
        } else if (vsp1_feature(vsp1, VSP1_HAS_WPF_VFLIP)) {
                /*
                 * When only vertical flip is supported the WPF implements a
                 * single control (vertical flip).
                 */
                num_flip_ctrls = 1;
        } else {
                /* Otherwise flipping is not supported. */
                num_flip_ctrls = 0;
        }

        vsp1_rwpf_init_ctrls(wpf, num_flip_ctrls);

        if (num_flip_ctrls >= 1) {
                wpf->flip.ctrls.vflip =
                        v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
                                          V4L2_CID_VFLIP, 0, 1, 1, 0);
        }

        if (num_flip_ctrls == 3) {
                wpf->flip.ctrls.hflip =
                        v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
                                          V4L2_CID_HFLIP, 0, 1, 1, 0);
                wpf->flip.ctrls.rotate =
                        v4l2_ctrl_new_std(&wpf->ctrls, &vsp1_wpf_ctrl_ops,
                                          V4L2_CID_ROTATE, 0, 270, 90, 0);
                v4l2_ctrl_cluster(3, &wpf->flip.ctrls.vflip);
        }

        if (wpf->ctrls.error) {
                dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
                        wpf->entity.index);
                return wpf->ctrls.error;
        }

        return 0;
}

/* -----------------------------------------------------------------------------
 * V4L2 Subdevice Core Operations
 */

/* Stream off: quiesce the WPF by direct register writes (no DL run left). */
static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
{
        struct vsp1_rwpf *wpf = to_rwpf(subdev);
        struct vsp1_device *vsp1 = wpf->entity.vsp1;

        if (enable)
                return 0;

        /*
         * Write to registers directly when stopping the stream as there will be
         * no pipeline run to apply the display list.
         */
        vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
        vsp1_write(vsp1, wpf->entity.index * VI6_WPF_OFFSET +
                   VI6_WPF_SRCRPF, 0);

        return 0;
}

/* -----------------------------------------------------------------------------
 * V4L2 Subdevice Operations
 */

static const struct v4l2_subdev_video_ops wpf_video_ops = {
        .s_stream = wpf_s_stream,
};

static const struct v4l2_subdev_ops wpf_ops = {
        .video = &wpf_video_ops,
        .pad = &vsp1_rwpf_pad_ops,
};

/* -----------------------------------------------------------------------------
 * VSP1 Entity Operations
 */

/* Entity destroy hook: release the display list manager owned by the WPF. */
static void vsp1_wpf_destroy(struct vsp1_entity *entity)
{
        struct vsp1_rwpf *wpf = entity_to_rwpf(entity);

        vsp1_dlm_destroy(wpf->dlm);
}

/*
 * Chain a one-shot display list that clears the writeback control register,
 * so writeback automatically disables itself after a single frame.
 */
static int wpf_configure_writeback_chain(struct vsp1_rwpf *wpf,
                                         struct vsp1_dl_list *dl)
{
        unsigned int index = wpf->entity.index;
        struct vsp1_dl_list *dl_next;
        struct vsp1_dl_body *dlb;

        dl_next = vsp1_dl_list_get(wpf->dlm);
        if (!dl_next) {
                dev_err(wpf->entity.vsp1->dev,
                        "Failed to obtain a dl list, disabling writeback\n");
                return -ENOMEM;
        }

        dlb = vsp1_dl_list_get_body0(dl_next);
        vsp1_dl_body_write(dlb, VI6_WPF_WRBCK_CTRL(index), 0);
        vsp1_dl_list_add_chain(dl, dl_next);

        return 0;
}

/*
 * Program the per-stream WPF configuration: output format, strides, byte
 * swapping, rotation buffer, source RPF routing, interrupts and writeback.
 */
static void wpf_configure_stream(struct vsp1_entity *entity,
                                 struct vsp1_pipeline *pipe,
                                 struct vsp1_dl_list *dl,
                                 struct vsp1_dl_body *dlb)
{
        struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
        struct vsp1_device *vsp1 = wpf->entity.vsp1;
        const struct v4l2_mbus_framefmt *source_format;
        const struct v4l2_mbus_framefmt *sink_format;
        unsigned int index = wpf->entity.index;
        unsigned int i;
        u32 outfmt = 0;
        u32 srcrpf = 0;
        int ret;

        sink_format = vsp1_entity_get_pad_format(&wpf->entity,
                                                 wpf->entity.config,
                                                 RWPF_PAD_SINK);
        source_format = vsp1_entity_get_pad_format(&wpf->entity,
                                                   wpf->entity.config,
                                                   RWPF_PAD_SOURCE);

        /* Format */
        if (!pipe->lif || wpf->writeback) {
                const struct v4l2_pix_format_mplane *format = &wpf->format;
                const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;

                outfmt = fmtinfo->hwfmt << VI6_WPF_OUTFMT_WRFMT_SHIFT;

                if (wpf->flip.rotate)
                        outfmt |= VI6_WPF_OUTFMT_ROT;

                if (fmtinfo->alpha)
                        outfmt |= VI6_WPF_OUTFMT_PXA;
                if (fmtinfo->swap_yc)
                        outfmt |= VI6_WPF_OUTFMT_SPYCS;
                if (fmtinfo->swap_uv)
                        outfmt |= VI6_WPF_OUTFMT_SPUVS;

                /* Destination stride and byte swapping. */
                vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_STRIDE_Y,
                               format->plane_fmt[0].bytesperline);
                if (format->num_planes > 1)
                        vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_STRIDE_C,
                                       format->plane_fmt[1].bytesperline);

                vsp1_wpf_write(wpf, dlb, VI6_WPF_DSWAP, fmtinfo->swap);

                /* The rotation line memory is only present on WPF0. */
                if (vsp1_feature(vsp1, VSP1_HAS_WPF_HFLIP) && index == 0)
                        vsp1_wpf_write(wpf, dlb, VI6_WPF_ROT_CTRL,
                                       VI6_WPF_ROT_CTRL_LN16 |
                                       (256 << VI6_WPF_ROT_CTRL_LMEM_WD_SHIFT));
        }

        /* Mismatched media bus codes require colour space conversion. */
        if (sink_format->code != source_format->code)
                outfmt |= VI6_WPF_OUTFMT_CSC;

        wpf->outfmt = outfmt;

        vsp1_dl_body_write(dlb, VI6_DPR_WPF_FPORCH(index),
                           VI6_DPR_WPF_FPORCH_FP_WPFN);

        /*
         * Sources. If the pipeline has a single input and BRx is not used,
         * configure it as the master layer. Otherwise configure all
         * inputs as sub-layers and select the virtual RPF as the master
         * layer.
         */
        for (i = 0; i < vsp1->info->rpf_count; ++i) {
                struct vsp1_rwpf *input = pipe->inputs[i];

                if (!input)
                        continue;

                srcrpf |= (!pipe->brx && pipe->num_inputs == 1)
                        ? VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index)
                        : VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
        }

        if (pipe->brx)
                srcrpf |= pipe->brx->type == VSP1_ENTITY_BRU
                        ? VI6_WPF_SRCRPF_VIRACT_MST
                        : VI6_WPF_SRCRPF_VIRACT2_MST;

        vsp1_wpf_write(wpf, dlb, VI6_WPF_SRCRPF, srcrpf);

        /* Enable interrupts. */
        vsp1_dl_body_write(dlb, VI6_WPF_IRQ_STA(index), 0);
        vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(index),
                           VI6_WPF_IRQ_ENB_DFEE);

        /*
         * Configure writeback for display pipelines (the wpf writeback flag is
         * never set for memory-to-memory pipelines). Start by adding a chained
         * display list to disable writeback after a single frame, and process
         * to enable writeback. If the display list allocation fails don't
         * enable writeback as we wouldn't be able to safely disable it,
         * resulting in possible memory corruption.
         */
        if (wpf->writeback) {
                ret = wpf_configure_writeback_chain(wpf, dl);
                if (ret < 0)
                        wpf->writeback = false;
        }

        vsp1_dl_body_write(dlb, VI6_WPF_WRBCK_CTRL(index),
                           wpf->writeback ? VI6_WPF_WRBCK_CTRL_WBMD : 0);
}

/*
 * Per-frame configuration: latch the pending flip state and program the
 * OUTFMT register with the current alpha value and flip bits.
 */
static void wpf_configure_frame(struct vsp1_entity *entity,
                                struct vsp1_pipeline *pipe,
                                struct vsp1_dl_list *dl,
                                struct vsp1_dl_body *dlb)
{
        const unsigned int mask = BIT(WPF_CTRL_VFLIP) |
                                  BIT(WPF_CTRL_HFLIP);
        struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
        unsigned long flags;
        u32 outfmt;

        spin_lock_irqsave(&wpf->flip.lock, flags);
        wpf->flip.active = (wpf->flip.active & ~mask)
                         | (wpf->flip.pending & mask);
        spin_unlock_irqrestore(&wpf->flip.lock, flags);

        outfmt = (wpf->alpha << VI6_WPF_OUTFMT_PDV_SHIFT) | wpf->outfmt;

        if (wpf->flip.active & BIT(WPF_CTRL_VFLIP))
                outfmt |= VI6_WPF_OUTFMT_FLP;
        if (wpf->flip.active & BIT(WPF_CTRL_HFLIP))
                outfmt |= VI6_WPF_OUTFMT_HFLP;

        vsp1_wpf_write(wpf, dlb, VI6_WPF_OUTFMT, outfmt);
}

/*
 * Per-partition configuration: program clipping for the current partition
 * window and compute the destination memory addresses, accounting for
 * flipping and rotation which move the write start corner.
 */
static void wpf_configure_partition(struct vsp1_entity *entity,
                                    struct vsp1_pipeline *pipe,
                                    struct vsp1_dl_list *dl,
                                    struct vsp1_dl_body *dlb)
{
        struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);
        struct vsp1_device *vsp1 = wpf->entity.vsp1;
        struct vsp1_rwpf_memory mem = wpf->mem;
        const struct v4l2_mbus_framefmt *sink_format;
        const struct v4l2_pix_format_mplane *format = &wpf->format;
        const struct vsp1_format_info *fmtinfo = wpf->fmtinfo;
        unsigned int width;
        unsigned int height;
        unsigned int left;
        unsigned int offset;
        unsigned int flip;
        unsigned int i;

        sink_format = vsp1_entity_get_pad_format(&wpf->entity,
                                                 wpf->entity.config,
                                                 RWPF_PAD_SINK);
        width = sink_format->width;
        height = sink_format->height;
        left = 0;

        /*
         * Cropping. The partition algorithm can split the image into
         * multiple slices.
         */
        if (pipe->partitions > 1) {
                width = pipe->partition->wpf.width;
                left = pipe->partition->wpf.left;
        }

        vsp1_wpf_write(wpf, dlb, VI6_WPF_HSZCLIP, VI6_WPF_SZCLIP_EN |
                       (0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
                       (width << VI6_WPF_SZCLIP_SIZE_SHIFT));
        vsp1_wpf_write(wpf, dlb, VI6_WPF_VSZCLIP, VI6_WPF_SZCLIP_EN |
                       (0 << VI6_WPF_SZCLIP_OFST_SHIFT) |
                       (height << VI6_WPF_SZCLIP_SIZE_SHIFT));

        /*
         * For display pipelines without writeback enabled there's no memory
         * address to configure, return now.
         */
        if (pipe->lif && !wpf->writeback)
                return;

        /*
         * Update the memory offsets based on flipping configuration.
         * The destination addresses point to the locations where the
         * VSP starts writing to memory, which can be any corner of the
         * image depending on the combination of flipping and rotation.
         */

        /*
         * First take the partition left coordinate into account.
         * Compute the offset to order the partitions correctly on the
         * output based on whether flipping is enabled. Consider
         * horizontal flipping when rotation is disabled but vertical
         * flipping when rotation is enabled, as rotating the image
         * switches the horizontal and vertical directions. The offset
         * is applied horizontally or vertically accordingly.
         */
        flip = wpf->flip.active;

        if (flip & BIT(WPF_CTRL_HFLIP) && !wpf->flip.rotate)
                offset = format->width - left - width;
        else if (flip & BIT(WPF_CTRL_VFLIP) && wpf->flip.rotate)
                offset = format->height - left - width;
        else
                offset = left;

        for (i = 0; i < format->num_planes; ++i) {
                unsigned int hsub = i > 0 ? fmtinfo->hsub : 1;
                unsigned int vsub = i > 0 ? fmtinfo->vsub : 1;

                if (wpf->flip.rotate)
                        mem.addr[i] += offset / vsub
                                     * format->plane_fmt[i].bytesperline;
                else
                        mem.addr[i] += offset / hsub
                                     * fmtinfo->bpp[i] / 8;
        }

        if (flip & BIT(WPF_CTRL_VFLIP)) {
                /*
                 * When rotating the output (after rotation) image
                 * height is equal to the partition width (before
                 * rotation). Otherwise it is equal to the output
                 * image height.
                 */
                if (wpf->flip.rotate)
                        height = width;
                else
                        height = format->height;

                mem.addr[0] += (height - 1)
                             * format->plane_fmt[0].bytesperline;

                if (format->num_planes > 1) {
                        offset = (height / fmtinfo->vsub - 1)
                               * format->plane_fmt[1].bytesperline;
                        mem.addr[1] += offset;
                        mem.addr[2] += offset;
                }
        }

        if (wpf->flip.rotate && !(flip & BIT(WPF_CTRL_HFLIP))) {
                unsigned int hoffset = max(0, (int)format->width - 16);

                /*
                 * Compute the output coordinate. The partition
                 * horizontal (left) offset becomes a vertical offset.
                 */
                for (i = 0; i < format->num_planes; ++i) {
                        unsigned int hsub = i > 0 ? fmtinfo->hsub : 1;

                        mem.addr[i] += hoffset / hsub
                                     * fmtinfo->bpp[i] / 8;
                }
        }

        /*
         * On Gen3+ hardware the SPUVS bit has no effect on 3-planar
         * formats. Swap the U and V planes manually in that case.
         */
        if (vsp1->info->gen >= 3 && format->num_planes == 3 &&
            fmtinfo->swap_uv)
                swap(mem.addr[1], mem.addr[2]);

        vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_Y, mem.addr[0]);
        vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_C0, mem.addr[1]);
        vsp1_wpf_write(wpf, dlb, VI6_WPF_DSTM_ADDR_C1, mem.addr[2]);

        /*
         * Writeback operates in single-shot mode and lasts for a single frame,
         * reset the writeback flag to false for the next frame.
         */
        wpf->writeback = false;
}

/* Rotation limits the usable partition width to the line-memory size. */
static unsigned int wpf_max_width(struct vsp1_entity *entity,
                                  struct vsp1_pipeline *pipe)
{
        struct vsp1_rwpf *wpf = to_rwpf(&entity->subdev);

        return wpf->flip.rotate ? 256 : wpf->max_width;
}

/* Record the WPF partition window as computed by the pipeline. */
static void wpf_partition(struct vsp1_entity *entity,
                          struct vsp1_pipeline *pipe,
                          struct vsp1_partition *partition,
                          unsigned int partition_idx,
                          struct vsp1_partition_window *window)
{
        partition->wpf = *window;
}

static const struct vsp1_entity_operations wpf_entity_ops = {
        .destroy = vsp1_wpf_destroy,
        .configure_stream = wpf_configure_stream,
        .configure_frame = wpf_configure_frame,
        .configure_partition = wpf_configure_partition,
        .max_width = wpf_max_width,
        .partition = wpf_partition,
};

/* -----------------------------------------------------------------------------
 * Initialization and Cleanup
 */

/*
 * Allocate and initialize a WPF entity: size limits per hardware generation,
 * media entity, display list manager and controls. Returns the new entity or
 * an ERR_PTR() on failure.
 */
struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
{
        struct vsp1_rwpf *wpf;
        char name[6];
        int ret;

        wpf = devm_kzalloc(vsp1->dev, sizeof(*wpf), GFP_KERNEL);
        if (wpf == NULL)
                return ERR_PTR(-ENOMEM);

        if (vsp1->info->gen == 2) {
                wpf->max_width = WPF_GEN2_MAX_WIDTH;
                wpf->max_height = WPF_GEN2_MAX_HEIGHT;
        } else {
                wpf->max_width = WPF_GEN3_MAX_WIDTH;
                wpf->max_height = WPF_GEN3_MAX_HEIGHT;
        }

        wpf->entity.ops = &wpf_entity_ops;
        wpf->entity.type = VSP1_ENTITY_WPF;
        wpf->entity.index = index;

        /* name[6] is sized for "wpf.N" with a single-digit index. */
        sprintf(name, "wpf.%u", index);
        ret = vsp1_entity_init(vsp1, &wpf->entity, name, 2, &wpf_ops,
                               MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER);
        if (ret < 0)
                return ERR_PTR(ret);

        /* Initialize the display list manager. */
        wpf->dlm = vsp1_dlm_create(vsp1, index, 64);
        if (!wpf->dlm) {
                ret = -ENOMEM;
                goto error;
        }

        /* Initialize the control handler. */
        ret = wpf_init_controls(wpf);
        if (ret < 0) {
                dev_err(vsp1->dev, "wpf%u: failed to initialize controls\n",
                        index);
                goto error;
        }

        v4l2_ctrl_handler_setup(&wpf->ctrls);

        return wpf;

error:
        vsp1_entity_destroy(&wpf->entity);
        return ERR_PTR(ret);
}
linux-master
drivers/media/platform/renesas/vsp1/vsp1_wpf.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2019-2020 Pengutronix, Michael Tretter <[email protected]> * * Helper functions to generate a raw byte sequence payload from values. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/v4l2-controls.h> #include <linux/device.h> #include <linux/export.h> #include <linux/log2.h> #include "nal-rbsp.h" void rbsp_init(struct rbsp *rbsp, void *addr, size_t size, struct nal_rbsp_ops *ops) { if (!rbsp) return; rbsp->data = addr; rbsp->size = size; rbsp->pos = 0; rbsp->ops = ops; rbsp->error = 0; } void rbsp_unsupported(struct rbsp *rbsp) { rbsp->error = -EINVAL; } static int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value); static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value); /* * When reading or writing, the emulation_prevention_three_byte is detected * only when the 2 one bits need to be inserted. Therefore, we are not * actually adding the 0x3 byte, but the 2 one bits and the six 0 bits of the * next byte. 
*/ #define EMULATION_PREVENTION_THREE_BYTE (0x3 << 6) static int add_emulation_prevention_three_byte(struct rbsp *rbsp) { rbsp->num_consecutive_zeros = 0; rbsp_write_bits(rbsp, 8, EMULATION_PREVENTION_THREE_BYTE); return 0; } static int discard_emulation_prevention_three_byte(struct rbsp *rbsp) { unsigned int tmp = 0; rbsp->num_consecutive_zeros = 0; rbsp_read_bits(rbsp, 8, &tmp); if (tmp != EMULATION_PREVENTION_THREE_BYTE) return -EINVAL; return 0; } static inline int rbsp_read_bit(struct rbsp *rbsp) { int shift; int ofs; int bit; int err; if (rbsp->num_consecutive_zeros == 22) { err = discard_emulation_prevention_three_byte(rbsp); if (err) return err; } shift = 7 - (rbsp->pos % 8); ofs = rbsp->pos / 8; if (ofs >= rbsp->size) return -EINVAL; bit = (rbsp->data[ofs] >> shift) & 1; rbsp->pos++; if (bit == 1 || (rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0))) rbsp->num_consecutive_zeros = 0; else rbsp->num_consecutive_zeros++; return bit; } static inline int rbsp_write_bit(struct rbsp *rbsp, bool value) { int shift; int ofs; if (rbsp->num_consecutive_zeros == 22) add_emulation_prevention_three_byte(rbsp); shift = 7 - (rbsp->pos % 8); ofs = rbsp->pos / 8; if (ofs >= rbsp->size) return -EINVAL; rbsp->data[ofs] &= ~(1 << shift); rbsp->data[ofs] |= value << shift; rbsp->pos++; if (value || (rbsp->num_consecutive_zeros < 7 && (rbsp->pos % 8 == 0))) { rbsp->num_consecutive_zeros = 0; } else { rbsp->num_consecutive_zeros++; } return 0; } static inline int rbsp_read_bits(struct rbsp *rbsp, int n, unsigned int *value) { int i; int bit; unsigned int tmp = 0; if (n > 8 * sizeof(*value)) return -EINVAL; for (i = n; i > 0; i--) { bit = rbsp_read_bit(rbsp); if (bit < 0) return bit; tmp |= bit << (i - 1); } if (value) *value = tmp; return 0; } static int rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int value) { int ret; if (n > 8 * sizeof(value)) return -EINVAL; while (n--) { ret = rbsp_write_bit(rbsp, (value >> n) & 1); if (ret) return ret; } return 0; } static int 
rbsp_read_uev(struct rbsp *rbsp, unsigned int *value) { int leading_zero_bits = 0; unsigned int tmp = 0; int ret; while ((ret = rbsp_read_bit(rbsp)) == 0) leading_zero_bits++; if (ret < 0) return ret; if (leading_zero_bits > 0) { ret = rbsp_read_bits(rbsp, leading_zero_bits, &tmp); if (ret) return ret; } if (value) *value = (1 << leading_zero_bits) - 1 + tmp; return 0; } static int rbsp_write_uev(struct rbsp *rbsp, unsigned int *value) { int ret; int leading_zero_bits; if (!value) return -EINVAL; leading_zero_bits = ilog2(*value + 1); ret = rbsp_write_bits(rbsp, leading_zero_bits, 0); if (ret) return ret; return rbsp_write_bits(rbsp, leading_zero_bits + 1, *value + 1); } static int rbsp_read_sev(struct rbsp *rbsp, int *value) { int ret; unsigned int tmp; ret = rbsp_read_uev(rbsp, &tmp); if (ret) return ret; if (value) { if (tmp & 1) *value = (tmp + 1) / 2; else *value = -(tmp / 2); } return 0; } static int rbsp_write_sev(struct rbsp *rbsp, int *value) { unsigned int tmp; if (!value) return -EINVAL; if (*value > 0) tmp = (2 * (*value)) | 1; else tmp = -2 * (*value); return rbsp_write_uev(rbsp, &tmp); } static int __rbsp_write_bit(struct rbsp *rbsp, int *value) { return rbsp_write_bit(rbsp, *value); } static int __rbsp_write_bits(struct rbsp *rbsp, int n, unsigned int *value) { return rbsp_write_bits(rbsp, n, *value); } struct nal_rbsp_ops write = { .rbsp_bit = __rbsp_write_bit, .rbsp_bits = __rbsp_write_bits, .rbsp_uev = rbsp_write_uev, .rbsp_sev = rbsp_write_sev, }; static int __rbsp_read_bit(struct rbsp *rbsp, int *value) { int tmp = rbsp_read_bit(rbsp); if (tmp < 0) return tmp; *value = tmp; return 0; } struct nal_rbsp_ops read = { .rbsp_bit = __rbsp_read_bit, .rbsp_bits = rbsp_read_bits, .rbsp_uev = rbsp_read_uev, .rbsp_sev = rbsp_read_sev, }; void rbsp_bit(struct rbsp *rbsp, int *value) { if (rbsp->error) return; rbsp->error = rbsp->ops->rbsp_bit(rbsp, value); } void rbsp_bits(struct rbsp *rbsp, int n, int *value) { if (rbsp->error) return; rbsp->error = 
rbsp->ops->rbsp_bits(rbsp, n, value); } void rbsp_uev(struct rbsp *rbsp, unsigned int *value) { if (rbsp->error) return; rbsp->error = rbsp->ops->rbsp_uev(rbsp, value); } void rbsp_sev(struct rbsp *rbsp, int *value) { if (rbsp->error) return; rbsp->error = rbsp->ops->rbsp_sev(rbsp, value); } void rbsp_trailing_bits(struct rbsp *rbsp) { unsigned int rbsp_stop_one_bit = 1; unsigned int rbsp_alignment_zero_bit = 0; rbsp_bit(rbsp, &rbsp_stop_one_bit); rbsp_bits(rbsp, round_up(rbsp->pos, 8) - rbsp->pos, &rbsp_alignment_zero_bit); }
linux-master
drivers/media/platform/allegro-dvt/nal-rbsp.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2019-2020 Pengutronix, Michael Tretter <[email protected]> * * Convert NAL units between raw byte sequence payloads (RBSP) and C structs. * * The conversion is defined in "ITU-T Rec. H.265 (02/2018) high efficiency * video coding". Decoder drivers may use the parser to parse RBSP from * encoded streams and configure the hardware, if the hardware is not able to * parse RBSP itself. Encoder drivers may use the generator to generate the * RBSP for VPS/SPS/PPS nal units and add them to the encoded stream if the * hardware does not generate the units. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/v4l2-controls.h> #include <linux/device.h> #include <linux/export.h> #include <linux/log2.h> #include "nal-hevc.h" #include "nal-rbsp.h" /* * See Rec. ITU-T H.265 (02/2018) Table 7-1 - NAL unit type codes and NAL unit * type classes */ enum nal_unit_type { VPS_NUT = 32, SPS_NUT = 33, PPS_NUT = 34, FD_NUT = 38, }; static void nal_hevc_write_start_code_prefix(struct rbsp *rbsp) { u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8); int i = 4; if (DIV_ROUND_UP(rbsp->pos, 8) + i > rbsp->size) { rbsp->error = -EINVAL; return; } p[0] = 0x00; p[1] = 0x00; p[2] = 0x00; p[3] = 0x01; rbsp->pos += i * 8; } static void nal_hevc_read_start_code_prefix(struct rbsp *rbsp) { u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8); int i = 4; if (DIV_ROUND_UP(rbsp->pos, 8) + i > rbsp->size) { rbsp->error = -EINVAL; return; } if (p[0] != 0x00 || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0x01) { rbsp->error = -EINVAL; return; } rbsp->pos += i * 8; } static void nal_hevc_write_filler_data(struct rbsp *rbsp) { u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8); int i; /* Keep 1 byte extra for terminating the NAL unit */ i = rbsp->size - DIV_ROUND_UP(rbsp->pos, 8) - 1; memset(p, 0xff, i); rbsp->pos += i * 8; } static void nal_hevc_read_filler_data(struct rbsp *rbsp) { u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 
8); while (*p == 0xff) { if (DIV_ROUND_UP(rbsp->pos, 8) > rbsp->size) { rbsp->error = -EINVAL; return; } p++; rbsp->pos += 8; } } static void nal_hevc_rbsp_profile_tier_level(struct rbsp *rbsp, struct nal_hevc_profile_tier_level *ptl) { unsigned int i; unsigned int max_num_sub_layers_minus_1 = 0; rbsp_bits(rbsp, 2, &ptl->general_profile_space); rbsp_bit(rbsp, &ptl->general_tier_flag); rbsp_bits(rbsp, 5, &ptl->general_profile_idc); for (i = 0; i < 32; i++) rbsp_bit(rbsp, &ptl->general_profile_compatibility_flag[i]); rbsp_bit(rbsp, &ptl->general_progressive_source_flag); rbsp_bit(rbsp, &ptl->general_interlaced_source_flag); rbsp_bit(rbsp, &ptl->general_non_packed_constraint_flag); rbsp_bit(rbsp, &ptl->general_frame_only_constraint_flag); if (ptl->general_profile_idc == 4 || ptl->general_profile_compatibility_flag[4] || ptl->general_profile_idc == 5 || ptl->general_profile_compatibility_flag[5] || ptl->general_profile_idc == 6 || ptl->general_profile_compatibility_flag[6] || ptl->general_profile_idc == 7 || ptl->general_profile_compatibility_flag[7] || ptl->general_profile_idc == 8 || ptl->general_profile_compatibility_flag[8] || ptl->general_profile_idc == 9 || ptl->general_profile_compatibility_flag[9] || ptl->general_profile_idc == 10 || ptl->general_profile_compatibility_flag[10]) { rbsp_bit(rbsp, &ptl->general_max_12bit_constraint_flag); rbsp_bit(rbsp, &ptl->general_max_10bit_constraint_flag); rbsp_bit(rbsp, &ptl->general_max_8bit_constraint_flag); rbsp_bit(rbsp, &ptl->general_max_422chroma_constraint_flag); rbsp_bit(rbsp, &ptl->general_max_420chroma_constraint_flag); rbsp_bit(rbsp, &ptl->general_max_monochrome_constraint_flag); rbsp_bit(rbsp, &ptl->general_intra_constraint_flag); rbsp_bit(rbsp, &ptl->general_one_picture_only_constraint_flag); rbsp_bit(rbsp, &ptl->general_lower_bit_rate_constraint_flag); if (ptl->general_profile_idc == 5 || ptl->general_profile_compatibility_flag[5] || ptl->general_profile_idc == 9 || ptl->general_profile_compatibility_flag[9] || 
ptl->general_profile_idc == 10 || ptl->general_profile_compatibility_flag[10]) { rbsp_bit(rbsp, &ptl->general_max_14bit_constraint_flag); rbsp_bits(rbsp, 32, &ptl->general_reserved_zero_33bits); rbsp_bits(rbsp, 33 - 32, &ptl->general_reserved_zero_33bits); } else { rbsp_bits(rbsp, 32, &ptl->general_reserved_zero_34bits); rbsp_bits(rbsp, 34 - 2, &ptl->general_reserved_zero_34bits); } } else if (ptl->general_profile_idc == 2 || ptl->general_profile_compatibility_flag[2]) { rbsp_bits(rbsp, 7, &ptl->general_reserved_zero_7bits); rbsp_bit(rbsp, &ptl->general_one_picture_only_constraint_flag); rbsp_bits(rbsp, 32, &ptl->general_reserved_zero_35bits); rbsp_bits(rbsp, 35 - 32, &ptl->general_reserved_zero_35bits); } else { rbsp_bits(rbsp, 32, &ptl->general_reserved_zero_43bits); rbsp_bits(rbsp, 43 - 32, &ptl->general_reserved_zero_43bits); } if ((ptl->general_profile_idc >= 1 && ptl->general_profile_idc <= 5) || ptl->general_profile_idc == 9 || ptl->general_profile_compatibility_flag[1] || ptl->general_profile_compatibility_flag[2] || ptl->general_profile_compatibility_flag[3] || ptl->general_profile_compatibility_flag[4] || ptl->general_profile_compatibility_flag[5] || ptl->general_profile_compatibility_flag[9]) rbsp_bit(rbsp, &ptl->general_inbld_flag); else rbsp_bit(rbsp, &ptl->general_reserved_zero_bit); rbsp_bits(rbsp, 8, &ptl->general_level_idc); if (max_num_sub_layers_minus_1 > 0) rbsp_unsupported(rbsp); } static void nal_hevc_rbsp_vps(struct rbsp *rbsp, struct nal_hevc_vps *vps) { unsigned int i, j; unsigned int reserved_0xffff_16bits = 0xffff; rbsp_bits(rbsp, 4, &vps->video_parameter_set_id); rbsp_bit(rbsp, &vps->base_layer_internal_flag); rbsp_bit(rbsp, &vps->base_layer_available_flag); rbsp_bits(rbsp, 6, &vps->max_layers_minus1); rbsp_bits(rbsp, 3, &vps->max_sub_layers_minus1); rbsp_bits(rbsp, 1, &vps->temporal_id_nesting_flag); rbsp_bits(rbsp, 16, &reserved_0xffff_16bits); nal_hevc_rbsp_profile_tier_level(rbsp, &vps->profile_tier_level); rbsp_bit(rbsp, 
&vps->sub_layer_ordering_info_present_flag); for (i = vps->sub_layer_ordering_info_present_flag ? 0 : vps->max_sub_layers_minus1; i <= vps->max_sub_layers_minus1; i++) { rbsp_uev(rbsp, &vps->max_dec_pic_buffering_minus1[i]); rbsp_uev(rbsp, &vps->max_num_reorder_pics[i]); rbsp_uev(rbsp, &vps->max_latency_increase_plus1[i]); } rbsp_bits(rbsp, 6, &vps->max_layer_id); rbsp_uev(rbsp, &vps->num_layer_sets_minus1); for (i = 0; i <= vps->num_layer_sets_minus1; i++) for (j = 0; j <= vps->max_layer_id; j++) rbsp_bit(rbsp, &vps->layer_id_included_flag[i][j]); rbsp_bit(rbsp, &vps->timing_info_present_flag); if (vps->timing_info_present_flag) rbsp_unsupported(rbsp); rbsp_bit(rbsp, &vps->extension_flag); if (vps->extension_flag) rbsp_unsupported(rbsp); } static void nal_hevc_rbsp_sub_layer_hrd_parameters(struct rbsp *rbsp, struct nal_hevc_sub_layer_hrd_parameters *hrd) { unsigned int i; unsigned int cpb_cnt = 1; for (i = 0; i < cpb_cnt; i++) { rbsp_uev(rbsp, &hrd->bit_rate_value_minus1[i]); rbsp_uev(rbsp, &hrd->cpb_size_value_minus1[i]); rbsp_bit(rbsp, &hrd->cbr_flag[i]); } } static void nal_hevc_rbsp_hrd_parameters(struct rbsp *rbsp, struct nal_hevc_hrd_parameters *hrd) { unsigned int i; unsigned int max_num_sub_layers_minus_1 = 0; rbsp_bit(rbsp, &hrd->nal_hrd_parameters_present_flag); rbsp_bit(rbsp, &hrd->vcl_hrd_parameters_present_flag); if (hrd->nal_hrd_parameters_present_flag || hrd->vcl_hrd_parameters_present_flag) { rbsp_bit(rbsp, &hrd->sub_pic_hrd_params_present_flag); if (hrd->sub_pic_hrd_params_present_flag) { rbsp_bits(rbsp, 8, &hrd->tick_divisor_minus2); rbsp_bits(rbsp, 5, &hrd->du_cpb_removal_delay_increment_length_minus1); rbsp_bit(rbsp, &hrd->sub_pic_cpb_params_in_pic_timing_sei_flag); rbsp_bits(rbsp, 5, &hrd->dpb_output_delay_du_length_minus1); } rbsp_bits(rbsp, 4, &hrd->bit_rate_scale); rbsp_bits(rbsp, 4, &hrd->cpb_size_scale); if (hrd->sub_pic_hrd_params_present_flag) rbsp_bits(rbsp, 4, &hrd->cpb_size_du_scale); rbsp_bits(rbsp, 5, 
&hrd->initial_cpb_removal_delay_length_minus1); rbsp_bits(rbsp, 5, &hrd->au_cpb_removal_delay_length_minus1); rbsp_bits(rbsp, 5, &hrd->dpb_output_delay_length_minus1); } for (i = 0; i <= max_num_sub_layers_minus_1; i++) { rbsp_bit(rbsp, &hrd->fixed_pic_rate_general_flag[i]); if (!hrd->fixed_pic_rate_general_flag[i]) rbsp_bit(rbsp, &hrd->fixed_pic_rate_within_cvs_flag[i]); if (hrd->fixed_pic_rate_within_cvs_flag[i]) rbsp_uev(rbsp, &hrd->elemental_duration_in_tc_minus1[i]); else rbsp_bit(rbsp, &hrd->low_delay_hrd_flag[i]); if (!hrd->low_delay_hrd_flag[i]) rbsp_uev(rbsp, &hrd->cpb_cnt_minus1[i]); if (hrd->nal_hrd_parameters_present_flag) nal_hevc_rbsp_sub_layer_hrd_parameters(rbsp, &hrd->vcl_hrd[i]); if (hrd->vcl_hrd_parameters_present_flag) nal_hevc_rbsp_sub_layer_hrd_parameters(rbsp, &hrd->vcl_hrd[i]); } } static void nal_hevc_rbsp_vui_parameters(struct rbsp *rbsp, struct nal_hevc_vui_parameters *vui) { if (!vui) { rbsp->error = -EINVAL; return; } rbsp_bit(rbsp, &vui->aspect_ratio_info_present_flag); if (vui->aspect_ratio_info_present_flag) { rbsp_bits(rbsp, 8, &vui->aspect_ratio_idc); if (vui->aspect_ratio_idc == 255) { rbsp_bits(rbsp, 16, &vui->sar_width); rbsp_bits(rbsp, 16, &vui->sar_height); } } rbsp_bit(rbsp, &vui->overscan_info_present_flag); if (vui->overscan_info_present_flag) rbsp_bit(rbsp, &vui->overscan_appropriate_flag); rbsp_bit(rbsp, &vui->video_signal_type_present_flag); if (vui->video_signal_type_present_flag) { rbsp_bits(rbsp, 3, &vui->video_format); rbsp_bit(rbsp, &vui->video_full_range_flag); rbsp_bit(rbsp, &vui->colour_description_present_flag); if (vui->colour_description_present_flag) { rbsp_bits(rbsp, 8, &vui->colour_primaries); rbsp_bits(rbsp, 8, &vui->transfer_characteristics); rbsp_bits(rbsp, 8, &vui->matrix_coeffs); } } rbsp_bit(rbsp, &vui->chroma_loc_info_present_flag); if (vui->chroma_loc_info_present_flag) { rbsp_uev(rbsp, &vui->chroma_sample_loc_type_top_field); rbsp_uev(rbsp, &vui->chroma_sample_loc_type_bottom_field); } 
rbsp_bit(rbsp, &vui->neutral_chroma_indication_flag); rbsp_bit(rbsp, &vui->field_seq_flag); rbsp_bit(rbsp, &vui->frame_field_info_present_flag); rbsp_bit(rbsp, &vui->default_display_window_flag); if (vui->default_display_window_flag) { rbsp_uev(rbsp, &vui->def_disp_win_left_offset); rbsp_uev(rbsp, &vui->def_disp_win_right_offset); rbsp_uev(rbsp, &vui->def_disp_win_top_offset); rbsp_uev(rbsp, &vui->def_disp_win_bottom_offset); } rbsp_bit(rbsp, &vui->vui_timing_info_present_flag); if (vui->vui_timing_info_present_flag) { rbsp_bits(rbsp, 32, &vui->vui_num_units_in_tick); rbsp_bits(rbsp, 32, &vui->vui_time_scale); rbsp_bit(rbsp, &vui->vui_poc_proportional_to_timing_flag); if (vui->vui_poc_proportional_to_timing_flag) rbsp_uev(rbsp, &vui->vui_num_ticks_poc_diff_one_minus1); rbsp_bit(rbsp, &vui->vui_hrd_parameters_present_flag); if (vui->vui_hrd_parameters_present_flag) nal_hevc_rbsp_hrd_parameters(rbsp, &vui->nal_hrd_parameters); } rbsp_bit(rbsp, &vui->bitstream_restriction_flag); if (vui->bitstream_restriction_flag) { rbsp_bit(rbsp, &vui->tiles_fixed_structure_flag); rbsp_bit(rbsp, &vui->motion_vectors_over_pic_boundaries_flag); rbsp_bit(rbsp, &vui->restricted_ref_pic_lists_flag); rbsp_uev(rbsp, &vui->min_spatial_segmentation_idc); rbsp_uev(rbsp, &vui->max_bytes_per_pic_denom); rbsp_uev(rbsp, &vui->max_bits_per_min_cu_denom); rbsp_uev(rbsp, &vui->log2_max_mv_length_horizontal); rbsp_uev(rbsp, &vui->log2_max_mv_length_vertical); } } static void nal_hevc_rbsp_sps(struct rbsp *rbsp, struct nal_hevc_sps *sps) { unsigned int i; rbsp_bits(rbsp, 4, &sps->video_parameter_set_id); rbsp_bits(rbsp, 3, &sps->max_sub_layers_minus1); rbsp_bit(rbsp, &sps->temporal_id_nesting_flag); nal_hevc_rbsp_profile_tier_level(rbsp, &sps->profile_tier_level); rbsp_uev(rbsp, &sps->seq_parameter_set_id); rbsp_uev(rbsp, &sps->chroma_format_idc); if (sps->chroma_format_idc == 3) rbsp_bit(rbsp, &sps->separate_colour_plane_flag); rbsp_uev(rbsp, &sps->pic_width_in_luma_samples); rbsp_uev(rbsp, 
&sps->pic_height_in_luma_samples); rbsp_bit(rbsp, &sps->conformance_window_flag); if (sps->conformance_window_flag) { rbsp_uev(rbsp, &sps->conf_win_left_offset); rbsp_uev(rbsp, &sps->conf_win_right_offset); rbsp_uev(rbsp, &sps->conf_win_top_offset); rbsp_uev(rbsp, &sps->conf_win_bottom_offset); } rbsp_uev(rbsp, &sps->bit_depth_luma_minus8); rbsp_uev(rbsp, &sps->bit_depth_chroma_minus8); rbsp_uev(rbsp, &sps->log2_max_pic_order_cnt_lsb_minus4); rbsp_bit(rbsp, &sps->sub_layer_ordering_info_present_flag); for (i = (sps->sub_layer_ordering_info_present_flag ? 0 : sps->max_sub_layers_minus1); i <= sps->max_sub_layers_minus1; i++) { rbsp_uev(rbsp, &sps->max_dec_pic_buffering_minus1[i]); rbsp_uev(rbsp, &sps->max_num_reorder_pics[i]); rbsp_uev(rbsp, &sps->max_latency_increase_plus1[i]); } rbsp_uev(rbsp, &sps->log2_min_luma_coding_block_size_minus3); rbsp_uev(rbsp, &sps->log2_diff_max_min_luma_coding_block_size); rbsp_uev(rbsp, &sps->log2_min_luma_transform_block_size_minus2); rbsp_uev(rbsp, &sps->log2_diff_max_min_luma_transform_block_size); rbsp_uev(rbsp, &sps->max_transform_hierarchy_depth_inter); rbsp_uev(rbsp, &sps->max_transform_hierarchy_depth_intra); rbsp_bit(rbsp, &sps->scaling_list_enabled_flag); if (sps->scaling_list_enabled_flag) rbsp_unsupported(rbsp); rbsp_bit(rbsp, &sps->amp_enabled_flag); rbsp_bit(rbsp, &sps->sample_adaptive_offset_enabled_flag); rbsp_bit(rbsp, &sps->pcm_enabled_flag); if (sps->pcm_enabled_flag) { rbsp_bits(rbsp, 4, &sps->pcm_sample_bit_depth_luma_minus1); rbsp_bits(rbsp, 4, &sps->pcm_sample_bit_depth_chroma_minus1); rbsp_uev(rbsp, &sps->log2_min_pcm_luma_coding_block_size_minus3); rbsp_uev(rbsp, &sps->log2_diff_max_min_pcm_luma_coding_block_size); rbsp_bit(rbsp, &sps->pcm_loop_filter_disabled_flag); } rbsp_uev(rbsp, &sps->num_short_term_ref_pic_sets); if (sps->num_short_term_ref_pic_sets > 0) rbsp_unsupported(rbsp); rbsp_bit(rbsp, &sps->long_term_ref_pics_present_flag); if (sps->long_term_ref_pics_present_flag) rbsp_unsupported(rbsp); 
rbsp_bit(rbsp, &sps->sps_temporal_mvp_enabled_flag); rbsp_bit(rbsp, &sps->strong_intra_smoothing_enabled_flag); rbsp_bit(rbsp, &sps->vui_parameters_present_flag); if (sps->vui_parameters_present_flag) nal_hevc_rbsp_vui_parameters(rbsp, &sps->vui); rbsp_bit(rbsp, &sps->extension_present_flag); if (sps->extension_present_flag) { rbsp_bit(rbsp, &sps->sps_range_extension_flag); rbsp_bit(rbsp, &sps->sps_multilayer_extension_flag); rbsp_bit(rbsp, &sps->sps_3d_extension_flag); rbsp_bit(rbsp, &sps->sps_scc_extension_flag); rbsp_bits(rbsp, 5, &sps->sps_extension_4bits); } if (sps->sps_range_extension_flag) rbsp_unsupported(rbsp); if (sps->sps_multilayer_extension_flag) rbsp_unsupported(rbsp); if (sps->sps_3d_extension_flag) rbsp_unsupported(rbsp); if (sps->sps_scc_extension_flag) rbsp_unsupported(rbsp); if (sps->sps_extension_4bits) rbsp_unsupported(rbsp); } static void nal_hevc_rbsp_pps(struct rbsp *rbsp, struct nal_hevc_pps *pps) { unsigned int i; rbsp_uev(rbsp, &pps->pps_pic_parameter_set_id); rbsp_uev(rbsp, &pps->pps_seq_parameter_set_id); rbsp_bit(rbsp, &pps->dependent_slice_segments_enabled_flag); rbsp_bit(rbsp, &pps->output_flag_present_flag); rbsp_bits(rbsp, 3, &pps->num_extra_slice_header_bits); rbsp_bit(rbsp, &pps->sign_data_hiding_enabled_flag); rbsp_bit(rbsp, &pps->cabac_init_present_flag); rbsp_uev(rbsp, &pps->num_ref_idx_l0_default_active_minus1); rbsp_uev(rbsp, &pps->num_ref_idx_l1_default_active_minus1); rbsp_sev(rbsp, &pps->init_qp_minus26); rbsp_bit(rbsp, &pps->constrained_intra_pred_flag); rbsp_bit(rbsp, &pps->transform_skip_enabled_flag); rbsp_bit(rbsp, &pps->cu_qp_delta_enabled_flag); if (pps->cu_qp_delta_enabled_flag) rbsp_uev(rbsp, &pps->diff_cu_qp_delta_depth); rbsp_sev(rbsp, &pps->pps_cb_qp_offset); rbsp_sev(rbsp, &pps->pps_cr_qp_offset); rbsp_bit(rbsp, &pps->pps_slice_chroma_qp_offsets_present_flag); rbsp_bit(rbsp, &pps->weighted_pred_flag); rbsp_bit(rbsp, &pps->weighted_bipred_flag); rbsp_bit(rbsp, &pps->transquant_bypass_enabled_flag); 
rbsp_bit(rbsp, &pps->tiles_enabled_flag); rbsp_bit(rbsp, &pps->entropy_coding_sync_enabled_flag); if (pps->tiles_enabled_flag) { rbsp_uev(rbsp, &pps->num_tile_columns_minus1); rbsp_uev(rbsp, &pps->num_tile_rows_minus1); rbsp_bit(rbsp, &pps->uniform_spacing_flag); if (!pps->uniform_spacing_flag) { for (i = 0; i < pps->num_tile_columns_minus1; i++) rbsp_uev(rbsp, &pps->column_width_minus1[i]); for (i = 0; i < pps->num_tile_rows_minus1; i++) rbsp_uev(rbsp, &pps->row_height_minus1[i]); } rbsp_bit(rbsp, &pps->loop_filter_across_tiles_enabled_flag); } rbsp_bit(rbsp, &pps->pps_loop_filter_across_slices_enabled_flag); rbsp_bit(rbsp, &pps->deblocking_filter_control_present_flag); if (pps->deblocking_filter_control_present_flag) { rbsp_bit(rbsp, &pps->deblocking_filter_override_enabled_flag); rbsp_bit(rbsp, &pps->pps_deblocking_filter_disabled_flag); if (!pps->pps_deblocking_filter_disabled_flag) { rbsp_sev(rbsp, &pps->pps_beta_offset_div2); rbsp_sev(rbsp, &pps->pps_tc_offset_div2); } } rbsp_bit(rbsp, &pps->pps_scaling_list_data_present_flag); if (pps->pps_scaling_list_data_present_flag) rbsp_unsupported(rbsp); rbsp_bit(rbsp, &pps->lists_modification_present_flag); rbsp_uev(rbsp, &pps->log2_parallel_merge_level_minus2); rbsp_bit(rbsp, &pps->slice_segment_header_extension_present_flag); rbsp_bit(rbsp, &pps->pps_extension_present_flag); if (pps->pps_extension_present_flag) { rbsp_bit(rbsp, &pps->pps_range_extension_flag); rbsp_bit(rbsp, &pps->pps_multilayer_extension_flag); rbsp_bit(rbsp, &pps->pps_3d_extension_flag); rbsp_bit(rbsp, &pps->pps_scc_extension_flag); rbsp_bits(rbsp, 4, &pps->pps_extension_4bits); } if (pps->pps_range_extension_flag) rbsp_unsupported(rbsp); if (pps->pps_multilayer_extension_flag) rbsp_unsupported(rbsp); if (pps->pps_3d_extension_flag) rbsp_unsupported(rbsp); if (pps->pps_scc_extension_flag) rbsp_unsupported(rbsp); if (pps->pps_extension_4bits) rbsp_unsupported(rbsp); } /** * nal_hevc_write_vps() - Write PPS NAL unit into RBSP format * @dev: device 
pointer * @dest: the buffer that is filled with RBSP data * @n: maximum size of @dest in bytes * @vps: &struct nal_hevc_vps to convert to RBSP * * Convert @vps to RBSP data and write it into @dest. * * The size of the VPS NAL unit is not known in advance and this function will * fail, if @dest does not hold sufficient space for the VPS NAL unit. * * Return: number of bytes written to @dest or negative error code */ ssize_t nal_hevc_write_vps(const struct device *dev, void *dest, size_t n, struct nal_hevc_vps *vps) { struct rbsp rbsp; unsigned int forbidden_zero_bit = 0; unsigned int nal_unit_type = VPS_NUT; unsigned int nuh_layer_id = 0; unsigned int nuh_temporal_id_plus1 = 1; if (!dest) return -EINVAL; rbsp_init(&rbsp, dest, n, &write); nal_hevc_write_start_code_prefix(&rbsp); /* NAL unit header */ rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 6, &nal_unit_type); rbsp_bits(&rbsp, 6, &nuh_layer_id); rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1); nal_hevc_rbsp_vps(&rbsp, vps); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_hevc_write_vps); /** * nal_hevc_read_vps() - Read VPS NAL unit from RBSP format * @dev: device pointer * @vps: the &struct nal_hevc_vps to fill from the RBSP data * @src: the buffer that contains the RBSP data * @n: size of @src in bytes * * Read RBSP data from @src and use it to fill @vps. 
* * Return: number of bytes read from @src or negative error code */ ssize_t nal_hevc_read_vps(const struct device *dev, struct nal_hevc_vps *vps, void *src, size_t n) { struct rbsp rbsp; unsigned int forbidden_zero_bit; unsigned int nal_unit_type; unsigned int nuh_layer_id; unsigned int nuh_temporal_id_plus1; if (!src) return -EINVAL; rbsp_init(&rbsp, src, n, &read); nal_hevc_read_start_code_prefix(&rbsp); rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 6, &nal_unit_type); rbsp_bits(&rbsp, 6, &nuh_layer_id); rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1); if (rbsp.error || forbidden_zero_bit != 0 || nal_unit_type != VPS_NUT) return -EINVAL; nal_hevc_rbsp_vps(&rbsp, vps); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_hevc_read_vps); /** * nal_hevc_write_sps() - Write SPS NAL unit into RBSP format * @dev: device pointer * @dest: the buffer that is filled with RBSP data * @n: maximum size of @dest in bytes * @sps: &struct nal_hevc_sps to convert to RBSP * * Convert @sps to RBSP data and write it into @dest. * * The size of the SPS NAL unit is not known in advance and this function will * fail, if @dest does not hold sufficient space for the SPS NAL unit. 
* * Return: number of bytes written to @dest or negative error code */ ssize_t nal_hevc_write_sps(const struct device *dev, void *dest, size_t n, struct nal_hevc_sps *sps) { struct rbsp rbsp; unsigned int forbidden_zero_bit = 0; unsigned int nal_unit_type = SPS_NUT; unsigned int nuh_layer_id = 0; unsigned int nuh_temporal_id_plus1 = 1; if (!dest) return -EINVAL; rbsp_init(&rbsp, dest, n, &write); nal_hevc_write_start_code_prefix(&rbsp); /* NAL unit header */ rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 6, &nal_unit_type); rbsp_bits(&rbsp, 6, &nuh_layer_id); rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1); nal_hevc_rbsp_sps(&rbsp, sps); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_hevc_write_sps); /** * nal_hevc_read_sps() - Read SPS NAL unit from RBSP format * @dev: device pointer * @sps: the &struct nal_hevc_sps to fill from the RBSP data * @src: the buffer that contains the RBSP data * @n: size of @src in bytes * * Read RBSP data from @src and use it to fill @sps. 
* * Return: number of bytes read from @src or negative error code */ ssize_t nal_hevc_read_sps(const struct device *dev, struct nal_hevc_sps *sps, void *src, size_t n) { struct rbsp rbsp; unsigned int forbidden_zero_bit; unsigned int nal_unit_type; unsigned int nuh_layer_id; unsigned int nuh_temporal_id_plus1; if (!src) return -EINVAL; rbsp_init(&rbsp, src, n, &read); nal_hevc_read_start_code_prefix(&rbsp); rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 6, &nal_unit_type); rbsp_bits(&rbsp, 6, &nuh_layer_id); rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1); if (rbsp.error || forbidden_zero_bit != 0 || nal_unit_type != SPS_NUT) return -EINVAL; nal_hevc_rbsp_sps(&rbsp, sps); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_hevc_read_sps); /** * nal_hevc_write_pps() - Write PPS NAL unit into RBSP format * @dev: device pointer * @dest: the buffer that is filled with RBSP data * @n: maximum size of @dest in bytes * @pps: &struct nal_hevc_pps to convert to RBSP * * Convert @pps to RBSP data and write it into @dest. * * The size of the PPS NAL unit is not known in advance and this function will * fail, if @dest does not hold sufficient space for the PPS NAL unit. 
* * Return: number of bytes written to @dest or negative error code */ ssize_t nal_hevc_write_pps(const struct device *dev, void *dest, size_t n, struct nal_hevc_pps *pps) { struct rbsp rbsp; unsigned int forbidden_zero_bit = 0; unsigned int nal_unit_type = PPS_NUT; unsigned int nuh_layer_id = 0; unsigned int nuh_temporal_id_plus1 = 1; if (!dest) return -EINVAL; rbsp_init(&rbsp, dest, n, &write); nal_hevc_write_start_code_prefix(&rbsp); /* NAL unit header */ rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 6, &nal_unit_type); rbsp_bits(&rbsp, 6, &nuh_layer_id); rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1); nal_hevc_rbsp_pps(&rbsp, pps); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_hevc_write_pps); /** * nal_hevc_read_pps() - Read PPS NAL unit from RBSP format * @dev: device pointer * @pps: the &struct nal_hevc_pps to fill from the RBSP data * @src: the buffer that contains the RBSP data * @n: size of @src in bytes * * Read RBSP data from @src and use it to fill @pps. 
* * Return: number of bytes read from @src or negative error code */ ssize_t nal_hevc_read_pps(const struct device *dev, struct nal_hevc_pps *pps, void *src, size_t n) { struct rbsp rbsp; unsigned int forbidden_zero_bit; unsigned int nal_unit_type; unsigned int nuh_layer_id; unsigned int nuh_temporal_id_plus1; if (!src) return -EINVAL; rbsp_init(&rbsp, src, n, &read); nal_hevc_read_start_code_prefix(&rbsp); /* NAL unit header */ rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 6, &nal_unit_type); rbsp_bits(&rbsp, 6, &nuh_layer_id); rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1); nal_hevc_rbsp_pps(&rbsp, pps); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_hevc_read_pps); /** * nal_hevc_write_filler() - Write filler data RBSP * @dev: device pointer * @dest: buffer to fill with filler data * @n: size of the buffer to fill with filler data * * Write a filler data RBSP to @dest with a size of @n bytes and return the * number of written filler data bytes. * * Use this function to generate dummy data in an RBSP data stream that can be * safely ignored by hevc decoders. * * The RBSP format of the filler data is specified in Rec. ITU-T H.265 * (02/2018) 7.3.2.8 Filler data RBSP syntax. 
* * Return: number of filler data bytes (including marker) or negative error */ ssize_t nal_hevc_write_filler(const struct device *dev, void *dest, size_t n) { struct rbsp rbsp; unsigned int forbidden_zero_bit = 0; unsigned int nal_unit_type = FD_NUT; unsigned int nuh_layer_id = 0; unsigned int nuh_temporal_id_plus1 = 1; if (!dest) return -EINVAL; rbsp_init(&rbsp, dest, n, &write); nal_hevc_write_start_code_prefix(&rbsp); rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 6, &nal_unit_type); rbsp_bits(&rbsp, 6, &nuh_layer_id); rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1); nal_hevc_write_filler_data(&rbsp); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_hevc_write_filler); /** * nal_hevc_read_filler() - Read filler data RBSP * @dev: device pointer * @src: buffer with RBSP data that is read * @n: maximum size of src that shall be read * * Read a filler data RBSP from @src up to a maximum size of @n bytes and * return the size of the filler data in bytes including the marker. * * This function is used to parse filler data and skip the respective bytes in * the RBSP data. * * The RBSP format of the filler data is specified in Rec. ITU-T H.265 * (02/2018) 7.3.2.8 Filler data RBSP syntax. 
* * Return: number of filler data bytes (including marker) or negative error */ ssize_t nal_hevc_read_filler(const struct device *dev, void *src, size_t n) { struct rbsp rbsp; unsigned int forbidden_zero_bit; unsigned int nal_unit_type; unsigned int nuh_layer_id; unsigned int nuh_temporal_id_plus1; if (!src) return -EINVAL; rbsp_init(&rbsp, src, n, &read); nal_hevc_read_start_code_prefix(&rbsp); rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 6, &nal_unit_type); rbsp_bits(&rbsp, 6, &nuh_layer_id); rbsp_bits(&rbsp, 3, &nuh_temporal_id_plus1); if (rbsp.error) return rbsp.error; if (forbidden_zero_bit != 0 || nal_unit_type != FD_NUT) return -EINVAL; nal_hevc_read_filler_data(&rbsp); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_hevc_read_filler);
linux-master
drivers/media/platform/allegro-dvt/nal-hevc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Pengutronix, Michael Tretter <[email protected]>
 *
 * Convert NAL units between raw byte sequence payloads (RBSP) and C structs
 *
 * The conversion is defined in "ITU-T Rec. H.264 (04/2017) Advanced video
 * coding for generic audiovisual services". Decoder drivers may use the
 * parser to parse RBSP from encoded streams and configure the hardware, if
 * the hardware is not able to parse RBSP itself. Encoder drivers may use the
 * generator to generate the RBSP for SPS/PPS nal units and add them to the
 * encoded stream if the hardware does not generate the units.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/v4l2-controls.h>

#include <linux/device.h>
#include <linux/export.h>
#include <linux/log2.h>

#include "nal-h264.h"
#include "nal-rbsp.h"

/*
 * See Rec. ITU-T H.264 (04/2017) Table 7-1 - NAL unit type codes, syntax
 * element categories, and NAL unit type classes
 */
enum nal_unit_type {
	SEQUENCE_PARAMETER_SET = 7,
	PICTURE_PARAMETER_SET = 8,
	FILLER_DATA = 12,
};

/* Emit the four byte Annex B start code 00 00 00 01 ahead of a NAL unit. */
static void nal_h264_write_start_code_prefix(struct rbsp *rbsp)
{
	u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
	int i = 4;

	if (DIV_ROUND_UP(rbsp->pos, 8) + i > rbsp->size) {
		rbsp->error = -EINVAL;
		return;
	}

	p[0] = 0x00;
	p[1] = 0x00;
	p[2] = 0x00;
	p[3] = 0x01;

	rbsp->pos += i * 8;
}

/* Consume and verify the four byte Annex B start code 00 00 00 01. */
static void nal_h264_read_start_code_prefix(struct rbsp *rbsp)
{
	u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
	int i = 4;

	if (DIV_ROUND_UP(rbsp->pos, 8) + i > rbsp->size) {
		rbsp->error = -EINVAL;
		return;
	}

	if (p[0] != 0x00 || p[1] != 0x00 || p[2] != 0x00 || p[3] != 0x01) {
		rbsp->error = -EINVAL;
		return;
	}

	rbsp->pos += i * 8;
}

/*
 * Fill the remainder of the buffer with 0xff filler bytes.
 *
 * NOTE(review): assumes at least one byte remains after rbsp->pos (the
 * reserved terminator byte); if the buffer is already full, i could be
 * zero or negative before being passed to memset — confirm that callers
 * always leave headroom.
 */
static void nal_h264_write_filler_data(struct rbsp *rbsp)
{
	u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
	int i;

	/* Keep 1 byte extra for terminating the NAL unit */
	i = rbsp->size - DIV_ROUND_UP(rbsp->pos, 8) - 1;
	memset(p, 0xff, i);
	rbsp->pos += i * 8;
}

/*
 * Skip over a run of 0xff filler bytes.
 *
 * NOTE(review): *p is dereferenced before the bounds check in each
 * iteration; a filler run that extends to the very end of the buffer
 * would read one byte past it — confirm rbsp->size semantics.
 */
static void nal_h264_read_filler_data(struct rbsp *rbsp)
{
	u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);

	while (*p == 0xff) {
		if (DIV_ROUND_UP(rbsp->pos, 8) > rbsp->size) {
			rbsp->error = -EINVAL;
			return;
		}

		p++;
		rbsp->pos += 8;
	}
}

/* Write or read H.264 HRD parameters (Rec. ITU-T H.264 E.1.2). */
static void nal_h264_rbsp_hrd_parameters(struct rbsp *rbsp,
					 struct nal_h264_hrd_parameters *hrd)
{
	unsigned int i;

	if (!hrd) {
		rbsp->error = -EINVAL;
		return;
	}

	rbsp_uev(rbsp, &hrd->cpb_cnt_minus1);
	rbsp_bits(rbsp, 4, &hrd->bit_rate_scale);
	rbsp_bits(rbsp, 4, &hrd->cpb_size_scale);

	for (i = 0; i <= hrd->cpb_cnt_minus1; i++) {
		rbsp_uev(rbsp, &hrd->bit_rate_value_minus1[i]);
		rbsp_uev(rbsp, &hrd->cpb_size_value_minus1[i]);
		rbsp_bit(rbsp, &hrd->cbr_flag[i]);
	}

	rbsp_bits(rbsp, 5, &hrd->initial_cpb_removal_delay_length_minus1);
	rbsp_bits(rbsp, 5, &hrd->cpb_removal_delay_length_minus1);
	rbsp_bits(rbsp, 5, &hrd->dpb_output_delay_length_minus1);
	rbsp_bits(rbsp, 5, &hrd->time_offset_length);
}

/* Write or read H.264 VUI parameters (Rec. ITU-T H.264 E.1.1). */
static void nal_h264_rbsp_vui_parameters(struct rbsp *rbsp,
					 struct nal_h264_vui_parameters *vui)
{
	if (!vui) {
		rbsp->error = -EINVAL;
		return;
	}

	rbsp_bit(rbsp, &vui->aspect_ratio_info_present_flag);
	if (vui->aspect_ratio_info_present_flag) {
		rbsp_bits(rbsp, 8, &vui->aspect_ratio_idc);
		/* 255 is Extended_SAR: explicit sample aspect ratio follows */
		if (vui->aspect_ratio_idc == 255) {
			rbsp_bits(rbsp, 16, &vui->sar_width);
			rbsp_bits(rbsp, 16, &vui->sar_height);
		}
	}

	rbsp_bit(rbsp, &vui->overscan_info_present_flag);
	if (vui->overscan_info_present_flag)
		rbsp_bit(rbsp, &vui->overscan_appropriate_flag);

	rbsp_bit(rbsp, &vui->video_signal_type_present_flag);
	if (vui->video_signal_type_present_flag) {
		rbsp_bits(rbsp, 3, &vui->video_format);
		rbsp_bit(rbsp, &vui->video_full_range_flag);
		rbsp_bit(rbsp, &vui->colour_description_present_flag);
		if (vui->colour_description_present_flag) {
			rbsp_bits(rbsp, 8, &vui->colour_primaries);
			rbsp_bits(rbsp, 8, &vui->transfer_characteristics);
			rbsp_bits(rbsp, 8, &vui->matrix_coefficients);
		}
	}

	rbsp_bit(rbsp, &vui->chroma_loc_info_present_flag);
	if (vui->chroma_loc_info_present_flag) {
		rbsp_uev(rbsp, &vui->chroma_sample_loc_type_top_field);
		rbsp_uev(rbsp, &vui->chroma_sample_loc_type_bottom_field);
	}

	rbsp_bit(rbsp, &vui->timing_info_present_flag);
	if (vui->timing_info_present_flag) {
		rbsp_bits(rbsp, 32, &vui->num_units_in_tick);
		rbsp_bits(rbsp, 32, &vui->time_scale);
		rbsp_bit(rbsp, &vui->fixed_frame_rate_flag);
	}

	rbsp_bit(rbsp, &vui->nal_hrd_parameters_present_flag);
	if (vui->nal_hrd_parameters_present_flag)
		nal_h264_rbsp_hrd_parameters(rbsp, &vui->nal_hrd_parameters);
	rbsp_bit(rbsp, &vui->vcl_hrd_parameters_present_flag);
	if (vui->vcl_hrd_parameters_present_flag)
		nal_h264_rbsp_hrd_parameters(rbsp, &vui->vcl_hrd_parameters);
	if (vui->nal_hrd_parameters_present_flag ||
	    vui->vcl_hrd_parameters_present_flag)
		rbsp_bit(rbsp, &vui->low_delay_hrd_flag);

	rbsp_bit(rbsp, &vui->pic_struct_present_flag);

	rbsp_bit(rbsp, &vui->bitstream_restriction_flag);
	if (vui->bitstream_restriction_flag) {
		rbsp_bit(rbsp, &vui->motion_vectors_over_pic_boundaries_flag);
		rbsp_uev(rbsp, &vui->max_bytes_per_pic_denom);
		rbsp_uev(rbsp, &vui->max_bits_per_mb_denom);
		rbsp_uev(rbsp, &vui->log2_max_mv_length_horizontal);
		/*
		 * NOTE(review): field is spelled log21_... — presumably a
		 * typo for log2_... in the header; do not rename here
		 * without touching nal-h264.h.
		 */
		rbsp_uev(rbsp, &vui->log21_max_mv_length_vertical);
		rbsp_uev(rbsp, &vui->max_num_reorder_frames);
		rbsp_uev(rbsp, &vui->max_dec_frame_buffering);
	}
}

/* Write or read an H.264 sequence parameter set (Rec. ITU-T H.264 7.3.2.1). */
static void nal_h264_rbsp_sps(struct rbsp *rbsp, struct nal_h264_sps *sps)
{
	unsigned int i;

	if (!sps) {
		rbsp->error = -EINVAL;
		return;
	}

	rbsp_bits(rbsp, 8, &sps->profile_idc);
	rbsp_bit(rbsp, &sps->constraint_set0_flag);
	rbsp_bit(rbsp, &sps->constraint_set1_flag);
	rbsp_bit(rbsp, &sps->constraint_set2_flag);
	rbsp_bit(rbsp, &sps->constraint_set3_flag);
	rbsp_bit(rbsp, &sps->constraint_set4_flag);
	rbsp_bit(rbsp, &sps->constraint_set5_flag);
	rbsp_bits(rbsp, 2, &sps->reserved_zero_2bits);
	rbsp_bits(rbsp, 8, &sps->level_idc);

	rbsp_uev(rbsp, &sps->seq_parameter_set_id);

	/* profiles that carry chroma/bit-depth info in the SPS */
	if (sps->profile_idc == 100 || sps->profile_idc == 110 ||
	    sps->profile_idc == 122 || sps->profile_idc == 244 ||
	    sps->profile_idc == 44 || sps->profile_idc == 83 ||
	    sps->profile_idc == 86 || sps->profile_idc == 118 ||
	    sps->profile_idc == 128 || sps->profile_idc == 138 ||
	    sps->profile_idc == 139 || sps->profile_idc == 134 ||
	    sps->profile_idc == 135) {
		rbsp_uev(rbsp, &sps->chroma_format_idc);
		if (sps->chroma_format_idc == 3)
			rbsp_bit(rbsp, &sps->separate_colour_plane_flag);
		rbsp_uev(rbsp, &sps->bit_depth_luma_minus8);
		rbsp_uev(rbsp, &sps->bit_depth_chroma_minus8);
		rbsp_bit(rbsp, &sps->qpprime_y_zero_transform_bypass_flag);
		rbsp_bit(rbsp, &sps->seq_scaling_matrix_present_flag);
		/* scaling matrices are not supported by this helper */
		if (sps->seq_scaling_matrix_present_flag)
			rbsp->error = -EINVAL;
	}

	rbsp_uev(rbsp, &sps->log2_max_frame_num_minus4);

	rbsp_uev(rbsp, &sps->pic_order_cnt_type);
	switch (sps->pic_order_cnt_type) {
	case 0:
		rbsp_uev(rbsp, &sps->log2_max_pic_order_cnt_lsb_minus4);
		break;
	case 1:
		rbsp_bit(rbsp, &sps->delta_pic_order_always_zero_flag);
		rbsp_sev(rbsp, &sps->offset_for_non_ref_pic);
		rbsp_sev(rbsp, &sps->offset_for_top_to_bottom_field);
		rbsp_uev(rbsp, &sps->num_ref_frames_in_pic_order_cnt_cycle);
		for (i = 0; i < sps->num_ref_frames_in_pic_order_cnt_cycle; i++)
			rbsp_sev(rbsp, &sps->offset_for_ref_frame[i]);
		break;
	default:
		/* pic_order_cnt_type 2 has no extra fields but is rejected */
		rbsp->error = -EINVAL;
		break;
	}

	rbsp_uev(rbsp, &sps->max_num_ref_frames);
	rbsp_bit(rbsp, &sps->gaps_in_frame_num_value_allowed_flag);
	rbsp_uev(rbsp, &sps->pic_width_in_mbs_minus1);
	rbsp_uev(rbsp, &sps->pic_height_in_map_units_minus1);
	rbsp_bit(rbsp, &sps->frame_mbs_only_flag);
	if (!sps->frame_mbs_only_flag)
		rbsp_bit(rbsp, &sps->mb_adaptive_frame_field_flag);
	rbsp_bit(rbsp, &sps->direct_8x8_inference_flag);

	rbsp_bit(rbsp, &sps->frame_cropping_flag);
	if (sps->frame_cropping_flag) {
		rbsp_uev(rbsp, &sps->crop_left);
		rbsp_uev(rbsp, &sps->crop_right);
		rbsp_uev(rbsp, &sps->crop_top);
		rbsp_uev(rbsp, &sps->crop_bottom);
	}

	rbsp_bit(rbsp, &sps->vui_parameters_present_flag);
	if (sps->vui_parameters_present_flag)
		nal_h264_rbsp_vui_parameters(rbsp, &sps->vui);
}

/* Write or read an H.264 picture parameter set (Rec. ITU-T H.264 7.3.2.2). */
static void nal_h264_rbsp_pps(struct rbsp *rbsp, struct nal_h264_pps *pps)
{
	int i;

	rbsp_uev(rbsp,
&pps->pic_parameter_set_id); rbsp_uev(rbsp, &pps->seq_parameter_set_id); rbsp_bit(rbsp, &pps->entropy_coding_mode_flag); rbsp_bit(rbsp, &pps->bottom_field_pic_order_in_frame_present_flag); rbsp_uev(rbsp, &pps->num_slice_groups_minus1); if (pps->num_slice_groups_minus1 > 0) { rbsp_uev(rbsp, &pps->slice_group_map_type); switch (pps->slice_group_map_type) { case 0: for (i = 0; i < pps->num_slice_groups_minus1; i++) rbsp_uev(rbsp, &pps->run_length_minus1[i]); break; case 2: for (i = 0; i < pps->num_slice_groups_minus1; i++) { rbsp_uev(rbsp, &pps->top_left[i]); rbsp_uev(rbsp, &pps->bottom_right[i]); } break; case 3: case 4: case 5: rbsp_bit(rbsp, &pps->slice_group_change_direction_flag); rbsp_uev(rbsp, &pps->slice_group_change_rate_minus1); break; case 6: rbsp_uev(rbsp, &pps->pic_size_in_map_units_minus1); for (i = 0; i < pps->pic_size_in_map_units_minus1; i++) rbsp_bits(rbsp, order_base_2(pps->num_slice_groups_minus1 + 1), &pps->slice_group_id[i]); break; default: break; } } rbsp_uev(rbsp, &pps->num_ref_idx_l0_default_active_minus1); rbsp_uev(rbsp, &pps->num_ref_idx_l1_default_active_minus1); rbsp_bit(rbsp, &pps->weighted_pred_flag); rbsp_bits(rbsp, 2, &pps->weighted_bipred_idc); rbsp_sev(rbsp, &pps->pic_init_qp_minus26); rbsp_sev(rbsp, &pps->pic_init_qs_minus26); rbsp_sev(rbsp, &pps->chroma_qp_index_offset); rbsp_bit(rbsp, &pps->deblocking_filter_control_present_flag); rbsp_bit(rbsp, &pps->constrained_intra_pred_flag); rbsp_bit(rbsp, &pps->redundant_pic_cnt_present_flag); if (/* more_rbsp_data() */ false) { rbsp_bit(rbsp, &pps->transform_8x8_mode_flag); rbsp_bit(rbsp, &pps->pic_scaling_matrix_present_flag); if (pps->pic_scaling_matrix_present_flag) rbsp->error = -EINVAL; rbsp_sev(rbsp, &pps->second_chroma_qp_index_offset); } } /** * nal_h264_write_sps() - Write SPS NAL unit into RBSP format * @dev: device pointer * @dest: the buffer that is filled with RBSP data * @n: maximum size of @dest in bytes * @sps: &struct nal_h264_sps to convert to RBSP * * Convert @sps to 
RBSP data and write it into @dest. * * The size of the SPS NAL unit is not known in advance and this function will * fail, if @dest does not hold sufficient space for the SPS NAL unit. * * Return: number of bytes written to @dest or negative error code */ ssize_t nal_h264_write_sps(const struct device *dev, void *dest, size_t n, struct nal_h264_sps *sps) { struct rbsp rbsp; unsigned int forbidden_zero_bit = 0; unsigned int nal_ref_idc = 0; unsigned int nal_unit_type = SEQUENCE_PARAMETER_SET; if (!dest) return -EINVAL; rbsp_init(&rbsp, dest, n, &write); nal_h264_write_start_code_prefix(&rbsp); rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 2, &nal_ref_idc); rbsp_bits(&rbsp, 5, &nal_unit_type); nal_h264_rbsp_sps(&rbsp, sps); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_h264_write_sps); /** * nal_h264_read_sps() - Read SPS NAL unit from RBSP format * @dev: device pointer * @sps: the &struct nal_h264_sps to fill from the RBSP data * @src: the buffer that contains the RBSP data * @n: size of @src in bytes * * Read RBSP data from @src and use it to fill @sps. 
* * Return: number of bytes read from @src or negative error code */ ssize_t nal_h264_read_sps(const struct device *dev, struct nal_h264_sps *sps, void *src, size_t n) { struct rbsp rbsp; unsigned int forbidden_zero_bit; unsigned int nal_ref_idc; unsigned int nal_unit_type; if (!src) return -EINVAL; rbsp_init(&rbsp, src, n, &read); nal_h264_read_start_code_prefix(&rbsp); rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 2, &nal_ref_idc); rbsp_bits(&rbsp, 5, &nal_unit_type); if (rbsp.error || forbidden_zero_bit != 0 || nal_ref_idc != 0 || nal_unit_type != SEQUENCE_PARAMETER_SET) return -EINVAL; nal_h264_rbsp_sps(&rbsp, sps); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_h264_read_sps); /** * nal_h264_write_pps() - Write PPS NAL unit into RBSP format * @dev: device pointer * @dest: the buffer that is filled with RBSP data * @n: maximum size of @dest in bytes * @pps: &struct nal_h264_pps to convert to RBSP * * Convert @pps to RBSP data and write it into @dest. * * The size of the PPS NAL unit is not known in advance and this function will * fail, if @dest does not hold sufficient space for the PPS NAL unit. 
* * Return: number of bytes written to @dest or negative error code */ ssize_t nal_h264_write_pps(const struct device *dev, void *dest, size_t n, struct nal_h264_pps *pps) { struct rbsp rbsp; unsigned int forbidden_zero_bit = 0; unsigned int nal_ref_idc = 0; unsigned int nal_unit_type = PICTURE_PARAMETER_SET; if (!dest) return -EINVAL; rbsp_init(&rbsp, dest, n, &write); nal_h264_write_start_code_prefix(&rbsp); /* NAL unit header */ rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 2, &nal_ref_idc); rbsp_bits(&rbsp, 5, &nal_unit_type); nal_h264_rbsp_pps(&rbsp, pps); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_h264_write_pps); /** * nal_h264_read_pps() - Read PPS NAL unit from RBSP format * @dev: device pointer * @pps: the &struct nal_h264_pps to fill from the RBSP data * @src: the buffer that contains the RBSP data * @n: size of @src in bytes * * Read RBSP data from @src and use it to fill @pps. * * Return: number of bytes read from @src or negative error code */ ssize_t nal_h264_read_pps(const struct device *dev, struct nal_h264_pps *pps, void *src, size_t n) { struct rbsp rbsp; if (!src) return -EINVAL; rbsp_init(&rbsp, src, n, &read); nal_h264_read_start_code_prefix(&rbsp); /* NAL unit header */ rbsp.pos += 8; nal_h264_rbsp_pps(&rbsp, pps); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_h264_read_pps); /** * nal_h264_write_filler() - Write filler data RBSP * @dev: device pointer * @dest: buffer to fill with filler data * @n: size of the buffer to fill with filler data * * Write a filler data RBSP to @dest with a size of @n bytes and return the * number of written filler data bytes. * * Use this function to generate dummy data in an RBSP data stream that can be * safely ignored by h264 decoders. * * The RBSP format of the filler data is specified in Rec. ITU-T H.264 * (04/2017) 7.3.2.7 Filler data RBSP syntax. 
* * Return: number of filler data bytes (including marker) or negative error */ ssize_t nal_h264_write_filler(const struct device *dev, void *dest, size_t n) { struct rbsp rbsp; unsigned int forbidden_zero_bit = 0; unsigned int nal_ref_idc = 0; unsigned int nal_unit_type = FILLER_DATA; if (!dest) return -EINVAL; rbsp_init(&rbsp, dest, n, &write); nal_h264_write_start_code_prefix(&rbsp); rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 2, &nal_ref_idc); rbsp_bits(&rbsp, 5, &nal_unit_type); nal_h264_write_filler_data(&rbsp); rbsp_trailing_bits(&rbsp); return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_h264_write_filler); /** * nal_h264_read_filler() - Read filler data RBSP * @dev: device pointer * @src: buffer with RBSP data that is read * @n: maximum size of src that shall be read * * Read a filler data RBSP from @src up to a maximum size of @n bytes and * return the size of the filler data in bytes including the marker. * * This function is used to parse filler data and skip the respective bytes in * the RBSP data. * * The RBSP format of the filler data is specified in Rec. ITU-T H.264 * (04/2017) 7.3.2.7 Filler data RBSP syntax. * * Return: number of filler data bytes (including marker) or negative error */ ssize_t nal_h264_read_filler(const struct device *dev, void *src, size_t n) { struct rbsp rbsp; unsigned int forbidden_zero_bit; unsigned int nal_ref_idc; unsigned int nal_unit_type; if (!src) return -EINVAL; rbsp_init(&rbsp, src, n, &read); nal_h264_read_start_code_prefix(&rbsp); rbsp_bit(&rbsp, &forbidden_zero_bit); rbsp_bits(&rbsp, 2, &nal_ref_idc); rbsp_bits(&rbsp, 5, &nal_unit_type); if (rbsp.error) return rbsp.error; if (forbidden_zero_bit != 0 || nal_ref_idc != 0 || nal_unit_type != FILLER_DATA) return -EINVAL; nal_h264_read_filler_data(&rbsp); rbsp_trailing_bits(&rbsp); if (rbsp.error) return rbsp.error; return DIV_ROUND_UP(rbsp.pos, 8); } EXPORT_SYMBOL_GPL(nal_h264_read_filler);
linux-master
drivers/media/platform/allegro-dvt/nal-h264.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Pengutronix, Michael Tretter <[email protected]>
 *
 * Helper functions for handling messages that are sent via mailbox to the
 * Allegro VCU firmware.
 *
 * The MCU mailbox protocol is a firmware ABI: every message is serialized
 * as an array of u32 words whose order and bit layout must match what the
 * firmware expects for its MCU_MSG_VERSION_* revision. Do NOT reorder the
 * dst[i++]/src[i++] statements below — the statement order IS the wire
 * format.
 */

#include <linux/bitfield.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/videodev2.h>

#include "allegro-mail.h"

/*
 * Return a printable name for a message type, mainly for debug output.
 * Unknown types are formatted into a static buffer; NOTE(review): the
 * static buf is shared between callers, so concurrent calls with unknown
 * types would race — presumably acceptable for debug-only use, but confirm.
 */
const char *msg_type_name(enum mcu_msg_type type)
{
	static char buf[9];

	switch (type) {
	case MCU_MSG_TYPE_INIT:
		return "INIT";
	case MCU_MSG_TYPE_CREATE_CHANNEL:
		return "CREATE_CHANNEL";
	case MCU_MSG_TYPE_DESTROY_CHANNEL:
		return "DESTROY_CHANNEL";
	case MCU_MSG_TYPE_ENCODE_FRAME:
		return "ENCODE_FRAME";
	case MCU_MSG_TYPE_PUT_STREAM_BUFFER:
		return "PUT_STREAM_BUFFER";
	case MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE:
		return "PUSH_BUFFER_INTERMEDIATE";
	case MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE:
		return "PUSH_BUFFER_REFERENCE";
	default:
		/* "(0x" + 4 hex digits + ")" + NUL = 8 bytes, fits buf[9] */
		snprintf(buf, sizeof(buf), "(0x%04x)", type);
		return buf;
	}
}
EXPORT_SYMBOL(msg_type_name);

/*
 * Serialize an INIT request into @dst.
 * Returns the number of bytes written (a multiple of sizeof(u32)).
 */
static ssize_t allegro_enc_init(u32 *dst, struct mcu_msg_init_request *msg)
{
	unsigned int i = 0;
	enum mcu_msg_version version = msg->header.version;

	dst[i++] = msg->reserved0;
	dst[i++] = msg->suballoc_dma;
	dst[i++] = msg->suballoc_size;
	dst[i++] = msg->encoder_buffer_size;
	dst[i++] = msg->encoder_buffer_color_depth;
	dst[i++] = msg->num_cores;
	/* newer firmware expects two extra words: clock rate + padding */
	if (version >= MCU_MSG_VERSION_2019_2) {
		dst[i++] = msg->clk_rate;
		dst[i++] = 0;
	}

	return i * sizeof(*dst);
}

/*
 * Map the V4L2 pixel format of the channel to the codec id used by the
 * MCU. The numeric ids differ between firmware versions; H.264 is the
 * fallback for unknown formats in both cases.
 */
static inline u32 settings_get_mcu_codec(struct create_channel_param *param)
{
	enum mcu_msg_version version = param->version;
	u32 pixelformat = param->codec;

	if (version < MCU_MSG_VERSION_2019_2) {
		switch (pixelformat) {
		case V4L2_PIX_FMT_HEVC:
			return 2;
		case V4L2_PIX_FMT_H264:
		default:
			return 1;
		}
	} else {
		switch (pixelformat) {
		case V4L2_PIX_FMT_HEVC:
			return 1;
		case V4L2_PIX_FMT_H264:
		default:
			return 0;
		}
	}
}

/*
 * Serialize the channel configuration blob that is passed to the firmware
 * when creating a channel. The word layout depends on the firmware
 * version: some words only exist for >= 2019.2, some only for older
 * firmware, and a few fields moved position between versions.
 *
 * Returns the size of the blob in bytes.
 */
ssize_t allegro_encode_config_blob(u32 *dst, struct create_channel_param *param)
{
	enum mcu_msg_version version = param->version;
	unsigned int i = 0;
	unsigned int j = 0;
	u32 val;
	unsigned int codec = settings_get_mcu_codec(param);

	if (version >= MCU_MSG_VERSION_2019_2)
		dst[i++] = param->layer_id;
	dst[i++] = FIELD_PREP(GENMASK(31, 16), param->height) |
		   FIELD_PREP(GENMASK(15, 0), param->width);
	if (version >= MCU_MSG_VERSION_2019_2)
		dst[i++] = param->videomode;
	dst[i++] = param->format;
	if (version < MCU_MSG_VERSION_2019_2)
		dst[i++] = param->colorspace;
	dst[i++] = param->src_mode;
	if (version >= MCU_MSG_VERSION_2019_2)
		dst[i++] = param->src_bit_depth;
	dst[i++] = FIELD_PREP(GENMASK(31, 24), codec) |
		   FIELD_PREP(GENMASK(23, 8), param->constraint_set_flags) |
		   FIELD_PREP(GENMASK(7, 0), param->profile);
	dst[i++] = FIELD_PREP(GENMASK(31, 16), param->tier) |
		   FIELD_PREP(GENMASK(15, 0), param->level);

	/* misc. flags word: MVP enable + log2 limits for frame_num/POC */
	val = 0;
	val |= param->temporal_mvp_enable ? BIT(20) : 0;
	val |= FIELD_PREP(GENMASK(7, 4), param->log2_max_frame_num);
	/* newer firmware encodes log2_max_poc off by one */
	if (version >= MCU_MSG_VERSION_2019_2)
		val |= FIELD_PREP(GENMASK(3, 0), param->log2_max_poc - 1);
	else
		val |= FIELD_PREP(GENMASK(3, 0), param->log2_max_poc);
	dst[i++] = val;

	val = 0;
	val |= param->enable_reordering ? BIT(0) : 0;
	val |= param->dbf_ovr_en ? BIT(2) : 0;
	val |= param->override_lf ? BIT(12) : 0;
	dst[i++] = val;

	/* loop filter/RDO options only exist on >= 2019.2; older firmware
	 * still expects a (zero) placeholder word at this position
	 */
	if (version >= MCU_MSG_VERSION_2019_2) {
		val = 0;
		val |= param->custom_lda ? BIT(2) : 0;
		val |= param->rdo_cost_mode ? BIT(20) : 0;
		dst[i++] = val;

		val = 0;
		val |= param->lf ? BIT(2) : 0;
		val |= param->lf_x_tile ? BIT(3) : 0;
		val |= param->lf_x_slice ? BIT(4) : 0;
		dst[i++] = val;
	} else {
		val = 0;
		dst[i++] = val;
	}

	dst[i++] = FIELD_PREP(GENMASK(15, 8), param->beta_offset) |
		   FIELD_PREP(GENMASK(7, 0), param->tc_offset);
	dst[i++] = param->unknown11;
	dst[i++] = param->unknown12;
	dst[i++] = param->num_slices;
	dst[i++] = param->encoder_buffer_offset;
	dst[i++] = param->encoder_buffer_enabled;
	dst[i++] = FIELD_PREP(GENMASK(31, 16), param->clip_vrt_range) |
		   FIELD_PREP(GENMASK(15, 0), param->clip_hrz_range);
	dst[i++] = FIELD_PREP(GENMASK(31, 16), param->me_range[1]) |
		   FIELD_PREP(GENMASK(15, 0), param->me_range[0]);
	dst[i++] = FIELD_PREP(GENMASK(31, 16), param->me_range[3]) |
		   FIELD_PREP(GENMASK(15, 0), param->me_range[2]);
	/*
	 * NOTE(review): GENMASK(8, 0) overlaps bit 8 of min_cu_size in
	 * GENMASK(15, 8) above; GENMASK(7, 0) looks intended, but changing
	 * it would alter the bits sent to the firmware — confirm against
	 * the firmware ABI before touching this.
	 */
	dst[i++] = FIELD_PREP(GENMASK(31, 24), param->min_tu_size) |
		   FIELD_PREP(GENMASK(23, 16), param->max_tu_size) |
		   FIELD_PREP(GENMASK(15, 8), param->min_cu_size) |
		   FIELD_PREP(GENMASK(8, 0), param->max_cu_size);
	dst[i++] = FIELD_PREP(GENMASK(15, 8), param->max_transfo_depth_intra) |
		   FIELD_PREP(GENMASK(7, 0), param->max_transfo_depth_inter);
	dst[i++] = param->entropy_mode;
	dst[i++] = param->wp_mode;

	/* rate control block */
	dst[i++] = param->rate_control_mode;
	dst[i++] = param->initial_rem_delay;
	dst[i++] = param->cpb_size;
	dst[i++] = FIELD_PREP(GENMASK(31, 16), param->clk_ratio) |
		   FIELD_PREP(GENMASK(15, 0), param->framerate);
	dst[i++] = param->target_bitrate;
	dst[i++] = param->max_bitrate;
	dst[i++] = FIELD_PREP(GENMASK(31, 16), param->min_qp) |
		   FIELD_PREP(GENMASK(15, 0), param->initial_qp);
	dst[i++] = FIELD_PREP(GENMASK(31, 16), param->ip_delta) |
		   FIELD_PREP(GENMASK(15, 0), param->max_qp);
	dst[i++] = FIELD_PREP(GENMASK(31, 16), param->golden_ref) |
		   FIELD_PREP(GENMASK(15, 0), param->pb_delta);
	dst[i++] = FIELD_PREP(GENMASK(31, 16), param->golden_ref_frequency) |
		   FIELD_PREP(GENMASK(15, 0), param->golden_delta);
	if (version >= MCU_MSG_VERSION_2019_2)
		dst[i++] = param->rate_control_option;
	else
		dst[i++] = 0;

	if (version >= MCU_MSG_VERSION_2019_2) {
		dst[i++] = param->num_pixel;
		dst[i++] = FIELD_PREP(GENMASK(31, 16), param->max_pixel_value) |
			   FIELD_PREP(GENMASK(15, 0), param->max_psnr);
		for (j = 0; j < 3; j++)
			dst[i++] = param->maxpicturesize[j];
	}

	if (version >= MCU_MSG_VERSION_2019_2)
		dst[i++] = param->gop_ctrl_mode;
	else
		dst[i++] = 0;

	/*
	 * GOP block: the packed gop_length word sits before freq_idr on
	 * >= 2019.2 but after gdr_mode on older firmware.
	 */
	if (version >= MCU_MSG_VERSION_2019_2)
		dst[i++] = FIELD_PREP(GENMASK(31, 24), param->freq_golden_ref) |
			   FIELD_PREP(GENMASK(23, 16), param->num_b) |
			   FIELD_PREP(GENMASK(15, 0), param->gop_length);
	dst[i++] = param->freq_idr;
	if (version >= MCU_MSG_VERSION_2019_2)
		dst[i++] = param->enable_lt;
	dst[i++] = param->freq_lt;
	dst[i++] = param->gdr_mode;
	if (version < MCU_MSG_VERSION_2019_2)
		dst[i++] = FIELD_PREP(GENMASK(31, 24), param->freq_golden_ref) |
			   FIELD_PREP(GENMASK(23, 16), param->num_b) |
			   FIELD_PREP(GENMASK(15, 0), param->gop_length);

	if (version >= MCU_MSG_VERSION_2019_2)
		dst[i++] = param->tmpdqp;
	dst[i++] = param->subframe_latency;
	dst[i++] = param->lda_control_mode;
	if (version < MCU_MSG_VERSION_2019_2)
		dst[i++] = param->unknown41;
	if (version >= MCU_MSG_VERSION_2019_2) {
		for (j = 0; j < 6; j++)
			dst[i++] = param->lda_factors[j];
		dst[i++] = param->max_num_merge_cand;
	}

	return i * sizeof(*dst);
}

/*
 * Serialize a CREATE_CHANNEL request. Newer firmware receives a pointer
 * to a previously uploaded config blob (blob_mcu_addr); older firmware
 * gets the blob copied inline into the message.
 */
static ssize_t allegro_enc_create_channel(u32 *dst,
					  struct mcu_msg_create_channel *msg)
{
	enum mcu_msg_version version = msg->header.version;
	unsigned int i = 0;

	dst[i++] = msg->user_id;

	if (version >= MCU_MSG_VERSION_2019_2) {
		dst[i++] = msg->blob_mcu_addr;
	} else {
		/* assumes blob_size is a multiple of sizeof(u32) — the
		 * byte count is converted to a word count below
		 */
		memcpy(&dst[i], msg->blob, msg->blob_size);
		i += msg->blob_size / sizeof(*dst);
	}

	if (version >= MCU_MSG_VERSION_2019_2)
		dst[i++] = msg->ep1_addr;

	return i * sizeof(*dst);
}

/*
 * Extract channel parameters that the firmware reports back in the
 * CREATE_CHANNEL response. On >= 2019.2 the values live in the returned
 * config blob (@src, fixed word 9); older firmware returns them as plain
 * response fields. Always returns 0.
 */
ssize_t allegro_decode_config_blob(struct create_channel_param *param,
				   struct mcu_msg_create_channel_response *msg,
				   u32 *src)
{
	enum mcu_msg_version version = msg->header.version;

	if (version >= MCU_MSG_VERSION_2019_2) {
		param->num_ref_idx_l0 = FIELD_GET(GENMASK(7, 4), src[9]);
		param->num_ref_idx_l1 = FIELD_GET(GENMASK(11, 8), src[9]);
	} else {
		param->num_ref_idx_l0 = msg->num_ref_idx_l0;
		param->num_ref_idx_l1 = msg->num_ref_idx_l1;
	}

	return 0;
}

/* Serialize a DESTROY_CHANNEL request: just the channel id. */
static ssize_t allegro_enc_destroy_channel(u32 *dst,
					   struct mcu_msg_destroy_channel *msg)
{
	unsigned int i = 0;

	dst[i++] = msg->channel_id;

	return i * sizeof(*dst);
}

/*
 * Serialize a PUSH_BUFFER_* request: the channel id followed by
 * (dma_addr, mcu_addr, size) triples for each buffer handed to the MCU.
 */
static ssize_t allegro_enc_push_buffers(u32 *dst,
					struct mcu_msg_push_buffers_internal *msg)
{
	unsigned int i = 0;
	struct mcu_msg_push_buffers_internal_buffer *buffer;
	unsigned int num_buffers = msg->num_buffers;
	unsigned int j;

	dst[i++] = msg->channel_id;
	for (j = 0; j < num_buffers; j++) {
		buffer = &msg->buffer[j];
		dst[i++] = buffer->dma_addr;
		dst[i++] = buffer->mcu_addr;
		dst[i++] = buffer->size;
	}

	return i * sizeof(*dst);
}

/*
 * Serialize a PUT_STREAM_BUFFER request. The 64-bit dst_handle is split
 * into two u32 words, low word first.
 */
static ssize_t allegro_enc_put_stream_buffer(u32 *dst,
					     struct mcu_msg_put_stream_buffer *msg)
{
	unsigned int i = 0;

	dst[i++] = msg->channel_id;
	dst[i++] = msg->dma_addr;
	dst[i++] = msg->mcu_addr;
	dst[i++] = msg->size;
	dst[i++] = msg->offset;
	dst[i++] = lower_32_bits(msg->dst_handle);
	dst[i++] = upper_32_bits(msg->dst_handle);

	return i * sizeof(*dst);
}

/*
 * Serialize an ENCODE_FRAME request. 64-bit handles/params are split
 * low-word-first; >= 2019.2 adds four zero words after the QP word and
 * the is_10_bit/format words.
 */
static ssize_t allegro_enc_encode_frame(u32 *dst, struct mcu_msg_encode_frame *msg)
{
	enum mcu_msg_version version = msg->header.version;
	unsigned int i = 0;

	dst[i++] = msg->channel_id;

	dst[i++] = msg->reserved;
	dst[i++] = msg->encoding_options;
	dst[i++] = FIELD_PREP(GENMASK(31, 16), msg->padding) |
		   FIELD_PREP(GENMASK(15, 0), msg->pps_qp);

	if (version >= MCU_MSG_VERSION_2019_2) {
		dst[i++] = 0;
		dst[i++] = 0;
		dst[i++] = 0;
		dst[i++] = 0;
	}

	dst[i++] = lower_32_bits(msg->user_param);
	dst[i++] = upper_32_bits(msg->user_param);
	dst[i++] = lower_32_bits(msg->src_handle);
	dst[i++] = upper_32_bits(msg->src_handle);
	dst[i++] = msg->request_options;
	dst[i++] = msg->src_y;
	dst[i++] = msg->src_uv;
	if (version >= MCU_MSG_VERSION_2019_2)
		dst[i++] = msg->is_10_bit;
	dst[i++] = msg->stride;
	if (version >= MCU_MSG_VERSION_2019_2)
		dst[i++] = msg->format;
	dst[i++] = msg->ep2;
	dst[i++] = lower_32_bits(msg->ep2_v);
	dst[i++] = upper_32_bits(msg->ep2_v);

	return i * sizeof(*dst);
}

/*
 * Parse an INIT response. Only a single reserved word is transferred.
 * Returns the number of bytes consumed from @src.
 */
static ssize_t allegro_dec_init(struct mcu_msg_init_response *msg, u32 *src)
{
	unsigned int i = 0;

	msg->reserved0 = src[i++];

	return i * sizeof(*src);
}

/* Parse a CREATE_CHANNEL response into @msg. */
static ssize_t allegro_dec_create_channel(struct mcu_msg_create_channel_response *msg,
					  u32 *src)
{
	enum mcu_msg_version version = msg->header.version;
	unsigned int i = 0;

	msg->channel_id = src[i++];
	msg->user_id = src[i++];
	/*
	 * Version >= MCU_MSG_VERSION_2019_2 is handled in
	 * allegro_decode_config_blob().
	 */
	if (version < MCU_MSG_VERSION_2019_2) {
		msg->options = src[i++];
		msg->num_core = src[i++];
		msg->num_ref_idx_l0 = FIELD_GET(GENMASK(7, 4), src[i]);
		msg->num_ref_idx_l1 = FIELD_GET(GENMASK(11, 8), src[i++]);
	}
	msg->int_buffers_count = src[i++];
	msg->int_buffers_size = src[i++];
	msg->rec_buffers_count = src[i++];
	msg->rec_buffers_size = src[i++];
	msg->reserved = src[i++];
	msg->error_code = src[i++];

	return i * sizeof(*src);
}

/* Parse a DESTROY_CHANNEL response: just the channel id. */
static ssize_t allegro_dec_destroy_channel(struct mcu_msg_destroy_channel_response *msg,
					   u32 *src)
{
	unsigned int i = 0;

	msg->channel_id = src[i++];

	return i * sizeof(*src);
}

/*
 * Parse an ENCODE_FRAME response. 64-bit handles are reassembled from
 * two consecutive words (low word first); several words carry multiple
 * packed fields extracted via FIELD_GET. Four extra reserved words exist
 * on >= 2019.2 firmware.
 */
static ssize_t allegro_dec_encode_frame(struct mcu_msg_encode_frame_response *msg,
					u32 *src)
{
	enum mcu_msg_version version = msg->header.version;
	unsigned int i = 0;
	unsigned int j;

	msg->channel_id = src[i++];

	msg->dst_handle = src[i++];
	msg->dst_handle |= (((u64)src[i++]) << 32);
	msg->user_param = src[i++];
	msg->user_param |= (((u64)src[i++]) << 32);
	msg->src_handle = src[i++];
	msg->src_handle |= (((u64)src[i++]) << 32);
	msg->skip = FIELD_GET(GENMASK(31, 16), src[i]);
	msg->is_ref = FIELD_GET(GENMASK(15, 0), src[i++]);
	msg->initial_removal_delay = src[i++];
	msg->dpb_output_delay = src[i++];
	msg->size = src[i++];
	msg->frame_tag_size = src[i++];
	msg->stuffing = src[i++];
	msg->filler = src[i++];
	msg->num_row = FIELD_GET(GENMASK(31, 16), src[i]);
	msg->num_column = FIELD_GET(GENMASK(15, 0), src[i++]);
	msg->num_ref_idx_l1 = FIELD_GET(GENMASK(31, 24), src[i]);
	msg->num_ref_idx_l0 = FIELD_GET(GENMASK(23, 16), src[i]);
	msg->qp = FIELD_GET(GENMASK(15, 0), src[i++]);
	msg->partition_table_offset = src[i++];
	msg->partition_table_size = src[i++];
	msg->sum_complex = src[i++];
	for (j = 0; j < 4; j++)
		msg->tile_width[j] = src[i++];
	for (j = 0; j < 22; j++)
		msg->tile_height[j] = src[i++];
	msg->error_code = src[i++];
	msg->slice_type = src[i++];
	msg->pic_struct = src[i++];
	msg->reserved = FIELD_GET(GENMASK(31, 24), src[i]);
	msg->is_last_slice = FIELD_GET(GENMASK(23, 16), src[i]);
	msg->is_first_slice = FIELD_GET(GENMASK(15, 8), src[i]);
	msg->is_idr = FIELD_GET(GENMASK(7, 0), src[i++]);
	msg->reserved1 = FIELD_GET(GENMASK(31, 16), src[i]);
	msg->pps_qp = FIELD_GET(GENMASK(15, 0), src[i++]);
	msg->reserved2 = src[i++];
	if (version >= MCU_MSG_VERSION_2019_2) {
		msg->reserved3 = src[i++];
		msg->reserved4 = src[i++];
		msg->reserved5 = src[i++];
		msg->reserved6 = src[i++];
	}

	return i * sizeof(*src);
}

/**
 * allegro_encode_mail() - Encode allegro messages to firmware format
 * @dst: Pointer to the memory that will be filled with data
 * @msg: The allegro message that will be encoded
 *
 * Word 0 is a header packing the message type (upper half) and the body
 * length in bytes (lower half); the type-specific body starts at dst[1].
 *
 * Return: total encoded size in bytes (header + body), or -EINVAL for
 * NULL arguments or an unknown message type.
 */
ssize_t allegro_encode_mail(u32 *dst, void *msg)
{
	const struct mcu_msg_header *header = msg;
	ssize_t size;

	if (!msg || !dst)
		return -EINVAL;

	switch (header->type) {
	case MCU_MSG_TYPE_INIT:
		size = allegro_enc_init(&dst[1], msg);
		break;
	case MCU_MSG_TYPE_CREATE_CHANNEL:
		size = allegro_enc_create_channel(&dst[1], msg);
		break;
	case MCU_MSG_TYPE_DESTROY_CHANNEL:
		size = allegro_enc_destroy_channel(&dst[1], msg);
		break;
	case MCU_MSG_TYPE_ENCODE_FRAME:
		size = allegro_enc_encode_frame(&dst[1], msg);
		break;
	case MCU_MSG_TYPE_PUT_STREAM_BUFFER:
		size = allegro_enc_put_stream_buffer(&dst[1], msg);
		break;
	case MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE:
	case MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE:
		size = allegro_enc_push_buffers(&dst[1], msg);
		break;
	default:
		return -EINVAL;
	}

	/*
	 * The encoded messages might have different length depending on
	 * the firmware version or certain fields. Therefore, we have to
	 * set the body length after encoding the message.
	 */
	dst[0] = FIELD_PREP(GENMASK(31, 16), header->type) |
		 FIELD_PREP(GENMASK(15, 0), size);

	return size + sizeof(*dst);
}

/**
 * allegro_decode_mail() - Parse allegro messages from the firmware.
 * @msg: The mcu_msg_response that will be filled with parsed values.
 * @src: Pointer to the memory that will be parsed
 *
 * The message format in the mailbox depends on the firmware. Parse the
 * different formats into a uniform message format that can be used without
 * taking care of the firmware version.
 *
 * Return: 0 on success, -EINVAL for NULL arguments or an unknown message
 * type.
 */
int allegro_decode_mail(void *msg, u32 *src)
{
	struct mcu_msg_header *header;

	if (!src || !msg)
		return -EINVAL;

	header = msg;
	/* the message type lives in the upper half of the header word */
	header->type = FIELD_GET(GENMASK(31, 16), src[0]);

	/* the type-specific body follows the header word */
	src++;
	switch (header->type) {
	case MCU_MSG_TYPE_INIT:
		allegro_dec_init(msg, src);
		break;
	case MCU_MSG_TYPE_CREATE_CHANNEL:
		allegro_dec_create_channel(msg, src);
		break;
	case MCU_MSG_TYPE_DESTROY_CHANNEL:
		allegro_dec_destroy_channel(msg, src);
		break;
	case MCU_MSG_TYPE_ENCODE_FRAME:
		allegro_dec_encode_frame(msg, src);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
linux-master
drivers/media/platform/allegro-dvt/allegro-mail.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2019 Pengutronix, Michael Tretter <[email protected]> * * Allegro DVT video encoder driver */ #include <linux/bits.h> #include <linux/clk.h> #include <linux/firmware.h> #include <linux/gcd.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/log2.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/xlnx-vcu.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/sizes.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mem2mem.h> #include <media/videobuf2-dma-contig.h> #include <media/videobuf2-v4l2.h> #include "allegro-mail.h" #include "nal-h264.h" #include "nal-hevc.h" /* * Support up to 4k video streams. The hardware actually supports higher * resolutions, which are specified in PG252 June 6, 2018 (H.264/H.265 Video * Codec Unit v1.1) Chapter 3. */ #define ALLEGRO_WIDTH_MIN 128 #define ALLEGRO_WIDTH_DEFAULT 1920 #define ALLEGRO_WIDTH_MAX 3840 #define ALLEGRO_HEIGHT_MIN 64 #define ALLEGRO_HEIGHT_DEFAULT 1080 #define ALLEGRO_HEIGHT_MAX 2160 #define ALLEGRO_FRAMERATE_DEFAULT ((struct v4l2_fract) { 30, 1 }) #define ALLEGRO_GOP_SIZE_DEFAULT 25 #define ALLEGRO_GOP_SIZE_MAX 1000 /* * MCU Control Registers * * The Zynq UltraScale+ Devices Register Reference documents the registers * with an offset of 0x9000, which equals the size of the SRAM and one page * gap. The driver handles SRAM and registers separately and, therefore, is * oblivious of the offset. 
*/ #define AL5_MCU_RESET 0x0000 #define AL5_MCU_RESET_SOFT BIT(0) #define AL5_MCU_RESET_REGS BIT(1) #define AL5_MCU_RESET_MODE 0x0004 #define AL5_MCU_RESET_MODE_SLEEP BIT(0) #define AL5_MCU_RESET_MODE_HALT BIT(1) #define AL5_MCU_STA 0x0008 #define AL5_MCU_STA_SLEEP BIT(0) #define AL5_MCU_WAKEUP 0x000c #define AL5_ICACHE_ADDR_OFFSET_MSB 0x0010 #define AL5_ICACHE_ADDR_OFFSET_LSB 0x0014 #define AL5_DCACHE_ADDR_OFFSET_MSB 0x0018 #define AL5_DCACHE_ADDR_OFFSET_LSB 0x001c #define AL5_MCU_INTERRUPT 0x0100 #define AL5_ITC_CPU_IRQ_MSK 0x0104 #define AL5_ITC_CPU_IRQ_CLR 0x0108 #define AL5_ITC_CPU_IRQ_STA 0x010C #define AL5_ITC_CPU_IRQ_STA_TRIGGERED BIT(0) #define AXI_ADDR_OFFSET_IP 0x0208 /* * The MCU accesses the system memory with a 2G offset compared to CPU * physical addresses. */ #define MCU_CACHE_OFFSET SZ_2G /* * The driver needs to reserve some space at the beginning of capture buffers, * because it needs to write SPS/PPS NAL units. The encoder writes the actual * frame data after the offset. */ #define ENCODER_STREAM_OFFSET SZ_128 #define SIZE_MACROBLOCK 16 /* Encoding options */ #define LOG2_MAX_FRAME_NUM 4 #define LOG2_MAX_PIC_ORDER_CNT 10 #define BETA_OFFSET_DIV_2 -1 #define TC_OFFSET_DIV_2 -1 /* * This control allows applications to explicitly disable the encoder buffer. * This value is Allegro specific. 
*/ #define V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER (V4L2_CID_USER_ALLEGRO_BASE + 0) static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-2)"); struct allegro_buffer { void *vaddr; dma_addr_t paddr; size_t size; struct list_head head; }; struct allegro_dev; struct allegro_channel; struct allegro_mbox { struct allegro_dev *dev; unsigned int head; unsigned int tail; unsigned int data; size_t size; /* protect mailbox from simultaneous accesses */ struct mutex lock; }; struct allegro_encoder_buffer { unsigned int size; unsigned int color_depth; unsigned int num_cores; unsigned int clk_rate; }; struct allegro_dev { struct v4l2_device v4l2_dev; struct video_device video_dev; struct v4l2_m2m_dev *m2m_dev; struct platform_device *plat_dev; /* mutex protecting vb2_queue structure */ struct mutex lock; struct regmap *regmap; struct regmap *sram; struct regmap *settings; struct clk *clk_core; struct clk *clk_mcu; const struct fw_info *fw_info; struct allegro_buffer firmware; struct allegro_buffer suballocator; bool has_encoder_buffer; struct allegro_encoder_buffer encoder_buffer; struct completion init_complete; bool initialized; /* The mailbox interface */ struct allegro_mbox *mbox_command; struct allegro_mbox *mbox_status; /* * The downstream driver limits the users to 64 users, thus I can use * a bitfield for the user_ids that are in use. See also user_id in * struct allegro_channel. 
*/ unsigned long channel_user_ids; struct list_head channels; }; static struct regmap_config allegro_regmap_config = { .name = "regmap", .reg_bits = 32, .val_bits = 32, .reg_stride = 4, .max_register = 0xfff, .cache_type = REGCACHE_NONE, }; static struct regmap_config allegro_sram_config = { .name = "sram", .reg_bits = 32, .val_bits = 32, .reg_stride = 4, .max_register = 0x7fff, .cache_type = REGCACHE_NONE, }; #define fh_to_channel(__fh) container_of(__fh, struct allegro_channel, fh) struct allegro_channel { struct allegro_dev *dev; struct v4l2_fh fh; struct v4l2_ctrl_handler ctrl_handler; unsigned int width; unsigned int height; unsigned int stride; struct v4l2_fract framerate; enum v4l2_colorspace colorspace; enum v4l2_ycbcr_encoding ycbcr_enc; enum v4l2_quantization quantization; enum v4l2_xfer_func xfer_func; u32 pixelformat; unsigned int sizeimage_raw; unsigned int osequence; u32 codec; unsigned int sizeimage_encoded; unsigned int csequence; bool frame_rc_enable; unsigned int bitrate; unsigned int bitrate_peak; struct allegro_buffer config_blob; unsigned int log2_max_frame_num; bool temporal_mvp_enable; bool enable_loop_filter_across_tiles; bool enable_loop_filter_across_slices; bool enable_deblocking_filter_override; bool enable_reordering; bool dbf_ovr_en; unsigned int num_ref_idx_l0; unsigned int num_ref_idx_l1; /* Maximum range for motion estimation */ int b_hrz_me_range; int b_vrt_me_range; int p_hrz_me_range; int p_vrt_me_range; /* Size limits of coding unit */ int min_cu_size; int max_cu_size; /* Size limits of transform unit */ int min_tu_size; int max_tu_size; int max_transfo_depth_intra; int max_transfo_depth_inter; struct v4l2_ctrl *mpeg_video_h264_profile; struct v4l2_ctrl *mpeg_video_h264_level; struct v4l2_ctrl *mpeg_video_h264_i_frame_qp; struct v4l2_ctrl *mpeg_video_h264_max_qp; struct v4l2_ctrl *mpeg_video_h264_min_qp; struct v4l2_ctrl *mpeg_video_h264_p_frame_qp; struct v4l2_ctrl *mpeg_video_h264_b_frame_qp; struct v4l2_ctrl 
*mpeg_video_hevc_profile; struct v4l2_ctrl *mpeg_video_hevc_level; struct v4l2_ctrl *mpeg_video_hevc_tier; struct v4l2_ctrl *mpeg_video_hevc_i_frame_qp; struct v4l2_ctrl *mpeg_video_hevc_max_qp; struct v4l2_ctrl *mpeg_video_hevc_min_qp; struct v4l2_ctrl *mpeg_video_hevc_p_frame_qp; struct v4l2_ctrl *mpeg_video_hevc_b_frame_qp; struct v4l2_ctrl *mpeg_video_frame_rc_enable; struct { /* video bitrate mode control cluster */ struct v4l2_ctrl *mpeg_video_bitrate_mode; struct v4l2_ctrl *mpeg_video_bitrate; struct v4l2_ctrl *mpeg_video_bitrate_peak; }; struct v4l2_ctrl *mpeg_video_cpb_size; struct v4l2_ctrl *mpeg_video_gop_size; struct v4l2_ctrl *encoder_buffer; /* user_id is used to identify the channel during CREATE_CHANNEL */ /* not sure, what to set here and if this is actually required */ int user_id; /* channel_id is set by the mcu and used by all later commands */ int mcu_channel_id; struct list_head buffers_reference; struct list_head buffers_intermediate; struct list_head source_shadow_list; struct list_head stream_shadow_list; /* protect shadow lists of buffers passed to firmware */ struct mutex shadow_list_lock; struct list_head list; struct completion completion; unsigned int error; }; static inline int allegro_channel_get_i_frame_qp(struct allegro_channel *channel) { if (channel->codec == V4L2_PIX_FMT_HEVC) return v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_i_frame_qp); else return v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_i_frame_qp); } static inline int allegro_channel_get_p_frame_qp(struct allegro_channel *channel) { if (channel->codec == V4L2_PIX_FMT_HEVC) return v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_p_frame_qp); else return v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_p_frame_qp); } static inline int allegro_channel_get_b_frame_qp(struct allegro_channel *channel) { if (channel->codec == V4L2_PIX_FMT_HEVC) return v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_b_frame_qp); else return v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_b_frame_qp); } static inline int 
allegro_channel_get_min_qp(struct allegro_channel *channel) { if (channel->codec == V4L2_PIX_FMT_HEVC) return v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_min_qp); else return v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_min_qp); } static inline int allegro_channel_get_max_qp(struct allegro_channel *channel) { if (channel->codec == V4L2_PIX_FMT_HEVC) return v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_max_qp); else return v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_max_qp); } struct allegro_m2m_buffer { struct v4l2_m2m_buffer buf; struct list_head head; }; #define to_allegro_m2m_buffer(__buf) \ container_of(__buf, struct allegro_m2m_buffer, buf) struct fw_info { unsigned int id; unsigned int id_codec; char *version; unsigned int mailbox_cmd; unsigned int mailbox_status; size_t mailbox_size; enum mcu_msg_version mailbox_version; size_t suballocator_size; }; static const struct fw_info supported_firmware[] = { { .id = 18296, .id_codec = 96272, .version = "v2018.2", .mailbox_cmd = 0x7800, .mailbox_status = 0x7c00, .mailbox_size = 0x400 - 0x8, .mailbox_version = MCU_MSG_VERSION_2018_2, .suballocator_size = SZ_16M, }, { .id = 14680, .id_codec = 126572, .version = "v2019.2", .mailbox_cmd = 0x7000, .mailbox_status = 0x7800, .mailbox_size = 0x800 - 0x8, .mailbox_version = MCU_MSG_VERSION_2019_2, .suballocator_size = SZ_32M, }, }; static inline u32 to_mcu_addr(struct allegro_dev *dev, dma_addr_t phys) { if (upper_32_bits(phys) || (lower_32_bits(phys) & MCU_CACHE_OFFSET)) v4l2_warn(&dev->v4l2_dev, "address %pad is outside mcu window\n", &phys); return lower_32_bits(phys) | MCU_CACHE_OFFSET; } static inline u32 to_mcu_size(struct allegro_dev *dev, size_t size) { return lower_32_bits(size); } static inline u32 to_codec_addr(struct allegro_dev *dev, dma_addr_t phys) { if (upper_32_bits(phys)) v4l2_warn(&dev->v4l2_dev, "address %pad cannot be used by codec\n", &phys); return lower_32_bits(phys); } static inline u64 ptr_to_u64(const void *ptr) { return (uintptr_t)ptr; } /* Helper functions 
for channel and user operations */ static unsigned long allegro_next_user_id(struct allegro_dev *dev) { if (dev->channel_user_ids == ~0UL) return -EBUSY; return ffz(dev->channel_user_ids); } static struct allegro_channel * allegro_find_channel_by_user_id(struct allegro_dev *dev, unsigned int user_id) { struct allegro_channel *channel; list_for_each_entry(channel, &dev->channels, list) { if (channel->user_id == user_id) return channel; } return ERR_PTR(-EINVAL); } static struct allegro_channel * allegro_find_channel_by_channel_id(struct allegro_dev *dev, unsigned int channel_id) { struct allegro_channel *channel; list_for_each_entry(channel, &dev->channels, list) { if (channel->mcu_channel_id == channel_id) return channel; } return ERR_PTR(-EINVAL); } static inline bool channel_exists(struct allegro_channel *channel) { return channel->mcu_channel_id != -1; } #define AL_ERROR 0x80 #define AL_ERR_INIT_FAILED 0x81 #define AL_ERR_NO_FRAME_DECODED 0x82 #define AL_ERR_RESOLUTION_CHANGE 0x85 #define AL_ERR_NO_MEMORY 0x87 #define AL_ERR_STREAM_OVERFLOW 0x88 #define AL_ERR_TOO_MANY_SLICES 0x89 #define AL_ERR_BUF_NOT_READY 0x8c #define AL_ERR_NO_CHANNEL_AVAILABLE 0x8d #define AL_ERR_RESOURCE_UNAVAILABLE 0x8e #define AL_ERR_NOT_ENOUGH_CORES 0x8f #define AL_ERR_REQUEST_MALFORMED 0x90 #define AL_ERR_CMD_NOT_ALLOWED 0x91 #define AL_ERR_INVALID_CMD_VALUE 0x92 static inline const char *allegro_err_to_string(unsigned int err) { switch (err) { case AL_ERR_INIT_FAILED: return "initialization failed"; case AL_ERR_NO_FRAME_DECODED: return "no frame decoded"; case AL_ERR_RESOLUTION_CHANGE: return "resolution change"; case AL_ERR_NO_MEMORY: return "out of memory"; case AL_ERR_STREAM_OVERFLOW: return "stream buffer overflow"; case AL_ERR_TOO_MANY_SLICES: return "too many slices"; case AL_ERR_BUF_NOT_READY: return "buffer not ready"; case AL_ERR_NO_CHANNEL_AVAILABLE: return "no channel available"; case AL_ERR_RESOURCE_UNAVAILABLE: return "resource unavailable"; case AL_ERR_NOT_ENOUGH_CORES: 
return "not enough cores"; case AL_ERR_REQUEST_MALFORMED: return "request malformed"; case AL_ERR_CMD_NOT_ALLOWED: return "command not allowed"; case AL_ERR_INVALID_CMD_VALUE: return "invalid command value"; case AL_ERROR: default: return "unknown error"; } } static unsigned int estimate_stream_size(unsigned int width, unsigned int height) { unsigned int offset = ENCODER_STREAM_OFFSET; unsigned int num_blocks = DIV_ROUND_UP(width, SIZE_MACROBLOCK) * DIV_ROUND_UP(height, SIZE_MACROBLOCK); unsigned int pcm_size = SZ_256; unsigned int partition_table = SZ_256; return round_up(offset + num_blocks * pcm_size + partition_table, 32); } static enum v4l2_mpeg_video_h264_level select_minimum_h264_level(unsigned int width, unsigned int height) { unsigned int pic_width_in_mb = DIV_ROUND_UP(width, SIZE_MACROBLOCK); unsigned int frame_height_in_mb = DIV_ROUND_UP(height, SIZE_MACROBLOCK); unsigned int frame_size_in_mb = pic_width_in_mb * frame_height_in_mb; enum v4l2_mpeg_video_h264_level level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0; /* * The level limits are specified in Rec. ITU-T H.264 Annex A.3.1 and * also specify limits regarding bit rate and CBP size. Only approximate * the levels using the frame size. * * Level 5.1 allows up to 4k video resolution. 
*/ if (frame_size_in_mb <= 99) level = V4L2_MPEG_VIDEO_H264_LEVEL_1_0; else if (frame_size_in_mb <= 396) level = V4L2_MPEG_VIDEO_H264_LEVEL_1_1; else if (frame_size_in_mb <= 792) level = V4L2_MPEG_VIDEO_H264_LEVEL_2_1; else if (frame_size_in_mb <= 1620) level = V4L2_MPEG_VIDEO_H264_LEVEL_2_2; else if (frame_size_in_mb <= 3600) level = V4L2_MPEG_VIDEO_H264_LEVEL_3_1; else if (frame_size_in_mb <= 5120) level = V4L2_MPEG_VIDEO_H264_LEVEL_3_2; else if (frame_size_in_mb <= 8192) level = V4L2_MPEG_VIDEO_H264_LEVEL_4_0; else if (frame_size_in_mb <= 8704) level = V4L2_MPEG_VIDEO_H264_LEVEL_4_2; else if (frame_size_in_mb <= 22080) level = V4L2_MPEG_VIDEO_H264_LEVEL_5_0; else level = V4L2_MPEG_VIDEO_H264_LEVEL_5_1; return level; } static unsigned int h264_maximum_bitrate(enum v4l2_mpeg_video_h264_level level) { switch (level) { case V4L2_MPEG_VIDEO_H264_LEVEL_1_0: return 64000; case V4L2_MPEG_VIDEO_H264_LEVEL_1B: return 128000; case V4L2_MPEG_VIDEO_H264_LEVEL_1_1: return 192000; case V4L2_MPEG_VIDEO_H264_LEVEL_1_2: return 384000; case V4L2_MPEG_VIDEO_H264_LEVEL_1_3: return 768000; case V4L2_MPEG_VIDEO_H264_LEVEL_2_0: return 2000000; case V4L2_MPEG_VIDEO_H264_LEVEL_2_1: return 4000000; case V4L2_MPEG_VIDEO_H264_LEVEL_2_2: return 4000000; case V4L2_MPEG_VIDEO_H264_LEVEL_3_0: return 10000000; case V4L2_MPEG_VIDEO_H264_LEVEL_3_1: return 14000000; case V4L2_MPEG_VIDEO_H264_LEVEL_3_2: return 20000000; case V4L2_MPEG_VIDEO_H264_LEVEL_4_0: return 20000000; case V4L2_MPEG_VIDEO_H264_LEVEL_4_1: return 50000000; case V4L2_MPEG_VIDEO_H264_LEVEL_4_2: return 50000000; case V4L2_MPEG_VIDEO_H264_LEVEL_5_0: return 135000000; case V4L2_MPEG_VIDEO_H264_LEVEL_5_1: default: return 240000000; } } static unsigned int h264_maximum_cpb_size(enum v4l2_mpeg_video_h264_level level) { switch (level) { case V4L2_MPEG_VIDEO_H264_LEVEL_1_0: return 175; case V4L2_MPEG_VIDEO_H264_LEVEL_1B: return 350; case V4L2_MPEG_VIDEO_H264_LEVEL_1_1: return 500; case V4L2_MPEG_VIDEO_H264_LEVEL_1_2: return 1000; case 
V4L2_MPEG_VIDEO_H264_LEVEL_1_3: return 2000; case V4L2_MPEG_VIDEO_H264_LEVEL_2_0: return 2000; case V4L2_MPEG_VIDEO_H264_LEVEL_2_1: return 4000; case V4L2_MPEG_VIDEO_H264_LEVEL_2_2: return 4000; case V4L2_MPEG_VIDEO_H264_LEVEL_3_0: return 10000; case V4L2_MPEG_VIDEO_H264_LEVEL_3_1: return 14000; case V4L2_MPEG_VIDEO_H264_LEVEL_3_2: return 20000; case V4L2_MPEG_VIDEO_H264_LEVEL_4_0: return 25000; case V4L2_MPEG_VIDEO_H264_LEVEL_4_1: return 62500; case V4L2_MPEG_VIDEO_H264_LEVEL_4_2: return 62500; case V4L2_MPEG_VIDEO_H264_LEVEL_5_0: return 135000; case V4L2_MPEG_VIDEO_H264_LEVEL_5_1: default: return 240000; } } static enum v4l2_mpeg_video_hevc_level select_minimum_hevc_level(unsigned int width, unsigned int height) { unsigned int luma_picture_size = width * height; enum v4l2_mpeg_video_hevc_level level; if (luma_picture_size <= 36864) level = V4L2_MPEG_VIDEO_HEVC_LEVEL_1; else if (luma_picture_size <= 122880) level = V4L2_MPEG_VIDEO_HEVC_LEVEL_2; else if (luma_picture_size <= 245760) level = V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1; else if (luma_picture_size <= 552960) level = V4L2_MPEG_VIDEO_HEVC_LEVEL_3; else if (luma_picture_size <= 983040) level = V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1; else if (luma_picture_size <= 2228224) level = V4L2_MPEG_VIDEO_HEVC_LEVEL_4; else if (luma_picture_size <= 8912896) level = V4L2_MPEG_VIDEO_HEVC_LEVEL_5; else level = V4L2_MPEG_VIDEO_HEVC_LEVEL_6; return level; } static unsigned int hevc_maximum_bitrate(enum v4l2_mpeg_video_hevc_level level) { /* * See Rec. ITU-T H.265 v5 (02/2018), A.4.2 Profile-specific level * limits for the video profiles. 
*/ switch (level) { case V4L2_MPEG_VIDEO_HEVC_LEVEL_1: return 128; case V4L2_MPEG_VIDEO_HEVC_LEVEL_2: return 1500; case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1: return 3000; case V4L2_MPEG_VIDEO_HEVC_LEVEL_3: return 6000; case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1: return 10000; case V4L2_MPEG_VIDEO_HEVC_LEVEL_4: return 12000; case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1: return 20000; case V4L2_MPEG_VIDEO_HEVC_LEVEL_5: return 25000; default: case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1: return 40000; } } static unsigned int hevc_maximum_cpb_size(enum v4l2_mpeg_video_hevc_level level) { switch (level) { case V4L2_MPEG_VIDEO_HEVC_LEVEL_1: return 350; case V4L2_MPEG_VIDEO_HEVC_LEVEL_2: return 1500; case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1: return 3000; case V4L2_MPEG_VIDEO_HEVC_LEVEL_3: return 6000; case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1: return 10000; case V4L2_MPEG_VIDEO_HEVC_LEVEL_4: return 12000; case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1: return 20000; case V4L2_MPEG_VIDEO_HEVC_LEVEL_5: return 25000; default: case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1: return 40000; } } static const struct fw_info * allegro_get_firmware_info(struct allegro_dev *dev, const struct firmware *fw, const struct firmware *fw_codec) { int i; unsigned int id = fw->size; unsigned int id_codec = fw_codec->size; for (i = 0; i < ARRAY_SIZE(supported_firmware); i++) if (supported_firmware[i].id == id && supported_firmware[i].id_codec == id_codec) return &supported_firmware[i]; return NULL; } /* * Buffers that are used internally by the MCU. 
*/ static int allegro_alloc_buffer(struct allegro_dev *dev, struct allegro_buffer *buffer, size_t size) { buffer->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size, &buffer->paddr, GFP_KERNEL); if (!buffer->vaddr) return -ENOMEM; buffer->size = size; return 0; } static void allegro_free_buffer(struct allegro_dev *dev, struct allegro_buffer *buffer) { if (buffer->vaddr) { dma_free_coherent(&dev->plat_dev->dev, buffer->size, buffer->vaddr, buffer->paddr); buffer->vaddr = NULL; buffer->size = 0; } } /* * Mailbox interface to send messages to the MCU. */ static void allegro_mcu_interrupt(struct allegro_dev *dev); static void allegro_handle_message(struct allegro_dev *dev, union mcu_msg_response *msg); static struct allegro_mbox *allegro_mbox_init(struct allegro_dev *dev, unsigned int base, size_t size) { struct allegro_mbox *mbox; mbox = devm_kmalloc(&dev->plat_dev->dev, sizeof(*mbox), GFP_KERNEL); if (!mbox) return ERR_PTR(-ENOMEM); mbox->dev = dev; mbox->head = base; mbox->tail = base + 0x4; mbox->data = base + 0x8; mbox->size = size; mutex_init(&mbox->lock); regmap_write(dev->sram, mbox->head, 0); regmap_write(dev->sram, mbox->tail, 0); return mbox; } static int allegro_mbox_write(struct allegro_mbox *mbox, const u32 *src, size_t size) { struct regmap *sram = mbox->dev->sram; unsigned int tail; size_t size_no_wrap; int err = 0; int stride = regmap_get_reg_stride(sram); if (!src) return -EINVAL; if (size > mbox->size) return -EINVAL; mutex_lock(&mbox->lock); regmap_read(sram, mbox->tail, &tail); if (tail > mbox->size) { err = -EIO; goto out; } size_no_wrap = min(size, mbox->size - (size_t)tail); regmap_bulk_write(sram, mbox->data + tail, src, size_no_wrap / stride); regmap_bulk_write(sram, mbox->data, src + (size_no_wrap / sizeof(*src)), (size - size_no_wrap) / stride); regmap_write(sram, mbox->tail, (tail + size) % mbox->size); out: mutex_unlock(&mbox->lock); return err; } static ssize_t allegro_mbox_read(struct allegro_mbox *mbox, u32 *dst, size_t nbyte) { 
	/* Message header as laid out in SRAM: length of body, then type */
	struct {
		u16 length;
		u16 type;
	} __attribute__ ((__packed__)) *header;
	struct regmap *sram = mbox->dev->sram;
	unsigned int head;
	ssize_t size;
	size_t body_no_wrap;
	int stride = regmap_get_reg_stride(sram);

	regmap_read(sram, mbox->head, &head);
	if (head > mbox->size)
		return -EIO;

	/* Assume that the header does not wrap. */
	regmap_bulk_read(sram, mbox->data + head,
			 dst, sizeof(*header) / stride);
	header = (void *)dst;
	size = header->length + sizeof(*header);
	/* Total size must fit the ring and be 32-bit aligned */
	if (size > mbox->size || size & 0x3)
		return -EIO;
	if (size > nbyte)
		return -EINVAL;

	/*
	 * The message might wrap within the mailbox. If the message does not
	 * wrap, the first read will read the entire message, otherwise the
	 * first read will read message until the end of the mailbox and the
	 * second read will read the remaining bytes from the beginning of the
	 * mailbox.
	 *
	 * Skip the header, as was already read to get the size of the body.
	 */
	body_no_wrap = min((size_t)header->length,
			   (size_t)(mbox->size - (head + sizeof(*header))));
	regmap_bulk_read(sram, mbox->data + head + sizeof(*header),
			 dst + (sizeof(*header) / sizeof(*dst)),
			 body_no_wrap / stride);
	regmap_bulk_read(sram, mbox->data,
			 dst + (sizeof(*header) + body_no_wrap) / sizeof(*dst),
			 (header->length - body_no_wrap) / stride);

	regmap_write(sram, mbox->head, (head + size) % mbox->size);

	return size;
}

/**
 * allegro_mbox_send() - Send a message via the mailbox
 * @mbox: the mailbox which is used to send the message
 * @msg: the message to send
 *
 * Encodes @msg into a scratch buffer, writes it to the mailbox ring and
 * signals the MCU. Return: 0 on success, negative errno on failure.
 */
static int allegro_mbox_send(struct allegro_mbox *mbox, void *msg)
{
	struct allegro_dev *dev = mbox->dev;
	ssize_t size;
	int err;
	u32 *tmp;

	tmp = kzalloc(mbox->size, GFP_KERNEL);
	if (!tmp) {
		err = -ENOMEM;
		goto out;
	}

	size = allegro_encode_mail(tmp, msg);

	err = allegro_mbox_write(mbox, tmp, size);
	kfree(tmp);
	if (err)
		goto out;

	/* Tell the MCU that a new message is waiting */
	allegro_mcu_interrupt(dev);

out:
	return err;
}

/**
 * allegro_mbox_notify() - Notify the mailbox about a new message
 * @mbox: The allegro_mbox to notify
 */
static void allegro_mbox_notify(struct
 allegro_mbox *mbox)
{
	struct allegro_dev *dev = mbox->dev;
	union mcu_msg_response *msg;
	ssize_t size;
	u32 *tmp;
	int err;

	msg = kmalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return;
	/* The decoder needs the mailbox protocol version up front */
	msg->header.version = dev->fw_info->mailbox_version;

	tmp = kmalloc(mbox->size, GFP_KERNEL);
	if (!tmp)
		goto out;

	size = allegro_mbox_read(mbox, tmp, mbox->size);
	if (size < 0)
		goto out;

	err = allegro_decode_mail(msg, tmp);
	if (err)
		goto out;

	allegro_handle_message(dev, msg);

out:
	/* kfree(NULL) is a no-op, so the early-exit paths are safe */
	kfree(tmp);
	kfree(msg);
}

/*
 * Read the encoder-buffer capabilities (color depth, memory depth, number
 * of cores) from the system settings regmap and the core clock rate.
 * Returns -ENODEV for firmware older than 2019.2, which does not support
 * the encoder buffer.
 */
static int allegro_encoder_buffer_init(struct allegro_dev *dev,
				       struct allegro_encoder_buffer *buffer)
{
	int err;
	struct regmap *settings = dev->settings;
	unsigned int supports_10_bit;
	unsigned int memory_depth;
	unsigned int num_cores;
	unsigned int color_depth;
	unsigned long clk_rate;

	/* We don't support the encoder buffer pre Firmware version 2019.2 */
	if (dev->fw_info->mailbox_version < MCU_MSG_VERSION_2019_2)
		return -ENODEV;

	if (!settings)
		return -EINVAL;

	err = regmap_read(settings, VCU_ENC_COLOR_DEPTH, &supports_10_bit);
	if (err < 0)
		return err;
	err = regmap_read(settings, VCU_MEMORY_DEPTH, &memory_depth);
	if (err < 0)
		return err;
	err = regmap_read(settings, VCU_NUM_CORE, &num_cores);
	if (err < 0)
		return err;

	clk_rate = clk_get_rate(dev->clk_core);
	if (clk_rate == 0)
		return -EINVAL;

	color_depth = supports_10_bit ? 10 : 8;
	/* The firmware expects the encoder buffer size in bits.
 */
	buffer->size = color_depth * 32 * memory_depth;
	buffer->color_depth = color_depth;
	buffer->num_cores = num_cores;
	buffer->clk_rate = clk_rate;

	v4l2_dbg(1, debug, &dev->v4l2_dev,
		 "using %d bits encoder buffer with %d-bit color depth\n",
		 buffer->size, color_depth);

	return 0;
}

/*
 * Send the INIT request to the MCU, telling it where the suballocator
 * memory lives and — for firmware with encoder-buffer support — the
 * encoder buffer parameters (-1 values signal "not available").
 */
static void allegro_mcu_send_init(struct allegro_dev *dev,
				  dma_addr_t suballoc_dma, size_t suballoc_size)
{
	struct mcu_msg_init_request msg;

	memset(&msg, 0, sizeof(msg));

	msg.header.type = MCU_MSG_TYPE_INIT;
	msg.header.version = dev->fw_info->mailbox_version;

	msg.suballoc_dma = to_mcu_addr(dev, suballoc_dma);
	msg.suballoc_size = to_mcu_size(dev, suballoc_size);

	if (dev->has_encoder_buffer) {
		msg.encoder_buffer_size = dev->encoder_buffer.size;
		msg.encoder_buffer_color_depth = dev->encoder_buffer.color_depth;
		msg.num_cores = dev->encoder_buffer.num_cores;
		msg.clk_rate = dev->encoder_buffer.clk_rate;
	} else {
		msg.encoder_buffer_size = -1;
		msg.encoder_buffer_color_depth = -1;
		msg.num_cores = -1;
		msg.clk_rate = -1;
	}

	allegro_mbox_send(dev->mbox_command, &msg);
}

/* Map a V4L2 pixel format to the MCU's format code; only NV12 is supported */
static u32 v4l2_pixelformat_to_mcu_format(u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_NV12:
		/* AL_420_8BITS: 0x100 -> NV12, 0x88 -> 8 bit */
		return 0x100 | 0x88;
	default:
		return -EINVAL;
	}
}

/* Map a V4L2 colorspace to the MCU's colorspace code (0 = unknown) */
static u32 v4l2_colorspace_to_mcu_colorspace(enum v4l2_colorspace colorspace)
{
	switch (colorspace) {
	case V4L2_COLORSPACE_REC709:
		return 2;
	case V4L2_COLORSPACE_SMPTE170M:
		return 3;
	case V4L2_COLORSPACE_SMPTE240M:
		return 4;
	case V4L2_COLORSPACE_SRGB:
		return 7;
	default:
		/* UNKNOWN */
		return 0;
	}
}

/* Map an H.264 profile to its profile_idc; only baseline (66) is handled */
static u8 v4l2_profile_to_mcu_profile(enum v4l2_mpeg_video_h264_profile profile)
{
	switch (profile) {
	case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
	default:
		return 66;
	}
}

/* Map an H.264 level enum to the numeric level_idc (e.g. 4.2 -> 42) */
static u16 v4l2_level_to_mcu_level(enum v4l2_mpeg_video_h264_level level)
{
	switch (level) {
	case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
		return 10;
	case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
		return 11;
	case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
		return 12;
	case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
		return 13;
	case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
		return 20;
	case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
		return 21;
	case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
		return 22;
	case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
		return 30;
	case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
		return 31;
	case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
		return 32;
	case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
		return 40;
	case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
		return 41;
	case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
		return 42;
	case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
		return 50;
	case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
	default:
		return 51;
	}
}

/* Map an HEVC profile to the MCU's profile code (default: Main) */
static u8 hevc_profile_to_mcu_profile(enum v4l2_mpeg_video_hevc_profile profile)
{
	switch (profile) {
	default:
	case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
		return 1;
	case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
		return 2;
	case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
		return 3;
	}
}

/* Map an HEVC level enum to the numeric level code (e.g. 4.1 -> 41) */
static u16 hevc_level_to_mcu_level(enum v4l2_mpeg_video_hevc_level level)
{
	switch (level) {
	case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
		return 10;
	case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
		return 20;
	case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
		return 21;
	case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
		return 30;
	case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
		return 31;
	case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
		return 40;
	case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
		return 41;
	case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
		return 50;
	default:
	case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
		return 51;
	}
}

/* Map an HEVC tier to the MCU's tier flag (0 = Main, 1 = High) */
static u8 hevc_tier_to_mcu_tier(enum v4l2_mpeg_video_hevc_tier tier)
{
	switch (tier) {
	default:
	case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN:
		return 0;
	case V4L2_MPEG_VIDEO_HEVC_TIER_HIGH:
		return 1;
	}
}

/* Map the V4L2 bitrate mode to the MCU rate-control mode (CBR is default) */
static u32
v4l2_bitrate_mode_to_mcu_mode(enum v4l2_mpeg_video_bitrate_mode mode)
{
	switch (mode) {
	case V4L2_MPEG_VIDEO_BITRATE_MODE_VBR:
		return 2;
	case V4L2_MPEG_VIDEO_BITRATE_MODE_CBR:
	default:
		return 1;
	}
}

static u32 v4l2_cpb_size_to_mcu(unsigned int cpb_size, unsigned int bitrate)
{
	unsigned int cpb_size_kbit;
	unsigned int bitrate_kbps;

	/*
	 * The mcu expects the CPB size in units of a 90 kHz clock, but the
	 * channel follows the
 V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE and stores
	 * the CPB size in kilobytes.
	 */
	cpb_size_kbit = cpb_size * BITS_PER_BYTE;
	bitrate_kbps = bitrate / 1000;

	/* NOTE(review): bitrate < 1000 would make bitrate_kbps zero here —
	 * presumably prevented by control limits; confirm. */
	return (cpb_size_kbit * 90000) / bitrate_kbps;
}

/*
 * QP delta between two frame types; equal QPs are encoded as -1 for
 * the firmware rather than 0.
 */
static s16 get_qp_delta(int minuend, int subtrahend)
{
	if (minuend == subtrahend)
		return -1;
	else
		return minuend - subtrahend;
}

static u32 allegro_channel_get_entropy_mode(struct allegro_channel *channel)
{
#define ALLEGRO_ENTROPY_MODE_CAVLC 0
#define ALLEGRO_ENTROPY_MODE_CABAC 1

	/* HEVC always uses CABAC, but this has to be explicitly set */
	if (channel->codec == V4L2_PIX_FMT_HEVC)
		return ALLEGRO_ENTROPY_MODE_CABAC;

	return ALLEGRO_ENTROPY_MODE_CAVLC;
}

/*
 * Populate @param for a CREATE_CHANNEL request from the channel's current
 * V4L2 controls and configuration. Always returns 0.
 */
static int fill_create_channel_param(struct allegro_channel *channel,
				     struct create_channel_param *param)
{
	int i_frame_qp = allegro_channel_get_i_frame_qp(channel);
	int p_frame_qp = allegro_channel_get_p_frame_qp(channel);
	int b_frame_qp = allegro_channel_get_b_frame_qp(channel);
	int bitrate_mode = v4l2_ctrl_g_ctrl(channel->mpeg_video_bitrate_mode);
	unsigned int cpb_size = v4l2_ctrl_g_ctrl(channel->mpeg_video_cpb_size);

	param->width = channel->width;
	param->height = channel->height;
	param->format = v4l2_pixelformat_to_mcu_format(channel->pixelformat);
	param->colorspace =
		v4l2_colorspace_to_mcu_colorspace(channel->colorspace);
	param->src_mode = 0x0;

	param->codec = channel->codec;
	if (channel->codec == V4L2_PIX_FMT_H264) {
		enum v4l2_mpeg_video_h264_profile profile;
		enum v4l2_mpeg_video_h264_level level;

		profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_profile);
		level = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_level);

		param->profile = v4l2_profile_to_mcu_profile(profile);
		param->constraint_set_flags = BIT(1);
		param->level = v4l2_level_to_mcu_level(level);
	} else {
		enum v4l2_mpeg_video_hevc_profile profile;
		enum v4l2_mpeg_video_hevc_level level;
		enum v4l2_mpeg_video_hevc_tier tier;

		profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_profile);
		level = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_level);
		tier =
 v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_tier);

		param->profile = hevc_profile_to_mcu_profile(profile);
		param->level = hevc_level_to_mcu_level(level);
		param->tier = hevc_tier_to_mcu_tier(tier);
	}

	param->log2_max_poc = LOG2_MAX_PIC_ORDER_CNT;
	param->log2_max_frame_num = channel->log2_max_frame_num;
	param->temporal_mvp_enable = channel->temporal_mvp_enable;

	/* Deblocking / loop filter configuration */
	param->dbf_ovr_en = channel->dbf_ovr_en;
	param->override_lf = channel->enable_deblocking_filter_override;
	param->enable_reordering = channel->enable_reordering;
	param->entropy_mode = allegro_channel_get_entropy_mode(channel);
	param->rdo_cost_mode = 1;
	param->custom_lda = 1;
	param->lf = 1;
	param->lf_x_tile = channel->enable_loop_filter_across_tiles;
	param->lf_x_slice = channel->enable_loop_filter_across_slices;

	param->src_bit_depth = 8;

	param->beta_offset = BETA_OFFSET_DIV_2;
	param->tc_offset = TC_OFFSET_DIV_2;
	param->num_slices = 1;
	/* Motion estimation search ranges: B horizontal/vertical, then P */
	param->me_range[0] = channel->b_hrz_me_range;
	param->me_range[1] = channel->b_vrt_me_range;
	param->me_range[2] = channel->p_hrz_me_range;
	param->me_range[3] = channel->p_vrt_me_range;
	param->max_cu_size = channel->max_cu_size;
	param->min_cu_size = channel->min_cu_size;
	param->max_tu_size = channel->max_tu_size;
	param->min_tu_size = channel->min_tu_size;
	param->max_transfo_depth_intra = channel->max_transfo_depth_intra;
	param->max_transfo_depth_inter = channel->max_transfo_depth_inter;

	param->encoder_buffer_enabled = v4l2_ctrl_g_ctrl(channel->encoder_buffer);
	param->encoder_buffer_offset = 0;

	/* Rate control is disabled (mode 0) when frame RC is off */
	param->rate_control_mode = channel->frame_rc_enable ?
		v4l2_bitrate_mode_to_mcu_mode(bitrate_mode) : 0;

	param->cpb_size = v4l2_cpb_size_to_mcu(cpb_size, channel->bitrate_peak);
	/* Shall be ]0;cpb_size in 90 kHz units]. Use maximum value. */
	param->initial_rem_delay = param->cpb_size;
	param->framerate = DIV_ROUND_UP(channel->framerate.numerator,
					channel->framerate.denominator);
	param->clk_ratio = channel->framerate.denominator == 1001 ?
 1001 : 1000;
	param->target_bitrate = channel->bitrate;
	param->max_bitrate = channel->bitrate_peak;
	param->initial_qp = i_frame_qp;
	param->min_qp = allegro_channel_get_min_qp(channel);
	param->max_qp = allegro_channel_get_max_qp(channel);
	param->ip_delta = get_qp_delta(i_frame_qp, p_frame_qp);
	param->pb_delta = get_qp_delta(p_frame_qp, b_frame_qp);
	param->golden_ref = 0;
	param->golden_delta = 2;
	param->golden_ref_frequency = 10;
	param->rate_control_option = 0x00000000;

	/* NOTE(review): width + height looks odd for a "num_pixel" field —
	 * kept as-is; confirm against the vendor interface. */
	param->num_pixel = channel->width + channel->height;
	param->max_psnr = 4200;
	param->max_pixel_value = 255;

	param->gop_ctrl_mode = 0x00000002;
	param->freq_idr = v4l2_ctrl_g_ctrl(channel->mpeg_video_gop_size);
	param->freq_lt = 0;
	param->gdr_mode = 0x00000000;
	param->gop_length = v4l2_ctrl_g_ctrl(channel->mpeg_video_gop_size);
	param->subframe_latency = 0x00000000;

	param->lda_factors[0] = 51;
	param->lda_factors[1] = 90;
	param->lda_factors[2] = 151;
	param->lda_factors[3] = 151;
	param->lda_factors[4] = 151;
	param->lda_factors[5] = 151;

	param->max_num_merge_cand = 5;

	return 0;
}

/*
 * Encode the channel parameters into the channel's config blob and send a
 * CREATE_CHANNEL request referencing that blob to the MCU.
 */
static int allegro_mcu_send_create_channel(struct allegro_dev *dev,
					   struct allegro_channel *channel)
{
	struct mcu_msg_create_channel msg;
	struct allegro_buffer *blob = &channel->config_blob;
	struct create_channel_param param;
	size_t size;

	memset(&param, 0, sizeof(param));
	fill_create_channel_param(channel, &param);
	allegro_alloc_buffer(dev, blob,
			     sizeof(struct create_channel_param));
	param.version = dev->fw_info->mailbox_version;
	size = allegro_encode_config_blob(blob->vaddr, &param);

	memset(&msg, 0, sizeof(msg));

	msg.header.type = MCU_MSG_TYPE_CREATE_CHANNEL;
	msg.header.version = dev->fw_info->mailbox_version;

	msg.user_id = channel->user_id;

	msg.blob = blob->vaddr;
	msg.blob_size = size;
	msg.blob_mcu_addr = to_mcu_addr(dev, blob->paddr);

	allegro_mbox_send(dev->mbox_command, &msg);

	return 0;
}

static int allegro_mcu_send_destroy_channel(struct allegro_dev *dev,
					    struct allegro_channel *channel)
{
	struct mcu_msg_destroy_channel msg;
	memset(&msg, 0, sizeof(msg));

	msg.header.type = MCU_MSG_TYPE_DESTROY_CHANNEL;
	msg.header.version = dev->fw_info->mailbox_version;

	msg.channel_id = channel->mcu_channel_id;

	allegro_mbox_send(dev->mbox_command, &msg);

	return 0;
}

/*
 * Hand a capture (stream) buffer to the MCU for the encoded bitstream.
 * @dst_handle identifies the buffer when the encode response comes back.
 */
static int allegro_mcu_send_put_stream_buffer(struct allegro_dev *dev,
					      struct allegro_channel *channel,
					      dma_addr_t paddr,
					      unsigned long size,
					      u64 dst_handle)
{
	struct mcu_msg_put_stream_buffer msg;

	memset(&msg, 0, sizeof(msg));

	msg.header.type = MCU_MSG_TYPE_PUT_STREAM_BUFFER;
	msg.header.version = dev->fw_info->mailbox_version;

	msg.channel_id = channel->mcu_channel_id;
	msg.dma_addr = to_codec_addr(dev, paddr);
	msg.mcu_addr = to_mcu_addr(dev, paddr);
	msg.size = size;
	msg.offset = ENCODER_STREAM_OFFSET;
	/* copied to mcu_msg_encode_frame_response */
	msg.dst_handle = dst_handle;

	allegro_mbox_send(dev->mbox_command, &msg);

	return 0;
}

/*
 * Ask the MCU to encode one frame from the NV12 planes at @src_y/@src_uv.
 * @src_handle identifies the source buffer in the encode response.
 */
static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
					 struct allegro_channel *channel,
					 dma_addr_t src_y, dma_addr_t src_uv,
					 u64 src_handle)
{
	struct mcu_msg_encode_frame msg;
	bool use_encoder_buffer = v4l2_ctrl_g_ctrl(channel->encoder_buffer);

	memset(&msg, 0, sizeof(msg));

	msg.header.type = MCU_MSG_TYPE_ENCODE_FRAME;
	msg.header.version = dev->fw_info->mailbox_version;

	msg.channel_id = channel->mcu_channel_id;
	msg.encoding_options = AL_OPT_FORCE_LOAD;
	if (use_encoder_buffer)
		msg.encoding_options |= AL_OPT_USE_L2;
	msg.pps_qp = 26; /* qp are relative to 26 */
	msg.user_param = 0; /* copied to mcu_msg_encode_frame_response */
	/* src_handle is copied to mcu_msg_encode_frame_response */
	msg.src_handle = src_handle;
	msg.src_y = to_codec_addr(dev, src_y);
	msg.src_uv = to_codec_addr(dev, src_uv);
	msg.stride = channel->stride;

	allegro_mbox_send(dev->mbox_command, &msg);

	return 0;
}

/*
 * Wait up to @timeout_ms for the MCU to signal init completion.
 * Returns -ETIMEDOUT if the completion did not fire in time.
 */
static int allegro_mcu_wait_for_init_timeout(struct allegro_dev *dev,
					     unsigned long timeout_ms)
{
	unsigned long tmo;

	tmo = wait_for_completion_timeout(&dev->init_complete,
					  msecs_to_jiffies(timeout_ms));
	if (tmo == 0)
		return -ETIMEDOUT;
	/* Arm the completion again for the next init cycle */
	reinit_completion(&dev->init_complete);
	return 0;
}

/*
 * Push all buffers on the channel's reference or intermediate list to the
 * MCU in a single variable-length message.
 */
static int allegro_mcu_push_buffer_internal(struct allegro_channel *channel,
					    enum mcu_msg_type type)
{
	struct allegro_dev *dev = channel->dev;
	struct mcu_msg_push_buffers_internal *msg;
	struct mcu_msg_push_buffers_internal_buffer *buffer;
	unsigned int num_buffers = 0;
	size_t size;
	struct allegro_buffer *al_buffer;
	struct list_head *list;
	int err;

	switch (type) {
	case MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE:
		list = &channel->buffers_reference;
		break;
	case MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE:
		list = &channel->buffers_intermediate;
		break;
	default:
		return -EINVAL;
	}

	/* Size the message by counting the buffers on the list */
	list_for_each_entry(al_buffer, list, head)
		num_buffers++;
	size = struct_size(msg, buffer, num_buffers);

	msg = kmalloc(size, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->header.type = type;
	msg->header.version = dev->fw_info->mailbox_version;

	msg->channel_id = channel->mcu_channel_id;
	msg->num_buffers = num_buffers;

	buffer = msg->buffer;
	list_for_each_entry(al_buffer, list, head) {
		buffer->dma_addr = to_codec_addr(dev, al_buffer->paddr);
		buffer->mcu_addr = to_mcu_addr(dev, al_buffer->paddr);
		buffer->size = to_mcu_size(dev, al_buffer->size);
		buffer++;
	}

	err = allegro_mbox_send(dev->mbox_command, msg);

	kfree(msg);

	return err;
}

static int allegro_mcu_push_buffer_intermediate(struct allegro_channel *channel)
{
	enum mcu_msg_type type = MCU_MSG_TYPE_PUSH_BUFFER_INTERMEDIATE;

	return allegro_mcu_push_buffer_internal(channel, type);
}

static int allegro_mcu_push_buffer_reference(struct allegro_channel *channel)
{
	enum mcu_msg_type type = MCU_MSG_TYPE_PUSH_BUFFER_REFERENCE;

	return allegro_mcu_push_buffer_internal(channel, type);
}

/*
 * Allocate @n DMA buffers of @size bytes each and add them to @list.
 * On failure, everything already added to @list is freed again.
 */
static int allocate_buffers_internal(struct allegro_channel *channel,
				     struct list_head *list,
				     size_t n, size_t size)
{
	struct allegro_dev *dev = channel->dev;
	unsigned int i;
	int err;
	struct allegro_buffer *buffer, *tmp;

	for (i = 0; i < n; i++) {
		buffer = kmalloc(sizeof(*buffer), GFP_KERNEL);
		if (!buffer) {
			err = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&buffer->head);

		err = allegro_alloc_buffer(dev, buffer, size);
		if (err)
			goto err;
		list_add(&buffer->head, list);
	}

	return 0;

err:
	/* Unwind: free every buffer that was already put on the list */
	list_for_each_entry_safe(buffer, tmp, list, head) {
		list_del(&buffer->head);
		allegro_free_buffer(dev, buffer);
		kfree(buffer);
	}

	return err;
}

/* Free all buffers on @list and remove them from the list */
static void destroy_buffers_internal(struct allegro_channel *channel,
				     struct list_head *list)
{
	struct allegro_dev *dev = channel->dev;
	struct allegro_buffer *buffer, *tmp;

	list_for_each_entry_safe(buffer, tmp, list, head) {
		list_del(&buffer->head);
		allegro_free_buffer(dev, buffer);
		kfree(buffer);
	}
}

static void destroy_reference_buffers(struct allegro_channel *channel)
{
	return destroy_buffers_internal(channel, &channel->buffers_reference);
}

static void destroy_intermediate_buffers(struct allegro_channel *channel)
{
	return destroy_buffers_internal(channel,
					&channel->buffers_intermediate);
}

static int allocate_intermediate_buffers(struct allegro_channel *channel,
					 size_t n, size_t size)
{
	return allocate_buffers_internal(channel,
					 &channel->buffers_intermediate,
					 n, size);
}

static int allocate_reference_buffers(struct allegro_channel *channel,
				      size_t n, size_t size)
{
	/* Reference frames are page-aligned */
	return allocate_buffers_internal(channel,
					 &channel->buffers_reference,
					 n, PAGE_ALIGN(size));
}

/*
 * Write an H.264 sequence parameter set for the channel into @dest
 * (at most @n bytes). Returns the number of bytes written or a negative
 * error code.
 */
static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
				      void *dest, size_t n)
{
	struct allegro_dev *dev = channel->dev;
	struct nal_h264_sps *sps;
	ssize_t size;
	unsigned int size_mb = SIZE_MACROBLOCK;
	/* Calculation of crop units in Rec. ITU-T H.264 (04/2017) p.
 76 */
	unsigned int crop_unit_x = 2;
	unsigned int crop_unit_y = 2;
	enum v4l2_mpeg_video_h264_profile profile;
	enum v4l2_mpeg_video_h264_level level;
	unsigned int cpb_size;
	unsigned int cpb_size_scale;

	sps = kzalloc(sizeof(*sps), GFP_KERNEL);
	if (!sps)
		return -ENOMEM;

	profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_profile);
	level = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_level);

	sps->profile_idc = nal_h264_profile(profile);
	sps->constraint_set0_flag = 0;
	sps->constraint_set1_flag = 1;
	sps->constraint_set2_flag = 0;
	sps->constraint_set3_flag = 0;
	sps->constraint_set4_flag = 0;
	sps->constraint_set5_flag = 0;
	sps->level_idc = nal_h264_level(level);
	sps->seq_parameter_set_id = 0;
	sps->log2_max_frame_num_minus4 = LOG2_MAX_FRAME_NUM - 4;
	sps->pic_order_cnt_type = 0;
	sps->log2_max_pic_order_cnt_lsb_minus4 = LOG2_MAX_PIC_ORDER_CNT - 4;
	sps->max_num_ref_frames = 3;
	sps->gaps_in_frame_num_value_allowed_flag = 0;
	sps->pic_width_in_mbs_minus1 =
		DIV_ROUND_UP(channel->width, size_mb) - 1;
	sps->pic_height_in_map_units_minus1 =
		DIV_ROUND_UP(channel->height, size_mb) - 1;
	sps->frame_mbs_only_flag = 1;
	sps->mb_adaptive_frame_field_flag = 0;
	sps->direct_8x8_inference_flag = 1;
	/* Crop when the frame size is not a multiple of the macroblock size */
	sps->frame_cropping_flag =
		(channel->width % size_mb) || (channel->height % size_mb);
	if (sps->frame_cropping_flag) {
		sps->crop_left = 0;
		sps->crop_right = (round_up(channel->width, size_mb) - channel->width) / crop_unit_x;
		sps->crop_top = 0;
		sps->crop_bottom = (round_up(channel->height, size_mb) - channel->height) / crop_unit_y;
	}
	sps->vui_parameters_present_flag = 1;
	sps->vui.aspect_ratio_info_present_flag = 0;
	sps->vui.overscan_info_present_flag = 0;
	sps->vui.video_signal_type_present_flag = 1;
	sps->vui.video_format = 5; /* unspecified */
	sps->vui.video_full_range_flag = nal_h264_full_range(channel->quantization);
	sps->vui.colour_description_present_flag = 1;
	sps->vui.colour_primaries = nal_h264_color_primaries(channel->colorspace);
	sps->vui.transfer_characteristics =
		nal_h264_transfer_characteristics(channel->colorspace,
						  channel->xfer_func);
	sps->vui.matrix_coefficients =
		nal_h264_matrix_coeffs(channel->colorspace, channel->ycbcr_enc);
	sps->vui.chroma_loc_info_present_flag = 1;
	sps->vui.chroma_sample_loc_type_top_field = 0;
	sps->vui.chroma_sample_loc_type_bottom_field = 0;

	sps->vui.timing_info_present_flag = 1;
	sps->vui.num_units_in_tick = channel->framerate.denominator;
	/* time_scale is twice the frame rate numerator (field-based clock) */
	sps->vui.time_scale = 2 * channel->framerate.numerator;
	sps->vui.fixed_frame_rate_flag = 1;

	sps->vui.nal_hrd_parameters_present_flag = 0;
	sps->vui.vcl_hrd_parameters_present_flag = 1;
	sps->vui.vcl_hrd_parameters.cpb_cnt_minus1 = 0;
	/* See Rec. ITU-T H.264 (04/2017) p. 410 E-53 */
	sps->vui.vcl_hrd_parameters.bit_rate_scale =
		ffs(channel->bitrate_peak) - 6;
	sps->vui.vcl_hrd_parameters.bit_rate_value_minus1[0] =
		channel->bitrate_peak / (1 << (6 + sps->vui.vcl_hrd_parameters.bit_rate_scale)) - 1;
	/* See Rec. ITU-T H.264 (04/2017) p. 410 E-54 */
	cpb_size = v4l2_ctrl_g_ctrl(channel->mpeg_video_cpb_size);
	cpb_size_scale = ffs(cpb_size) - 4;
	sps->vui.vcl_hrd_parameters.cpb_size_scale = cpb_size_scale;
	sps->vui.vcl_hrd_parameters.cpb_size_value_minus1[0] =
		(cpb_size * 1000) / (1 << (4 + cpb_size_scale)) - 1;
	/* CBR when frame rate control is disabled */
	sps->vui.vcl_hrd_parameters.cbr_flag[0] =
		!v4l2_ctrl_g_ctrl(channel->mpeg_video_frame_rc_enable);
	sps->vui.vcl_hrd_parameters.initial_cpb_removal_delay_length_minus1 = 31;
	sps->vui.vcl_hrd_parameters.cpb_removal_delay_length_minus1 = 31;
	sps->vui.vcl_hrd_parameters.dpb_output_delay_length_minus1 = 31;
	sps->vui.vcl_hrd_parameters.time_offset_length = 0;
	sps->vui.low_delay_hrd_flag = 0;
	sps->vui.pic_struct_present_flag = 1;
	sps->vui.bitstream_restriction_flag = 0;

	size = nal_h264_write_sps(&dev->plat_dev->dev, dest, n, sps);

	kfree(sps);

	return size;
}

/*
 * Write an H.264 picture parameter set into @dest (at most @n bytes).
 * Returns the number of bytes written or a negative error code.
 */
static ssize_t allegro_h264_write_pps(struct allegro_channel *channel,
				      void *dest, size_t n)
{
	struct allegro_dev *dev = channel->dev;
	struct nal_h264_pps *pps;
	ssize_t size;

	pps = kzalloc(sizeof(*pps), GFP_KERNEL);
	if
 (!pps)
		return -ENOMEM;

	pps->pic_parameter_set_id = 0;
	pps->seq_parameter_set_id = 0;
	pps->entropy_coding_mode_flag = 0;
	pps->bottom_field_pic_order_in_frame_present_flag = 0;
	pps->num_slice_groups_minus1 = 0;
	pps->num_ref_idx_l0_default_active_minus1 = channel->num_ref_idx_l0 - 1;
	pps->num_ref_idx_l1_default_active_minus1 = channel->num_ref_idx_l1 - 1;
	pps->weighted_pred_flag = 0;
	pps->weighted_bipred_idc = 0;
	pps->pic_init_qp_minus26 = 0;
	pps->pic_init_qs_minus26 = 0;
	pps->chroma_qp_index_offset = 0;
	pps->deblocking_filter_control_present_flag = 1;
	pps->constrained_intra_pred_flag = 0;
	pps->redundant_pic_cnt_present_flag = 0;
	pps->transform_8x8_mode_flag = 0;
	pps->pic_scaling_matrix_present_flag = 0;
	pps->second_chroma_qp_index_offset = 0;

	size = nal_h264_write_pps(&dev->plat_dev->dev, dest, n, pps);

	kfree(pps);

	return size;
}

/* Queue a V4L2 end-of-stream event on the channel's file handle */
static void allegro_channel_eos_event(struct allegro_channel *channel)
{
	const struct v4l2_event eos_event = {
		.type = V4L2_EVENT_EOS
	};

	v4l2_event_queue_fh(&channel->fh, &eos_event);
}

/*
 * Write an HEVC video parameter set into @dest (at most @n bytes).
 * Returns the number of bytes written or a negative error code.
 */
static ssize_t allegro_hevc_write_vps(struct allegro_channel *channel,
				      void *dest, size_t n)
{
	struct allegro_dev *dev = channel->dev;
	struct nal_hevc_vps *vps;
	struct nal_hevc_profile_tier_level *ptl;
	ssize_t size;
	unsigned int num_ref_frames = channel->num_ref_idx_l0;
	s32 profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_profile);
	s32 level = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_level);
	s32 tier = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_tier);

	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
	if (!vps)
		return -ENOMEM;

	vps->base_layer_internal_flag = 1;
	vps->base_layer_available_flag = 1;
	vps->temporal_id_nesting_flag = 1;

	ptl = &vps->profile_tier_level;
	ptl->general_profile_idc = nal_hevc_profile(profile);
	ptl->general_profile_compatibility_flag[ptl->general_profile_idc] = 1;
	ptl->general_tier_flag = nal_hevc_tier(tier);
	ptl->general_progressive_source_flag = 1;
	ptl->general_frame_only_constraint_flag = 1;
	ptl->general_level_idc = nal_hevc_level(level);
	vps->sub_layer_ordering_info_present_flag = 0;
	vps->max_dec_pic_buffering_minus1[0] = num_ref_frames;
	vps->max_num_reorder_pics[0] = num_ref_frames;

	size = nal_hevc_write_vps(&dev->plat_dev->dev, dest, n, vps);

	kfree(vps);

	return size;
}

/*
 * Write an HEVC sequence parameter set into @dest (at most @n bytes).
 * Returns the number of bytes written or a negative error code.
 */
static ssize_t allegro_hevc_write_sps(struct allegro_channel *channel,
				      void *dest, size_t n)
{
	struct allegro_dev *dev = channel->dev;
	struct nal_hevc_sps *sps;
	struct nal_hevc_profile_tier_level *ptl;
	struct nal_hevc_vui_parameters *vui;
	struct nal_hevc_hrd_parameters *hrd;
	ssize_t size;
	unsigned int cpb_size;
	unsigned int num_ref_frames = channel->num_ref_idx_l0;
	s32 profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_profile);
	s32 level = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_level);
	s32 tier = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_tier);

	sps = kzalloc(sizeof(*sps), GFP_KERNEL);
	if (!sps)
		return -ENOMEM;

	sps->temporal_id_nesting_flag = 1;

	ptl = &sps->profile_tier_level;
	ptl->general_profile_idc = nal_hevc_profile(profile);
	ptl->general_profile_compatibility_flag[ptl->general_profile_idc] = 1;
	ptl->general_tier_flag = nal_hevc_tier(tier);
	ptl->general_progressive_source_flag = 1;
	ptl->general_frame_only_constraint_flag = 1;
	ptl->general_level_idc = nal_hevc_level(level);

	sps->seq_parameter_set_id = 0;
	sps->chroma_format_idc = 1; /* Only 4:2:0 sampling supported */
	/* Luma dimensions are rounded up to a multiple of 8 ... */
	sps->pic_width_in_luma_samples = round_up(channel->width, 8);
	sps->pic_height_in_luma_samples = round_up(channel->height, 8);
	/* ... and the conformance window crops back to the real size */
	sps->conf_win_right_offset =
		sps->pic_width_in_luma_samples - channel->width;
	sps->conf_win_bottom_offset =
		sps->pic_height_in_luma_samples - channel->height;
	sps->conformance_window_flag =
		sps->conf_win_right_offset || sps->conf_win_bottom_offset;

	sps->log2_max_pic_order_cnt_lsb_minus4 = LOG2_MAX_PIC_ORDER_CNT - 4;

	sps->sub_layer_ordering_info_present_flag = 1;
	sps->max_dec_pic_buffering_minus1[0] = num_ref_frames;
	sps->max_num_reorder_pics[0] = num_ref_frames;

	sps->log2_min_luma_coding_block_size_minus3 =
		channel->min_cu_size - 3;
	sps->log2_diff_max_min_luma_coding_block_size =
		channel->max_cu_size - channel->min_cu_size;
	sps->log2_min_luma_transform_block_size_minus2 =
		channel->min_tu_size - 2;
	sps->log2_diff_max_min_luma_transform_block_size =
		channel->max_tu_size - channel->min_tu_size;
	sps->max_transform_hierarchy_depth_intra =
		channel->max_transfo_depth_intra;
	sps->max_transform_hierarchy_depth_inter =
		channel->max_transfo_depth_inter;

	sps->sps_temporal_mvp_enabled_flag = channel->temporal_mvp_enable;
	sps->strong_intra_smoothing_enabled_flag = channel->max_cu_size > 4;

	sps->vui_parameters_present_flag = 1;
	vui = &sps->vui;

	vui->video_signal_type_present_flag = 1;
	vui->video_format = 5; /* unspecified */
	vui->video_full_range_flag = nal_hevc_full_range(channel->quantization);
	vui->colour_description_present_flag = 1;
	vui->colour_primaries = nal_hevc_color_primaries(channel->colorspace);
	vui->transfer_characteristics =
		nal_hevc_transfer_characteristics(channel->colorspace,
						  channel->xfer_func);
	vui->matrix_coeffs = nal_hevc_matrix_coeffs(channel->colorspace, channel->ycbcr_enc);

	vui->chroma_loc_info_present_flag = 1;
	vui->chroma_sample_loc_type_top_field = 0;
	vui->chroma_sample_loc_type_bottom_field = 0;

	vui->vui_timing_info_present_flag = 1;
	vui->vui_num_units_in_tick = channel->framerate.denominator;
	vui->vui_time_scale = channel->framerate.numerator;

	vui->bitstream_restriction_flag = 1;
	vui->motion_vectors_over_pic_boundaries_flag = 1;
	vui->restricted_ref_pic_lists_flag = 1;
	vui->log2_max_mv_length_horizontal = 15;
	vui->log2_max_mv_length_vertical = 15;

	/* HRD parameters for the VCL layer (peak bitrate and CPB size) */
	vui->vui_hrd_parameters_present_flag = 1;
	hrd = &vui->nal_hrd_parameters;
	hrd->vcl_hrd_parameters_present_flag = 1;

	hrd->initial_cpb_removal_delay_length_minus1 = 31;
	hrd->au_cpb_removal_delay_length_minus1 = 30;
	hrd->dpb_output_delay_length_minus1 = 30;

	hrd->bit_rate_scale = ffs(channel->bitrate_peak) - 6;
	hrd->vcl_hrd[0].bit_rate_value_minus1[0] =
		(channel->bitrate_peak >> (6 + hrd->bit_rate_scale)) - 1;

	cpb_size =
 v4l2_ctrl_g_ctrl(channel->mpeg_video_cpb_size) * 1000;
	hrd->cpb_size_scale = ffs(cpb_size) - 4;
	hrd->vcl_hrd[0].cpb_size_value_minus1[0] =
		(cpb_size >> (4 + hrd->cpb_size_scale)) - 1;

	/* CBR when frame rate control is disabled */
	hrd->vcl_hrd[0].cbr_flag[0] =
		!v4l2_ctrl_g_ctrl(channel->mpeg_video_frame_rc_enable);

	size = nal_hevc_write_sps(&dev->plat_dev->dev, dest, n, sps);

	kfree(sps);

	return size;
}

/*
 * Write an HEVC picture parameter set into @dest (at most @n bytes).
 * The tile layout comes from the MCU's encode-frame response @msg.
 * Returns the number of bytes written or a negative error code.
 */
static ssize_t allegro_hevc_write_pps(struct allegro_channel *channel,
				      struct mcu_msg_encode_frame_response *msg,
				      void *dest, size_t n)
{
	struct allegro_dev *dev = channel->dev;
	struct nal_hevc_pps *pps;
	ssize_t size;
	int i;

	pps = kzalloc(sizeof(*pps), GFP_KERNEL);
	if (!pps)
		return -ENOMEM;

	pps->pps_pic_parameter_set_id = 0;
	pps->pps_seq_parameter_set_id = 0;

	if (msg->num_column > 1 || msg->num_row > 1) {
		pps->tiles_enabled_flag = 1;
		pps->num_tile_columns_minus1 = msg->num_column - 1;
		pps->num_tile_rows_minus1 = msg->num_row - 1;

		for (i = 0; i < msg->num_column; i++)
			pps->column_width_minus1[i] = msg->tile_width[i] - 1;

		for (i = 0; i < msg->num_row; i++)
			pps->row_height_minus1[i] = msg->tile_height[i] - 1;
	}

	pps->loop_filter_across_tiles_enabled_flag =
		channel->enable_loop_filter_across_tiles;
	pps->pps_loop_filter_across_slices_enabled_flag =
		channel->enable_loop_filter_across_slices;
	pps->deblocking_filter_control_present_flag = 1;
	pps->deblocking_filter_override_enabled_flag =
		channel->enable_deblocking_filter_override;
	pps->pps_beta_offset_div2 = BETA_OFFSET_DIV_2;
	pps->pps_tc_offset_div2 = TC_OFFSET_DIV_2;

	pps->lists_modification_present_flag = channel->enable_reordering;

	size = nal_hevc_write_pps(&dev->plat_dev->dev, dest, n, pps);

	kfree(pps);

	return size;
}

/*
 * Put @buffer on a shadow list so it can be found again when the MCU
 * reports completion; the returned handle identifies the buffer.
 */
static u64 allegro_put_buffer(struct allegro_channel *channel,
			      struct list_head *list,
			      struct vb2_v4l2_buffer *buffer)
{
	struct v4l2_m2m_buffer *b = container_of(buffer,
						 struct v4l2_m2m_buffer, vb);
	struct allegro_m2m_buffer *shadow = to_allegro_m2m_buffer(b);

	mutex_lock(&channel->shadow_list_lock);
	list_add_tail(&shadow->head, list);
	mutex_unlock(&channel->shadow_list_lock);

	return ptr_to_u64(buffer);
}

/*
 * Look up and remove the buffer identified by @handle from a shadow list.
 * Returns NULL if the handle does not match any queued buffer.
 */
static struct vb2_v4l2_buffer *
allegro_get_buffer(struct allegro_channel *channel,
		   struct list_head *list, u64 handle)
{
	struct allegro_m2m_buffer *shadow, *tmp;
	struct vb2_v4l2_buffer *buffer = NULL;

	mutex_lock(&channel->shadow_list_lock);
	list_for_each_entry_safe(shadow, tmp, list, head) {
		if (handle == ptr_to_u64(&shadow->buf.vb)) {
			buffer = &shadow->buf.vb;
			list_del_init(&shadow->head);
			break;
		}
	}
	mutex_unlock(&channel->shadow_list_lock);

	return buffer;
}

/*
 * Complete one encoded frame: retrieve the source and stream buffers via
 * their handles, prepend the parameter-set NAL units to the bitstream and
 * hand both buffers back to the mem2mem framework.
 */
static void allegro_channel_finish_frame(struct allegro_channel *channel,
		struct mcu_msg_encode_frame_response *msg)
{
	struct allegro_dev *dev = channel->dev;
	struct vb2_v4l2_buffer *src_buf;
	struct vb2_v4l2_buffer *dst_buf;
	/* Layout of one partition-table entry in the stream buffer */
	struct {
		u32 offset;
		u32 size;
	} *partition;
	enum vb2_buffer_state state = VB2_BUF_STATE_ERROR;
	char *curr;
	ssize_t len;
	ssize_t free;

	src_buf = allegro_get_buffer(channel, &channel->source_shadow_list,
				     msg->src_handle);
	if (!src_buf)
		v4l2_warn(&dev->v4l2_dev,
			  "channel %d: invalid source buffer\n",
			  channel->mcu_channel_id);

	dst_buf = allegro_get_buffer(channel, &channel->stream_shadow_list,
				     msg->dst_handle);
	if (!dst_buf)
		v4l2_warn(&dev->v4l2_dev,
			  "channel %d: invalid stream buffer\n",
			  channel->mcu_channel_id);

	if (!src_buf || !dst_buf)
		goto err;

	if (v4l2_m2m_is_last_draining_src_buf(channel->fh.m2m_ctx, src_buf)) {
		dst_buf->flags |= V4L2_BUF_FLAG_LAST;
		allegro_channel_eos_event(channel);
		v4l2_m2m_mark_stopped(channel->fh.m2m_ctx);
	}

	dst_buf->sequence = channel->csequence++;

	if (msg->error_code & AL_ERROR) {
		v4l2_err(&dev->v4l2_dev,
			 "channel %d: failed to encode frame: %s (%x)\n",
			 channel->mcu_channel_id,
			 allegro_err_to_string(msg->error_code),
			 msg->error_code);
		goto err;
	}

	if (msg->partition_table_size != 1) {
		v4l2_warn(&dev->v4l2_dev,
			  "channel %d: only handling first partition table entry (%d entries)\n",
			  channel->mcu_channel_id, msg->partition_table_size);
	}

	if (msg->partition_table_offset +
msg->partition_table_size * sizeof(*partition) > vb2_plane_size(&dst_buf->vb2_buf, 0)) { v4l2_err(&dev->v4l2_dev, "channel %d: partition table outside of dst_buf\n", channel->mcu_channel_id); goto err; } partition = vb2_plane_vaddr(&dst_buf->vb2_buf, 0) + msg->partition_table_offset; if (partition->offset + partition->size > vb2_plane_size(&dst_buf->vb2_buf, 0)) { v4l2_err(&dev->v4l2_dev, "channel %d: encoded frame is outside of dst_buf (offset 0x%x, size 0x%x)\n", channel->mcu_channel_id, partition->offset, partition->size); goto err; } v4l2_dbg(2, debug, &dev->v4l2_dev, "channel %d: encoded frame of size %d is at offset 0x%x\n", channel->mcu_channel_id, partition->size, partition->offset); /* * The payload must include the data before the partition offset, * because we will put the sps and pps data there. */ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, partition->offset + partition->size); curr = vb2_plane_vaddr(&dst_buf->vb2_buf, 0); free = partition->offset; if (channel->codec == V4L2_PIX_FMT_HEVC && msg->is_idr) { len = allegro_hevc_write_vps(channel, curr, free); if (len < 0) { v4l2_err(&dev->v4l2_dev, "not enough space for video parameter set: %zd left\n", free); goto err; } curr += len; free -= len; v4l2_dbg(1, debug, &dev->v4l2_dev, "channel %d: wrote %zd byte VPS nal unit\n", channel->mcu_channel_id, len); } if (msg->is_idr) { if (channel->codec == V4L2_PIX_FMT_H264) len = allegro_h264_write_sps(channel, curr, free); else len = allegro_hevc_write_sps(channel, curr, free); if (len < 0) { v4l2_err(&dev->v4l2_dev, "not enough space for sequence parameter set: %zd left\n", free); goto err; } curr += len; free -= len; v4l2_dbg(1, debug, &dev->v4l2_dev, "channel %d: wrote %zd byte SPS nal unit\n", channel->mcu_channel_id, len); } if (msg->slice_type == AL_ENC_SLICE_TYPE_I) { if (channel->codec == V4L2_PIX_FMT_H264) len = allegro_h264_write_pps(channel, curr, free); else len = allegro_hevc_write_pps(channel, msg, curr, free); if (len < 0) { 
v4l2_err(&dev->v4l2_dev, "not enough space for picture parameter set: %zd left\n", free); goto err; } curr += len; free -= len; v4l2_dbg(1, debug, &dev->v4l2_dev, "channel %d: wrote %zd byte PPS nal unit\n", channel->mcu_channel_id, len); } if (msg->slice_type != AL_ENC_SLICE_TYPE_I && !msg->is_idr) { dst_buf->vb2_buf.planes[0].data_offset = free; free = 0; } else { if (channel->codec == V4L2_PIX_FMT_H264) len = nal_h264_write_filler(&dev->plat_dev->dev, curr, free); else len = nal_hevc_write_filler(&dev->plat_dev->dev, curr, free); if (len < 0) { v4l2_err(&dev->v4l2_dev, "failed to write %zd filler data\n", free); goto err; } curr += len; free -= len; v4l2_dbg(2, debug, &dev->v4l2_dev, "channel %d: wrote %zd bytes filler nal unit\n", channel->mcu_channel_id, len); } if (free != 0) { v4l2_err(&dev->v4l2_dev, "non-VCL NAL units do not fill space until VCL NAL unit: %zd bytes left\n", free); goto err; } state = VB2_BUF_STATE_DONE; v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false); if (msg->is_idr) dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME; else dst_buf->flags |= V4L2_BUF_FLAG_PFRAME; v4l2_dbg(1, debug, &dev->v4l2_dev, "channel %d: encoded frame #%03d (%s%s, QP %d, %d bytes)\n", channel->mcu_channel_id, dst_buf->sequence, msg->is_idr ? "IDR, " : "", msg->slice_type == AL_ENC_SLICE_TYPE_I ? "I slice" : msg->slice_type == AL_ENC_SLICE_TYPE_P ? 
"P slice" : "unknown", msg->qp, partition->size); err: if (src_buf) v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE); if (dst_buf) v4l2_m2m_buf_done(dst_buf, state); } static int allegro_handle_init(struct allegro_dev *dev, struct mcu_msg_init_response *msg) { complete(&dev->init_complete); return 0; } static int allegro_handle_create_channel(struct allegro_dev *dev, struct mcu_msg_create_channel_response *msg) { struct allegro_channel *channel; int err = 0; struct create_channel_param param; channel = allegro_find_channel_by_user_id(dev, msg->user_id); if (IS_ERR(channel)) { v4l2_warn(&dev->v4l2_dev, "received %s for unknown user %d\n", msg_type_name(msg->header.type), msg->user_id); return -EINVAL; } if (msg->error_code) { v4l2_err(&dev->v4l2_dev, "user %d: mcu failed to create channel: %s (%x)\n", channel->user_id, allegro_err_to_string(msg->error_code), msg->error_code); err = -EIO; goto out; } channel->mcu_channel_id = msg->channel_id; v4l2_dbg(1, debug, &dev->v4l2_dev, "user %d: channel has channel id %d\n", channel->user_id, channel->mcu_channel_id); err = allegro_decode_config_blob(&param, msg, channel->config_blob.vaddr); allegro_free_buffer(channel->dev, &channel->config_blob); if (err) goto out; channel->num_ref_idx_l0 = param.num_ref_idx_l0; channel->num_ref_idx_l1 = param.num_ref_idx_l1; v4l2_dbg(1, debug, &dev->v4l2_dev, "channel %d: intermediate buffers: %d x %d bytes\n", channel->mcu_channel_id, msg->int_buffers_count, msg->int_buffers_size); err = allocate_intermediate_buffers(channel, msg->int_buffers_count, msg->int_buffers_size); if (err) { v4l2_err(&dev->v4l2_dev, "channel %d: failed to allocate intermediate buffers\n", channel->mcu_channel_id); goto out; } err = allegro_mcu_push_buffer_intermediate(channel); if (err) goto out; v4l2_dbg(1, debug, &dev->v4l2_dev, "channel %d: reference buffers: %d x %d bytes\n", channel->mcu_channel_id, msg->rec_buffers_count, msg->rec_buffers_size); err = allocate_reference_buffers(channel, 
msg->rec_buffers_count, msg->rec_buffers_size); if (err) { v4l2_err(&dev->v4l2_dev, "channel %d: failed to allocate reference buffers\n", channel->mcu_channel_id); goto out; } err = allegro_mcu_push_buffer_reference(channel); if (err) goto out; out: channel->error = err; complete(&channel->completion); /* Handled successfully, error is passed via channel->error */ return 0; } static int allegro_handle_destroy_channel(struct allegro_dev *dev, struct mcu_msg_destroy_channel_response *msg) { struct allegro_channel *channel; channel = allegro_find_channel_by_channel_id(dev, msg->channel_id); if (IS_ERR(channel)) { v4l2_err(&dev->v4l2_dev, "received %s for unknown channel %d\n", msg_type_name(msg->header.type), msg->channel_id); return -EINVAL; } v4l2_dbg(2, debug, &dev->v4l2_dev, "user %d: vcu destroyed channel %d\n", channel->user_id, channel->mcu_channel_id); complete(&channel->completion); return 0; } static int allegro_handle_encode_frame(struct allegro_dev *dev, struct mcu_msg_encode_frame_response *msg) { struct allegro_channel *channel; channel = allegro_find_channel_by_channel_id(dev, msg->channel_id); if (IS_ERR(channel)) { v4l2_err(&dev->v4l2_dev, "received %s for unknown channel %d\n", msg_type_name(msg->header.type), msg->channel_id); return -EINVAL; } allegro_channel_finish_frame(channel, msg); return 0; } static void allegro_handle_message(struct allegro_dev *dev, union mcu_msg_response *msg) { switch (msg->header.type) { case MCU_MSG_TYPE_INIT: allegro_handle_init(dev, &msg->init); break; case MCU_MSG_TYPE_CREATE_CHANNEL: allegro_handle_create_channel(dev, &msg->create_channel); break; case MCU_MSG_TYPE_DESTROY_CHANNEL: allegro_handle_destroy_channel(dev, &msg->destroy_channel); break; case MCU_MSG_TYPE_ENCODE_FRAME: allegro_handle_encode_frame(dev, &msg->encode_frame); break; default: v4l2_warn(&dev->v4l2_dev, "%s: unknown message %s\n", __func__, msg_type_name(msg->header.type)); break; } } static irqreturn_t allegro_hardirq(int irq, void *data) { 
struct allegro_dev *dev = data;	/* continuation of allegro_hardirq() */
	unsigned int status;

	regmap_read(dev->regmap, AL5_ITC_CPU_IRQ_STA, &status);
	if (!(status & AL5_ITC_CPU_IRQ_STA_TRIGGERED))
		return IRQ_NONE;

	/* Acknowledge before handing off to the thread */
	regmap_write(dev->regmap, AL5_ITC_CPU_IRQ_CLR, status);

	return IRQ_WAKE_THREAD;
}

/* Threaded IRQ half: drain the status mailbox filled by the MCU. */
static irqreturn_t allegro_irq_thread(int irq, void *data)
{
	struct allegro_dev *dev = data;

	/*
	 * The firmware is initialized after the mailbox is setup. We further
	 * check the AL5_ITC_CPU_IRQ_STA register, if the firmware actually
	 * triggered the interrupt. Although this should not happen, make sure
	 * that we ignore interrupts, if the mailbox is not initialized.
	 */
	if (!dev->mbox_status)
		return IRQ_NONE;

	allegro_mbox_notify(dev->mbox_status);

	return IRQ_HANDLED;
}

/* Copy the MCU firmware image into the on-chip SRAM via regmap. */
static void allegro_copy_firmware(struct allegro_dev *dev,
				  const u8 * const buf, size_t size)
{
	int err = 0;

	v4l2_dbg(1, debug, &dev->v4l2_dev,
		 "copy mcu firmware (%zu B) to SRAM\n", size);
	/* SRAM is accessed in 32-bit words, hence size / 4 */
	err = regmap_bulk_write(dev->sram, 0x0, buf, size / 4);
	if (err)
		v4l2_err(&dev->v4l2_dev,
			 "failed to copy firmware: %d\n", err);
}

/*
 * Copy the codec firmware into a DMA buffer and program the AXI and
 * i/d-cache address offsets so the MCU can reach it.
 */
static void allegro_copy_fw_codec(struct allegro_dev *dev,
				  const u8 * const buf, size_t size)
{
	int err;
	dma_addr_t icache_offset, dcache_offset;

	/*
	 * The downstream allocates 600 KB for the codec firmware to have some
	 * extra space for "possible extensions." My tests were fine with
	 * allocating just enough memory for the actual firmware, but I am not
	 * sure that the firmware really does not use the remaining space.
	 */
	err = allegro_alloc_buffer(dev, &dev->firmware, size);
	if (err) {
		v4l2_err(&dev->v4l2_dev,
			 "failed to allocate %zu bytes for firmware\n", size);
		return;
	}

	v4l2_dbg(1, debug, &dev->v4l2_dev,
		 "copy codec firmware (%zd B) to phys %pad\n",
		 size, &dev->firmware.paddr);
	memcpy(dev->firmware.vaddr, buf, size);

	regmap_write(dev->regmap, AXI_ADDR_OFFSET_IP,
		     upper_32_bits(dev->firmware.paddr));

	icache_offset = dev->firmware.paddr - MCU_CACHE_OFFSET;
	v4l2_dbg(2, debug, &dev->v4l2_dev,
		 "icache_offset: msb = 0x%x, lsb = 0x%x\n",
		 upper_32_bits(icache_offset), lower_32_bits(icache_offset));
	regmap_write(dev->regmap, AL5_ICACHE_ADDR_OFFSET_MSB,
		     upper_32_bits(icache_offset));
	regmap_write(dev->regmap, AL5_ICACHE_ADDR_OFFSET_LSB,
		     lower_32_bits(icache_offset));

	/* dcache offset uses only the high 32 bits of the firmware address */
	dcache_offset =
	    (dev->firmware.paddr & 0xffffffff00000000ULL) - MCU_CACHE_OFFSET;
	v4l2_dbg(2, debug, &dev->v4l2_dev,
		 "dcache_offset: msb = 0x%x, lsb = 0x%x\n",
		 upper_32_bits(dcache_offset), lower_32_bits(dcache_offset));
	regmap_write(dev->regmap, AL5_DCACHE_ADDR_OFFSET_MSB,
		     upper_32_bits(dcache_offset));
	regmap_write(dev->regmap, AL5_DCACHE_ADDR_OFFSET_LSB,
		     lower_32_bits(dcache_offset));
}

/* Release the DMA buffer holding the codec firmware. */
static void allegro_free_fw_codec(struct allegro_dev *dev)
{
	allegro_free_buffer(dev, &dev->firmware);
}

/*
 * Control functions for the MCU
 */

static int allegro_mcu_enable_interrupts(struct allegro_dev *dev)
{
	return regmap_write(dev->regmap, AL5_ITC_CPU_IRQ_MSK, BIT(0));
}

static int allegro_mcu_disable_interrupts(struct allegro_dev *dev)
{
	return regmap_write(dev->regmap, AL5_ITC_CPU_IRQ_MSK, 0);
}

/* Poll AL5_MCU_STA until the MCU reports sleep; 100 ms timeout. */
static int allegro_mcu_wait_for_sleep(struct allegro_dev *dev)
{
	unsigned long timeout;
	unsigned int status;

	timeout = jiffies + msecs_to_jiffies(100);
	while (regmap_read(dev->regmap, AL5_MCU_STA, &status) == 0 &&
	       status != AL5_MCU_STA_SLEEP) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}

	return 0;
}

/*
 * Wake the MCU: assert the wakeup bit, wait (up to 100 ms) for the MCU to
 * leave the sleep state, then deassert the wakeup bit again.
 */
static int allegro_mcu_start(struct allegro_dev *dev)
{
	unsigned long timeout;
	unsigned int status;
	int err;

	err =
regmap_write(dev->regmap, AL5_MCU_WAKEUP, BIT(0));
	if (err)
		return err;

	timeout = jiffies + msecs_to_jiffies(100);
	while (regmap_read(dev->regmap, AL5_MCU_STA, &status) == 0 &&
	       status == AL5_MCU_STA_SLEEP) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}

	err = regmap_write(dev->regmap, AL5_MCU_WAKEUP, 0);
	if (err)
		return err;

	return 0;
}

/* Soft-reset the MCU into sleep mode and wait for it to get there. */
static int allegro_mcu_reset(struct allegro_dev *dev)
{
	int err;

	/*
	 * Ensure that the AL5_MCU_WAKEUP bit is set to 0 otherwise the mcu
	 * does not go to sleep after the reset.
	 */
	err = regmap_write(dev->regmap, AL5_MCU_WAKEUP, 0);
	if (err)
		return err;

	err = regmap_write(dev->regmap,
			   AL5_MCU_RESET_MODE, AL5_MCU_RESET_MODE_SLEEP);
	if (err < 0)
		return err;

	err = regmap_write(dev->regmap, AL5_MCU_RESET, AL5_MCU_RESET_SOFT);
	if (err < 0)
		return err;

	return allegro_mcu_wait_for_sleep(dev);
}

/* Ring the doorbell that tells the MCU a new mailbox message is pending. */
static void allegro_mcu_interrupt(struct allegro_dev *dev)
{
	regmap_write(dev->regmap, AL5_MCU_INTERRUPT, BIT(0));
}

/*
 * Tear down an MCU channel: ask the firmware to destroy it (best effort,
 * 5 s timeout), free the per-channel buffers, ungrab all controls and
 * release the user id.
 */
static void allegro_destroy_channel(struct allegro_channel *channel)
{
	struct allegro_dev *dev = channel->dev;
	unsigned long timeout;

	if (channel_exists(channel)) {
		reinit_completion(&channel->completion);
		allegro_mcu_send_destroy_channel(dev, channel);
		timeout = wait_for_completion_timeout(&channel->completion,
						      msecs_to_jiffies(5000));
		if (timeout == 0)
			v4l2_warn(&dev->v4l2_dev,
				  "channel %d: timeout while destroying\n",
				  channel->mcu_channel_id);
		channel->mcu_channel_id = -1;
	}

	destroy_intermediate_buffers(channel);
	destroy_reference_buffers(channel);

	/* Re-enable the controls that were frozen while the channel ran */
	v4l2_ctrl_grab(channel->mpeg_video_h264_profile, false);
	v4l2_ctrl_grab(channel->mpeg_video_h264_level, false);
	v4l2_ctrl_grab(channel->mpeg_video_h264_i_frame_qp, false);
	v4l2_ctrl_grab(channel->mpeg_video_h264_max_qp, false);
	v4l2_ctrl_grab(channel->mpeg_video_h264_min_qp, false);
	v4l2_ctrl_grab(channel->mpeg_video_h264_p_frame_qp, false);
	v4l2_ctrl_grab(channel->mpeg_video_h264_b_frame_qp, false);

	v4l2_ctrl_grab(channel->mpeg_video_hevc_profile, false);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_level, false);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_tier, false);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_i_frame_qp, false);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_max_qp, false);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_min_qp, false);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_p_frame_qp, false);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_b_frame_qp, false);

	v4l2_ctrl_grab(channel->mpeg_video_frame_rc_enable, false);
	v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, false);
	v4l2_ctrl_grab(channel->mpeg_video_bitrate, false);
	v4l2_ctrl_grab(channel->mpeg_video_bitrate_peak, false);
	v4l2_ctrl_grab(channel->mpeg_video_cpb_size, false);
	v4l2_ctrl_grab(channel->mpeg_video_gop_size, false);

	v4l2_ctrl_grab(channel->encoder_buffer, false);

	if (channel->user_id != -1) {
		clear_bit(channel->user_id, &dev->channel_user_ids);
		channel->user_id = -1;
	}
}

/*
 * Create the MCU channel
 *
 * After the channel has been created, the picture size, format, colorspace
 * and framerate are fixed. Also the codec, profile, bitrate, etc. cannot be
 * changed anymore.
 *
 * The channel can be created only once. The MCU will accept source buffers
 * and stream buffers only after a channel has been created.
*/
static int allegro_create_channel(struct allegro_channel *channel)
{
	struct allegro_dev *dev = channel->dev;
	unsigned long timeout;

	if (channel_exists(channel)) {
		v4l2_warn(&dev->v4l2_dev,
			  "channel already exists\n");
		return 0;
	}

	channel->user_id = allegro_next_user_id(dev);
	if (channel->user_id < 0) {
		v4l2_err(&dev->v4l2_dev,
			 "no free channels available\n");
		return -EBUSY;
	}
	set_bit(channel->user_id, &dev->channel_user_ids);

	v4l2_dbg(1, debug, &dev->v4l2_dev,
		 "user %d: creating channel (%4.4s, %dx%d@%d)\n",
		 channel->user_id,
		 (char *)&channel->codec, channel->width, channel->height,
		 DIV_ROUND_UP(channel->framerate.numerator,
			      channel->framerate.denominator));

	/* Freeze all encoding controls while the channel is alive */
	v4l2_ctrl_grab(channel->mpeg_video_h264_profile, true);
	v4l2_ctrl_grab(channel->mpeg_video_h264_level, true);
	v4l2_ctrl_grab(channel->mpeg_video_h264_i_frame_qp, true);
	v4l2_ctrl_grab(channel->mpeg_video_h264_max_qp, true);
	v4l2_ctrl_grab(channel->mpeg_video_h264_min_qp, true);
	v4l2_ctrl_grab(channel->mpeg_video_h264_p_frame_qp, true);
	v4l2_ctrl_grab(channel->mpeg_video_h264_b_frame_qp, true);

	v4l2_ctrl_grab(channel->mpeg_video_hevc_profile, true);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_level, true);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_tier, true);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_i_frame_qp, true);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_max_qp, true);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_min_qp, true);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_p_frame_qp, true);
	v4l2_ctrl_grab(channel->mpeg_video_hevc_b_frame_qp, true);

	v4l2_ctrl_grab(channel->mpeg_video_frame_rc_enable, true);
	v4l2_ctrl_grab(channel->mpeg_video_bitrate_mode, true);
	v4l2_ctrl_grab(channel->mpeg_video_bitrate, true);
	v4l2_ctrl_grab(channel->mpeg_video_bitrate_peak, true);
	v4l2_ctrl_grab(channel->mpeg_video_cpb_size, true);
	v4l2_ctrl_grab(channel->mpeg_video_gop_size, true);

	v4l2_ctrl_grab(channel->encoder_buffer, true);

	reinit_completion(&channel->completion);
	allegro_mcu_send_create_channel(dev, channel);
	/* Result is delivered by allegro_handle_create_channel() */
	timeout =
wait_for_completion_timeout(&channel->completion,
			    msecs_to_jiffies(5000));
	if (timeout == 0)
		channel->error = -ETIMEDOUT;
	if (channel->error)
		goto err;

	v4l2_dbg(1, debug, &dev->v4l2_dev,
		 "channel %d: accepting buffers\n",
		 channel->mcu_channel_id);

	return 0;

err:
	allegro_destroy_channel(channel);

	return channel->error;
}

/**
 * allegro_channel_adjust() - Adjust channel parameters to current format
 * @channel: the channel to adjust
 *
 * Various parameters of a channel and their limits depend on the currently
 * set format. Adjust the parameters after a format change in one go.
 */
static void allegro_channel_adjust(struct allegro_channel *channel)
{
	struct allegro_dev *dev = channel->dev;
	u32 codec = channel->codec;
	struct v4l2_ctrl *ctrl;
	s64 min;
	s64 max;

	channel->sizeimage_encoded =
		estimate_stream_size(channel->width, channel->height);

	/* Raise the level minimum to what the picture size requires */
	if (codec == V4L2_PIX_FMT_H264) {
		ctrl = channel->mpeg_video_h264_level;
		min = select_minimum_h264_level(channel->width, channel->height);
	} else {
		ctrl = channel->mpeg_video_hevc_level;
		min = select_minimum_hevc_level(channel->width, channel->height);
	}
	if (ctrl->minimum > min)
		v4l2_dbg(1, debug, &dev->v4l2_dev,
			 "%s.minimum: %lld -> %lld\n",
			 v4l2_ctrl_get_name(ctrl->id), ctrl->minimum, min);
	v4l2_ctrl_lock(ctrl);
	__v4l2_ctrl_modify_range(ctrl, min, ctrl->maximum,
				 ctrl->step, ctrl->default_value);
	v4l2_ctrl_unlock(ctrl);

	/* Cap the bitrate (and peak bitrate) at the level's maximum */
	ctrl = channel->mpeg_video_bitrate;
	if (codec == V4L2_PIX_FMT_H264)
		max = h264_maximum_bitrate(v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_level));
	else
		max = hevc_maximum_bitrate(v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_level));
	if (ctrl->maximum < max)
		v4l2_dbg(1, debug, &dev->v4l2_dev,
			 "%s: maximum: %lld -> %lld\n",
			 v4l2_ctrl_get_name(ctrl->id), ctrl->maximum, max);
	v4l2_ctrl_lock(ctrl);
	__v4l2_ctrl_modify_range(ctrl, ctrl->minimum, max,
				 ctrl->step, ctrl->default_value);
	v4l2_ctrl_unlock(ctrl);

	ctrl = channel->mpeg_video_bitrate_peak;
	v4l2_ctrl_lock(ctrl);
	__v4l2_ctrl_modify_range(ctrl, ctrl->minimum, max,
				 ctrl->step, ctrl->default_value);
	v4l2_ctrl_unlock(ctrl);

	/* Show only the controls matching the selected codec */
	v4l2_ctrl_activate(channel->mpeg_video_h264_profile,
			   codec == V4L2_PIX_FMT_H264);
	v4l2_ctrl_activate(channel->mpeg_video_h264_level,
			   codec == V4L2_PIX_FMT_H264);
	v4l2_ctrl_activate(channel->mpeg_video_h264_i_frame_qp,
			   codec == V4L2_PIX_FMT_H264);
	v4l2_ctrl_activate(channel->mpeg_video_h264_max_qp,
			   codec == V4L2_PIX_FMT_H264);
	v4l2_ctrl_activate(channel->mpeg_video_h264_min_qp,
			   codec == V4L2_PIX_FMT_H264);
	v4l2_ctrl_activate(channel->mpeg_video_h264_p_frame_qp,
			   codec == V4L2_PIX_FMT_H264);
	v4l2_ctrl_activate(channel->mpeg_video_h264_b_frame_qp,
			   codec == V4L2_PIX_FMT_H264);

	v4l2_ctrl_activate(channel->mpeg_video_hevc_profile,
			   codec == V4L2_PIX_FMT_HEVC);
	v4l2_ctrl_activate(channel->mpeg_video_hevc_level,
			   codec == V4L2_PIX_FMT_HEVC);
	v4l2_ctrl_activate(channel->mpeg_video_hevc_tier,
			   codec == V4L2_PIX_FMT_HEVC);
	v4l2_ctrl_activate(channel->mpeg_video_hevc_i_frame_qp,
			   codec == V4L2_PIX_FMT_HEVC);
	v4l2_ctrl_activate(channel->mpeg_video_hevc_max_qp,
			   codec == V4L2_PIX_FMT_HEVC);
	v4l2_ctrl_activate(channel->mpeg_video_hevc_min_qp,
			   codec == V4L2_PIX_FMT_HEVC);
	v4l2_ctrl_activate(channel->mpeg_video_hevc_p_frame_qp,
			   codec == V4L2_PIX_FMT_HEVC);
	v4l2_ctrl_activate(channel->mpeg_video_hevc_b_frame_qp,
			   codec == V4L2_PIX_FMT_HEVC);

	if (codec == V4L2_PIX_FMT_H264)
		channel->log2_max_frame_num = LOG2_MAX_FRAME_NUM;
	channel->temporal_mvp_enable = true;

	channel->dbf_ovr_en = (codec == V4L2_PIX_FMT_H264);
	channel->enable_deblocking_filter_override = (codec == V4L2_PIX_FMT_HEVC);
	channel->enable_reordering = (codec == V4L2_PIX_FMT_HEVC);
	channel->enable_loop_filter_across_tiles = true;
	channel->enable_loop_filter_across_slices = true;

	/* Motion-estimation ranges and CU/TU size bounds per codec */
	if (codec == V4L2_PIX_FMT_H264) {
		channel->b_hrz_me_range = 8;
		channel->b_vrt_me_range = 8;
		channel->p_hrz_me_range = 16;
		channel->p_vrt_me_range = 16;
		channel->max_cu_size = ilog2(16);
		channel->min_cu_size = ilog2(8);
		channel->max_tu_size = ilog2(4);
		channel->min_tu_size = ilog2(4);
	} else {
		channel->b_hrz_me_range = 16;
		channel->b_vrt_me_range = 16;
		channel->p_hrz_me_range = 32;
		channel->p_vrt_me_range = 32;
		channel->max_cu_size = ilog2(32);
		channel->min_cu_size = ilog2(8);
		channel->max_tu_size = ilog2(32);
		channel->min_tu_size = ilog2(4);
	}
	channel->max_transfo_depth_intra = 1;
	channel->max_transfo_depth_inter = 1;
}

/* Initialize a freshly opened channel with sane defaults (NV12 -> H.264). */
static void allegro_set_default_params(struct allegro_channel *channel)
{
	channel->width = ALLEGRO_WIDTH_DEFAULT;
	channel->height = ALLEGRO_HEIGHT_DEFAULT;
	channel->stride = round_up(channel->width, 32);
	channel->framerate = ALLEGRO_FRAMERATE_DEFAULT;

	channel->colorspace = V4L2_COLORSPACE_REC709;
	channel->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	channel->quantization = V4L2_QUANTIZATION_DEFAULT;
	channel->xfer_func = V4L2_XFER_FUNC_DEFAULT;

	channel->pixelformat = V4L2_PIX_FMT_NV12;
	/* NV12: luma plane plus half-size interleaved chroma plane */
	channel->sizeimage_raw = channel->stride * channel->height * 3 / 2;

	channel->codec = V4L2_PIX_FMT_H264;
}

/* vb2 queue_setup: validate or propose plane sizes for both queues. */
static int allegro_queue_setup(struct vb2_queue *vq,
			       unsigned int *nbuffers, unsigned int *nplanes,
			       unsigned int sizes[],
			       struct device *alloc_devs[])
{
	struct allegro_channel *channel = vb2_get_drv_priv(vq);
	struct allegro_dev *dev = channel->dev;

	v4l2_dbg(2, debug, &dev->v4l2_dev,
		 "%s: queue setup[%s]: nplanes = %d\n",
		 V4L2_TYPE_IS_OUTPUT(vq->type) ? "output" : "capture",
		 *nplanes == 0 ?
"REQBUFS" : "CREATE_BUFS", *nplanes);

	/* CREATE_BUFS passes nplanes != 0: only validate the given sizes */
	if (*nplanes != 0) {
		if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
			if (sizes[0] < channel->sizeimage_raw)
				return -EINVAL;
		} else {
			if (sizes[0] < channel->sizeimage_encoded)
				return -EINVAL;
		}
	} else {
		*nplanes = 1;
		if (V4L2_TYPE_IS_OUTPUT(vq->type))
			sizes[0] = channel->sizeimage_raw;
		else
			sizes[0] = channel->sizeimage_encoded;
	}

	return 0;
}

/* vb2 buf_prepare: only progressive (V4L2_FIELD_NONE) frames are accepted. */
static int allegro_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct allegro_channel *channel = vb2_get_drv_priv(vb->vb2_queue);
	struct allegro_dev *dev = channel->dev;

	if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		if (vbuf->field == V4L2_FIELD_ANY)
			vbuf->field = V4L2_FIELD_NONE;
		if (vbuf->field != V4L2_FIELD_NONE) {
			v4l2_err(&dev->v4l2_dev,
				 "channel %d: unsupported field\n",
				 channel->mcu_channel_id);
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * vb2 buf_queue: normally hands the buffer to v4l2-m2m; if the encoder has
 * already drained, return the capture buffer immediately as an empty LAST
 * buffer and raise the EOS event.
 */
static void allegro_buf_queue(struct vb2_buffer *vb)
{
	struct allegro_channel *channel = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (V4L2_TYPE_IS_CAPTURE(q->type) &&
	    vb2_is_streaming(q) &&
	    v4l2_m2m_dst_buf_is_last(channel->fh.m2m_ctx)) {
		unsigned int i;

		for (i = 0; i < vb->num_planes; i++)
			vb2_set_plane_payload(vb, i, 0);

		vbuf->field = V4L2_FIELD_NONE;
		vbuf->sequence = channel->csequence++;

		v4l2_m2m_last_buffer_done(channel->fh.m2m_ctx, vbuf);
		allegro_channel_eos_event(channel);
		return;
	}

	v4l2_m2m_buf_queue(channel->fh.m2m_ctx, vbuf);
}

/* vb2 start_streaming: reset the per-direction sequence counter. */
static int allegro_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct allegro_channel *channel = vb2_get_drv_priv(q);
	struct allegro_dev *dev = channel->dev;

	v4l2_dbg(2, debug, &dev->v4l2_dev,
		 "%s: start streaming\n",
		 V4L2_TYPE_IS_OUTPUT(q->type) ?
"output" : "capture");

	v4l2_m2m_update_start_streaming_state(channel->fh.m2m_ctx, q);

	if (V4L2_TYPE_IS_OUTPUT(q->type))
		channel->osequence = 0;
	else
		channel->csequence = 0;

	return 0;
}

/*
 * vb2 stop_streaming: return all shadowed (in-flight at the MCU) and queued
 * buffers with ERROR state; stopping the capture queue also destroys the MCU
 * channel.
 */
static void allegro_stop_streaming(struct vb2_queue *q)
{
	struct allegro_channel *channel = vb2_get_drv_priv(q);
	struct allegro_dev *dev = channel->dev;
	struct vb2_v4l2_buffer *buffer;
	struct allegro_m2m_buffer *shadow, *tmp;

	v4l2_dbg(2, debug, &dev->v4l2_dev,
		 "%s: stop streaming\n",
		 V4L2_TYPE_IS_OUTPUT(q->type) ? "output" : "capture");

	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		mutex_lock(&channel->shadow_list_lock);
		list_for_each_entry_safe(shadow, tmp,
					 &channel->source_shadow_list, head) {
			list_del(&shadow->head);
			v4l2_m2m_buf_done(&shadow->buf.vb, VB2_BUF_STATE_ERROR);
		}
		mutex_unlock(&channel->shadow_list_lock);

		while ((buffer = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx)))
			v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR);
	} else {
		mutex_lock(&channel->shadow_list_lock);
		list_for_each_entry_safe(shadow, tmp,
					 &channel->stream_shadow_list, head) {
			list_del(&shadow->head);
			v4l2_m2m_buf_done(&shadow->buf.vb, VB2_BUF_STATE_ERROR);
		}
		mutex_unlock(&channel->shadow_list_lock);

		allegro_destroy_channel(channel);

		while ((buffer = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx)))
			v4l2_m2m_buf_done(buffer, VB2_BUF_STATE_ERROR);
	}

	v4l2_m2m_update_stop_streaming_state(channel->fh.m2m_ctx, q);

	if (V4L2_TYPE_IS_OUTPUT(q->type) &&
	    v4l2_m2m_has_stopped(channel->fh.m2m_ctx))
		allegro_channel_eos_event(channel);
}

static const struct vb2_ops allegro_queue_ops = {
	.queue_setup = allegro_queue_setup,
	.buf_prepare = allegro_buf_prepare,
	.buf_queue = allegro_buf_queue,
	.start_streaming = allegro_start_streaming,
	.stop_streaming = allegro_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

/* v4l2-m2m queue init: configure the OUTPUT (raw) and CAPTURE (stream) vb2 queues. */
static int allegro_queue_init(void *priv,
			      struct vb2_queue *src_vq,
			      struct vb2_queue *dst_vq)
{
	int err;
	struct allegro_channel *channel = priv;

	src_vq->dev = &channel->dev->plat_dev->dev;
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->drv_priv = channel;
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->ops = &allegro_queue_ops;
	src_vq->buf_struct_size = sizeof(struct allegro_m2m_buffer);
	src_vq->lock = &channel->dev->lock;
	err = vb2_queue_init(src_vq);
	if (err)
		return err;

	dst_vq->dev = &channel->dev->plat_dev->dev;
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->drv_priv = channel;
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->ops = &allegro_queue_ops;
	dst_vq->buf_struct_size = sizeof(struct allegro_m2m_buffer);
	dst_vq->lock = &channel->dev->lock;
	err = vb2_queue_init(dst_vq);
	if (err)
		return err;

	return 0;
}

/*
 * Keep the QP controls ordered: I-frame QP bounds P-frame QP, which in turn
 * bounds B-frame QP. Recurses once per dependent control.
 */
static int allegro_clamp_qp(struct allegro_channel *channel,
			    struct v4l2_ctrl *ctrl)
{
	struct v4l2_ctrl *next_ctrl;

	if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP)
		next_ctrl = channel->mpeg_video_h264_p_frame_qp;
	else if (ctrl->id == V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP)
		next_ctrl = channel->mpeg_video_h264_b_frame_qp;
	else
		return 0;

	/* Modify range automatically updates the value */
	__v4l2_ctrl_modify_range(next_ctrl, ctrl->val, 51, 1, ctrl->val);

	return allegro_clamp_qp(channel, next_ctrl);
}

/* In VBR mode the peak bitrate must not be below the target bitrate. */
static int allegro_clamp_bitrate(struct allegro_channel *channel,
				 struct v4l2_ctrl *ctrl)
{
	struct v4l2_ctrl *ctrl_bitrate = channel->mpeg_video_bitrate;
	struct v4l2_ctrl *ctrl_bitrate_peak = channel->mpeg_video_bitrate_peak;

	if (ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR &&
	    ctrl_bitrate_peak->val < ctrl_bitrate->val)
		ctrl_bitrate_peak->val = ctrl_bitrate->val;

	return 0;
}

/* v4l2 try_ctrl: normalize dependent control values before they are set. */
static int allegro_try_ctrl(struct v4l2_ctrl *ctrl)
{
	struct allegro_channel *channel = container_of(ctrl->handler,
						       struct allegro_channel,
						       ctrl_handler);

	switch (ctrl->id) {
	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
		allegro_clamp_bitrate(channel, ctrl);
		break;
	case
V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER:
		/* Force off when the hardware lacks the encoder buffer */
		if (!channel->dev->has_encoder_buffer)
			ctrl->val = 0;
		break;
	}

	return 0;
}

/* v4l2 s_ctrl: latch control values into the channel state. */
static int allegro_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct allegro_channel *channel = container_of(ctrl->handler,
						       struct allegro_channel,
						       ctrl_handler);
	struct allegro_dev *dev = channel->dev;

	v4l2_dbg(1, debug, &dev->v4l2_dev, "s_ctrl: %s = %d\n",
		 v4l2_ctrl_get_name(ctrl->id), ctrl->val);

	switch (ctrl->id) {
	case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:
		channel->frame_rc_enable = ctrl->val;
		break;
	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
		/* Clustered with bitrate/bitrate_peak; latch all three */
		channel->bitrate = channel->mpeg_video_bitrate->val;
		channel->bitrate_peak = channel->mpeg_video_bitrate_peak->val;
		v4l2_ctrl_activate(channel->mpeg_video_bitrate_peak,
				   ctrl->val == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR);
		break;
	case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
	case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
	case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
		allegro_clamp_qp(channel, ctrl);
		break;
	}

	return 0;
}

static const struct v4l2_ctrl_ops allegro_ctrl_ops = {
	.try_ctrl = allegro_try_ctrl,
	.s_ctrl = allegro_s_ctrl,
};

/* Custom boolean control toggling the hardware encoder buffer. */
static const struct v4l2_ctrl_config allegro_encoder_buffer_ctrl_config = {
	.id = V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER,
	.name = "Encoder Buffer Enable",
	.type = V4L2_CTRL_TYPE_BOOLEAN,
	.min = 0,
	.max = 1,
	.step = 1,
	.def = 1,
};

/*
 * File open: allocate a channel, set defaults, register the full control
 * handler, and create the v4l2-m2m context.
 */
static int allegro_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct allegro_dev *dev = video_get_drvdata(vdev);
	struct allegro_channel *channel = NULL;
	struct v4l2_ctrl_handler *handler;
	u64 mask;
	int ret;
	unsigned int bitrate_max;
	unsigned int bitrate_def;
	unsigned int cpb_size_max;
	unsigned int cpb_size_def;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return -ENOMEM;

	v4l2_fh_init(&channel->fh, vdev);

	init_completion(&channel->completion);
	INIT_LIST_HEAD(&channel->source_shadow_list);
	INIT_LIST_HEAD(&channel->stream_shadow_list);
	mutex_init(&channel->shadow_list_lock);

	channel->dev = dev;

	allegro_set_default_params(channel);

	handler =
&channel->ctrl_handler; v4l2_ctrl_handler_init(handler, 0); channel->mpeg_video_h264_profile = v4l2_ctrl_new_std_menu(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE, V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0, V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE); mask = 1 << V4L2_MPEG_VIDEO_H264_LEVEL_1B; channel->mpeg_video_h264_level = v4l2_ctrl_new_std_menu(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL, V4L2_MPEG_VIDEO_H264_LEVEL_5_1, mask, V4L2_MPEG_VIDEO_H264_LEVEL_5_1); channel->mpeg_video_h264_i_frame_qp = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, 0, 51, 1, 30); channel->mpeg_video_h264_max_qp = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51); channel->mpeg_video_h264_min_qp = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_MIN_QP, 0, 51, 1, 0); channel->mpeg_video_h264_p_frame_qp = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, 0, 51, 1, 30); channel->mpeg_video_h264_b_frame_qp = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, 0, 51, 1, 30); channel->mpeg_video_hevc_profile = v4l2_ctrl_new_std_menu(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_PROFILE, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN, 0x0, V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN); channel->mpeg_video_hevc_level = v4l2_ctrl_new_std_menu(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_LEVEL, V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1, 0x0, V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1); channel->mpeg_video_hevc_tier = v4l2_ctrl_new_std_menu(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_TIER, V4L2_MPEG_VIDEO_HEVC_TIER_HIGH, 0x0, V4L2_MPEG_VIDEO_HEVC_TIER_MAIN); channel->mpeg_video_hevc_i_frame_qp = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP, 0, 51, 1, 30); channel->mpeg_video_hevc_max_qp = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP, 0, 51, 1, 51); 
channel->mpeg_video_hevc_min_qp = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP, 0, 51, 1, 0); channel->mpeg_video_hevc_p_frame_qp = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP, 0, 51, 1, 30); channel->mpeg_video_hevc_b_frame_qp = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP, 0, 51, 1, 30); channel->mpeg_video_frame_rc_enable = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, false, 0x1, true, false); channel->mpeg_video_bitrate_mode = v4l2_ctrl_new_std_menu(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE_MODE, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR, 0, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR); if (channel->codec == V4L2_PIX_FMT_H264) { bitrate_max = h264_maximum_bitrate(V4L2_MPEG_VIDEO_H264_LEVEL_5_1); bitrate_def = h264_maximum_bitrate(V4L2_MPEG_VIDEO_H264_LEVEL_5_1); cpb_size_max = h264_maximum_cpb_size(V4L2_MPEG_VIDEO_H264_LEVEL_5_1); cpb_size_def = h264_maximum_cpb_size(V4L2_MPEG_VIDEO_H264_LEVEL_5_1); } else { bitrate_max = hevc_maximum_bitrate(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1); bitrate_def = hevc_maximum_bitrate(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1); cpb_size_max = hevc_maximum_cpb_size(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1); cpb_size_def = hevc_maximum_cpb_size(V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1); } channel->mpeg_video_bitrate = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE, 0, bitrate_max, 1, bitrate_def); channel->mpeg_video_bitrate_peak = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, 0, bitrate_max, 1, bitrate_def); channel->mpeg_video_cpb_size = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE, 0, cpb_size_max, 1, cpb_size_def); channel->mpeg_video_gop_size = v4l2_ctrl_new_std(handler, &allegro_ctrl_ops, V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, ALLEGRO_GOP_SIZE_MAX, 1, ALLEGRO_GOP_SIZE_DEFAULT); channel->encoder_buffer = v4l2_ctrl_new_custom(handler, 
&allegro_encoder_buffer_ctrl_config, NULL);
	v4l2_ctrl_new_std(handler,
			  &allegro_ctrl_ops,
			  V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 1, 32, 1, 1);
	if (handler->error != 0) {
		ret = handler->error;
		goto error;
	}
	channel->fh.ctrl_handler = handler;

	v4l2_ctrl_cluster(3, &channel->mpeg_video_bitrate_mode);

	v4l2_ctrl_handler_setup(handler);

	/* No MCU channel exists yet; it is created when streaming starts. */
	channel->mcu_channel_id = -1;
	channel->user_id = -1;

	INIT_LIST_HEAD(&channel->buffers_reference);
	INIT_LIST_HEAD(&channel->buffers_intermediate);

	channel->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, channel,
						allegro_queue_init);

	if (IS_ERR(channel->fh.m2m_ctx)) {
		ret = PTR_ERR(channel->fh.m2m_ctx);
		goto error;
	}

	list_add(&channel->list, &dev->channels);
	file->private_data = &channel->fh;
	v4l2_fh_add(&channel->fh);

	allegro_channel_adjust(channel);

	return 0;

error:
	v4l2_ctrl_handler_free(handler);
	kfree(channel);
	return ret;
}

/*
 * Release a channel: tear down the m2m context, unlink the channel from the
 * device list, free its control handler and file-handle state, then free the
 * channel itself.
 */
static int allegro_release(struct file *file)
{
	struct allegro_channel *channel = fh_to_channel(file->private_data);

	v4l2_m2m_ctx_release(channel->fh.m2m_ctx);

	list_del(&channel->list);

	v4l2_ctrl_handler_free(&channel->ctrl_handler);

	v4l2_fh_del(&channel->fh);
	v4l2_fh_exit(&channel->fh);

	kfree(channel);

	return 0;
}

/* VIDIOC_QUERYCAP: report static driver and card names. */
static int allegro_querycap(struct file *file, void *fh,
			    struct v4l2_capability *cap)
{
	strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
	strscpy(cap->card, "Allegro DVT Video Encoder", sizeof(cap->card));

	return 0;
}

/*
 * VIDIOC_ENUM_FMT for both queues: the OUTPUT (raw) side supports only NV12;
 * the CAPTURE (coded) side supports H.264 and HEVC.
 */
static int allegro_enum_fmt_vid(struct file *file, void *fh,
				struct v4l2_fmtdesc *f)
{
	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		if (f->index >= 1)
			return -EINVAL;
		f->pixelformat = V4L2_PIX_FMT_NV12;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		if (f->index >= 2)
			return -EINVAL;
		if (f->index == 0)
			f->pixelformat = V4L2_PIX_FMT_H264;
		if (f->index == 1)
			f->pixelformat = V4L2_PIX_FMT_HEVC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/* VIDIOC_G_FMT for the coded (capture) queue. */
static int allegro_g_fmt_vid_cap(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct allegro_channel *channel = fh_to_channel(fh);
f->fmt.pix.field = V4L2_FIELD_NONE;
	f->fmt.pix.width = channel->width;
	f->fmt.pix.height = channel->height;
	f->fmt.pix.colorspace = channel->colorspace;
	f->fmt.pix.ycbcr_enc = channel->ycbcr_enc;
	f->fmt.pix.quantization = channel->quantization;
	f->fmt.pix.xfer_func = channel->xfer_func;
	f->fmt.pix.pixelformat = channel->codec;
	/* A coded stream has no line stride. */
	f->fmt.pix.bytesperline = 0;
	f->fmt.pix.sizeimage = channel->sizeimage_encoded;

	return 0;
}

/*
 * VIDIOC_TRY_FMT for the coded (capture) queue: clamp the frame size to the
 * supported range and fall back to H.264 for unsupported pixel formats.
 */
static int allegro_try_fmt_vid_cap(struct file *file, void *fh,
				   struct v4l2_format *f)
{
	f->fmt.pix.field = V4L2_FIELD_NONE;

	f->fmt.pix.width = clamp_t(__u32, f->fmt.pix.width,
				   ALLEGRO_WIDTH_MIN, ALLEGRO_WIDTH_MAX);
	f->fmt.pix.height = clamp_t(__u32, f->fmt.pix.height,
				    ALLEGRO_HEIGHT_MIN, ALLEGRO_HEIGHT_MAX);

	if (f->fmt.pix.pixelformat != V4L2_PIX_FMT_HEVC &&
	    f->fmt.pix.pixelformat != V4L2_PIX_FMT_H264)
		f->fmt.pix.pixelformat = V4L2_PIX_FMT_H264;

	f->fmt.pix.bytesperline = 0;
	f->fmt.pix.sizeimage = estimate_stream_size(f->fmt.pix.width,
						    f->fmt.pix.height);

	return 0;
}

/*
 * VIDIOC_S_FMT for the coded (capture) queue: only the codec selection is
 * taken from userspace; rejected while the queue has buffers allocated.
 */
static int allegro_s_fmt_vid_cap(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct allegro_channel *channel = fh_to_channel(fh);
	struct vb2_queue *vq;
	int err;

	err = allegro_try_fmt_vid_cap(file, fh, f);
	if (err)
		return err;

	vq = v4l2_m2m_get_vq(channel->fh.m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;
	if (vb2_is_busy(vq))
		return -EBUSY;

	channel->codec = f->fmt.pix.pixelformat;

	allegro_channel_adjust(channel);

	return 0;
}

/* VIDIOC_G_FMT for the raw (output) queue. */
static int allegro_g_fmt_vid_out(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct allegro_channel *channel = fh_to_channel(fh);

	f->fmt.pix.field = V4L2_FIELD_NONE;

	f->fmt.pix.width = channel->width;
	f->fmt.pix.height = channel->height;
	f->fmt.pix.colorspace = channel->colorspace;
	f->fmt.pix.ycbcr_enc = channel->ycbcr_enc;
	f->fmt.pix.quantization = channel->quantization;
	f->fmt.pix.xfer_func = channel->xfer_func;
	f->fmt.pix.pixelformat = channel->pixelformat;
	f->fmt.pix.bytesperline = channel->stride;
	f->fmt.pix.sizeimage = channel->sizeimage_raw;
return 0;
}

/*
 * VIDIOC_TRY_FMT for the raw (output) queue: clamp the frame size and force
 * NV12 with a 32-byte aligned stride.
 */
static int allegro_try_fmt_vid_out(struct file *file, void *fh,
				   struct v4l2_format *f)
{
	f->fmt.pix.field = V4L2_FIELD_NONE;

	/*
	 * The firmware of the Allegro codec handles the padding internally
	 * and expects the visual frame size when configuring a channel.
	 * Therefore, unlike other encoder drivers, this driver does not round
	 * up the width and height to macroblock alignment and does not
	 * implement the selection api.
	 */
	f->fmt.pix.width = clamp_t(__u32, f->fmt.pix.width,
				   ALLEGRO_WIDTH_MIN, ALLEGRO_WIDTH_MAX);
	f->fmt.pix.height = clamp_t(__u32, f->fmt.pix.height,
				    ALLEGRO_HEIGHT_MIN, ALLEGRO_HEIGHT_MAX);

	f->fmt.pix.pixelformat = V4L2_PIX_FMT_NV12;
	f->fmt.pix.bytesperline = round_up(f->fmt.pix.width, 32);
	/* NV12: full-size luma plane plus half-size interleaved chroma. */
	f->fmt.pix.sizeimage =
		f->fmt.pix.bytesperline * f->fmt.pix.height * 3 / 2;

	return 0;
}

/* VIDIOC_S_FMT for the raw (output) queue: store the negotiated format. */
static int allegro_s_fmt_vid_out(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct allegro_channel *channel = fh_to_channel(fh);
	int err;

	err = allegro_try_fmt_vid_out(file, fh, f);
	if (err)
		return err;

	channel->width = f->fmt.pix.width;
	channel->height = f->fmt.pix.height;
	channel->stride = f->fmt.pix.bytesperline;
	channel->sizeimage_raw = f->fmt.pix.sizeimage;

	channel->colorspace = f->fmt.pix.colorspace;
	channel->ycbcr_enc = f->fmt.pix.ycbcr_enc;
	channel->quantization = f->fmt.pix.quantization;
	channel->xfer_func = f->fmt.pix.xfer_func;

	allegro_channel_adjust(channel);

	return 0;
}

/* V4L2_ENC_CMD_STOP: if the m2m context already stopped, signal EOS now. */
static int allegro_channel_cmd_stop(struct allegro_channel *channel)
{
	if (v4l2_m2m_has_stopped(channel->fh.m2m_ctx))
		allegro_channel_eos_event(channel);

	return 0;
}

/*
 * V4L2_ENC_CMD_START: clear the last-buffer-dequeued flag on the capture
 * queue so that encoding can resume after a stop.
 */
static int allegro_channel_cmd_start(struct allegro_channel *channel)
{
	if (v4l2_m2m_has_stopped(channel->fh.m2m_ctx))
		vb2_clear_last_buffer_dequeued(&channel->fh.m2m_ctx->cap_q_ctx.q);

	return 0;
}

/* VIDIOC_ENCODER_CMD: validate, forward to the m2m core, then post-process. */
static int allegro_encoder_cmd(struct file *file, void *fh,
			       struct v4l2_encoder_cmd *cmd)
{
	struct allegro_channel *channel = fh_to_channel(fh);
	int err;

	err = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, cmd);
	if (err)
return err;

	err = v4l2_m2m_ioctl_encoder_cmd(file, fh, cmd);
	if (err)
		return err;

	if (cmd->cmd == V4L2_ENC_CMD_STOP)
		err = allegro_channel_cmd_stop(channel);

	if (cmd->cmd == V4L2_ENC_CMD_START)
		err = allegro_channel_cmd_start(channel);

	return err;
}

/*
 * VIDIOC_ENUM_FRAMESIZES: one continuous range is reported for all supported
 * pixel formats.
 */
static int allegro_enum_framesizes(struct file *file, void *fh,
				   struct v4l2_frmsizeenum *fsize)
{
	switch (fsize->pixel_format) {
	case V4L2_PIX_FMT_HEVC:
	case V4L2_PIX_FMT_H264:
	case V4L2_PIX_FMT_NV12:
		break;
	default:
		return -EINVAL;
	}

	if (fsize->index)
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;

	fsize->stepwise.min_width = ALLEGRO_WIDTH_MIN;
	fsize->stepwise.max_width = ALLEGRO_WIDTH_MAX;
	fsize->stepwise.step_width = 1;
	fsize->stepwise.min_height = ALLEGRO_HEIGHT_MIN;
	fsize->stepwise.max_height = ALLEGRO_HEIGHT_MAX;
	fsize->stepwise.step_height = 1;

	return 0;
}

/*
 * VIDIOC_STREAMON: create the channel on the MCU before streaming starts on
 * the capture queue, then forward to the m2m core.
 */
static int allegro_ioctl_streamon(struct file *file, void *priv,
				  enum v4l2_buf_type type)
{
	struct v4l2_fh *fh = file->private_data;
	struct allegro_channel *channel = fh_to_channel(fh);
	int err;

	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		err = allegro_create_channel(channel);
		if (err)
			return err;
	}

	return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
}

/*
 * VIDIOC_G_PARM: report the frame interval, i.e. the inverse of the stored
 * frame rate — numerator and denominator are deliberately swapped.
 */
static int allegro_g_parm(struct file *file, void *fh,
			  struct v4l2_streamparm *a)
{
	struct allegro_channel *channel = fh_to_channel(fh);
	struct v4l2_fract *timeperframe;

	if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;

	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	timeperframe = &a->parm.output.timeperframe;
	timeperframe->numerator = channel->framerate.denominator;
	timeperframe->denominator = channel->framerate.numerator;

	return 0;
}

/* VIDIOC_S_PARM: store the requested frame interval as a reduced frame rate. */
static int allegro_s_parm(struct file *file, void *fh,
			  struct v4l2_streamparm *a)
{
	struct allegro_channel *channel = fh_to_channel(fh);
	struct v4l2_fract *timeperframe;
	int div;

	if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;

	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	timeperframe = &a->parm.output.timeperframe;

	if (timeperframe->numerator
== 0 || timeperframe->denominator == 0)
		return allegro_g_parm(file, fh, a);

	/* Reduce the fraction, then invert it to get frames per second. */
	div = gcd(timeperframe->denominator, timeperframe->numerator);
	channel->framerate.numerator = timeperframe->denominator / div;
	channel->framerate.denominator = timeperframe->numerator / div;

	return 0;
}

/* Handle EOS subscriptions here; defer the rest to the control framework. */
static int allegro_subscribe_event(struct v4l2_fh *fh,
				   const struct v4l2_event_subscription *sub)
{
	switch (sub->type) {
	case V4L2_EVENT_EOS:
		return v4l2_event_subscribe(fh, sub, 0, NULL);
	default:
		return v4l2_ctrl_subscribe_event(fh, sub);
	}
}

static const struct v4l2_ioctl_ops allegro_ioctl_ops = {
	.vidioc_querycap = allegro_querycap,
	.vidioc_enum_fmt_vid_cap = allegro_enum_fmt_vid,
	.vidioc_enum_fmt_vid_out = allegro_enum_fmt_vid,
	.vidioc_g_fmt_vid_cap = allegro_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap = allegro_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap = allegro_s_fmt_vid_cap,
	.vidioc_g_fmt_vid_out = allegro_g_fmt_vid_out,
	.vidioc_try_fmt_vid_out = allegro_try_fmt_vid_out,
	.vidioc_s_fmt_vid_out = allegro_s_fmt_vid_out,

	.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
	.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,

	.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
	.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
	.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,

	.vidioc_streamon = allegro_ioctl_streamon,
	.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,

	.vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
	.vidioc_encoder_cmd = allegro_encoder_cmd,
	.vidioc_enum_framesizes = allegro_enum_framesizes,

	.vidioc_g_parm = allegro_g_parm,
	.vidioc_s_parm = allegro_s_parm,

	.vidioc_subscribe_event = allegro_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};

static const struct v4l2_file_operations allegro_fops = {
	.owner = THIS_MODULE,
	.open = allegro_open,
	.release = allegro_release,
	.poll = v4l2_m2m_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = v4l2_m2m_fop_mmap,
};

/* Register the single mem2mem video device for this encoder instance. */
static int allegro_register_device(struct allegro_dev *dev)
{
struct video_device *video_dev = &dev->video_dev;

	strscpy(video_dev->name, "allegro", sizeof(video_dev->name));
	video_dev->fops = &allegro_fops;
	video_dev->ioctl_ops = &allegro_ioctl_ops;
	video_dev->release = video_device_release_empty;
	video_dev->lock = &dev->lock;
	video_dev->v4l2_dev = &dev->v4l2_dev;
	video_dev->vfl_dir = VFL_DIR_M2M;
	video_dev->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
	video_set_drvdata(video_dev, dev);

	return video_register_device(video_dev, VFL_TYPE_VIDEO, 0);
}

/*
 * m2m .device_run: hand the next destination buffer to the MCU as a stream
 * buffer and submit the next source frame for encoding. The job is finished
 * immediately; the encode completes asynchronously on the MCU side.
 */
static void allegro_device_run(void *priv)
{
	struct allegro_channel *channel = priv;
	struct allegro_dev *dev = channel->dev;
	struct vb2_v4l2_buffer *src_buf;
	struct vb2_v4l2_buffer *dst_buf;
	dma_addr_t src_y;
	dma_addr_t src_uv;
	dma_addr_t dst_addr;
	unsigned long dst_size;
	u64 src_handle;
	u64 dst_handle;

	dst_buf = v4l2_m2m_dst_buf_remove(channel->fh.m2m_ctx);
	dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
	dst_size = vb2_plane_size(&dst_buf->vb2_buf, 0);
	dst_handle = allegro_put_buffer(channel, &channel->stream_shadow_list,
					dst_buf);
	allegro_mcu_send_put_stream_buffer(dev, channel, dst_addr, dst_size,
					   dst_handle);

	src_buf = v4l2_m2m_src_buf_remove(channel->fh.m2m_ctx);
	src_buf->sequence = channel->osequence++;
	src_y = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
	/* NV12: the chroma plane directly follows the luma plane. */
	src_uv = src_y + (channel->stride * channel->height);
	src_handle = allegro_put_buffer(channel, &channel->source_shadow_list,
					src_buf);
	allegro_mcu_send_encode_frame(dev, channel, src_y, src_uv, src_handle);

	v4l2_m2m_job_finish(dev->m2m_dev, channel->fh.m2m_ctx);
}

static const struct v4l2_m2m_ops allegro_m2m_ops = {
	.device_run = allegro_device_run,
};

/*
 * Bring up the MCU: set up the command/status mailboxes, probe for the
 * optional encoder buffer, start the MCU and configure its sub-allocator.
 */
static int allegro_mcu_hw_init(struct allegro_dev *dev,
			       const struct fw_info *info)
{
	int err;

	dev->mbox_command = allegro_mbox_init(dev, info->mailbox_cmd,
					      info->mailbox_size);
	dev->mbox_status = allegro_mbox_init(dev, info->mailbox_status,
					     info->mailbox_size);
	if (IS_ERR(dev->mbox_command) || IS_ERR(dev->mbox_status)) {
v4l2_err(&dev->v4l2_dev,
			 "failed to initialize mailboxes\n");
		return -EIO;
	}

	/* The encoder buffer is optional; continue without it if missing. */
	err = allegro_encoder_buffer_init(dev, &dev->encoder_buffer);
	dev->has_encoder_buffer = (err == 0);
	if (!dev->has_encoder_buffer)
		v4l2_info(&dev->v4l2_dev, "encoder buffer not available\n");

	allegro_mcu_enable_interrupts(dev);

	/* The mcu sends INIT after reset. */
	allegro_mcu_start(dev);
	err = allegro_mcu_wait_for_init_timeout(dev, 5000);
	if (err < 0) {
		v4l2_err(&dev->v4l2_dev,
			 "mcu did not send INIT after reset\n");
		err = -EIO;
		goto err_disable_interrupts;
	}

	err = allegro_alloc_buffer(dev, &dev->suballocator,
				   info->suballocator_size);
	if (err) {
		v4l2_err(&dev->v4l2_dev,
			 "failed to allocate %zu bytes for suballocator\n",
			 info->suballocator_size);
		goto err_reset_mcu;
	}

	/* Second INIT handshake confirms the sub-allocator configuration. */
	allegro_mcu_send_init(dev, dev->suballocator.paddr,
			      dev->suballocator.size);
	err = allegro_mcu_wait_for_init_timeout(dev, 5000);
	if (err < 0) {
		v4l2_err(&dev->v4l2_dev,
			 "mcu failed to configure sub-allocator\n");
		err = -EIO;
		goto err_free_suballocator;
	}

	return 0;

err_free_suballocator:
	allegro_free_buffer(dev, &dev->suballocator);
err_reset_mcu:
	allegro_mcu_reset(dev);
err_disable_interrupts:
	allegro_mcu_disable_interrupts(dev);

	return err;
}

/* Shut down the MCU; failures are logged but do not abort the teardown. */
static int allegro_mcu_hw_deinit(struct allegro_dev *dev)
{
	int err;

	err = allegro_mcu_reset(dev);
	if (err)
		v4l2_warn(&dev->v4l2_dev,
			  "mcu failed to enter sleep state\n");

	err = allegro_mcu_disable_interrupts(dev);
	if (err)
		v4l2_warn(&dev->v4l2_dev,
			  "failed to disable interrupts\n");

	allegro_free_buffer(dev, &dev->suballocator);

	return 0;
}

/*
 * Completion callback of the asynchronous firmware request: load the matching
 * codec firmware, initialize the MCU and register the video device.
 */
static void allegro_fw_callback(const struct firmware *fw, void *context)
{
	struct allegro_dev *dev = context;
	const char *fw_codec_name = "al5e.fw";
	const struct firmware *fw_codec;
	int err;

	if (!fw)
		return;

	v4l2_dbg(1, debug, &dev->v4l2_dev,
		 "requesting codec firmware '%s'\n", fw_codec_name);
	err = request_firmware(&fw_codec, fw_codec_name, &dev->plat_dev->dev);
	if (err)
		goto err_release_firmware;

	dev->fw_info = allegro_get_firmware_info(dev, fw,
fw_codec);
	if (!dev->fw_info) {
		v4l2_err(&dev->v4l2_dev, "firmware is not supported\n");
		goto err_release_firmware_codec;
	}

	v4l2_info(&dev->v4l2_dev,
		  "using mcu firmware version '%s'\n", dev->fw_info->version);

	pm_runtime_enable(&dev->plat_dev->dev);
	err = pm_runtime_resume_and_get(&dev->plat_dev->dev);
	if (err)
		goto err_release_firmware_codec;

	/* Ensure that the mcu is sleeping at the reset vector */
	err = allegro_mcu_reset(dev);
	if (err) {
		v4l2_err(&dev->v4l2_dev, "failed to reset mcu\n");
		goto err_suspend;
	}

	allegro_copy_firmware(dev, fw->data, fw->size);
	allegro_copy_fw_codec(dev, fw_codec->data, fw_codec->size);

	err = allegro_mcu_hw_init(dev, dev->fw_info);
	if (err) {
		v4l2_err(&dev->v4l2_dev, "failed to initialize mcu\n");
		goto err_free_fw_codec;
	}

	dev->m2m_dev = v4l2_m2m_init(&allegro_m2m_ops);
	if (IS_ERR(dev->m2m_dev)) {
		v4l2_err(&dev->v4l2_dev, "failed to init mem2mem device\n");
		goto err_mcu_hw_deinit;
	}

	err = allegro_register_device(dev);
	if (err) {
		v4l2_err(&dev->v4l2_dev, "failed to register video device\n");
		goto err_m2m_release;
	}

	v4l2_dbg(1, debug, &dev->v4l2_dev,
		 "allegro codec registered as /dev/video%d\n",
		 dev->video_dev.num);

	dev->initialized = true;

	/* Both firmware images have been copied to the device; release them. */
	release_firmware(fw_codec);
	release_firmware(fw);

	return;

err_m2m_release:
	v4l2_m2m_release(dev->m2m_dev);
	dev->m2m_dev = NULL;
err_mcu_hw_deinit:
	allegro_mcu_hw_deinit(dev);
err_free_fw_codec:
	allegro_free_fw_codec(dev);
err_suspend:
	pm_runtime_put(&dev->plat_dev->dev);
	pm_runtime_disable(&dev->plat_dev->dev);
err_release_firmware_codec:
	release_firmware(fw_codec);
err_release_firmware:
	release_firmware(fw);
}

/* Kick off the asynchronous request for the MCU boot firmware. */
static int allegro_firmware_request_nowait(struct allegro_dev *dev)
{
	const char *fw = "al5e_b.fw";

	v4l2_dbg(1, debug, &dev->v4l2_dev,
		 "requesting firmware '%s'\n", fw);
	return request_firmware_nowait(THIS_MODULE, true, fw,
				       &dev->plat_dev->dev, GFP_KERNEL, dev,
				       allegro_fw_callback);
}

/* Platform probe: map registers and SRAM, acquire clocks and the IRQ. */
static int allegro_probe(struct platform_device *pdev)
{
	struct allegro_dev *dev;
	struct resource *res,
*sram_res;
	int ret;
	int irq;
	void __iomem *regs, *sram_regs;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->plat_dev = pdev;
	init_completion(&dev->init_complete);
	INIT_LIST_HEAD(&dev->channels);
	mutex_init(&dev->lock);

	dev->initialized = false;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!res) {
		dev_err(&pdev->dev,
			"regs resource missing from device tree\n");
		return -EINVAL;
	}
	regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!regs) {
		dev_err(&pdev->dev, "failed to map registers\n");
		return -ENOMEM;
	}
	dev->regmap = devm_regmap_init_mmio(&pdev->dev, regs,
					    &allegro_regmap_config);
	if (IS_ERR(dev->regmap)) {
		dev_err(&pdev->dev, "failed to init regmap\n");
		return PTR_ERR(dev->regmap);
	}

	sram_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
	if (!sram_res) {
		dev_err(&pdev->dev,
			"sram resource missing from device tree\n");
		return -EINVAL;
	}
	sram_regs = devm_ioremap(&pdev->dev, sram_res->start,
				 resource_size(sram_res));
	if (!sram_regs) {
		dev_err(&pdev->dev, "failed to map sram\n");
		return -ENOMEM;
	}
	dev->sram = devm_regmap_init_mmio(&pdev->dev, sram_regs,
					  &allegro_sram_config);
	if (IS_ERR(dev->sram)) {
		dev_err(&pdev->dev, "failed to init sram\n");
		return PTR_ERR(dev->sram);
	}

	/* The settings syscon is optional; only warn if it is missing. */
	dev->settings = syscon_regmap_lookup_by_compatible("xlnx,vcu-settings");
	if (IS_ERR(dev->settings))
		dev_warn(&pdev->dev, "failed to open settings\n");

	dev->clk_core = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(dev->clk_core))
		return PTR_ERR(dev->clk_core);
	dev->clk_mcu = devm_clk_get(&pdev->dev, "mcu_clk");
	if (IS_ERR(dev->clk_mcu))
		return PTR_ERR(dev->clk_mcu);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_threaded_irq(&pdev->dev, irq,
					allegro_hardirq,
					allegro_irq_thread,
					IRQF_SHARED, dev_name(&pdev->dev),
					dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request irq: %d\n", ret);
		return ret;
	}

	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret)
return ret;

	platform_set_drvdata(pdev, dev);

	/* The remaining initialization happens in allegro_fw_callback(). */
	ret = allegro_firmware_request_nowait(dev);
	if (ret < 0) {
		v4l2_err(&dev->v4l2_dev,
			 "failed to request firmware: %d\n", ret);
		return ret;
	}

	return 0;
}

/* Platform remove: undo what probe and the firmware callback set up. */
static void allegro_remove(struct platform_device *pdev)
{
	struct allegro_dev *dev = platform_get_drvdata(pdev);

	if (dev->initialized) {
		video_unregister_device(&dev->video_dev);
		if (dev->m2m_dev)
			v4l2_m2m_release(dev->m2m_dev);
		allegro_mcu_hw_deinit(dev);
		allegro_free_fw_codec(dev);
	}

	pm_runtime_put(&dev->plat_dev->dev);
	pm_runtime_disable(&dev->plat_dev->dev);

	v4l2_device_unregister(&dev->v4l2_dev);
}

/*
 * Runtime resume: read the core and MCU clock rates (stored in MHz) from the
 * VCU settings syscon, program the rates and enable both clocks.
 */
static int allegro_runtime_resume(struct device *device)
{
	struct allegro_dev *dev = dev_get_drvdata(device);
	struct regmap *settings = dev->settings;
	unsigned int clk_mcu;
	unsigned int clk_core;
	int err;

	if (!settings)
		return -EINVAL;

#define MHZ_TO_HZ(freq) ((freq) * 1000 * 1000)

	err = regmap_read(settings, VCU_CORE_CLK, &clk_core);
	if (err < 0)
		return err;
	err = clk_set_rate(dev->clk_core, MHZ_TO_HZ(clk_core));
	if (err < 0)
		return err;
	err = clk_prepare_enable(dev->clk_core);
	if (err)
		return err;

	err = regmap_read(settings, VCU_MCU_CLK, &clk_mcu);
	if (err < 0)
		goto disable_clk_core;
	err = clk_set_rate(dev->clk_mcu, MHZ_TO_HZ(clk_mcu));
	if (err < 0)
		goto disable_clk_core;
	err = clk_prepare_enable(dev->clk_mcu);
	if (err)
		goto disable_clk_core;

#undef MHZ_TO_HZ

	return 0;

disable_clk_core:
	clk_disable_unprepare(dev->clk_core);

	return err;
}

/* Runtime suspend: disable the clocks in reverse order of enabling. */
static int allegro_runtime_suspend(struct device *device)
{
	struct allegro_dev *dev = dev_get_drvdata(device);

	clk_disable_unprepare(dev->clk_mcu);
	clk_disable_unprepare(dev->clk_core);

	return 0;
}

static const struct of_device_id allegro_dt_ids[] = {
	{ .compatible = "allegro,al5e-1.1" },
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(of, allegro_dt_ids);

static const struct dev_pm_ops allegro_pm_ops = {
	.runtime_resume = allegro_runtime_resume,
	.runtime_suspend = allegro_runtime_suspend,
};

static struct platform_driver allegro_driver = {
.probe = allegro_probe,
	.remove_new = allegro_remove,
	.driver = {
		.name = "allegro",
		.of_match_table = allegro_dt_ids,
		.pm = &allegro_pm_ops,
	},
};

module_platform_driver(allegro_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Tretter <[email protected]>");
MODULE_DESCRIPTION("Allegro DVT encoder driver");
linux-master
drivers/media/platform/allegro-dvt/allegro-core.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * isph3a.c
 *
 * TI OMAP3 ISP - H3A module
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: David Cohen <[email protected]>
 *	     Laurent Pinchart <[email protected]>
 *	     Sakari Ailus <[email protected]>
 */

#include <linux/slab.h>
#include <linux/uaccess.h>

#include "isp.h"
#include "isph3a.h"
#include "ispstat.h"

/*
 * h3a_aewb_update_regs - Helper function to update h3a registers.
 */
static void h3a_aewb_setup_regs(struct ispstat *aewb, void *priv)
{
	struct omap3isp_h3a_aewb_config *conf = priv;
	u32 pcr;
	u32 win1;
	u32 start;
	u32 blk;
	u32 subwin;

	if (aewb->state == ISPSTAT_DISABLED)
		return;

	/* Always point the hardware at the current statistics buffer. */
	isp_reg_writel(aewb->isp, aewb->active_buf->dma_addr,
		       OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWBUFST);

	if (!aewb->update)
		return;

	/* Converting config metadata into reg values */
	pcr = conf->saturation_limit << ISPH3A_PCR_AEW_AVE2LMT_SHIFT;
	pcr |= !!conf->alaw_enable << ISPH3A_PCR_AEW_ALAW_EN_SHIFT;

	win1 = ((conf->win_height >> 1) - 1) << ISPH3A_AEWWIN1_WINH_SHIFT;
	win1 |= ((conf->win_width >> 1) - 1) << ISPH3A_AEWWIN1_WINW_SHIFT;
	win1 |= (conf->ver_win_count - 1) << ISPH3A_AEWWIN1_WINVC_SHIFT;
	win1 |= (conf->hor_win_count - 1) << ISPH3A_AEWWIN1_WINHC_SHIFT;

	start = conf->hor_win_start << ISPH3A_AEWINSTART_WINSH_SHIFT;
	start |= conf->ver_win_start << ISPH3A_AEWINSTART_WINSV_SHIFT;

	blk = conf->blk_ver_win_start << ISPH3A_AEWINBLK_WINSV_SHIFT;
	blk |= ((conf->blk_win_height >> 1) - 1) << ISPH3A_AEWINBLK_WINH_SHIFT;

	subwin = ((conf->subsample_ver_inc >> 1) - 1) <<
		 ISPH3A_AEWSUBWIN_AEWINCV_SHIFT;
	subwin |= ((conf->subsample_hor_inc >> 1) - 1) <<
		  ISPH3A_AEWSUBWIN_AEWINCH_SHIFT;

	isp_reg_writel(aewb->isp, win1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWWIN1);
	isp_reg_writel(aewb->isp, start, OMAP3_ISP_IOMEM_H3A,
		       ISPH3A_AEWINSTART);
	isp_reg_writel(aewb->isp, blk, OMAP3_ISP_IOMEM_H3A, ISPH3A_AEWINBLK);
	isp_reg_writel(aewb->isp, subwin, OMAP3_ISP_IOMEM_H3A,
		       ISPH3A_AEWSUBWIN);
	isp_reg_clr_set(aewb->isp, OMAP3_ISP_IOMEM_H3A,
			ISPH3A_PCR, ISPH3A_PCR_AEW_MASK, pcr);

	aewb->update = 0;
	aewb->config_counter += aewb->inc_config;
	aewb->inc_config = 0;
	aewb->buf_size = conf->buf_size;
}

/* Enable or disable the AEWB engine together with its subclock. */
static void h3a_aewb_enable(struct ispstat *aewb, int enable)
{
	if (enable) {
		isp_reg_set(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
			    ISPH3A_PCR_AEW_EN);
		omap3isp_subclk_enable(aewb->isp, OMAP3_ISP_SUBCLK_AEWB);
	} else {
		isp_reg_clr(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR,
			    ISPH3A_PCR_AEW_EN);
		omap3isp_subclk_disable(aewb->isp, OMAP3_ISP_SUBCLK_AEWB);
	}
}

/* Return non-zero while the AEWB engine is busy. */
static int h3a_aewb_busy(struct ispstat *aewb)
{
	return isp_reg_readl(aewb->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR)
		& ISPH3A_PCR_BUSYAEAWB;
}

/* Compute the statistics buffer size in bytes for a given configuration. */
static u32 h3a_aewb_get_buf_size(struct omap3isp_h3a_aewb_config *conf)
{
	/* Number of configured windows + extra row for black data */
	u32 win_count = (conf->ver_win_count + 1) * conf->hor_win_count;

	/*
	 * Unsaturated block counts for each 8 windows.
	 * 1 extra for the last (win_count % 8) windows if win_count is not
	 * divisible by 8.
	 */
	win_count += (win_count + 7) / 8;

	return win_count * AEWB_PACKET_SIZE;
}

/* Validate a userspace AEWB configuration against the hardware limits. */
static int h3a_aewb_validate_params(struct ispstat *aewb, void *new_conf)
{
	struct omap3isp_h3a_aewb_config *user_cfg = new_conf;
	u32 buf_size;

	if (unlikely(user_cfg->saturation_limit >
		     OMAP3ISP_AEWB_MAX_SATURATION_LIM))
		return -EINVAL;

	if (unlikely(user_cfg->win_height < OMAP3ISP_AEWB_MIN_WIN_H ||
		     user_cfg->win_height > OMAP3ISP_AEWB_MAX_WIN_H ||
		     user_cfg->win_height & 0x01))
		return -EINVAL;

	if (unlikely(user_cfg->win_width < OMAP3ISP_AEWB_MIN_WIN_W ||
		     user_cfg->win_width > OMAP3ISP_AEWB_MAX_WIN_W ||
		     user_cfg->win_width & 0x01))
		return -EINVAL;

	if (unlikely(user_cfg->ver_win_count < OMAP3ISP_AEWB_MIN_WINVC ||
		     user_cfg->ver_win_count > OMAP3ISP_AEWB_MAX_WINVC))
		return -EINVAL;

	if (unlikely(user_cfg->hor_win_count < OMAP3ISP_AEWB_MIN_WINHC ||
		     user_cfg->hor_win_count > OMAP3ISP_AEWB_MAX_WINHC))
		return -EINVAL;

	if (unlikely(user_cfg->ver_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
		return -EINVAL;

	if
(unlikely(user_cfg->hor_win_start > OMAP3ISP_AEWB_MAX_WINSTART))
		return -EINVAL;

	if (unlikely(user_cfg->blk_ver_win_start >
		     OMAP3ISP_AEWB_MAX_WINSTART))
		return -EINVAL;

	if (unlikely(user_cfg->blk_win_height < OMAP3ISP_AEWB_MIN_WIN_H ||
		     user_cfg->blk_win_height > OMAP3ISP_AEWB_MAX_WIN_H ||
		     user_cfg->blk_win_height & 0x01))
		return -EINVAL;

	if (unlikely(user_cfg->subsample_ver_inc < OMAP3ISP_AEWB_MIN_SUB_INC ||
		     user_cfg->subsample_ver_inc > OMAP3ISP_AEWB_MAX_SUB_INC ||
		     user_cfg->subsample_ver_inc & 0x01))
		return -EINVAL;

	if (unlikely(user_cfg->subsample_hor_inc < OMAP3ISP_AEWB_MIN_SUB_INC ||
		     user_cfg->subsample_hor_inc > OMAP3ISP_AEWB_MAX_SUB_INC ||
		     user_cfg->subsample_hor_inc & 0x01))
		return -EINVAL;

	/* Grow the buffer size to fit the configuration, capped at the max. */
	buf_size = h3a_aewb_get_buf_size(user_cfg);
	if (buf_size > user_cfg->buf_size)
		user_cfg->buf_size = buf_size;
	else if (user_cfg->buf_size > OMAP3ISP_AEWB_MAX_BUF_SIZE)
		user_cfg->buf_size = OMAP3ISP_AEWB_MAX_BUF_SIZE;

	return 0;
}

/*
 * h3a_aewb_set_params - Helper function to check & store user given params.
 * @new_conf: Pointer to AE and AWB parameters struct.
 *
 * As most of them are busy-lock registers, need to wait until AEW_BUSY = 0 to
 * program them during ISR.
 */
static void h3a_aewb_set_params(struct ispstat *aewb, void *new_conf)
{
	struct omap3isp_h3a_aewb_config *user_cfg = new_conf;
	struct omap3isp_h3a_aewb_config *cur_cfg = aewb->priv;
	int update = 0;

	/* Copy only the fields that changed and remember whether any did. */
	if (cur_cfg->saturation_limit != user_cfg->saturation_limit) {
		cur_cfg->saturation_limit = user_cfg->saturation_limit;
		update = 1;
	}
	if (cur_cfg->alaw_enable != user_cfg->alaw_enable) {
		cur_cfg->alaw_enable = user_cfg->alaw_enable;
		update = 1;
	}
	if (cur_cfg->win_height != user_cfg->win_height) {
		cur_cfg->win_height = user_cfg->win_height;
		update = 1;
	}
	if (cur_cfg->win_width != user_cfg->win_width) {
		cur_cfg->win_width = user_cfg->win_width;
		update = 1;
	}
	if (cur_cfg->ver_win_count != user_cfg->ver_win_count) {
		cur_cfg->ver_win_count = user_cfg->ver_win_count;
		update = 1;
	}
	if (cur_cfg->hor_win_count != user_cfg->hor_win_count) {
		cur_cfg->hor_win_count = user_cfg->hor_win_count;
		update = 1;
	}
	if (cur_cfg->ver_win_start != user_cfg->ver_win_start) {
		cur_cfg->ver_win_start = user_cfg->ver_win_start;
		update = 1;
	}
	if (cur_cfg->hor_win_start != user_cfg->hor_win_start) {
		cur_cfg->hor_win_start = user_cfg->hor_win_start;
		update = 1;
	}
	if (cur_cfg->blk_ver_win_start != user_cfg->blk_ver_win_start) {
		cur_cfg->blk_ver_win_start = user_cfg->blk_ver_win_start;
		update = 1;
	}
	if (cur_cfg->blk_win_height != user_cfg->blk_win_height) {
		cur_cfg->blk_win_height = user_cfg->blk_win_height;
		update = 1;
	}
	if (cur_cfg->subsample_ver_inc != user_cfg->subsample_ver_inc) {
		cur_cfg->subsample_ver_inc = user_cfg->subsample_ver_inc;
		update = 1;
	}
	if (cur_cfg->subsample_hor_inc != user_cfg->subsample_hor_inc) {
		cur_cfg->subsample_hor_inc = user_cfg->subsample_hor_inc;
		update = 1;
	}

	/* Registers are programmed later from the ISR via setup_regs. */
	if (update || !aewb->configured) {
		aewb->inc_config++;
		aewb->update = 1;
		cur_cfg->buf_size = h3a_aewb_get_buf_size(cur_cfg);
	}
}

/* Private ioctl handler for AEWB configuration and statistics requests. */
static long h3a_aewb_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct ispstat *stat = v4l2_get_subdevdata(sd);

	switch (cmd) {
	case VIDIOC_OMAP3ISP_AEWB_CFG:
		return
omap3isp_stat_config(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_REQ:
		return omap3isp_stat_request_statistics(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_REQ_TIME32:
		return omap3isp_stat_request_statistics_time32(stat, arg);
	case VIDIOC_OMAP3ISP_STAT_EN: {
		unsigned long *en = arg;
		return omap3isp_stat_enable(stat, !!*en);
	}
	}

	return -ENOIOCTLCMD;
}

static const struct ispstat_ops h3a_aewb_ops = {
	.validate_params = h3a_aewb_validate_params,
	.set_params = h3a_aewb_set_params,
	.setup_regs = h3a_aewb_setup_regs,
	.enable = h3a_aewb_enable,
	.busy = h3a_aewb_busy,
};

static const struct v4l2_subdev_core_ops h3a_aewb_subdev_core_ops = {
	.ioctl = h3a_aewb_ioctl,
	.subscribe_event = omap3isp_stat_subscribe_event,
	.unsubscribe_event = omap3isp_stat_unsubscribe_event,
};

static const struct v4l2_subdev_video_ops h3a_aewb_subdev_video_ops = {
	.s_stream = omap3isp_stat_s_stream,
};

static const struct v4l2_subdev_ops h3a_aewb_subdev_ops = {
	.core = &h3a_aewb_subdev_core_ops,
	.video = &h3a_aewb_subdev_video_ops,
};

/*
 * omap3isp_h3a_aewb_init - Module Initialisation.
 */
int omap3isp_h3a_aewb_init(struct isp_device *isp)
{
	struct ispstat *aewb = &isp->isp_aewb;
	struct omap3isp_h3a_aewb_config *aewb_cfg;
	struct omap3isp_h3a_aewb_config *aewb_recover_cfg = NULL;
	int ret;

	aewb_cfg = kzalloc(sizeof(*aewb_cfg), GFP_KERNEL);
	if (!aewb_cfg)
		return -ENOMEM;

	aewb->ops = &h3a_aewb_ops;
	aewb->priv = aewb_cfg;
	aewb->event_type = V4L2_EVENT_OMAP3ISP_AEWB;
	aewb->isp = isp;

	/* Set recover state configuration */
	aewb_recover_cfg = kzalloc(sizeof(*aewb_recover_cfg), GFP_KERNEL);
	if (!aewb_recover_cfg) {
		dev_err(aewb->isp->dev,
			"AEWB: cannot allocate memory for recover configuration.\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Minimal safe configuration used to recover from errors. */
	aewb_recover_cfg->saturation_limit = OMAP3ISP_AEWB_MAX_SATURATION_LIM;
	aewb_recover_cfg->win_height = OMAP3ISP_AEWB_MIN_WIN_H;
	aewb_recover_cfg->win_width = OMAP3ISP_AEWB_MIN_WIN_W;
	aewb_recover_cfg->ver_win_count = OMAP3ISP_AEWB_MIN_WINVC;
	aewb_recover_cfg->hor_win_count = OMAP3ISP_AEWB_MIN_WINHC;
	aewb_recover_cfg->blk_ver_win_start = aewb_recover_cfg->ver_win_start +
		aewb_recover_cfg->win_height * aewb_recover_cfg->ver_win_count;
	aewb_recover_cfg->blk_win_height = OMAP3ISP_AEWB_MIN_WIN_H;
	aewb_recover_cfg->subsample_ver_inc = OMAP3ISP_AEWB_MIN_SUB_INC;
	aewb_recover_cfg->subsample_hor_inc = OMAP3ISP_AEWB_MIN_SUB_INC;

	if (h3a_aewb_validate_params(aewb, aewb_recover_cfg)) {
		dev_err(aewb->isp->dev,
			"AEWB: recover configuration is invalid.\n");
		ret = -EINVAL;
		goto err;
	}

	aewb_recover_cfg->buf_size = h3a_aewb_get_buf_size(aewb_recover_cfg);
	aewb->recover_priv = aewb_recover_cfg;

	ret = omap3isp_stat_init(aewb, "AEWB", &h3a_aewb_subdev_ops);

err:
	if (ret) {
		kfree(aewb_cfg);
		kfree(aewb_recover_cfg);
	}

	return ret;
}

/*
 * omap3isp_h3a_aewb_cleanup - Module exit.
 */
void omap3isp_h3a_aewb_cleanup(struct isp_device *isp)
{
	omap3isp_stat_cleanup(&isp->isp_aewb);
}
linux-master
drivers/media/platform/ti/omap3isp/isph3a_aewb.c
// SPDX-License-Identifier: GPL-2.0-only /* * ispresizer.c * * TI OMAP3 ISP - Resizer module * * Copyright (C) 2010 Nokia Corporation * Copyright (C) 2009 Texas Instruments, Inc * * Contacts: Laurent Pinchart <[email protected]> * Sakari Ailus <[email protected]> */ #include <linux/device.h> #include <linux/mm.h> #include <linux/module.h> #include "isp.h" #include "ispreg.h" #include "ispresizer.h" /* * Resizer Constants */ #define MIN_RESIZE_VALUE 64 #define MID_RESIZE_VALUE 512 #define MAX_RESIZE_VALUE 1024 #define MIN_IN_WIDTH 32 #define MIN_IN_HEIGHT 32 #define MAX_IN_WIDTH_MEMORY_MODE 4095 #define MAX_IN_WIDTH_ONTHEFLY_MODE_ES1 1280 #define MAX_IN_WIDTH_ONTHEFLY_MODE_ES2 4095 #define MAX_IN_HEIGHT 4095 #define MIN_OUT_WIDTH 16 #define MIN_OUT_HEIGHT 2 #define MAX_OUT_HEIGHT 4095 /* * Resizer Use Constraints * "TRM ES3.1, table 12-46" */ #define MAX_4TAP_OUT_WIDTH_ES1 1280 #define MAX_7TAP_OUT_WIDTH_ES1 640 #define MAX_4TAP_OUT_WIDTH_ES2 3312 #define MAX_7TAP_OUT_WIDTH_ES2 1650 #define MAX_4TAP_OUT_WIDTH_3630 4096 #define MAX_7TAP_OUT_WIDTH_3630 2048 /* * Constants for ratio calculation */ #define RESIZE_DIVISOR 256 #define DEFAULT_PHASE 1 /* * Default (and only) configuration of filter coefficients. * 7-tap mode is for scale factors 0.25x to 0.5x. * 4-tap mode is for scale factors 0.5x to 4.0x. * There shouldn't be any reason to recalculate these, EVER. 
*/ static const struct isprsz_coef filter_coefs = { /* For 8-phase 4-tap horizontal filter: */ { 0x0000, 0x0100, 0x0000, 0x0000, 0x03FA, 0x00F6, 0x0010, 0x0000, 0x03F9, 0x00DB, 0x002C, 0x0000, 0x03FB, 0x00B3, 0x0053, 0x03FF, 0x03FD, 0x0082, 0x0084, 0x03FD, 0x03FF, 0x0053, 0x00B3, 0x03FB, 0x0000, 0x002C, 0x00DB, 0x03F9, 0x0000, 0x0010, 0x00F6, 0x03FA }, /* For 8-phase 4-tap vertical filter: */ { 0x0000, 0x0100, 0x0000, 0x0000, 0x03FA, 0x00F6, 0x0010, 0x0000, 0x03F9, 0x00DB, 0x002C, 0x0000, 0x03FB, 0x00B3, 0x0053, 0x03FF, 0x03FD, 0x0082, 0x0084, 0x03FD, 0x03FF, 0x0053, 0x00B3, 0x03FB, 0x0000, 0x002C, 0x00DB, 0x03F9, 0x0000, 0x0010, 0x00F6, 0x03FA }, /* For 4-phase 7-tap horizontal filter: */ #define DUMMY 0 { 0x0004, 0x0023, 0x005A, 0x0058, 0x0023, 0x0004, 0x0000, DUMMY, 0x0002, 0x0018, 0x004d, 0x0060, 0x0031, 0x0008, 0x0000, DUMMY, 0x0001, 0x000f, 0x003f, 0x0062, 0x003f, 0x000f, 0x0001, DUMMY, 0x0000, 0x0008, 0x0031, 0x0060, 0x004d, 0x0018, 0x0002, DUMMY }, /* For 4-phase 7-tap vertical filter: */ { 0x0004, 0x0023, 0x005A, 0x0058, 0x0023, 0x0004, 0x0000, DUMMY, 0x0002, 0x0018, 0x004d, 0x0060, 0x0031, 0x0008, 0x0000, DUMMY, 0x0001, 0x000f, 0x003f, 0x0062, 0x003f, 0x000f, 0x0001, DUMMY, 0x0000, 0x0008, 0x0031, 0x0060, 0x004d, 0x0018, 0x0002, DUMMY } /* * The dummy padding is required in 7-tap mode because of how the * registers are arranged physically. 
 */
#undef DUMMY
};

/*
 * __resizer_get_format - helper function for getting resizer format
 * @res : pointer to resizer private structure
 * @sd_state: V4L2 subdev state
 * @pad : pad number
 * @which : wanted subdev format (TRY or ACTIVE)
 *
 * Return the TRY format stored in the subdev state, or the ACTIVE format
 * stored in the device context, for the given pad.
 */
static struct v4l2_mbus_framefmt *
__resizer_get_format(struct isp_res_device *res,
		     struct v4l2_subdev_state *sd_state, unsigned int pad,
		     enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(&res->subdev, sd_state, pad);
	else
		return &res->formats[pad];
}

/*
 * __resizer_get_crop - helper function for getting resizer crop rectangle
 * @res : pointer to resizer private structure
 * @sd_state: V4L2 subdev state
 * @which : wanted subdev crop rectangle (TRY or ACTIVE)
 *
 * Cropping only applies to the sink pad; the ACTIVE rectangle is the
 * requested one, which may later be adjusted by resizer_calc_ratios().
 */
static struct v4l2_rect *
__resizer_get_crop(struct isp_res_device *res,
		   struct v4l2_subdev_state *sd_state,
		   enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_crop(&res->subdev, sd_state,
						RESZ_PAD_SINK);
	else
		return &res->crop.request;
}

/*
 * resizer_set_filters - Set resizer filters
 * @res: Device context.
 * @h_coeff: horizontal coefficient
 * @v_coeff: vertical coefficient
 *
 * Program the horizontal and vertical polyphase filter coefficient banks.
 * Two 10-bit coefficients are packed into each 32-bit HFILT/VFILT register.
 * Return none
 */
static void resizer_set_filters(struct isp_res_device *res, const u16 *h_coeff,
				const u16 *v_coeff)
{
	struct isp_device *isp = to_isp_device(res);
	u32 startaddr_h, startaddr_v, tmp_h, tmp_v;
	int i;

	startaddr_h = ISPRSZ_HFILT10;
	startaddr_v = ISPRSZ_VFILT10;

	for (i = 0; i < COEFF_CNT; i += 2) {
		/* Pack coefficient pair i/i+1 into one register. */
		tmp_h = h_coeff[i] |
			(h_coeff[i + 1] << ISPRSZ_HFILT_COEF1_SHIFT);
		tmp_v = v_coeff[i] |
			(v_coeff[i + 1] << ISPRSZ_VFILT_COEF1_SHIFT);
		isp_reg_writel(isp, tmp_h, OMAP3_ISP_IOMEM_RESZ, startaddr_h);
		isp_reg_writel(isp, tmp_v, OMAP3_ISP_IOMEM_RESZ, startaddr_v);
		startaddr_h += 4;
		startaddr_v += 4;
	}
}

/*
 * resizer_set_bilinear - Chrominance horizontal algorithm select
 * @res: Device context.
 * @type: Filtering interpolation type.
 *
 * Filtering that is same as luminance processing is
 * intended only for downsampling, and bilinear interpolation
 * is intended only for upsampling.
 */
static void resizer_set_bilinear(struct isp_res_device *res,
				 enum resizer_chroma_algo type)
{
	struct isp_device *isp = to_isp_device(res);

	/* CBILIN set = bilinear chroma interpolation, clear = same as luma. */
	if (type == RSZ_BILINEAR)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
			    ISPRSZ_CNT_CBILIN);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
			    ISPRSZ_CNT_CBILIN);
}

/*
 * resizer_set_ycpos - Luminance and chrominance order
 * @res: Device context.
 * @pixelcode: pixel code.
 *
 * Select the Y/C byte order matching the media bus code; other codes are
 * ignored and leave the register untouched.
 */
static void resizer_set_ycpos(struct isp_res_device *res, u32 pixelcode)
{
	struct isp_device *isp = to_isp_device(res);

	switch (pixelcode) {
	case MEDIA_BUS_FMT_YUYV8_1X16:
		isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
			    ISPRSZ_CNT_YCPOS);
		break;
	case MEDIA_BUS_FMT_UYVY8_1X16:
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT,
			    ISPRSZ_CNT_YCPOS);
		break;
	default:
		return;
	}
}

/*
 * resizer_set_phase - Setup horizontal and vertical starting phase
 * @res: Device context.
 * @h_phase: horizontal phase parameters.
 * @v_phase: vertical phase parameters.
 *
 * Horizontal and vertical phase range is 0 to 7
 */
static void resizer_set_phase(struct isp_res_device *res, u32 h_phase,
			      u32 v_phase)
{
	struct isp_device *isp = to_isp_device(res);
	u32 rgval;

	/* Read-modify-write: only the two phase fields of ISPRSZ_CNT change. */
	rgval = isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) &
		~(ISPRSZ_CNT_HSTPH_MASK | ISPRSZ_CNT_VSTPH_MASK);
	rgval |= (h_phase << ISPRSZ_CNT_HSTPH_SHIFT) & ISPRSZ_CNT_HSTPH_MASK;
	rgval |= (v_phase << ISPRSZ_CNT_VSTPH_SHIFT) & ISPRSZ_CNT_VSTPH_MASK;

	isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT);
}

/*
 * resizer_set_luma - Setup luminance enhancer parameters
 * @res: Device context.
 * @luma: Structure for luminance enhancer parameters.
 *
 * Algorithm select:
 *  0x0: Disable
 *  0x1: [-1 2 -1]/2 high-pass filter
 *  0x2: [-1 -2 6 -2 -1]/4 high-pass filter
 *
 * Maximum gain:
 *  The data is coded in U4Q4 representation.
* * Slope: * The data is coded in U4Q4 representation. * * Coring offset: * The data is coded in U8Q0 representation. * * The new luminance value is computed as: * Y += HPF(Y) x max(GAIN, (HPF(Y) - CORE) x SLOP + 8) >> 4. */ static void resizer_set_luma(struct isp_res_device *res, struct resizer_luma_yenh *luma) { struct isp_device *isp = to_isp_device(res); u32 rgval; rgval = (luma->algo << ISPRSZ_YENH_ALGO_SHIFT) & ISPRSZ_YENH_ALGO_MASK; rgval |= (luma->gain << ISPRSZ_YENH_GAIN_SHIFT) & ISPRSZ_YENH_GAIN_MASK; rgval |= (luma->slope << ISPRSZ_YENH_SLOP_SHIFT) & ISPRSZ_YENH_SLOP_MASK; rgval |= (luma->core << ISPRSZ_YENH_CORE_SHIFT) & ISPRSZ_YENH_CORE_MASK; isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_YENH); } /* * resizer_set_source - Input source select * @res: Device context. * @source: Input source type * * If this field is set to RESIZER_INPUT_VP, the resizer input is fed from * Preview/CCDC engine, otherwise from memory. */ static void resizer_set_source(struct isp_res_device *res, enum resizer_input_entity source) { struct isp_device *isp = to_isp_device(res); if (source == RESIZER_INPUT_MEMORY) isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT, ISPRSZ_CNT_INPSRC); else isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT, ISPRSZ_CNT_INPSRC); } /* * resizer_set_ratio - Setup horizontal and vertical resizing value * @res: Device context. * @ratio: Structure for ratio parameters. 
* * Resizing range from 64 to 1024 */ static void resizer_set_ratio(struct isp_res_device *res, const struct resizer_ratio *ratio) { struct isp_device *isp = to_isp_device(res); const u16 *h_filter, *v_filter; u32 rgval; rgval = isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT) & ~(ISPRSZ_CNT_HRSZ_MASK | ISPRSZ_CNT_VRSZ_MASK); rgval |= ((ratio->horz - 1) << ISPRSZ_CNT_HRSZ_SHIFT) & ISPRSZ_CNT_HRSZ_MASK; rgval |= ((ratio->vert - 1) << ISPRSZ_CNT_VRSZ_SHIFT) & ISPRSZ_CNT_VRSZ_MASK; isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT); /* prepare horizontal filter coefficients */ if (ratio->horz > MID_RESIZE_VALUE) h_filter = &filter_coefs.h_filter_coef_7tap[0]; else h_filter = &filter_coefs.h_filter_coef_4tap[0]; /* prepare vertical filter coefficients */ if (ratio->vert > MID_RESIZE_VALUE) v_filter = &filter_coefs.v_filter_coef_7tap[0]; else v_filter = &filter_coefs.v_filter_coef_4tap[0]; resizer_set_filters(res, h_filter, v_filter); } /* * resizer_set_dst_size - Setup the output height and width * @res: Device context. * @width: Output width. * @height: Output height. * * Width : * The value must be EVEN. * * Height: * The number of bytes written to SDRAM must be * a multiple of 16-bytes if the vertical resizing factor * is greater than 1x (upsizing) */ static void resizer_set_output_size(struct isp_res_device *res, u32 width, u32 height) { struct isp_device *isp = to_isp_device(res); u32 rgval; rgval = (width << ISPRSZ_OUT_SIZE_HORZ_SHIFT) & ISPRSZ_OUT_SIZE_HORZ_MASK; rgval |= (height << ISPRSZ_OUT_SIZE_VERT_SHIFT) & ISPRSZ_OUT_SIZE_VERT_MASK; isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_OUT_SIZE); } /* * resizer_set_output_offset - Setup memory offset for the output lines. * @res: Device context. * @offset: Memory offset. * * The 5 LSBs are forced to be zeros by the hardware to align on a 32-byte * boundary; the 5 LSBs are read-only. 
For optimal use of SDRAM bandwidth, * the SDRAM line offset must be set on a 256-byte boundary */ static void resizer_set_output_offset(struct isp_res_device *res, u32 offset) { struct isp_device *isp = to_isp_device(res); isp_reg_writel(isp, offset, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTOFF); } /* * resizer_set_start - Setup vertical and horizontal start position * @res: Device context. * @left: Horizontal start position. * @top: Vertical start position. * * Vertical start line: * This field makes sense only when the resizer obtains its input * from the preview engine/CCDC * * Horizontal start pixel: * Pixels are coded on 16 bits for YUV and 8 bits for color separate data. * When the resizer gets its input from SDRAM, this field must be set * to <= 15 for YUV 16-bit data and <= 31 for 8-bit color separate data */ static void resizer_set_start(struct isp_res_device *res, u32 left, u32 top) { struct isp_device *isp = to_isp_device(res); u32 rgval; rgval = (left << ISPRSZ_IN_START_HORZ_ST_SHIFT) & ISPRSZ_IN_START_HORZ_ST_MASK; rgval |= (top << ISPRSZ_IN_START_VERT_ST_SHIFT) & ISPRSZ_IN_START_VERT_ST_MASK; isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_START); } /* * resizer_set_input_size - Setup the input size * @res: Device context. * @width: The range is 0 to 4095 pixels * @height: The range is 0 to 4095 lines */ static void resizer_set_input_size(struct isp_res_device *res, u32 width, u32 height) { struct isp_device *isp = to_isp_device(res); u32 rgval; rgval = (width << ISPRSZ_IN_SIZE_HORZ_SHIFT) & ISPRSZ_IN_SIZE_HORZ_MASK; rgval |= (height << ISPRSZ_IN_SIZE_VERT_SHIFT) & ISPRSZ_IN_SIZE_VERT_MASK; isp_reg_writel(isp, rgval, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_IN_SIZE); } /* * resizer_set_src_offs - Setup the memory offset for the input lines * @res: Device context. * @offset: Memory offset. * * The 5 LSBs are forced to be zeros by the hardware to align on a 32-byte * boundary; the 5 LSBs are read-only. 
This field must be programmed to be * 0x0 if the resizer input is from preview engine/CCDC. */ static void resizer_set_input_offset(struct isp_res_device *res, u32 offset) { struct isp_device *isp = to_isp_device(res); isp_reg_writel(isp, offset, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INOFF); } /* * resizer_set_intype - Input type select * @res: Device context. * @type: Pixel format type. */ static void resizer_set_intype(struct isp_res_device *res, enum resizer_colors_type type) { struct isp_device *isp = to_isp_device(res); if (type == RSZ_COLOR8) isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT, ISPRSZ_CNT_INPTYP); else isp_reg_clr(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_CNT, ISPRSZ_CNT_INPTYP); } /* * __resizer_set_inaddr - Helper function for set input address * @res : pointer to resizer private data structure * @addr: input address * return none */ static void __resizer_set_inaddr(struct isp_res_device *res, u32 addr) { struct isp_device *isp = to_isp_device(res); isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_INADD); } /* * The data rate at the horizontal resizer output must not exceed half the * functional clock or 100 MP/s, whichever is lower. According to the TRM * there's no similar requirement for the vertical resizer output. However * experience showed that vertical upscaling by 4 leads to SBL overflows (with * data rates at the resizer output exceeding 300 MP/s). Limiting the resizer * output data rate to the functional clock or 200 MP/s, whichever is lower, * seems to get rid of SBL overflows. * * The maximum data rate at the output of the horizontal resizer can thus be * computed with * * max intermediate rate <= L3 clock * input height / output height * max intermediate rate <= L3 clock / 2 * * The maximum data rate at the resizer input is then * * max input rate <= max intermediate rate * input width / output width * * where the input width and height are the resizer input crop rectangle size. 
 * The TRM doesn't clearly explain if that's a maximum instant data rate or a
 * maximum average data rate.
 */
void omap3isp_resizer_max_rate(struct isp_res_device *res,
			       unsigned int *max_rate)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity);
	const struct v4l2_mbus_framefmt *ofmt = &res->formats[RESZ_PAD_SOURCE];
	/* Hard cap from the comment above: L3 clock or 200 MP/s. */
	unsigned long limit = min(pipe->l3_ick, 200000000UL);
	unsigned long clock;

	/* Intermediate rate scaled through the vertical ratio (crop height /
	 * output height), then clamped to half the limit.
	 */
	clock = div_u64((u64)limit * res->crop.active.height, ofmt->height);
	clock = min(clock, limit / 2);
	/* Finally scale back to the resizer input through the horizontal
	 * ratio (crop width / output width).
	 */
	*max_rate = div_u64((u64)clock * res->crop.active.width, ofmt->width);
}

/*
 * When the resizer processes images from memory, the driver must slow down read
 * requests on the input to at least comply with the internal data rate
 * requirements. If the application real-time requirements can cope with slower
 * processing, the resizer can be slowed down even more to put less pressure on
 * the overall system.
 *
 * When the resizer processes images on the fly (either from the CCDC or the
 * preview module), the same data rate requirements apply but they can't be
 * enforced at the resizer level. The image input module (sensor, CCP2 or
 * preview module) must not provide image data faster than the resizer can
 * process.
 *
 * For live image pipelines, the data rate is set by the frame format, size and
 * rate. The sensor output frame rate must not exceed the maximum resizer data
 * rate.
 *
 * The resizer slows down read requests by inserting wait cycles in the SBL
The maximum number of 256-byte requests per second can be computed * as (the data rate is multiplied by 2 to convert from pixels per second to * bytes per second) * * request per second = data rate * 2 / 256 * cycles per request = cycles per second / requests per second * * The number of cycles per second is controlled by the L3 clock, leading to * * cycles per request = L3 frequency / 2 * 256 / data rate */ static void resizer_adjust_bandwidth(struct isp_res_device *res) { struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity); struct isp_device *isp = to_isp_device(res); unsigned long l3_ick = pipe->l3_ick; struct v4l2_fract *timeperframe; unsigned int cycles_per_frame; unsigned int requests_per_frame; unsigned int cycles_per_request; unsigned int granularity; unsigned int minimum; unsigned int maximum; unsigned int value; if (res->input != RESIZER_INPUT_MEMORY) { isp_reg_clr(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP, ISPSBL_SDR_REQ_RSZ_EXP_MASK); return; } switch (isp->revision) { case ISP_REVISION_1_0: case ISP_REVISION_2_0: default: granularity = 1024; break; case ISP_REVISION_15_0: granularity = 32; break; } /* Compute the minimum number of cycles per request, based on the * pipeline maximum data rate. This is an absolute lower bound if we * don't want SBL overflows, so round the value up. */ cycles_per_request = div_u64((u64)l3_ick / 2 * 256 + pipe->max_rate - 1, pipe->max_rate); minimum = DIV_ROUND_UP(cycles_per_request, granularity); /* Compute the maximum number of cycles per request, based on the * requested frame rate. This is a soft upper bound to achieve a frame * rate equal or higher than the requested value, so round the value * down. 
*/ timeperframe = &pipe->max_timeperframe; requests_per_frame = DIV_ROUND_UP(res->crop.active.width * 2, 256) * res->crop.active.height; cycles_per_frame = div_u64((u64)l3_ick * timeperframe->numerator, timeperframe->denominator); cycles_per_request = cycles_per_frame / requests_per_frame; maximum = cycles_per_request / granularity; value = max(minimum, maximum); dev_dbg(isp->dev, "%s: cycles per request = %u\n", __func__, value); isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP, ISPSBL_SDR_REQ_RSZ_EXP_MASK, value << ISPSBL_SDR_REQ_RSZ_EXP_SHIFT); } /* * omap3isp_resizer_busy - Checks if ISP resizer is busy. * * Returns busy field from ISPRSZ_PCR register. */ int omap3isp_resizer_busy(struct isp_res_device *res) { struct isp_device *isp = to_isp_device(res); return isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR) & ISPRSZ_PCR_BUSY; } /* * resizer_set_inaddr - Sets the memory address of the input frame. * @addr: 32bit memory address aligned on 32byte boundary. */ static void resizer_set_inaddr(struct isp_res_device *res, u32 addr) { res->addr_base = addr; /* This will handle crop settings in stream off state */ if (res->crop_offset) addr += res->crop_offset & ~0x1f; __resizer_set_inaddr(res, addr); } /* * Configures the memory address to which the output frame is written. * @addr: 32bit memory address aligned on 32byte boundary. * Note: For SBL efficiency reasons the address should be on a 256-byte * boundary. */ static void resizer_set_outaddr(struct isp_res_device *res, u32 addr) { struct isp_device *isp = to_isp_device(res); /* * Set output address. This needs to be in its own function * because it changes often. */ isp_reg_writel(isp, addr << ISPRSZ_SDR_OUTADD_ADDR_SHIFT, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_SDR_OUTADD); } /* * resizer_print_status - Prints the values of the resizer module registers. 
*/ #define RSZ_PRINT_REGISTER(isp, name)\ dev_dbg(isp->dev, "###RSZ " #name "=0x%08x\n", \ isp_reg_readl(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_##name)) static void resizer_print_status(struct isp_res_device *res) { struct isp_device *isp = to_isp_device(res); dev_dbg(isp->dev, "-------------Resizer Register dump----------\n"); RSZ_PRINT_REGISTER(isp, PCR); RSZ_PRINT_REGISTER(isp, CNT); RSZ_PRINT_REGISTER(isp, OUT_SIZE); RSZ_PRINT_REGISTER(isp, IN_START); RSZ_PRINT_REGISTER(isp, IN_SIZE); RSZ_PRINT_REGISTER(isp, SDR_INADD); RSZ_PRINT_REGISTER(isp, SDR_INOFF); RSZ_PRINT_REGISTER(isp, SDR_OUTADD); RSZ_PRINT_REGISTER(isp, SDR_OUTOFF); RSZ_PRINT_REGISTER(isp, YENH); dev_dbg(isp->dev, "--------------------------------------------\n"); } /* * resizer_calc_ratios - Helper function for calculating resizer ratios * @res: pointer to resizer private data structure * @input: input frame size * @output: output frame size * @ratio : return calculated ratios * return none * * The resizer uses a polyphase sample rate converter. The upsampling filter * has a fixed number of phases that depend on the resizing ratio. As the ratio * computation depends on the number of phases, we need to compute a first * approximation and then refine it. * * The input/output/ratio relationship is given by the OMAP34xx TRM: * * - 8-phase, 4-tap mode (RSZ = 64 ~ 512) * iw = (32 * sph + (ow - 1) * hrsz + 16) >> 8 + 7 * ih = (32 * spv + (oh - 1) * vrsz + 16) >> 8 + 4 * - 4-phase, 7-tap mode (RSZ = 513 ~ 1024) * iw = (64 * sph + (ow - 1) * hrsz + 32) >> 8 + 7 * ih = (64 * spv + (oh - 1) * vrsz + 32) >> 8 + 7 * * iw and ih are the input width and height after cropping. Those equations need * to be satisfied exactly for the resizer to work correctly. * * The equations can't be easily reverted, as the >> 8 operation is not linear. * In addition, not all input sizes can be achieved for a given output size. 
To * get the highest input size lower than or equal to the requested input size, * we need to compute the highest resizing ratio that satisfies the following * inequality (taking the 4-tap mode width equation as an example) * * iw >= (32 * sph + (ow - 1) * hrsz + 16) >> 8 - 7 * * (where iw is the requested input width) which can be rewritten as * * iw - 7 >= (32 * sph + (ow - 1) * hrsz + 16) >> 8 * (iw - 7) << 8 >= 32 * sph + (ow - 1) * hrsz + 16 - b * ((iw - 7) << 8) + b >= 32 * sph + (ow - 1) * hrsz + 16 * * where b is the value of the 8 least significant bits of the right hand side * expression of the last inequality. The highest resizing ratio value will be * achieved when b is equal to its maximum value of 255. That resizing ratio * value will still satisfy the original inequality, as b will disappear when * the expression will be shifted right by 8. * * The reverted equations thus become * * - 8-phase, 4-tap mode * hrsz = ((iw - 7) * 256 + 255 - 16 - 32 * sph) / (ow - 1) * vrsz = ((ih - 4) * 256 + 255 - 16 - 32 * spv) / (oh - 1) * - 4-phase, 7-tap mode * hrsz = ((iw - 7) * 256 + 255 - 32 - 64 * sph) / (ow - 1) * vrsz = ((ih - 7) * 256 + 255 - 32 - 64 * spv) / (oh - 1) * * The ratios are integer values, and are rounded down to ensure that the * cropped input size is not bigger than the uncropped input size. * * As the number of phases/taps, used to select the correct equations to compute * the ratio, depends on the ratio, we start with the 4-tap mode equations to * compute an approximation of the ratio, and switch to the 7-tap mode equations * if the approximation is higher than the ratio threshold. * * As the 7-tap mode equations will return a ratio smaller than or equal to the * 4-tap mode equations, the resulting ratio could become lower than or equal to * the ratio threshold. This 'equations loop' isn't an issue as long as the * correct equations are used to compute the final input size. 
Starting with the * 4-tap mode equations ensure that, in case of values resulting in a 'ratio * loop', the smallest of the ratio values will be used, never exceeding the * requested input size. * * We first clamp the output size according to the hardware capability to avoid * auto-cropping the input more than required to satisfy the TRM equations. The * minimum output size is achieved with a scaling factor of 1024. It is thus * computed using the 7-tap equations. * * min ow = ((iw - 7) * 256 - 32 - 64 * sph) / 1024 + 1 * min oh = ((ih - 7) * 256 - 32 - 64 * spv) / 1024 + 1 * * Similarly, the maximum output size is achieved with a scaling factor of 64 * and computed using the 4-tap equations. * * max ow = ((iw - 7) * 256 + 255 - 16 - 32 * sph) / 64 + 1 * max oh = ((ih - 4) * 256 + 255 - 16 - 32 * spv) / 64 + 1 * * The additional +255 term compensates for the round down operation performed * by the TRM equations when shifting the value right by 8 bits. * * We then compute and clamp the ratios (x1/4 ~ x4). Clamping the output size to * the maximum value guarantees that the ratio value will never be smaller than * the minimum, but it could still slightly exceed the maximum. Clamping the * ratio will thus result in a resizing factor slightly larger than the * requested value. * * To accommodate that, and make sure the TRM equations are satisfied exactly, we * compute the input crop rectangle as the last step. * * As if the situation wasn't complex enough, the maximum output width depends * on the vertical resizing ratio. Fortunately, the output height doesn't * depend on the horizontal resizing ratio. We can then start by computing the * output height and the vertical ratio, and then move to computing the output * width and the horizontal ratio. 
*/ static void resizer_calc_ratios(struct isp_res_device *res, struct v4l2_rect *input, struct v4l2_mbus_framefmt *output, struct resizer_ratio *ratio) { struct isp_device *isp = to_isp_device(res); const unsigned int spv = DEFAULT_PHASE; const unsigned int sph = DEFAULT_PHASE; unsigned int upscaled_width; unsigned int upscaled_height; unsigned int min_width; unsigned int min_height; unsigned int max_width; unsigned int max_height; unsigned int width_alignment; unsigned int width; unsigned int height; /* * Clamp the output height based on the hardware capabilities and * compute the vertical resizing ratio. */ min_height = ((input->height - 7) * 256 - 32 - 64 * spv) / 1024 + 1; min_height = max_t(unsigned int, min_height, MIN_OUT_HEIGHT); max_height = ((input->height - 4) * 256 + 255 - 16 - 32 * spv) / 64 + 1; max_height = min_t(unsigned int, max_height, MAX_OUT_HEIGHT); output->height = clamp(output->height, min_height, max_height); ratio->vert = ((input->height - 4) * 256 + 255 - 16 - 32 * spv) / (output->height - 1); if (ratio->vert > MID_RESIZE_VALUE) ratio->vert = ((input->height - 7) * 256 + 255 - 32 - 64 * spv) / (output->height - 1); ratio->vert = clamp_t(unsigned int, ratio->vert, MIN_RESIZE_VALUE, MAX_RESIZE_VALUE); if (ratio->vert <= MID_RESIZE_VALUE) { upscaled_height = (output->height - 1) * ratio->vert + 32 * spv + 16; height = (upscaled_height >> 8) + 4; } else { upscaled_height = (output->height - 1) * ratio->vert + 64 * spv + 32; height = (upscaled_height >> 8) + 7; } /* * Compute the minimum and maximum output widths based on the hardware * capabilities. The maximum depends on the vertical resizing ratio. 
*/ min_width = ((input->width - 7) * 256 - 32 - 64 * sph) / 1024 + 1; min_width = max_t(unsigned int, min_width, MIN_OUT_WIDTH); if (ratio->vert <= MID_RESIZE_VALUE) { switch (isp->revision) { case ISP_REVISION_1_0: max_width = MAX_4TAP_OUT_WIDTH_ES1; break; case ISP_REVISION_2_0: default: max_width = MAX_4TAP_OUT_WIDTH_ES2; break; case ISP_REVISION_15_0: max_width = MAX_4TAP_OUT_WIDTH_3630; break; } } else { switch (isp->revision) { case ISP_REVISION_1_0: max_width = MAX_7TAP_OUT_WIDTH_ES1; break; case ISP_REVISION_2_0: default: max_width = MAX_7TAP_OUT_WIDTH_ES2; break; case ISP_REVISION_15_0: max_width = MAX_7TAP_OUT_WIDTH_3630; break; } } max_width = min(((input->width - 7) * 256 + 255 - 16 - 32 * sph) / 64 + 1, max_width); /* * The output width must be even, and must be a multiple of 16 bytes * when upscaling vertically. Clamp the output width to the valid range. * Take the alignment into account (the maximum width in 7-tap mode on * ES2 isn't a multiple of 8) and align the result up to make sure it * won't be smaller than the minimum. */ width_alignment = ratio->vert < 256 ? 8 : 2; output->width = clamp(output->width, min_width, max_width & ~(width_alignment - 1)); output->width = ALIGN(output->width, width_alignment); ratio->horz = ((input->width - 7) * 256 + 255 - 16 - 32 * sph) / (output->width - 1); if (ratio->horz > MID_RESIZE_VALUE) ratio->horz = ((input->width - 7) * 256 + 255 - 32 - 64 * sph) / (output->width - 1); ratio->horz = clamp_t(unsigned int, ratio->horz, MIN_RESIZE_VALUE, MAX_RESIZE_VALUE); if (ratio->horz <= MID_RESIZE_VALUE) { upscaled_width = (output->width - 1) * ratio->horz + 32 * sph + 16; width = (upscaled_width >> 8) + 7; } else { upscaled_width = (output->width - 1) * ratio->horz + 64 * sph + 32; width = (upscaled_width >> 8) + 7; } /* Center the new crop rectangle. 
*/ input->left += (input->width - width) / 2; input->top += (input->height - height) / 2; input->width = width; input->height = height; } /* * resizer_set_crop_params - Setup hardware with cropping parameters * @res : resizer private structure * @input : format on sink pad * @output : format on source pad * return none */ static void resizer_set_crop_params(struct isp_res_device *res, const struct v4l2_mbus_framefmt *input, const struct v4l2_mbus_framefmt *output) { resizer_set_ratio(res, &res->ratio); /* Set chrominance horizontal algorithm */ if (res->ratio.horz >= RESIZE_DIVISOR) resizer_set_bilinear(res, RSZ_THE_SAME); else resizer_set_bilinear(res, RSZ_BILINEAR); resizer_adjust_bandwidth(res); if (res->input == RESIZER_INPUT_MEMORY) { /* Calculate additional offset for crop */ res->crop_offset = (res->crop.active.top * input->width + res->crop.active.left) * 2; /* * Write lowest 4 bits of horizontal pixel offset (in pixels), * vertical start must be 0. */ resizer_set_start(res, (res->crop_offset / 2) & 0xf, 0); /* * Set start (read) address for cropping, in bytes. * Lowest 5 bits must be zero. */ __resizer_set_inaddr(res, res->addr_base + (res->crop_offset & ~0x1f)); } else { /* * Set vertical start line and horizontal starting pixel. * If the input is from CCDC/PREV, horizontal start field is * in bytes (twice number of pixels). 
*/ resizer_set_start(res, res->crop.active.left * 2, res->crop.active.top); /* Input address and offset must be 0 for preview/ccdc input */ __resizer_set_inaddr(res, 0); resizer_set_input_offset(res, 0); } /* Set the input size */ resizer_set_input_size(res, res->crop.active.width, res->crop.active.height); } static void resizer_configure(struct isp_res_device *res) { struct v4l2_mbus_framefmt *informat, *outformat; struct resizer_luma_yenh luma = {0, 0, 0, 0}; resizer_set_source(res, res->input); informat = &res->formats[RESZ_PAD_SINK]; outformat = &res->formats[RESZ_PAD_SOURCE]; /* RESZ_PAD_SINK */ if (res->input == RESIZER_INPUT_VP) resizer_set_input_offset(res, 0); else resizer_set_input_offset(res, ALIGN(informat->width, 0x10) * 2); /* YUV422 interleaved, default phase, no luma enhancement */ resizer_set_intype(res, RSZ_YUV422); resizer_set_ycpos(res, informat->code); resizer_set_phase(res, DEFAULT_PHASE, DEFAULT_PHASE); resizer_set_luma(res, &luma); /* RESZ_PAD_SOURCE */ resizer_set_output_offset(res, ALIGN(outformat->width * 2, 32)); resizer_set_output_size(res, outformat->width, outformat->height); resizer_set_crop_params(res, informat, outformat); } /* ----------------------------------------------------------------------------- * Interrupt handling */ static void resizer_enable_oneshot(struct isp_res_device *res) { struct isp_device *isp = to_isp_device(res); isp_reg_set(isp, OMAP3_ISP_IOMEM_RESZ, ISPRSZ_PCR, ISPRSZ_PCR_ENABLE | ISPRSZ_PCR_ONESHOT); } void omap3isp_resizer_isr_frame_sync(struct isp_res_device *res) { /* * If ISP_VIDEO_DMAQUEUE_QUEUED is set, DMA queue had an underrun * condition, the module was paused and now we have a buffer queued * on the output again. Restart the pipeline if running in continuous * mode. 
 */
	if (res->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
	    res->video_out.dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED) {
		resizer_enable_oneshot(res);
		isp_video_dmaqueue_flags_clr(&res->video_out);
	}
}

/*
 * resizer_isr_buffer - Complete the current buffer(s) and queue the next ones
 * @res: Device context.
 *
 * Called from the resizer interrupt handler at end of frame.  Completes the
 * output buffer (and the input buffer in memory-to-memory mode), programs the
 * next DMA addresses and restarts the hardware when appropriate.
 */
static void resizer_isr_buffer(struct isp_res_device *res)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&res->subdev.entity);
	struct isp_buffer *buffer;
	int restart = 0;

	if (res->state == ISP_PIPELINE_STREAM_STOPPED)
		return;

	/* Complete the output buffer and, if reading from memory, the input
	 * buffer.
	 */
	buffer = omap3isp_video_buffer_next(&res->video_out);
	if (buffer != NULL) {
		/* A new output buffer is available: program it and remember
		 * that the resizer can be restarted.
		 */
		resizer_set_outaddr(res, buffer->dma);
		restart = 1;
	}

	pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;

	if (res->input == RESIZER_INPUT_MEMORY) {
		buffer = omap3isp_video_buffer_next(&res->video_in);
		if (buffer != NULL)
			resizer_set_inaddr(res, buffer->dma);
		pipe->state |= ISP_PIPELINE_IDLE_INPUT;
	}

	if (res->state == ISP_PIPELINE_STREAM_SINGLESHOT) {
		/* In single-shot mode the whole pipeline is restarted at once
		 * when every module is ready.
		 */
		if (isp_pipeline_ready(pipe))
			omap3isp_pipeline_set_stream(pipe,
						ISP_PIPELINE_STREAM_SINGLESHOT);
	} else {
		/* If an underrun occurs, the video queue operation handler will
		 * restart the resizer. Otherwise restart it immediately.
		 */
		if (restart)
			resizer_enable_oneshot(res);
	}
}

/*
 * omap3isp_resizer_isr - ISP resizer interrupt handler
 *
 * Manage the resizer video buffers and configure shadowed and busy-locked
 * registers.
 */
void omap3isp_resizer_isr(struct isp_res_device *res)
{
	struct v4l2_mbus_framefmt *informat, *outformat;
	unsigned long flags;

	/* Bail out (and wake the stop path) if a stop request is pending. */
	if (omap3isp_module_sync_is_stopping(&res->wait, &res->stopping))
		return;

	spin_lock_irqsave(&res->lock, flags);

	/* Apply a crop rectangle deferred by resizer_set_selection() while
	 * streaming was on: shadowed registers are reprogrammed here, between
	 * frames.
	 */
	if (res->applycrop) {
		outformat = __resizer_get_format(res, NULL, RESZ_PAD_SOURCE,
					      V4L2_SUBDEV_FORMAT_ACTIVE);
		informat = __resizer_get_format(res, NULL, RESZ_PAD_SINK,
					      V4L2_SUBDEV_FORMAT_ACTIVE);
		resizer_set_crop_params(res, informat, outformat);
		res->applycrop = 0;
	}

	spin_unlock_irqrestore(&res->lock, flags);

	resizer_isr_buffer(res);
}

/* -----------------------------------------------------------------------------
 * ISP video operations
 */

/*
 * resizer_video_queue - Program the DMA address of a newly queued buffer
 * @video: ISP video node the buffer was queued on
 * @buffer: the queued buffer
 */
static int resizer_video_queue(struct isp_video *video,
			       struct isp_buffer *buffer)
{
	struct isp_res_device *res = &video->isp->isp_res;

	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		resizer_set_inaddr(res, buffer->dma);

	/*
	 * We now have a buffer queued on the output. Despite what the
	 * TRM says, the resizer can't be restarted immediately.
	 * Enabling it in one shot mode in the middle of a frame (or at
	 * least asynchronously to the frame) results in the output
	 * being shifted randomly left/right and up/down, as if the
	 * hardware didn't synchronize itself to the beginning of the
	 * frame correctly.
	 *
	 * Restart the resizer on the next sync interrupt if running in
	 * continuous mode or when starting the stream.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		resizer_set_outaddr(res, buffer->dma);

	return 0;
}

static const struct isp_video_operations resizer_video_ops = {
	.queue = resizer_video_queue,
};

/* -----------------------------------------------------------------------------
 * V4L2 subdev operations
 */

/*
 * resizer_set_stream - Enable/Disable streaming on resizer subdev
 * @sd: ISP resizer V4L2 subdev
 * @enable: 1 == Enable, 0 == Disable
 *
 * The resizer hardware can't be enabled without a memory buffer to write to.
* As the s_stream operation is called in response to a STREAMON call without * any buffer queued yet, just update the state field and return immediately. * The resizer will be enabled in resizer_video_queue(). */ static int resizer_set_stream(struct v4l2_subdev *sd, int enable) { struct isp_res_device *res = v4l2_get_subdevdata(sd); struct isp_video *video_out = &res->video_out; struct isp_device *isp = to_isp_device(res); struct device *dev = to_device(res); if (res->state == ISP_PIPELINE_STREAM_STOPPED) { if (enable == ISP_PIPELINE_STREAM_STOPPED) return 0; omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_RESIZER); resizer_configure(res); resizer_print_status(res); } switch (enable) { case ISP_PIPELINE_STREAM_CONTINUOUS: omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_WRITE); if (video_out->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED) { resizer_enable_oneshot(res); isp_video_dmaqueue_flags_clr(video_out); } break; case ISP_PIPELINE_STREAM_SINGLESHOT: if (res->input == RESIZER_INPUT_MEMORY) omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_READ); omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_RESIZER_WRITE); resizer_enable_oneshot(res); break; case ISP_PIPELINE_STREAM_STOPPED: if (omap3isp_module_sync_idle(&sd->entity, &res->wait, &res->stopping)) dev_dbg(dev, "%s: module stop timeout.\n", sd->name); omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_RESIZER_READ | OMAP3_ISP_SBL_RESIZER_WRITE); omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_RESIZER); isp_video_dmaqueue_flags_clr(video_out); break; } res->state = enable; return 0; } /* * resizer_try_crop - mangles crop parameters. */ static void resizer_try_crop(const struct v4l2_mbus_framefmt *sink, const struct v4l2_mbus_framefmt *source, struct v4l2_rect *crop) { const unsigned int spv = DEFAULT_PHASE; const unsigned int sph = DEFAULT_PHASE; /* Crop rectangle is constrained by the output size so that zoom ratio * cannot exceed +/-4.0. 
 */
	/* NOTE(review): the fixed-point bounds below come from the hardware
	 * resizing equations (phase/ratio arithmetic in 1/256 units); the
	 * trailing +7/+4 terms presumably account for the filter tap margins
	 * — confirm against the OMAP3 TRM before changing any constant.
	 */
	unsigned int min_width =
		((32 * sph + (source->width - 1) * 64 + 16) >> 8) + 7;
	unsigned int min_height =
		((32 * spv + (source->height - 1) * 64 + 16) >> 8) + 4;
	unsigned int max_width =
		((64 * sph + (source->width - 1) * 1024 + 32) >> 8) + 7;
	unsigned int max_height =
		((64 * spv + (source->height - 1) * 1024 + 32) >> 8) + 7;

	crop->width = clamp_t(u32, crop->width, min_width, max_width);
	crop->height = clamp_t(u32, crop->height, min_height, max_height);

	/* Crop can not go beyond of the input rectangle */
	crop->left = clamp_t(u32, crop->left, 0, sink->width - MIN_IN_WIDTH);
	crop->width = clamp_t(u32, crop->width, MIN_IN_WIDTH,
			      sink->width - crop->left);
	crop->top = clamp_t(u32, crop->top, 0, sink->height - MIN_IN_HEIGHT);
	crop->height = clamp_t(u32, crop->height, MIN_IN_HEIGHT,
			       sink->height - crop->top);
}

/*
 * resizer_get_selection - Retrieve a selection rectangle on a pad
 * @sd: ISP resizer V4L2 subdevice
 * @cfg: V4L2 subdev pad configuration
 * @sel: Selection rectangle
 *
 * The only supported rectangles are the crop rectangles on the sink pad.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int resizer_get_selection(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_selection *sel)
{
	struct isp_res_device *res = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format_source;
	struct v4l2_mbus_framefmt *format_sink;
	struct resizer_ratio ratio;

	/* Crop rectangles are only supported on the sink pad. */
	if (sel->pad != RESZ_PAD_SINK)
		return -EINVAL;

	format_sink = __resizer_get_format(res, sd_state, RESZ_PAD_SINK,
					   sel->which);
	format_source = __resizer_get_format(res, sd_state, RESZ_PAD_SOURCE,
					     sel->which);

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP_BOUNDS:
		/* Start from the widest possible rectangle and let
		 * resizer_try_crop()/resizer_calc_ratios() shrink it to the
		 * hardware limits.
		 */
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = INT_MAX;
		sel->r.height = INT_MAX;

		resizer_try_crop(format_sink, format_source, &sel->r);
		resizer_calc_ratios(res, &sel->r, format_source, &ratio);
		break;

	case V4L2_SEL_TGT_CROP:
		sel->r = *__resizer_get_crop(res, sd_state, sel->which);
		resizer_calc_ratios(res, &sel->r, format_source, &ratio);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * resizer_set_selection - Set a selection rectangle on a pad
 * @sd: ISP resizer V4L2 subdevice
 * @cfg: V4L2 subdev pad configuration
 * @sel: Selection rectangle
 *
 * The only supported rectangle is the actual crop rectangle on the sink pad.
 *
 * FIXME: This function currently behaves as if the KEEP_CONFIG selection flag
 * was always set.
 *
 * Return 0 on success or a negative error code otherwise.
*/ static int resizer_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) { struct isp_res_device *res = v4l2_get_subdevdata(sd); struct isp_device *isp = to_isp_device(res); const struct v4l2_mbus_framefmt *format_sink; struct v4l2_mbus_framefmt format_source; struct resizer_ratio ratio; unsigned long flags; if (sel->target != V4L2_SEL_TGT_CROP || sel->pad != RESZ_PAD_SINK) return -EINVAL; format_sink = __resizer_get_format(res, sd_state, RESZ_PAD_SINK, sel->which); format_source = *__resizer_get_format(res, sd_state, RESZ_PAD_SOURCE, sel->which); dev_dbg(isp->dev, "%s(%s): req %ux%u -> (%d,%d)/%ux%u -> %ux%u\n", __func__, sel->which == V4L2_SUBDEV_FORMAT_TRY ? "try" : "act", format_sink->width, format_sink->height, sel->r.left, sel->r.top, sel->r.width, sel->r.height, format_source.width, format_source.height); /* Clamp the crop rectangle to the bounds, and then mangle it further to * fulfill the TRM equations. Store the clamped but otherwise unmangled * rectangle to avoid cropping the input multiple times: when an * application sets the output format, the current crop rectangle is * mangled during crop rectangle computation, which would lead to a new, * smaller input crop rectangle every time the output size is set if we * stored the mangled rectangle. */ resizer_try_crop(format_sink, &format_source, &sel->r); *__resizer_get_crop(res, sd_state, sel->which) = sel->r; resizer_calc_ratios(res, &sel->r, &format_source, &ratio); dev_dbg(isp->dev, "%s(%s): got %ux%u -> (%d,%d)/%ux%u -> %ux%u\n", __func__, sel->which == V4L2_SUBDEV_FORMAT_TRY ? "try" : "act", format_sink->width, format_sink->height, sel->r.left, sel->r.top, sel->r.width, sel->r.height, format_source.width, format_source.height); if (sel->which == V4L2_SUBDEV_FORMAT_TRY) { *__resizer_get_format(res, sd_state, RESZ_PAD_SOURCE, sel->which) = format_source; return 0; } /* Update the source format, resizing ratios and crop rectangle. 
If * streaming is on the IRQ handler will reprogram the resizer after the * current frame. We thus we need to protect against race conditions. */ spin_lock_irqsave(&res->lock, flags); *__resizer_get_format(res, sd_state, RESZ_PAD_SOURCE, sel->which) = format_source; res->ratio = ratio; res->crop.active = sel->r; if (res->state != ISP_PIPELINE_STREAM_STOPPED) res->applycrop = 1; spin_unlock_irqrestore(&res->lock, flags); return 0; } /* resizer pixel formats */ static const unsigned int resizer_formats[] = { MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_YUYV8_1X16, }; static unsigned int resizer_max_in_width(struct isp_res_device *res) { struct isp_device *isp = to_isp_device(res); if (res->input == RESIZER_INPUT_MEMORY) { return MAX_IN_WIDTH_MEMORY_MODE; } else { if (isp->revision == ISP_REVISION_1_0) return MAX_IN_WIDTH_ONTHEFLY_MODE_ES1; else return MAX_IN_WIDTH_ONTHEFLY_MODE_ES2; } } /* * resizer_try_format - Handle try format by pad subdev method * @res : ISP resizer device * @cfg: V4L2 subdev pad configuration * @pad : pad num * @fmt : pointer to v4l2 format structure * @which : wanted subdev format */ static void resizer_try_format(struct isp_res_device *res, struct v4l2_subdev_state *sd_state, unsigned int pad, struct v4l2_mbus_framefmt *fmt, enum v4l2_subdev_format_whence which) { struct v4l2_mbus_framefmt *format; struct resizer_ratio ratio; struct v4l2_rect crop; switch (pad) { case RESZ_PAD_SINK: if (fmt->code != MEDIA_BUS_FMT_YUYV8_1X16 && fmt->code != MEDIA_BUS_FMT_UYVY8_1X16) fmt->code = MEDIA_BUS_FMT_YUYV8_1X16; fmt->width = clamp_t(u32, fmt->width, MIN_IN_WIDTH, resizer_max_in_width(res)); fmt->height = clamp_t(u32, fmt->height, MIN_IN_HEIGHT, MAX_IN_HEIGHT); break; case RESZ_PAD_SOURCE: format = __resizer_get_format(res, sd_state, RESZ_PAD_SINK, which); fmt->code = format->code; crop = *__resizer_get_crop(res, sd_state, which); resizer_calc_ratios(res, &crop, fmt, &ratio); break; } fmt->colorspace = V4L2_COLORSPACE_JPEG; fmt->field = V4L2_FIELD_NONE; } /* 
 * resizer_enum_mbus_code - Handle pixel format enumeration
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad configuration
 * @code : pointer to v4l2_subdev_mbus_code_enum structure
 * return -EINVAL or zero on success
 */
static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_mbus_code_enum *code)
{
	struct isp_res_device *res = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	if (code->pad == RESZ_PAD_SINK) {
		/* The sink pad accepts any code of the static list. */
		if (code->index >= ARRAY_SIZE(resizer_formats))
			return -EINVAL;

		code->code = resizer_formats[code->index];
	} else {
		/* The source pad only produces the current sink pad code. */
		if (code->index != 0)
			return -EINVAL;
		format = __resizer_get_format(res, sd_state, RESZ_PAD_SINK,
					      code->which);
		code->code = format->code;
	}

	return 0;
}

/*
 * resizer_enum_frame_size - Enumerate the supported frame sizes
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fse : pointer to v4l2_subdev_frame_size_enum structure
 *
 * The min/max sizes are probed by pushing a 1x1 and a maximal format through
 * resizer_try_format() and reading back the clamped result.
 * return -EINVAL or zero on success
 */
static int resizer_enum_frame_size(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *sd_state,
				   struct v4l2_subdev_frame_size_enum *fse)
{
	struct isp_res_device *res = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	/* Probe the minimum supported size. */
	format.code = fse->code;
	format.width = 1;
	format.height = 1;
	resizer_try_format(res, sd_state, fse->pad, &format, fse->which);
	fse->min_width = format.width;
	fse->min_height = format.height;

	if (format.code != fse->code)
		return -EINVAL;

	/* Probe the maximum supported size (-1 wraps to the largest u32). */
	format.code = fse->code;
	format.width = -1;
	format.height = -1;
	resizer_try_format(res, sd_state, fse->pad, &format, fse->which);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}

/*
 * resizer_get_format - Handle get format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad configuration
 * @fmt : pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int resizer_get_format(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_format *fmt)
{
	struct isp_res_device *res = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __resizer_get_format(res, sd_state, fmt->pad, fmt->which);
	if (format ==
NULL) return -EINVAL; fmt->format = *format; return 0; } /* * resizer_set_format - Handle set format by pads subdev method * @sd : pointer to v4l2 subdev structure * @cfg: V4L2 subdev pad configuration * @fmt : pointer to v4l2 subdev format structure * return -EINVAL or zero on success */ static int resizer_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *fmt) { struct isp_res_device *res = v4l2_get_subdevdata(sd); struct v4l2_mbus_framefmt *format; struct v4l2_rect *crop; format = __resizer_get_format(res, sd_state, fmt->pad, fmt->which); if (format == NULL) return -EINVAL; resizer_try_format(res, sd_state, fmt->pad, &fmt->format, fmt->which); *format = fmt->format; if (fmt->pad == RESZ_PAD_SINK) { /* reset crop rectangle */ crop = __resizer_get_crop(res, sd_state, fmt->which); crop->left = 0; crop->top = 0; crop->width = fmt->format.width; crop->height = fmt->format.height; /* Propagate the format from sink to source */ format = __resizer_get_format(res, sd_state, RESZ_PAD_SOURCE, fmt->which); *format = fmt->format; resizer_try_format(res, sd_state, RESZ_PAD_SOURCE, format, fmt->which); } if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { /* Compute and store the active crop rectangle and resizer * ratios. format already points to the source pad active * format. 
*/ res->crop.active = res->crop.request; resizer_calc_ratios(res, &res->crop.active, format, &res->ratio); } return 0; } static int resizer_link_validate(struct v4l2_subdev *sd, struct media_link *link, struct v4l2_subdev_format *source_fmt, struct v4l2_subdev_format *sink_fmt) { struct isp_res_device *res = v4l2_get_subdevdata(sd); struct isp_pipeline *pipe = to_isp_pipeline(&sd->entity); omap3isp_resizer_max_rate(res, &pipe->max_rate); return v4l2_subdev_link_validate_default(sd, link, source_fmt, sink_fmt); } /* * resizer_init_formats - Initialize formats on all pads * @sd: ISP resizer V4L2 subdevice * @fh: V4L2 subdev file handle * * Initialize all pad formats with default values. If fh is not NULL, try * formats are initialized on the file handle. Otherwise active formats are * initialized on the device. */ static int resizer_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct v4l2_subdev_format format; memset(&format, 0, sizeof(format)); format.pad = RESZ_PAD_SINK; format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; format.format.code = MEDIA_BUS_FMT_YUYV8_1X16; format.format.width = 4096; format.format.height = 4096; resizer_set_format(sd, fh ? 
fh->state : NULL, &format); return 0; } /* subdev video operations */ static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = { .s_stream = resizer_set_stream, }; /* subdev pad operations */ static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = { .enum_mbus_code = resizer_enum_mbus_code, .enum_frame_size = resizer_enum_frame_size, .get_fmt = resizer_get_format, .set_fmt = resizer_set_format, .get_selection = resizer_get_selection, .set_selection = resizer_set_selection, .link_validate = resizer_link_validate, }; /* subdev operations */ static const struct v4l2_subdev_ops resizer_v4l2_ops = { .video = &resizer_v4l2_video_ops, .pad = &resizer_v4l2_pad_ops, }; /* subdev internal operations */ static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = { .open = resizer_init_formats, }; /* ----------------------------------------------------------------------------- * Media entity operations */ /* * resizer_link_setup - Setup resizer connections. * @entity : Pointer to media entity structure * @local : Pointer to local pad array * @remote : Pointer to remote pad array * @flags : Link flags * return -EINVAL or zero on success */ static int resizer_link_setup(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct isp_res_device *res = v4l2_get_subdevdata(sd); unsigned int index = local->index; /* FIXME: this is actually a hack! 
*/ if (is_media_entity_v4l2_subdev(remote->entity)) index |= 2 << 16; switch (index) { case RESZ_PAD_SINK: /* read from memory */ if (flags & MEDIA_LNK_FL_ENABLED) { if (res->input == RESIZER_INPUT_VP) return -EBUSY; res->input = RESIZER_INPUT_MEMORY; } else { if (res->input == RESIZER_INPUT_MEMORY) res->input = RESIZER_INPUT_NONE; } break; case RESZ_PAD_SINK | 2 << 16: /* read from ccdc or previewer */ if (flags & MEDIA_LNK_FL_ENABLED) { if (res->input == RESIZER_INPUT_MEMORY) return -EBUSY; res->input = RESIZER_INPUT_VP; } else { if (res->input == RESIZER_INPUT_VP) res->input = RESIZER_INPUT_NONE; } break; case RESZ_PAD_SOURCE: /* resizer always write to memory */ break; default: return -EINVAL; } return 0; } /* media operations */ static const struct media_entity_operations resizer_media_ops = { .link_setup = resizer_link_setup, .link_validate = v4l2_subdev_link_validate, }; void omap3isp_resizer_unregister_entities(struct isp_res_device *res) { v4l2_device_unregister_subdev(&res->subdev); omap3isp_video_unregister(&res->video_in); omap3isp_video_unregister(&res->video_out); } int omap3isp_resizer_register_entities(struct isp_res_device *res, struct v4l2_device *vdev) { int ret; /* Register the subdev and video nodes. */ res->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &res->subdev); if (ret < 0) goto error; ret = omap3isp_video_register(&res->video_in, vdev); if (ret < 0) goto error; ret = omap3isp_video_register(&res->video_out, vdev); if (ret < 0) goto error; return 0; error: omap3isp_resizer_unregister_entities(res); return ret; } /* ----------------------------------------------------------------------------- * ISP resizer initialization and cleanup */ /* * resizer_init_entities - Initialize resizer subdev and media entity. 
* @res : Pointer to resizer device structure * return -ENOMEM or zero on success */ static int resizer_init_entities(struct isp_res_device *res) { struct v4l2_subdev *sd = &res->subdev; struct media_pad *pads = res->pads; struct media_entity *me = &sd->entity; int ret; res->input = RESIZER_INPUT_NONE; v4l2_subdev_init(sd, &resizer_v4l2_ops); sd->internal_ops = &resizer_v4l2_internal_ops; strscpy(sd->name, "OMAP3 ISP resizer", sizeof(sd->name)); sd->grp_id = 1 << 16; /* group ID for isp subdevs */ v4l2_set_subdevdata(sd, res); sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; pads[RESZ_PAD_SINK].flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; pads[RESZ_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; me->ops = &resizer_media_ops; ret = media_entity_pads_init(me, RESZ_PADS_NUM, pads); if (ret < 0) return ret; resizer_init_formats(sd, NULL); res->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; res->video_in.ops = &resizer_video_ops; res->video_in.isp = to_isp_device(res); res->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3; res->video_in.bpl_alignment = 32; res->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; res->video_out.ops = &resizer_video_ops; res->video_out.isp = to_isp_device(res); res->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3; res->video_out.bpl_alignment = 32; ret = omap3isp_video_init(&res->video_in, "resizer"); if (ret < 0) goto error_video_in; ret = omap3isp_video_init(&res->video_out, "resizer"); if (ret < 0) goto error_video_out; res->video_out.video.entity.flags |= MEDIA_ENT_FL_DEFAULT; return 0; error_video_out: omap3isp_video_cleanup(&res->video_in); error_video_in: media_entity_cleanup(&res->subdev.entity); return ret; } /* * isp_resizer_init - Resizer initialization. 
 * @isp : Pointer to ISP device
 * return -ENOMEM or zero on success
 */
int omap3isp_resizer_init(struct isp_device *isp)
{
	struct isp_res_device *res = &isp->isp_res;

	/* Synchronization primitives used by the stop path and the ISR. */
	init_waitqueue_head(&res->wait);
	atomic_set(&res->stopping, 0);
	spin_lock_init(&res->lock);

	return resizer_init_entities(res);
}

/*
 * omap3isp_resizer_cleanup - Release the resources acquired at init time
 * @isp : Pointer to ISP device
 */
void omap3isp_resizer_cleanup(struct isp_device *isp)
{
	struct isp_res_device *res = &isp->isp_res;

	omap3isp_video_cleanup(&res->video_in);
	omap3isp_video_cleanup(&res->video_out);
	media_entity_cleanup(&res->subdev.entity);
}
/* Source: linux-master, drivers/media/platform/ti/omap3isp/ispresizer.c
 * (end of ispresizer.c — ispvideo.c follows below)
 */
// SPDX-License-Identifier: GPL-2.0-only /* * ispvideo.c * * TI OMAP3 ISP - Generic video node * * Copyright (C) 2009-2010 Nokia Corporation * * Contacts: Laurent Pinchart <[email protected]> * Sakari Ailus <[email protected]> */ #include <linux/clk.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/scatterlist.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <media/v4l2-dev.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mc.h> #include <media/videobuf2-dma-contig.h> #include "ispvideo.h" #include "isp.h" /* ----------------------------------------------------------------------------- * Helper functions */ /* * NOTE: When adding new media bus codes, always remember to add * corresponding in-memory formats to the table below!!! */ static struct isp_format_info formats[] = { { MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8, V4L2_PIX_FMT_GREY, 8, 1, }, { MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8, V4L2_PIX_FMT_Y10, 10, 2, }, { MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8, V4L2_PIX_FMT_Y12, 12, 2, }, { MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 8, 1, }, { MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG8, 8, 1, }, { MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG8, 8, 1, }, { MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB8, 8, 1, }, { MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8, MEDIA_BUS_FMT_SBGGR10_1X10, 0, V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, }, { MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8, 
MEDIA_BUS_FMT_SGBRG10_1X10, 0, V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, }, { MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_1X10, 0, V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, }, { MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8, MEDIA_BUS_FMT_SRGGB10_1X10, 0, V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, }, { MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR10, 10, 2, }, { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG10, 10, 2, }, { MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG10, 10, 2, }, { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB10, 10, 2, }, { MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR12, 12, 2, }, { MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8, V4L2_PIX_FMT_SGBRG12, 12, 2, }, { MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8, V4L2_PIX_FMT_SGRBG12, 12, 2, }, { MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8, V4L2_PIX_FMT_SRGGB12, 12, 2, }, { MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16, 0, V4L2_PIX_FMT_UYVY, 16, 2, }, { MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16, 0, V4L2_PIX_FMT_YUYV, 16, 2, }, { MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8, MEDIA_BUS_FMT_UYVY8_2X8, 0, V4L2_PIX_FMT_UYVY, 8, 2, }, { MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8, MEDIA_BUS_FMT_YUYV8_2X8, 0, V4L2_PIX_FMT_YUYV, 8, 2, }, /* Empty entry to catch the unsupported pixel code (0) used by the 
CCDC * module and avoid NULL pointer dereferences. */ { 0, } }; const struct isp_format_info *omap3isp_video_format_info(u32 code) { unsigned int i; for (i = 0; i < ARRAY_SIZE(formats); ++i) { if (formats[i].code == code) return &formats[i]; } return NULL; } /* * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format * @video: ISP video instance * @mbus: v4l2_mbus_framefmt format (input) * @pix: v4l2_pix_format format (output) * * Fill the output pix structure with information from the input mbus format. * The bytesperline and sizeimage fields are computed from the requested bytes * per line value in the pix format and information from the video instance. * * Return the number of padding bytes at end of line. */ static unsigned int isp_video_mbus_to_pix(const struct isp_video *video, const struct v4l2_mbus_framefmt *mbus, struct v4l2_pix_format *pix) { unsigned int bpl = pix->bytesperline; unsigned int min_bpl; unsigned int i; memset(pix, 0, sizeof(*pix)); pix->width = mbus->width; pix->height = mbus->height; for (i = 0; i < ARRAY_SIZE(formats); ++i) { if (formats[i].code == mbus->code) break; } if (WARN_ON(i == ARRAY_SIZE(formats))) return 0; min_bpl = pix->width * formats[i].bpp; /* Clamp the requested bytes per line value. If the maximum bytes per * line value is zero, the module doesn't support user configurable line * sizes. Override the requested value with the minimum in that case. 
 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	/* Number of padding bytes at the end of each line. */
	return bpl - min_bpl;
}

/*
 * isp_video_pix_to_mbus - Convert v4l2_pix_format to v4l2_mbus_framefmt
 * @pix: v4l2_pix_format format (input)
 * @mbus: v4l2_mbus_framefmt format (output)
 *
 * Width, height, colorspace and field are copied verbatim; the media bus code
 * is looked up from the pixel format in the formats table.
 */
static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int i;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	/* Skip the last format in the loop so that it will be selected if no
	 * match is found.
	 */
	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
		if (formats[i].pixelformat == pix->pixelformat)
			break;
	}

	mbus->code = formats[i].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}

/*
 * isp_video_remote_subdev - Return the subdev connected to the video node
 * @video: ISP video node
 * @pad: optional output for the index of the remote pad
 *
 * Returns NULL when the video node has no enabled link to a V4L2 subdev.
 */
static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
	struct media_pad *remote;

	remote = media_pad_remote_pad_first(&video->pad);

	if (!remote || !is_media_entity_v4l2_subdev(remote->entity))
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/* Return a pointer to the ISP video instance at the far end of the pipeline.
*/ static int isp_video_get_graph_data(struct isp_video *video, struct isp_pipeline *pipe) { struct media_pipeline_entity_iter iter; struct media_entity *entity; struct isp_video *far_end = NULL; int ret; ret = media_pipeline_entity_iter_init(&pipe->pipe, &iter); if (ret) return ret; media_pipeline_for_each_entity(&pipe->pipe, &iter, entity) { struct isp_video *__video; media_entity_enum_set(&pipe->ent_enum, entity); if (far_end != NULL) continue; if (entity == &video->video.entity) continue; if (!is_media_entity_v4l2_video_device(entity)) continue; __video = to_isp_video(media_entity_to_video_device(entity)); if (__video->type != video->type) far_end = __video; } media_pipeline_entity_iter_cleanup(&iter); if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { pipe->input = far_end; pipe->output = video; } else { if (far_end == NULL) return -EPIPE; pipe->input = video; pipe->output = far_end; } return 0; } static int __isp_video_get_format(struct isp_video *video, struct v4l2_format *format) { struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct v4l2_subdev *subdev; u32 pad; int ret; subdev = isp_video_remote_subdev(video, &pad); if (subdev == NULL) return -EINVAL; fmt.pad = pad; mutex_lock(&video->mutex); ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); mutex_unlock(&video->mutex); if (ret) return ret; format->type = video->type; return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix); } static int isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh) { struct v4l2_format format; int ret; memcpy(&format, &vfh->format, sizeof(format)); ret = __isp_video_get_format(video, &format); if (ret < 0) return ret; if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat || vfh->format.fmt.pix.height != format.fmt.pix.height || vfh->format.fmt.pix.width != format.fmt.pix.width || vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline || vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage || 
vfh->format.fmt.pix.field != format.fmt.pix.field) return -EINVAL; return 0; } /* ----------------------------------------------------------------------------- * Video queue operations */ static int isp_video_queue_setup(struct vb2_queue *queue, unsigned int *count, unsigned int *num_planes, unsigned int sizes[], struct device *alloc_devs[]) { struct isp_video_fh *vfh = vb2_get_drv_priv(queue); struct isp_video *video = vfh->video; *num_planes = 1; sizes[0] = vfh->format.fmt.pix.sizeimage; if (sizes[0] == 0) return -EINVAL; *count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0])); return 0; } static int isp_video_buffer_prepare(struct vb2_buffer *buf) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf); struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue); struct isp_buffer *buffer = to_isp_buffer(vbuf); struct isp_video *video = vfh->video; dma_addr_t addr; /* Refuse to prepare the buffer is the video node has registered an * error. We don't need to take any lock here as the operation is * inherently racy. The authoritative check will be performed in the * queue handler, which can't return an error, this check is just a best * effort to notify userspace as early as possible. */ if (unlikely(video->error)) return -EIO; addr = vb2_dma_contig_plane_dma_addr(buf, 0); if (!IS_ALIGNED(addr, 32)) { dev_dbg(video->isp->dev, "Buffer address must be aligned to 32 bytes boundary.\n"); return -EINVAL; } vb2_set_plane_payload(&buffer->vb.vb2_buf, 0, vfh->format.fmt.pix.sizeimage); buffer->dma = addr; return 0; } /* * isp_video_buffer_queue - Add buffer to streaming queue * @buf: Video buffer * * In memory-to-memory mode, start streaming on the pipeline if buffers are * queued on both the input and the output, if the pipeline isn't already busy. * If the pipeline is busy, it will be restarted in the output module interrupt * handler. 
 */
static void isp_video_buffer_queue(struct vb2_buffer *buf)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(buf);
	struct isp_video_fh *vfh = vb2_get_drv_priv(buf->vb2_queue);
	struct isp_buffer *buffer = to_isp_buffer(vbuf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	spin_lock_irqsave(&video->irqlock, flags);

	/* A latched node-level error fails all subsequently queued buffers. */
	if (unlikely(video->error)) {
		vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&video->irqlock, flags);
		return;
	}

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->irqlist, &video->dmaqueue);

	spin_unlock_irqrestore(&video->irqlock, flags);

	/* Only the first buffer queued on an empty DMA queue can make the
	 * pipeline ready, so only then is the start check performed.
	 */
	if (empty) {
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		/* Start outside the pipe lock: set_stream may sleep/IO. */
		if (start)
			omap3isp_pipeline_set_stream(pipe,
						ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}

/*
 * omap3isp_video_return_buffers - Return all queued buffers to videobuf2
 * @video: ISP video object
 * @state: new state for the returned buffers
 *
 * Return all buffers queued on the video node to videobuf2 in the given state.
 * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error
 * when starting the stream, or VB2_BUF_STATE_ERROR otherwise.
 *
 * The function must be called with the video irqlock held.
*/ static void omap3isp_video_return_buffers(struct isp_video *video, enum vb2_buffer_state state) { while (!list_empty(&video->dmaqueue)) { struct isp_buffer *buf; buf = list_first_entry(&video->dmaqueue, struct isp_buffer, irqlist); list_del(&buf->irqlist); vb2_buffer_done(&buf->vb.vb2_buf, state); } } static int isp_video_start_streaming(struct vb2_queue *queue, unsigned int count) { struct isp_video_fh *vfh = vb2_get_drv_priv(queue); struct isp_video *video = vfh->video; struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); unsigned long flags; int ret; /* In sensor-to-memory mode, the stream can be started synchronously * to the stream on command. In memory-to-memory mode, it will be * started when buffers are queued on both the input and output. */ if (pipe->input) return 0; ret = omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_CONTINUOUS); if (ret < 0) { spin_lock_irqsave(&video->irqlock, flags); omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED); spin_unlock_irqrestore(&video->irqlock, flags); return ret; } spin_lock_irqsave(&video->irqlock, flags); if (list_empty(&video->dmaqueue)) video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; spin_unlock_irqrestore(&video->irqlock, flags); return 0; } static const struct vb2_ops isp_video_queue_ops = { .queue_setup = isp_video_queue_setup, .buf_prepare = isp_video_buffer_prepare, .buf_queue = isp_video_buffer_queue, .start_streaming = isp_video_start_streaming, }; /* * omap3isp_video_buffer_next - Complete the current buffer and return the next * @video: ISP video object * * Remove the current video buffer from the DMA queue and fill its timestamp and * field count before handing it back to videobuf2. * * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise. * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE. 
* * The DMA queue is expected to contain at least one buffer. * * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is * empty. */ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video) { struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); enum vb2_buffer_state vb_state; struct isp_buffer *buf; unsigned long flags; spin_lock_irqsave(&video->irqlock, flags); if (WARN_ON(list_empty(&video->dmaqueue))) { spin_unlock_irqrestore(&video->irqlock, flags); return NULL; } buf = list_first_entry(&video->dmaqueue, struct isp_buffer, irqlist); list_del(&buf->irqlist); spin_unlock_irqrestore(&video->irqlock, flags); buf->vb.vb2_buf.timestamp = ktime_get_ns(); /* Do frame number propagation only if this is the output video node. * Frame number either comes from the CSI receivers or it gets * incremented here if H3A is not active. * Note: There is no guarantee that the output buffer will finish * first, so the input number might lag behind by 1 in some cases. */ if (video == pipe->output && !pipe->do_propagation) buf->vb.sequence = atomic_inc_return(&pipe->frame_number); else buf->vb.sequence = atomic_read(&pipe->frame_number); if (pipe->field != V4L2_FIELD_NONE) buf->vb.sequence /= 2; buf->vb.field = pipe->field; /* Report pipeline errors to userspace on the capture device side. 
*/ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) { vb_state = VB2_BUF_STATE_ERROR; pipe->error = false; } else { vb_state = VB2_BUF_STATE_DONE; } vb2_buffer_done(&buf->vb.vb2_buf, vb_state); spin_lock_irqsave(&video->irqlock, flags); if (list_empty(&video->dmaqueue)) { enum isp_pipeline_state state; spin_unlock_irqrestore(&video->irqlock, flags); if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) state = ISP_PIPELINE_QUEUE_OUTPUT | ISP_PIPELINE_STREAM; else state = ISP_PIPELINE_QUEUE_INPUT | ISP_PIPELINE_STREAM; spin_lock_irqsave(&pipe->lock, flags); pipe->state &= ~state; if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS) video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; spin_unlock_irqrestore(&pipe->lock, flags); return NULL; } if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) { spin_lock(&pipe->lock); pipe->state &= ~ISP_PIPELINE_STREAM; spin_unlock(&pipe->lock); } buf = list_first_entry(&video->dmaqueue, struct isp_buffer, irqlist); spin_unlock_irqrestore(&video->irqlock, flags); return buf; } /* * omap3isp_video_cancel_stream - Cancel stream on a video node * @video: ISP video object * * Cancelling a stream returns all buffers queued on the video node to videobuf2 * in the erroneous state and makes sure no new buffer can be queued. */ void omap3isp_video_cancel_stream(struct isp_video *video) { unsigned long flags; spin_lock_irqsave(&video->irqlock, flags); omap3isp_video_return_buffers(video, VB2_BUF_STATE_ERROR); video->error = true; spin_unlock_irqrestore(&video->irqlock, flags); } /* * omap3isp_video_resume - Perform resume operation on the buffers * @video: ISP video object * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise * * This function is intended to be used on suspend/resume scenario. It * requests video queue layer to discard buffers marked as DONE if it's in * continuous mode and requests ISP modules to queue again the ACTIVE buffer * if there's any. 
*/ void omap3isp_video_resume(struct isp_video *video, int continuous) { struct isp_buffer *buf = NULL; if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { mutex_lock(&video->queue_lock); vb2_discard_done(video->queue); mutex_unlock(&video->queue_lock); } if (!list_empty(&video->dmaqueue)) { buf = list_first_entry(&video->dmaqueue, struct isp_buffer, irqlist); video->ops->queue(video, buf); video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED; } else { if (continuous) video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN; } } /* ----------------------------------------------------------------------------- * V4L2 ioctls */ static int isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct isp_video *video = video_drvdata(file); strscpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver)); strscpy(cap->card, video->video.name, sizeof(cap->card)); strscpy(cap->bus_info, "media", sizeof(cap->bus_info)); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS; return 0; } static int isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); if (format->type != video->type) return -EINVAL; mutex_lock(&video->mutex); *format = vfh->format; mutex_unlock(&video->mutex); return 0; } static int isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); struct v4l2_mbus_framefmt fmt; if (format->type != video->type) return -EINVAL; /* Replace unsupported field orders with sane defaults. */ switch (format->fmt.pix.field) { case V4L2_FIELD_NONE: /* Progressive is supported everywhere. */ break; case V4L2_FIELD_ALTERNATE: /* ALTERNATE is not supported on output nodes. 
*/ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) format->fmt.pix.field = V4L2_FIELD_NONE; break; case V4L2_FIELD_INTERLACED: /* The ISP has no concept of video standard, select the * top-bottom order when the unqualified interlaced order is * requested. */ format->fmt.pix.field = V4L2_FIELD_INTERLACED_TB; fallthrough; case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: /* Interlaced orders are only supported at the CCDC output. */ if (video != &video->isp->isp_ccdc.video_out) format->fmt.pix.field = V4L2_FIELD_NONE; break; case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_SEQ_TB: case V4L2_FIELD_SEQ_BT: default: /* All other field orders are currently unsupported, default to * progressive. */ format->fmt.pix.field = V4L2_FIELD_NONE; break; } /* Fill the bytesperline and sizeimage fields by converting to media bus * format and back to pixel format. */ isp_video_pix_to_mbus(&format->fmt.pix, &fmt); isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix); mutex_lock(&video->mutex); vfh->format = *format; mutex_unlock(&video->mutex); return 0; } static int isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format) { struct isp_video *video = video_drvdata(file); struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct v4l2_subdev *subdev; u32 pad; int ret; if (format->type != video->type) return -EINVAL; subdev = isp_video_remote_subdev(video, &pad); if (subdev == NULL) return -EINVAL; isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format); fmt.pad = pad; ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt); if (ret) return ret == -ENOIOCTLCMD ? 
-ENOTTY : ret; isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix); return 0; } static int isp_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel) { struct isp_video *video = video_drvdata(file); struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct v4l2_subdev *subdev; struct v4l2_subdev_selection sdsel = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .target = sel->target, }; u32 pad; int ret; switch (sel->target) { case V4L2_SEL_TGT_CROP: case V4L2_SEL_TGT_CROP_BOUNDS: case V4L2_SEL_TGT_CROP_DEFAULT: if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; break; case V4L2_SEL_TGT_COMPOSE: case V4L2_SEL_TGT_COMPOSE_BOUNDS: case V4L2_SEL_TGT_COMPOSE_DEFAULT: if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; break; default: return -EINVAL; } subdev = isp_video_remote_subdev(video, &pad); if (subdev == NULL) return -EINVAL; /* Try the get selection operation first and fallback to get format if not * implemented. */ sdsel.pad = pad; ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel); if (!ret) sel->r = sdsel.r; if (ret != -ENOIOCTLCMD) return ret; format.pad = pad; ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format); if (ret < 0) return ret == -ENOIOCTLCMD ? 
-ENOTTY : ret; sel->r.left = 0; sel->r.top = 0; sel->r.width = format.format.width; sel->r.height = format.format.height; return 0; } static int isp_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel) { struct isp_video *video = video_drvdata(file); struct v4l2_subdev *subdev; struct v4l2_subdev_selection sdsel = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .target = sel->target, .flags = sel->flags, .r = sel->r, }; u32 pad; int ret; switch (sel->target) { case V4L2_SEL_TGT_CROP: if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; break; case V4L2_SEL_TGT_COMPOSE: if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; break; default: return -EINVAL; } subdev = isp_video_remote_subdev(video, &pad); if (subdev == NULL) return -EINVAL; sdsel.pad = pad; mutex_lock(&video->mutex); ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel); mutex_unlock(&video->mutex); if (!ret) sel->r = sdsel.r; return ret == -ENOIOCTLCMD ? -ENOTTY : ret; } static int isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT || video->type != a->type) return -EINVAL; memset(a, 0, sizeof(*a)); a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; a->parm.output.capability = V4L2_CAP_TIMEPERFRAME; a->parm.output.timeperframe = vfh->timeperframe; return 0; } static int isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT || video->type != a->type) return -EINVAL; if (a->parm.output.timeperframe.denominator == 0) a->parm.output.timeperframe.denominator = 1; vfh->timeperframe = a->parm.output.timeperframe; return 0; } static int isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb) { struct isp_video_fh *vfh = 
to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); int ret; mutex_lock(&video->queue_lock); ret = vb2_reqbufs(&vfh->queue, rb); mutex_unlock(&video->queue_lock); return ret; } static int isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); int ret; mutex_lock(&video->queue_lock); ret = vb2_querybuf(&vfh->queue, b); mutex_unlock(&video->queue_lock); return ret; } static int isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); int ret; mutex_lock(&video->queue_lock); ret = vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b); mutex_unlock(&video->queue_lock); return ret; } static int isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); int ret; mutex_lock(&video->queue_lock); ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK); mutex_unlock(&video->queue_lock); return ret; } static int isp_video_check_external_subdevs(struct isp_video *video, struct isp_pipeline *pipe) { struct isp_device *isp = video->isp; struct media_entity *ents[] = { &isp->isp_csi2a.subdev.entity, &isp->isp_csi2c.subdev.entity, &isp->isp_ccp2.subdev.entity, &isp->isp_ccdc.subdev.entity }; struct media_pad *source_pad; struct media_entity *source = NULL; struct media_entity *sink; struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct v4l2_ext_controls ctrls; struct v4l2_ext_control ctrl; unsigned int i; int ret; /* Memory-to-memory pipelines have no external subdev. */ if (pipe->input != NULL) return 0; for (i = 0; i < ARRAY_SIZE(ents); i++) { /* Is the entity part of the pipeline? */ if (!media_entity_enum_test(&pipe->ent_enum, ents[i])) continue; /* ISP entities have always sink pad == 0. Find source. 
*/ source_pad = media_pad_remote_pad_first(&ents[i]->pads[0]); if (source_pad == NULL) continue; source = source_pad->entity; sink = ents[i]; break; } if (!source) { dev_warn(isp->dev, "can't find source, failing now\n"); return -EINVAL; } if (!is_media_entity_v4l2_subdev(source)) return 0; pipe->external = media_entity_to_v4l2_subdev(source); fmt.pad = source_pad->index; ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink), pad, get_fmt, NULL, &fmt); if (unlikely(ret < 0)) { dev_warn(isp->dev, "get_fmt returned null!\n"); return ret; } pipe->external_width = omap3isp_video_format_info(fmt.format.code)->width; memset(&ctrls, 0, sizeof(ctrls)); memset(&ctrl, 0, sizeof(ctrl)); ctrl.id = V4L2_CID_PIXEL_RATE; ctrls.count = 1; ctrls.controls = &ctrl; ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &video->video, NULL, &ctrls); if (ret < 0) { dev_warn(isp->dev, "no pixel rate control in subdev %s\n", pipe->external->name); return ret; } pipe->external_rate = ctrl.value64; if (media_entity_enum_test(&pipe->ent_enum, &isp->isp_ccdc.subdev.entity)) { unsigned int rate = UINT_MAX; /* * Check that maximum allowed CCDC pixel rate isn't * exceeded by the pixel rate. */ omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate); if (pipe->external_rate > rate) return -ENOSPC; } return 0; } /* * Stream management * * Every ISP pipeline has a single input and a single output. The input can be * either a sensor or a video node. The output is always a video node. * * As every pipeline has an output video node, the ISP video objects at the * pipeline output stores the pipeline state. It tracks the streaming state of * both the input and output, as well as the availability of buffers. * * In sensor-to-memory mode, frames are always available at the pipeline input. * Starting the sensor usually requires I2C transfers and must be done in * interruptible context. The pipeline is started and stopped synchronously * to the stream on/off commands. 
All modules in the pipeline will get their * subdev set stream handler called. The module at the end of the pipeline must * delay starting the hardware until buffers are available at its output. * * In memory-to-memory mode, starting/stopping the stream requires * synchronization between the input and output. ISP modules can't be stopped * in the middle of a frame, and at least some of the modules seem to become * busy as soon as they're started, even if they don't receive a frame start * event. For that reason frames need to be processed in single-shot mode. The * driver needs to wait until a frame is completely processed and written to * memory before restarting the pipeline for the next frame. Pipelined * processing might be possible but requires more testing. * * Stream start must be delayed until buffers are available at both the input * and output. The pipeline must be started in the vb2 queue callback with * the buffers queue spinlock held. The modules subdev set stream operation must * not sleep. */ static int isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); enum isp_pipeline_state state; struct isp_pipeline *pipe; unsigned long flags; int ret; if (type != video->type) return -EINVAL; mutex_lock(&video->stream_lock); /* Start streaming on the pipeline. No link touching an entity in the * pipeline can be activated or deactivated once streaming is started. */ pipe = to_isp_pipeline(&video->video.entity) ? : &video->pipe; ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev); if (ret) goto err_enum_init; /* TODO: Implement PM QoS */ pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]); pipe->max_rate = pipe->l3_ick; ret = video_device_pipeline_start(&video->video, &pipe->pipe); if (ret < 0) goto err_pipeline_start; /* Verify that the currently configured format matches the output of * the connected subdev. 
*/ ret = isp_video_check_format(video, vfh); if (ret < 0) goto err_check_format; video->bpl_padding = ret; video->bpl_value = vfh->format.fmt.pix.bytesperline; ret = isp_video_get_graph_data(video, pipe); if (ret < 0) goto err_check_format; if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT; else state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT; ret = isp_video_check_external_subdevs(video, pipe); if (ret < 0) goto err_check_format; pipe->error = false; spin_lock_irqsave(&pipe->lock, flags); pipe->state &= ~ISP_PIPELINE_STREAM; pipe->state |= state; spin_unlock_irqrestore(&pipe->lock, flags); /* Set the maximum time per frame as the value requested by userspace. * This is a soft limit that can be overridden if the hardware doesn't * support the request limit. */ if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) pipe->max_timeperframe = vfh->timeperframe; video->queue = &vfh->queue; INIT_LIST_HEAD(&video->dmaqueue); atomic_set(&pipe->frame_number, -1); pipe->field = vfh->format.fmt.pix.field; mutex_lock(&video->queue_lock); ret = vb2_streamon(&vfh->queue, type); mutex_unlock(&video->queue_lock); if (ret < 0) goto err_check_format; mutex_unlock(&video->stream_lock); return 0; err_check_format: video_device_pipeline_stop(&video->video); err_pipeline_start: /* TODO: Implement PM QoS */ /* The DMA queue must be emptied here, otherwise CCDC interrupts that * will get triggered the next time the CCDC is powered up will try to * access buffers that might have been freed but still present in the * DMA queue. This can easily get triggered if the above * omap3isp_pipeline_set_stream() call fails on a system with a * free-running sensor. 
*/ INIT_LIST_HEAD(&video->dmaqueue); video->queue = NULL; media_entity_enum_cleanup(&pipe->ent_enum); err_enum_init: mutex_unlock(&video->stream_lock); return ret; } static int isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) { struct isp_video_fh *vfh = to_isp_video_fh(fh); struct isp_video *video = video_drvdata(file); struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity); enum isp_pipeline_state state; unsigned int streaming; unsigned long flags; if (type != video->type) return -EINVAL; mutex_lock(&video->stream_lock); /* Make sure we're not streaming yet. */ mutex_lock(&video->queue_lock); streaming = vb2_is_streaming(&vfh->queue); mutex_unlock(&video->queue_lock); if (!streaming) goto done; /* Update the pipeline state. */ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_QUEUE_OUTPUT; else state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_QUEUE_INPUT; spin_lock_irqsave(&pipe->lock, flags); pipe->state &= ~state; spin_unlock_irqrestore(&pipe->lock, flags); /* Stop the stream. */ omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED); omap3isp_video_cancel_stream(video); mutex_lock(&video->queue_lock); vb2_streamoff(&vfh->queue, type); mutex_unlock(&video->queue_lock); video->queue = NULL; video->error = false; /* TODO: Implement PM QoS */ video_device_pipeline_stop(&video->video); media_entity_enum_cleanup(&pipe->ent_enum); done: mutex_unlock(&video->stream_lock); return 0; } static int isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input) { if (input->index > 0) return -EINVAL; strscpy(input->name, "camera", sizeof(input->name)); input->type = V4L2_INPUT_TYPE_CAMERA; return 0; } static int isp_video_g_input(struct file *file, void *fh, unsigned int *input) { *input = 0; return 0; } static int isp_video_s_input(struct file *file, void *fh, unsigned int input) { return input == 0 ? 
0 : -EINVAL; } static const struct v4l2_ioctl_ops isp_video_ioctl_ops = { .vidioc_querycap = isp_video_querycap, .vidioc_g_fmt_vid_cap = isp_video_get_format, .vidioc_s_fmt_vid_cap = isp_video_set_format, .vidioc_try_fmt_vid_cap = isp_video_try_format, .vidioc_g_fmt_vid_out = isp_video_get_format, .vidioc_s_fmt_vid_out = isp_video_set_format, .vidioc_try_fmt_vid_out = isp_video_try_format, .vidioc_g_selection = isp_video_get_selection, .vidioc_s_selection = isp_video_set_selection, .vidioc_g_parm = isp_video_get_param, .vidioc_s_parm = isp_video_set_param, .vidioc_reqbufs = isp_video_reqbufs, .vidioc_querybuf = isp_video_querybuf, .vidioc_qbuf = isp_video_qbuf, .vidioc_dqbuf = isp_video_dqbuf, .vidioc_streamon = isp_video_streamon, .vidioc_streamoff = isp_video_streamoff, .vidioc_enum_input = isp_video_enum_input, .vidioc_g_input = isp_video_g_input, .vidioc_s_input = isp_video_s_input, }; /* ----------------------------------------------------------------------------- * V4L2 file operations */ static int isp_video_open(struct file *file) { struct isp_video *video = video_drvdata(file); struct isp_video_fh *handle; struct vb2_queue *queue; int ret = 0; handle = kzalloc(sizeof(*handle), GFP_KERNEL); if (handle == NULL) return -ENOMEM; v4l2_fh_init(&handle->vfh, &video->video); v4l2_fh_add(&handle->vfh); /* If this is the first user, initialise the pipeline. 
*/ if (omap3isp_get(video->isp) == NULL) { ret = -EBUSY; goto done; } ret = v4l2_pipeline_pm_get(&video->video.entity); if (ret < 0) { omap3isp_put(video->isp); goto done; } queue = &handle->queue; queue->type = video->type; queue->io_modes = VB2_MMAP | VB2_USERPTR; queue->drv_priv = handle; queue->ops = &isp_video_queue_ops; queue->mem_ops = &vb2_dma_contig_memops; queue->buf_struct_size = sizeof(struct isp_buffer); queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; queue->dev = video->isp->dev; ret = vb2_queue_init(&handle->queue); if (ret < 0) { omap3isp_put(video->isp); goto done; } memset(&handle->format, 0, sizeof(handle->format)); handle->format.type = video->type; handle->timeperframe.denominator = 1; handle->video = video; file->private_data = &handle->vfh; done: if (ret < 0) { v4l2_fh_del(&handle->vfh); v4l2_fh_exit(&handle->vfh); kfree(handle); } return ret; } static int isp_video_release(struct file *file) { struct isp_video *video = video_drvdata(file); struct v4l2_fh *vfh = file->private_data; struct isp_video_fh *handle = to_isp_video_fh(vfh); /* Disable streaming and free the buffers queue resources. */ isp_video_streamoff(file, vfh, video->type); mutex_lock(&video->queue_lock); vb2_queue_release(&handle->queue); mutex_unlock(&video->queue_lock); v4l2_pipeline_pm_put(&video->video.entity); /* Release the file handle. 
*/ v4l2_fh_del(vfh); v4l2_fh_exit(vfh); kfree(handle); file->private_data = NULL; omap3isp_put(video->isp); return 0; } static __poll_t isp_video_poll(struct file *file, poll_table *wait) { struct isp_video_fh *vfh = to_isp_video_fh(file->private_data); struct isp_video *video = video_drvdata(file); __poll_t ret; mutex_lock(&video->queue_lock); ret = vb2_poll(&vfh->queue, file, wait); mutex_unlock(&video->queue_lock); return ret; } static int isp_video_mmap(struct file *file, struct vm_area_struct *vma) { struct isp_video_fh *vfh = to_isp_video_fh(file->private_data); return vb2_mmap(&vfh->queue, vma); } static const struct v4l2_file_operations isp_video_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, .open = isp_video_open, .release = isp_video_release, .poll = isp_video_poll, .mmap = isp_video_mmap, }; /* ----------------------------------------------------------------------------- * ISP video core */ static const struct isp_video_operations isp_video_dummy_ops = { }; int omap3isp_video_init(struct isp_video *video, const char *name) { const char *direction; int ret; switch (video->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: direction = "output"; video->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; break; case V4L2_BUF_TYPE_VIDEO_OUTPUT: direction = "input"; video->pad.flags = MEDIA_PAD_FL_SOURCE | MEDIA_PAD_FL_MUST_CONNECT; video->video.vfl_dir = VFL_DIR_TX; break; default: return -EINVAL; } ret = media_entity_pads_init(&video->video.entity, 1, &video->pad); if (ret < 0) return ret; mutex_init(&video->mutex); atomic_set(&video->active, 0); spin_lock_init(&video->pipe.lock); mutex_init(&video->stream_lock); mutex_init(&video->queue_lock); spin_lock_init(&video->irqlock); /* Initialize the video device. 
*/ if (video->ops == NULL) video->ops = &isp_video_dummy_ops; video->video.fops = &isp_video_fops; snprintf(video->video.name, sizeof(video->video.name), "OMAP3 ISP %s %s", name, direction); video->video.vfl_type = VFL_TYPE_VIDEO; video->video.release = video_device_release_empty; video->video.ioctl_ops = &isp_video_ioctl_ops; if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) video->video.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; else video->video.device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED; video_set_drvdata(&video->video, video); return 0; } void omap3isp_video_cleanup(struct isp_video *video) { media_entity_cleanup(&video->video.entity); mutex_destroy(&video->queue_lock); mutex_destroy(&video->stream_lock); mutex_destroy(&video->mutex); } int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev) { int ret; video->video.v4l2_dev = vdev; ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1); if (ret < 0) dev_err(video->isp->dev, "%s: could not register video device (%d)\n", __func__, ret); return ret; } void omap3isp_video_unregister(struct isp_video *video) { video_unregister_device(&video->video); }
linux-master
drivers/media/platform/ti/omap3isp/ispvideo.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ispccdc.c
 *
 * TI OMAP3 ISP - CCDC module
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: Laurent Pinchart <[email protected]>
 *	     Sakari Ailus <[email protected]>
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/v4l2-event.h>

#include "isp.h"
#include "ispreg.h"
#include "ispccdc.h"

/* Minimum frame size accepted on the CCDC, in pixels. */
#define CCDC_MIN_WIDTH		32
#define CCDC_MIN_HEIGHT		32

static struct v4l2_mbus_framefmt *
__ccdc_get_format(struct isp_ccdc_device *ccdc,
		  struct v4l2_subdev_state *sd_state,
		  unsigned int pad, enum v4l2_subdev_format_whence which);

/* Media bus codes supported by the CCDC. */
static const unsigned int ccdc_fmts[] = {
	MEDIA_BUS_FMT_Y8_1X8,
	MEDIA_BUS_FMT_Y10_1X10,
	MEDIA_BUS_FMT_Y12_1X12,
	MEDIA_BUS_FMT_SGRBG8_1X8,
	MEDIA_BUS_FMT_SRGGB8_1X8,
	MEDIA_BUS_FMT_SBGGR8_1X8,
	MEDIA_BUS_FMT_SGBRG8_1X8,
	MEDIA_BUS_FMT_SGRBG10_1X10,
	MEDIA_BUS_FMT_SRGGB10_1X10,
	MEDIA_BUS_FMT_SBGGR10_1X10,
	MEDIA_BUS_FMT_SGBRG10_1X10,
	MEDIA_BUS_FMT_SGRBG12_1X12,
	MEDIA_BUS_FMT_SRGGB12_1X12,
	MEDIA_BUS_FMT_SBGGR12_1X12,
	MEDIA_BUS_FMT_SGBRG12_1X12,
	MEDIA_BUS_FMT_YUYV8_2X8,
	MEDIA_BUS_FMT_UYVY8_2X8,
};

/*
 * ccdc_print_status - Print current CCDC Module register values.
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Also prints other debug information stored in the CCDC module.
 */
#define CCDC_PRINT_REGISTER(isp, name)\
	dev_dbg(isp->dev, "###CCDC " #name "=0x%08x\n", \
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_##name))

static void ccdc_print_status(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);

	dev_dbg(isp->dev, "-------------CCDC Register dump-------------\n");

	CCDC_PRINT_REGISTER(isp, PCR);
	CCDC_PRINT_REGISTER(isp, SYN_MODE);
	CCDC_PRINT_REGISTER(isp, HD_VD_WID);
	CCDC_PRINT_REGISTER(isp, PIX_LINES);
	CCDC_PRINT_REGISTER(isp, HORZ_INFO);
	CCDC_PRINT_REGISTER(isp, VERT_START);
	CCDC_PRINT_REGISTER(isp, VERT_LINES);
	CCDC_PRINT_REGISTER(isp, CULLING);
	CCDC_PRINT_REGISTER(isp, HSIZE_OFF);
	CCDC_PRINT_REGISTER(isp, SDOFST);
	CCDC_PRINT_REGISTER(isp, SDR_ADDR);
	CCDC_PRINT_REGISTER(isp, CLAMP);
	CCDC_PRINT_REGISTER(isp, DCSUB);
	CCDC_PRINT_REGISTER(isp, COLPTN);
	CCDC_PRINT_REGISTER(isp, BLKCMP);
	CCDC_PRINT_REGISTER(isp, FPC);
	CCDC_PRINT_REGISTER(isp, FPC_ADDR);
	CCDC_PRINT_REGISTER(isp, VDINT);
	CCDC_PRINT_REGISTER(isp, ALAW);
	CCDC_PRINT_REGISTER(isp, REC656IF);
	CCDC_PRINT_REGISTER(isp, CFG);
	CCDC_PRINT_REGISTER(isp, FMTCFG);
	CCDC_PRINT_REGISTER(isp, FMT_HORZ);
	CCDC_PRINT_REGISTER(isp, FMT_VERT);
	CCDC_PRINT_REGISTER(isp, PRGEVEN0);
	CCDC_PRINT_REGISTER(isp, PRGEVEN1);
	CCDC_PRINT_REGISTER(isp, PRGODD0);
	CCDC_PRINT_REGISTER(isp, PRGODD1);
	CCDC_PRINT_REGISTER(isp, VP_OUT);
	CCDC_PRINT_REGISTER(isp, LSC_CONFIG);
	CCDC_PRINT_REGISTER(isp, LSC_INITIAL);
	CCDC_PRINT_REGISTER(isp, LSC_TABLE_BASE);
	CCDC_PRINT_REGISTER(isp, LSC_TABLE_OFFSET);

	dev_dbg(isp->dev, "--------------------------------------------\n");
}

/*
 * omap3isp_ccdc_busy - Get busy state of the CCDC.
 * @ccdc: Pointer to ISP CCDC device.
 */
int omap3isp_ccdc_busy(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);

	/* Non-zero while the CCDC core reports BUSY in the PCR register. */
	return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR) &
		ISPCCDC_PCR_BUSY;
}

/* -----------------------------------------------------------------------------
 * Lens Shading Compensation
 */

/*
 * ccdc_lsc_validate_config - Check that LSC configuration is valid.
 * @ccdc: Pointer to ISP CCDC device.
 * @lsc_cfg: the LSC configuration to check.
 *
 * Returns 0 if the LSC configuration is valid, or -EINVAL if invalid.
 */
static int ccdc_lsc_validate_config(struct isp_ccdc_device *ccdc,
				    struct omap3isp_ccdc_lsc_config *lsc_cfg)
{
	struct isp_device *isp = to_isp_device(ccdc);
	struct v4l2_mbus_framefmt *format;
	unsigned int paxel_width, paxel_height;
	unsigned int paxel_shift_x, paxel_shift_y;
	unsigned int min_width, min_height, min_size;
	unsigned int input_width, input_height;

	/* gain_mode_m/n encode the paxel dimensions as powers of two. */
	paxel_shift_x = lsc_cfg->gain_mode_m;
	paxel_shift_y = lsc_cfg->gain_mode_n;

	if ((paxel_shift_x < 2) || (paxel_shift_x > 6) ||
	    (paxel_shift_y < 2) || (paxel_shift_y > 6)) {
		dev_dbg(isp->dev, "CCDC: LSC: Invalid paxel size\n");
		return -EINVAL;
	}

	if (lsc_cfg->offset & 3) {
		dev_dbg(isp->dev,
			"CCDC: LSC: Offset must be a multiple of 4\n");
		return -EINVAL;
	}

	if ((lsc_cfg->initial_x & 1) || (lsc_cfg->initial_y & 1)) {
		dev_dbg(isp->dev, "CCDC: LSC: initial_x and y must be even\n");
		return -EINVAL;
	}

	/* Validate against the currently active sink-pad format. */
	format = __ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
				   V4L2_SUBDEV_FORMAT_ACTIVE);
	input_width = format->width;
	input_height = format->height;

	/* Calculate minimum bytesize for validation */
	paxel_width = 1 << paxel_shift_x;
	min_width = ((input_width + lsc_cfg->initial_x + paxel_width - 1)
		     >> paxel_shift_x) + 1;

	paxel_height = 1 << paxel_shift_y;
	min_height = ((input_height + lsc_cfg->initial_y + paxel_height - 1)
		      >> paxel_shift_y) + 1;

	/* 4 bytes per paxel entry. */
	min_size = 4 * min_width * min_height;
	if (min_size > lsc_cfg->size) {
		dev_dbg(isp->dev, "CCDC: LSC: too small table\n");
		return -EINVAL;
	}
	if (lsc_cfg->offset < (min_width * 4)) {
		dev_dbg(isp->dev, "CCDC: LSC: Offset is too small\n");
		return -EINVAL;
	}
	if ((lsc_cfg->size / lsc_cfg->offset) < min_height) {
		dev_dbg(isp->dev, "CCDC: LSC: Wrong size/offset combination\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * ccdc_lsc_program_table - Program Lens Shading Compensation table address.
 * @ccdc: Pointer to ISP CCDC device.
 */
static void ccdc_lsc_program_table(struct isp_ccdc_device *ccdc,
				   dma_addr_t addr)
{
	isp_reg_writel(to_isp_device(ccdc), addr,
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_TABLE_BASE);
}

/*
 * ccdc_lsc_setup_regs - Configures the lens shading compensation module
 * @ccdc: Pointer to ISP CCDC device.
 */
static void ccdc_lsc_setup_regs(struct isp_ccdc_device *ccdc,
				struct omap3isp_ccdc_lsc_config *cfg)
{
	struct isp_device *isp = to_isp_device(ccdc);
	int reg;

	isp_reg_writel(isp, cfg->offset, OMAP3_ISP_IOMEM_CCDC,
		       ISPCCDC_LSC_TABLE_OFFSET);

	reg = 0;
	reg |= cfg->gain_mode_n << ISPCCDC_LSC_GAIN_MODE_N_SHIFT;
	reg |= cfg->gain_mode_m << ISPCCDC_LSC_GAIN_MODE_M_SHIFT;
	reg |= cfg->gain_format << ISPCCDC_LSC_GAIN_FORMAT_SHIFT;
	isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG);

	reg = 0;
	/* Mask each field before OR-ing its value in; keeps any shift spill
	 * from one field out of the other.
	 */
	reg &= ~ISPCCDC_LSC_INITIAL_X_MASK;
	reg |= cfg->initial_x << ISPCCDC_LSC_INITIAL_X_SHIFT;
	reg &= ~ISPCCDC_LSC_INITIAL_Y_MASK;
	reg |= cfg->initial_y << ISPCCDC_LSC_INITIAL_Y_SHIFT;
	isp_reg_writel(isp, reg, OMAP3_ISP_IOMEM_CCDC,
		       ISPCCDC_LSC_INITIAL);
}

/*
 * ccdc_lsc_wait_prefetch - Busy-wait for the LSC table prefetch to complete.
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Returns 0 when the prefetch-complete IRQ status bit is seen within 1 ms,
 * -ETIMEDOUT otherwise.
 */
static int ccdc_lsc_wait_prefetch(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);
	unsigned int wait;

	/* Clear any stale prefetch-complete status before polling. */
	isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ,
		       OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);

	/* timeout 1 ms */
	for (wait = 0; wait < 1000; wait++) {
		if (isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS) &
				  IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ) {
			isp_reg_writel(isp, IRQ0STATUS_CCDC_LSC_PREF_COMP_IRQ,
				       OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
			return 0;
		}

		rmb();
		udelay(1);
	}

	return -ETIMEDOUT;
}

/*
 * __ccdc_lsc_enable - Enables/Disables the Lens Shading Compensation module.
* @ccdc: Pointer to ISP CCDC device.
 * @enable: 0 Disables LSC, 1 Enables LSC.
 */
static int __ccdc_lsc_enable(struct isp_ccdc_device *ccdc, int enable)
{
	struct isp_device *isp = to_isp_device(ccdc);
	const struct v4l2_mbus_framefmt *format =
		__ccdc_get_format(ccdc, NULL, CCDC_PAD_SINK,
				  V4L2_SUBDEV_FORMAT_ACTIVE);

	/* LSC only operates on 10-bit Bayer input. */
	if ((format->code != MEDIA_BUS_FMT_SGRBG10_1X10) &&
	    (format->code != MEDIA_BUS_FMT_SRGGB10_1X10) &&
	    (format->code != MEDIA_BUS_FMT_SBGGR10_1X10) &&
	    (format->code != MEDIA_BUS_FMT_SGBRG10_1X10))
		return -EINVAL;

	/* The SBL read path must be up before the LSC engine is enabled. */
	if (enable)
		omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_LSC_READ);

	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG,
			ISPCCDC_LSC_ENABLE, enable ? ISPCCDC_LSC_ENABLE : 0);

	if (enable) {
		if (ccdc_lsc_wait_prefetch(ccdc) < 0) {
			/* Prefetch failed: back the enable out again. */
			isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC,
				    ISPCCDC_LSC_CONFIG, ISPCCDC_LSC_ENABLE);
			ccdc->lsc.state = LSC_STATE_STOPPED;
			dev_warn(to_device(ccdc), "LSC prefetch timeout\n");
			return -ETIMEDOUT;
		}
		ccdc->lsc.state = LSC_STATE_RUNNING;
	} else {
		ccdc->lsc.state = LSC_STATE_STOPPING;
	}

	return 0;
}

/* Non-zero while the LSC engine reports busy. */
static int ccdc_lsc_busy(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);

	return isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC,
			     ISPCCDC_LSC_CONFIG) & ISPCCDC_LSC_BUSY;
}

/*
 * __ccdc_lsc_configure - Apply a new configuration to the LSC engine
 * @ccdc: Pointer to ISP CCDC device
 * @req: New configuration request
 */
static int __ccdc_lsc_configure(struct isp_ccdc_device *ccdc,
				struct ispccdc_lsc_config_req *req)
{
	if (!req->enable)
		return -EINVAL;

	if (ccdc_lsc_validate_config(ccdc, &req->config) < 0) {
		dev_dbg(to_device(ccdc), "Discard LSC configuration\n");
		return -EINVAL;
	}

	if (ccdc_lsc_busy(ccdc))
		return -EBUSY;

	ccdc_lsc_setup_regs(ccdc, &req->config);
	ccdc_lsc_program_table(ccdc, req->table.dma);
	return 0;
}

/*
 * ccdc_lsc_error_handler - Handle LSC prefetch error scenario.
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Disables LSC, and defers enablement to shadow registers update time.
 */
static void ccdc_lsc_error_handler(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);

	/*
	 * From OMAP3 TRM: When this event is pending, the module
	 * goes into transparent mode (output =input). Normal
	 * operation can be resumed at the start of the next frame
	 * after:
	 *  1) Clearing this event
	 *  2) Disabling the LSC module
	 *  3) Enabling it
	 */
	isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_LSC_CONFIG,
		    ISPCCDC_LSC_ENABLE);
	ccdc->lsc.state = LSC_STATE_STOPPED;
}

/* Free one LSC request along with its DMA table, if any. NULL is a no-op. */
static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
				  struct ispccdc_lsc_config_req *req)
{
	struct isp_device *isp = to_isp_device(ccdc);

	if (req == NULL)
		return;

	if (req->table.addr) {
		sg_free_table(&req->table.sgt);
		dma_free_coherent(isp->dev, req->config.size, req->table.addr,
				  req->table.dma);
	}

	kfree(req);
}

/*
 * ccdc_lsc_free_queue - Drain and free every request on @queue.
 *
 * The request lock is dropped around each free because
 * ccdc_lsc_free_request() may sleep (dma_free_coherent).
 */
static void ccdc_lsc_free_queue(struct isp_ccdc_device *ccdc,
				struct list_head *queue)
{
	struct ispccdc_lsc_config_req *req, *n;
	unsigned long flags;

	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
	list_for_each_entry_safe(req, n, queue, list) {
		list_del(&req->list);
		spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
		ccdc_lsc_free_request(ccdc, req);
		spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
	}
	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
}

/* Work item: free all requests queued on the LSC free queue. */
static void ccdc_lsc_free_table_work(struct work_struct *work)
{
	struct isp_ccdc_device *ccdc;
	struct ispccdc_lsc *lsc;

	lsc = container_of(work, struct ispccdc_lsc, table_work);
	ccdc = container_of(lsc, struct isp_ccdc_device, lsc);

	ccdc_lsc_free_queue(ccdc, &lsc->free_queue);
}

/*
 * ccdc_lsc_config - Configure the LSC module from a userspace request
 *
 * Store the request LSC configuration in the LSC engine request pointer. The
 * configuration will be applied to the hardware when the CCDC will be enabled,
 * or at the next LSC interrupt if the CCDC is already running.
 */
static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
			   struct omap3isp_ccdc_update_config *config)
{
	struct isp_device *isp = to_isp_device(ccdc);
	struct ispccdc_lsc_config_req *req;
	unsigned long flags;
	u16 update;
	int ret;

	update = config->update &
		 (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC);
	if (!update)
		return 0;

	/* Config and table must always be updated together. */
	if (update != (OMAP3ISP_CCDC_CONFIG_LSC | OMAP3ISP_CCDC_TBL_LSC)) {
		dev_dbg(to_device(ccdc),
			"%s: Both LSC configuration and table need to be supplied\n",
			__func__);
		return -EINVAL;
	}

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return -ENOMEM;

	if (config->flag & OMAP3ISP_CCDC_CONFIG_LSC) {
		if (copy_from_user(&req->config, config->lsc_cfg,
				   sizeof(req->config))) {
			ret = -EFAULT;
			goto done;
		}

		req->enable = 1;

		req->table.addr = dma_alloc_coherent(isp->dev, req->config.size,
						     &req->table.dma,
						     GFP_KERNEL);
		if (req->table.addr == NULL) {
			ret = -ENOMEM;
			goto done;
		}

		ret = dma_get_sgtable(isp->dev, &req->table.sgt,
				      req->table.addr, req->table.dma,
				      req->config.size);
		if (ret < 0)
			goto done;

		/* Hand the buffer to the CPU, copy the table in, then hand it
		 * back to the device.
		 */
		dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
				    req->table.sgt.nents, DMA_TO_DEVICE);

		if (copy_from_user(req->table.addr, config->lsc,
				   req->config.size)) {
			ret = -EFAULT;
			goto done;
		}

		dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
				       req->table.sgt.nents, DMA_TO_DEVICE);
	}

	/* Replace any pending request; the old one is freed from the work
	 * queue as freeing can sleep.
	 */
	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
	if (ccdc->lsc.request) {
		list_add_tail(&ccdc->lsc.request->list, &ccdc->lsc.free_queue);
		schedule_work(&ccdc->lsc.table_work);
	}
	ccdc->lsc.request = req;
	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);

	ret = 0;

done:
	if (ret < 0)
		ccdc_lsc_free_request(ccdc, req);

	return ret;
}

/* True when an LSC configuration is currently active on the hardware. */
static inline int ccdc_lsc_is_configured(struct isp_ccdc_device *ccdc)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
	ret = ccdc->lsc.active != NULL;
	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);

	return ret;
}

/*
 * ccdc_lsc_enable - Promote the pending LSC request to active and enable LSC.
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Callers are expected to guarantee a non-NULL pending request (see
 * ccdc_configure()). Returns 0; configuration failures only disable the SBL
 * read path and discard the request.
 */
static int ccdc_lsc_enable(struct isp_ccdc_device *ccdc)
{
	struct ispccdc_lsc *lsc = &ccdc->lsc;

	if (lsc->state != LSC_STATE_STOPPED)
		return -EINVAL;

	if (lsc->active) {
		list_add_tail(&lsc->active->list, &lsc->free_queue);
		lsc->active = NULL;
	}

	if (__ccdc_lsc_configure(ccdc, lsc->request) < 0) {
		omap3isp_sbl_disable(to_isp_device(ccdc),
				OMAP3_ISP_SBL_CCDC_LSC_READ);
		list_add_tail(&lsc->request->list, &lsc->free_queue);
		lsc->request = NULL;
		goto done;
	}

	lsc->active = lsc->request;
	lsc->request = NULL;

	__ccdc_lsc_enable(ccdc, 1);

done:
	if (!list_empty(&lsc->free_queue))
		schedule_work(&lsc->table_work);

	return 0;
}

/* -----------------------------------------------------------------------------
 * Parameters configuration
 */

/*
 * ccdc_configure_clamp - Configure optical-black or digital clamping
 * @ccdc: Pointer to ISP CCDC device.
 *
 * The CCDC performs either optical-black or digital clamp. Configure and
 * enable the selected clamp method.
 */
static void ccdc_configure_clamp(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);
	u32 clamp;

	if (ccdc->obclamp) {
		clamp = ccdc->clamp.obgain << ISPCCDC_CLAMP_OBGAIN_SHIFT;
		clamp |= ccdc->clamp.oblen << ISPCCDC_CLAMP_OBSLEN_SHIFT;
		clamp |= ccdc->clamp.oblines << ISPCCDC_CLAMP_OBSLN_SHIFT;
		clamp |= ccdc->clamp.obstpixel << ISPCCDC_CLAMP_OBST_SHIFT;
		isp_reg_writel(isp, clamp,
			       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP);
	} else {
		isp_reg_writel(isp, ccdc->clamp.dcsubval,
			       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_DCSUB);
	}

	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CLAMP,
			ISPCCDC_CLAMP_CLAMPEN,
			ccdc->obclamp ? ISPCCDC_CLAMP_CLAMPEN : 0);
}

/*
 * ccdc_configure_fpc - Configure Faulty Pixel Correction
 * @ccdc: Pointer to ISP CCDC device.
 */
static void ccdc_configure_fpc(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);

	isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC, ISPCCDC_FPC_FPCEN);

	if (!ccdc->fpc_en)
		return;

	isp_reg_writel(isp, ccdc->fpc.dma, OMAP3_ISP_IOMEM_CCDC,
		       ISPCCDC_FPC_ADDR);
	/* The FPNUM field must be set before enabling FPC.
 */
	isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC);
	isp_reg_writel(isp, (ccdc->fpc.fpnum << ISPCCDC_FPC_FPNUM_SHIFT) |
		       ISPCCDC_FPC_FPCEN, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FPC);
}

/*
 * ccdc_configure_black_comp - Configure Black Level Compensation.
 * @ccdc: Pointer to ISP CCDC device.
 */
static void ccdc_configure_black_comp(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);
	u32 blcomp;

	blcomp = ccdc->blcomp.b_mg << ISPCCDC_BLKCMP_B_MG_SHIFT;
	blcomp |= ccdc->blcomp.gb_g << ISPCCDC_BLKCMP_GB_G_SHIFT;
	blcomp |= ccdc->blcomp.gr_cy << ISPCCDC_BLKCMP_GR_CY_SHIFT;
	blcomp |= ccdc->blcomp.r_ye << ISPCCDC_BLKCMP_R_YE_SHIFT;

	isp_reg_writel(isp, blcomp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_BLKCMP);
}

/*
 * ccdc_configure_lpf - Configure Low-Pass Filter (LPF).
 * @ccdc: Pointer to ISP CCDC device.
 */
static void ccdc_configure_lpf(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);

	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE,
			ISPCCDC_SYN_MODE_LPF,
			ccdc->lpf ? ISPCCDC_SYN_MODE_LPF : 0);
}

/*
 * ccdc_configure_alaw - Configure A-law compression.
 * @ccdc: Pointer to ISP CCDC device.
 */
static void ccdc_configure_alaw(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);
	const struct isp_format_info *info;
	u32 alaw = 0;

	info = omap3isp_video_format_info(ccdc->formats[CCDC_PAD_SINK].code);

	/* Select which input bits feed the A-law table, based on the input
	 * bit width. 8-bit input needs no compression.
	 */
	switch (info->width) {
	case 8:
		return;

	case 10:
		alaw = ISPCCDC_ALAW_GWDI_9_0;
		break;
	case 11:
		alaw = ISPCCDC_ALAW_GWDI_10_1;
		break;
	case 12:
		alaw = ISPCCDC_ALAW_GWDI_11_2;
		break;
	case 13:
		alaw = ISPCCDC_ALAW_GWDI_12_3;
		break;
	}

	if (ccdc->alaw)
		alaw |= ISPCCDC_ALAW_CCDTBL;

	isp_reg_writel(isp, alaw, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_ALAW);
}

/*
 * ccdc_config_imgattr - Configure sensor image specific attributes.
 * @ccdc: Pointer to ISP CCDC device.
 * @colptn: Color pattern of the sensor.
 */
static void ccdc_config_imgattr(struct isp_ccdc_device *ccdc, u32 colptn)
{
	struct isp_device *isp = to_isp_device(ccdc);

	isp_reg_writel(isp, colptn, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_COLPTN);
}

/*
 * ccdc_config - Set CCDC configuration from userspace
 * @ccdc: Pointer to ISP CCDC device.
 * @ccdc_struct: Structure containing CCDC configuration sent from userspace.
 *
 * Returns 0 if successful, -EINVAL if the pointer to the configuration
 * structure is null, or the copy_from_user function fails to copy user space
 * memory to kernel space memory.
 */
static int ccdc_config(struct isp_ccdc_device *ccdc,
		       struct omap3isp_ccdc_update_config *ccdc_struct)
{
	struct isp_device *isp = to_isp_device(ccdc);
	unsigned long flags;

	/* shadow_update tells the interrupt path that shadowed parameters are
	 * being modified and must not be applied to the hardware yet.
	 */
	spin_lock_irqsave(&ccdc->lock, flags);
	ccdc->shadow_update = 1;
	spin_unlock_irqrestore(&ccdc->lock, flags);

	if (OMAP3ISP_CCDC_ALAW & ccdc_struct->update) {
		ccdc->alaw = !!(OMAP3ISP_CCDC_ALAW & ccdc_struct->flag);
		ccdc->update |= OMAP3ISP_CCDC_ALAW;
	}

	if (OMAP3ISP_CCDC_LPF & ccdc_struct->update) {
		ccdc->lpf = !!(OMAP3ISP_CCDC_LPF & ccdc_struct->flag);
		ccdc->update |= OMAP3ISP_CCDC_LPF;
	}

	if (OMAP3ISP_CCDC_BLCLAMP & ccdc_struct->update) {
		if (copy_from_user(&ccdc->clamp, ccdc_struct->bclamp,
				   sizeof(ccdc->clamp))) {
			ccdc->shadow_update = 0;
			return -EFAULT;
		}

		ccdc->obclamp = !!(OMAP3ISP_CCDC_BLCLAMP & ccdc_struct->flag);
		ccdc->update |= OMAP3ISP_CCDC_BLCLAMP;
	}

	if (OMAP3ISP_CCDC_BCOMP & ccdc_struct->update) {
		if (copy_from_user(&ccdc->blcomp, ccdc_struct->blcomp,
				   sizeof(ccdc->blcomp))) {
			ccdc->shadow_update = 0;
			return -EFAULT;
		}

		ccdc->update |= OMAP3ISP_CCDC_BCOMP;
	}

	ccdc->shadow_update = 0;

	if (OMAP3ISP_CCDC_FPC & ccdc_struct->update) {
		struct omap3isp_ccdc_fpc fpc;
		struct ispccdc_fpc fpc_old = { .addr = NULL, };
		struct ispccdc_fpc fpc_new;
		u32 size;

		/* The FPC table can only be replaced while stopped. */
		if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
			return -EBUSY;

		ccdc->fpc_en = !!(OMAP3ISP_CCDC_FPC & ccdc_struct->flag);

		if (ccdc->fpc_en) {
			if (copy_from_user(&fpc, ccdc_struct->fpc,
					   sizeof(fpc)))
				return -EFAULT;

			size = fpc.fpnum * 4;

			/*
			 * The table address must be 64-bytes aligned, which is
			 * guaranteed by dma_alloc_coherent().
			 */
			fpc_new.fpnum = fpc.fpnum;
			fpc_new.addr = dma_alloc_coherent(isp->dev, size,
							  &fpc_new.dma,
							  GFP_KERNEL);
			if (fpc_new.addr == NULL)
				return -ENOMEM;

			if (copy_from_user(fpc_new.addr,
					   (__force void __user *)(long)
					   fpc.fpcaddr, size)) {
				dma_free_coherent(isp->dev, size, fpc_new.addr,
						  fpc_new.dma);
				return -EFAULT;
			}

			fpc_old = ccdc->fpc;
			ccdc->fpc = fpc_new;
		}

		ccdc_configure_fpc(ccdc);

		/* Free the previous table only after the hardware has been
		 * pointed at the new one.
		 */
		if (fpc_old.addr != NULL)
			dma_free_coherent(isp->dev, fpc_old.fpnum * 4,
					  fpc_old.addr, fpc_old.dma);
	}

	return ccdc_lsc_config(ccdc, ccdc_struct);
}

/* Apply the accumulated shadowed parameter updates to the hardware. */
static void ccdc_apply_controls(struct isp_ccdc_device *ccdc)
{
	if (ccdc->update & OMAP3ISP_CCDC_ALAW) {
		ccdc_configure_alaw(ccdc);
		ccdc->update &= ~OMAP3ISP_CCDC_ALAW;
	}

	if (ccdc->update & OMAP3ISP_CCDC_LPF) {
		ccdc_configure_lpf(ccdc);
		ccdc->update &= ~OMAP3ISP_CCDC_LPF;
	}

	if (ccdc->update & OMAP3ISP_CCDC_BLCLAMP) {
		ccdc_configure_clamp(ccdc);
		ccdc->update &= ~OMAP3ISP_CCDC_BLCLAMP;
	}

	if (ccdc->update & OMAP3ISP_CCDC_BCOMP) {
		ccdc_configure_black_comp(ccdc);
		ccdc->update &= ~OMAP3ISP_CCDC_BCOMP;
	}
}

/*
 * omap3isp_ccdc_restore_context - Restore values of the CCDC module registers
 * @isp: Pointer to ISP device
 */
void omap3isp_ccdc_restore_context(struct isp_device *isp)
{
	struct isp_ccdc_device *ccdc = &isp->isp_ccdc;

	isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG, ISPCCDC_CFG_VDLC);

	/* Mark every shadowed parameter dirty and reprogram it. */
	ccdc->update = OMAP3ISP_CCDC_ALAW | OMAP3ISP_CCDC_LPF
		     | OMAP3ISP_CCDC_BLCLAMP | OMAP3ISP_CCDC_BCOMP;
	ccdc_apply_controls(ccdc);
	ccdc_configure_fpc(ccdc);
}

/* -----------------------------------------------------------------------------
 * Format- and pipeline-related configuration helpers
 */

/*
 * ccdc_config_vp - Configure the Video Port.
 * @ccdc: Pointer to ISP CCDC device.
 */
static void ccdc_config_vp(struct isp_ccdc_device *ccdc)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
	struct isp_device *isp = to_isp_device(ccdc);
	const struct isp_format_info *info;
	struct v4l2_mbus_framefmt *format;
	unsigned long l3_ick = pipe->l3_ick;
	/* Revision 15.0 ISPs support a larger video port clock divider. */
	unsigned int max_div = isp->revision == ISP_REVISION_15_0 ? 64 : 8;
	unsigned int div = 0;
	u32 fmtcfg = ISPCCDC_FMTCFG_VPEN;

	format = &ccdc->formats[CCDC_PAD_SOURCE_VP];

	if (!format->code) {
		/* Disable the video port when the input format isn't supported.
		 * This is indicated by a pixel code set to 0.
		 */
		isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG);
		return;
	}

	isp_reg_writel(isp, (0 << ISPCCDC_FMT_HORZ_FMTSPH_SHIFT) |
		       (format->width << ISPCCDC_FMT_HORZ_FMTLNH_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_HORZ);
	isp_reg_writel(isp, (0 << ISPCCDC_FMT_VERT_FMTSLV_SHIFT) |
		       ((format->height + 1) << ISPCCDC_FMT_VERT_FMTLNV_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMT_VERT);

	isp_reg_writel(isp, (format->width << ISPCCDC_VP_OUT_HORZ_NUM_SHIFT) |
		       (format->height << ISPCCDC_VP_OUT_VERT_NUM_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VP_OUT);

	/* Select which input bits are routed to the video port based on the
	 * sink pad bit width.
	 */
	info = omap3isp_video_format_info(ccdc->formats[CCDC_PAD_SINK].code);

	switch (info->width) {
	case 8:
	case 10:
		fmtcfg |= ISPCCDC_FMTCFG_VPIN_9_0;
		break;
	case 11:
		fmtcfg |= ISPCCDC_FMTCFG_VPIN_10_1;
		break;
	case 12:
		fmtcfg |= ISPCCDC_FMTCFG_VPIN_11_2;
		break;
	case 13:
		fmtcfg |= ISPCCDC_FMTCFG_VPIN_12_3;
		break;
	}

	/* Derive the video port clock divider from the pipeline rates. */
	if (pipe->input)
		div = DIV_ROUND_UP(l3_ick, pipe->max_rate);
	else if (pipe->external_rate)
		div = l3_ick / pipe->external_rate;

	div = clamp(div, 2U, max_div);
	fmtcfg |= (div - 2) << ISPCCDC_FMTCFG_VPIF_FRQ_SHIFT;

	isp_reg_writel(isp, fmtcfg, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_FMTCFG);
}

/*
 * ccdc_config_outlineoffset - Configure memory saving output line offset
 * @ccdc: Pointer to ISP CCDC device.
 * @bpl: Number of bytes per line when stored in memory.
 * @field: Field order when storing interlaced formats in memory.
 *
 * Configure the offsets for the line output control:
 *
 * - The horizontal line offset is defined as the number of bytes between the
 *   start of two consecutive lines in memory. Set it to the given bytes per
 *   line value.
 *
 * - The field offset value is defined as the number of lines to offset the
 *   start of the field identified by FID = 1. Set it to one.
 *
 * - The line offset values are defined as the number of lines (as defined by
 *   the horizontal line offset) between the start of two consecutive lines
 *   for all combinations of odd/even lines in odd/even fields. When
 *   interleaving fields set them all to two lines, and to one line otherwise.
 */
static void ccdc_config_outlineoffset(struct isp_ccdc_device *ccdc,
				      unsigned int bpl, enum v4l2_field field)
{
	struct isp_device *isp = to_isp_device(ccdc);
	u32 sdofst = 0;

	isp_reg_writel(isp, bpl & 0xffff, OMAP3_ISP_IOMEM_CCDC,
		       ISPCCDC_HSIZE_OFF);

	switch (field) {
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
		/* When interleaving fields in memory offset field one by one
		 * line and set the line offset to two lines.
		 */
		sdofst |= (1 << ISPCCDC_SDOFST_LOFST0_SHIFT)
		       |  (1 << ISPCCDC_SDOFST_LOFST1_SHIFT)
		       |  (1 << ISPCCDC_SDOFST_LOFST2_SHIFT)
		       |  (1 << ISPCCDC_SDOFST_LOFST3_SHIFT);
		break;

	default:
		/* In all other cases set the line offsets to one line. */
		break;
	}

	isp_reg_writel(isp, sdofst, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDOFST);
}

/*
 * ccdc_set_outaddr - Set memory address to save output image
 * @ccdc: Pointer to ISP CCDC device.
 * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
 *
 * Sets the memory address where the output will be saved.
 */
static void ccdc_set_outaddr(struct isp_ccdc_device *ccdc, u32 addr)
{
	struct isp_device *isp = to_isp_device(ccdc);

	isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SDR_ADDR);
}

/*
 * omap3isp_ccdc_max_rate - Calculate maximum input data rate based on the input
 * @ccdc: Pointer to ISP CCDC device.
 * @max_rate: Maximum calculated data rate.
*
 * Returns in *max_rate less value between calculated and passed
 */
void omap3isp_ccdc_max_rate(struct isp_ccdc_device *ccdc,
			    unsigned int *max_rate)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
	unsigned int rate;

	if (pipe == NULL)
		return;

	/*
	 * TRM says that for parallel sensors the maximum data rate
	 * should be 90% from L3/2 clock, otherwise just L3/2.
	 */
	if (ccdc->input == CCDC_INPUT_PARALLEL)
		rate = pipe->l3_ick / 2 * 9 / 10;
	else
		rate = pipe->l3_ick / 2;

	*max_rate = min(*max_rate, rate);
}

/*
 * ccdc_config_sync_if - Set CCDC sync interface configuration
 * @ccdc: Pointer to ISP CCDC device.
 * @parcfg: Parallel interface platform data (may be NULL)
 * @data_size: Data size
 */
static void ccdc_config_sync_if(struct isp_ccdc_device *ccdc,
				struct isp_parallel_cfg *parcfg,
				unsigned int data_size)
{
	struct isp_device *isp = to_isp_device(ccdc);
	const struct v4l2_mbus_framefmt *format;
	u32 syn_mode = ISPCCDC_SYN_MODE_VDHDEN;

	format = &ccdc->formats[CCDC_PAD_SINK];

	if (format->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
	    format->code == MEDIA_BUS_FMT_UYVY8_2X8) {
		/* According to the OMAP3 TRM the input mode only affects SYNC
		 * mode, enabling BT.656 mode should take precedence. However,
		 * in practice setting the input mode to YCbCr data on 8 bits
		 * seems to be required in BT.656 mode. In SYNC mode set it to
		 * YCbCr on 16 bits as the bridge is enabled in that case.
		 */
		if (ccdc->bt656)
			syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR8;
		else
			syn_mode |= ISPCCDC_SYN_MODE_INPMOD_YCBCR16;
	}

	switch (data_size) {
	case 8:
		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_8;
		break;
	case 10:
		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_10;
		break;
	case 11:
		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_11;
		break;
	case 12:
		syn_mode |= ISPCCDC_SYN_MODE_DATSIZ_12;
		break;
	}

	/* Apply the parallel bus signal polarities, when provided. */
	if (parcfg && parcfg->data_pol)
		syn_mode |= ISPCCDC_SYN_MODE_DATAPOL;

	if (parcfg && parcfg->hs_pol)
		syn_mode |= ISPCCDC_SYN_MODE_HDPOL;

	/* The polarity of the vertical sync signal output by the BT.656
	 * decoder is not documented and seems to be active low.
	 */
	if ((parcfg && parcfg->vs_pol) || ccdc->bt656)
		syn_mode |= ISPCCDC_SYN_MODE_VDPOL;

	if (parcfg && parcfg->fld_pol)
		syn_mode |= ISPCCDC_SYN_MODE_FLDPOL;

	isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);

	/* The CCDC_CFG.Y8POS bit is used in YCbCr8 input mode only. The
	 * hardware seems to ignore it in all other input modes.
	 */
	if (format->code == MEDIA_BUS_FMT_UYVY8_2X8)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
			    ISPCCDC_CFG_Y8POS);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
			    ISPCCDC_CFG_Y8POS);

	/* Enable or disable BT.656 mode, including error correction for the
	 * synchronization codes.
	 */
	if (ccdc->bt656)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF,
			    ISPCCDC_REC656IF_R656ON | ISPCCDC_REC656IF_ECCFVH);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_REC656IF,
			    ISPCCDC_REC656IF_R656ON | ISPCCDC_REC656IF_ECCFVH);

}

/* CCDC formats descriptions */

static const u32 ccdc_sgrbg_pattern =
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC3_SHIFT;

static const u32 ccdc_srggb_pattern =
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP3PLC3_SHIFT;

static const u32 ccdc_sbggr_pattern =
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC3_SHIFT;

static const u32 ccdc_sgbrg_pattern =
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC0_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC1_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP0PLC2_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP0PLC3_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC0_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC1_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP1PLC2_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP1PLC3_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC0_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC1_SHIFT |
	ISPCCDC_COLPTN_Gb_G << ISPCCDC_COLPTN_CP2PLC2_SHIFT |
	ISPCCDC_COLPTN_B_Mg << ISPCCDC_COLPTN_CP2PLC3_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC0_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC1_SHIFT |
	ISPCCDC_COLPTN_R_Ye << ISPCCDC_COLPTN_CP3PLC2_SHIFT |
	ISPCCDC_COLPTN_Gr_Cy << ISPCCDC_COLPTN_CP3PLC3_SHIFT;

/* ccdc_configure() continues past this chunk. */
static void ccdc_configure(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp =
to_isp_device(ccdc);
	struct isp_parallel_cfg *parcfg = NULL;
	struct v4l2_subdev *sensor;
	struct v4l2_mbus_framefmt *format;
	const struct v4l2_rect *crop;
	const struct isp_format_info *fmt_info;
	struct v4l2_subdev_format fmt_src = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	unsigned int depth_out;
	unsigned int depth_in = 0;
	struct media_pad *pad;
	unsigned long flags;
	unsigned int bridge;
	unsigned int shift;
	unsigned int nph;
	unsigned int sph;
	u32 syn_mode;
	u32 ccdc_pattern;

	ccdc->bt656 = false;
	ccdc->fields = 0;

	/* Find the remote sub-device feeding the CCDC sink pad. */
	pad = media_pad_remote_pad_first(&ccdc->pads[CCDC_PAD_SINK]);
	sensor = media_entity_to_v4l2_subdev(pad->entity);
	if (ccdc->input == CCDC_INPUT_PARALLEL) {
		struct v4l2_subdev *sd =
			to_isp_pipeline(&ccdc->subdev.entity)->external;
		struct isp_bus_cfg *bus_cfg;

		bus_cfg = v4l2_subdev_to_bus_cfg(sd);
		if (WARN_ON(!bus_cfg))
			return;
		parcfg = &bus_cfg->bus.parallel;
		ccdc->bt656 = parcfg->bt656;
	}

	/* CCDC_PAD_SINK */
	format = &ccdc->formats[CCDC_PAD_SINK];

	/* Compute the lane shifter shift value and enable the bridge when the
	 * input format is a non-BT.656 YUV variant.
	 */
	fmt_src.pad = pad->index;
	if (!v4l2_subdev_call(sensor, pad, get_fmt, NULL, &fmt_src)) {
		fmt_info = omap3isp_video_format_info(fmt_src.format.code);
		depth_in = fmt_info->width;
	}

	fmt_info = omap3isp_video_format_info(format->code);
	depth_out = fmt_info->width;
	shift = depth_in - depth_out;

	if (ccdc->bt656)
		bridge = ISPCTRL_PAR_BRIDGE_DISABLE;
	else if (fmt_info->code == MEDIA_BUS_FMT_YUYV8_2X8)
		bridge = ISPCTRL_PAR_BRIDGE_LENDIAN;
	else if (fmt_info->code == MEDIA_BUS_FMT_UYVY8_2X8)
		bridge = ISPCTRL_PAR_BRIDGE_BENDIAN;
	else
		bridge = ISPCTRL_PAR_BRIDGE_DISABLE;

	omap3isp_configure_bridge(isp, ccdc->input, parcfg, shift, bridge);

	/* Configure the sync interface. */
	ccdc_config_sync_if(ccdc, parcfg, depth_out);

	syn_mode = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);

	/* Use the raw, unprocessed data when writing to memory. The H3A and
	 * histogram modules are still fed with lens shading corrected data.
	 */
	syn_mode &= ~ISPCCDC_SYN_MODE_VP2SDR;

	if (ccdc->output & CCDC_OUTPUT_MEMORY)
		syn_mode |= ISPCCDC_SYN_MODE_WEN;
	else
		syn_mode &= ~ISPCCDC_SYN_MODE_WEN;

	if (ccdc->output & CCDC_OUTPUT_RESIZER)
		syn_mode |= ISPCCDC_SYN_MODE_SDR2RSZ;
	else
		syn_mode &= ~ISPCCDC_SYN_MODE_SDR2RSZ;

	/* Mosaic filter: select the Bayer colour pattern matching the sink
	 * format.
	 */
	switch (format->code) {
	case MEDIA_BUS_FMT_SRGGB10_1X10:
	case MEDIA_BUS_FMT_SRGGB12_1X12:
		ccdc_pattern = ccdc_srggb_pattern;
		break;
	case MEDIA_BUS_FMT_SBGGR10_1X10:
	case MEDIA_BUS_FMT_SBGGR12_1X12:
		ccdc_pattern = ccdc_sbggr_pattern;
		break;
	case MEDIA_BUS_FMT_SGBRG10_1X10:
	case MEDIA_BUS_FMT_SGBRG12_1X12:
		ccdc_pattern = ccdc_sgbrg_pattern;
		break;
	default:
		/* Use GRBG */
		ccdc_pattern = ccdc_sgrbg_pattern;
		break;
	}
	ccdc_config_imgattr(ccdc, ccdc_pattern);

	/* Generate VD0 on the last line of the image and VD1 on the
	 * 2/3 height line.
	 */
	isp_reg_writel(isp, ((format->height - 2) << ISPCCDC_VDINT_0_SHIFT) |
		       ((format->height * 2 / 3) << ISPCCDC_VDINT_1_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VDINT);

	/* CCDC_PAD_SOURCE_OF */
	format = &ccdc->formats[CCDC_PAD_SOURCE_OF];
	crop = &ccdc->crop;

	/* The horizontal coordinates are expressed in pixel clock cycles. We
	 * need two cycles per pixel in BT.656 mode, and one cycle per pixel in
	 * SYNC mode regardless of the format as the bridge is enabled for YUV
	 * formats in that case.
	 */
	if (ccdc->bt656) {
		sph = crop->left * 2;
		nph = crop->width * 2 - 1;
	} else {
		sph = crop->left;
		nph = crop->width - 1;
	}

	isp_reg_writel(isp, (sph << ISPCCDC_HORZ_INFO_SPH_SHIFT) |
		       (nph << ISPCCDC_HORZ_INFO_NPH_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_HORZ_INFO);
	isp_reg_writel(isp, (crop->top << ISPCCDC_VERT_START_SLV0_SHIFT) |
		       (crop->top << ISPCCDC_VERT_START_SLV1_SHIFT),
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_START);
	isp_reg_writel(isp, (crop->height - 1) << ISPCCDC_VERT_LINES_NLV_SHIFT,
		       OMAP3_ISP_IOMEM_CCDC, ISPCCDC_VERT_LINES);

	ccdc_config_outlineoffset(ccdc, ccdc->video_out.bpl_value,
				  format->field);

	/* When interleaving fields enable processing of the field input signal.
	 * This will cause the line output control module to apply the field
	 * offset to field 1.
	 */
	if (ccdc->formats[CCDC_PAD_SINK].field == V4L2_FIELD_ALTERNATE &&
	    (format->field == V4L2_FIELD_INTERLACED_TB ||
	     format->field == V4L2_FIELD_INTERLACED_BT))
		syn_mode |= ISPCCDC_SYN_MODE_FLDMODE;

	/* The CCDC outputs data in UYVY order by default. Swap bytes to get
	 * YUYV.
	 */
	if (format->code == MEDIA_BUS_FMT_YUYV8_1X16)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
			    ISPCCDC_CFG_BSWD);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
			    ISPCCDC_CFG_BSWD);

	/* Use PACK8 mode for 1byte per pixel formats. Check for BT.656 mode
	 * explicitly as the driver reports 1X16 instead of 2X8 at the OF pad
	 * for simplicity.
	 */
	if (omap3isp_video_format_info(format->code)->width <= 8 || ccdc->bt656)
		syn_mode |= ISPCCDC_SYN_MODE_PACK8;
	else
		syn_mode &= ~ISPCCDC_SYN_MODE_PACK8;

	isp_reg_writel(isp, syn_mode, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE);

	/* CCDC_PAD_SOURCE_VP */
	ccdc_config_vp(ccdc);

	/* Lens shading correction. */
	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);
	if (ccdc->lsc.request == NULL)
		goto unlock;

	WARN_ON(ccdc->lsc.active);

	/* Get last good LSC configuration. If it is not supported for
	 * the current active resolution discard it.
	 */
	if (ccdc->lsc.active == NULL &&
	    __ccdc_lsc_configure(ccdc, ccdc->lsc.request) == 0) {
		ccdc->lsc.active = ccdc->lsc.request;
	} else {
		list_add_tail(&ccdc->lsc.request->list, &ccdc->lsc.free_queue);
		schedule_work(&ccdc->lsc.table_work);
	}

	ccdc->lsc.request = NULL;

unlock:
	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);

	ccdc_apply_controls(ccdc);
}

/*
 * __ccdc_enable - Set or clear the CCDC enable bit in the PCR register
 * @ccdc: Pointer to ISP CCDC device.
 * @enable: Non-zero to enable the CCDC, zero to disable it.
 *
 * A restart request is silently ignored while a stop has been requested,
 * so an interrupt handler can't re-enable the CCDC behind a stop sequence.
 */
static void __ccdc_enable(struct isp_ccdc_device *ccdc, int enable)
{
	struct isp_device *isp = to_isp_device(ccdc);

	/* Avoid restarting the CCDC when streaming is stopping. */
	if (enable && ccdc->stopping & CCDC_STOP_REQUEST)
		return;

	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_PCR,
			ISPCCDC_PCR_EN, enable ? ISPCCDC_PCR_EN : 0);

	ccdc->running = enable;
}

/*
 * ccdc_disable - Request a CCDC stop and wait for it to complete
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Also releases the pending and active LSC configurations once stopped.
 *
 * Return 0 on success or -ETIMEDOUT if the CCDC hasn't stopped within two
 * seconds.
 */
static int ccdc_disable(struct isp_ccdc_device *ccdc)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ccdc->lock, flags);
	if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS)
		ccdc->stopping = CCDC_STOP_REQUEST;
	if (!ccdc->running)
		ccdc->stopping = CCDC_STOP_FINISHED;
	spin_unlock_irqrestore(&ccdc->lock, flags);

	/* The stop state machine is driven by the VD0/VD1/LSC_DONE interrupt
	 * handlers; wait for them to report completion.
	 */
	ret = wait_event_timeout(ccdc->wait,
				 ccdc->stopping == CCDC_STOP_FINISHED,
				 msecs_to_jiffies(2000));
	if (ret == 0) {
		ret = -ETIMEDOUT;
		dev_warn(to_device(ccdc), "CCDC stop timeout!\n");
	}

	omap3isp_sbl_disable(to_isp_device(ccdc), OMAP3_ISP_SBL_CCDC_LSC_READ);

	mutex_lock(&ccdc->ioctl_lock);
	ccdc_lsc_free_request(ccdc, ccdc->lsc.request);
	ccdc->lsc.request = ccdc->lsc.active;
	ccdc->lsc.active = NULL;
	cancel_work_sync(&ccdc->lsc.table_work);
	ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
	mutex_unlock(&ccdc->ioctl_lock);

	ccdc->stopping = CCDC_STOP_NOT_REQUESTED;

	return ret > 0 ? 0 : ret;
}

/*
 * ccdc_enable - Enable the CCDC, starting LSC first when it is configured.
 * @ccdc: Pointer to ISP CCDC device.
 */
static void ccdc_enable(struct isp_ccdc_device *ccdc)
{
	if (ccdc_lsc_is_configured(ccdc))
		__ccdc_lsc_enable(ccdc, 1);
	__ccdc_enable(ccdc, 1);
}

/* -----------------------------------------------------------------------------
 * Interrupt handling
 */

/*
 * ccdc_sbl_busy - Poll idle state of CCDC and related SBL memory write bits
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Returns zero if the CCDC is idle and the image has been written to
 * memory, too.
 */
static int ccdc_sbl_busy(struct isp_ccdc_device *ccdc)
{
	struct isp_device *isp = to_isp_device(ccdc);

	/* OR the CCDC busy flag with the DATA_READY bit of all four SBL CCDC
	 * write channels: any non-zero bit means data is still in flight.
	 */
	return omap3isp_ccdc_busy(ccdc)
		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL,
				 ISPSBL_CCDC_WR_0) &
		   ISPSBL_CCDC_WR_0_DATA_READY)
		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL,
				 ISPSBL_CCDC_WR_1) &
		   ISPSBL_CCDC_WR_0_DATA_READY)
		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL,
				 ISPSBL_CCDC_WR_2) &
		   ISPSBL_CCDC_WR_0_DATA_READY)
		| (isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL,
				 ISPSBL_CCDC_WR_3) &
		   ISPSBL_CCDC_WR_0_DATA_READY);
}

/*
 * ccdc_sbl_wait_idle - Wait until the CCDC and related SBL are idle
 * @ccdc: Pointer to ISP CCDC device.
 * @max_wait: Max retry count in us for wait for idle/busy transition.
 *
 * Return 0 once idle, or -EBUSY if still busy after @max_wait microseconds
 * (@max_wait == 0 selects the default of 10 ms).
 */
static int ccdc_sbl_wait_idle(struct isp_ccdc_device *ccdc,
			      unsigned int max_wait)
{
	unsigned int wait = 0;

	if (max_wait == 0)
		max_wait = 10000; /* 10 ms */

	for (wait = 0; wait <= max_wait; wait++) {
		if (!ccdc_sbl_busy(ccdc))
			return 0;

		rmb();
		udelay(1);
	}

	return -EBUSY;
}

/* ccdc_handle_stopping - Handle CCDC and/or LSC stopping sequence
 * @ccdc: Pointer to ISP CCDC device.
 * @event: Pointing which event trigger handler
 *
 * Return 1 when the event and stopping request combination is satisfied,
 * zero otherwise.
 */
static int ccdc_handle_stopping(struct isp_ccdc_device *ccdc, u32 event)
{
	int rval = 0;

	/* Combine the low two bits of the stopping state with the event to
	 * drive the stop state machine.
	 */
	switch ((ccdc->stopping & 3) | event) {
	case CCDC_STOP_REQUEST | CCDC_EVENT_VD1:
		if (ccdc->lsc.state != LSC_STATE_STOPPED)
			__ccdc_lsc_enable(ccdc, 0);
		__ccdc_enable(ccdc, 0);
		ccdc->stopping = CCDC_STOP_EXECUTED;
		return 1;

	case CCDC_STOP_EXECUTED | CCDC_EVENT_VD0:
		ccdc->stopping |= CCDC_STOP_CCDC_FINISHED;
		if (ccdc->lsc.state == LSC_STATE_STOPPED)
			ccdc->stopping |= CCDC_STOP_LSC_FINISHED;
		rval = 1;
		break;

	case CCDC_STOP_EXECUTED | CCDC_EVENT_LSC_DONE:
		ccdc->stopping |= CCDC_STOP_LSC_FINISHED;
		rval = 1;
		break;

	case CCDC_STOP_EXECUTED | CCDC_EVENT_VD1:
		return 1;
	}

	if (ccdc->stopping == CCDC_STOP_FINISHED) {
		wake_up(&ccdc->wait);
		rval = 1;
	}

	return rval;
}

/*
 * ccdc_hs_vs_isr - Handle the start of frame (HS/VS) event
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Increments the pipeline frame number and queues a V4L2 FRAME_SYNC event
 * on the subdev device node.
 */
static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
	struct video_device *vdev = ccdc->subdev.devnode;
	struct v4l2_event event;

	/* Frame number propagation */
	atomic_inc(&pipe->frame_number);

	memset(&event, 0, sizeof(event));
	event.type = V4L2_EVENT_FRAME_SYNC;
	event.u.frame_sync.frame_sequence = atomic_read(&pipe->frame_number);

	v4l2_event_queue(vdev, &event);
}

/*
 * ccdc_lsc_isr - Handle LSC events
 * @ccdc: Pointer to ISP CCDC device.
 * @events: LSC events
 */
static void ccdc_lsc_isr(struct isp_ccdc_device *ccdc, u32 events)
{
	unsigned long flags;

	if (events & IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ) {
		struct isp_pipeline *pipe =
			to_isp_pipeline(&ccdc->subdev.entity);

		ccdc_lsc_error_handler(ccdc);
		pipe->error = true;
		dev_dbg(to_device(ccdc), "lsc prefetch error\n");
	}

	if (!(events & IRQ0STATUS_CCDC_LSC_DONE_IRQ))
		return;

	/* LSC_DONE interrupt occur, there are two cases
	 * 1. stopping for reconfiguration
	 * 2. stopping because of STREAM OFF command
	 */
	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);

	if (ccdc->lsc.state == LSC_STATE_STOPPING)
		ccdc->lsc.state = LSC_STATE_STOPPED;

	if (ccdc_handle_stopping(ccdc, CCDC_EVENT_LSC_DONE))
		goto done;

	if (ccdc->lsc.state != LSC_STATE_RECONFIG)
		goto done;

	/* LSC is in STOPPING state, change to the new state */
	ccdc->lsc.state = LSC_STATE_STOPPED;

	/* This is an exception. Start of frame and LSC_DONE interrupt
	 * have been received on the same time. Skip this event and wait
	 * for better times.
	 */
	if (events & IRQ0STATUS_HS_VS_IRQ)
		goto done;

	/* The LSC engine is stopped at this point. Enable it if there's a
	 * pending request.
	 */
	if (ccdc->lsc.request == NULL)
		goto done;

	ccdc_lsc_enable(ccdc);

done:
	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
}

/*
 * Check whether the CCDC has captured all fields necessary to complete the
 * buffer.
 */
static bool ccdc_has_all_fields(struct isp_ccdc_device *ccdc)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
	struct isp_device *isp = to_isp_device(ccdc);
	enum v4l2_field of_field = ccdc->formats[CCDC_PAD_SOURCE_OF].field;
	enum v4l2_field field;

	/* When the input is progressive fields don't matter. */
	if (of_field == V4L2_FIELD_NONE)
		return true;

	/* Read the current field identifier. */
	field = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_SYN_MODE)
	      & ISPCCDC_SYN_MODE_FLDSTAT
	      ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;

	/* When capturing fields in alternate order just store the current field
	 * identifier in the pipeline.
	 */
	if (of_field == V4L2_FIELD_ALTERNATE) {
		pipe->field = field;
		return true;
	}

	/* The format is interlaced. Make sure we've captured both fields. */
	ccdc->fields |= field == V4L2_FIELD_BOTTOM
		      ? CCDC_FIELD_BOTTOM : CCDC_FIELD_TOP;

	if (ccdc->fields != CCDC_FIELD_BOTH)
		return false;

	/* Verify that the field just captured corresponds to the last field
	 * needed based on the desired field order.
	 */
	if ((of_field == V4L2_FIELD_INTERLACED_TB && field == V4L2_FIELD_TOP) ||
	    (of_field == V4L2_FIELD_INTERLACED_BT && field == V4L2_FIELD_BOTTOM))
		return false;

	/* The buffer can be completed, reset the fields for the next buffer. */
	ccdc->fields = 0;

	return true;
}

/*
 * ccdc_isr_buffer - Complete the current buffer and set up the next one
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Return 1 when the CCDC should be re-enabled for the next frame, zero
 * otherwise.
 */
static int ccdc_isr_buffer(struct isp_ccdc_device *ccdc)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccdc->subdev.entity);
	struct isp_device *isp = to_isp_device(ccdc);
	struct isp_buffer *buffer;

	/* The CCDC generates VD0 interrupts even when disabled (the datasheet
	 * doesn't explicitly state if that's supposed to happen or not, so it
	 * can be considered as a hardware bug or as a feature, but we have to
	 * deal with it anyway). Disabling the CCDC when no buffer is available
	 * would thus not be enough, we need to handle the situation explicitly.
	 */
	if (list_empty(&ccdc->video_out.dmaqueue))
		return 0;

	/* We're in continuous mode, and memory writes were disabled due to a
	 * buffer underrun. Re-enable them now that we have a buffer. The buffer
	 * address has been set in ccdc_video_queue.
	 */
	if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && ccdc->underrun) {
		ccdc->underrun = 0;
		return 1;
	}

	/* Wait for the CCDC to become idle. */
	if (ccdc_sbl_wait_idle(ccdc, 1000)) {
		dev_info(isp->dev, "CCDC won't become idle!\n");
		media_entity_enum_set(&isp->crashed, &ccdc->subdev.entity);
		omap3isp_pipeline_cancel_stream(pipe);
		return 0;
	}

	/* Don't restart CCDC if we're just about to stop streaming. */
	if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
	    ccdc->stopping & CCDC_STOP_REQUEST)
		return 0;

	if (!ccdc_has_all_fields(ccdc))
		return 1;

	buffer = omap3isp_video_buffer_next(&ccdc->video_out);
	if (buffer != NULL)
		ccdc_set_outaddr(ccdc, buffer->dma);

	pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;

	if (ccdc->state == ISP_PIPELINE_STREAM_SINGLESHOT &&
	    isp_pipeline_ready(pipe))
		omap3isp_pipeline_set_stream(pipe,
					ISP_PIPELINE_STREAM_SINGLESHOT);

	return buffer != NULL;
}

/*
 * ccdc_vd0_isr - Handle VD0 event
 * @ccdc: Pointer to ISP CCDC device.
 *
 * Executes LSC deferred enablement before next frame starts.
 */
static void ccdc_vd0_isr(struct isp_ccdc_device *ccdc)
{
	unsigned long flags;
	int restart = 0;

	/* In BT.656 mode the CCDC doesn't generate an HS/VS interrupt. We thus
	 * need to increment the frame counter here.
	 */
	if (ccdc->bt656) {
		struct isp_pipeline *pipe =
			to_isp_pipeline(&ccdc->subdev.entity);

		atomic_inc(&pipe->frame_number);
	}

	/* Emulate a VD1 interrupt for BT.656 mode, as we can't stop the CCDC in
	 * the VD1 interrupt handler in that mode without risking a CCDC stall
	 * if a short frame is received.
	 */
	if (ccdc->bt656) {
		spin_lock_irqsave(&ccdc->lock, flags);
		if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
		    ccdc->output & CCDC_OUTPUT_MEMORY) {
			if (ccdc->lsc.state != LSC_STATE_STOPPED)
				__ccdc_lsc_enable(ccdc, 0);
			__ccdc_enable(ccdc, 0);
		}
		ccdc_handle_stopping(ccdc, CCDC_EVENT_VD1);
		spin_unlock_irqrestore(&ccdc->lock, flags);
	}

	spin_lock_irqsave(&ccdc->lock, flags);
	if (ccdc_handle_stopping(ccdc, CCDC_EVENT_VD0)) {
		spin_unlock_irqrestore(&ccdc->lock, flags);
		return;
	}

	if (ccdc->output & CCDC_OUTPUT_MEMORY)
		restart = ccdc_isr_buffer(ccdc);

	if (!ccdc->shadow_update)
		ccdc_apply_controls(ccdc);
	spin_unlock_irqrestore(&ccdc->lock, flags);

	if (restart)
		ccdc_enable(ccdc);
}

/*
 * ccdc_vd1_isr - Handle VD1 event
 * @ccdc: Pointer to ISP CCDC device.
 */
static void ccdc_vd1_isr(struct isp_ccdc_device *ccdc)
{
	unsigned long flags;

	/* In BT.656 mode the synchronization signals are generated by the CCDC
	 * from the embedded sync codes. The VD0 and VD1 interrupts are thus
	 * only triggered when the CCDC is enabled, unlike external sync mode
	 * where the line counter runs even when the CCDC is stopped. We can't
	 * disable the CCDC at VD1 time, as no VD0 interrupt would be generated
	 * for a short frame, which would result in the CCDC being stopped and
	 * no VD interrupt generated anymore. The CCDC is stopped from the VD0
	 * interrupt handler instead for BT.656.
	 */
	if (ccdc->bt656)
		return;

	spin_lock_irqsave(&ccdc->lsc.req_lock, flags);

	/*
	 * Depending on the CCDC pipeline state, CCDC stopping should be
	 * handled differently. In SINGLESHOT we emulate an internal CCDC
	 * stopping because the CCDC hw works only in continuous mode.
	 * When CONTINUOUS pipeline state is used and the CCDC writes it's
	 * data to memory the CCDC and LSC are stopped immediately but
	 * without change the CCDC stopping state machine. The CCDC
	 * stopping state machine should be used only when user request
	 * for stopping is received (SINGLESHOT is an exception).
	 */
	switch (ccdc->state) {
	case ISP_PIPELINE_STREAM_SINGLESHOT:
		ccdc->stopping = CCDC_STOP_REQUEST;
		break;

	case ISP_PIPELINE_STREAM_CONTINUOUS:
		if (ccdc->output & CCDC_OUTPUT_MEMORY) {
			if (ccdc->lsc.state != LSC_STATE_STOPPED)
				__ccdc_lsc_enable(ccdc, 0);
			__ccdc_enable(ccdc, 0);
		}
		break;

	case ISP_PIPELINE_STREAM_STOPPED:
		break;
	}

	if (ccdc_handle_stopping(ccdc, CCDC_EVENT_VD1))
		goto done;

	if (ccdc->lsc.request == NULL)
		goto done;

	/*
	 * LSC need to be reconfigured. Stop it here and on next LSC_DONE IRQ
	 * do the appropriate changes in registers
	 */
	if (ccdc->lsc.state == LSC_STATE_RUNNING) {
		__ccdc_lsc_enable(ccdc, 0);
		ccdc->lsc.state = LSC_STATE_RECONFIG;
		goto done;
	}

	/* LSC has been in STOPPED state, enable it */
	if (ccdc->lsc.state == LSC_STATE_STOPPED)
		ccdc_lsc_enable(ccdc);

done:
	spin_unlock_irqrestore(&ccdc->lsc.req_lock, flags);
}

/*
 * omap3isp_ccdc_isr - Configure CCDC during interframe time.
 * @ccdc: Pointer to ISP CCDC device.
 * @events: CCDC events
 */
int omap3isp_ccdc_isr(struct isp_ccdc_device *ccdc, u32 events)
{
	if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED)
		return 0;

	if (events & IRQ0STATUS_CCDC_VD1_IRQ)
		ccdc_vd1_isr(ccdc);

	ccdc_lsc_isr(ccdc, events);

	if (events & IRQ0STATUS_CCDC_VD0_IRQ)
		ccdc_vd0_isr(ccdc);

	if (events & IRQ0STATUS_HS_VS_IRQ)
		ccdc_hs_vs_isr(ccdc);

	return 0;
}

/* -----------------------------------------------------------------------------
 * ISP video operations
 */

/*
 * ccdc_video_queue - Queue a buffer for CCDC memory output
 * @video: The ISP video node.
 * @buffer: The buffer to program as the next DMA target.
 *
 * Return 0 on success or -ENODEV when the CCDC is not writing to memory.
 */
static int ccdc_video_queue(struct isp_video *video, struct isp_buffer *buffer)
{
	struct isp_ccdc_device *ccdc = &video->isp->isp_ccdc;
	unsigned long flags;
	bool restart = false;

	if (!(ccdc->output & CCDC_OUTPUT_MEMORY))
		return -ENODEV;

	ccdc_set_outaddr(ccdc, buffer->dma);

	/* We now have a buffer queued on the output, restart the pipeline
	 * on the next CCDC interrupt if running in continuous mode (or when
	 * starting the stream) in external sync mode, or immediately in BT.656
	 * sync mode as no CCDC interrupt is generated when the CCDC is stopped
	 * in that case.
	 */
	spin_lock_irqsave(&ccdc->lock, flags);
	if (ccdc->state == ISP_PIPELINE_STREAM_CONTINUOUS && !ccdc->running &&
	    ccdc->bt656)
		restart = true;
	else
		ccdc->underrun = 1;
	spin_unlock_irqrestore(&ccdc->lock, flags);

	if (restart)
		ccdc_enable(ccdc);

	return 0;
}

static const struct isp_video_operations ccdc_video_ops = {
	.queue = ccdc_video_queue,
};

/* -----------------------------------------------------------------------------
 * V4L2 subdev operations
 */

/*
 * ccdc_ioctl - CCDC module private ioctl's
 * @sd: ISP CCDC V4L2 subdevice
 * @cmd: ioctl command
 * @arg: ioctl argument
 *
 * Return 0 on success or a negative error code otherwise.
 */
static long ccdc_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	int ret;

	switch (cmd) {
	case VIDIOC_OMAP3ISP_CCDC_CFG:
		/* Serialize configuration against stream stop/start. */
		mutex_lock(&ccdc->ioctl_lock);
		ret = ccdc_config(ccdc, arg);
		mutex_unlock(&ccdc->ioctl_lock);
		break;

	default:
		return -ENOIOCTLCMD;
	}

	return ret;
}

/*
 * ccdc_subscribe_event - Subscribe to the CCDC frame sync event
 * @sd: ISP CCDC V4L2 subdevice
 * @fh: V4L2 file handle
 * @sub: Event subscription; only V4L2_EVENT_FRAME_SYNC with id 0 is accepted.
 */
static int ccdc_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_FRAME_SYNC)
		return -EINVAL;

	/* line number is zero at frame start */
	if (sub->id != 0)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, OMAP3ISP_CCDC_NEVENTS, NULL);
}

/* ccdc_unsubscribe_event - Remove a previously registered event subscription */
static int ccdc_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}

/*
 * ccdc_set_stream - Enable/Disable streaming on the CCDC module
 * @sd: ISP CCDC V4L2 subdevice
 * @enable: Enable/disable stream
 *
 * When writing to memory, the CCDC hardware can't be enabled without a memory
 * buffer to write to. As the s_stream operation is called in response to a
 * STREAMON call without any buffer queued yet, just update the enabled field
 * and return immediately. The CCDC will be enabled in ccdc_isr_buffer().
 *
 * When not writing to memory enable the CCDC immediately.
 */
static int ccdc_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct isp_device *isp = to_isp_device(ccdc);
	int ret = 0;

	if (ccdc->state == ISP_PIPELINE_STREAM_STOPPED) {
		if (enable == ISP_PIPELINE_STREAM_STOPPED)
			return 0;

		/* First start: clock the module and program the hardware. */
		omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_CCDC);
		isp_reg_set(isp, OMAP3_ISP_IOMEM_CCDC, ISPCCDC_CFG,
			    ISPCCDC_CFG_VDLC);

		ccdc_configure(ccdc);

		ccdc_print_status(ccdc);
	}

	switch (enable) {
	case ISP_PIPELINE_STREAM_CONTINUOUS:
		if (ccdc->output & CCDC_OUTPUT_MEMORY)
			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);

		if (ccdc->underrun || !(ccdc->output & CCDC_OUTPUT_MEMORY))
			ccdc_enable(ccdc);

		ccdc->underrun = 0;
		break;

	case ISP_PIPELINE_STREAM_SINGLESHOT:
		if (ccdc->output & CCDC_OUTPUT_MEMORY &&
		    ccdc->state != ISP_PIPELINE_STREAM_SINGLESHOT)
			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CCDC_WRITE);

		ccdc_enable(ccdc);
		break;

	case ISP_PIPELINE_STREAM_STOPPED:
		ret = ccdc_disable(ccdc);
		if (ccdc->output & CCDC_OUTPUT_MEMORY)
			omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CCDC_WRITE);
		omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_CCDC);
		ccdc->underrun = 0;
		break;
	}

	ccdc->state = enable;
	return ret;
}

/*
 * __ccdc_get_format - Return the TRY or ACTIVE format for the given pad.
 */
static struct v4l2_mbus_framefmt *
__ccdc_get_format(struct isp_ccdc_device *ccdc,
		  struct v4l2_subdev_state *sd_state,
		  unsigned int pad, enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(&ccdc->subdev, sd_state,
						  pad);
	else
		return &ccdc->formats[pad];
}

/*
 * __ccdc_get_crop - Return the TRY or ACTIVE crop rectangle (OF source pad).
 */
static struct v4l2_rect *
__ccdc_get_crop(struct isp_ccdc_device *ccdc,
		struct v4l2_subdev_state *sd_state,
		enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_crop(&ccdc->subdev, sd_state,
						CCDC_PAD_SOURCE_OF);
	else
		return &ccdc->crop;
}

/*
 * ccdc_try_format - Try video format on a pad
 * @ccdc: ISP CCDC device
 * @sd_state: V4L2 subdev state
 * @pad: Pad number
 * @fmt: Format
 * @which: TRY or ACTIVE format
 */
static void ccdc_try_format(struct isp_ccdc_device *ccdc,
			    struct v4l2_subdev_state *sd_state,
			    unsigned int pad,
			    struct v4l2_mbus_framefmt *fmt,
			    enum v4l2_subdev_format_whence which)
{
	const struct isp_format_info *info;
	u32 pixelcode;
	unsigned int width = fmt->width;
	unsigned int height = fmt->height;
	struct v4l2_rect *crop;
	enum v4l2_field field;
	unsigned int i;

	switch (pad) {
	case CCDC_PAD_SINK:
		for (i = 0; i < ARRAY_SIZE(ccdc_fmts); i++) {
			if (fmt->code == ccdc_fmts[i])
				break;
		}

		/* If not found, use SGRBG10 as default */
		if (i >= ARRAY_SIZE(ccdc_fmts))
			fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;

		/* Clamp the input size. */
		fmt->width = clamp_t(u32, width, 32, 4096);
		fmt->height = clamp_t(u32, height, 32, 4096);

		/* Default to progressive field order. */
		if (fmt->field == V4L2_FIELD_ANY)
			fmt->field = V4L2_FIELD_NONE;

		break;

	case CCDC_PAD_SOURCE_OF:
		pixelcode = fmt->code;
		field = fmt->field;
		*fmt = *__ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK,
					  which);

		/* In SYNC mode the bridge converts YUV formats from 2X8 to
		 * 1X16. In BT.656 no such conversion occurs. As we don't know
		 * at this point whether the source will use SYNC or BT.656 mode
		 * let's pretend the conversion always occurs. The CCDC will be
		 * configured to pack bytes in BT.656, hiding the inaccuracy.
		 * In all cases bytes can be swapped.
		 */
		if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
		    fmt->code == MEDIA_BUS_FMT_UYVY8_2X8) {
			/* Use the user requested format if YUV. */
			if (pixelcode == MEDIA_BUS_FMT_YUYV8_2X8 ||
			    pixelcode == MEDIA_BUS_FMT_UYVY8_2X8 ||
			    pixelcode == MEDIA_BUS_FMT_YUYV8_1X16 ||
			    pixelcode == MEDIA_BUS_FMT_UYVY8_1X16)
				fmt->code = pixelcode;

			if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8)
				fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
			else if (fmt->code == MEDIA_BUS_FMT_UYVY8_2X8)
				fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
		}

		/* Hardcode the output size to the crop rectangle size. */
		crop = __ccdc_get_crop(ccdc, sd_state, which);
		fmt->width = crop->width;
		fmt->height = crop->height;

		/* When input format is interlaced with alternating fields the
		 * CCDC can interleave the fields.
		 */
		if (fmt->field == V4L2_FIELD_ALTERNATE &&
		    (field == V4L2_FIELD_INTERLACED_TB ||
		     field == V4L2_FIELD_INTERLACED_BT)) {
			fmt->field = field;
			fmt->height *= 2;
		}

		break;

	case CCDC_PAD_SOURCE_VP:
		*fmt = *__ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK,
					  which);

		/* The video port interface truncates the data to 10 bits. */
		info = omap3isp_video_format_info(fmt->code);
		fmt->code = info->truncated;

		/* YUV formats are not supported by the video port. */
		if (fmt->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
		    fmt->code == MEDIA_BUS_FMT_UYVY8_2X8)
			fmt->code = 0;

		/* The number of lines that can be clocked out from the video
		 * port output must be at least one line less than the number
		 * of input lines.
		 */
		fmt->width = clamp_t(u32, width, 32, fmt->width);
		fmt->height = clamp_t(u32, height, 32, fmt->height - 1);
		break;
	}

	/* Data is written to memory unpacked, each 10-bit or 12-bit pixel is
	 * stored on 2 bytes.
	 */
	fmt->colorspace = V4L2_COLORSPACE_SRGB;
}

/*
 * ccdc_try_crop - Validate a crop rectangle
 * @ccdc: ISP CCDC device
 * @sink: format on the sink pad
 * @crop: crop rectangle to be validated
 */
static void ccdc_try_crop(struct isp_ccdc_device *ccdc,
			  const struct v4l2_mbus_framefmt *sink,
			  struct v4l2_rect *crop)
{
	const struct isp_format_info *info;
	unsigned int max_width;

	/* For Bayer formats, restrict left/top and width/height to even values
	 * to keep the Bayer pattern.
	 */
	info = omap3isp_video_format_info(sink->code);
	if (info->flavor != MEDIA_BUS_FMT_Y8_1X8) {
		crop->left &= ~1;
		crop->top &= ~1;
	}

	crop->left = clamp_t(u32, crop->left, 0,
			     sink->width - CCDC_MIN_WIDTH);
	crop->top = clamp_t(u32, crop->top, 0,
			    sink->height - CCDC_MIN_HEIGHT);

	/* The data formatter truncates the number of horizontal output pixels
	 * to a multiple of 16. To avoid clipping data, allow callers to request
	 * an output size bigger than the input size up to the nearest multiple
	 * of 16.
	 */
	max_width = (sink->width - crop->left + 15) & ~15;
	crop->width = clamp_t(u32, crop->width, CCDC_MIN_WIDTH, max_width)
		    & ~15;
	crop->height = clamp_t(u32, crop->height, CCDC_MIN_HEIGHT,
			       sink->height - crop->top);

	/* Odd width/height values don't make sense for Bayer formats. */
	if (info->flavor != MEDIA_BUS_FMT_Y8_1X8) {
		crop->width &= ~1;
		crop->height &= ~1;
	}
}

/*
 * ccdc_enum_mbus_code - Handle pixel format enumeration
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @code : pointer to v4l2_subdev_mbus_code_enum structure
 * return -EINVAL or zero on success
 */
static int ccdc_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	switch (code->pad) {
	case CCDC_PAD_SINK:
		if (code->index >= ARRAY_SIZE(ccdc_fmts))
			return -EINVAL;

		code->code = ccdc_fmts[code->index];
		break;

	case CCDC_PAD_SOURCE_OF:
		format = __ccdc_get_format(ccdc, sd_state, code->pad,
					   code->which);

		if (format->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
		    format->code == MEDIA_BUS_FMT_UYVY8_2X8) {
			/* In YUV mode the CCDC can swap bytes. */
			if (code->index == 0)
				code->code = MEDIA_BUS_FMT_YUYV8_1X16;
			else if (code->index == 1)
				code->code = MEDIA_BUS_FMT_UYVY8_1X16;
			else
				return -EINVAL;
		} else {
			/* In raw mode, no configurable format conversion is
			 * available.
			 */
			if (code->index == 0)
				code->code = format->code;
			else
				return -EINVAL;
		}
		break;

	case CCDC_PAD_SOURCE_VP:
		/* The CCDC supports no configurable format conversion
		 * compatible with the video port. Enumerate a single output
		 * format code.
		 */
		if (code->index != 0)
			return -EINVAL;

		format = __ccdc_get_format(ccdc, sd_state, code->pad,
					   code->which);

		/* A pixel code equal to 0 means that the video port doesn't
		 * support the input format. Don't enumerate any pixel code.
		 */
		if (format->code == 0)
			return -EINVAL;

		code->code = format->code;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * ccdc_enum_frame_size - Enumerate the supported frame size range
 * @sd: ISP CCDC V4L2 subdevice
 * @sd_state: V4L2 subdev state
 * @fse: Frame size enumeration; only index 0 is valid.
 */
static int ccdc_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *sd_state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	/* Probe the minimum size by trying a 1x1 format. */
	format.code = fse->code;
	format.width = 1;
	format.height = 1;
	ccdc_try_format(ccdc, sd_state, fse->pad, &format, fse->which);
	fse->min_width = format.width;
	fse->min_height = format.height;

	if (format.code != fse->code)
		return -EINVAL;

	/* Probe the maximum size by trying an oversized format. */
	format.code = fse->code;
	format.width = -1;
	format.height = -1;
	ccdc_try_format(ccdc, sd_state, fse->pad, &format, fse->which);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}

/*
 * ccdc_get_selection - Retrieve a selection rectangle on a pad
 * @sd: ISP CCDC V4L2 subdevice
 * @sd_state: V4L2 subdev state
 * @sel: Selection rectangle
 *
 * The only supported rectangles are the crop rectangles on the output formatter
 * source pad.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int ccdc_get_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_selection *sel)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	if (sel->pad != CCDC_PAD_SOURCE_OF)
		return -EINVAL;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP_BOUNDS:
		/* Start from a maximal rectangle and let ccdc_try_crop shrink
		 * it to what the sink format allows.
		 */
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = INT_MAX;
		sel->r.height = INT_MAX;

		format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK,
					   sel->which);
		ccdc_try_crop(ccdc, format, &sel->r);
		break;

	case V4L2_SEL_TGT_CROP:
		sel->r = *__ccdc_get_crop(ccdc, sd_state, sel->which);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * ccdc_set_selection - Set a selection rectangle on a pad
 * @sd: ISP CCDC V4L2 subdevice
 * @sd_state: V4L2 subdev state
 * @sel: Selection rectangle
 *
 * The only supported rectangle is the actual crop rectangle on the output
 * formatter source pad.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int ccdc_set_selection(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_selection *sel)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	if (sel->target != V4L2_SEL_TGT_CROP ||
	    sel->pad != CCDC_PAD_SOURCE_OF)
		return -EINVAL;

	/* The crop rectangle can't be changed while streaming. */
	if (ccdc->state != ISP_PIPELINE_STREAM_STOPPED)
		return -EBUSY;

	/* Modifying the crop rectangle always changes the format on the source
	 * pad. If the KEEP_CONFIG flag is set, just return the current crop
	 * rectangle.
	 */
	if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
		sel->r = *__ccdc_get_crop(ccdc, sd_state, sel->which);
		return 0;
	}

	format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SINK, sel->which);
	ccdc_try_crop(ccdc, format, &sel->r);
	*__ccdc_get_crop(ccdc, sd_state, sel->which) = sel->r;

	/* Update the source format. */
	format = __ccdc_get_format(ccdc, sd_state, CCDC_PAD_SOURCE_OF,
				   sel->which);
	ccdc_try_format(ccdc, sd_state, CCDC_PAD_SOURCE_OF, format,
			sel->which);

	return 0;
}

/*
 * ccdc_get_format - Retrieve the video format on a pad
 * @sd : ISP CCDC V4L2 subdevice
 * @sd_state: V4L2 subdev state
 * @fmt: Format
 *
 * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
 * to the format type.
 */
static int ccdc_get_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __ccdc_get_format(ccdc, sd_state, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	fmt->format = *format;
	return 0;
}

/*
 * ccdc_set_format - Set the video format on a pad
 * @sd : ISP CCDC V4L2 subdevice
 * @sd_state: V4L2 subdev state
 * @fmt: Format
 *
 * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
 * to the format type.
 */
static int ccdc_set_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;
	struct v4l2_rect *crop;

	format = __ccdc_get_format(ccdc, sd_state, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	ccdc_try_format(ccdc, sd_state, fmt->pad, &fmt->format, fmt->which);
	*format = fmt->format;

	/* Propagate the format from sink to source */
	if (fmt->pad == CCDC_PAD_SINK) {
		/* Reset the crop rectangle. */
		crop = __ccdc_get_crop(ccdc, sd_state, fmt->which);
		crop->left = 0;
		crop->top = 0;
		crop->width = fmt->format.width;
		crop->height = fmt->format.height;

		ccdc_try_crop(ccdc, &fmt->format, crop);

		/* Update the source formats.
		 */
		format = __ccdc_get_format(ccdc, sd_state,
					   CCDC_PAD_SOURCE_OF, fmt->which);
		*format = fmt->format;
		ccdc_try_format(ccdc, sd_state, CCDC_PAD_SOURCE_OF, format,
				fmt->which);

		format = __ccdc_get_format(ccdc, sd_state,
					   CCDC_PAD_SOURCE_VP, fmt->which);
		*format = fmt->format;
		ccdc_try_format(ccdc, sd_state, CCDC_PAD_SOURCE_VP, format,
				fmt->which);
	}

	return 0;
}

/*
 * Decide whether desired output pixel code can be obtained with
 * the lane shifter by shifting the input pixel code.
 * @in: input pixelcode to shifter
 * @out: output pixelcode from shifter
 * @additional_shift: # of bits the sensor's LSB is offset from CAMEXT[0]
 *
 * return true if the combination is possible
 * return false otherwise
 */
static bool ccdc_is_shiftable(u32 in, u32 out, unsigned int additional_shift)
{
	const struct isp_format_info *in_info, *out_info;

	if (in == out)
		return true;

	in_info = omap3isp_video_format_info(in);
	out_info = omap3isp_video_format_info(out);

	if ((in_info->flavor == 0) || (out_info->flavor == 0))
		return false;

	if (in_info->flavor != out_info->flavor)
		return false;

	/* The lane shifter can drop at most 6 bits. */
	return in_info->width - out_info->width + additional_shift <= 6;
}

/*
 * ccdc_link_validate - Validate a link to the CCDC sink pad
 * @sd: ISP CCDC V4L2 subdevice
 * @link: Link being validated
 * @source_fmt: Format on the source end of the link
 * @sink_fmt: Format on the sink end of the link
 *
 * Return 0 when the two link ends match, taking the lane shifter into
 * account, or -EPIPE otherwise.
 */
static int ccdc_link_validate(struct v4l2_subdev *sd,
			      struct media_link *link,
			      struct v4l2_subdev_format *source_fmt,
			      struct v4l2_subdev_format *sink_fmt)
{
	struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd);
	unsigned long parallel_shift;

	/* Check if the two ends match */
	if (source_fmt->format.width != sink_fmt->format.width ||
	    source_fmt->format.height != sink_fmt->format.height)
		return -EPIPE;

	/* We've got a parallel sensor here. */
	if (ccdc->input == CCDC_INPUT_PARALLEL) {
		struct v4l2_subdev *sd =
			media_entity_to_v4l2_subdev(link->source->entity);
		struct isp_bus_cfg *bus_cfg;

		bus_cfg = v4l2_subdev_to_bus_cfg(sd);
		if (WARN_ON(!bus_cfg))
			return -EPIPE;
		parallel_shift = bus_cfg->bus.parallel.data_lane_shift;
	} else {
		parallel_shift = 0;
	}

	/* Lane shifter may be used to drop bits on CCDC sink pad */
	if (!ccdc_is_shiftable(source_fmt->format.code,
			       sink_fmt->format.code, parallel_shift))
		return -EPIPE;

	return 0;
}

/*
 * ccdc_init_formats - Initialize formats on all pads
 * @sd: ISP CCDC V4L2 subdevice
 * @fh: V4L2 subdev file handle
 *
 * Initialize all pad formats with default values. If fh is not NULL, try
 * formats are initialized on the file handle. Otherwise active formats are
 * initialized on the device.
 */
static int ccdc_init_formats(struct v4l2_subdev *sd,
			     struct v4l2_subdev_fh *fh)
{
	struct v4l2_subdev_format format;

	memset(&format, 0, sizeof(format));
	format.pad = CCDC_PAD_SINK;
	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
	format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
	format.format.width = 4096;
	format.format.height = 4096;
	ccdc_set_format(sd, fh ?
fh->state : NULL, &format); return 0; } /* V4L2 subdev core operations */ static const struct v4l2_subdev_core_ops ccdc_v4l2_core_ops = { .ioctl = ccdc_ioctl, .subscribe_event = ccdc_subscribe_event, .unsubscribe_event = ccdc_unsubscribe_event, }; /* V4L2 subdev video operations */ static const struct v4l2_subdev_video_ops ccdc_v4l2_video_ops = { .s_stream = ccdc_set_stream, }; /* V4L2 subdev pad operations */ static const struct v4l2_subdev_pad_ops ccdc_v4l2_pad_ops = { .enum_mbus_code = ccdc_enum_mbus_code, .enum_frame_size = ccdc_enum_frame_size, .get_fmt = ccdc_get_format, .set_fmt = ccdc_set_format, .get_selection = ccdc_get_selection, .set_selection = ccdc_set_selection, .link_validate = ccdc_link_validate, }; /* V4L2 subdev operations */ static const struct v4l2_subdev_ops ccdc_v4l2_ops = { .core = &ccdc_v4l2_core_ops, .video = &ccdc_v4l2_video_ops, .pad = &ccdc_v4l2_pad_ops, }; /* V4L2 subdev internal operations */ static const struct v4l2_subdev_internal_ops ccdc_v4l2_internal_ops = { .open = ccdc_init_formats, }; /* ----------------------------------------------------------------------------- * Media entity operations */ /* * ccdc_link_setup - Setup CCDC connections * @entity: CCDC media entity * @local: Pad at the local end of the link * @remote: Pad at the remote end of the link * @flags: Link flags * * return -EINVAL or zero on success */ static int ccdc_link_setup(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct isp_ccdc_device *ccdc = v4l2_get_subdevdata(sd); struct isp_device *isp = to_isp_device(ccdc); unsigned int index = local->index; /* FIXME: this is actually a hack! */ if (is_media_entity_v4l2_subdev(remote->entity)) index |= 2 << 16; switch (index) { case CCDC_PAD_SINK | 2 << 16: /* Read from the sensor (parallel interface), CCP2, CSI2a or * CSI2c. 
*/ if (!(flags & MEDIA_LNK_FL_ENABLED)) { ccdc->input = CCDC_INPUT_NONE; break; } if (ccdc->input != CCDC_INPUT_NONE) return -EBUSY; if (remote->entity == &isp->isp_ccp2.subdev.entity) ccdc->input = CCDC_INPUT_CCP2B; else if (remote->entity == &isp->isp_csi2a.subdev.entity) ccdc->input = CCDC_INPUT_CSI2A; else if (remote->entity == &isp->isp_csi2c.subdev.entity) ccdc->input = CCDC_INPUT_CSI2C; else ccdc->input = CCDC_INPUT_PARALLEL; break; /* * The ISP core doesn't support pipelines with multiple video outputs. * Revisit this when it will be implemented, and return -EBUSY for now. */ case CCDC_PAD_SOURCE_VP | 2 << 16: /* Write to preview engine, histogram and H3A. When none of * those links are active, the video port can be disabled. */ if (flags & MEDIA_LNK_FL_ENABLED) { if (ccdc->output & ~CCDC_OUTPUT_PREVIEW) return -EBUSY; ccdc->output |= CCDC_OUTPUT_PREVIEW; } else { ccdc->output &= ~CCDC_OUTPUT_PREVIEW; } break; case CCDC_PAD_SOURCE_OF: /* Write to memory */ if (flags & MEDIA_LNK_FL_ENABLED) { if (ccdc->output & ~CCDC_OUTPUT_MEMORY) return -EBUSY; ccdc->output |= CCDC_OUTPUT_MEMORY; } else { ccdc->output &= ~CCDC_OUTPUT_MEMORY; } break; case CCDC_PAD_SOURCE_OF | 2 << 16: /* Write to resizer */ if (flags & MEDIA_LNK_FL_ENABLED) { if (ccdc->output & ~CCDC_OUTPUT_RESIZER) return -EBUSY; ccdc->output |= CCDC_OUTPUT_RESIZER; } else { ccdc->output &= ~CCDC_OUTPUT_RESIZER; } break; default: return -EINVAL; } return 0; } /* media operations */ static const struct media_entity_operations ccdc_media_ops = { .link_setup = ccdc_link_setup, .link_validate = v4l2_subdev_link_validate, }; void omap3isp_ccdc_unregister_entities(struct isp_ccdc_device *ccdc) { v4l2_device_unregister_subdev(&ccdc->subdev); omap3isp_video_unregister(&ccdc->video_out); } int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc, struct v4l2_device *vdev) { int ret; /* Register the subdev and video node. 
*/ ccdc->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &ccdc->subdev); if (ret < 0) goto error; ret = omap3isp_video_register(&ccdc->video_out, vdev); if (ret < 0) goto error; return 0; error: omap3isp_ccdc_unregister_entities(ccdc); return ret; } /* ----------------------------------------------------------------------------- * ISP CCDC initialisation and cleanup */ /* * ccdc_init_entities - Initialize V4L2 subdev and media entity * @ccdc: ISP CCDC module * * Return 0 on success and a negative error code on failure. */ static int ccdc_init_entities(struct isp_ccdc_device *ccdc) { struct v4l2_subdev *sd = &ccdc->subdev; struct media_pad *pads = ccdc->pads; struct media_entity *me = &sd->entity; int ret; ccdc->input = CCDC_INPUT_NONE; v4l2_subdev_init(sd, &ccdc_v4l2_ops); sd->internal_ops = &ccdc_v4l2_internal_ops; strscpy(sd->name, "OMAP3 ISP CCDC", sizeof(sd->name)); sd->grp_id = 1 << 16; /* group ID for isp subdevs */ v4l2_set_subdevdata(sd, ccdc); sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE; pads[CCDC_PAD_SINK].flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; pads[CCDC_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE; pads[CCDC_PAD_SOURCE_OF].flags = MEDIA_PAD_FL_SOURCE; me->ops = &ccdc_media_ops; ret = media_entity_pads_init(me, CCDC_PADS_NUM, pads); if (ret < 0) return ret; ccdc_init_formats(sd, NULL); ccdc->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ccdc->video_out.ops = &ccdc_video_ops; ccdc->video_out.isp = to_isp_device(ccdc); ccdc->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3; ccdc->video_out.bpl_alignment = 32; ret = omap3isp_video_init(&ccdc->video_out, "CCDC"); if (ret < 0) goto error; return 0; error: media_entity_cleanup(me); return ret; } /* * omap3isp_ccdc_init - CCDC module initialization. * @isp: Device pointer specific to the OMAP3 ISP. * * TODO: Get the initialisation values from platform data. * * Return 0 on success or a negative error code otherwise. 
*/
int omap3isp_ccdc_init(struct isp_device *isp)
{
	struct isp_ccdc_device *ccdc = &isp->isp_ccdc;
	int ret;

	/* Locks protecting, respectively, the CCDC state and the ioctl
	 * configuration path; the wait queue is used to wait for the CCDC
	 * to stop at frame boundaries.
	 */
	spin_lock_init(&ccdc->lock);
	init_waitqueue_head(&ccdc->wait);
	mutex_init(&ccdc->ioctl_lock);

	ccdc->stopping = CCDC_STOP_NOT_REQUESTED;

	/* LSC (lens shading compensation) tables are freed from a workqueue
	 * context; start with the engine stopped and an empty free queue.
	 */
	INIT_WORK(&ccdc->lsc.table_work, ccdc_lsc_free_table_work);
	ccdc->lsc.state = LSC_STATE_STOPPED;
	INIT_LIST_HEAD(&ccdc->lsc.free_queue);
	spin_lock_init(&ccdc->lsc.req_lock);

	ccdc->clamp.oblen = 0;
	ccdc->clamp.dcsubval = 0;

	/* Apply the default black level clamp configuration to the hardware
	 * shadow state before registering any entity.
	 */
	ccdc->update = OMAP3ISP_CCDC_BLCLAMP;
	ccdc_apply_controls(ccdc);

	ret = ccdc_init_entities(ccdc);
	if (ret < 0) {
		/* Only the mutex needs explicit destruction on failure;
		 * ccdc_init_entities() cleans up after itself.
		 */
		mutex_destroy(&ccdc->ioctl_lock);
		return ret;
	}

	return 0;
}

/*
 * omap3isp_ccdc_cleanup - CCDC module cleanup.
 * @isp: Device pointer specific to the OMAP3 ISP.
 */
void omap3isp_ccdc_cleanup(struct isp_device *isp)
{
	struct isp_ccdc_device *ccdc = &isp->isp_ccdc;

	omap3isp_video_cleanup(&ccdc->video_out);
	media_entity_cleanup(&ccdc->subdev.entity);

	/* Free LSC requests. As the CCDC is stopped there's no active request,
	 * so only the pending request and the free queue need to be handled.
	 */
	ccdc_lsc_free_request(ccdc, ccdc->lsc.request);
	cancel_work_sync(&ccdc->lsc.table_work);
	ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);

	/* The FPC (fault pixel correction) table is a coherent DMA buffer of
	 * fpnum 32-bit entries; release it if one was ever installed.
	 */
	if (ccdc->fpc.addr != NULL)
		dma_free_coherent(isp->dev, ccdc->fpc.fpnum * 4, ccdc->fpc.addr,
				  ccdc->fpc.dma);

	mutex_destroy(&ccdc->ioctl_lock);
}
linux-master
drivers/media/platform/ti/omap3isp/ispccdc.c
// SPDX-License-Identifier: GPL-2.0-only /* * isphist.c * * TI OMAP3 ISP - Histogram module * * Copyright (C) 2010 Nokia Corporation * Copyright (C) 2009 Texas Instruments, Inc. * * Contacts: David Cohen <[email protected]> * Laurent Pinchart <[email protected]> * Sakari Ailus <[email protected]> */ #include <linux/delay.h> #include <linux/device.h> #include <linux/dmaengine.h> #include <linux/slab.h> #include <linux/uaccess.h> #include "isp.h" #include "ispreg.h" #include "isphist.h" #define HIST_CONFIG_DMA 1 /* * hist_reset_mem - clear Histogram memory before start stats engine. */ static void hist_reset_mem(struct ispstat *hist) { struct isp_device *isp = hist->isp; struct omap3isp_hist_config *conf = hist->priv; unsigned int i; isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR); /* * By setting it, the histogram internal buffer is being cleared at the * same time it's being read. This bit must be cleared afterwards. */ isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR); /* * We'll clear 4 words at each iteration for optimization. It avoids * 3/4 of the jumps. We also know HIST_MEM_SIZE is divisible by 4. */ for (i = OMAP3ISP_HIST_MEM_SIZE / 4; i > 0; i--) { isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); } isp_reg_clr(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR); hist->wait_acc_frames = conf->num_acc_frames; } /* * hist_setup_regs - Helper function to update Histogram registers. 
*/ static void hist_setup_regs(struct ispstat *hist, void *priv) { struct isp_device *isp = hist->isp; struct omap3isp_hist_config *conf = priv; int c; u32 cnt; u32 wb_gain; u32 reg_hor[OMAP3ISP_HIST_MAX_REGIONS]; u32 reg_ver[OMAP3ISP_HIST_MAX_REGIONS]; if (!hist->update || hist->state == ISPSTAT_DISABLED || hist->state == ISPSTAT_DISABLING) return; cnt = conf->cfa << ISPHIST_CNT_CFA_SHIFT; wb_gain = conf->wg[0] << ISPHIST_WB_GAIN_WG00_SHIFT; wb_gain |= conf->wg[1] << ISPHIST_WB_GAIN_WG01_SHIFT; wb_gain |= conf->wg[2] << ISPHIST_WB_GAIN_WG02_SHIFT; if (conf->cfa == OMAP3ISP_HIST_CFA_BAYER) wb_gain |= conf->wg[3] << ISPHIST_WB_GAIN_WG03_SHIFT; /* Regions size and position */ for (c = 0; c < OMAP3ISP_HIST_MAX_REGIONS; c++) { if (c < conf->num_regions) { reg_hor[c] = (conf->region[c].h_start << ISPHIST_REG_START_SHIFT) | (conf->region[c].h_end << ISPHIST_REG_END_SHIFT); reg_ver[c] = (conf->region[c].v_start << ISPHIST_REG_START_SHIFT) | (conf->region[c].v_end << ISPHIST_REG_END_SHIFT); } else { reg_hor[c] = 0; reg_ver[c] = 0; } } cnt |= conf->hist_bins << ISPHIST_CNT_BINS_SHIFT; switch (conf->hist_bins) { case OMAP3ISP_HIST_BINS_256: cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 8) << ISPHIST_CNT_SHIFT_SHIFT; break; case OMAP3ISP_HIST_BINS_128: cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 7) << ISPHIST_CNT_SHIFT_SHIFT; break; case OMAP3ISP_HIST_BINS_64: cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 6) << ISPHIST_CNT_SHIFT_SHIFT; break; default: /* OMAP3ISP_HIST_BINS_32 */ cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 5) << ISPHIST_CNT_SHIFT_SHIFT; break; } hist_reset_mem(hist); isp_reg_writel(isp, cnt, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT); isp_reg_writel(isp, wb_gain, OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN); isp_reg_writel(isp, reg_hor[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_HORZ); isp_reg_writel(isp, reg_ver[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_VERT); isp_reg_writel(isp, reg_hor[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_HORZ); isp_reg_writel(isp, reg_ver[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_VERT); 
isp_reg_writel(isp, reg_hor[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_HORZ); isp_reg_writel(isp, reg_ver[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_VERT); isp_reg_writel(isp, reg_hor[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_HORZ); isp_reg_writel(isp, reg_ver[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_VERT); hist->update = 0; hist->config_counter += hist->inc_config; hist->inc_config = 0; hist->buf_size = conf->buf_size; } static void hist_enable(struct ispstat *hist, int enable) { if (enable) { isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR, ISPHIST_PCR_ENABLE); omap3isp_subclk_enable(hist->isp, OMAP3_ISP_SUBCLK_HIST); } else { isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR, ISPHIST_PCR_ENABLE); omap3isp_subclk_disable(hist->isp, OMAP3_ISP_SUBCLK_HIST); } } static int hist_busy(struct ispstat *hist) { return isp_reg_readl(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR) & ISPHIST_PCR_BUSY; } static void hist_dma_cb(void *data) { struct ispstat *hist = data; /* FIXME: The DMA engine API can't report transfer errors :-/ */ isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR); omap3isp_stat_dma_isr(hist); if (hist->state != ISPSTAT_DISABLED) omap3isp_hist_dma_done(hist->isp); } static int hist_buf_dma(struct ispstat *hist) { dma_addr_t dma_addr = hist->active_buf->dma_addr; struct dma_async_tx_descriptor *tx; struct dma_slave_config cfg; dma_cookie_t cookie; int ret; if (unlikely(!dma_addr)) { dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n"); goto error; } isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR); isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR); omap3isp_flush(hist->isp); memset(&cfg, 0, sizeof(cfg)); cfg.src_addr = hist->isp->mmio_hist_base_phys + ISPHIST_DATA; cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cfg.src_maxburst = hist->buf_size / 4; ret = dmaengine_slave_config(hist->dma_ch, &cfg); if (ret < 0) { dev_dbg(hist->isp->dev, "hist: DMA slave configuration failed\n"); goto 
error; } tx = dmaengine_prep_slave_single(hist->dma_ch, dma_addr, hist->buf_size, DMA_DEV_TO_MEM, DMA_CTRL_ACK); if (tx == NULL) { dev_dbg(hist->isp->dev, "hist: DMA slave preparation failed\n"); goto error; } tx->callback = hist_dma_cb; tx->callback_param = hist; cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { dev_dbg(hist->isp->dev, "hist: DMA submission failed\n"); goto error; } dma_async_issue_pending(hist->dma_ch); return STAT_BUF_WAITING_DMA; error: hist_reset_mem(hist); return STAT_NO_BUF; } static int hist_buf_pio(struct ispstat *hist) { struct isp_device *isp = hist->isp; u32 *buf = hist->active_buf->virt_addr; unsigned int i; if (!buf) { dev_dbg(isp->dev, "hist: invalid PIO buffer address\n"); hist_reset_mem(hist); return STAT_NO_BUF; } isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR); /* * By setting it, the histogram internal buffer is being cleared at the * same time it's being read. This bit must be cleared just after all * data is acquired. */ isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR); /* * We'll read 4 times a 4-bytes-word at each iteration for * optimization. It avoids 3/4 of the jumps. We also know buf_size is * divisible by 16. */ for (i = hist->buf_size / 16; i > 0; i--) { *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA); } isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR); return STAT_BUF_DONE; } /* * hist_buf_process - Callback from ISP driver for HIST interrupt. 
*/ static int hist_buf_process(struct ispstat *hist) { struct omap3isp_hist_config *user_cfg = hist->priv; int ret; if (atomic_read(&hist->buf_err) || hist->state != ISPSTAT_ENABLED) { hist_reset_mem(hist); return STAT_NO_BUF; } if (--(hist->wait_acc_frames)) return STAT_NO_BUF; if (hist->dma_ch) ret = hist_buf_dma(hist); else ret = hist_buf_pio(hist); hist->wait_acc_frames = user_cfg->num_acc_frames; return ret; } static u32 hist_get_buf_size(struct omap3isp_hist_config *conf) { return OMAP3ISP_HIST_MEM_SIZE_BINS(conf->hist_bins) * conf->num_regions; } /* * hist_validate_params - Helper function to check user given params. * @new_conf: Pointer to user configuration structure. * * Returns 0 on success configuration. */ static int hist_validate_params(struct ispstat *hist, void *new_conf) { struct omap3isp_hist_config *user_cfg = new_conf; int c; u32 buf_size; if (user_cfg->cfa > OMAP3ISP_HIST_CFA_FOVEONX3) return -EINVAL; /* Regions size and position */ if ((user_cfg->num_regions < OMAP3ISP_HIST_MIN_REGIONS) || (user_cfg->num_regions > OMAP3ISP_HIST_MAX_REGIONS)) return -EINVAL; /* Regions */ for (c = 0; c < user_cfg->num_regions; c++) { if (user_cfg->region[c].h_start & ~ISPHIST_REG_START_END_MASK) return -EINVAL; if (user_cfg->region[c].h_end & ~ISPHIST_REG_START_END_MASK) return -EINVAL; if (user_cfg->region[c].v_start & ~ISPHIST_REG_START_END_MASK) return -EINVAL; if (user_cfg->region[c].v_end & ~ISPHIST_REG_START_END_MASK) return -EINVAL; if (user_cfg->region[c].h_start > user_cfg->region[c].h_end) return -EINVAL; if (user_cfg->region[c].v_start > user_cfg->region[c].v_end) return -EINVAL; } switch (user_cfg->num_regions) { case 1: if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_256) return -EINVAL; break; case 2: if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_128) return -EINVAL; break; default: /* 3 or 4 */ if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_64) return -EINVAL; break; } buf_size = hist_get_buf_size(user_cfg); if (buf_size > user_cfg->buf_size) /* User's 
buf_size request wasn't enough */ user_cfg->buf_size = buf_size; else if (user_cfg->buf_size > OMAP3ISP_HIST_MAX_BUF_SIZE) user_cfg->buf_size = OMAP3ISP_HIST_MAX_BUF_SIZE; return 0; } static int hist_comp_params(struct ispstat *hist, struct omap3isp_hist_config *user_cfg) { struct omap3isp_hist_config *cur_cfg = hist->priv; int c; if (cur_cfg->cfa != user_cfg->cfa) return 1; if (cur_cfg->num_acc_frames != user_cfg->num_acc_frames) return 1; if (cur_cfg->hist_bins != user_cfg->hist_bins) return 1; for (c = 0; c < OMAP3ISP_HIST_MAX_WG; c++) { if (c == 3 && user_cfg->cfa == OMAP3ISP_HIST_CFA_FOVEONX3) break; else if (cur_cfg->wg[c] != user_cfg->wg[c]) return 1; } if (cur_cfg->num_regions != user_cfg->num_regions) return 1; /* Regions */ for (c = 0; c < user_cfg->num_regions; c++) { if (cur_cfg->region[c].h_start != user_cfg->region[c].h_start) return 1; if (cur_cfg->region[c].h_end != user_cfg->region[c].h_end) return 1; if (cur_cfg->region[c].v_start != user_cfg->region[c].v_start) return 1; if (cur_cfg->region[c].v_end != user_cfg->region[c].v_end) return 1; } return 0; } /* * hist_update_params - Helper function to check and store user given params. * @new_conf: Pointer to user configuration structure. */ static void hist_set_params(struct ispstat *hist, void *new_conf) { struct omap3isp_hist_config *user_cfg = new_conf; struct omap3isp_hist_config *cur_cfg = hist->priv; if (!hist->configured || hist_comp_params(hist, user_cfg)) { memcpy(cur_cfg, user_cfg, sizeof(*user_cfg)); if (user_cfg->num_acc_frames == 0) user_cfg->num_acc_frames = 1; hist->inc_config++; hist->update = 1; /* * User might be asked for a bigger buffer than necessary for * this configuration. In order to return the right amount of * data during buffer request, let's calculate the size here * instead of stick with user_cfg->buf_size. 
*/ cur_cfg->buf_size = hist_get_buf_size(cur_cfg); } } static long hist_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { struct ispstat *stat = v4l2_get_subdevdata(sd); switch (cmd) { case VIDIOC_OMAP3ISP_HIST_CFG: return omap3isp_stat_config(stat, arg); case VIDIOC_OMAP3ISP_STAT_REQ: return omap3isp_stat_request_statistics(stat, arg); case VIDIOC_OMAP3ISP_STAT_REQ_TIME32: return omap3isp_stat_request_statistics_time32(stat, arg); case VIDIOC_OMAP3ISP_STAT_EN: { int *en = arg; return omap3isp_stat_enable(stat, !!*en); } } return -ENOIOCTLCMD; } static const struct ispstat_ops hist_ops = { .validate_params = hist_validate_params, .set_params = hist_set_params, .setup_regs = hist_setup_regs, .enable = hist_enable, .busy = hist_busy, .buf_process = hist_buf_process, }; static const struct v4l2_subdev_core_ops hist_subdev_core_ops = { .ioctl = hist_ioctl, .subscribe_event = omap3isp_stat_subscribe_event, .unsubscribe_event = omap3isp_stat_unsubscribe_event, }; static const struct v4l2_subdev_video_ops hist_subdev_video_ops = { .s_stream = omap3isp_stat_s_stream, }; static const struct v4l2_subdev_ops hist_subdev_ops = { .core = &hist_subdev_core_ops, .video = &hist_subdev_video_ops, }; /* * omap3isp_hist_init - Module Initialization. */ int omap3isp_hist_init(struct isp_device *isp) { struct ispstat *hist = &isp->isp_hist; struct omap3isp_hist_config *hist_cfg; int ret; hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL); if (hist_cfg == NULL) return -ENOMEM; hist->isp = isp; if (HIST_CONFIG_DMA) { dma_cap_mask_t mask; /* * We need slave capable channel without DMA request line for * reading out the data. * For this we can use dma_request_chan_by_mask() as we are * happy with any channel as long as it is capable of slave * configuration. 
*/ dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); hist->dma_ch = dma_request_chan_by_mask(&mask); if (IS_ERR(hist->dma_ch)) { ret = PTR_ERR(hist->dma_ch); if (ret == -EPROBE_DEFER) goto err; hist->dma_ch = NULL; dev_warn(isp->dev, "hist: DMA channel request failed, using PIO\n"); } else { dev_dbg(isp->dev, "hist: using DMA channel %s\n", dma_chan_name(hist->dma_ch)); } } hist->ops = &hist_ops; hist->priv = hist_cfg; hist->event_type = V4L2_EVENT_OMAP3ISP_HIST; ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops); err: if (ret) { if (!IS_ERR_OR_NULL(hist->dma_ch)) dma_release_channel(hist->dma_ch); kfree(hist_cfg); } return ret; } /* * omap3isp_hist_cleanup - Module cleanup. */ void omap3isp_hist_cleanup(struct isp_device *isp) { struct ispstat *hist = &isp->isp_hist; if (hist->dma_ch) dma_release_channel(hist->dma_ch); omap3isp_stat_cleanup(hist); }
linux-master
drivers/media/platform/ti/omap3isp/isphist.c
// SPDX-License-Identifier: GPL-2.0-only /* * isph3a_af.c * * TI OMAP3 ISP - H3A AF module * * Copyright (C) 2010 Nokia Corporation * Copyright (C) 2009 Texas Instruments, Inc. * * Contacts: David Cohen <[email protected]> * Laurent Pinchart <[email protected]> * Sakari Ailus <[email protected]> */ /* Linux specific include files */ #include <linux/device.h> #include <linux/slab.h> #include "isp.h" #include "isph3a.h" #include "ispstat.h" #define IS_OUT_OF_BOUNDS(value, min, max) \ ((((unsigned int)value) < (min)) || (((unsigned int)value) > (max))) static void h3a_af_setup_regs(struct ispstat *af, void *priv) { struct omap3isp_h3a_af_config *conf = priv; u32 pcr; u32 pax1; u32 pax2; u32 paxstart; u32 coef; u32 base_coef_set0; u32 base_coef_set1; int index; if (af->state == ISPSTAT_DISABLED) return; isp_reg_writel(af->isp, af->active_buf->dma_addr, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFBUFST); if (!af->update) return; /* Configure Hardware Registers */ pax1 = ((conf->paxel.width >> 1) - 1) << AF_PAXW_SHIFT; /* Set height in AFPAX1 */ pax1 |= (conf->paxel.height >> 1) - 1; isp_reg_writel(af->isp, pax1, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX1); /* Configure AFPAX2 Register */ /* Set Line Increment in AFPAX2 Register */ pax2 = ((conf->paxel.line_inc >> 1) - 1) << AF_LINE_INCR_SHIFT; /* Set Vertical Count */ pax2 |= (conf->paxel.v_cnt - 1) << AF_VT_COUNT_SHIFT; /* Set Horizontal Count */ pax2 |= (conf->paxel.h_cnt - 1); isp_reg_writel(af->isp, pax2, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAX2); /* Configure PAXSTART Register */ /*Configure Horizontal Start */ paxstart = conf->paxel.h_start << AF_HZ_START_SHIFT; /* Configure Vertical Start */ paxstart |= conf->paxel.v_start; isp_reg_writel(af->isp, paxstart, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFPAXSTART); /*SetIIRSH Register */ isp_reg_writel(af->isp, conf->iir.h_start, OMAP3_ISP_IOMEM_H3A, ISPH3A_AFIIRSH); base_coef_set0 = ISPH3A_AFCOEF010; base_coef_set1 = ISPH3A_AFCOEF110; for (index = 0; index <= 8; index += 2) { /*Set IIR Filter0 Coefficients 
*/ coef = 0; coef |= conf->iir.coeff_set0[index]; coef |= conf->iir.coeff_set0[index + 1] << AF_COEF_SHIFT; isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A, base_coef_set0); base_coef_set0 += AFCOEF_OFFSET; /*Set IIR Filter1 Coefficients */ coef = 0; coef |= conf->iir.coeff_set1[index]; coef |= conf->iir.coeff_set1[index + 1] << AF_COEF_SHIFT; isp_reg_writel(af->isp, coef, OMAP3_ISP_IOMEM_H3A, base_coef_set1); base_coef_set1 += AFCOEF_OFFSET; } /* set AFCOEF0010 Register */ isp_reg_writel(af->isp, conf->iir.coeff_set0[10], OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF0010); /* set AFCOEF1010 Register */ isp_reg_writel(af->isp, conf->iir.coeff_set1[10], OMAP3_ISP_IOMEM_H3A, ISPH3A_AFCOEF1010); /* PCR Register */ /* Set RGB Position */ pcr = conf->rgb_pos << AF_RGBPOS_SHIFT; /* Set Accumulator Mode */ if (conf->fvmode == OMAP3ISP_AF_MODE_PEAK) pcr |= AF_FVMODE; /* Set A-law */ if (conf->alaw_enable) pcr |= AF_ALAW_EN; /* HMF Configurations */ if (conf->hmf.enable) { /* Enable HMF */ pcr |= AF_MED_EN; /* Set Median Threshold */ pcr |= conf->hmf.threshold << AF_MED_TH_SHIFT; } /* Set PCR Register */ isp_reg_clr_set(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR, AF_PCR_MASK, pcr); af->update = 0; af->config_counter += af->inc_config; af->inc_config = 0; af->buf_size = conf->buf_size; } static void h3a_af_enable(struct ispstat *af, int enable) { if (enable) { isp_reg_set(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR, ISPH3A_PCR_AF_EN); omap3isp_subclk_enable(af->isp, OMAP3_ISP_SUBCLK_AF); } else { isp_reg_clr(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR, ISPH3A_PCR_AF_EN); omap3isp_subclk_disable(af->isp, OMAP3_ISP_SUBCLK_AF); } } static int h3a_af_busy(struct ispstat *af) { return isp_reg_readl(af->isp, OMAP3_ISP_IOMEM_H3A, ISPH3A_PCR) & ISPH3A_PCR_BUSYAF; } static u32 h3a_af_get_buf_size(struct omap3isp_h3a_af_config *conf) { return conf->paxel.h_cnt * conf->paxel.v_cnt * OMAP3ISP_AF_PAXEL_SIZE; } /* Function to check paxel parameters */ static int h3a_af_validate_params(struct ispstat *af, 
void *new_conf) { struct omap3isp_h3a_af_config *user_cfg = new_conf; struct omap3isp_h3a_af_paxel *paxel_cfg = &user_cfg->paxel; struct omap3isp_h3a_af_iir *iir_cfg = &user_cfg->iir; int index; u32 buf_size; /* Check horizontal Count */ if (IS_OUT_OF_BOUNDS(paxel_cfg->h_cnt, OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MIN, OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MAX)) return -EINVAL; /* Check Vertical Count */ if (IS_OUT_OF_BOUNDS(paxel_cfg->v_cnt, OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN, OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MAX)) return -EINVAL; if (IS_OUT_OF_BOUNDS(paxel_cfg->height, OMAP3ISP_AF_PAXEL_HEIGHT_MIN, OMAP3ISP_AF_PAXEL_HEIGHT_MAX) || paxel_cfg->height % 2) return -EINVAL; /* Check width */ if (IS_OUT_OF_BOUNDS(paxel_cfg->width, OMAP3ISP_AF_PAXEL_WIDTH_MIN, OMAP3ISP_AF_PAXEL_WIDTH_MAX) || paxel_cfg->width % 2) return -EINVAL; /* Check Line Increment */ if (IS_OUT_OF_BOUNDS(paxel_cfg->line_inc, OMAP3ISP_AF_PAXEL_INCREMENT_MIN, OMAP3ISP_AF_PAXEL_INCREMENT_MAX) || paxel_cfg->line_inc % 2) return -EINVAL; /* Check Horizontal Start */ if ((paxel_cfg->h_start < iir_cfg->h_start) || IS_OUT_OF_BOUNDS(paxel_cfg->h_start, OMAP3ISP_AF_PAXEL_HZSTART_MIN, OMAP3ISP_AF_PAXEL_HZSTART_MAX)) return -EINVAL; /* Check IIR */ for (index = 0; index < OMAP3ISP_AF_NUM_COEF; index++) { if ((iir_cfg->coeff_set0[index]) > OMAP3ISP_AF_COEF_MAX) return -EINVAL; if ((iir_cfg->coeff_set1[index]) > OMAP3ISP_AF_COEF_MAX) return -EINVAL; } if (IS_OUT_OF_BOUNDS(iir_cfg->h_start, OMAP3ISP_AF_IIRSH_MIN, OMAP3ISP_AF_IIRSH_MAX)) return -EINVAL; /* Hack: If paxel size is 12, the 10th AF window may be corrupted */ if ((paxel_cfg->h_cnt * paxel_cfg->v_cnt > 9) && (paxel_cfg->width * paxel_cfg->height == 12)) return -EINVAL; buf_size = h3a_af_get_buf_size(user_cfg); if (buf_size > user_cfg->buf_size) /* User buf_size request wasn't enough */ user_cfg->buf_size = buf_size; else if (user_cfg->buf_size > OMAP3ISP_AF_MAX_BUF_SIZE) user_cfg->buf_size = OMAP3ISP_AF_MAX_BUF_SIZE; return 0; } /* Update local parameters */ 
static void h3a_af_set_params(struct ispstat *af, void *new_conf) { struct omap3isp_h3a_af_config *user_cfg = new_conf; struct omap3isp_h3a_af_config *cur_cfg = af->priv; int update = 0; int index; /* alaw */ if (cur_cfg->alaw_enable != user_cfg->alaw_enable) { update = 1; goto out; } /* hmf */ if (cur_cfg->hmf.enable != user_cfg->hmf.enable) { update = 1; goto out; } if (cur_cfg->hmf.threshold != user_cfg->hmf.threshold) { update = 1; goto out; } /* rgbpos */ if (cur_cfg->rgb_pos != user_cfg->rgb_pos) { update = 1; goto out; } /* iir */ if (cur_cfg->iir.h_start != user_cfg->iir.h_start) { update = 1; goto out; } for (index = 0; index < OMAP3ISP_AF_NUM_COEF; index++) { if (cur_cfg->iir.coeff_set0[index] != user_cfg->iir.coeff_set0[index]) { update = 1; goto out; } if (cur_cfg->iir.coeff_set1[index] != user_cfg->iir.coeff_set1[index]) { update = 1; goto out; } } /* paxel */ if ((cur_cfg->paxel.width != user_cfg->paxel.width) || (cur_cfg->paxel.height != user_cfg->paxel.height) || (cur_cfg->paxel.h_start != user_cfg->paxel.h_start) || (cur_cfg->paxel.v_start != user_cfg->paxel.v_start) || (cur_cfg->paxel.h_cnt != user_cfg->paxel.h_cnt) || (cur_cfg->paxel.v_cnt != user_cfg->paxel.v_cnt) || (cur_cfg->paxel.line_inc != user_cfg->paxel.line_inc)) { update = 1; goto out; } /* af_mode */ if (cur_cfg->fvmode != user_cfg->fvmode) update = 1; out: if (update || !af->configured) { memcpy(cur_cfg, user_cfg, sizeof(*cur_cfg)); af->inc_config++; af->update = 1; /* * User might be asked for a bigger buffer than necessary for * this configuration. In order to return the right amount of * data during buffer request, let's calculate the size here * instead of stick with user_cfg->buf_size. 
*/ cur_cfg->buf_size = h3a_af_get_buf_size(cur_cfg); } } static long h3a_af_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { struct ispstat *stat = v4l2_get_subdevdata(sd); switch (cmd) { case VIDIOC_OMAP3ISP_AF_CFG: return omap3isp_stat_config(stat, arg); case VIDIOC_OMAP3ISP_STAT_REQ: return omap3isp_stat_request_statistics(stat, arg); case VIDIOC_OMAP3ISP_STAT_REQ_TIME32: return omap3isp_stat_request_statistics_time32(stat, arg); case VIDIOC_OMAP3ISP_STAT_EN: { int *en = arg; return omap3isp_stat_enable(stat, !!*en); } } return -ENOIOCTLCMD; } static const struct ispstat_ops h3a_af_ops = { .validate_params = h3a_af_validate_params, .set_params = h3a_af_set_params, .setup_regs = h3a_af_setup_regs, .enable = h3a_af_enable, .busy = h3a_af_busy, }; static const struct v4l2_subdev_core_ops h3a_af_subdev_core_ops = { .ioctl = h3a_af_ioctl, .subscribe_event = omap3isp_stat_subscribe_event, .unsubscribe_event = omap3isp_stat_unsubscribe_event, }; static const struct v4l2_subdev_video_ops h3a_af_subdev_video_ops = { .s_stream = omap3isp_stat_s_stream, }; static const struct v4l2_subdev_ops h3a_af_subdev_ops = { .core = &h3a_af_subdev_core_ops, .video = &h3a_af_subdev_video_ops, }; /* Function to register the AF character device driver. 
*/
int omap3isp_h3a_af_init(struct isp_device *isp)
{
	struct ispstat *af = &isp->isp_af;
	struct omap3isp_h3a_af_config *af_cfg;
	struct omap3isp_h3a_af_config *af_recover_cfg = NULL;
	int ret;

	/* User-visible configuration, owned by the stat core via af->priv. */
	af_cfg = kzalloc(sizeof(*af_cfg), GFP_KERNEL);
	if (af_cfg == NULL)
		return -ENOMEM;

	af->ops = &h3a_af_ops;
	af->priv = af_cfg;
	af->event_type = V4L2_EVENT_OMAP3ISP_AF;
	af->isp = isp;

	/* Set recover state configuration: a minimal, always-valid paxel
	 * layout the stat core can fall back to after an error condition.
	 */
	af_recover_cfg = kzalloc(sizeof(*af_recover_cfg), GFP_KERNEL);
	if (!af_recover_cfg) {
		dev_err(af->isp->dev,
			"AF: cannot allocate memory for recover configuration.\n");
		ret = -ENOMEM;
		goto err;
	}

	af_recover_cfg->paxel.h_start = OMAP3ISP_AF_PAXEL_HZSTART_MIN;
	af_recover_cfg->paxel.width = OMAP3ISP_AF_PAXEL_WIDTH_MIN;
	af_recover_cfg->paxel.height = OMAP3ISP_AF_PAXEL_HEIGHT_MIN;
	af_recover_cfg->paxel.h_cnt = OMAP3ISP_AF_PAXEL_HORIZONTAL_COUNT_MIN;
	af_recover_cfg->paxel.v_cnt = OMAP3ISP_AF_PAXEL_VERTICAL_COUNT_MIN;
	af_recover_cfg->paxel.line_inc = OMAP3ISP_AF_PAXEL_INCREMENT_MIN;

	/* Sanity-check the recover configuration against the same validator
	 * used for user configurations; failure here is a driver bug.
	 */
	if (h3a_af_validate_params(af, af_recover_cfg)) {
		dev_err(af->isp->dev,
			"AF: recover configuration is invalid.\n");
		ret = -EINVAL;
		goto err;
	}

	af_recover_cfg->buf_size = h3a_af_get_buf_size(af_recover_cfg);
	af->recover_priv = af_recover_cfg;

	ret = omap3isp_stat_init(af, "AF", &h3a_af_subdev_ops);

err:
	/* Shared error path: on any failure free both configurations
	 * (kfree(NULL) is a no-op for af_recover_cfg when it was never
	 * allocated). The success path falls through with ret == 0.
	 */
	if (ret) {
		kfree(af_cfg);
		kfree(af_recover_cfg);
	}

	return ret;
}

/*
 * omap3isp_h3a_af_cleanup - Module exit; tears down the AF stat module.
 * @isp: Device pointer specific to the OMAP3 ISP.
 */
void omap3isp_h3a_af_cleanup(struct isp_device *isp)
{
	omap3isp_stat_cleanup(&isp->isp_af);
}
linux-master
drivers/media/platform/ti/omap3isp/isph3a_af.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ispcsiphy.c
 *
 * TI OMAP3 ISP - CSI PHY module
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: Laurent Pinchart <[email protected]>
 *	     Sakari Ailus <[email protected]>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>

#include "isp.h"
#include "ispreg.h"
#include "ispcsiphy.h"

/*
 * csiphy_routing_cfg_3630 - Program the OMAP3630 SCM camera PHY routing
 * @phy: the PHY being routed
 * @iface: which ISP interface (CCP2B/CSI2 on PHY1/PHY2) to connect
 * @ccp2_strobe: for CCP2 modes, false = data/clock, true = data/strobe
 *
 * Read-modify-write of the system control module CAMERA_PHY_CTRL register:
 * selects the receiver fed by each PHY and the PHY operating mode.
 */
static void csiphy_routing_cfg_3630(struct isp_csiphy *phy,
				    enum isp_interface_type iface,
				    bool ccp2_strobe)
{
	u32 reg;
	u32 shift, mode;

	regmap_read(phy->isp->syscon, phy->isp->syscon_offset, &reg);

	switch (iface) {
	default:
		/* Should not happen in practice, but let's keep the compiler happy. */
		return;
	case ISP_INTERFACE_CCP2B_PHY1:
		reg &= ~OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
		shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
		/* mode is assigned in the CCP2 data/strobe selection below. */
		break;
	case ISP_INTERFACE_CSI2C_PHY1:
		shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY1_SHIFT;
		mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
		break;
	case ISP_INTERFACE_CCP2B_PHY2:
		reg |= OMAP3630_CONTROL_CAMERA_PHY_CTRL_CSI1_RX_SEL_PHY2;
		shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
		break;
	case ISP_INTERFACE_CSI2A_PHY2:
		shift = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_PHY2_SHIFT;
		mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_DPHY;
		break;
	}

	/* Select data/clock or data/strobe mode for CCP2 */
	if (iface == ISP_INTERFACE_CCP2B_PHY1 ||
	    iface == ISP_INTERFACE_CCP2B_PHY2) {
		if (ccp2_strobe)
			mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_STROBE;
		else
			mode = OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_CCP2_DATA_CLOCK;
	}

	reg &= ~(OMAP3630_CONTROL_CAMERA_PHY_CTRL_CAMMODE_MASK << shift);
	reg |= mode << shift;

	regmap_write(phy->isp->syscon, phy->isp->syscon_offset, reg);
}

/*
 * csiphy_routing_cfg_3430 - Program the OMAP3430 SCM CSIRXFE register
 * @phy: the PHY being routed
 * @iface: ISP_INTERFACE_* value; only CCP2B on PHY1 is configurable here
 * @on: true to power up the receiver front-end, false to clear it
 * @ccp2_strobe: false = data/clock, true = data/strobe signalling
 */
static void csiphy_routing_cfg_3430(struct isp_csiphy *phy, u32 iface, bool on,
				    bool ccp2_strobe)
{
	u32 csirxfe = OMAP343X_CONTROL_CSIRXFE_PWRDNZ |
		      OMAP343X_CONTROL_CSIRXFE_RESET;

	/* Only the CCP2B on PHY1 is configurable. */
	if (iface != ISP_INTERFACE_CCP2B_PHY1)
		return;

	if (!on) {
		regmap_write(phy->isp->syscon, phy->isp->syscon_offset, 0);
		return;
	}

	if (ccp2_strobe)
		csirxfe |= OMAP343X_CONTROL_CSIRXFE_SELFORM;

	regmap_write(phy->isp->syscon, phy->isp->syscon_offset, csirxfe);
}

/*
 * Configure OMAP 3 CSI PHY routing.
 * @phy: relevant phy device
 * @iface: ISP_INTERFACE_*
 * @on: power on or off
 * @ccp2_strobe: false: data/clock, true: data/strobe
 *
 * Note that the underlying routing configuration registers are part of the
 * control (SCM) register space and part of the CORE power domain on both 3430
 * and 3630, so they will not hold their contents in off-mode. This isn't an
 * issue since the MPU power domain is forced on whilst the ISP is in use.
 */
static void csiphy_routing_cfg(struct isp_csiphy *phy,
			       enum isp_interface_type iface, bool on,
			       bool ccp2_strobe)
{
	/* On 3630 the routing register is only written when powering on. */
	if (phy->isp->phy_type == ISP_PHY_TYPE_3630 && on)
		return csiphy_routing_cfg_3630(phy, iface, ccp2_strobe);
	if (phy->isp->phy_type == ISP_PHY_TYPE_3430)
		return csiphy_routing_cfg_3430(phy, iface, on, ccp2_strobe);
}

/*
 * csiphy_power_autoswitch_enable
 * @enable: Sets or clears the autoswitch function enable flag.
 */
static void csiphy_power_autoswitch_enable(struct isp_csiphy *phy, bool enable)
{
	isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG,
			ISPCSI2_PHY_CFG_PWR_AUTO,
			enable ? ISPCSI2_PHY_CFG_PWR_AUTO : 0);
}

/*
 * csiphy_set_power
 * @power: Power state to be set.
 *
 * Returns 0 if successful, or -EBUSY if the retry count is exceeded.
 */
static int csiphy_set_power(struct isp_csiphy *phy, u32 power)
{
	u32 reg;
	u8 retry_count;

	isp_reg_clr_set(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG,
			ISPCSI2_PHY_CFG_PWR_CMD_MASK, power);

	/*
	 * Poll until the PWR_STATUS field reflects the requested command.
	 * NOTE(review): the comparison against (power >> 2) assumes the
	 * status field sits two bits below the command field in
	 * ISPCSI2_PHY_CFG — confirm against the register definitions.
	 */
	retry_count = 0;
	do {
		udelay(50);
		reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG) &
		      ISPCSI2_PHY_CFG_PWR_STATUS_MASK;

		if (reg != power >> 2)
			retry_count++;

	} while ((reg != power >> 2) && (retry_count < 100));

	if (retry_count == 100) {
		dev_err(phy->isp->dev, "CSI2 CIO set power failed!\n");
		return -EBUSY;
	}

	return 0;
}

/*
 * TCLK values are OK at their reset values
 */
#define TCLK_TERM	0
#define TCLK_MISS	1
#define TCLK_SETTLE	14

/*
 * omap3isp_csiphy_config - Validate the lane layout and program PHY timings
 * @phy: the PHY to configure
 *
 * Derives the lane configuration from the external pipeline's bus
 * configuration, verifies polarity/position values and lane uniqueness,
 * programs the SCM routing, the D-PHY THS/TCLK timing registers and the
 * per-lane polarity/position fields.
 *
 * Returns 0 on success, -EPIPE if the bus configuration is missing, or
 * -EINVAL on an invalid lane setup.
 */
static int omap3isp_csiphy_config(struct isp_csiphy *phy)
{
	struct isp_pipeline *pipe = to_isp_pipeline(phy->entity);
	struct isp_bus_cfg *buscfg;
	struct isp_csiphy_lanes_cfg *lanes;
	int csi2_ddrclk_khz;
	unsigned int num_data_lanes, used_lanes = 0;
	unsigned int i;
	u32 reg;

	buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
	if (WARN_ON(!buscfg))
		return -EPIPE;

	if (buscfg->interface == ISP_INTERFACE_CCP2B_PHY1
	    || buscfg->interface == ISP_INTERFACE_CCP2B_PHY2) {
		lanes = &buscfg->bus.ccp2.lanecfg;
		num_data_lanes = 1;
	} else {
		lanes = &buscfg->bus.csi2.lanecfg;
		num_data_lanes = buscfg->bus.csi2.num_data_lanes;
	}

	if (num_data_lanes > phy->num_data_lanes)
		return -EINVAL;

	/* Clock and data lanes verification */
	for (i = 0; i < num_data_lanes; i++) {
		if (lanes->data[i].pol > 1 || lanes->data[i].pos > 3)
			return -EINVAL;

		if (used_lanes & (1 << lanes->data[i].pos))
			return -EINVAL;

		used_lanes |= 1 << lanes->data[i].pos;
	}

	if (lanes->clk.pol > 1 || lanes->clk.pos > 3)
		return -EINVAL;

	/* Clock lane position 0 is invalid and it may not share a position. */
	if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
		return -EINVAL;

	/*
	 * The PHY configuration is lost in off mode, that's not an
	 * issue since the MPU power domain is forced on whilst the
	 * ISP is in use.
	 */
	csiphy_routing_cfg(phy, buscfg->interface, true,
			   buscfg->bus.ccp2.phy_layer);

	/* DPHY timing configuration */
	/* CSI-2 is DDR and we only count used lanes. */
	csi2_ddrclk_khz = pipe->external_rate / 1000
		/ (2 * hweight32(used_lanes)) * pipe->external_width;

	reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG0);

	reg &= ~(ISPCSIPHY_REG0_THS_TERM_MASK |
		 ISPCSIPHY_REG0_THS_SETTLE_MASK);
	/* THS_TERM: Programmed value = ceil(12.5 ns/DDRClk period) - 1. */
	reg |= (DIV_ROUND_UP(25 * csi2_ddrclk_khz, 2000000) - 1)
		<< ISPCSIPHY_REG0_THS_TERM_SHIFT;
	/* THS_SETTLE: Programmed value = ceil(90 ns/DDRClk period) + 3. */
	reg |= (DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3)
		<< ISPCSIPHY_REG0_THS_SETTLE_SHIFT;

	isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG0);

	reg = isp_reg_readl(phy->isp, phy->phy_regs, ISPCSIPHY_REG1);

	reg &= ~(ISPCSIPHY_REG1_TCLK_TERM_MASK |
		 ISPCSIPHY_REG1_TCLK_MISS_MASK |
		 ISPCSIPHY_REG1_TCLK_SETTLE_MASK);
	reg |= TCLK_TERM << ISPCSIPHY_REG1_TCLK_TERM_SHIFT;
	reg |= TCLK_MISS << ISPCSIPHY_REG1_TCLK_MISS_SHIFT;
	reg |= TCLK_SETTLE << ISPCSIPHY_REG1_TCLK_SETTLE_SHIFT;

	isp_reg_writel(phy->isp, reg, phy->phy_regs, ISPCSIPHY_REG1);

	/* DPHY lane configuration */
	reg = isp_reg_readl(phy->isp, phy->cfg_regs, ISPCSI2_PHY_CFG);

	for (i = 0; i < num_data_lanes; i++) {
		/* Hardware lane fields are 1-based, hence i + 1. */
		reg &= ~(ISPCSI2_PHY_CFG_DATA_POL_MASK(i + 1) |
			 ISPCSI2_PHY_CFG_DATA_POSITION_MASK(i + 1));
		reg |= (lanes->data[i].pol <<
			ISPCSI2_PHY_CFG_DATA_POL_SHIFT(i + 1));
		reg |= (lanes->data[i].pos <<
			ISPCSI2_PHY_CFG_DATA_POSITION_SHIFT(i + 1));
	}

	reg &= ~(ISPCSI2_PHY_CFG_CLOCK_POL_MASK |
		 ISPCSI2_PHY_CFG_CLOCK_POSITION_MASK);
	reg |= lanes->clk.pol << ISPCSI2_PHY_CFG_CLOCK_POL_SHIFT;
	reg |= lanes->clk.pos << ISPCSI2_PHY_CFG_CLOCK_POSITION_SHIFT;

	isp_reg_writel(phy->isp, reg, phy->cfg_regs, ISPCSI2_PHY_CFG);

	return 0;
}

/*
 * omap3isp_csiphy_acquire - Power up and configure a CSI PHY for an entity
 * @phy: the PHY to acquire
 * @entity: media entity that will own the PHY while acquired
 *
 * Returns 0 on success or a negative error code.
 */
int omap3isp_csiphy_acquire(struct isp_csiphy *phy, struct media_entity *entity)
{
	int rval;

	if (phy->vdd == NULL) {
		dev_err(phy->isp->dev,
			"Power regulator for CSI PHY not available\n");
		return -ENODEV;
	}

	mutex_lock(&phy->mutex);

	rval = regulator_enable(phy->vdd);
	if (rval < 0)
		goto done;

	/*
	 * NOTE(review): if omap3isp_csi2_reset() or omap3isp_csiphy_config()
	 * fails below, the regulator enabled above appears to stay enabled
	 * (release() only disables it when phy->entity is set, which is
	 * cleared on failure) — confirm whether this is intentional.
	 */
	rval = omap3isp_csi2_reset(phy->csi2);
	if (rval < 0)
		goto done;

	phy->entity = entity;

	rval = omap3isp_csiphy_config(phy);
	if (rval < 0)
		goto done;

	if (phy->isp->revision == ISP_REVISION_15_0) {
		rval = csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_ON);
		if (rval) {
			regulator_disable(phy->vdd);
			goto done;
		}

		csiphy_power_autoswitch_enable(phy, true);
	}
done:
	if (rval < 0)
		phy->entity = NULL;

	mutex_unlock(&phy->mutex);

	return rval;
}

/*
 * omap3isp_csiphy_release - Power down a previously acquired CSI PHY
 * @phy: the PHY to release
 *
 * No-op when the PHY was never acquired (phy->entity == NULL).
 */
void omap3isp_csiphy_release(struct isp_csiphy *phy)
{
	mutex_lock(&phy->mutex);
	if (phy->entity) {
		struct isp_pipeline *pipe = to_isp_pipeline(phy->entity);
		struct isp_bus_cfg *buscfg;

		buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
		if (WARN_ON(!buscfg)) {
			mutex_unlock(&phy->mutex);
			return;
		}

		csiphy_routing_cfg(phy, buscfg->interface, false,
				   buscfg->bus.ccp2.phy_layer);
		if (phy->isp->revision == ISP_REVISION_15_0) {
			csiphy_power_autoswitch_enable(phy, false);
			csiphy_set_power(phy, ISPCSI2_PHY_CFG_PWR_CMD_OFF);
		}
		regulator_disable(phy->vdd);
		phy->entity = NULL;
	}
	mutex_unlock(&phy->mutex);
}

/*
 * omap3isp_csiphy_init - Initialize the CSI PHY frontends
 */
int omap3isp_csiphy_init(struct isp_device *isp)
{
	struct isp_csiphy *phy1 = &isp->isp_csiphy1;
	struct isp_csiphy *phy2 = &isp->isp_csiphy2;

	phy2->isp = isp;
	phy2->csi2 = &isp->isp_csi2a;
	phy2->num_data_lanes = ISP_CSIPHY2_NUM_DATA_LANES;
	phy2->cfg_regs = OMAP3_ISP_IOMEM_CSI2A_REGS1;
	phy2->phy_regs = OMAP3_ISP_IOMEM_CSIPHY2;
	mutex_init(&phy2->mutex);

	phy1->isp = isp;
	mutex_init(&phy1->mutex);

	/* PHY1 (CSI2C) exists only on the 3630 (ISP revision 15.0). */
	if (isp->revision == ISP_REVISION_15_0) {
		phy1->csi2 = &isp->isp_csi2c;
		phy1->num_data_lanes = ISP_CSIPHY1_NUM_DATA_LANES;
		phy1->cfg_regs = OMAP3_ISP_IOMEM_CSI2C_REGS1;
		phy1->phy_regs = OMAP3_ISP_IOMEM_CSIPHY1;
	}

	return 0;
}

void omap3isp_csiphy_cleanup(struct isp_device *isp)
{
	mutex_destroy(&isp->isp_csiphy1.mutex);
	mutex_destroy(&isp->isp_csiphy2.mutex);
}
linux-master
drivers/media/platform/ti/omap3isp/ispcsiphy.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * isp.c
 *
 * TI OMAP3 ISP - Core
 *
 * Copyright (C) 2006-2010 Nokia Corporation
 * Copyright (C) 2007-2009 Texas Instruments, Inc.
 *
 * Contacts: Laurent Pinchart <[email protected]>
 *	     Sakari Ailus <[email protected]>
 *
 * Contributors:
 *	Laurent Pinchart <[email protected]>
 *	Sakari Ailus <[email protected]>
 *	David Cohen <[email protected]>
 *	Stanimir Varbanov <[email protected]>
 *	Vimarsh Zutshi <[email protected]>
 *	Tuukka Toivonen <[email protected]>
 *	Sergio Aguirre <[email protected]>
 *	Antti Koskipaa <[email protected]>
 *	Ivan T. Ivanov <[email protected]>
 *	RaniSuneela <[email protected]>
 *	Atanas Filipov <[email protected]>
 *	Gjorgji Rosikopulos <[email protected]>
 *	Hiroshi DOYU <[email protected]>
 *	Nayden Kanchev <[email protected]>
 *	Phil Carmody <[email protected]>
 *	Artem Bityutskiy <[email protected]>
 *	Dominic Curran <[email protected]>
 *	Ilkka Myllyperkio <[email protected]>
 *	Pallavi Kulkarni <[email protected]>
 *	Vaibhav Hiremath <[email protected]>
 *	Mohit Jalori <[email protected]>
 *	Sameer Venkatraman <[email protected]>
 *	Senthilvadivu Guruswamy <[email protected]>
 *	Thara Gopinath <[email protected]>
 *	Toni Leinonen <[email protected]>
 *	Troy Laramy <[email protected]>
 */

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/omap-iommu.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_ARM_DMA_USE_IOMMU
#include <asm/dma-iommu.h>
#endif

#include <media/v4l2-common.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mc.h>

#include "isp.h"
#include "ispreg.h"
#include "ispccdc.h"
#include "isppreview.h"
#include "ispresizer.h"
#include "ispcsi2.h"
#include "ispccp2.h"
#include "isph3a.h"
#include "isphist.h"

/* Module parameter: opt-in for hardware auto-idle (read-only at runtime). */
static unsigned int autoidle;
module_param(autoidle, int, 0444);
MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");

static void isp_save_ctx(struct isp_device *isp);
static void isp_restore_ctx(struct isp_device *isp);

/*
 * Per-revision sub-module register offsets within the two MMIO areas,
 * keyed by ISP hardware revision.
 */
static const struct isp_res_mapping isp_res_maps[] = {
	{
		.isp_rev = ISP_REVISION_2_0,
		.offset = {
			/* first MMIO area */
			0x0000, /* base, len 0x0070 */
			0x0400, /* ccp2, len 0x01f0 */
			0x0600, /* ccdc, len 0x00a8 */
			0x0a00, /* hist, len 0x0048 */
			0x0c00, /* h3a, len 0x0060 */
			0x0e00, /* preview, len 0x00a0 */
			0x1000, /* resizer, len 0x00ac */
			0x1200, /* sbl, len 0x00fc */
			/* second MMIO area */
			0x0000, /* csi2a, len 0x0170 */
			0x0170, /* csiphy2, len 0x000c */
		},
		.phy_type = ISP_PHY_TYPE_3430,
	},
	{
		.isp_rev = ISP_REVISION_15_0,
		.offset = {
			/* first MMIO area */
			0x0000, /* base, len 0x0070 */
			0x0400, /* ccp2, len 0x01f0 */
			0x0600, /* ccdc, len 0x00a8 */
			0x0a00, /* hist, len 0x0048 */
			0x0c00, /* h3a, len 0x0060 */
			0x0e00, /* preview, len 0x00a0 */
			0x1000, /* resizer, len 0x00ac */
			0x1200, /* sbl, len 0x00fc */
			/* second MMIO area */
			0x0000, /* csi2a, len 0x0170 (1st area) */
			0x0170, /* csiphy2, len 0x000c */
			0x01c0, /* csi2a, len 0x0040 (2nd area) */
			0x0400, /* csi2c, len 0x0170 (1st area) */
			0x0570, /* csiphy1, len 0x000c */
			0x05c0, /* csi2c, len 0x0040 (2nd area) */
		},
		.phy_type = ISP_PHY_TYPE_3630,
	},
};

/* Structure for saving/restoring ISP module registers */
static struct isp_reg isp_reg_list[] = {
	{OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG, 0},
	{OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, 0},
	{OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL, 0},
	{0, ISP_TOK_TERM, 0}	/* terminator */
};

/*
 * omap3isp_flush - Post pending L3 bus writes by doing a register readback
 * @isp: OMAP3 ISP device
 *
 * In order to force posting of pending writes, we need to write and
 * readback the same register, in this case the revision register.
 *
 * See this link for reference:
 * https://www.mail-archive.com/[email protected]/msg08149.html
 */
void omap3isp_flush(struct isp_device *isp)
{
	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
	isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
}

/* -----------------------------------------------------------------------------
 * XCLK
 */

#define to_isp_xclk(_hw)	container_of(_hw, struct isp_xclk, hw)

/*
 * isp_xclk_update - Write an XCLK divider to the matching TCTRL_CTRL field
 * @xclk: the external clock (A or B)
 * @divider: divider value to program (0 gates the clock output)
 */
static void isp_xclk_update(struct isp_xclk *xclk, u32 divider)
{
	switch (xclk->id) {
	case ISP_XCLK_A:
		isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
				ISPTCTRL_CTRL_DIVA_MASK,
				divider << ISPTCTRL_CTRL_DIVA_SHIFT);
		break;
	case ISP_XCLK_B:
		isp_reg_clr_set(xclk->isp, OMAP3_ISP_IOMEM_MAIN, ISP_TCTRL_CTRL,
				ISPTCTRL_CTRL_DIVB_MASK,
				divider << ISPTCTRL_CTRL_DIVB_SHIFT);
		break;
	}
}

/* clk_ops.prepare: take a runtime PM reference on the ISP. */
static int isp_xclk_prepare(struct clk_hw *hw)
{
	struct isp_xclk *xclk = to_isp_xclk(hw);

	omap3isp_get(xclk->isp);

	return 0;
}

/* clk_ops.unprepare: drop the runtime PM reference taken in prepare. */
static void isp_xclk_unprepare(struct clk_hw *hw)
{
	struct isp_xclk *xclk = to_isp_xclk(hw);

	omap3isp_put(xclk->isp);
}

static int isp_xclk_enable(struct clk_hw *hw)
{
	struct isp_xclk *xclk = to_isp_xclk(hw);
	unsigned long flags;

	spin_lock_irqsave(&xclk->lock, flags);
	isp_xclk_update(xclk, xclk->divider);
	xclk->enabled = true;
	spin_unlock_irqrestore(&xclk->lock, flags);

	return 0;
}

static void isp_xclk_disable(struct clk_hw *hw)
{
	struct isp_xclk *xclk = to_isp_xclk(hw);
	unsigned long flags;

	spin_lock_irqsave(&xclk->lock, flags);
	/* A divider of 0 gates the clock. */
	isp_xclk_update(xclk, 0);
	xclk->enabled = false;
	spin_unlock_irqrestore(&xclk->lock, flags);
}

static unsigned long isp_xclk_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct isp_xclk *xclk = to_isp_xclk(hw);

	return parent_rate / xclk->divider;
}

/*
 * isp_xclk_calc_divider - Compute the divider for a requested rate
 * @rate: in: requested rate, out: the achievable rate
 * @parent_rate: the cam_mclk parent rate
 *
 * Returns the divider to program; ISPTCTRL_CTRL_DIV_BYPASS when the
 * requested rate is at or above the parent rate.
 */
static u32 isp_xclk_calc_divider(unsigned long *rate, unsigned long parent_rate)
{
	u32 divider;

	if (*rate >= parent_rate) {
		*rate = parent_rate;
		return ISPTCTRL_CTRL_DIV_BYPASS;
	}

	if (*rate == 0)
		*rate = 1;

	divider = DIV_ROUND_CLOSEST(parent_rate, *rate);
	if (divider >= ISPTCTRL_CTRL_DIV_BYPASS)
		divider = ISPTCTRL_CTRL_DIV_BYPASS - 1;

	*rate = parent_rate / divider;
	return divider;
}

static long isp_xclk_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	isp_xclk_calc_divider(&rate, *parent_rate);
	return rate;
}

static int isp_xclk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct isp_xclk *xclk = to_isp_xclk(hw);
	unsigned long flags;
	u32 divider;

	divider = isp_xclk_calc_divider(&rate, parent_rate);

	spin_lock_irqsave(&xclk->lock, flags);

	xclk->divider = divider;
	/* Only touch the hardware if the clock is currently enabled. */
	if (xclk->enabled)
		isp_xclk_update(xclk, divider);

	spin_unlock_irqrestore(&xclk->lock, flags);

	dev_dbg(xclk->isp->dev, "%s: cam_xclk%c set to %lu Hz (div %u)\n",
		__func__, xclk->id == ISP_XCLK_A ? 'a' : 'b', rate, divider);
	return 0;
}

static const struct clk_ops isp_xclk_ops = {
	.prepare = isp_xclk_prepare,
	.unprepare = isp_xclk_unprepare,
	.enable = isp_xclk_enable,
	.disable = isp_xclk_disable,
	.recalc_rate = isp_xclk_recalc_rate,
	.round_rate = isp_xclk_round_rate,
	.set_rate = isp_xclk_set_rate,
};

static const char *isp_xclk_parent_name = "cam_mclk";

/* of_clk provider callback: map a DT clock specifier index to an xclk. */
static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
{
	unsigned int idx = clkspec->args[0];
	struct isp_device *isp = data;

	if (idx >= ARRAY_SIZE(isp->xclks))
		return ERR_PTR(-ENOENT);

	return isp->xclks[idx].clk;
}

/*
 * isp_xclk_init - Register the two external clock outputs (xclka/xclkb)
 * @isp: OMAP3 ISP device
 *
 * Returns 0 on success or the error from clk_register().
 */
static int isp_xclk_init(struct isp_device *isp)
{
	struct device_node *np = isp->dev->of_node;
	struct clk_init_data init = {};
	unsigned int i;

	/* Pre-mark all slots invalid so cleanup can skip unregistered ones. */
	for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
		isp->xclks[i].clk = ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
		struct isp_xclk *xclk = &isp->xclks[i];

		xclk->isp = isp;
		xclk->id = i == 0 ? ISP_XCLK_A : ISP_XCLK_B;
		xclk->divider = 1;
		spin_lock_init(&xclk->lock);

		/*
		 * The single local 'init' is reused each iteration; the clock
		 * framework copies the init data during registration.
		 */
		init.name = i == 0 ? "cam_xclka" : "cam_xclkb";
		init.ops = &isp_xclk_ops;
		init.parent_names = &isp_xclk_parent_name;
		init.num_parents = 1;

		xclk->hw.init = &init;
		/*
		 * The first argument is NULL in order to avoid circular
		 * reference, as this driver takes reference on the
		 * sensor subdevice modules and the sensors would take
		 * reference on this module through clk_get().
		 */
		xclk->clk = clk_register(NULL, &xclk->hw);
		if (IS_ERR(xclk->clk))
			return PTR_ERR(xclk->clk);
	}

	if (np)
		of_clk_add_provider(np, isp_xclk_src_get, isp);

	return 0;
}

/* Unregister the clock provider and any successfully registered xclks. */
static void isp_xclk_cleanup(struct isp_device *isp)
{
	struct device_node *np = isp->dev->of_node;
	unsigned int i;

	if (np)
		of_clk_del_provider(np);

	for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i) {
		struct isp_xclk *xclk = &isp->xclks[i];

		if (!IS_ERR(xclk->clk))
			clk_unregister(xclk->clk);
	}
}

/* -----------------------------------------------------------------------------
 * Interrupts
 */

/*
 * isp_enable_interrupts - Enable ISP interrupts.
 * @isp: OMAP3 ISP device
 */
static void isp_enable_interrupts(struct isp_device *isp)
{
	static const u32 irq = IRQ0ENABLE_CSIA_IRQ
			     | IRQ0ENABLE_CSIB_IRQ
			     | IRQ0ENABLE_CCDC_LSC_PREF_ERR_IRQ
			     | IRQ0ENABLE_CCDC_LSC_DONE_IRQ
			     | IRQ0ENABLE_CCDC_VD0_IRQ
			     | IRQ0ENABLE_CCDC_VD1_IRQ
			     | IRQ0ENABLE_HS_VS_IRQ
			     | IRQ0ENABLE_HIST_DONE_IRQ
			     | IRQ0ENABLE_H3A_AWB_DONE_IRQ
			     | IRQ0ENABLE_H3A_AF_DONE_IRQ
			     | IRQ0ENABLE_PRV_DONE_IRQ
			     | IRQ0ENABLE_RSZ_DONE_IRQ;

	/* Write IRQ0STATUS first, then unmask the sources in IRQ0ENABLE. */
	isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
	isp_reg_writel(isp, irq, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
}

/*
 * isp_disable_interrupts - Disable ISP interrupts.
 * @isp: OMAP3 ISP device
 */
static void isp_disable_interrupts(struct isp_device *isp)
{
	isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0ENABLE);
}

/*
 * isp_core_init - ISP core settings
 * @isp: OMAP3 ISP device
 * @idle: Consider idle state.
 *
 * Set the power settings for the ISP and SBL bus and configure the HS/VS
 * interrupt source.
 *
 * We need to configure the HS/VS interrupt source before interrupts get
 * enabled, as the sensor might be free-running and the ISP default setting
 * (HS edge) would put an unnecessary burden on the CPU.
 */
static void isp_core_init(struct isp_device *isp, int idle)
{
	isp_reg_writel(isp,
		       ((idle ? ISP_SYSCONFIG_MIDLEMODE_SMARTSTANDBY :
				ISP_SYSCONFIG_MIDLEMODE_FORCESTANDBY) <<
			ISP_SYSCONFIG_MIDLEMODE_SHIFT) |
			((isp->revision == ISP_REVISION_15_0) ?
			  ISP_SYSCONFIG_AUTOIDLE : 0),
		       OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG);

	isp_reg_writel(isp,
		       (isp->autoidle ? ISPCTRL_SBL_AUTOIDLE : 0) |
		       ISPCTRL_SYNC_DETECT_VSRISE,
		       OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
}

/*
 * Configure the bridge and lane shifter. Valid inputs are
 *
 * CCDC_INPUT_PARALLEL: Parallel interface
 * CCDC_INPUT_CSI2A: CSI2a receiver
 * CCDC_INPUT_CCP2B: CCP2b receiver
 * CCDC_INPUT_CSI2C: CSI2c receiver
 *
 * The bridge and lane shifter are configured according to the selected input
 * and the ISP platform data.
 */
void omap3isp_configure_bridge(struct isp_device *isp,
			       enum ccdc_input_entity input,
			       const struct isp_parallel_cfg *parcfg,
			       unsigned int shift, unsigned int bridge)
{
	u32 ispctrl_val;

	ispctrl_val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
	ispctrl_val &= ~ISPCTRL_SHIFT_MASK;
	ispctrl_val &= ~ISPCTRL_PAR_CLK_POL_INV;
	ispctrl_val &= ~ISPCTRL_PAR_SER_CLK_SEL_MASK;
	ispctrl_val &= ~ISPCTRL_PAR_BRIDGE_MASK;
	ispctrl_val |= bridge;

	switch (input) {
	case CCDC_INPUT_PARALLEL:
		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
		ispctrl_val |= parcfg->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
		/* The parallel interface adds its own data lane shift. */
		shift += parcfg->data_lane_shift;
		break;

	case CCDC_INPUT_CSI2A:
		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIA;
		break;

	case CCDC_INPUT_CCP2B:
		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIB;
		break;

	case CCDC_INPUT_CSI2C:
		ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_CSIC;
		break;

	default:
		return;
	}

	/* The SHIFT field is expressed in units of two bits. */
	ispctrl_val |= ((shift/2) << ISPCTRL_SHIFT_SHIFT) & ISPCTRL_SHIFT_MASK;

	isp_reg_writel(isp, ispctrl_val, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL);
}

/*
 * omap3isp_hist_dma_done - Notify that the histogram DMA transfer finished
 * @isp: OMAP3 ISP device
 *
 * Flags a histogram buffer error when the CCDC or the histogram engine is
 * still busy, i.e. the histogram has fallen out of sync with the CCDC.
 */
void omap3isp_hist_dma_done(struct isp_device *isp)
{
	if (omap3isp_ccdc_busy(&isp->isp_ccdc) ||
	    omap3isp_stat_pcr_busy(&isp->isp_hist)) {
		/* Histogram cannot be enabled in this frame anymore */
		atomic_set(&isp->isp_hist.buf_err, 1);
		dev_dbg(isp->dev,
			"hist: Out of synchronization with CCDC. Ignoring next buffer.\n");
	}
}

/*
 * isp_isr_dbg - Print the names of the asserted IRQ0 status bits (debug only)
 * @isp: OMAP3 ISP device
 * @irqstatus: raw ISP_IRQ0STATUS value
 */
static inline void __maybe_unused isp_isr_dbg(struct isp_device *isp,
					      u32 irqstatus)
{
	/* One name per IRQ0STATUS bit, LSB first; "resN" = reserved bits. */
	static const char *name[] = {
		"CSIA_IRQ",
		"res1",
		"res2",
		"CSIB_LCM_IRQ",
		"CSIB_IRQ",
		"res5",
		"res6",
		"res7",
		"CCDC_VD0_IRQ",
		"CCDC_VD1_IRQ",
		"CCDC_VD2_IRQ",
		"CCDC_ERR_IRQ",
		"H3A_AF_DONE_IRQ",
		"H3A_AWB_DONE_IRQ",
		"res14",
		"res15",
		"HIST_DONE_IRQ",
		"CCDC_LSC_DONE",
		"CCDC_LSC_PREFETCH_COMPLETED",
		"CCDC_LSC_PREFETCH_ERROR",
		"PRV_DONE_IRQ",
		"CBUFF_IRQ",
		"res22",
		"res23",
		"RSZ_DONE_IRQ",
		"OVF_IRQ",
		"res26",
		"res27",
		"MMU_ERR_IRQ",
		"OCP_ERR_IRQ",
		"SEC_ERR_IRQ",
		"HS_VS_IRQ",
	};
	int i;

	dev_dbg(isp->dev, "ISP IRQ: ");

	for (i = 0; i < ARRAY_SIZE(name); i++) {
		if ((1 << i) & irqstatus)
			printk(KERN_CONT "%s ", name[i]);
	}
	printk(KERN_CONT "\n");
}

/*
 * isp_isr_sbl - Handle shared buffer logic (SBL) overflow interrupts
 * @isp: OMAP3 ISP device
 *
 * Reads and acknowledges ISPSBL_PCR, then flags the pipeline feeding each
 * overflowed write buffer as erroneous, and notifies the statistics modules
 * of their own overflows.
 */
static void isp_isr_sbl(struct isp_device *isp)
{
	struct device *dev = isp->dev;
	struct isp_pipeline *pipe;
	u32 sbl_pcr;

	/*
	 * Handle shared buffer logic overflows for video buffers.
	 * ISPSBL_PCR_CCDCPRV_2_RSZ_OVF can be safely ignored.
	 */
	sbl_pcr = isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
	isp_reg_writel(isp, sbl_pcr, OMAP3_ISP_IOMEM_SBL, ISPSBL_PCR);
	sbl_pcr &= ~ISPSBL_PCR_CCDCPRV_2_RSZ_OVF;

	if (sbl_pcr)
		dev_dbg(dev, "SBL overflow (PCR = 0x%08x)\n", sbl_pcr);

	if (sbl_pcr & ISPSBL_PCR_CSIB_WBL_OVF) {
		pipe = to_isp_pipeline(&isp->isp_ccp2.subdev.entity);
		if (pipe != NULL)
			pipe->error = true;
	}

	if (sbl_pcr & ISPSBL_PCR_CSIA_WBL_OVF) {
		pipe = to_isp_pipeline(&isp->isp_csi2a.subdev.entity);
		if (pipe != NULL)
			pipe->error = true;
	}

	if (sbl_pcr & ISPSBL_PCR_CCDC_WBL_OVF) {
		pipe = to_isp_pipeline(&isp->isp_ccdc.subdev.entity);
		if (pipe != NULL)
			pipe->error = true;
	}

	if (sbl_pcr & ISPSBL_PCR_PRV_WBL_OVF) {
		pipe = to_isp_pipeline(&isp->isp_prev.subdev.entity);
		if (pipe != NULL)
			pipe->error = true;
	}

	if (sbl_pcr & (ISPSBL_PCR_RSZ1_WBL_OVF
		       | ISPSBL_PCR_RSZ2_WBL_OVF
		       | ISPSBL_PCR_RSZ3_WBL_OVF
		       | ISPSBL_PCR_RSZ4_WBL_OVF)) {
		pipe = to_isp_pipeline(&isp->isp_res.subdev.entity);
		if (pipe != NULL)
			pipe->error = true;
	}

	if (sbl_pcr & ISPSBL_PCR_H3A_AF_WBL_OVF)
		omap3isp_stat_sbl_overflow(&isp->isp_af);

	if (sbl_pcr & ISPSBL_PCR_H3A_AEAWB_WBL_OVF)
		omap3isp_stat_sbl_overflow(&isp->isp_aewb);
}

/*
 * isp_isr - Interrupt Service Routine for Camera ISP module.
 * @irq: Not used currently.
 * @_isp: Pointer to the OMAP3 ISP device
 *
 * Handles the corresponding callback if plugged in.
 */
static irqreturn_t isp_isr(int irq, void *_isp)
{
	static const u32 ccdc_events = IRQ0STATUS_CCDC_LSC_PREF_ERR_IRQ |
				       IRQ0STATUS_CCDC_LSC_DONE_IRQ |
				       IRQ0STATUS_CCDC_VD0_IRQ |
				       IRQ0STATUS_CCDC_VD1_IRQ |
				       IRQ0STATUS_HS_VS_IRQ;
	struct isp_device *isp = _isp;
	u32 irqstatus;

	/* Read and immediately acknowledge all pending IRQ0 events. */
	irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);
	isp_reg_writel(isp, irqstatus, OMAP3_ISP_IOMEM_MAIN, ISP_IRQ0STATUS);

	isp_isr_sbl(isp);

	if (irqstatus & IRQ0STATUS_CSIA_IRQ)
		omap3isp_csi2_isr(&isp->isp_csi2a);

	if (irqstatus & IRQ0STATUS_CSIB_IRQ)
		omap3isp_ccp2_isr(&isp->isp_ccp2);

	if (irqstatus & IRQ0STATUS_CCDC_VD0_IRQ) {
		if (isp->isp_ccdc.output & CCDC_OUTPUT_PREVIEW)
			omap3isp_preview_isr_frame_sync(&isp->isp_prev);
		if (isp->isp_ccdc.output & CCDC_OUTPUT_RESIZER)
			omap3isp_resizer_isr_frame_sync(&isp->isp_res);
		omap3isp_stat_isr_frame_sync(&isp->isp_aewb);
		omap3isp_stat_isr_frame_sync(&isp->isp_af);
		omap3isp_stat_isr_frame_sync(&isp->isp_hist);
	}

	if (irqstatus & ccdc_events)
		omap3isp_ccdc_isr(&isp->isp_ccdc, irqstatus & ccdc_events);

	if (irqstatus & IRQ0STATUS_PRV_DONE_IRQ) {
		if (isp->isp_prev.output & PREVIEW_OUTPUT_RESIZER)
			omap3isp_resizer_isr_frame_sync(&isp->isp_res);
		omap3isp_preview_isr(&isp->isp_prev);
	}

	if (irqstatus & IRQ0STATUS_RSZ_DONE_IRQ)
		omap3isp_resizer_isr(&isp->isp_res);

	if (irqstatus & IRQ0STATUS_H3A_AWB_DONE_IRQ)
		omap3isp_stat_isr(&isp->isp_aewb);

	if (irqstatus & IRQ0STATUS_H3A_AF_DONE_IRQ)
		omap3isp_stat_isr(&isp->isp_af);

	if (irqstatus & IRQ0STATUS_HIST_DONE_IRQ)
		omap3isp_stat_isr(&isp->isp_hist);

	/* Post the status write before returning (see omap3isp_flush()). */
	omap3isp_flush(isp);

#if defined(DEBUG) && defined(ISP_ISR_DEBUG)
	isp_isr_dbg(isp, irqstatus);
#endif

	return IRQ_HANDLED;
}

static const struct media_device_ops isp_media_ops = {
	.link_notify = v4l2_pipeline_link_notify,
};

/* -----------------------------------------------------------------------------
 * Pipeline stream management
 */

/*
 * isp_pipeline_enable - Enable streaming on a pipeline
 * @pipe: ISP pipeline
 * @mode: Stream mode (single shot or continuous)
 *
 * Walk the entities chain starting at the pipeline output video node and start
 * all modules in the chain in the given mode.
 *
 * Return 0 if successful, or the return value of the failed video::s_stream
 * operation otherwise.
 */
static int isp_pipeline_enable(struct isp_pipeline *pipe,
			       enum isp_pipeline_stream_state mode)
{
	struct isp_device *isp = pipe->output->isp;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	unsigned long flags;
	int ret;

	/* Refuse to start streaming if an entity included in the pipeline has
	 * crashed. This check must be performed before the loop below to avoid
	 * starting entities if the pipeline won't start anyway (those entities
	 * would then likely fail to stop, making the problem worse).
	 */
	if (media_entity_enum_intersects(&pipe->ent_enum, &isp->crashed))
		return -EIO;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~(ISP_PIPELINE_IDLE_INPUT | ISP_PIPELINE_IDLE_OUTPUT);
	spin_unlock_irqrestore(&pipe->lock, flags);

	pipe->do_propagation = false;

	mutex_lock(&isp->media_dev.graph_mutex);

	/* Walk upstream from the output video node through sink pad 0. */
	entity = &pipe->output->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_pad_remote_pad_first(pad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

		ret = v4l2_subdev_call(subdev, video, s_stream, mode);
		if (ret < 0 && ret != -ENOIOCTLCMD) {
			mutex_unlock(&isp->media_dev.graph_mutex);
			return ret;
		}

		/* Starting the CCDC also starts the statistics engines. */
		if (subdev == &isp->isp_ccdc.subdev) {
			v4l2_subdev_call(&isp->isp_aewb.subdev, video,
					 s_stream, mode);
			v4l2_subdev_call(&isp->isp_af.subdev, video,
					 s_stream, mode);
			v4l2_subdev_call(&isp->isp_hist.subdev, video,
					 s_stream, mode);
			pipe->do_propagation = true;
		}

		/* Stop at the first external sub-device. */
		if (subdev->dev != isp->dev)
			break;
	}

	mutex_unlock(&isp->media_dev.graph_mutex);

	return 0;
}

/* Busy predicates used with isp_pipeline_wait() below. */
static int isp_pipeline_wait_resizer(struct isp_device *isp)
{
	return omap3isp_resizer_busy(&isp->isp_res);
}

static int isp_pipeline_wait_preview(struct isp_device *isp)
{
	return omap3isp_preview_busy(&isp->isp_prev);
}

static int isp_pipeline_wait_ccdc(struct isp_device *isp)
{
	return omap3isp_stat_busy(&isp->isp_af)
	    || omap3isp_stat_busy(&isp->isp_aewb)
	    || omap3isp_stat_busy(&isp->isp_hist)
	    || omap3isp_ccdc_busy(&isp->isp_ccdc);
}

#define ISP_STOP_TIMEOUT	msecs_to_jiffies(1000)

/*
 * isp_pipeline_wait - Busy-wait until @busy reports idle or timeout
 * @isp: OMAP3 ISP device
 * @busy: predicate returning non-zero while the module is busy
 *
 * Returns 0 when the module went idle, 1 on timeout.
 */
static int isp_pipeline_wait(struct isp_device *isp,
			     int(*busy)(struct isp_device *isp))
{
	unsigned long timeout = jiffies + ISP_STOP_TIMEOUT;

	while (!time_after(jiffies, timeout)) {
		if (!busy(isp))
			return 0;
	}

	return 1;
}

/*
 * isp_pipeline_disable - Disable streaming on a pipeline
 * @pipe: ISP pipeline
 *
 * Walk the entities chain starting at the pipeline output video node and stop
 * all modules in the chain. Wait synchronously for the modules to be stopped if
 * necessary.
 *
 * Return 0 if all modules have been properly stopped, or -ETIMEDOUT if a module
 * can't be stopped (in which case a software reset of the ISP is probably
 * necessary).
 */
static int isp_pipeline_disable(struct isp_pipeline *pipe)
{
	struct isp_device *isp = pipe->output->isp;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int failure = 0;
	int ret;

	/*
	 * We need to stop all the modules after CCDC first or they'll
	 * never stop since they may not get a full frame from CCDC.
	 */
	entity = &pipe->output->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_pad_remote_pad_first(pad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

		/* Stop the statistics engines before stopping the CCDC. */
		if (subdev == &isp->isp_ccdc.subdev) {
			v4l2_subdev_call(&isp->isp_aewb.subdev,
					 video, s_stream, 0);
			v4l2_subdev_call(&isp->isp_af.subdev,
					 video, s_stream, 0);
			v4l2_subdev_call(&isp->isp_hist.subdev,
					 video, s_stream, 0);
		}

		ret = v4l2_subdev_call(subdev, video, s_stream, 0);

		/* Stop at the first external sub-device. */
		if (subdev->dev != isp->dev)
			break;

		if (subdev == &isp->isp_res.subdev)
			ret |= isp_pipeline_wait(isp, isp_pipeline_wait_resizer);
		else if (subdev == &isp->isp_prev.subdev)
			ret |= isp_pipeline_wait(isp, isp_pipeline_wait_preview);
		else if (subdev == &isp->isp_ccdc.subdev)
			ret |= isp_pipeline_wait(isp, isp_pipeline_wait_ccdc);

		/* Handle stop failures. An entity that fails to stop can
		 * usually just be restarted. Flag the stop failure nonetheless
		 * to trigger an ISP reset the next time the device is released,
		 * just in case.
		 *
		 * The preview engine is a special case. A failure to stop can
		 * mean a hardware crash. When that happens the preview engine
		 * won't respond to read/write operations on the L4 bus anymore,
		 * resulting in a bus fault and a kernel oops next time it gets
		 * accessed. Mark it as crashed to prevent pipelines including
		 * it from being started.
		 */
		if (ret) {
			dev_info(isp->dev, "Unable to stop %s\n", subdev->name);
			isp->stop_failure = true;
			if (subdev == &isp->isp_prev.subdev)
				media_entity_enum_set(&isp->crashed,
						      &subdev->entity);
			failure = -ETIMEDOUT;
		}
	}

	return failure;
}

/*
 * omap3isp_pipeline_set_stream - Enable/disable streaming on a pipeline
 * @pipe: ISP pipeline
 * @state: Stream state (stopped, single shot or continuous)
 *
 * Set the pipeline to the given stream state. Pipelines can be started in
 * single-shot or continuous mode.
* * Return 0 if successful, or the return value of the failed video::s_stream * operation otherwise. The pipeline state is not updated when the operation * fails, except when stopping the pipeline. */ int omap3isp_pipeline_set_stream(struct isp_pipeline *pipe, enum isp_pipeline_stream_state state) { int ret; if (state == ISP_PIPELINE_STREAM_STOPPED) ret = isp_pipeline_disable(pipe); else ret = isp_pipeline_enable(pipe, state); if (ret == 0 || state == ISP_PIPELINE_STREAM_STOPPED) pipe->stream_state = state; return ret; } /* * omap3isp_pipeline_cancel_stream - Cancel stream on a pipeline * @pipe: ISP pipeline * * Cancelling a stream mark all buffers on all video nodes in the pipeline as * erroneous and makes sure no new buffer can be queued. This function is called * when a fatal error that prevents any further operation on the pipeline * occurs. */ void omap3isp_pipeline_cancel_stream(struct isp_pipeline *pipe) { if (pipe->input) omap3isp_video_cancel_stream(pipe->input); if (pipe->output) omap3isp_video_cancel_stream(pipe->output); } /* * isp_pipeline_resume - Resume streaming on a pipeline * @pipe: ISP pipeline * * Resume video output and input and re-enable pipeline. */ static void isp_pipeline_resume(struct isp_pipeline *pipe) { int singleshot = pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT; omap3isp_video_resume(pipe->output, !singleshot); if (singleshot) omap3isp_video_resume(pipe->input, 0); isp_pipeline_enable(pipe, pipe->stream_state); } /* * isp_pipeline_suspend - Suspend streaming on a pipeline * @pipe: ISP pipeline * * Suspend pipeline. */ static void isp_pipeline_suspend(struct isp_pipeline *pipe) { isp_pipeline_disable(pipe); } /* * isp_pipeline_is_last - Verify if entity has an enabled link to the output * video node * @me: ISP module's media entity * * Returns 1 if the entity has an enabled link to the output video node or 0 * otherwise. It's true only while pipeline can have no more than one output * node. 
*/ static int isp_pipeline_is_last(struct media_entity *me) { struct isp_pipeline *pipe; struct media_pad *pad; pipe = to_isp_pipeline(me); if (!pipe || pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED) return 0; pad = media_pad_remote_pad_first(&pipe->output->pad); return pad->entity == me; } /* * isp_suspend_module_pipeline - Suspend pipeline to which belongs the module * @me: ISP module's media entity * * Suspend the whole pipeline if module's entity has an enabled link to the * output video node. It works only while pipeline can have no more than one * output node. */ static void isp_suspend_module_pipeline(struct media_entity *me) { if (isp_pipeline_is_last(me)) isp_pipeline_suspend(to_isp_pipeline(me)); } /* * isp_resume_module_pipeline - Resume pipeline to which belongs the module * @me: ISP module's media entity * * Resume the whole pipeline if module's entity has an enabled link to the * output video node. It works only while pipeline can have no more than one * output node. */ static void isp_resume_module_pipeline(struct media_entity *me) { if (isp_pipeline_is_last(me)) isp_pipeline_resume(to_isp_pipeline(me)); } /* * isp_suspend_modules - Suspend ISP submodules. * @isp: OMAP3 ISP device * * Returns 0 if suspend left in idle state all the submodules properly, * or returns 1 if a general Reset is required to suspend the submodules. 
 */
static int __maybe_unused isp_suspend_modules(struct isp_device *isp)
{
	unsigned long timeout;

	/* Suspend the statistics engines first, then the processing
	 * pipelines that feed them.
	 */
	omap3isp_stat_suspend(&isp->isp_aewb);
	omap3isp_stat_suspend(&isp->isp_af);
	omap3isp_stat_suspend(&isp->isp_hist);
	isp_suspend_module_pipeline(&isp->isp_res.subdev.entity);
	isp_suspend_module_pipeline(&isp->isp_prev.subdev.entity);
	isp_suspend_module_pipeline(&isp->isp_ccdc.subdev.entity);
	isp_suspend_module_pipeline(&isp->isp_csi2a.subdev.entity);
	isp_suspend_module_pipeline(&isp->isp_ccp2.subdev.entity);

	/* Poll all submodules until they report idle, or give up after
	 * ISP_STOP_TIMEOUT and request a full reset by returning 1.
	 */
	timeout = jiffies + ISP_STOP_TIMEOUT;
	while (omap3isp_stat_busy(&isp->isp_af) ||
	       omap3isp_stat_busy(&isp->isp_aewb) ||
	       omap3isp_stat_busy(&isp->isp_hist) ||
	       omap3isp_preview_busy(&isp->isp_prev) ||
	       omap3isp_resizer_busy(&isp->isp_res) ||
	       omap3isp_ccdc_busy(&isp->isp_ccdc)) {
		if (time_after(jiffies, timeout)) {
			dev_info(isp->dev, "can't stop modules.\n");
			return 1;
		}
		/* Sleep briefly so the modules can finish the current frame. */
		msleep(1);
	}

	return 0;
}

/*
 * isp_resume_modules - Resume ISP submodules.
 * @isp: OMAP3 ISP device
 */
static void __maybe_unused isp_resume_modules(struct isp_device *isp)
{
	/* Mirror of isp_suspend_modules(): statistics engines first, then
	 * the pipelines.
	 */
	omap3isp_stat_resume(&isp->isp_aewb);
	omap3isp_stat_resume(&isp->isp_af);
	omap3isp_stat_resume(&isp->isp_hist);
	isp_resume_module_pipeline(&isp->isp_res.subdev.entity);
	isp_resume_module_pipeline(&isp->isp_prev.subdev.entity);
	isp_resume_module_pipeline(&isp->isp_ccdc.subdev.entity);
	isp_resume_module_pipeline(&isp->isp_csi2a.subdev.entity);
	isp_resume_module_pipeline(&isp->isp_ccp2.subdev.entity);
}

/*
 * isp_reset - Reset ISP with a timeout wait for idle.
* @isp: OMAP3 ISP device */ static int isp_reset(struct isp_device *isp) { unsigned long timeout = 0; isp_reg_writel(isp, isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG) | ISP_SYSCONFIG_SOFTRESET, OMAP3_ISP_IOMEM_MAIN, ISP_SYSCONFIG); while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_SYSSTATUS) & 0x1)) { if (timeout++ > 10000) { dev_alert(isp->dev, "cannot reset ISP\n"); return -ETIMEDOUT; } udelay(1); } isp->stop_failure = false; media_entity_enum_zero(&isp->crashed); return 0; } /* * isp_save_context - Saves the values of the ISP module registers. * @isp: OMAP3 ISP device * @reg_list: Structure containing pairs of register address and value to * modify on OMAP. */ static void isp_save_context(struct isp_device *isp, struct isp_reg *reg_list) { struct isp_reg *next = reg_list; for (; next->reg != ISP_TOK_TERM; next++) next->val = isp_reg_readl(isp, next->mmio_range, next->reg); } /* * isp_restore_context - Restores the values of the ISP module registers. * @isp: OMAP3 ISP device * @reg_list: Structure containing pairs of register address and value to * modify on OMAP. */ static void isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list) { struct isp_reg *next = reg_list; for (; next->reg != ISP_TOK_TERM; next++) isp_reg_writel(isp, next->val, next->mmio_range, next->reg); } /* * isp_save_ctx - Saves ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context. * @isp: OMAP3 ISP device * * Routine for saving the context of each module in the ISP. * CCDC, HIST, H3A, PREV, RESZ and MMU. */ static void isp_save_ctx(struct isp_device *isp) { isp_save_context(isp, isp_reg_list); omap_iommu_save_ctx(isp->dev); } /* * isp_restore_ctx - Restores ISP, CCDC, HIST, H3A, PREV, RESZ & MMU context. * @isp: OMAP3 ISP device * * Routine for restoring the context of each module in the ISP. * CCDC, HIST, H3A, PREV, RESZ and MMU. 
 */
static void isp_restore_ctx(struct isp_device *isp)
{
	isp_restore_context(isp, isp_reg_list);
	omap_iommu_restore_ctx(isp->dev);
	omap3isp_ccdc_restore_context(isp);
	omap3isp_preview_restore_context(isp);
}

/* -----------------------------------------------------------------------------
 * SBL resources management
 */
#define OMAP3_ISP_SBL_READ	(OMAP3_ISP_SBL_CSI1_READ | \
				 OMAP3_ISP_SBL_CCDC_LSC_READ | \
				 OMAP3_ISP_SBL_PREVIEW_READ | \
				 OMAP3_ISP_SBL_RESIZER_READ)
#define OMAP3_ISP_SBL_WRITE	(OMAP3_ISP_SBL_CSI1_WRITE | \
				 OMAP3_ISP_SBL_CSI2A_WRITE | \
				 OMAP3_ISP_SBL_CSI2C_WRITE | \
				 OMAP3_ISP_SBL_CCDC_WRITE | \
				 OMAP3_ISP_SBL_PREVIEW_WRITE)

/*
 * omap3isp_sbl_enable - Mark an SBL resource as in use and enable the
 * corresponding shared-buffer-logic ports/RAMs in ISP_CTRL.
 * @isp: OMAP3 ISP device
 * @res: SBL resource being acquired
 *
 * The ISP_CTRL bits are recomputed from the full set of active resources,
 * so enabling one resource never clears bits needed by another.
 */
void omap3isp_sbl_enable(struct isp_device *isp, enum isp_sbl_resource res)
{
	u32 sbl = 0;

	isp->sbl_resources |= res;

	if (isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ)
		sbl |= ISPCTRL_SBL_SHARED_RPORTA;

	if (isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ)
		sbl |= ISPCTRL_SBL_SHARED_RPORTB;

	if (isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE)
		sbl |= ISPCTRL_SBL_SHARED_WPORTC;

	if (isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE)
		sbl |= ISPCTRL_SBL_WR0_RAM_EN;

	if (isp->sbl_resources & OMAP3_ISP_SBL_WRITE)
		sbl |= ISPCTRL_SBL_WR1_RAM_EN;

	if (isp->sbl_resources & OMAP3_ISP_SBL_READ)
		sbl |= ISPCTRL_SBL_RD_RAM_EN;

	isp_reg_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
}

/*
 * omap3isp_sbl_disable - Release an SBL resource and clear the ISP_CTRL bits
 * that no longer have any user.
 * @isp: OMAP3 ISP device
 * @res: SBL resource being released
 *
 * Inverse of omap3isp_sbl_enable(): only bits whose resource set became
 * empty are cleared.
 */
void omap3isp_sbl_disable(struct isp_device *isp, enum isp_sbl_resource res)
{
	u32 sbl = 0;

	isp->sbl_resources &= ~res;

	if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI1_READ))
		sbl |= ISPCTRL_SBL_SHARED_RPORTA;

	if (!(isp->sbl_resources & OMAP3_ISP_SBL_CCDC_LSC_READ))
		sbl |= ISPCTRL_SBL_SHARED_RPORTB;

	if (!(isp->sbl_resources & OMAP3_ISP_SBL_CSI2C_WRITE))
		sbl |= ISPCTRL_SBL_SHARED_WPORTC;

	if (!(isp->sbl_resources & OMAP3_ISP_SBL_RESIZER_WRITE))
		sbl |= ISPCTRL_SBL_WR0_RAM_EN;

	if (!(isp->sbl_resources & OMAP3_ISP_SBL_WRITE))
		sbl |= ISPCTRL_SBL_WR1_RAM_EN;

	if (!(isp->sbl_resources & OMAP3_ISP_SBL_READ))
		sbl |= ISPCTRL_SBL_RD_RAM_EN;

	isp_reg_clr(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, sbl);
}

/*
 * omap3isp_module_sync_idle - Helper to sync module with its idle state
 * @me: ISP submodule's media entity
 * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
 * @stopping: flag which tells module wants to stop
 *
 * This function checks if ISP submodule needs to wait for next interrupt. If
 * yes, makes the caller to sleep while waiting for such event.
 */
int omap3isp_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
			      atomic_t *stopping)
{
	struct isp_pipeline *pipe = to_isp_pipeline(me);

	/* Nothing to wait for when the pipeline is stopped, or when a
	 * single-shot capture has not been fully queued yet.
	 */
	if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED ||
	    (pipe->stream_state == ISP_PIPELINE_STREAM_SINGLESHOT &&
	     !isp_pipeline_ready(pipe)))
		return 0;

	/*
	 * atomic_set() doesn't include memory barrier on ARM platform for SMP
	 * scenario. We'll call it here to avoid race conditions.
	 */
	atomic_set(stopping, 1);
	smp_mb();

	/*
	 * If module is the last one, it's writing to memory. In this case,
	 * it's necessary to check if the module is already paused due to
	 * DMA queue underrun or if it has to wait for next interrupt to be
	 * idle.
	 * If it isn't the last one, the function won't sleep but *stopping
	 * will still be set to warn next submodule caller's interrupt the
	 * module wants to be idle.
	 */
	if (isp_pipeline_is_last(me)) {
		struct isp_video *video = pipe->output;
		unsigned long flags;

		spin_lock_irqsave(&video->irqlock, flags);
		if (video->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
			/* Already paused on underrun: no interrupt will come,
			 * so clear the stopping flag and return immediately.
			 */
			spin_unlock_irqrestore(&video->irqlock, flags);
			atomic_set(stopping, 0);
			smp_mb();
			return 0;
		}
		spin_unlock_irqrestore(&video->irqlock, flags);
		if (!wait_event_timeout(*wait, !atomic_read(stopping),
					msecs_to_jiffies(1000))) {
			atomic_set(stopping, 0);
			smp_mb();
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/*
 * omap3isp_module_sync_is_stopping - Helper to verify if module was stopping
 * @wait: ISP submodule's wait queue for streamoff/interrupt synchronization
 * @stopping: flag which tells module wants to stop
 *
 * This function checks if ISP submodule was stopping.
In case of yes, it * notices the caller by setting stopping to 0 and waking up the wait queue. * Returns 1 if it was stopping or 0 otherwise. */ int omap3isp_module_sync_is_stopping(wait_queue_head_t *wait, atomic_t *stopping) { if (atomic_cmpxchg(stopping, 1, 0)) { wake_up(wait); return 1; } return 0; } /* -------------------------------------------------------------------------- * Clock management */ #define ISPCTRL_CLKS_MASK (ISPCTRL_H3A_CLK_EN | \ ISPCTRL_HIST_CLK_EN | \ ISPCTRL_RSZ_CLK_EN | \ (ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN) | \ (ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN)) static void __isp_subclk_update(struct isp_device *isp) { u32 clk = 0; /* AEWB and AF share the same clock. */ if (isp->subclk_resources & (OMAP3_ISP_SUBCLK_AEWB | OMAP3_ISP_SUBCLK_AF)) clk |= ISPCTRL_H3A_CLK_EN; if (isp->subclk_resources & OMAP3_ISP_SUBCLK_HIST) clk |= ISPCTRL_HIST_CLK_EN; if (isp->subclk_resources & OMAP3_ISP_SUBCLK_RESIZER) clk |= ISPCTRL_RSZ_CLK_EN; /* NOTE: For CCDC & Preview submodules, we need to affect internal * RAM as well. */ if (isp->subclk_resources & OMAP3_ISP_SUBCLK_CCDC) clk |= ISPCTRL_CCDC_CLK_EN | ISPCTRL_CCDC_RAM_EN; if (isp->subclk_resources & OMAP3_ISP_SUBCLK_PREVIEW) clk |= ISPCTRL_PREV_CLK_EN | ISPCTRL_PREV_RAM_EN; isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_MAIN, ISP_CTRL, ISPCTRL_CLKS_MASK, clk); } void omap3isp_subclk_enable(struct isp_device *isp, enum isp_subclk_resource res) { isp->subclk_resources |= res; __isp_subclk_update(isp); } void omap3isp_subclk_disable(struct isp_device *isp, enum isp_subclk_resource res) { isp->subclk_resources &= ~res; __isp_subclk_update(isp); } /* * isp_enable_clocks - Enable ISP clocks * @isp: OMAP3 ISP device * * Return 0 if successful, or clk_prepare_enable return value if any of them * fails. 
 */
static int isp_enable_clocks(struct isp_device *isp)
{
	int r;
	unsigned long rate;

	r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_ICK]);
	if (r) {
		dev_err(isp->dev, "failed to enable cam_ick clock\n");
		goto out_clk_enable_ick;
	}
	/* Program the MCLK rate before enabling the clock. */
	r = clk_set_rate(isp->clock[ISP_CLK_CAM_MCLK], CM_CAM_MCLK_HZ);
	if (r) {
		dev_err(isp->dev, "clk_set_rate for cam_mclk failed\n");
		goto out_clk_enable_mclk;
	}
	r = clk_prepare_enable(isp->clock[ISP_CLK_CAM_MCLK]);
	if (r) {
		dev_err(isp->dev, "failed to enable cam_mclk clock\n");
		goto out_clk_enable_mclk;
	}
	/* The clock framework may round the rate; warn but carry on if it
	 * differs from the requested CM_CAM_MCLK_HZ.
	 */
	rate = clk_get_rate(isp->clock[ISP_CLK_CAM_MCLK]);
	if (rate != CM_CAM_MCLK_HZ)
		dev_warn(isp->dev, "unexpected cam_mclk rate:\n"
				   " expected : %d\n"
				   " actual : %ld\n", CM_CAM_MCLK_HZ, rate);
	r = clk_prepare_enable(isp->clock[ISP_CLK_CSI2_FCK]);
	if (r) {
		dev_err(isp->dev, "failed to enable csi2_fck clock\n");
		goto out_clk_enable_csi2_fclk;
	}
	return 0;

	/* Unwind in reverse order of acquisition. */
out_clk_enable_csi2_fclk:
	clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
out_clk_enable_mclk:
	clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
out_clk_enable_ick:
	return r;
}

/*
 * isp_disable_clocks - Disable ISP clocks
 * @isp: OMAP3 ISP device
 */
static void isp_disable_clocks(struct isp_device *isp)
{
	clk_disable_unprepare(isp->clock[ISP_CLK_CAM_ICK]);
	clk_disable_unprepare(isp->clock[ISP_CLK_CAM_MCLK]);
	clk_disable_unprepare(isp->clock[ISP_CLK_CSI2_FCK]);
}

/* Consumer clock names, looked up by index in isp_get_clocks().
 * NOTE(review): assumes the order matches the ISP_CLK_* indices used
 * above — confirm against the ISP_CLK_* enum declaration.
 */
static const char *isp_clocks[] = {
	"cam_ick",
	"cam_mclk",
	"csi2_96m_fck",
	"l3_ick",
};

/*
 * isp_get_clocks - Look up all ISP clocks by name.
 * @isp: OMAP3 ISP device
 *
 * Returns 0 on success or the devm_clk_get() error code on failure.
 */
static int isp_get_clocks(struct isp_device *isp)
{
	struct clk *clk;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(isp_clocks); ++i) {
		clk = devm_clk_get(isp->dev, isp_clocks[i]);
		if (IS_ERR(clk)) {
			dev_err(isp->dev, "clk_get %s failed\n", isp_clocks[i]);
			return PTR_ERR(clk);
		}

		isp->clock[i] = clk;
	}

	return 0;
}

/*
 * omap3isp_get - Acquire the ISP resource.
 *
 * Initializes the clocks for the first acquire.
 *
 * Increment the reference count on the ISP. If the first reference is taken,
 * enable clocks and power-up all submodules.
 *
 * Return a pointer to the ISP device structure, or NULL if an error occurred.
 */
static struct isp_device *__omap3isp_get(struct isp_device *isp, bool irq)
{
	/* __isp doubles as the success/failure indicator: it is reset to
	 * NULL on error so ref_count is only incremented on success.
	 */
	struct isp_device *__isp = isp;

	if (isp == NULL)
		return NULL;

	mutex_lock(&isp->isp_mutex);
	/* Only the first reference powers up the hardware. */
	if (isp->ref_count > 0)
		goto out;

	if (isp_enable_clocks(isp) < 0) {
		__isp = NULL;
		goto out;
	}

	/* We don't want to restore context before saving it! */
	if (isp->has_context)
		isp_restore_ctx(isp);

	if (irq)
		isp_enable_interrupts(isp);

out:
	if (__isp != NULL)
		isp->ref_count++;
	mutex_unlock(&isp->isp_mutex);

	return __isp;
}

struct isp_device *omap3isp_get(struct isp_device *isp)
{
	return __omap3isp_get(isp, true);
}

/*
 * omap3isp_put - Release the ISP
 *
 * Decrement the reference count on the ISP. If the last reference is released,
 * power-down all submodules, disable clocks and free temporary buffers.
 */
static void __omap3isp_put(struct isp_device *isp, bool save_ctx)
{
	if (isp == NULL)
		return;

	mutex_lock(&isp->isp_mutex);
	BUG_ON(isp->ref_count == 0);
	/* Only the last reference powers down the hardware. */
	if (--isp->ref_count == 0) {
		isp_disable_interrupts(isp);
		if (save_ctx) {
			isp_save_ctx(isp);
			isp->has_context = 1;
		}
		/* Reset the ISP if an entity has failed to stop. This is the
		 * only way to recover from such conditions.
*/ if (!media_entity_enum_empty(&isp->crashed) || isp->stop_failure) isp_reset(isp); isp_disable_clocks(isp); } mutex_unlock(&isp->isp_mutex); } void omap3isp_put(struct isp_device *isp) { __omap3isp_put(isp, true); } /* -------------------------------------------------------------------------- * Platform device driver */ /* * omap3isp_print_status - Prints the values of the ISP Control Module registers * @isp: OMAP3 ISP device */ #define ISP_PRINT_REGISTER(isp, name)\ dev_dbg(isp->dev, "###ISP " #name "=0x%08x\n", \ isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_##name)) #define SBL_PRINT_REGISTER(isp, name)\ dev_dbg(isp->dev, "###SBL " #name "=0x%08x\n", \ isp_reg_readl(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_##name)) void omap3isp_print_status(struct isp_device *isp) { dev_dbg(isp->dev, "-------------ISP Register dump--------------\n"); ISP_PRINT_REGISTER(isp, SYSCONFIG); ISP_PRINT_REGISTER(isp, SYSSTATUS); ISP_PRINT_REGISTER(isp, IRQ0ENABLE); ISP_PRINT_REGISTER(isp, IRQ0STATUS); ISP_PRINT_REGISTER(isp, TCTRL_GRESET_LENGTH); ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_REPLAY); ISP_PRINT_REGISTER(isp, CTRL); ISP_PRINT_REGISTER(isp, TCTRL_CTRL); ISP_PRINT_REGISTER(isp, TCTRL_FRAME); ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_DELAY); ISP_PRINT_REGISTER(isp, TCTRL_STRB_DELAY); ISP_PRINT_REGISTER(isp, TCTRL_SHUT_DELAY); ISP_PRINT_REGISTER(isp, TCTRL_PSTRB_LENGTH); ISP_PRINT_REGISTER(isp, TCTRL_STRB_LENGTH); ISP_PRINT_REGISTER(isp, TCTRL_SHUT_LENGTH); SBL_PRINT_REGISTER(isp, PCR); SBL_PRINT_REGISTER(isp, SDR_REQ_EXP); dev_dbg(isp->dev, "--------------------------------------------\n"); } #ifdef CONFIG_PM /* * Power management support. * * As the ISP can't properly handle an input video stream interruption on a non * frame boundary, the ISP pipelines need to be stopped before sensors get * suspended. However, as suspending the sensors can require a running clock, * which can be provided by the ISP, the ISP can't be completely suspended * before the sensor. 
* * To solve this problem power management support is split into prepare/complete * and suspend/resume operations. The pipelines are stopped in prepare() and the * ISP clocks get disabled in suspend(). Similarly, the clocks are re-enabled in * resume(), and the pipelines are restarted in complete(). * * TODO: PM dependencies between the ISP and sensors are not modelled explicitly * yet. */ static int isp_pm_prepare(struct device *dev) { struct isp_device *isp = dev_get_drvdata(dev); int reset; WARN_ON(mutex_is_locked(&isp->isp_mutex)); if (isp->ref_count == 0) return 0; reset = isp_suspend_modules(isp); isp_disable_interrupts(isp); isp_save_ctx(isp); if (reset) isp_reset(isp); return 0; } static int isp_pm_suspend(struct device *dev) { struct isp_device *isp = dev_get_drvdata(dev); WARN_ON(mutex_is_locked(&isp->isp_mutex)); if (isp->ref_count) isp_disable_clocks(isp); return 0; } static int isp_pm_resume(struct device *dev) { struct isp_device *isp = dev_get_drvdata(dev); if (isp->ref_count == 0) return 0; return isp_enable_clocks(isp); } static void isp_pm_complete(struct device *dev) { struct isp_device *isp = dev_get_drvdata(dev); if (isp->ref_count == 0) return; isp_restore_ctx(isp); isp_enable_interrupts(isp); isp_resume_modules(isp); } #else #define isp_pm_prepare NULL #define isp_pm_suspend NULL #define isp_pm_resume NULL #define isp_pm_complete NULL #endif /* CONFIG_PM */ static void isp_unregister_entities(struct isp_device *isp) { media_device_unregister(&isp->media_dev); omap3isp_csi2_unregister_entities(&isp->isp_csi2a); omap3isp_ccp2_unregister_entities(&isp->isp_ccp2); omap3isp_ccdc_unregister_entities(&isp->isp_ccdc); omap3isp_preview_unregister_entities(&isp->isp_prev); omap3isp_resizer_unregister_entities(&isp->isp_res); omap3isp_stat_unregister_entities(&isp->isp_aewb); omap3isp_stat_unregister_entities(&isp->isp_af); omap3isp_stat_unregister_entities(&isp->isp_hist); v4l2_device_unregister(&isp->v4l2_dev); media_device_cleanup(&isp->media_dev); } 
static int isp_link_entity( struct isp_device *isp, struct media_entity *entity, enum isp_interface_type interface) { struct media_entity *input; unsigned int flags; unsigned int pad; unsigned int i; /* Connect the sensor to the correct interface module. * Parallel sensors are connected directly to the CCDC, while * serial sensors are connected to the CSI2a, CCP2b or CSI2c * receiver through CSIPHY1 or CSIPHY2. */ switch (interface) { case ISP_INTERFACE_PARALLEL: input = &isp->isp_ccdc.subdev.entity; pad = CCDC_PAD_SINK; flags = 0; break; case ISP_INTERFACE_CSI2A_PHY2: input = &isp->isp_csi2a.subdev.entity; pad = CSI2_PAD_SINK; flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED; break; case ISP_INTERFACE_CCP2B_PHY1: case ISP_INTERFACE_CCP2B_PHY2: input = &isp->isp_ccp2.subdev.entity; pad = CCP2_PAD_SINK; flags = 0; break; case ISP_INTERFACE_CSI2C_PHY1: input = &isp->isp_csi2c.subdev.entity; pad = CSI2_PAD_SINK; flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED; break; default: dev_err(isp->dev, "%s: invalid interface type %u\n", __func__, interface); return -EINVAL; } /* * Not all interfaces are available on all revisions of the * ISP. The sub-devices of those interfaces aren't initialised * in such a case. Check this by ensuring the num_pads is * non-zero. 
*/ if (!input->num_pads) { dev_err(isp->dev, "%s: invalid input %u\n", entity->name, interface); return -EINVAL; } for (i = 0; i < entity->num_pads; i++) { if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE) break; } if (i == entity->num_pads) { dev_err(isp->dev, "%s: no source pad in external entity %s\n", __func__, entity->name); return -EINVAL; } return media_create_pad_link(entity, i, input, pad, flags); } static int isp_register_entities(struct isp_device *isp) { int ret; isp->media_dev.dev = isp->dev; strscpy(isp->media_dev.model, "TI OMAP3 ISP", sizeof(isp->media_dev.model)); isp->media_dev.hw_revision = isp->revision; isp->media_dev.ops = &isp_media_ops; media_device_init(&isp->media_dev); isp->v4l2_dev.mdev = &isp->media_dev; ret = v4l2_device_register(isp->dev, &isp->v4l2_dev); if (ret < 0) { dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n", __func__, ret); goto done; } /* Register internal entities */ ret = omap3isp_ccp2_register_entities(&isp->isp_ccp2, &isp->v4l2_dev); if (ret < 0) goto done; ret = omap3isp_csi2_register_entities(&isp->isp_csi2a, &isp->v4l2_dev); if (ret < 0) goto done; ret = omap3isp_ccdc_register_entities(&isp->isp_ccdc, &isp->v4l2_dev); if (ret < 0) goto done; ret = omap3isp_preview_register_entities(&isp->isp_prev, &isp->v4l2_dev); if (ret < 0) goto done; ret = omap3isp_resizer_register_entities(&isp->isp_res, &isp->v4l2_dev); if (ret < 0) goto done; ret = omap3isp_stat_register_entities(&isp->isp_aewb, &isp->v4l2_dev); if (ret < 0) goto done; ret = omap3isp_stat_register_entities(&isp->isp_af, &isp->v4l2_dev); if (ret < 0) goto done; ret = omap3isp_stat_register_entities(&isp->isp_hist, &isp->v4l2_dev); if (ret < 0) goto done; done: if (ret < 0) isp_unregister_entities(isp); return ret; } /* * isp_create_links() - Create links for internal and external ISP entities * @isp : Pointer to ISP device * * This function creates all links between ISP internal and external entities. 
* * Return: A negative error code on failure or zero on success. Possible error * codes are those returned by media_create_pad_link(). */ static int isp_create_links(struct isp_device *isp) { int ret; /* Create links between entities and video nodes. */ ret = media_create_pad_link( &isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE, &isp->isp_csi2a.video_out.video.entity, 0, 0); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_ccp2.video_in.video.entity, 0, &isp->isp_ccp2.subdev.entity, CCP2_PAD_SINK, 0); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF, &isp->isp_ccdc.video_out.video.entity, 0, 0); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_prev.video_in.video.entity, 0, &isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_prev.subdev.entity, PREV_PAD_SOURCE, &isp->isp_prev.video_out.video.entity, 0, 0); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_res.video_in.video.entity, 0, &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_res.subdev.entity, RESZ_PAD_SOURCE, &isp->isp_res.video_out.video.entity, 0, 0); if (ret < 0) return ret; /* Create links between entities. 
*/ ret = media_create_pad_link( &isp->isp_csi2a.subdev.entity, CSI2_PAD_SOURCE, &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_ccp2.subdev.entity, CCP2_PAD_SOURCE, &isp->isp_ccdc.subdev.entity, CCDC_PAD_SINK, 0); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP, &isp->isp_prev.subdev.entity, PREV_PAD_SINK, 0); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_OF, &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_prev.subdev.entity, PREV_PAD_SOURCE, &isp->isp_res.subdev.entity, RESZ_PAD_SINK, 0); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP, &isp->isp_aewb.subdev.entity, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP, &isp->isp_af.subdev.entity, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret < 0) return ret; ret = media_create_pad_link( &isp->isp_ccdc.subdev.entity, CCDC_PAD_SOURCE_VP, &isp->isp_hist.subdev.entity, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret < 0) return ret; return 0; } static void isp_cleanup_modules(struct isp_device *isp) { omap3isp_h3a_aewb_cleanup(isp); omap3isp_h3a_af_cleanup(isp); omap3isp_hist_cleanup(isp); omap3isp_resizer_cleanup(isp); omap3isp_preview_cleanup(isp); omap3isp_ccdc_cleanup(isp); omap3isp_ccp2_cleanup(isp); omap3isp_csi2_cleanup(isp); omap3isp_csiphy_cleanup(isp); } static int isp_initialize_modules(struct isp_device *isp) { int ret; ret = omap3isp_csiphy_init(isp); if (ret < 0) { dev_err(isp->dev, "CSI PHY initialization failed\n"); return ret; } ret = omap3isp_csi2_init(isp); if (ret < 0) { dev_err(isp->dev, "CSI2 initialization failed\n"); goto error_csi2; } ret = omap3isp_ccp2_init(isp); if (ret < 0) { 
dev_err_probe(isp->dev, ret, "CCP2 initialization failed\n"); goto error_ccp2; } ret = omap3isp_ccdc_init(isp); if (ret < 0) { dev_err(isp->dev, "CCDC initialization failed\n"); goto error_ccdc; } ret = omap3isp_preview_init(isp); if (ret < 0) { dev_err(isp->dev, "Preview initialization failed\n"); goto error_preview; } ret = omap3isp_resizer_init(isp); if (ret < 0) { dev_err(isp->dev, "Resizer initialization failed\n"); goto error_resizer; } ret = omap3isp_hist_init(isp); if (ret < 0) { dev_err(isp->dev, "Histogram initialization failed\n"); goto error_hist; } ret = omap3isp_h3a_aewb_init(isp); if (ret < 0) { dev_err(isp->dev, "H3A AEWB initialization failed\n"); goto error_h3a_aewb; } ret = omap3isp_h3a_af_init(isp); if (ret < 0) { dev_err(isp->dev, "H3A AF initialization failed\n"); goto error_h3a_af; } return 0; error_h3a_af: omap3isp_h3a_aewb_cleanup(isp); error_h3a_aewb: omap3isp_hist_cleanup(isp); error_hist: omap3isp_resizer_cleanup(isp); error_resizer: omap3isp_preview_cleanup(isp); error_preview: omap3isp_ccdc_cleanup(isp); error_ccdc: omap3isp_ccp2_cleanup(isp); error_ccp2: omap3isp_csi2_cleanup(isp); error_csi2: omap3isp_csiphy_cleanup(isp); return ret; } static void isp_detach_iommu(struct isp_device *isp) { #ifdef CONFIG_ARM_DMA_USE_IOMMU arm_iommu_detach_device(isp->dev); arm_iommu_release_mapping(isp->mapping); isp->mapping = NULL; #endif } static int isp_attach_iommu(struct isp_device *isp) { #ifdef CONFIG_ARM_DMA_USE_IOMMU struct dma_iommu_mapping *mapping; int ret; /* * Create the ARM mapping, used by the ARM DMA mapping core to allocate * VAs. This will allocate a corresponding IOMMU domain. */ mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G); if (IS_ERR(mapping)) { dev_err(isp->dev, "failed to create ARM IOMMU mapping\n"); return PTR_ERR(mapping); } isp->mapping = mapping; /* Attach the ARM VA mapping to the device. 
*/ ret = arm_iommu_attach_device(isp->dev, mapping); if (ret < 0) { dev_err(isp->dev, "failed to attach device to VA mapping\n"); goto error; } return 0; error: arm_iommu_release_mapping(isp->mapping); isp->mapping = NULL; return ret; #else return -ENODEV; #endif } /* * isp_remove - Remove ISP platform device * @pdev: Pointer to ISP platform device * * Always returns 0. */ static void isp_remove(struct platform_device *pdev) { struct isp_device *isp = platform_get_drvdata(pdev); v4l2_async_nf_unregister(&isp->notifier); v4l2_async_nf_cleanup(&isp->notifier); isp_unregister_entities(isp); isp_cleanup_modules(isp); isp_xclk_cleanup(isp); __omap3isp_get(isp, false); isp_detach_iommu(isp); __omap3isp_put(isp, false); media_entity_enum_cleanup(&isp->crashed); kfree(isp); } enum isp_of_phy { ISP_OF_PHY_PARALLEL = 0, ISP_OF_PHY_CSIPHY1, ISP_OF_PHY_CSIPHY2, }; static int isp_subdev_notifier_bound(struct v4l2_async_notifier *async, struct v4l2_subdev *sd, struct v4l2_async_connection *asc) { struct isp_device *isp = container_of(async, struct isp_device, notifier); struct isp_bus_cfg *bus_cfg = &container_of(asc, struct isp_async_subdev, asd)->bus; int ret; mutex_lock(&isp->media_dev.graph_mutex); ret = isp_link_entity(isp, &sd->entity, bus_cfg->interface); mutex_unlock(&isp->media_dev.graph_mutex); return ret; } static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async) { struct isp_device *isp = container_of(async, struct isp_device, notifier); int ret; mutex_lock(&isp->media_dev.graph_mutex); ret = media_entity_enum_init(&isp->crashed, &isp->media_dev); mutex_unlock(&isp->media_dev.graph_mutex); if (ret) return ret; ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev); if (ret < 0) return ret; return media_device_register(&isp->media_dev); } static void isp_parse_of_parallel_endpoint(struct device *dev, struct v4l2_fwnode_endpoint *vep, struct isp_bus_cfg *buscfg) { buscfg->interface = ISP_INTERFACE_PARALLEL; buscfg->bus.parallel.data_lane_shift = 
vep->bus.parallel.data_shift; buscfg->bus.parallel.clk_pol = !!(vep->bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING); buscfg->bus.parallel.hs_pol = !!(vep->bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW); buscfg->bus.parallel.vs_pol = !!(vep->bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW); buscfg->bus.parallel.fld_pol = !!(vep->bus.parallel.flags & V4L2_MBUS_FIELD_EVEN_LOW); buscfg->bus.parallel.data_pol = !!(vep->bus.parallel.flags & V4L2_MBUS_DATA_ACTIVE_LOW); buscfg->bus.parallel.bt656 = vep->bus_type == V4L2_MBUS_BT656; } static void isp_parse_of_csi2_endpoint(struct device *dev, struct v4l2_fwnode_endpoint *vep, struct isp_bus_cfg *buscfg) { unsigned int i; buscfg->bus.csi2.lanecfg.clk.pos = vep->bus.mipi_csi2.clock_lane; buscfg->bus.csi2.lanecfg.clk.pol = vep->bus.mipi_csi2.lane_polarities[0]; dev_dbg(dev, "clock lane polarity %u, pos %u\n", buscfg->bus.csi2.lanecfg.clk.pol, buscfg->bus.csi2.lanecfg.clk.pos); buscfg->bus.csi2.num_data_lanes = vep->bus.mipi_csi2.num_data_lanes; for (i = 0; i < buscfg->bus.csi2.num_data_lanes; i++) { buscfg->bus.csi2.lanecfg.data[i].pos = vep->bus.mipi_csi2.data_lanes[i]; buscfg->bus.csi2.lanecfg.data[i].pol = vep->bus.mipi_csi2.lane_polarities[i + 1]; dev_dbg(dev, "data lane %u polarity %u, pos %u\n", i, buscfg->bus.csi2.lanecfg.data[i].pol, buscfg->bus.csi2.lanecfg.data[i].pos); } /* * FIXME: now we assume the CRC is always there. Implement a way to * obtain this information from the sensor. Frame descriptors, perhaps? 
*/ buscfg->bus.csi2.crc = 1; } static void isp_parse_of_csi1_endpoint(struct device *dev, struct v4l2_fwnode_endpoint *vep, struct isp_bus_cfg *buscfg) { buscfg->bus.ccp2.lanecfg.clk.pos = vep->bus.mipi_csi1.clock_lane; buscfg->bus.ccp2.lanecfg.clk.pol = vep->bus.mipi_csi1.lane_polarity[0]; dev_dbg(dev, "clock lane polarity %u, pos %u\n", buscfg->bus.ccp2.lanecfg.clk.pol, buscfg->bus.ccp2.lanecfg.clk.pos); buscfg->bus.ccp2.lanecfg.data[0].pos = vep->bus.mipi_csi1.data_lane; buscfg->bus.ccp2.lanecfg.data[0].pol = vep->bus.mipi_csi1.lane_polarity[1]; dev_dbg(dev, "data lane polarity %u, pos %u\n", buscfg->bus.ccp2.lanecfg.data[0].pol, buscfg->bus.ccp2.lanecfg.data[0].pos); buscfg->bus.ccp2.strobe_clk_pol = vep->bus.mipi_csi1.clock_inv; buscfg->bus.ccp2.phy_layer = vep->bus.mipi_csi1.strobe; buscfg->bus.ccp2.ccp2_mode = vep->bus_type == V4L2_MBUS_CCP2; buscfg->bus.ccp2.vp_clk_pol = 1; buscfg->bus.ccp2.crc = 1; } static struct { u32 phy; u32 csi2_if; u32 csi1_if; } isp_bus_interfaces[2] = { { ISP_OF_PHY_CSIPHY1, ISP_INTERFACE_CSI2C_PHY1, ISP_INTERFACE_CCP2B_PHY1 }, { ISP_OF_PHY_CSIPHY2, ISP_INTERFACE_CSI2A_PHY2, ISP_INTERFACE_CCP2B_PHY2 }, }; static int isp_parse_of_endpoints(struct isp_device *isp) { struct fwnode_handle *ep; struct isp_async_subdev *isd = NULL; unsigned int i; ep = fwnode_graph_get_endpoint_by_id( dev_fwnode(isp->dev), ISP_OF_PHY_PARALLEL, 0, FWNODE_GRAPH_ENDPOINT_NEXT); if (ep) { struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_PARALLEL }; int ret; dev_dbg(isp->dev, "parsing parallel interface\n"); ret = v4l2_fwnode_endpoint_parse(ep, &vep); if (!ret) { isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier, ep, struct isp_async_subdev); if (!IS_ERR(isd)) isp_parse_of_parallel_endpoint(isp->dev, &vep, &isd->bus); } fwnode_handle_put(ep); } for (i = 0; i < ARRAY_SIZE(isp_bus_interfaces); i++) { struct v4l2_fwnode_endpoint vep = { .bus_type = V4L2_MBUS_CSI2_DPHY }; int ret; ep = fwnode_graph_get_endpoint_by_id( dev_fwnode(isp->dev), 
isp_bus_interfaces[i].phy, 0, FWNODE_GRAPH_ENDPOINT_NEXT); if (!ep) continue; dev_dbg(isp->dev, "parsing serial interface %u, node %pOF\n", i, to_of_node(ep)); ret = v4l2_fwnode_endpoint_parse(ep, &vep); if (ret == -ENXIO) { vep = (struct v4l2_fwnode_endpoint) { .bus_type = V4L2_MBUS_CSI1 }; ret = v4l2_fwnode_endpoint_parse(ep, &vep); if (ret == -ENXIO) { vep = (struct v4l2_fwnode_endpoint) { .bus_type = V4L2_MBUS_CCP2 }; ret = v4l2_fwnode_endpoint_parse(ep, &vep); } } if (!ret) { isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier, ep, struct isp_async_subdev); if (!IS_ERR(isd)) { switch (vep.bus_type) { case V4L2_MBUS_CSI2_DPHY: isd->bus.interface = isp_bus_interfaces[i].csi2_if; isp_parse_of_csi2_endpoint(isp->dev, &vep, &isd->bus); break; case V4L2_MBUS_CSI1: case V4L2_MBUS_CCP2: isd->bus.interface = isp_bus_interfaces[i].csi1_if; isp_parse_of_csi1_endpoint(isp->dev, &vep, &isd->bus); break; default: break; } } } fwnode_handle_put(ep); } return 0; } static const struct v4l2_async_notifier_operations isp_subdev_notifier_ops = { .bound = isp_subdev_notifier_bound, .complete = isp_subdev_notifier_complete, }; /* * isp_probe - Probe ISP platform device * @pdev: Pointer to ISP platform device * * Returns 0 if successful, * -ENOMEM if no memory available, * -ENODEV if no platform device resources found * or no space for remapping registers, * -EINVAL if couldn't install ISR, * or clk_get return error value. 
 */
static int isp_probe(struct platform_device *pdev)
{
	struct isp_device *isp;
	struct resource *mem;
	int ret;
	int i, m;

	isp = kzalloc(sizeof(*isp), GFP_KERNEL);
	if (!isp) {
		dev_err(&pdev->dev, "could not allocate memory\n");
		return -ENOMEM;
	}

	ret = fwnode_property_read_u32(of_fwnode_handle(pdev->dev.of_node),
				       "ti,phy-type", &isp->phy_type);
	if (ret)
		goto error_release_isp;

	isp->syscon = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
						      "syscon");
	if (IS_ERR(isp->syscon)) {
		ret = PTR_ERR(isp->syscon);
		goto error_release_isp;
	}

	/* The second cell of the "syscon" property is the register offset. */
	ret = of_property_read_u32_index(pdev->dev.of_node,
					 "syscon", 1, &isp->syscon_offset);
	if (ret)
		goto error_release_isp;

	isp->autoidle = autoidle;

	mutex_init(&isp->isp_mutex);
	spin_lock_init(&isp->stat_lock);

	isp->dev = &pdev->dev;
	isp->ref_count = 0;

	ret = dma_coerce_mask_and_coherent(isp->dev, DMA_BIT_MASK(32));
	if (ret)
		goto error;

	platform_set_drvdata(pdev, isp);

	/* Regulators */
	isp->isp_csiphy1.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy1");
	if (IS_ERR(isp->isp_csiphy1.vdd)) {
		ret = PTR_ERR(isp->isp_csiphy1.vdd);
		goto error;
	}

	isp->isp_csiphy2.vdd = devm_regulator_get(&pdev->dev, "vdd-csiphy2");
	if (IS_ERR(isp->isp_csiphy2.vdd)) {
		ret = PTR_ERR(isp->isp_csiphy2.vdd);
		goto error;
	}

	/* Clocks
	 *
	 * The ISP clock tree is revision-dependent. We thus need to enable ICLK
	 * manually to read the revision before calling __omap3isp_get().
	 *
	 * Start by mapping the ISP MMIO area, which is in two pieces.
	 * The ISP IOMMU is in between. Map both now, and fill in the
	 * ISP revision specific portions a little later in the
	 * function.
	 */
	for (i = 0; i < 2; i++) {
		/* Resource 0 is the main block, resource 1 the CSI2A block. */
		unsigned int map_idx = i ? OMAP3_ISP_IOMEM_CSI2A_REGS1 : 0;

		isp->mmio_base[map_idx] =
			devm_platform_get_and_ioremap_resource(pdev, i, &mem);
		if (IS_ERR(isp->mmio_base[map_idx])) {
			ret = PTR_ERR(isp->mmio_base[map_idx]);
			goto error;
		}
	}

	ret = isp_get_clocks(isp);
	if (ret < 0)
		goto error;

	ret = clk_enable(isp->clock[ISP_CLK_CAM_ICK]);
	if (ret < 0)
		goto error;

	isp->revision = isp_reg_readl(isp, OMAP3_ISP_IOMEM_MAIN, ISP_REVISION);
	dev_info(isp->dev, "Revision %d.%d found\n",
		 (isp->revision & 0xf0) >> 4, isp->revision & 0x0f);

	clk_disable(isp->clock[ISP_CLK_CAM_ICK]);

	if (__omap3isp_get(isp, false) == NULL) {
		ret = -ENODEV;
		goto error;
	}

	ret = isp_reset(isp);
	if (ret < 0)
		goto error_isp;

	ret = isp_xclk_init(isp);
	if (ret < 0)
		goto error_isp;

	/* Memory resources */
	for (m = 0; m < ARRAY_SIZE(isp_res_maps); m++)
		if (isp->revision == isp_res_maps[m].isp_rev)
			break;

	if (m == ARRAY_SIZE(isp_res_maps)) {
		dev_err(isp->dev, "No resource map found for ISP rev %d.%d\n",
			(isp->revision & 0xf0) >> 4, isp->revision & 0xf);
		ret = -ENODEV;
		goto error_isp;
	}

	/* Sub-blocks below CSI2A live in the first mapping... */
	for (i = 1; i < OMAP3_ISP_IOMEM_CSI2A_REGS1; i++)
		isp->mmio_base[i] =
			isp->mmio_base[0] + isp_res_maps[m].offset[i];

	/* ...the remaining ones in the second (CSI2A) mapping. */
	for (i = OMAP3_ISP_IOMEM_CSIPHY2; i < OMAP3_ISP_IOMEM_LAST; i++)
		isp->mmio_base[i] =
			isp->mmio_base[OMAP3_ISP_IOMEM_CSI2A_REGS1]
			+ isp_res_maps[m].offset[i];

	/*
	 * 'mem' still refers to the last resource fetched in the loop above
	 * (index 1, the CSI2A region), so this is the HIST physical base.
	 */
	isp->mmio_hist_base_phys =
		mem->start + isp_res_maps[m].offset[OMAP3_ISP_IOMEM_HIST];

	/* IOMMU */
	ret = isp_attach_iommu(isp);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to attach to IOMMU\n");
		goto error_isp;
	}

	/* Interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto error_iommu;
	isp->irq_num = ret;

	if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
			     "OMAP3 ISP", isp)) {
		dev_err(isp->dev, "Unable to request IRQ\n");
		ret = -EINVAL;
		goto error_iommu;
	}

	/* Entities */
	ret = isp_initialize_modules(isp);
	if (ret < 0)
		goto error_iommu;

	ret = isp_register_entities(isp);
	if (ret < 0)
		goto error_modules;

	ret = isp_create_links(isp);
	if (ret < 0)
		goto error_register_entities;

	isp->notifier.ops = &isp_subdev_notifier_ops;

	v4l2_async_nf_init(&isp->notifier, &isp->v4l2_dev);

	ret = isp_parse_of_endpoints(isp);
	if (ret < 0)
		goto error_register_entities;

	ret = v4l2_async_nf_register(&isp->notifier);
	if (ret)
		goto error_register_entities;

	isp_core_init(isp, 1);
	omap3isp_put(isp);

	return 0;

	/* Unwind in strict reverse order of acquisition. */
error_register_entities:
	v4l2_async_nf_cleanup(&isp->notifier);
	isp_unregister_entities(isp);
error_modules:
	isp_cleanup_modules(isp);
error_iommu:
	isp_detach_iommu(isp);
error_isp:
	isp_xclk_cleanup(isp);
	__omap3isp_put(isp, false);
error:
	mutex_destroy(&isp->isp_mutex);
error_release_isp:
	kfree(isp);

	return ret;
}

static const struct dev_pm_ops omap3isp_pm_ops = {
	.prepare = isp_pm_prepare,
	.suspend = isp_pm_suspend,
	.resume = isp_pm_resume,
	.complete = isp_pm_complete,
};

static const struct platform_device_id omap3isp_id_table[] = {
	{ "omap3isp", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, omap3isp_id_table);

static const struct of_device_id omap3isp_of_table[] = {
	{ .compatible = "ti,omap3-isp" },
	{ },
};
MODULE_DEVICE_TABLE(of, omap3isp_of_table);

static struct platform_driver omap3isp_driver = {
	.probe = isp_probe,
	.remove_new = isp_remove,
	.id_table = omap3isp_id_table,
	.driver = {
		.name = "omap3isp",
		.pm = &omap3isp_pm_ops,
		.of_match_table = omap3isp_of_table,
	},
};

module_platform_driver(omap3isp_driver);

MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("TI OMAP3 ISP driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ISP_VIDEO_DRIVER_VERSION);
linux-master
drivers/media/platform/ti/omap3isp/isp.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ispcsi2.c
 *
 * TI OMAP3 ISP - CSI2 module
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: Laurent Pinchart <[email protected]>
 *	     Sakari Ailus <[email protected]>
 */
#include <linux/delay.h>
#include <media/v4l2-common.h>
#include <linux/v4l2-mediabus.h>
#include <linux/mm.h>

#include "isp.h"
#include "ispreg.h"
#include "ispcsi2.h"

/*
 * csi2_if_enable - Enable CSI2 Receiver interface.
 * @isp: the ISP device
 * @csi2: the CSI2 receiver instance
 * @enable: enable flag
 *
 * Sets/clears the IF_EN bit and mirrors the state in csi2->ctrl so
 * csi2_configure() can refuse reconfiguration while enabled.
 */
static void csi2_if_enable(struct isp_device *isp,
			   struct isp_csi2_device *csi2, u8 enable)
{
	struct isp_csi2_ctrl_cfg *currctrl = &csi2->ctrl;

	isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_CTRL, ISPCSI2_CTRL_IF_EN,
			enable ? ISPCSI2_CTRL_IF_EN : 0);

	currctrl->if_enable = enable;
}

/*
 * csi2_recv_config - CSI2 receiver module configuration.
 * @isp: the ISP device
 * @csi2: the CSI2 receiver instance
 * @currctrl: isp_csi2_ctrl_cfg structure
 *
 * Read-modify-write of ISPCSI2_CTRL from the cached control settings.
 */
static void csi2_recv_config(struct isp_device *isp,
			     struct isp_csi2_device *csi2,
			     struct isp_csi2_ctrl_cfg *currctrl)
{
	u32 reg;

	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTRL);

	if (currctrl->frame_mode)
		reg |= ISPCSI2_CTRL_FRAME;
	else
		reg &= ~ISPCSI2_CTRL_FRAME;

	if (currctrl->vp_clk_enable)
		reg |= ISPCSI2_CTRL_VP_CLK_EN;
	else
		reg &= ~ISPCSI2_CTRL_VP_CLK_EN;

	if (currctrl->vp_only_enable)
		reg |= ISPCSI2_CTRL_VP_ONLY_EN;
	else
		reg &= ~ISPCSI2_CTRL_VP_ONLY_EN;

	reg &= ~ISPCSI2_CTRL_VP_OUT_CTRL_MASK;
	reg |= currctrl->vp_out_ctrl << ISPCSI2_CTRL_VP_OUT_CTRL_SHIFT;

	if (currctrl->ecc_enable)
		reg |= ISPCSI2_CTRL_ECC_EN;
	else
		reg &= ~ISPCSI2_CTRL_ECC_EN;

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTRL);
}

/* Media bus codes accepted on the CSI2 sink pad. */
static const unsigned int csi2_input_fmts[] = {
	MEDIA_BUS_FMT_SGRBG10_1X10,
	MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
	MEDIA_BUS_FMT_SRGGB10_1X10,
	MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
	MEDIA_BUS_FMT_SBGGR10_1X10,
	MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
	MEDIA_BUS_FMT_SGBRG10_1X10,
	MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
	MEDIA_BUS_FMT_YUYV8_2X8,
};

/* To set the format on the CSI2 requires a mapping function that takes
 * the following inputs:
 * - 3 different formats (at this time)
 * - 2 destinations (mem, vp+mem) (vp only handled separately)
 * - 2 decompression options (on, off)
 * - 2 isp revisions (certain format must be handled differently on OMAP3630)
 * Output should be CSI2 frame format code
 * Array indices as follows: [format][dest][decompr][is_3630]
 * Not all combinations are valid. 0 means invalid.
 */
static const u16 __csi2_fmt_map[3][2][2][2] = {
	/* RAW10 formats */
	{
		/* Output to memory */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_RAW10_EXP16, CSI2_PIX_FMT_RAW10_EXP16 },
			/* DPCM decompression */
			{ 0, 0 },
		},
		/* Output to both */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_RAW10_EXP16_VP,
			  CSI2_PIX_FMT_RAW10_EXP16_VP },
			/* DPCM decompression */
			{ 0, 0 },
		},
	},
	/* RAW10 DPCM8 formats */
	{
		/* Output to memory */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_RAW8, CSI2_USERDEF_8BIT_DATA1 },
			/* DPCM decompression */
			{ CSI2_PIX_FMT_RAW8_DPCM10_EXP16,
			  CSI2_USERDEF_8BIT_DATA1_DPCM10 },
		},
		/* Output to both */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_RAW8_VP, CSI2_PIX_FMT_RAW8_VP },
			/* DPCM decompression */
			{ CSI2_PIX_FMT_RAW8_DPCM10_VP,
			  CSI2_USERDEF_8BIT_DATA1_DPCM10_VP },
		},
	},
	/* YUYV8 2X8 formats */
	{
		/* Output to memory */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_YUV422_8BIT, CSI2_PIX_FMT_YUV422_8BIT },
			/* DPCM decompression */
			{ 0, 0 },
		},
		/* Output to both */
		{
			/* No DPCM decompression */
			{ CSI2_PIX_FMT_YUV422_8BIT_VP,
			  CSI2_PIX_FMT_YUV422_8BIT_VP },
			/* DPCM decompression */
			{ 0, 0 },
		},
	},
};

/*
 * csi2_ctx_map_format - Map CSI2 sink media bus format to CSI2 format ID
 * @csi2: ISP CSI2 device
 *
 * Returns CSI2 physical format id
 */
static u16 csi2_ctx_map_format(struct isp_csi2_device *csi2)
{
	const struct v4l2_mbus_framefmt *fmt = &csi2->formats[CSI2_PAD_SINK];
	int fmtidx, destidx, is_3630;

	switch (fmt->code) {
	case MEDIA_BUS_FMT_SGRBG10_1X10:
	case MEDIA_BUS_FMT_SRGGB10_1X10:
	case MEDIA_BUS_FMT_SBGGR10_1X10:
	case MEDIA_BUS_FMT_SGBRG10_1X10:
		fmtidx = 0;
		break;
	case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
	case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
	case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
	case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
		fmtidx = 1;
		break;
	case MEDIA_BUS_FMT_YUYV8_2X8:
		fmtidx = 2;
		break;
	default:
		WARN(1, KERN_ERR "CSI2: pixel format %08x unsupported!\n",
		     fmt->code);
		return 0;
	}

	if (!(csi2->output & CSI2_OUTPUT_CCDC) &&
	    !(csi2->output & CSI2_OUTPUT_MEMORY)) {
		/* Neither output enabled is a valid combination */
		return CSI2_PIX_FMT_OTHERS;
	}

	/* If we need to skip frames at the beginning of the stream disable the
	 * video port to avoid sending the skipped frames to the CCDC.
	 */
	destidx = csi2->frame_skip ? 0 : !!(csi2->output & CSI2_OUTPUT_CCDC);
	is_3630 = csi2->isp->revision == ISP_REVISION_15_0;

	return __csi2_fmt_map[fmtidx][destidx][csi2->dpcm_decompress][is_3630];
}

/*
 * csi2_set_outaddr - Set memory address to save output image
 * @csi2: Pointer to ISP CSI2a device.
 * @addr: ISP MMU Mapped 32-bit memory address aligned on 32 byte boundary.
 *
 * Sets the memory address where the output will be saved.
 *
 * Returns 0 if successful, or -EINVAL if the address is not in the 32 byte
 * boundary.
 *
 * NOTE(review): the function is void — the return-value text above is stale
 * and the alignment is not validated here; confirm callers guarantee it.
 */
static void csi2_set_outaddr(struct isp_csi2_device *csi2, u32 addr)
{
	struct isp_device *isp = csi2->isp;
	struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[0];

	/* Ping and pong both point at the same buffer (single-buffer mode). */
	ctx->ping_addr = addr;
	ctx->pong_addr = addr;
	isp_reg_writel(isp, ctx->ping_addr,
		       csi2->regs1, ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));
	isp_reg_writel(isp, ctx->pong_addr,
		       csi2->regs1, ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
}

/*
 * is_usr_def_mapping - Checks whether USER_DEF_MAPPING should
 *			be enabled by CSI2.
 * @format_id: mapped format id
 *
 * True when bit 6 of the format id is set (user-defined data formats).
 */
static inline int is_usr_def_mapping(u32 format_id)
{
	return (format_id & 0x40) ? 1 : 0;
}

/*
 * csi2_ctx_enable - Enable specified CSI2 context
 * @ctxnum: Context number, valid between 0 and 7 values.
 * @enable: enable
 *
 */
static void csi2_ctx_enable(struct isp_device *isp,
			    struct isp_csi2_device *csi2, u8 ctxnum, u8 enable)
{
	struct isp_csi2_ctx_cfg *ctx = &csi2->contexts[ctxnum];
	unsigned int skip = 0;
	u32 reg;

	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctxnum));

	if (enable) {
		/*
		 * The COUNT field holds the number of initial frames the
		 * hardware drops before forwarding data.
		 */
		if (csi2->frame_skip)
			skip = csi2->frame_skip;
		else if (csi2->output & CSI2_OUTPUT_MEMORY)
			skip = 1;

		reg &= ~ISPCSI2_CTX_CTRL1_COUNT_MASK;
		reg |= ISPCSI2_CTX_CTRL1_COUNT_UNLOCK
		     | (skip << ISPCSI2_CTX_CTRL1_COUNT_SHIFT)
		     | ISPCSI2_CTX_CTRL1_CTX_EN;
	} else {
		reg &= ~ISPCSI2_CTX_CTRL1_CTX_EN;
	}

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL1(ctxnum));
	ctx->enabled = enable;
}

/*
 * csi2_ctx_config - CSI2 context configuration.
 * @isp: the ISP device
 * @csi2: the CSI2 receiver instance
 * @ctx: context configuration
 *
 * Programs CTRL1/CTRL2/CTRL3, the data offset and the ping/pong
 * addresses of one receive context from the cached configuration.
 */
static void csi2_ctx_config(struct isp_device *isp,
			    struct isp_csi2_device *csi2,
			    struct isp_csi2_ctx_cfg *ctx)
{
	u32 reg;

	/* Set up CSI2_CTx_CTRL1 */
	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));

	if (ctx->eof_enabled)
		reg |= ISPCSI2_CTX_CTRL1_EOF_EN;
	else
		reg &= ~ISPCSI2_CTX_CTRL1_EOF_EN;

	if (ctx->eol_enabled)
		reg |= ISPCSI2_CTX_CTRL1_EOL_EN;
	else
		reg &= ~ISPCSI2_CTX_CTRL1_EOL_EN;

	if (ctx->checksum_enabled)
		reg |= ISPCSI2_CTX_CTRL1_CS_EN;
	else
		reg &= ~ISPCSI2_CTX_CTRL1_CS_EN;

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL1(ctx->ctxnum));

	/* Set up CSI2_CTx_CTRL2 */
	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));

	reg &= ~(ISPCSI2_CTX_CTRL2_VIRTUAL_ID_MASK);
	reg |= ctx->virtual_id << ISPCSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT;

	reg &= ~(ISPCSI2_CTX_CTRL2_FORMAT_MASK);
	reg |= ctx->format_id << ISPCSI2_CTX_CTRL2_FORMAT_SHIFT;

	if (ctx->dpcm_decompress) {
		if (ctx->dpcm_predictor)
			reg |= ISPCSI2_CTX_CTRL2_DPCM_PRED;
		else
			reg &= ~ISPCSI2_CTX_CTRL2_DPCM_PRED;
	}

	if (is_usr_def_mapping(ctx->format_id)) {
		reg &= ~ISPCSI2_CTX_CTRL2_USER_DEF_MAP_MASK;
		reg |= 2 << ISPCSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT;
	}

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL2(ctx->ctxnum));

	/* Set up CSI2_CTx_CTRL3 */
	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));
	reg &= ~(ISPCSI2_CTX_CTRL3_ALPHA_MASK);
	reg |= (ctx->alpha << ISPCSI2_CTX_CTRL3_ALPHA_SHIFT);

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_CTRL3(ctx->ctxnum));

	/* Set up CSI2_CTx_DAT_OFST */
	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));
	reg &= ~ISPCSI2_CTX_DAT_OFST_OFST_MASK;
	reg |= ctx->data_offset << ISPCSI2_CTX_DAT_OFST_OFST_SHIFT;
	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_CTX_DAT_OFST(ctx->ctxnum));

	isp_reg_writel(isp, ctx->ping_addr, csi2->regs1,
		       ISPCSI2_CTX_DAT_PING_ADDR(ctx->ctxnum));

	isp_reg_writel(isp, ctx->pong_addr, csi2->regs1,
		       ISPCSI2_CTX_DAT_PONG_ADDR(ctx->ctxnum));
}

/*
 * csi2_timing_config - CSI2 timing configuration.
 * @isp: the ISP device
 * @csi2: the CSI2 receiver instance
 * @timing: csi2_timing_cfg structure
 *
 * Read-modify-write of ISPCSI2_TIMING for the I/O selected by
 * timing->ionum.
 */
static void csi2_timing_config(struct isp_device *isp,
			       struct isp_csi2_device *csi2,
			       struct isp_csi2_timing_cfg *timing)
{
	u32 reg;

	reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_TIMING);

	if (timing->force_rx_mode)
		reg |= ISPCSI2_TIMING_FORCE_RX_MODE_IO(timing->ionum);
	else
		reg &= ~ISPCSI2_TIMING_FORCE_RX_MODE_IO(timing->ionum);

	if (timing->stop_state_16x)
		reg |= ISPCSI2_TIMING_STOP_STATE_X16_IO(timing->ionum);
	else
		reg &= ~ISPCSI2_TIMING_STOP_STATE_X16_IO(timing->ionum);

	if (timing->stop_state_4x)
		reg |= ISPCSI2_TIMING_STOP_STATE_X4_IO(timing->ionum);
	else
		reg &= ~ISPCSI2_TIMING_STOP_STATE_X4_IO(timing->ionum);

	reg &= ~ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_MASK(timing->ionum);
	reg |= timing->stop_state_counter <<
	       ISPCSI2_TIMING_STOP_STATE_COUNTER_IO_SHIFT(timing->ionum);

	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_TIMING);
}

/*
 * csi2_irq_ctx_set - Enables CSI2 Context IRQs.
 * @enable: Enable/disable CSI2 Context interrupts
 */
static void csi2_irq_ctx_set(struct isp_device *isp,
			     struct isp_csi2_device *csi2, int enable)
{
	int i;

	/* Clear any pending FE status, then (un)mask it on all 8 contexts. */
	for (i = 0; i < 8; i++) {
		isp_reg_writel(isp, ISPCSI2_CTX_IRQSTATUS_FE_IRQ, csi2->regs1,
			       ISPCSI2_CTX_IRQSTATUS(i));
		if (enable)
			isp_reg_set(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
				    ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
		else
			isp_reg_clr(isp, csi2->regs1, ISPCSI2_CTX_IRQENABLE(i),
				    ISPCSI2_CTX_IRQSTATUS_FE_IRQ);
	}
}

/*
 * csi2_irq_complexio1_set - Enables CSI2 ComplexIO IRQs.
 * @enable: Enable/disable CSI2 ComplexIO #1 interrupts
 */
static void csi2_irq_complexio1_set(struct isp_device *isp,
				    struct isp_csi2_device *csi2, int enable)
{
	u32 reg;

	/* All ULPM state and per-lane error interrupt sources. */
	reg = ISPCSI2_PHY_IRQENABLE_STATEALLULPMEXIT |
		ISPCSI2_PHY_IRQENABLE_STATEALLULPMENTER |
		ISPCSI2_PHY_IRQENABLE_STATEULPM5 |
		ISPCSI2_PHY_IRQENABLE_ERRCONTROL5 |
		ISPCSI2_PHY_IRQENABLE_ERRESC5 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS5 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTHS5 |
		ISPCSI2_PHY_IRQENABLE_STATEULPM4 |
		ISPCSI2_PHY_IRQENABLE_ERRCONTROL4 |
		ISPCSI2_PHY_IRQENABLE_ERRESC4 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS4 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTHS4 |
		ISPCSI2_PHY_IRQENABLE_STATEULPM3 |
		ISPCSI2_PHY_IRQENABLE_ERRCONTROL3 |
		ISPCSI2_PHY_IRQENABLE_ERRESC3 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS3 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTHS3 |
		ISPCSI2_PHY_IRQENABLE_STATEULPM2 |
		ISPCSI2_PHY_IRQENABLE_ERRCONTROL2 |
		ISPCSI2_PHY_IRQENABLE_ERRESC2 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS2 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTHS2 |
		ISPCSI2_PHY_IRQENABLE_STATEULPM1 |
		ISPCSI2_PHY_IRQENABLE_ERRCONTROL1 |
		ISPCSI2_PHY_IRQENABLE_ERRESC1 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTSYNCHS1 |
		ISPCSI2_PHY_IRQENABLE_ERRSOTHS1;
	/* Ack pending status first, then merge/clear the enable mask. */
	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
	if (enable)
		reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
	else
		reg = 0;
	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_PHY_IRQENABLE);
}

/*
 * csi2_irq_status_set - Enables CSI2 Status IRQs.
 * @enable: Enable/disable CSI2 Status interrupts
 */
static void csi2_irq_status_set(struct isp_device *isp,
				struct isp_csi2_device *csi2, int enable)
{
	u32 reg;

	reg = ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
		ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
		ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ |
		ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
		ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
		ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ |
		ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ |
		ISPCSI2_IRQSTATUS_CONTEXT(0);
	/* Ack pending status first, then merge/clear the enable mask. */
	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQSTATUS);
	if (enable)
		reg |= isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQENABLE);
	else
		reg = 0;
	isp_reg_writel(isp, reg, csi2->regs1, ISPCSI2_IRQENABLE);
}

/*
 * omap3isp_csi2_reset - Resets the CSI2 module.
 *
 * Must be called with the phy lock held.
 *
 * Returns 0 if successful, or -EBUSY if power command didn't respond.
 */
int omap3isp_csi2_reset(struct isp_csi2_device *csi2)
{
	struct isp_device *isp = csi2->isp;
	u8 soft_reset_retries = 0;
	u32 reg;
	int i;

	if (!csi2->available)
		return -ENODEV;

	/* The receiver is in use by another entity through its PHY. */
	if (csi2->phy->entity)
		return -EBUSY;

	isp_reg_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
		    ISPCSI2_SYSCONFIG_SOFT_RESET);

	/* Poll RESET_DONE, up to 5 tries with 100us between attempts. */
	do {
		reg = isp_reg_readl(isp, csi2->regs1, ISPCSI2_SYSSTATUS) &
				    ISPCSI2_SYSSTATUS_RESET_DONE;
		if (reg == ISPCSI2_SYSSTATUS_RESET_DONE)
			break;
		soft_reset_retries++;
		if (soft_reset_retries < 5)
			udelay(100);
	} while (soft_reset_retries < 5);

	if (soft_reset_retries == 5) {
		dev_err(isp->dev, "CSI2: Soft reset try count exceeded!\n");
		return -EBUSY;
	}

	if (isp->revision == ISP_REVISION_15_0)
		isp_reg_set(isp, csi2->regs1, ISPCSI2_PHY_CFG,
			    ISPCSI2_PHY_CFG_RESET_CTRL);

	/* Wait (up to 10ms) for the PHY functional clock domain reset. */
	i = 100;
	do {
		reg = isp_reg_readl(isp, csi2->phy->phy_regs, ISPCSIPHY_REG1)
		    & ISPCSIPHY_REG1_RESET_DONE_CTRLCLK;
		if (reg == ISPCSIPHY_REG1_RESET_DONE_CTRLCLK)
			break;
		udelay(100);
	} while (--i > 0);

	if (i == 0) {
		dev_err(isp->dev,
			"CSI2: Reset for CSI2_96M_FCLK domain Failed!\n");
		return -EBUSY;
	}

	if (isp->autoidle)
		isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
				ISPCSI2_SYSCONFIG_AUTO_IDLE,
				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_SMART |
				((isp->revision == ISP_REVISION_15_0) ?
				 ISPCSI2_SYSCONFIG_AUTO_IDLE : 0));
	else
		isp_reg_clr_set(isp, csi2->regs1, ISPCSI2_SYSCONFIG,
				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
				ISPCSI2_SYSCONFIG_AUTO_IDLE,
				ISPCSI2_SYSCONFIG_MSTANDBY_MODE_NO);

	return 0;
}

/*
 * csi2_configure - Apply the cached configuration to the CSI2 receiver
 * @csi2: the CSI2 receiver instance
 *
 * Returns 0, -EBUSY if a context or the interface is already enabled,
 * or -EPIPE if the pipeline has no external bus configuration.
 */
static int csi2_configure(struct isp_csi2_device *csi2)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
	const struct isp_bus_cfg *buscfg;
	struct isp_device *isp = csi2->isp;
	struct isp_csi2_timing_cfg *timing = &csi2->timing[0];
	struct v4l2_subdev *sensor;
	struct media_pad *pad;

	/*
	 * CSI2 fields that can be updated while the context has
	 * been enabled or the interface has been enabled are not
	 * updated dynamically currently. So we do not allow to
	 * reconfigure if either has been enabled
	 */
	if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
		return -EBUSY;

	pad = media_pad_remote_pad_first(&csi2->pads[CSI2_PAD_SINK]);
	sensor = media_entity_to_v4l2_subdev(pad->entity);

	buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
	if (WARN_ON(!buscfg))
		return -EPIPE;

	/* Best-effort query; sensors without g_skip_frames leave this 0. */
	csi2->frame_skip = 0;
	v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);

	csi2->ctrl.vp_out_ctrl =
		clamp_t(unsigned int, pipe->l3_ick / pipe->external_rate - 1,
			1, 3);
	dev_dbg(isp->dev, "%s: l3_ick %lu, external_rate %u, vp_out_ctrl %u\n",
		__func__, pipe->l3_ick,  pipe->external_rate,
		csi2->ctrl.vp_out_ctrl);
	csi2->ctrl.frame_mode = ISP_CSI2_FRAME_IMMEDIATE;
	csi2->ctrl.ecc_enable = buscfg->bus.csi2.crc;

	timing->ionum = 1;
	timing->force_rx_mode = 1;
	timing->stop_state_16x = 1;
	timing->stop_state_4x = 1;
	timing->stop_state_counter = 0x1FF;

	/*
	 * The CSI2 receiver can't do any format conversion except DPCM
	 * decompression, so every set_format call configures both pads
	 * and enables DPCM decompression as a special case:
	 */
	if (csi2->formats[CSI2_PAD_SINK].code !=
	    csi2->formats[CSI2_PAD_SOURCE].code)
		csi2->dpcm_decompress = true;
	else
		csi2->dpcm_decompress = false;
	csi2->contexts[0].format_id = csi2_ctx_map_format(csi2);

	/* Use the padded line length as DAT_OFST when padding is in use. */
	if (csi2->video_out.bpl_padding == 0)
		csi2->contexts[0].data_offset = 0;
	else
		csi2->contexts[0].data_offset = csi2->video_out.bpl_value;

	/*
	 * Enable end of frame and end of line signals generation for
	 * context 0. These signals are generated from CSI2 receiver to
	 * qualify the last pixel of a frame and the last pixel of a line.
	 * Without enabling the signals CSI2 receiver writes data to memory
	 * beyond buffer size and/or data line offset is not handled correctly.
	 */
	csi2->contexts[0].eof_enabled = 1;
	csi2->contexts[0].eol_enabled = 1;

	csi2_irq_complexio1_set(isp, csi2, 1);
	csi2_irq_ctx_set(isp, csi2, 1);
	csi2_irq_status_set(isp, csi2, 1);

	/* Set configuration (timings, format and links) */
	csi2_timing_config(isp, csi2, timing);
	csi2_recv_config(isp, csi2, &csi2->ctrl);
	csi2_ctx_config(isp, csi2, &csi2->contexts[0]);

	return 0;
}

/*
 * csi2_print_status - Prints CSI2 debug information.
 */
#define CSI2_PRINT_REGISTER(isp, regs, name)\
	dev_dbg(isp->dev, "###CSI2 " #name "=0x%08x\n", \
		isp_reg_readl(isp, regs, ISPCSI2_##name))

static void csi2_print_status(struct isp_csi2_device *csi2)
{
	struct isp_device *isp = csi2->isp;

	if (!csi2->available)
		return;

	dev_dbg(isp->dev, "-------------CSI2 Register dump-------------\n");

	CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSCONFIG);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, SYSSTATUS);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQENABLE);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, IRQSTATUS);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTRL);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_H);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, GNQ);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_CFG);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQSTATUS);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, SHORT_PACKET);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, PHY_IRQENABLE);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, DBG_P);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, TIMING);
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL1(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL2(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_OFST(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PING_ADDR(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_DAT_PONG_ADDR(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQENABLE(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_IRQSTATUS(0));
	CSI2_PRINT_REGISTER(isp, csi2->regs1, CTX_CTRL3(0));

	dev_dbg(isp->dev, "--------------------------------------------\n");
}

/* -----------------------------------------------------------------------------
 * Interrupt handling
 */

/*
 * csi2_isr_buffer - Does buffer handling at end-of-frame
 * when writing to memory.
 *
 * The context is disabled around the buffer swap so the hardware never
 * writes to a buffer that is being handed back to userspace.
 */
static void csi2_isr_buffer(struct isp_csi2_device *csi2)
{
	struct isp_device *isp = csi2->isp;
	struct isp_buffer *buffer;

	csi2_ctx_enable(isp, csi2, 0, 0);

	buffer = omap3isp_video_buffer_next(&csi2->video_out);

	/*
	 * Let video queue operation restart engine if there is an underrun
	 * condition.
	 */
	if (buffer == NULL)
		return;

	csi2_set_outaddr(csi2, buffer->dma);
	csi2_ctx_enable(isp, csi2, 0, 1);
}

/*
 * csi2_isr_ctx - Per-context interrupt handler (frame-end processing)
 * @csi2: the CSI2 receiver instance
 * @ctx: the receive context whose status is to be handled
 */
static void csi2_isr_ctx(struct isp_csi2_device *csi2,
			 struct isp_csi2_ctx_cfg *ctx)
{
	struct isp_device *isp = csi2->isp;
	unsigned int n = ctx->ctxnum;
	u32 status;

	status = isp_reg_readl(isp, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));
	isp_reg_writel(isp, status, csi2->regs1, ISPCSI2_CTX_IRQSTATUS(n));

	if (!(status & ISPCSI2_CTX_IRQSTATUS_FE_IRQ))
		return;

	/* Skip interrupts until we reach the frame skip count. The CSI2 will be
	 * automatically disabled, as the frame skip count has been programmed
	 * in the CSI2_CTx_CTRL1::COUNT field, so re-enable it.
	 *
	 * It would have been nice to rely on the FRAME_NUMBER interrupt instead
	 * but it turned out that the interrupt is only generated when the CSI2
	 * writes to memory (the CSI2_CTx_CTRL1::COUNT field is decreased
	 * correctly and reaches 0 when data is forwarded to the video port only
	 * but no interrupt arrives). Maybe a CSI2 hardware bug.
	 */
	if (csi2->frame_skip) {
		csi2->frame_skip--;
		if (csi2->frame_skip == 0) {
			ctx->format_id = csi2_ctx_map_format(csi2);
			csi2_ctx_config(isp, csi2, ctx);
			csi2_ctx_enable(isp, csi2, n, 1);
		}
		return;
	}

	if (csi2->output & CSI2_OUTPUT_MEMORY)
		csi2_isr_buffer(csi2);
}

/*
 * omap3isp_csi2_isr - CSI2 interrupt handling.
 */
void omap3isp_csi2_isr(struct isp_csi2_device *csi2)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&csi2->subdev.entity);
	u32 csi2_irqstatus, cpxio1_irqstatus;
	struct isp_device *isp = csi2->isp;

	if (!csi2->available)
		return;

	csi2_irqstatus = isp_reg_readl(isp, csi2->regs1, ISPCSI2_IRQSTATUS);
	isp_reg_writel(isp, csi2_irqstatus, csi2->regs1, ISPCSI2_IRQSTATUS);

	/* Failure Cases */
	if (csi2_irqstatus & ISPCSI2_IRQSTATUS_COMPLEXIO1_ERR_IRQ) {
		cpxio1_irqstatus = isp_reg_readl(isp, csi2->regs1,
						 ISPCSI2_PHY_IRQSTATUS);
		isp_reg_writel(isp, cpxio1_irqstatus,
			       csi2->regs1, ISPCSI2_PHY_IRQSTATUS);
		dev_dbg(isp->dev, "CSI2: ComplexIO Error IRQ %x\n",
			cpxio1_irqstatus);
		pipe->error = true;
	}

	if (csi2_irqstatus & (ISPCSI2_IRQSTATUS_OCP_ERR_IRQ |
			      ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ |
			      ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ |
			      ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ |
			      ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ)) {
		dev_dbg(isp->dev,
			"CSI2 Err: OCP:%d, Short_pack:%d, ECC:%d, CPXIO2:%d, FIFO_OVF:%d,\n",
			(csi2_irqstatus &
			 ISPCSI2_IRQSTATUS_OCP_ERR_IRQ) ? 1 : 0,
			(csi2_irqstatus &
			 ISPCSI2_IRQSTATUS_SHORT_PACKET_IRQ) ? 1 : 0,
			(csi2_irqstatus &
			 ISPCSI2_IRQSTATUS_ECC_NO_CORRECTION_IRQ) ? 1 : 0,
			(csi2_irqstatus &
			 ISPCSI2_IRQSTATUS_COMPLEXIO2_ERR_IRQ) ? 1 : 0,
			(csi2_irqstatus &
			 ISPCSI2_IRQSTATUS_FIFO_OVF_IRQ) ?
			 1 : 0);
		pipe->error = true;
	}

	/* Stop processing if the module is being shut down. */
	if (omap3isp_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
		return;

	/* Successful cases */
	if (csi2_irqstatus & ISPCSI2_IRQSTATUS_CONTEXT(0))
		csi2_isr_ctx(csi2, &csi2->contexts[0]);

	if (csi2_irqstatus & ISPCSI2_IRQSTATUS_ECC_CORRECTION_IRQ)
		dev_dbg(isp->dev, "CSI2: ECC correction done\n");
}

/* -----------------------------------------------------------------------------
 * ISP video operations
 */

/*
 * csi2_queue - Queues the first buffer when using memory output
 * @video: The video node
 * @buffer: buffer to queue
 */
static int csi2_queue(struct isp_video *video, struct isp_buffer *buffer)
{
	struct isp_device *isp = video->isp;
	struct isp_csi2_device *csi2 = &isp->isp_csi2a;

	csi2_set_outaddr(csi2, buffer->dma);

	/*
	 * If streaming was enabled before there was a buffer queued
	 * or underrun happened in the ISR, the hardware was not enabled
	 * and DMA queue flag ISP_VIDEO_DMAQUEUE_UNDERRUN is still set.
	 * Enable it now.
	 */
	if (csi2->video_out.dmaqueue_flags & ISP_VIDEO_DMAQUEUE_UNDERRUN) {
		/* Enable / disable context 0 and IRQs */
		csi2_if_enable(isp, csi2, 1);
		csi2_ctx_enable(isp, csi2, 0, 1);
		isp_video_dmaqueue_flags_clr(&csi2->video_out);
	}

	return 0;
}

static const struct isp_video_operations csi2_ispvideo_ops = {
	.queue = csi2_queue,
};

/* -----------------------------------------------------------------------------
 * V4L2 subdev operations
 */

/*
 * __csi2_get_format - Return the TRY or ACTIVE format for a pad
 * @csi2: the CSI2 receiver instance
 * @sd_state: subdev state holding TRY formats
 * @pad: pad index
 * @which: V4L2_SUBDEV_FORMAT_TRY or V4L2_SUBDEV_FORMAT_ACTIVE
 */
static struct v4l2_mbus_framefmt *
__csi2_get_format(struct isp_csi2_device *csi2,
		  struct v4l2_subdev_state *sd_state,
		  unsigned int pad, enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
						  pad);
	else
		return &csi2->formats[pad];
}

/*
 * csi2_try_format - Adjust a requested format to hardware constraints
 * @csi2: the CSI2 receiver instance
 * @sd_state: subdev state (for TRY formats)
 * @pad: pad the format applies to
 * @fmt: format to adjust in place
 * @which: TRY or ACTIVE
 */
static void
csi2_try_format(struct isp_csi2_device *csi2,
		struct v4l2_subdev_state *sd_state,
		unsigned int pad, struct v4l2_mbus_framefmt *fmt,
		enum v4l2_subdev_format_whence which)
{
	u32 pixelcode;
	struct v4l2_mbus_framefmt *format;
	const struct isp_format_info *info;
	unsigned int i;

	switch (pad) {
	case CSI2_PAD_SINK:
		/* Clamp the width and height to valid range (1-8191). */
		for (i = 0; i < ARRAY_SIZE(csi2_input_fmts); i++) {
			if (fmt->code == csi2_input_fmts[i])
				break;
		}

		/* If not found, use SGRBG10 as default */
		if (i >= ARRAY_SIZE(csi2_input_fmts))
			fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;

		fmt->width = clamp_t(u32, fmt->width, 1, 8191);
		fmt->height = clamp_t(u32, fmt->height, 1, 8191);
		break;

	case CSI2_PAD_SOURCE:
		/* Source format same as sink format, except for DPCM
		 * compression.
		 */
		pixelcode = fmt->code;
		format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
					   which);
		memcpy(fmt, format, sizeof(*fmt));

		/*
		 * Only Allow DPCM decompression, and check that the
		 * pattern is preserved
		 */
		info = omap3isp_video_format_info(fmt->code);
		if (info->uncompressed == pixelcode)
			fmt->code = pixelcode;
		break;
	}

	/* RGB, non-interlaced */
	fmt->colorspace = V4L2_COLORSPACE_SRGB;
	fmt->field = V4L2_FIELD_NONE;
}

/*
 * csi2_enum_mbus_code - Handle pixel format enumeration
 * @sd     : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad configuration
 * @code   : pointer to v4l2_subdev_mbus_code_enum structure
 * return -EINVAL or zero on success
 */
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;
	const struct isp_format_info *info;

	if (code->pad == CSI2_PAD_SINK) {
		if (code->index >= ARRAY_SIZE(csi2_input_fmts))
			return -EINVAL;

		code->code = csi2_input_fmts[code->index];
	} else {
		/* Source enumerates at most two codes derived from the sink. */
		format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
					   code->which);
		switch (code->index) {
		case 0:
			/* Passthrough sink pad code */
			code->code = format->code;
			break;
		case 1:
			/* Uncompressed code */
			info = omap3isp_video_format_info(format->code);
			if (info->uncompressed == format->code)
				return -EINVAL;

			code->code = info->uncompressed;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

/*
 * csi2_enum_frame_size - Enumerate the supported frame size range
 *
 * Probes csi2_try_format() with minimal (1x1) and maximal (-1 wraps to
 * the clamp maximum) sizes to report the valid range for @fse->code.
 */
static int
csi2_enum_frame_size(struct v4l2_subdev *sd,
		     struct v4l2_subdev_state *sd_state,
		     struct v4l2_subdev_frame_size_enum *fse)
{
	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	format.code = fse->code;
	format.width = 1;
	format.height = 1;
	csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
	fse->min_width = format.width;
	fse->min_height = format.height;

	if (format.code != fse->code)
		return -EINVAL;

	format.code = fse->code;
	format.width = -1;
	format.height = -1;
	csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}

/*
 * csi2_get_format - Handle get format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad configuration
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int csi2_get_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	fmt->format = *format;
	return 0;
}

/*
 * csi2_set_format - Handle set format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad configuration
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int csi2_set_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	csi2_try_format(csi2, sd_state, fmt->pad, &fmt->format, fmt->which);
	*format = fmt->format;

	/* Propagate the format from sink to source */
	if (fmt->pad == CSI2_PAD_SINK) {
		format =
__csi2_get_format(csi2, sd_state, CSI2_PAD_SOURCE, fmt->which); *format = fmt->format; csi2_try_format(csi2, sd_state, CSI2_PAD_SOURCE, format, fmt->which); } return 0; } /* * csi2_init_formats - Initialize formats on all pads * @sd: ISP CSI2 V4L2 subdevice * @fh: V4L2 subdev file handle * * Initialize all pad formats with default values. If fh is not NULL, try * formats are initialized on the file handle. Otherwise active formats are * initialized on the device. */ static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct v4l2_subdev_format format; memset(&format, 0, sizeof(format)); format.pad = CSI2_PAD_SINK; format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10; format.format.width = 4096; format.format.height = 4096; csi2_set_format(sd, fh ? fh->state : NULL, &format); return 0; } /* * csi2_set_stream - Enable/Disable streaming on the CSI2 module * @sd: ISP CSI2 V4L2 subdevice * @enable: ISP pipeline stream state * * Return 0 on success or a negative error code otherwise. */ static int csi2_set_stream(struct v4l2_subdev *sd, int enable) { struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd); struct isp_device *isp = csi2->isp; struct isp_video *video_out = &csi2->video_out; switch (enable) { case ISP_PIPELINE_STREAM_CONTINUOUS: if (omap3isp_csiphy_acquire(csi2->phy, &sd->entity) < 0) return -ENODEV; if (csi2->output & CSI2_OUTPUT_MEMORY) omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI2A_WRITE); csi2_configure(csi2); csi2_print_status(csi2); /* * When outputting to memory with no buffer available, let the * buffer queue handler start the hardware. A DMA queue flag * ISP_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is * a buffer available. 
*/ if (csi2->output & CSI2_OUTPUT_MEMORY && !(video_out->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED)) break; /* Enable context 0 and IRQs */ atomic_set(&csi2->stopping, 0); csi2_ctx_enable(isp, csi2, 0, 1); csi2_if_enable(isp, csi2, 1); isp_video_dmaqueue_flags_clr(video_out); break; case ISP_PIPELINE_STREAM_STOPPED: if (csi2->state == ISP_PIPELINE_STREAM_STOPPED) return 0; if (omap3isp_module_sync_idle(&sd->entity, &csi2->wait, &csi2->stopping)) dev_dbg(isp->dev, "%s: module stop timeout.\n", sd->name); csi2_ctx_enable(isp, csi2, 0, 0); csi2_if_enable(isp, csi2, 0); csi2_irq_ctx_set(isp, csi2, 0); omap3isp_csiphy_release(csi2->phy); isp_video_dmaqueue_flags_clr(video_out); omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CSI2A_WRITE); break; } csi2->state = enable; return 0; } /* subdev video operations */ static const struct v4l2_subdev_video_ops csi2_video_ops = { .s_stream = csi2_set_stream, }; /* subdev pad operations */ static const struct v4l2_subdev_pad_ops csi2_pad_ops = { .enum_mbus_code = csi2_enum_mbus_code, .enum_frame_size = csi2_enum_frame_size, .get_fmt = csi2_get_format, .set_fmt = csi2_set_format, }; /* subdev operations */ static const struct v4l2_subdev_ops csi2_ops = { .video = &csi2_video_ops, .pad = &csi2_pad_ops, }; /* subdev internal operations */ static const struct v4l2_subdev_internal_ops csi2_internal_ops = { .open = csi2_init_formats, }; /* ----------------------------------------------------------------------------- * Media entity operations */ /* * csi2_link_setup - Setup CSI2 connections. 
* @entity : Pointer to media entity structure * @local : Pointer to local pad array * @remote : Pointer to remote pad array * @flags : Link flags * return -EINVAL or zero on success */ static int csi2_link_setup(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct isp_csi2_device *csi2 = v4l2_get_subdevdata(sd); struct isp_csi2_ctrl_cfg *ctrl = &csi2->ctrl; unsigned int index = local->index; /* * The ISP core doesn't support pipelines with multiple video outputs. * Revisit this when it will be implemented, and return -EBUSY for now. */ /* FIXME: this is actually a hack! */ if (is_media_entity_v4l2_subdev(remote->entity)) index |= 2 << 16; switch (index) { case CSI2_PAD_SOURCE: if (flags & MEDIA_LNK_FL_ENABLED) { if (csi2->output & ~CSI2_OUTPUT_MEMORY) return -EBUSY; csi2->output |= CSI2_OUTPUT_MEMORY; } else { csi2->output &= ~CSI2_OUTPUT_MEMORY; } break; case CSI2_PAD_SOURCE | 2 << 16: if (flags & MEDIA_LNK_FL_ENABLED) { if (csi2->output & ~CSI2_OUTPUT_CCDC) return -EBUSY; csi2->output |= CSI2_OUTPUT_CCDC; } else { csi2->output &= ~CSI2_OUTPUT_CCDC; } break; default: /* Link from camera to CSI2 is fixed... */ return -EINVAL; } ctrl->vp_only_enable = (csi2->output & CSI2_OUTPUT_MEMORY) ? false : true; ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_CCDC); return 0; } /* media operations */ static const struct media_entity_operations csi2_media_ops = { .link_setup = csi2_link_setup, .link_validate = v4l2_subdev_link_validate, }; void omap3isp_csi2_unregister_entities(struct isp_csi2_device *csi2) { v4l2_device_unregister_subdev(&csi2->subdev); omap3isp_video_unregister(&csi2->video_out); } int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2, struct v4l2_device *vdev) { int ret; /* Register the subdev and video nodes. 
*/
	csi2->subdev.dev = vdev->mdev->dev;
	ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
	if (ret < 0)
		goto error;

	ret = omap3isp_video_register(&csi2->video_out, vdev);
	if (ret < 0)
		goto error;

	return 0;

error:
	/* Unregisters both the subdev and the video node (no-ops if a
	 * registration was never reached). */
	omap3isp_csi2_unregister_entities(csi2);
	return ret;
}

/* -----------------------------------------------------------------------------
 * ISP CSI2 initialisation and cleanup
 */

/*
 * csi2_init_entities - Initialize subdev and media entity.
 * @csi2: Pointer to csi2 structure.
 * return -ENOMEM or zero on success
 */
static int csi2_init_entities(struct isp_csi2_device *csi2)
{
	struct v4l2_subdev *sd = &csi2->subdev;
	struct media_pad *pads = csi2->pads;
	struct media_entity *me = &sd->entity;
	int ret;

	v4l2_subdev_init(sd, &csi2_ops);
	sd->internal_ops = &csi2_internal_ops;
	strscpy(sd->name, "OMAP3 ISP CSI2a", sizeof(sd->name));

	sd->grp_id = 1 << 16;	/* group ID for isp subdevs */
	v4l2_set_subdevdata(sd, csi2);
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;

	/* Sink must be connected for the entity to be usable. */
	pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK
				    | MEDIA_PAD_FL_MUST_CONNECT;

	me->ops = &csi2_media_ops;
	ret = media_entity_pads_init(me, CSI2_PADS_NUM, pads);
	if (ret < 0)
		return ret;

	/* Apply the default active formats on both pads. */
	csi2_init_formats(sd, NULL);

	/* Video device node */
	csi2->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	csi2->video_out.ops = &csi2_ispvideo_ops;
	csi2->video_out.bpl_alignment = 32;
	csi2->video_out.bpl_zero_padding = 1;
	csi2->video_out.bpl_max = 0x1ffe0;
	csi2->video_out.isp = csi2->isp;
	/* Worst case: three buffers of the maximum 4096x4096 frame. */
	csi2->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;

	ret = omap3isp_video_init(&csi2->video_out, "CSI2a");
	if (ret < 0)
		goto error_video;

	return 0;

error_video:
	media_entity_cleanup(&csi2->subdev.entity);
	return ret;
}

/*
 * omap3isp_csi2_init - Routine for module driver init
 */
int omap3isp_csi2_init(struct isp_device *isp)
{
	struct isp_csi2_device *csi2a = &isp->isp_csi2a;
	struct isp_csi2_device *csi2c = &isp->isp_csi2c;
	int ret;

	csi2a->isp = isp;
	csi2a->available = 1;
	csi2a->regs1 =
OMAP3_ISP_IOMEM_CSI2A_REGS1;
	csi2a->regs2 = OMAP3_ISP_IOMEM_CSI2A_REGS2;
	csi2a->phy = &isp->isp_csiphy2;
	csi2a->state = ISP_PIPELINE_STREAM_STOPPED;
	init_waitqueue_head(&csi2a->wait);

	ret = csi2_init_entities(csi2a);
	if (ret < 0)
		return ret;

	/*
	 * Only record the CSI2c receiver's resources on ISP revision 15.0.
	 * Note that csi2_init_entities() is not called for csi2c here, so
	 * no subdev/media entity is created for it.
	 */
	if (isp->revision == ISP_REVISION_15_0) {
		csi2c->isp = isp;
		csi2c->available = 1;
		csi2c->regs1 = OMAP3_ISP_IOMEM_CSI2C_REGS1;
		csi2c->regs2 = OMAP3_ISP_IOMEM_CSI2C_REGS2;
		csi2c->phy = &isp->isp_csiphy1;
		csi2c->state = ISP_PIPELINE_STREAM_STOPPED;
		init_waitqueue_head(&csi2c->wait);
	}

	return 0;
}

/*
 * omap3isp_csi2_cleanup - Routine for module driver cleanup
 */
void omap3isp_csi2_cleanup(struct isp_device *isp)
{
	struct isp_csi2_device *csi2a = &isp->isp_csi2a;

	/* Only csi2a had entities initialized, so only it is cleaned up. */
	omap3isp_video_cleanup(&csi2a->video_out);
	media_entity_cleanup(&csi2a->subdev.entity);
}
/* repo: linux-master */
/* path: drivers/media/platform/ti/omap3isp/ispcsi2.c */
// SPDX-License-Identifier: GPL-2.0-only /* * isppreview.c * * TI OMAP3 ISP driver - Preview module * * Copyright (C) 2010 Nokia Corporation * Copyright (C) 2009 Texas Instruments, Inc. * * Contacts: Laurent Pinchart <[email protected]> * Sakari Ailus <[email protected]> */ #include <linux/device.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include "isp.h" #include "ispreg.h" #include "isppreview.h" /* Default values in Office Fluorescent Light for RGBtoRGB Blending */ static const struct omap3isp_prev_rgbtorgb flr_rgb2rgb = { { /* RGB-RGB Matrix */ {0x01E2, 0x0F30, 0x0FEE}, {0x0F9B, 0x01AC, 0x0FB9}, {0x0FE0, 0x0EC0, 0x0260} }, /* RGB Offset */ {0x0000, 0x0000, 0x0000} }; /* Default values in Office Fluorescent Light for RGB to YUV Conversion*/ static const struct omap3isp_prev_csc flr_prev_csc = { { /* CSC Coef Matrix */ {66, 129, 25}, {-38, -75, 112}, {112, -94 , -18} }, /* CSC Offset */ {0x0, 0x0, 0x0} }; /* Default values in Office Fluorescent Light for CFA Gradient*/ #define FLR_CFA_GRADTHRS_HORZ 0x28 #define FLR_CFA_GRADTHRS_VERT 0x28 /* Default values in Office Fluorescent Light for Chroma Suppression*/ #define FLR_CSUP_GAIN 0x0D #define FLR_CSUP_THRES 0xEB /* Default values in Office Fluorescent Light for Noise Filter*/ #define FLR_NF_STRGTH 0x03 /* Default values for White Balance */ #define FLR_WBAL_DGAIN 0x100 #define FLR_WBAL_COEF 0x20 /* Default values in Office Fluorescent Light for Black Adjustment*/ #define FLR_BLKADJ_BLUE 0x0 #define FLR_BLKADJ_GREEN 0x0 #define FLR_BLKADJ_RED 0x0 #define DEF_DETECT_CORRECT_VAL 0xe /* * Margins and image size limits. * * The preview engine crops several rows and columns internally depending on * which filters are enabled. 
To avoid format changes when the filters are * enabled or disabled (which would prevent them from being turned on or off * during streaming), the driver assumes all filters that can be configured * during streaming are enabled when computing sink crop and source format * limits. * * If a filter is disabled, additional cropping is automatically added at the * preview engine input by the driver to avoid overflow at line and frame end. * This is completely transparent for applications. * * Median filter 4 pixels * Noise filter, * Faulty pixels correction 4 pixels, 4 lines * Color suppression 2 pixels * or luma enhancement * ------------------------------------------------------------- * Maximum total 10 pixels, 4 lines * * The color suppression and luma enhancement filters are applied after bayer to * YUV conversion. They thus can crop one pixel on the left and one pixel on the * right side of the image without changing the color pattern. When both those * filters are disabled, the driver must crop the two pixels on the same side of * the image to avoid changing the bayer pattern. The left margin is thus set to * 6 pixels and the right margin to 4 pixels. */ #define PREV_MARGIN_LEFT 6 #define PREV_MARGIN_RIGHT 4 #define PREV_MARGIN_TOP 2 #define PREV_MARGIN_BOTTOM 2 #define PREV_MIN_IN_WIDTH 64 #define PREV_MIN_IN_HEIGHT 8 #define PREV_MAX_IN_HEIGHT 16384 #define PREV_MIN_OUT_WIDTH 0 #define PREV_MIN_OUT_HEIGHT 0 #define PREV_MAX_OUT_WIDTH_REV_1 1280 #define PREV_MAX_OUT_WIDTH_REV_2 3300 #define PREV_MAX_OUT_WIDTH_REV_15 4096 /* * Coefficient Tables for the submodules in Preview. * Array is initialised with the values from.the tables text file. 
*/ /* * CFA Filter Coefficient Table * */ static u32 cfa_coef_table[4][OMAP3ISP_PREV_CFA_BLK_SIZE] = { #include "cfa_coef_table.h" }; /* * Default Gamma Correction Table - All components */ static u32 gamma_table[] = { #include "gamma_table.h" }; /* * Noise Filter Threshold table */ static u32 noise_filter_table[] = { #include "noise_filter_table.h" }; /* * Luminance Enhancement Table */ static u32 luma_enhance_table[] = { #include "luma_enhance_table.h" }; /* * preview_config_luma_enhancement - Configure the Luminance Enhancement table */ static void preview_config_luma_enhancement(struct isp_prev_device *prev, const struct prev_params *params) { struct isp_device *isp = to_isp_device(prev); const struct omap3isp_prev_luma *yt = &params->luma; unsigned int i; isp_reg_writel(isp, ISPPRV_YENH_TABLE_ADDR, OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); for (i = 0; i < OMAP3ISP_PREV_YENH_TBL_SIZE; i++) { isp_reg_writel(isp, yt->table[i], OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); } } /* * preview_enable_luma_enhancement - Enable/disable Luminance Enhancement */ static void preview_enable_luma_enhancement(struct isp_prev_device *prev, bool enable) { struct isp_device *isp = to_isp_device(prev); if (enable) isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_YNENHEN); else isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_YNENHEN); } /* * preview_enable_invalaw - Enable/disable Inverse A-Law decompression */ static void preview_enable_invalaw(struct isp_prev_device *prev, bool enable) { struct isp_device *isp = to_isp_device(prev); if (enable) isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_INVALAW); else isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_INVALAW); } /* * preview_config_hmed - Configure the Horizontal Median Filter */ static void preview_config_hmed(struct isp_prev_device *prev, const struct prev_params *params) { struct isp_device *isp = to_isp_device(prev); const struct omap3isp_prev_hmed *hmed = 
&params->hmed; isp_reg_writel(isp, (hmed->odddist == 1 ? 0 : ISPPRV_HMED_ODDDIST) | (hmed->evendist == 1 ? 0 : ISPPRV_HMED_EVENDIST) | (hmed->thres << ISPPRV_HMED_THRESHOLD_SHIFT), OMAP3_ISP_IOMEM_PREV, ISPPRV_HMED); } /* * preview_enable_hmed - Enable/disable the Horizontal Median Filter */ static void preview_enable_hmed(struct isp_prev_device *prev, bool enable) { struct isp_device *isp = to_isp_device(prev); if (enable) isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_HMEDEN); else isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_HMEDEN); } /* * preview_config_cfa - Configure CFA Interpolation for Bayer formats * * The CFA table is organised in four blocks, one per Bayer component. The * hardware expects blocks to follow the Bayer order of the input data, while * the driver stores the table in GRBG order in memory. The blocks need to be * reordered to support non-GRBG Bayer patterns. */ static void preview_config_cfa(struct isp_prev_device *prev, const struct prev_params *params) { static const unsigned int cfa_coef_order[4][4] = { { 0, 1, 2, 3 }, /* GRBG */ { 1, 0, 3, 2 }, /* RGGB */ { 2, 3, 0, 1 }, /* BGGR */ { 3, 2, 1, 0 }, /* GBRG */ }; const unsigned int *order = cfa_coef_order[prev->params.cfa_order]; const struct omap3isp_prev_cfa *cfa = &params->cfa; struct isp_device *isp = to_isp_device(prev); unsigned int i; unsigned int j; isp_reg_writel(isp, (cfa->gradthrs_vert << ISPPRV_CFA_GRADTH_VER_SHIFT) | (cfa->gradthrs_horz << ISPPRV_CFA_GRADTH_HOR_SHIFT), OMAP3_ISP_IOMEM_PREV, ISPPRV_CFA); isp_reg_writel(isp, ISPPRV_CFA_TABLE_ADDR, OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); for (i = 0; i < 4; ++i) { const __u32 *block = cfa->table[order[i]]; for (j = 0; j < OMAP3ISP_PREV_CFA_BLK_SIZE; ++j) isp_reg_writel(isp, block[j], OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); } } /* * preview_config_chroma_suppression - Configure Chroma Suppression */ static void preview_config_chroma_suppression(struct isp_prev_device *prev, const struct 
prev_params *params) { struct isp_device *isp = to_isp_device(prev); const struct omap3isp_prev_csup *cs = &params->csup; isp_reg_writel(isp, cs->gain | (cs->thres << ISPPRV_CSUP_THRES_SHIFT) | (cs->hypf_en << ISPPRV_CSUP_HPYF_SHIFT), OMAP3_ISP_IOMEM_PREV, ISPPRV_CSUP); } /* * preview_enable_chroma_suppression - Enable/disable Chrominance Suppression */ static void preview_enable_chroma_suppression(struct isp_prev_device *prev, bool enable) { struct isp_device *isp = to_isp_device(prev); if (enable) isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_SUPEN); else isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_SUPEN); } /* * preview_config_whitebalance - Configure White Balance parameters * * Coefficient matrix always with default values. */ static void preview_config_whitebalance(struct isp_prev_device *prev, const struct prev_params *params) { struct isp_device *isp = to_isp_device(prev); const struct omap3isp_prev_wbal *wbal = &params->wbal; u32 val; isp_reg_writel(isp, wbal->dgain, OMAP3_ISP_IOMEM_PREV, ISPPRV_WB_DGAIN); val = wbal->coef0 << ISPPRV_WBGAIN_COEF0_SHIFT; val |= wbal->coef1 << ISPPRV_WBGAIN_COEF1_SHIFT; val |= wbal->coef2 << ISPPRV_WBGAIN_COEF2_SHIFT; val |= wbal->coef3 << ISPPRV_WBGAIN_COEF3_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_WBGAIN); isp_reg_writel(isp, ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N0_0_SHIFT | ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N0_1_SHIFT | ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N0_2_SHIFT | ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N0_3_SHIFT | ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N1_0_SHIFT | ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N1_1_SHIFT | ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N1_2_SHIFT | ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N1_3_SHIFT | ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N2_0_SHIFT | ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N2_1_SHIFT | ISPPRV_WBSEL_COEF0 << ISPPRV_WBSEL_N2_2_SHIFT | ISPPRV_WBSEL_COEF1 << ISPPRV_WBSEL_N2_3_SHIFT | ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N3_0_SHIFT | ISPPRV_WBSEL_COEF3 << 
ISPPRV_WBSEL_N3_1_SHIFT | ISPPRV_WBSEL_COEF2 << ISPPRV_WBSEL_N3_2_SHIFT | ISPPRV_WBSEL_COEF3 << ISPPRV_WBSEL_N3_3_SHIFT, OMAP3_ISP_IOMEM_PREV, ISPPRV_WBSEL); } /* * preview_config_blkadj - Configure Black Adjustment */ static void preview_config_blkadj(struct isp_prev_device *prev, const struct prev_params *params) { struct isp_device *isp = to_isp_device(prev); const struct omap3isp_prev_blkadj *blkadj = &params->blkadj; isp_reg_writel(isp, (blkadj->blue << ISPPRV_BLKADJOFF_B_SHIFT) | (blkadj->green << ISPPRV_BLKADJOFF_G_SHIFT) | (blkadj->red << ISPPRV_BLKADJOFF_R_SHIFT), OMAP3_ISP_IOMEM_PREV, ISPPRV_BLKADJOFF); } /* * preview_config_rgb_blending - Configure RGB-RGB Blending */ static void preview_config_rgb_blending(struct isp_prev_device *prev, const struct prev_params *params) { struct isp_device *isp = to_isp_device(prev); const struct omap3isp_prev_rgbtorgb *rgbrgb = &params->rgb2rgb; u32 val; val = (rgbrgb->matrix[0][0] & 0xfff) << ISPPRV_RGB_MAT1_MTX_RR_SHIFT; val |= (rgbrgb->matrix[0][1] & 0xfff) << ISPPRV_RGB_MAT1_MTX_GR_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT1); val = (rgbrgb->matrix[0][2] & 0xfff) << ISPPRV_RGB_MAT2_MTX_BR_SHIFT; val |= (rgbrgb->matrix[1][0] & 0xfff) << ISPPRV_RGB_MAT2_MTX_RG_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT2); val = (rgbrgb->matrix[1][1] & 0xfff) << ISPPRV_RGB_MAT3_MTX_GG_SHIFT; val |= (rgbrgb->matrix[1][2] & 0xfff) << ISPPRV_RGB_MAT3_MTX_BG_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT3); val = (rgbrgb->matrix[2][0] & 0xfff) << ISPPRV_RGB_MAT4_MTX_RB_SHIFT; val |= (rgbrgb->matrix[2][1] & 0xfff) << ISPPRV_RGB_MAT4_MTX_GB_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT4); val = (rgbrgb->matrix[2][2] & 0xfff) << ISPPRV_RGB_MAT5_MTX_BB_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_MAT5); val = (rgbrgb->offset[0] & 0x3ff) << ISPPRV_RGB_OFF1_MTX_OFFR_SHIFT; val |= (rgbrgb->offset[1] & 0x3ff) << 
ISPPRV_RGB_OFF1_MTX_OFFG_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_OFF1); val = (rgbrgb->offset[2] & 0x3ff) << ISPPRV_RGB_OFF2_MTX_OFFB_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_RGB_OFF2); } /* * preview_config_csc - Configure Color Space Conversion (RGB to YCbYCr) */ static void preview_config_csc(struct isp_prev_device *prev, const struct prev_params *params) { struct isp_device *isp = to_isp_device(prev); const struct omap3isp_prev_csc *csc = &params->csc; u32 val; val = (csc->matrix[0][0] & 0x3ff) << ISPPRV_CSC0_RY_SHIFT; val |= (csc->matrix[0][1] & 0x3ff) << ISPPRV_CSC0_GY_SHIFT; val |= (csc->matrix[0][2] & 0x3ff) << ISPPRV_CSC0_BY_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC0); val = (csc->matrix[1][0] & 0x3ff) << ISPPRV_CSC1_RCB_SHIFT; val |= (csc->matrix[1][1] & 0x3ff) << ISPPRV_CSC1_GCB_SHIFT; val |= (csc->matrix[1][2] & 0x3ff) << ISPPRV_CSC1_BCB_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC1); val = (csc->matrix[2][0] & 0x3ff) << ISPPRV_CSC2_RCR_SHIFT; val |= (csc->matrix[2][1] & 0x3ff) << ISPPRV_CSC2_GCR_SHIFT; val |= (csc->matrix[2][2] & 0x3ff) << ISPPRV_CSC2_BCR_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC2); val = (csc->offset[0] & 0xff) << ISPPRV_CSC_OFFSET_Y_SHIFT; val |= (csc->offset[1] & 0xff) << ISPPRV_CSC_OFFSET_CB_SHIFT; val |= (csc->offset[2] & 0xff) << ISPPRV_CSC_OFFSET_CR_SHIFT; isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_PREV, ISPPRV_CSC_OFFSET); } /* * preview_config_yc_range - Configure the max and min Y and C values */ static void preview_config_yc_range(struct isp_prev_device *prev, const struct prev_params *params) { struct isp_device *isp = to_isp_device(prev); const struct omap3isp_prev_yclimit *yc = &params->yclimit; isp_reg_writel(isp, yc->maxC << ISPPRV_SETUP_YC_MAXC_SHIFT | yc->maxY << ISPPRV_SETUP_YC_MAXY_SHIFT | yc->minC << ISPPRV_SETUP_YC_MINC_SHIFT | yc->minY << ISPPRV_SETUP_YC_MINY_SHIFT, OMAP3_ISP_IOMEM_PREV, ISPPRV_SETUP_YC); 
}

/*
 * preview_config_dcor - Configure Couplet Defect Correction
 */
static void preview_config_dcor(struct isp_prev_device *prev,
				const struct prev_params *params)
{
	struct isp_device *isp = to_isp_device(prev);
	const struct omap3isp_prev_dcor *dcor = &params->dcor;

	/* Program the four detect/correct threshold registers. */
	isp_reg_writel(isp, dcor->detect_correct[0],
		       OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR0);
	isp_reg_writel(isp, dcor->detect_correct[1],
		       OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR1);
	isp_reg_writel(isp, dcor->detect_correct[2],
		       OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR2);
	isp_reg_writel(isp, dcor->detect_correct[3],
		       OMAP3_ISP_IOMEM_PREV, ISPPRV_CDC_THR3);
	/* Select couplet mode when requested, clear the bit otherwise. */
	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			ISPPRV_PCR_DCCOUP,
			dcor->couplet_mode_en ? ISPPRV_PCR_DCCOUP : 0);
}

/*
 * preview_enable_dcor - Enable/disable Couplet Defect Correction
 */
static void preview_enable_dcor(struct isp_prev_device *prev, bool enable)
{
	struct isp_device *isp = to_isp_device(prev);

	if (enable)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_DCOREN);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_DCOREN);
}

/*
 * preview_enable_drkframe_capture - Enable/disable Dark Frame Capture
 */
static void preview_enable_drkframe_capture(struct isp_prev_device *prev,
					    bool enable)
{
	struct isp_device *isp = to_isp_device(prev);

	if (enable)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_DRKFCAP);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_DRKFCAP);
}

/*
 * preview_enable_drkframe - Enable/disable Dark Frame Subtraction
 */
static void preview_enable_drkframe(struct isp_prev_device *prev, bool enable)
{
	struct isp_device *isp = to_isp_device(prev);

	if (enable)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_DRKFEN);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_DRKFEN);
}

/*
 * preview_config_noisefilter - Configure the Noise Filter
 */
static void preview_config_noisefilter(struct isp_prev_device *prev,
				       const struct prev_params *params)
{
	struct
isp_device *isp = to_isp_device(prev); const struct omap3isp_prev_nf *nf = &params->nf; unsigned int i; isp_reg_writel(isp, nf->spread, OMAP3_ISP_IOMEM_PREV, ISPPRV_NF); isp_reg_writel(isp, ISPPRV_NF_TABLE_ADDR, OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); for (i = 0; i < OMAP3ISP_PREV_NF_TBL_SIZE; i++) { isp_reg_writel(isp, nf->table[i], OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); } } /* * preview_enable_noisefilter - Enable/disable the Noise Filter */ static void preview_enable_noisefilter(struct isp_prev_device *prev, bool enable) { struct isp_device *isp = to_isp_device(prev); if (enable) isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_NFEN); else isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_NFEN); } /* * preview_config_gammacorrn - Configure the Gamma Correction tables */ static void preview_config_gammacorrn(struct isp_prev_device *prev, const struct prev_params *params) { struct isp_device *isp = to_isp_device(prev); const struct omap3isp_prev_gtables *gt = &params->gamma; unsigned int i; isp_reg_writel(isp, ISPPRV_REDGAMMA_TABLE_ADDR, OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); for (i = 0; i < OMAP3ISP_PREV_GAMMA_TBL_SIZE; i++) isp_reg_writel(isp, gt->red[i], OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); isp_reg_writel(isp, ISPPRV_GREENGAMMA_TABLE_ADDR, OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); for (i = 0; i < OMAP3ISP_PREV_GAMMA_TBL_SIZE; i++) isp_reg_writel(isp, gt->green[i], OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); isp_reg_writel(isp, ISPPRV_BLUEGAMMA_TABLE_ADDR, OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_ADDR); for (i = 0; i < OMAP3ISP_PREV_GAMMA_TBL_SIZE; i++) isp_reg_writel(isp, gt->blue[i], OMAP3_ISP_IOMEM_PREV, ISPPRV_SET_TBL_DATA); } /* * preview_enable_gammacorrn - Enable/disable Gamma Correction * * When gamma correction is disabled, the module is bypassed and its output is * the 8 MSB of the 10-bit input . 
*/
static void preview_enable_gammacorrn(struct isp_prev_device *prev, bool enable)
{
	struct isp_device *isp = to_isp_device(prev);

	/* Note the inverted logic: the PCR bit enables the gamma *bypass*. */
	if (enable)
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_GAMMA_BYPASS);
	else
		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_GAMMA_BYPASS);
}

/*
 * preview_config_contrast - Configure the Contrast
 *
 * Value should be programmed before enabling the module.
 */
static void preview_config_contrast(struct isp_prev_device *prev,
				    const struct prev_params *params)
{
	struct isp_device *isp = to_isp_device(prev);

	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT,
			0xff << ISPPRV_CNT_BRT_CNT_SHIFT,
			params->contrast << ISPPRV_CNT_BRT_CNT_SHIFT);
}

/*
 * preview_config_brightness - Configure the Brightness
 */
static void preview_config_brightness(struct isp_prev_device *prev,
				      const struct prev_params *params)
{
	struct isp_device *isp = to_isp_device(prev);

	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_CNT_BRT,
			0xff << ISPPRV_CNT_BRT_BRT_SHIFT,
			params->brightness << ISPPRV_CNT_BRT_BRT_SHIFT);
}

/*
 * preview_update_contrast - Updates the contrast.
 * @contrast: Pointer to hold the current programmed contrast value.
 *
 * Value should be programmed before enabling the module.
 */
static void preview_update_contrast(struct isp_prev_device *prev, u8 contrast)
{
	struct prev_params *params;
	unsigned long flags;

	spin_lock_irqsave(&prev->params.lock, flags);
	/*
	 * NOTE(review): this selects params[0] when the CONTRAST bit is
	 * active, which matches the active-set indexing used by
	 * preview_setup_hw(); contrast appears to bypass the shadow/active
	 * parameter switching (it is marked 'skip' in update_attrs) —
	 * confirm against the parameter-switch logic.
	 */
	params = (prev->params.active & OMAP3ISP_PREV_CONTRAST)
	       ? &prev->params.params[0] : &prev->params.params[1];

	if (params->contrast != (contrast * ISPPRV_CONTRAST_UNITS)) {
		params->contrast = contrast * ISPPRV_CONTRAST_UNITS;
		params->update |= OMAP3ISP_PREV_CONTRAST;
	}
	spin_unlock_irqrestore(&prev->params.lock, flags);
}

/*
 * preview_update_brightness - Updates the brightness in preview module.
 * @brightness: Pointer to hold the current programmed brightness value.
* */ static void preview_update_brightness(struct isp_prev_device *prev, u8 brightness) { struct prev_params *params; unsigned long flags; spin_lock_irqsave(&prev->params.lock, flags); params = (prev->params.active & OMAP3ISP_PREV_BRIGHTNESS) ? &prev->params.params[0] : &prev->params.params[1]; if (params->brightness != (brightness * ISPPRV_BRIGHT_UNITS)) { params->brightness = brightness * ISPPRV_BRIGHT_UNITS; params->update |= OMAP3ISP_PREV_BRIGHTNESS; } spin_unlock_irqrestore(&prev->params.lock, flags); } static u32 preview_params_lock(struct isp_prev_device *prev, u32 update, bool shadow) { u32 active = prev->params.active; if (shadow) { /* Mark all shadow parameters we are going to touch as busy. */ prev->params.params[0].busy |= ~active & update; prev->params.params[1].busy |= active & update; } else { /* Mark all active parameters we are going to touch as busy. */ update = (prev->params.params[0].update & active) | (prev->params.params[1].update & ~active); prev->params.params[0].busy |= active & update; prev->params.params[1].busy |= ~active & update; } return update; } static void preview_params_unlock(struct isp_prev_device *prev, u32 update, bool shadow) { u32 active = prev->params.active; if (shadow) { /* Set the update flag for shadow parameters that have been * updated and clear the busy flag for all shadow parameters. */ prev->params.params[0].update |= (~active & update); prev->params.params[1].update |= (active & update); prev->params.params[0].busy &= active; prev->params.params[1].busy &= ~active; } else { /* Clear the update flag for active parameters that have been * applied and the busy flag for all active parameters. 
*/ prev->params.params[0].update &= ~(active & update); prev->params.params[1].update &= ~(~active & update); prev->params.params[0].busy &= ~active; prev->params.params[1].busy &= active; } } static void preview_params_switch(struct isp_prev_device *prev) { u32 to_switch; /* Switch active parameters with updated shadow parameters when the * shadow parameter has been updated and neither the active not the * shadow parameter is busy. */ to_switch = (prev->params.params[0].update & ~prev->params.active) | (prev->params.params[1].update & prev->params.active); to_switch &= ~(prev->params.params[0].busy | prev->params.params[1].busy); if (to_switch == 0) return; prev->params.active ^= to_switch; /* Remove the update flag for the shadow copy of parameters we have * switched. */ prev->params.params[0].update &= ~(~prev->params.active & to_switch); prev->params.params[1].update &= ~(prev->params.active & to_switch); } /* preview parameters update structure */ struct preview_update { void (*config)(struct isp_prev_device *, const struct prev_params *); void (*enable)(struct isp_prev_device *, bool); unsigned int param_offset; unsigned int param_size; unsigned int config_offset; bool skip; }; /* Keep the array indexed by the OMAP3ISP_PREV_* bit number. 
 */
/*
 * Per-feature descriptors, indexed by the feature's bit number in the
 * OMAP3ISP_PREV_* bitmask. Entries carry optional configure/enable callbacks
 * and, for features with user-supplied data, the offsets and size used to
 * copy the userspace structure into the shadow prev_params set.
 */
static const struct preview_update update_attrs[] = {
	/* OMAP3ISP_PREV_LUMAENH */ {
		preview_config_luma_enhancement,
		preview_enable_luma_enhancement,
		offsetof(struct prev_params, luma),
		sizeof_field(struct prev_params, luma),
		offsetof(struct omap3isp_prev_update_config, luma),
	}, /* OMAP3ISP_PREV_INVALAW */ {
		NULL,
		preview_enable_invalaw,
	}, /* OMAP3ISP_PREV_HRZ_MED */ {
		preview_config_hmed,
		preview_enable_hmed,
		offsetof(struct prev_params, hmed),
		sizeof_field(struct prev_params, hmed),
		offsetof(struct omap3isp_prev_update_config, hmed),
	}, /* OMAP3ISP_PREV_CFA */ {
		preview_config_cfa,
		NULL,
		offsetof(struct prev_params, cfa),
		sizeof_field(struct prev_params, cfa),
		offsetof(struct omap3isp_prev_update_config, cfa),
	}, /* OMAP3ISP_PREV_CHROMA_SUPP */ {
		preview_config_chroma_suppression,
		preview_enable_chroma_suppression,
		offsetof(struct prev_params, csup),
		sizeof_field(struct prev_params, csup),
		offsetof(struct omap3isp_prev_update_config, csup),
	}, /* OMAP3ISP_PREV_WB */ {
		preview_config_whitebalance,
		NULL,
		offsetof(struct prev_params, wbal),
		sizeof_field(struct prev_params, wbal),
		offsetof(struct omap3isp_prev_update_config, wbal),
	}, /* OMAP3ISP_PREV_BLKADJ */ {
		preview_config_blkadj,
		NULL,
		offsetof(struct prev_params, blkadj),
		sizeof_field(struct prev_params, blkadj),
		offsetof(struct omap3isp_prev_update_config, blkadj),
	}, /* OMAP3ISP_PREV_RGB2RGB */ {
		preview_config_rgb_blending,
		NULL,
		offsetof(struct prev_params, rgb2rgb),
		sizeof_field(struct prev_params, rgb2rgb),
		offsetof(struct omap3isp_prev_update_config, rgb2rgb),
	}, /* OMAP3ISP_PREV_COLOR_CONV */ {
		preview_config_csc,
		NULL,
		offsetof(struct prev_params, csc),
		sizeof_field(struct prev_params, csc),
		offsetof(struct omap3isp_prev_update_config, csc),
	}, /* OMAP3ISP_PREV_YC_LIMIT */ {
		preview_config_yc_range,
		NULL,
		offsetof(struct prev_params, yclimit),
		sizeof_field(struct prev_params, yclimit),
		offsetof(struct omap3isp_prev_update_config, yclimit),
	}, /* OMAP3ISP_PREV_DEFECT_COR */ {
		preview_config_dcor,
		preview_enable_dcor,
		offsetof(struct prev_params, dcor),
		sizeof_field(struct prev_params, dcor),
		offsetof(struct omap3isp_prev_update_config, dcor),
	}, /* Previously OMAP3ISP_PREV_GAMMABYPASS, not used anymore */ {
		NULL,
		NULL,
	}, /* OMAP3ISP_PREV_DRK_FRM_CAPTURE */ {
		NULL,
		preview_enable_drkframe_capture,
	}, /* OMAP3ISP_PREV_DRK_FRM_SUBTRACT */ {
		NULL,
		preview_enable_drkframe,
	}, /* OMAP3ISP_PREV_LENS_SHADING */ {
		NULL,
		preview_enable_drkframe,
	}, /* OMAP3ISP_PREV_NF */ {
		preview_config_noisefilter,
		preview_enable_noisefilter,
		offsetof(struct prev_params, nf),
		sizeof_field(struct prev_params, nf),
		offsetof(struct omap3isp_prev_update_config, nf),
	}, /* OMAP3ISP_PREV_GAMMA */ {
		preview_config_gammacorrn,
		preview_enable_gammacorrn,
		offsetof(struct prev_params, gamma),
		sizeof_field(struct prev_params, gamma),
		offsetof(struct omap3isp_prev_update_config, gamma),
	}, /* OMAP3ISP_PREV_CONTRAST */ {
		preview_config_contrast,
		NULL, 0, 0, 0, true,
	}, /* OMAP3ISP_PREV_BRIGHTNESS */ {
		preview_config_brightness,
		NULL, 0, 0, 0, true,
	},
};

/*
 * preview_config - Copy and update local structure with userspace preview
 *                  configuration.
 * @prev: ISP preview engine
 * @cfg: Configuration
 *
 * Copy the requested feature parameters from userspace into the shadow
 * (non-active) parameter set and queue a parameter switch.
 *
 * Return zero if success or -EFAULT if the configuration can't be copied from
 * userspace.
 */
static int preview_config(struct isp_prev_device *prev,
			  struct omap3isp_prev_update_config *cfg)
{
	unsigned long flags;
	unsigned int i;
	int rval = 0;
	u32 update;
	u32 active;

	if (cfg->update == 0)
		return 0;

	/* Mark the shadow parameters we're going to update as busy. */
	spin_lock_irqsave(&prev->params.lock, flags);
	preview_params_lock(prev, cfg->update, true);
	active = prev->params.active;
	spin_unlock_irqrestore(&prev->params.lock, flags);

	update = 0;

	for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
		const struct preview_update *attr = &update_attrs[i];
		struct prev_params *params;
		unsigned int bit = 1 << i;

		/* Features marked skip (contrast/brightness) are controlled
		 * through V4L2 controls, not through this ioctl.
		 */
		if (attr->skip || !(cfg->update & bit))
			continue;

		/* 'active' has the bit set when the active copy lives in set
		 * 0, so !!(active & bit) indexes the shadow (inactive) set.
		 */
		params = &prev->params.params[!!(active & bit)];

		if (cfg->flag & bit) {
			void __user *from = *(void __user **)
				((void *)cfg + attr->config_offset);
			void *to = (void *)params + attr->param_offset;
			size_t size = attr->param_size;

			if (to && from && size) {
				if (copy_from_user(to, from, size)) {
					rval = -EFAULT;
					break;
				}
			}

			params->features |= bit;
		} else {
			params->features &= ~bit;
		}

		update |= bit;
	}

	spin_lock_irqsave(&prev->params.lock, flags);
	preview_params_unlock(prev, update, true);
	preview_params_switch(prev);
	spin_unlock_irqrestore(&prev->params.lock, flags);

	return rval;
}

/*
 * preview_setup_hw - Setup preview registers and/or internal memory
 * @prev: pointer to preview private structure
 * @update: Bitmask of parameters to setup
 * @active: Bitmask of parameters active in set 0
 * Note: can be called from interrupt context
 * Return none
 */
static void preview_setup_hw(struct isp_prev_device *prev, u32 update,
			     u32 active)
{
	unsigned int i;

	if (update == 0)
		return;

	for (i = 0; i < ARRAY_SIZE(update_attrs); i++) {
		const struct preview_update *attr = &update_attrs[i];
		struct prev_params *params;
		unsigned int bit = 1 << i;

		if (!(update & bit))
			continue;

		/* Program the hardware from the active set: index 0 when the
		 * bit is set in 'active', index 1 otherwise.
		 */
		params = &prev->params.params[!(active & bit)];

		if (params->features & bit) {
			if (attr->config)
				attr->config(prev, params);
			if (attr->enable)
				attr->enable(prev, true);
		} else {
			if (attr->enable)
				attr->enable(prev, false);
		}
	}
}

/*
 * preview_config_ycpos - Configure byte layout of YUV image.
* @prev: pointer to previewer private structure * @pixelcode: pixel code */ static void preview_config_ycpos(struct isp_prev_device *prev, u32 pixelcode) { struct isp_device *isp = to_isp_device(prev); enum preview_ycpos_mode mode; switch (pixelcode) { case MEDIA_BUS_FMT_YUYV8_1X16: mode = YCPOS_CrYCbY; break; case MEDIA_BUS_FMT_UYVY8_1X16: mode = YCPOS_YCrYCb; break; default: return; } isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_YCPOS_CrYCbY, mode << ISPPRV_PCR_YCPOS_SHIFT); } /* * preview_config_averager - Enable / disable / configure averager * @average: Average value to be configured. */ static void preview_config_averager(struct isp_prev_device *prev, u8 average) { struct isp_device *isp = to_isp_device(prev); isp_reg_writel(isp, ISPPRV_AVE_EVENDIST_2 << ISPPRV_AVE_EVENDIST_SHIFT | ISPPRV_AVE_ODDDIST_2 << ISPPRV_AVE_ODDDIST_SHIFT | average, OMAP3_ISP_IOMEM_PREV, ISPPRV_AVE); } /* * preview_config_input_format - Configure the input format * @prev: The preview engine * @info: Sink pad format information * * Enable and configure CFA interpolation for Bayer formats and disable it for * greyscale formats. * * The CFA table is organised in four blocks, one per Bayer component. The * hardware expects blocks to follow the Bayer order of the input data, while * the driver stores the table in GRBG order in memory. The blocks need to be * reordered to support non-GRBG Bayer patterns. 
 */
static void preview_config_input_format(struct isp_prev_device *prev,
					const struct isp_format_info *info)
{
	struct isp_device *isp = to_isp_device(prev);
	struct prev_params *params;

	/* Select 8-bit or 10-bit input data width. */
	if (info->width == 8)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_WIDTH);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_WIDTH);

	/* cfa_order selects how the GRBG-ordered CFA table blocks are
	 * reordered to match the sensor's Bayer pattern (see comment above).
	 */
	switch (info->flavor) {
	case MEDIA_BUS_FMT_SGRBG8_1X8:
		prev->params.cfa_order = 0;
		break;
	case MEDIA_BUS_FMT_SRGGB8_1X8:
		prev->params.cfa_order = 1;
		break;
	case MEDIA_BUS_FMT_SBGGR8_1X8:
		prev->params.cfa_order = 2;
		break;
	case MEDIA_BUS_FMT_SGBRG8_1X8:
		prev->params.cfa_order = 3;
		break;
	default:
		/* Disable CFA for non-Bayer formats. */
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_CFAEN);
		return;
	}

	isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR, ISPPRV_PCR_CFAEN);
	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			ISPPRV_PCR_CFAFMT_MASK, ISPPRV_PCR_CFAFMT_BAYER);

	/* Reload the CFA table from whichever parameter set currently holds
	 * the active CFA configuration.
	 */
	params = (prev->params.active & OMAP3ISP_PREV_CFA)
	       ? &prev->params.params[0] : &prev->params.params[1];

	preview_config_cfa(prev, params);
}

/*
 * preview_config_input_size - Configure the input frame size
 *
 * The preview engine crops several rows and columns internally depending on
 * which processing blocks are enabled. The driver assumes all those blocks are
 * enabled when reporting source pad formats to userspace. If this assumption is
 * not true, rows and columns must be manually cropped at the preview engine
 * input to avoid overflows at the end of lines and frames.
 *
 * See the explanation at the PREV_MARGIN_* definitions for more details.
 */
static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
{
	const struct v4l2_mbus_framefmt *format = &prev->formats[PREV_PAD_SINK];
	struct isp_device *isp = to_isp_device(prev);
	unsigned int sph = prev->crop.left;
	unsigned int eph = prev->crop.left + prev->crop.width - 1;
	unsigned int slv = prev->crop.top;
	unsigned int elv = prev->crop.top + prev->crop.height - 1;
	u32 features;

	/* Grow the window for the pixels the CFA filter consumes on Bayer
	 * inputs (greyscale formats bypass CFA interpolation).
	 */
	if (format->code != MEDIA_BUS_FMT_Y8_1X8 &&
	    format->code != MEDIA_BUS_FMT_Y10_1X10) {
		sph -= 2;
		eph += 2;
		slv -= 2;
		elv += 2;
	}

	/* Combine the features enabled in the active set with those enabled
	 * in the shadow set, then widen the window per enabled filter.
	 */
	features = (prev->params.params[0].features & active)
		 | (prev->params.params[1].features & ~active);

	if (features & (OMAP3ISP_PREV_DEFECT_COR | OMAP3ISP_PREV_NF)) {
		sph -= 2;
		eph += 2;
		slv -= 2;
		elv += 2;
	}
	if (features & OMAP3ISP_PREV_HRZ_MED) {
		sph -= 2;
		eph += 2;
	}
	if (features & (OMAP3ISP_PREV_CHROMA_SUPP | OMAP3ISP_PREV_LUMAENH))
		sph -= 2;

	isp_reg_writel(isp, (sph << ISPPRV_HORZ_INFO_SPH_SHIFT) | eph,
		       OMAP3_ISP_IOMEM_PREV, ISPPRV_HORZ_INFO);
	isp_reg_writel(isp, (slv << ISPPRV_VERT_INFO_SLV_SHIFT) | elv,
		       OMAP3_ISP_IOMEM_PREV, ISPPRV_VERT_INFO);
}

/*
 * preview_config_inlineoffset - Configures the Read address line offset.
 * @prev: Preview module
 * @offset: Line offset
 *
 * According to the TRM, the line offset must be aligned on a 32 bytes boundary.
 * However, a hardware bug requires the memory start address to be aligned on a
 * 64 bytes boundary, so the offset probably should be aligned on 64 bytes as
 * well.
 */
static void preview_config_inlineoffset(struct isp_prev_device *prev,
					u32 offset)
{
	struct isp_device *isp = to_isp_device(prev);

	isp_reg_writel(isp, offset & 0xffff, OMAP3_ISP_IOMEM_PREV,
		       ISPPRV_RADR_OFFSET);
}

/*
 * preview_set_inaddr - Sets memory address of input frame.
 * @addr: 32bit memory address aligned on 32byte boundary.
 *
 * Configures the memory address from which the input frame is to be read.
 */
static void preview_set_inaddr(struct isp_prev_device *prev, u32 addr)
{
	struct isp_device *isp = to_isp_device(prev);

	isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_PREV, ISPPRV_RSDR_ADDR);
}

/*
 * preview_config_outlineoffset - Configures the Write address line offset.
 * @offset: Line Offset for the preview output.
 *
 * The offset must be a multiple of 32 bytes.
 */
static void preview_config_outlineoffset(struct isp_prev_device *prev,
					 u32 offset)
{
	struct isp_device *isp = to_isp_device(prev);

	isp_reg_writel(isp, offset & 0xffff, OMAP3_ISP_IOMEM_PREV,
		       ISPPRV_WADD_OFFSET);
}

/*
 * preview_set_outaddr - Sets the memory address to store output frame
 * @addr: 32bit memory address aligned on 32byte boundary.
 *
 * Configures the memory address to which the output frame is written.
 */
static void preview_set_outaddr(struct isp_prev_device *prev, u32 addr)
{
	struct isp_device *isp = to_isp_device(prev);

	isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_PREV, ISPPRV_WSDR_ADDR);
}

/*
 * preview_adjust_bandwidth - Program the SBL read request expansion factor
 * @prev: ISP preview engine
 *
 * When reading the input frame from memory, throttle SDRAM read requests so
 * that the preview engine neither overflows the shared buffer logic nor
 * starves the requested frame rate. For non-memory input the expansion field
 * is simply cleared.
 */
static void preview_adjust_bandwidth(struct isp_prev_device *prev)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&prev->subdev.entity);
	struct isp_device *isp = to_isp_device(prev);
	const struct v4l2_mbus_framefmt *ifmt = &prev->formats[PREV_PAD_SINK];
	unsigned long l3_ick = pipe->l3_ick;
	struct v4l2_fract *timeperframe;
	unsigned int cycles_per_frame;
	unsigned int requests_per_frame;
	unsigned int cycles_per_request;
	unsigned int minimum;
	unsigned int maximum;
	unsigned int value;

	if (prev->input != PREVIEW_INPUT_MEMORY) {
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
			    ISPSBL_SDR_REQ_PRV_EXP_MASK);
		return;
	}

	/* Compute the minimum number of cycles per request, based on the
	 * pipeline maximum data rate. This is an absolute lower bound if we
	 * don't want SBL overflows, so round the value up.
	 */
	cycles_per_request = div_u64((u64)l3_ick / 2 * 256 + pipe->max_rate - 1,
				     pipe->max_rate);
	minimum = DIV_ROUND_UP(cycles_per_request, 32);

	/* Compute the maximum number of cycles per request, based on the
	 * requested frame rate. This is a soft upper bound to achieve a frame
	 * rate equal or higher than the requested value, so round the value
	 * down.
	 */
	timeperframe = &pipe->max_timeperframe;

	/* A 256-byte request carries 128 16-bit pixels worth of data
	 * (ifmt->width * 2 bytes per line) — presumably; TODO confirm against
	 * the SBL documentation.
	 */
	requests_per_frame = DIV_ROUND_UP(ifmt->width * 2, 256) * ifmt->height;
	cycles_per_frame = div_u64((u64)l3_ick * timeperframe->numerator,
				   timeperframe->denominator);
	cycles_per_request = cycles_per_frame / requests_per_frame;

	maximum = cycles_per_request / 32;

	value = max(minimum, maximum);

	dev_dbg(isp->dev, "%s: cycles per request = %u\n", __func__, value);
	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_SBL, ISPSBL_SDR_REQ_EXP,
			ISPSBL_SDR_REQ_PRV_EXP_MASK,
			value << ISPSBL_SDR_REQ_PRV_EXP_SHIFT);
}

/*
 * omap3isp_preview_busy - Gets busy state of preview module.
 */
int omap3isp_preview_busy(struct isp_prev_device *prev)
{
	struct isp_device *isp = to_isp_device(prev);

	return isp_reg_readl(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR)
		& ISPPRV_PCR_BUSY;
}

/*
 * omap3isp_preview_restore_context - Restores the values of preview registers
 *
 * Reprogram all feature registers from the parameter sets, e.g. after the
 * ISP lost its context. The update masks are set temporarily so
 * preview_setup_hw() touches every feature, then cleared again.
 */
void omap3isp_preview_restore_context(struct isp_device *isp)
{
	struct isp_prev_device *prev = &isp->isp_prev;
	const u32 update = OMAP3ISP_PREV_FEATURES_END - 1;

	prev->params.params[0].update = prev->params.active & update;
	prev->params.params[1].update = ~prev->params.active & update;

	preview_setup_hw(prev, update, prev->params.active);

	prev->params.params[0].update = 0;
	prev->params.params[1].update = 0;
}

/*
 * preview_print_status - Dump preview module registers to the kernel log
 */
#define PREV_PRINT_REGISTER(isp, name)\
	dev_dbg(isp->dev, "###PRV " #name "=0x%08x\n", \
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_##name))

static void preview_print_status(struct isp_prev_device *prev)
{
	struct isp_device *isp = to_isp_device(prev);

	dev_dbg(isp->dev, "-------------Preview Register dump----------\n");

	PREV_PRINT_REGISTER(isp, PCR);
	PREV_PRINT_REGISTER(isp, HORZ_INFO);
	PREV_PRINT_REGISTER(isp, VERT_INFO);
	PREV_PRINT_REGISTER(isp, RSDR_ADDR);
	PREV_PRINT_REGISTER(isp, RADR_OFFSET);
	PREV_PRINT_REGISTER(isp, DSDR_ADDR);
	PREV_PRINT_REGISTER(isp, DRKF_OFFSET);
	PREV_PRINT_REGISTER(isp, WSDR_ADDR);
	PREV_PRINT_REGISTER(isp, WADD_OFFSET);
	PREV_PRINT_REGISTER(isp, AVE);
	PREV_PRINT_REGISTER(isp, HMED);
	PREV_PRINT_REGISTER(isp, NF);
	PREV_PRINT_REGISTER(isp, WB_DGAIN);
	PREV_PRINT_REGISTER(isp, WBGAIN);
	PREV_PRINT_REGISTER(isp, WBSEL);
	PREV_PRINT_REGISTER(isp, CFA);
	PREV_PRINT_REGISTER(isp, BLKADJOFF);
	PREV_PRINT_REGISTER(isp, RGB_MAT1);
	PREV_PRINT_REGISTER(isp, RGB_MAT2);
	PREV_PRINT_REGISTER(isp, RGB_MAT3);
	PREV_PRINT_REGISTER(isp, RGB_MAT4);
	PREV_PRINT_REGISTER(isp, RGB_MAT5);
	PREV_PRINT_REGISTER(isp, RGB_OFF1);
	PREV_PRINT_REGISTER(isp, RGB_OFF2);
	PREV_PRINT_REGISTER(isp, CSC0);
	PREV_PRINT_REGISTER(isp, CSC1);
	PREV_PRINT_REGISTER(isp, CSC2);
	PREV_PRINT_REGISTER(isp, CSC_OFFSET);
	PREV_PRINT_REGISTER(isp, CNT_BRT);
	PREV_PRINT_REGISTER(isp, CSUP);
	PREV_PRINT_REGISTER(isp, SETUP_YC);
	PREV_PRINT_REGISTER(isp, SET_TBL_ADDR);
	PREV_PRINT_REGISTER(isp, CDC_THR0);
	PREV_PRINT_REGISTER(isp, CDC_THR1);
	PREV_PRINT_REGISTER(isp, CDC_THR2);
	PREV_PRINT_REGISTER(isp, CDC_THR3);

	dev_dbg(isp->dev, "--------------------------------------------\n");
}

/*
 * preview_init_params - init image processing parameters.
 * @prev: pointer to previewer private structure
 */
static void preview_init_params(struct isp_prev_device *prev)
{
	struct prev_params *params;
	unsigned int i;

	spin_lock_init(&prev->params.lock);

	/* All features start active in set 0; set 0 is marked for a full
	 * hardware update so the defaults get programmed on first configure.
	 */
	prev->params.active = ~0;
	prev->params.params[0].busy = 0;
	prev->params.params[0].update = OMAP3ISP_PREV_FEATURES_END - 1;
	prev->params.params[1].busy = 0;
	prev->params.params[1].update = 0;

	params = &prev->params.params[0];

	/* Init values */
	params->contrast = ISPPRV_CONTRAST_DEF * ISPPRV_CONTRAST_UNITS;
	params->brightness = ISPPRV_BRIGHT_DEF * ISPPRV_BRIGHT_UNITS;
	params->cfa.format = OMAP3ISP_CFAFMT_BAYER;
	memcpy(params->cfa.table, cfa_coef_table,
	       sizeof(params->cfa.table));
	params->cfa.gradthrs_horz = FLR_CFA_GRADTHRS_HORZ;
	params->cfa.gradthrs_vert = FLR_CFA_GRADTHRS_VERT;
	params->csup.gain = FLR_CSUP_GAIN;
	params->csup.thres = FLR_CSUP_THRES;
	params->csup.hypf_en = 0;
	memcpy(params->luma.table, luma_enhance_table,
	       sizeof(params->luma.table));
	params->nf.spread = FLR_NF_STRGTH;
	memcpy(params->nf.table, noise_filter_table, sizeof(params->nf.table));
	params->dcor.couplet_mode_en = 1;
	for (i = 0; i < OMAP3ISP_PREV_DETECT_CORRECT_CHANNELS; i++)
		params->dcor.detect_correct[i] = DEF_DETECT_CORRECT_VAL;
	memcpy(params->gamma.blue, gamma_table, sizeof(params->gamma.blue));
	memcpy(params->gamma.green, gamma_table, sizeof(params->gamma.green));
	memcpy(params->gamma.red, gamma_table, sizeof(params->gamma.red));
	params->wbal.dgain = FLR_WBAL_DGAIN;
	params->wbal.coef0 = FLR_WBAL_COEF;
	params->wbal.coef1 = FLR_WBAL_COEF;
	params->wbal.coef2 = FLR_WBAL_COEF;
	params->wbal.coef3 = FLR_WBAL_COEF;
	params->blkadj.red = FLR_BLKADJ_RED;
	params->blkadj.green = FLR_BLKADJ_GREEN;
	params->blkadj.blue = FLR_BLKADJ_BLUE;
	params->rgb2rgb = flr_rgb2rgb;
	params->csc = flr_prev_csc;
	params->yclimit.minC = ISPPRV_YC_MIN;
	params->yclimit.maxC = ISPPRV_YC_MAX;
	params->yclimit.minY = ISPPRV_YC_MIN;
	params->yclimit.maxY = ISPPRV_YC_MAX;

	params->features = OMAP3ISP_PREV_CFA | OMAP3ISP_PREV_DEFECT_COR
			 | OMAP3ISP_PREV_NF | OMAP3ISP_PREV_GAMMA
			 | OMAP3ISP_PREV_BLKADJ | OMAP3ISP_PREV_YC_LIMIT
			 | OMAP3ISP_PREV_RGB2RGB | OMAP3ISP_PREV_COLOR_CONV
			 | OMAP3ISP_PREV_WB | OMAP3ISP_PREV_BRIGHTNESS
			 | OMAP3ISP_PREV_CONTRAST;
}

/*
 * preview_max_out_width - Handle previewer hardware output limitations
 * @prev: pointer to previewer private structure
 * returns maximum width output for current isp revision
 */
static unsigned int preview_max_out_width(struct isp_prev_device *prev)
{
	struct isp_device *isp = to_isp_device(prev);

	switch (isp->revision) {
	case ISP_REVISION_1_0:
		return PREV_MAX_OUT_WIDTH_REV_1;

	case ISP_REVISION_2_0:
	default:
		return PREV_MAX_OUT_WIDTH_REV_2;

	case ISP_REVISION_15_0:
		return PREV_MAX_OUT_WIDTH_REV_15;
	}
}

/*
 * preview_configure - Program the whole preview engine before streaming
 * @prev: ISP preview engine
 *
 * Configure input/output ports, line offsets, input size and all feature
 * registers from the active parameter set.
 */
static void preview_configure(struct isp_prev_device *prev)
{
	struct isp_device *isp = to_isp_device(prev);
	const struct isp_format_info *info;
	struct v4l2_mbus_framefmt *format;
	unsigned long flags;
	u32 update;
	u32 active;

	spin_lock_irqsave(&prev->params.lock, flags);
	/* Mark all active parameters we are going to touch as busy. */
	update = preview_params_lock(prev, 0, false);
	active = prev->params.active;
	spin_unlock_irqrestore(&prev->params.lock, flags);

	/* PREV_PAD_SINK */
	format = &prev->formats[PREV_PAD_SINK];
	info = omap3isp_video_format_info(format->code);

	preview_adjust_bandwidth(prev);

	preview_config_input_format(prev, info);
	preview_config_input_size(prev, active);

	if (prev->input == PREVIEW_INPUT_CCDC)
		preview_config_inlineoffset(prev, 0);
	else
		preview_config_inlineoffset(prev, ALIGN(format->width, 0x20) *
					    info->bpp);

	preview_setup_hw(prev, update, active);

	/* PREV_PAD_SOURCE */
	format = &prev->formats[PREV_PAD_SOURCE];

	if (prev->output & PREVIEW_OUTPUT_MEMORY)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_SDRPORT);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_SDRPORT);

	if (prev->output & PREVIEW_OUTPUT_RESIZER)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_RSZPORT);
	else
		isp_reg_clr(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_RSZPORT);

	if (prev->output & PREVIEW_OUTPUT_MEMORY)
		preview_config_outlineoffset(prev,
				ALIGN(format->width, 0x10) * 2);

	preview_config_averager(prev, 0);
	preview_config_ycpos(prev, format->code);

	spin_lock_irqsave(&prev->params.lock, flags);
	preview_params_unlock(prev, update, false);
	spin_unlock_irqrestore(&prev->params.lock, flags);
}

/* -----------------------------------------------------------------------------
 * Interrupt handling
 */

static void preview_enable_oneshot(struct isp_prev_device *prev)
{
	struct isp_device *isp = to_isp_device(prev);

	/* The PCR.SOURCE bit is automatically reset to 0 when the PCR.ENABLE
	 * bit is set. As the preview engine is used in single-shot mode, we
	 * need to set PCR.SOURCE before enabling the preview engine.
	 */
	if (prev->input == PREVIEW_INPUT_MEMORY)
		isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
			    ISPPRV_PCR_SOURCE);

	isp_reg_set(isp, OMAP3_ISP_IOMEM_PREV, ISPPRV_PCR,
		    ISPPRV_PCR_EN | ISPPRV_PCR_ONESHOT);
}

void omap3isp_preview_isr_frame_sync(struct isp_prev_device *prev)
{
	/*
	 * If ISP_VIDEO_DMAQUEUE_QUEUED is set, DMA queue had an underrun
	 * condition, the module was paused and now we have a buffer queued
	 * on the output again. Restart the pipeline if running in continuous
	 * mode.
	 */
	if (prev->state == ISP_PIPELINE_STREAM_CONTINUOUS &&
	    prev->video_out.dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED) {
		preview_enable_oneshot(prev);
		isp_video_dmaqueue_flags_clr(&prev->video_out);
	}
}

/*
 * preview_isr_buffer - Complete the current buffers and queue the next ones
 * @prev: ISP preview engine
 *
 * Called from the ISR at end of frame. Rotate the video buffers on the
 * memory input/output ports and restart the engine according to the
 * pipeline streaming state.
 */
static void preview_isr_buffer(struct isp_prev_device *prev)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&prev->subdev.entity);
	struct isp_buffer *buffer;
	int restart = 0;

	if (prev->output & PREVIEW_OUTPUT_MEMORY) {
		buffer = omap3isp_video_buffer_next(&prev->video_out);
		if (buffer != NULL) {
			preview_set_outaddr(prev, buffer->dma);
			restart = 1;
		}
		pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
	}

	if (prev->input == PREVIEW_INPUT_MEMORY) {
		buffer = omap3isp_video_buffer_next(&prev->video_in);
		if (buffer != NULL)
			preview_set_inaddr(prev, buffer->dma);
		pipe->state |= ISP_PIPELINE_IDLE_INPUT;
	}

	switch (prev->state) {
	case ISP_PIPELINE_STREAM_SINGLESHOT:
		if (isp_pipeline_ready(pipe))
			omap3isp_pipeline_set_stream(pipe,
						ISP_PIPELINE_STREAM_SINGLESHOT);
		break;

	case ISP_PIPELINE_STREAM_CONTINUOUS:
		/* If an underrun occurs, the video queue operation handler will
		 * restart the preview engine. Otherwise restart it immediately.
		 */
		if (restart)
			preview_enable_oneshot(prev);
		break;

	case ISP_PIPELINE_STREAM_STOPPED:
	default:
		return;
	}
}

/*
 * omap3isp_preview_isr - ISP preview engine interrupt handler
 *
 * Manage the preview engine video buffers and configure shadowed registers.
 */
void omap3isp_preview_isr(struct isp_prev_device *prev)
{
	unsigned long flags;
	u32 update;
	u32 active;

	if (omap3isp_module_sync_is_stopping(&prev->wait, &prev->stopping))
		return;

	/* Switch to the newly queued parameter set, then lock what we're
	 * about to program into the hardware.
	 */
	spin_lock_irqsave(&prev->params.lock, flags);
	preview_params_switch(prev);
	update = preview_params_lock(prev, 0, false);
	active = prev->params.active;
	spin_unlock_irqrestore(&prev->params.lock, flags);

	preview_setup_hw(prev, update, active);
	preview_config_input_size(prev, active);

	if (prev->input == PREVIEW_INPUT_MEMORY ||
	    prev->output & PREVIEW_OUTPUT_MEMORY)
		preview_isr_buffer(prev);
	else if (prev->state == ISP_PIPELINE_STREAM_CONTINUOUS)
		preview_enable_oneshot(prev);

	spin_lock_irqsave(&prev->params.lock, flags);
	preview_params_unlock(prev, update, false);
	spin_unlock_irqrestore(&prev->params.lock, flags);
}

/* -----------------------------------------------------------------------------
 * ISP video operations
 */

/* Program a newly queued buffer's DMA address on the matching port. */
static int preview_video_queue(struct isp_video *video,
			       struct isp_buffer *buffer)
{
	struct isp_prev_device *prev = &video->isp->isp_prev;

	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		preview_set_inaddr(prev, buffer->dma);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		preview_set_outaddr(prev, buffer->dma);

	return 0;
}

static const struct isp_video_operations preview_video_ops = {
	.queue = preview_video_queue,
};

/* -----------------------------------------------------------------------------
 * V4L2 subdev operations
 */

/*
 * preview_s_ctrl - Handle set control subdev method
 * @ctrl: pointer to v4l2 control structure
 */
static int preview_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct isp_prev_device *prev =
		container_of(ctrl->handler, struct isp_prev_device, ctrls);

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		preview_update_brightness(prev, ctrl->val);
		break;
	case V4L2_CID_CONTRAST:
		preview_update_contrast(prev, ctrl->val);
		break;
	}

	return 0;
}

static const struct v4l2_ctrl_ops preview_ctrl_ops = {
	.s_ctrl = preview_s_ctrl,
};

/*
 * preview_ioctl - Handle preview module
   private ioctl's
 * @sd: pointer to v4l2 subdev structure
 * @cmd: configuration command
 * @arg: configuration argument
 * return -EINVAL or zero on success
 */
static long preview_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);

	switch (cmd) {
	case VIDIOC_OMAP3ISP_PRV_CFG:
		return preview_config(prev, arg);

	default:
		return -ENOIOCTLCMD;
	}
}

/*
 * preview_set_stream - Enable/Disable streaming on preview subdev
 * @sd    : pointer to v4l2 subdev structure
 * @enable: 1 == Enable, 0 == Disable
 * return -EINVAL or zero on success
 */
static int preview_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
	struct isp_video *video_out = &prev->video_out;
	struct isp_device *isp = to_isp_device(prev);
	struct device *dev = to_device(prev);

	/* On the first transition out of STOPPED, power the module up and
	 * program all registers before enabling any data flow.
	 */
	if (prev->state == ISP_PIPELINE_STREAM_STOPPED) {
		if (enable == ISP_PIPELINE_STREAM_STOPPED)
			return 0;

		omap3isp_subclk_enable(isp, OMAP3_ISP_SUBCLK_PREVIEW);
		preview_configure(prev);
		atomic_set(&prev->stopping, 0);
		preview_print_status(prev);
	}

	switch (enable) {
	case ISP_PIPELINE_STREAM_CONTINUOUS:
		if (prev->output & PREVIEW_OUTPUT_MEMORY)
			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);

		if (video_out->dmaqueue_flags & ISP_VIDEO_DMAQUEUE_QUEUED ||
		    !(prev->output & PREVIEW_OUTPUT_MEMORY))
			preview_enable_oneshot(prev);

		isp_video_dmaqueue_flags_clr(video_out);
		break;

	case ISP_PIPELINE_STREAM_SINGLESHOT:
		if (prev->input == PREVIEW_INPUT_MEMORY)
			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_READ);
		if (prev->output & PREVIEW_OUTPUT_MEMORY)
			omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);

		preview_enable_oneshot(prev);
		break;

	case ISP_PIPELINE_STREAM_STOPPED:
		if (omap3isp_module_sync_idle(&sd->entity, &prev->wait,
					      &prev->stopping))
			dev_dbg(dev, "%s: stop timeout.\n", sd->name);
		omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_READ);
		omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_PREVIEW_WRITE);
		omap3isp_subclk_disable(isp, OMAP3_ISP_SUBCLK_PREVIEW);
		isp_video_dmaqueue_flags_clr(video_out);
		break;
	}

	prev->state = enable;
	return 0;
}

/* Return the TRY or ACTIVE format for the given pad. */
static struct v4l2_mbus_framefmt *
__preview_get_format(struct isp_prev_device *prev,
		     struct v4l2_subdev_state *sd_state,
		     unsigned int pad, enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(&prev->subdev, sd_state,
						  pad);
	else
		return &prev->formats[pad];
}

/* Return the TRY or ACTIVE sink crop rectangle. */
static struct v4l2_rect *
__preview_get_crop(struct isp_prev_device *prev,
		   struct v4l2_subdev_state *sd_state,
		   enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_crop(&prev->subdev, sd_state,
						PREV_PAD_SINK);
	else
		return &prev->crop;
}

/* previewer format descriptions */
static const unsigned int preview_input_fmts[] = {
	MEDIA_BUS_FMT_Y8_1X8,
	MEDIA_BUS_FMT_SGRBG8_1X8,
	MEDIA_BUS_FMT_SRGGB8_1X8,
	MEDIA_BUS_FMT_SBGGR8_1X8,
	MEDIA_BUS_FMT_SGBRG8_1X8,
	MEDIA_BUS_FMT_Y10_1X10,
	MEDIA_BUS_FMT_SGRBG10_1X10,
	MEDIA_BUS_FMT_SRGGB10_1X10,
	MEDIA_BUS_FMT_SBGGR10_1X10,
	MEDIA_BUS_FMT_SGBRG10_1X10,
};

static const unsigned int preview_output_fmts[] = {
	MEDIA_BUS_FMT_UYVY8_1X16,
	MEDIA_BUS_FMT_YUYV8_1X16,
};

/*
 * preview_try_format - Validate a format
 * @prev: ISP preview engine
 * @cfg: V4L2 subdev pad configuration
 * @pad: pad number
 * @fmt: format to be validated
 * @which: try/active format selector
 *
 * Validate and adjust the given format for the given pad based on the preview
 * engine limits and the format and crop rectangles on other pads.
 */
static void preview_try_format(struct isp_prev_device *prev,
			       struct v4l2_subdev_state *sd_state,
			       unsigned int pad,
			       struct v4l2_mbus_framefmt *fmt,
			       enum v4l2_subdev_format_whence which)
{
	u32 pixelcode;
	struct v4l2_rect *crop;
	unsigned int i;

	switch (pad) {
	case PREV_PAD_SINK:
		/* When reading data from the CCDC, the input size has already
		 * been mangled by the CCDC output pad so it can be accepted
		 * as-is.
		 *
		 * When reading data from memory, clamp the requested width and
		 * height. The TRM doesn't specify a minimum input height, make
		 * sure we got enough lines to enable the noise filter and color
		 * filter array interpolation.
		 */
		if (prev->input == PREVIEW_INPUT_MEMORY) {
			fmt->width = clamp_t(u32, fmt->width, PREV_MIN_IN_WIDTH,
					     preview_max_out_width(prev));
			fmt->height = clamp_t(u32, fmt->height,
					      PREV_MIN_IN_HEIGHT,
					      PREV_MAX_IN_HEIGHT);
		}

		fmt->colorspace = V4L2_COLORSPACE_SRGB;

		for (i = 0; i < ARRAY_SIZE(preview_input_fmts); i++) {
			if (fmt->code == preview_input_fmts[i])
				break;
		}

		/* If not found, use SGRBG10 as default */
		if (i >= ARRAY_SIZE(preview_input_fmts))
			fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
		break;

	case PREV_PAD_SOURCE:
		pixelcode = fmt->code;
		*fmt = *__preview_get_format(prev, sd_state, PREV_PAD_SINK,
					     which);

		switch (pixelcode) {
		case MEDIA_BUS_FMT_YUYV8_1X16:
		case MEDIA_BUS_FMT_UYVY8_1X16:
			fmt->code = pixelcode;
			break;

		default:
			fmt->code = MEDIA_BUS_FMT_YUYV8_1X16;
			break;
		}

		/* The preview module output size is configurable through the
		 * averager (horizontal scaling by 1/1, 1/2, 1/4 or 1/8). This
		 * is not supported yet, hardcode the output size to the crop
		 * rectangle size.
		 */
		crop = __preview_get_crop(prev, sd_state, which);
		fmt->width = crop->width;
		fmt->height = crop->height;

		fmt->colorspace = V4L2_COLORSPACE_JPEG;
		break;
	}

	fmt->field = V4L2_FIELD_NONE;
}

/*
 * preview_try_crop - Validate a crop rectangle
 * @prev: ISP preview engine
 * @sink: format on the sink pad
 * @crop: crop rectangle to be validated
 *
 * The preview engine crops lines and columns for its internal operation,
 * depending on which filters are enabled. Enforce minimum crop margins to
 * handle that transparently for userspace.
 *
 * See the explanation at the PREV_MARGIN_* definitions for more details.
 */
static void preview_try_crop(struct isp_prev_device *prev,
			     const struct v4l2_mbus_framefmt *sink,
			     struct v4l2_rect *crop)
{
	unsigned int left = PREV_MARGIN_LEFT;
	unsigned int right = sink->width - PREV_MARGIN_RIGHT;
	unsigned int top = PREV_MARGIN_TOP;
	unsigned int bottom = sink->height - PREV_MARGIN_BOTTOM;

	/* When processing data on-the-fly from the CCDC, at least 2 pixels
	 * must be cropped from the left and right sides of the image. As we
	 * don't know which filters will be enabled, increase the left and
	 * right margins by two.
	 */
	if (prev->input == PREVIEW_INPUT_CCDC) {
		left += 2;
		right -= 2;
	}

	/* The CFA filter crops 4 lines and 4 columns in Bayer mode, and 2
	 * lines and no columns in other modes. Increase the margins based on
	 * the sink format.
	 */
	if (sink->code != MEDIA_BUS_FMT_Y8_1X8 &&
	    sink->code != MEDIA_BUS_FMT_Y10_1X10) {
		left += 2;
		right -= 2;
		top += 2;
		bottom -= 2;
	}

	/* Restrict left/top to even values to keep the Bayer pattern. */
	crop->left &= ~1;
	crop->top &= ~1;

	/* left/top must be clamped before width/height so the size clamps
	 * can use the final position.
	 */
	crop->left = clamp_t(u32, crop->left, left,
			     right - PREV_MIN_OUT_WIDTH);
	crop->top = clamp_t(u32, crop->top, top,
			    bottom - PREV_MIN_OUT_HEIGHT);
	crop->width = clamp_t(u32, crop->width, PREV_MIN_OUT_WIDTH,
			      right - crop->left);
	crop->height = clamp_t(u32, crop->height, PREV_MIN_OUT_HEIGHT,
			       bottom - crop->top);
}

/*
 * preview_enum_mbus_code - Handle pixel format enumeration
 * @sd     : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad configuration
 * @code   : pointer to v4l2_subdev_mbus_code_enum structure
 * return -EINVAL or zero on success
 */
static int preview_enum_mbus_code(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_mbus_code_enum *code)
{
	switch (code->pad) {
	case PREV_PAD_SINK:
		if (code->index >= ARRAY_SIZE(preview_input_fmts))
			return -EINVAL;

		code->code = preview_input_fmts[code->index];
		break;
	case PREV_PAD_SOURCE:
		if (code->index >= ARRAY_SIZE(preview_output_fmts))
			return -EINVAL;

		code->code = preview_output_fmts[code->index];
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* Probe the minimum and maximum frame sizes by running try_format on
 * degenerate 1x1 and maximum-sized formats.
 */
static int preview_enum_frame_size(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *sd_state,
				   struct v4l2_subdev_frame_size_enum *fse)
{
	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	format.code = fse->code;
	format.width = 1;
	format.height = 1;
	preview_try_format(prev, sd_state, fse->pad, &format, fse->which);
	fse->min_width = format.width;
	fse->min_height = format.height;

	if (format.code != fse->code)
		return -EINVAL;

	format.code = fse->code;
	format.width = -1;
	format.height = -1;
	preview_try_format(prev, sd_state, fse->pad, &format, fse->which);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}

/*
 * preview_get_selection - Retrieve a selection rectangle on a pad
 * @sd: ISP preview V4L2 subdevice
 * @cfg: V4L2 subdev pad configuration
 * @sel: Selection rectangle
 *
 * The only supported rectangles are the crop rectangles on the sink pad.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int preview_get_selection(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_selection *sel)
{
	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	if (sel->pad != PREV_PAD_SINK)
		return -EINVAL;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP_BOUNDS:
		/* The bounds are the largest rectangle try_crop accepts:
		 * start from "unbounded" and let it clamp.
		 */
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = INT_MAX;
		sel->r.height = INT_MAX;

		format = __preview_get_format(prev, sd_state, PREV_PAD_SINK,
					      sel->which);
		preview_try_crop(prev, format, &sel->r);
		break;

	case V4L2_SEL_TGT_CROP:
		sel->r = *__preview_get_crop(prev, sd_state, sel->which);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * preview_set_selection - Set a selection rectangle on a pad
 * @sd: ISP preview V4L2 subdevice
 * @cfg: V4L2 subdev pad configuration
 * @sel: Selection rectangle
 *
 * The only supported rectangle is the actual crop rectangle on the sink pad.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int preview_set_selection(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_selection *sel)
{
	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	if (sel->target != V4L2_SEL_TGT_CROP ||
	    sel->pad != PREV_PAD_SINK)
		return -EINVAL;

	/* The crop rectangle can't be changed while streaming. */
	if (prev->state != ISP_PIPELINE_STREAM_STOPPED)
		return -EBUSY;

	/* Modifying the crop rectangle always changes the format on the source
	 * pad. If the KEEP_CONFIG flag is set, just return the current crop
	 * rectangle.
	 */
	if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
		sel->r = *__preview_get_crop(prev, sd_state, sel->which);
		return 0;
	}

	format = __preview_get_format(prev, sd_state, PREV_PAD_SINK,
				      sel->which);
	preview_try_crop(prev, format, &sel->r);
	*__preview_get_crop(prev, sd_state, sel->which) = sel->r;

	/* Update the source format. */
	format = __preview_get_format(prev, sd_state, PREV_PAD_SOURCE,
				      sel->which);
	preview_try_format(prev, sd_state, PREV_PAD_SOURCE, format,
			   sel->which);

	return 0;
}

/*
 * preview_get_format - Handle get format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad configuration
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int preview_get_format(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_format *fmt)
{
	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __preview_get_format(prev, sd_state, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	fmt->format = *format;
	return 0;
}

/*
 * preview_set_format - Handle set format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @cfg: V4L2 subdev pad configuration
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int preview_set_format(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_format *fmt)
{
	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;
	struct v4l2_rect *crop;

	format = __preview_get_format(prev, sd_state, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	preview_try_format(prev, sd_state, fmt->pad, &fmt->format, fmt->which);
	*format = fmt->format;

	/* Propagate the format from sink to source */
	if (fmt->pad == PREV_PAD_SINK) {
		/* Reset the crop rectangle. */
		crop = __preview_get_crop(prev, sd_state, fmt->which);
		crop->left = 0;
		crop->top = 0;
		crop->width = fmt->format.width;
		crop->height = fmt->format.height;

		preview_try_crop(prev, &fmt->format, crop);

		/* Update the source format. */
		format = __preview_get_format(prev, sd_state, PREV_PAD_SOURCE,
					      fmt->which);
		preview_try_format(prev, sd_state, PREV_PAD_SOURCE, format,
				   fmt->which);
	}

	return 0;
}

/*
 * preview_init_formats - Initialize formats on all pads
 * @sd: ISP preview V4L2 subdevice
 * @fh: V4L2 subdev file handle
 *
 * Initialize all pad formats with default values. If fh is not NULL, try
 * formats are initialized on the file handle. Otherwise active formats are
 * initialized on the device.
 */
static int preview_init_formats(struct v4l2_subdev *sd,
				struct v4l2_subdev_fh *fh)
{
	struct v4l2_subdev_format format;

	memset(&format, 0, sizeof(format));
	format.pad = PREV_PAD_SINK;
	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
	format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
	format.format.width = 4096;
	format.format.height = 4096;
	preview_set_format(sd, fh ? fh->state : NULL, &format);

	return 0;
}

/* subdev core operations */
static const struct v4l2_subdev_core_ops preview_v4l2_core_ops = {
	.ioctl = preview_ioctl,
};

/* subdev video operations */
static const struct v4l2_subdev_video_ops preview_v4l2_video_ops = {
	.s_stream = preview_set_stream,
};

/* subdev pad operations */
static const struct v4l2_subdev_pad_ops preview_v4l2_pad_ops = {
	.enum_mbus_code = preview_enum_mbus_code,
	.enum_frame_size = preview_enum_frame_size,
	.get_fmt = preview_get_format,
	.set_fmt = preview_set_format,
	.get_selection = preview_get_selection,
	.set_selection = preview_set_selection,
};

/* subdev operations */
static const struct v4l2_subdev_ops preview_v4l2_ops = {
	.core = &preview_v4l2_core_ops,
	.video = &preview_v4l2_video_ops,
	.pad = &preview_v4l2_pad_ops,
};

/* subdev internal operations */
static const struct v4l2_subdev_internal_ops preview_v4l2_internal_ops = {
	.open = preview_init_formats,
};

/* -----------------------------------------------------------------------------
 * Media entity operations
 */

/*
 * preview_link_setup - Setup previewer connections.
 * @entity : Pointer to media entity structure
 * @local  : Pointer to local pad array
 * @remote : Pointer to remote pad array
 * @flags  : Link flags
 * return -EINVAL or zero on success
 */
static int preview_link_setup(struct media_entity *entity,
			      const struct media_pad *local,
			      const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct isp_prev_device *prev = v4l2_get_subdevdata(sd);
	unsigned int index = local->index;

	/* FIXME: this is actually a hack!
*/ if (is_media_entity_v4l2_subdev(remote->entity)) index |= 2 << 16; switch (index) { case PREV_PAD_SINK: /* read from memory */ if (flags & MEDIA_LNK_FL_ENABLED) { if (prev->input == PREVIEW_INPUT_CCDC) return -EBUSY; prev->input = PREVIEW_INPUT_MEMORY; } else { if (prev->input == PREVIEW_INPUT_MEMORY) prev->input = PREVIEW_INPUT_NONE; } break; case PREV_PAD_SINK | 2 << 16: /* read from ccdc */ if (flags & MEDIA_LNK_FL_ENABLED) { if (prev->input == PREVIEW_INPUT_MEMORY) return -EBUSY; prev->input = PREVIEW_INPUT_CCDC; } else { if (prev->input == PREVIEW_INPUT_CCDC) prev->input = PREVIEW_INPUT_NONE; } break; /* * The ISP core doesn't support pipelines with multiple video outputs. * Revisit this when it will be implemented, and return -EBUSY for now. */ case PREV_PAD_SOURCE: /* write to memory */ if (flags & MEDIA_LNK_FL_ENABLED) { if (prev->output & ~PREVIEW_OUTPUT_MEMORY) return -EBUSY; prev->output |= PREVIEW_OUTPUT_MEMORY; } else { prev->output &= ~PREVIEW_OUTPUT_MEMORY; } break; case PREV_PAD_SOURCE | 2 << 16: /* write to resizer */ if (flags & MEDIA_LNK_FL_ENABLED) { if (prev->output & ~PREVIEW_OUTPUT_RESIZER) return -EBUSY; prev->output |= PREVIEW_OUTPUT_RESIZER; } else { prev->output &= ~PREVIEW_OUTPUT_RESIZER; } break; default: return -EINVAL; } return 0; } /* media operations */ static const struct media_entity_operations preview_media_ops = { .link_setup = preview_link_setup, .link_validate = v4l2_subdev_link_validate, }; void omap3isp_preview_unregister_entities(struct isp_prev_device *prev) { v4l2_device_unregister_subdev(&prev->subdev); omap3isp_video_unregister(&prev->video_in); omap3isp_video_unregister(&prev->video_out); } int omap3isp_preview_register_entities(struct isp_prev_device *prev, struct v4l2_device *vdev) { int ret; /* Register the subdev and video nodes. 
*/ prev->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &prev->subdev); if (ret < 0) goto error; ret = omap3isp_video_register(&prev->video_in, vdev); if (ret < 0) goto error; ret = omap3isp_video_register(&prev->video_out, vdev); if (ret < 0) goto error; return 0; error: omap3isp_preview_unregister_entities(prev); return ret; } /* ----------------------------------------------------------------------------- * ISP previewer initialisation and cleanup */ /* * preview_init_entities - Initialize subdev and media entity. * @prev : Pointer to preview structure * return -ENOMEM or zero on success */ static int preview_init_entities(struct isp_prev_device *prev) { struct v4l2_subdev *sd = &prev->subdev; struct media_pad *pads = prev->pads; struct media_entity *me = &sd->entity; int ret; prev->input = PREVIEW_INPUT_NONE; v4l2_subdev_init(sd, &preview_v4l2_ops); sd->internal_ops = &preview_v4l2_internal_ops; strscpy(sd->name, "OMAP3 ISP preview", sizeof(sd->name)); sd->grp_id = 1 << 16; /* group ID for isp subdevs */ v4l2_set_subdevdata(sd, prev); sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; v4l2_ctrl_handler_init(&prev->ctrls, 2); v4l2_ctrl_new_std(&prev->ctrls, &preview_ctrl_ops, V4L2_CID_BRIGHTNESS, ISPPRV_BRIGHT_LOW, ISPPRV_BRIGHT_HIGH, ISPPRV_BRIGHT_STEP, ISPPRV_BRIGHT_DEF); v4l2_ctrl_new_std(&prev->ctrls, &preview_ctrl_ops, V4L2_CID_CONTRAST, ISPPRV_CONTRAST_LOW, ISPPRV_CONTRAST_HIGH, ISPPRV_CONTRAST_STEP, ISPPRV_CONTRAST_DEF); v4l2_ctrl_handler_setup(&prev->ctrls); sd->ctrl_handler = &prev->ctrls; pads[PREV_PAD_SINK].flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; pads[PREV_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; me->ops = &preview_media_ops; ret = media_entity_pads_init(me, PREV_PADS_NUM, pads); if (ret < 0) goto error_handler_free; preview_init_formats(sd, NULL); /* According to the OMAP34xx TRM, video buffers need to be aligned on a * 32 bytes boundary. 
However, an undocumented hardware bug requires a * 64 bytes boundary at the preview engine input. */ prev->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; prev->video_in.ops = &preview_video_ops; prev->video_in.isp = to_isp_device(prev); prev->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3; prev->video_in.bpl_alignment = 64; prev->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; prev->video_out.ops = &preview_video_ops; prev->video_out.isp = to_isp_device(prev); prev->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 2 * 3; prev->video_out.bpl_alignment = 32; ret = omap3isp_video_init(&prev->video_in, "preview"); if (ret < 0) goto error_video_in; ret = omap3isp_video_init(&prev->video_out, "preview"); if (ret < 0) goto error_video_out; return 0; error_video_out: omap3isp_video_cleanup(&prev->video_in); error_video_in: media_entity_cleanup(&prev->subdev.entity); error_handler_free: v4l2_ctrl_handler_free(&prev->ctrls); return ret; } /* * omap3isp_preview_init - Previewer initialization. * @isp : Pointer to ISP device * return -ENOMEM or zero on success */ int omap3isp_preview_init(struct isp_device *isp) { struct isp_prev_device *prev = &isp->isp_prev; init_waitqueue_head(&prev->wait); preview_init_params(prev); return preview_init_entities(prev); } void omap3isp_preview_cleanup(struct isp_device *isp) { struct isp_prev_device *prev = &isp->isp_prev; v4l2_ctrl_handler_free(&prev->ctrls); omap3isp_video_cleanup(&prev->video_in); omap3isp_video_cleanup(&prev->video_out); media_entity_cleanup(&prev->subdev.entity); }
linux-master
drivers/media/platform/ti/omap3isp/isppreview.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ispccp2.c
 *
 * TI OMAP3 ISP - CCP2 module
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2010 Texas Instruments, Inc.
 *
 * Contacts: Laurent Pinchart <[email protected]>
 *	     Sakari Ailus <[email protected]>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/regulator/consumer.h>

#include "isp.h"
#include "ispreg.h"
#include "ispccp2.h"

/* Number of LCX channels */
#define CCP2_LCx_CHANS_NUM			3
/* Max/Min size for CCP2 video port */
#define ISPCCP2_DAT_START_MIN			0
#define ISPCCP2_DAT_START_MAX			4095
#define ISPCCP2_DAT_SIZE_MIN			0
#define ISPCCP2_DAT_SIZE_MAX			4095
#define ISPCCP2_VPCLK_FRACDIV			65536
#define ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP	0x12
#define ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP	0x16
/* Max/Min size for CCP2 memory channel */
#define ISPCCP2_LCM_HSIZE_COUNT_MIN		16
#define ISPCCP2_LCM_HSIZE_COUNT_MAX		8191
#define ISPCCP2_LCM_HSIZE_SKIP_MIN		0
#define ISPCCP2_LCM_HSIZE_SKIP_MAX		8191
#define ISPCCP2_LCM_VSIZE_MIN			1
#define ISPCCP2_LCM_VSIZE_MAX			8191
#define ISPCCP2_LCM_HWORDS_MIN			1
#define ISPCCP2_LCM_HWORDS_MAX			4095
#define ISPCCP2_LCM_CTRL_BURST_SIZE_32X		5
#define ISPCCP2_LCM_CTRL_READ_THROTTLE_FULL	0
#define ISPCCP2_LCM_CTRL_SRC_DECOMPR_DPCM10	2
#define ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW8	2
#define ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW10	3
#define ISPCCP2_LCM_CTRL_DST_FORMAT_RAW10	3
#define ISPCCP2_LCM_CTRL_DST_PORT_VP		0
#define ISPCCP2_LCM_CTRL_DST_PORT_MEM		1

/* Set only the required bits */
#define BIT_SET(var, shift, mask, val)			\
	do {						\
		var = ((var) & ~((mask) << (shift)))	\
			| ((val) << (shift));		\
	} while (0)

/*
 * ccp2_print_status - Print current CCP2 module register values.
 */
#define CCP2_PRINT_REGISTER(isp, name)\
	dev_dbg(isp->dev, "###CCP2 " #name "=0x%08x\n", \
		isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_##name))

static void ccp2_print_status(struct isp_ccp2_device *ccp2)
{
	struct isp_device *isp = to_isp_device(ccp2);

	dev_dbg(isp->dev, "-------------CCP2 Register dump-------------\n");

	CCP2_PRINT_REGISTER(isp, SYSCONFIG);
	CCP2_PRINT_REGISTER(isp, SYSSTATUS);
	CCP2_PRINT_REGISTER(isp, LC01_IRQENABLE);
	CCP2_PRINT_REGISTER(isp, LC01_IRQSTATUS);
	CCP2_PRINT_REGISTER(isp, LC23_IRQENABLE);
	CCP2_PRINT_REGISTER(isp, LC23_IRQSTATUS);
	CCP2_PRINT_REGISTER(isp, LCM_IRQENABLE);
	CCP2_PRINT_REGISTER(isp, LCM_IRQSTATUS);
	CCP2_PRINT_REGISTER(isp, CTRL);
	CCP2_PRINT_REGISTER(isp, LCx_CTRL(0));
	CCP2_PRINT_REGISTER(isp, LCx_CODE(0));
	CCP2_PRINT_REGISTER(isp, LCx_STAT_START(0));
	CCP2_PRINT_REGISTER(isp, LCx_STAT_SIZE(0));
	CCP2_PRINT_REGISTER(isp, LCx_SOF_ADDR(0));
	CCP2_PRINT_REGISTER(isp, LCx_EOF_ADDR(0));
	CCP2_PRINT_REGISTER(isp, LCx_DAT_START(0));
	CCP2_PRINT_REGISTER(isp, LCx_DAT_SIZE(0));
	CCP2_PRINT_REGISTER(isp, LCx_DAT_PING_ADDR(0));
	CCP2_PRINT_REGISTER(isp, LCx_DAT_PONG_ADDR(0));
	CCP2_PRINT_REGISTER(isp, LCx_DAT_OFST(0));
	CCP2_PRINT_REGISTER(isp, LCM_CTRL);
	CCP2_PRINT_REGISTER(isp, LCM_VSIZE);
	CCP2_PRINT_REGISTER(isp, LCM_HSIZE);
	CCP2_PRINT_REGISTER(isp, LCM_PREFETCH);
	CCP2_PRINT_REGISTER(isp, LCM_SRC_ADDR);
	CCP2_PRINT_REGISTER(isp, LCM_SRC_OFST);
	CCP2_PRINT_REGISTER(isp, LCM_DST_ADDR);
	CCP2_PRINT_REGISTER(isp, LCM_DST_OFST);

	dev_dbg(isp->dev, "--------------------------------------------\n");
}

/*
 * ccp2_reset - Reset the CCP2
 * @ccp2: pointer to ISP CCP2 device
 */
static void ccp2_reset(struct isp_ccp2_device *ccp2)
{
	struct isp_device *isp = to_isp_device(ccp2);
	int i = 0;

	/* Reset the CSI1/CCP2B and wait for reset to complete */
	isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSCONFIG,
		    ISPCCP2_SYSCONFIG_SOFT_RESET);
	/* Poll RESET_DONE; give up after ~10 * 10us to avoid hanging. */
	while (!(isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSSTATUS) &
		 ISPCCP2_SYSSTATUS_RESET_DONE)) {
		udelay(10);
		if (i++ > 10) {  /* try read 10 times */
			dev_warn(isp->dev,
				"omap3_isp: timeout waiting for ccp2 reset\n");
			break;
		}
	}
}

/*
 * ccp2_pwr_cfg - Configure the power mode settings
 * @ccp2: pointer to ISP CCP2 device
 */
static void ccp2_pwr_cfg(struct isp_ccp2_device *ccp2)
{
	struct isp_device *isp = to_isp_device(ccp2);

	/* Smart standby; auto-idle only on ISP revision 15.0 when enabled. */
	isp_reg_writel(isp, ISPCCP2_SYSCONFIG_MSTANDBY_MODE_SMART |
			((isp->revision == ISP_REVISION_15_0 && isp->autoidle) ?
			  ISPCCP2_SYSCONFIG_AUTO_IDLE : 0),
		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_SYSCONFIG);
}

/*
 * ccp2_if_enable - Enable CCP2 interface.
 * @ccp2: pointer to ISP CCP2 device
 * @enable: enable/disable flag
 */
static int ccp2_if_enable(struct isp_ccp2_device *ccp2, u8 enable)
{
	struct isp_device *isp = to_isp_device(ccp2);
	int ret;
	int i;

	if (enable && ccp2->vdds_csib) {
		ret = regulator_enable(ccp2->vdds_csib);
		if (ret < 0)
			return ret;
	}

	/* Enable/Disable all the LCx channels */
	for (i = 0; i < CCP2_LCx_CHANS_NUM; i++)
		isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(i),
				ISPCCP2_LCx_CTRL_CHAN_EN,
				enable ? ISPCCP2_LCx_CTRL_CHAN_EN : 0);

	/* Enable/Disable ccp2 interface in ccp2 mode */
	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
			ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN,
			enable ? (ISPCCP2_CTRL_MODE | ISPCCP2_CTRL_IF_EN) : 0);

	if (!enable && ccp2->vdds_csib)
		regulator_disable(ccp2->vdds_csib);

	return 0;
}

/*
 * ccp2_mem_enable - Enable CCP2 memory interface.
 * @ccp2: pointer to ISP CCP2 device
 * @enable: enable/disable flag
 */
static void ccp2_mem_enable(struct isp_ccp2_device *ccp2, u8 enable)
{
	struct isp_device *isp = to_isp_device(ccp2);

	/* The serial interface and the memory channel are exclusive. */
	if (enable)
		ccp2_if_enable(ccp2, 0);

	/* Enable/Disable ccp2 interface in ccp2 mode */
	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
			ISPCCP2_CTRL_MODE, enable ? ISPCCP2_CTRL_MODE : 0);

	isp_reg_clr_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_CTRL,
			ISPCCP2_LCM_CTRL_CHAN_EN,
			enable ? ISPCCP2_LCM_CTRL_CHAN_EN : 0);
}

/*
 * ccp2_phyif_config - Initialize CCP2 phy interface config
 * @ccp2: Pointer to ISP CCP2 device
 * @buscfg: CCP2 platform data
 *
 * Configure the CCP2 physical interface module from platform data.
 *
 * Returns -EIO if strobe is chosen in CSI1 mode, or 0 on success.
 */
static int ccp2_phyif_config(struct isp_ccp2_device *ccp2,
			     const struct isp_ccp2_cfg *buscfg)
{
	struct isp_device *isp = to_isp_device(ccp2);
	u32 val;

	val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL) |
			    ISPCCP2_CTRL_MODE;
	/* Data/strobe physical layer */
	BIT_SET(val, ISPCCP2_CTRL_PHY_SEL_SHIFT, ISPCCP2_CTRL_PHY_SEL_MASK,
		buscfg->phy_layer);
	BIT_SET(val, ISPCCP2_CTRL_IO_OUT_SEL_SHIFT,
		ISPCCP2_CTRL_IO_OUT_SEL_MASK, buscfg->ccp2_mode);
	BIT_SET(val, ISPCCP2_CTRL_INV_SHIFT, ISPCCP2_CTRL_INV_MASK,
		buscfg->strobe_clk_pol);
	BIT_SET(val, ISPCCP2_CTRL_VP_CLK_POL_SHIFT,
		ISPCCP2_CTRL_VP_CLK_POL_MASK, buscfg->vp_clk_pol);
	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);

	/* Read back MODE: if it didn't stick, the CCP2 bus is unavailable. */
	val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
	if (!(val & ISPCCP2_CTRL_MODE)) {
		if (buscfg->ccp2_mode == ISP_CCP2_MODE_CCP2)
			dev_warn(isp->dev, "OMAP3 CCP2 bus not available\n");
		if (buscfg->phy_layer == ISP_CCP2_PHY_DATA_STROBE)
			/* Strobe mode requires CCP2 */
			return -EIO;
	}

	return 0;
}

/*
 * ccp2_vp_config - Initialize CCP2 video port interface.
 * @ccp2: Pointer to ISP CCP2 device
 * @vpclk_div: Video port divisor
 *
 * Configure the CCP2 video port with the given clock divisor. The valid divisor
 * values depend on the ISP revision:
 *
 * - revision 1.0 and 2.0	1 to 4
 * - revision 15.0		1 to 65536
 *
 * The exact divisor value used might differ from the requested value, as ISP
 * revision 15.0 represent the divisor by 65536 divided by an integer.
 */
static void ccp2_vp_config(struct isp_ccp2_device *ccp2,
			   unsigned int vpclk_div)
{
	struct isp_device *isp = to_isp_device(ccp2);
	u32 val;

	/* ISPCCP2_CTRL Video port */
	val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
	val |= ISPCCP2_CTRL_VP_ONLY_EN;	/* Disable the memory write port */

	if (isp->revision == ISP_REVISION_15_0) {
		vpclk_div = clamp_t(unsigned int, vpclk_div, 1, 65536);
		vpclk_div = min(ISPCCP2_VPCLK_FRACDIV / vpclk_div, 65535U);
		BIT_SET(val, ISPCCP2_CTRL_VPCLK_DIV_SHIFT,
			ISPCCP2_CTRL_VPCLK_DIV_MASK, vpclk_div);
	} else {
		vpclk_div = clamp_t(unsigned int, vpclk_div, 1, 4);
		BIT_SET(val, ISPCCP2_CTRL_VP_OUT_CTRL_SHIFT,
			ISPCCP2_CTRL_VP_OUT_CTRL_MASK, vpclk_div - 1);
	}

	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL);
}

/*
 * ccp2_lcx_config - Initialize CCP2 logical channel interface.
 * @ccp2: Pointer to ISP CCP2 device
 * @config: Pointer to ISP LCx config structure.
 *
 * This will analyze the parameters passed by the interface config
 * and configure CSI1/CCP2 logical channel
 *
 */
static void ccp2_lcx_config(struct isp_ccp2_device *ccp2,
			    struct isp_interface_lcx_config *config)
{
	struct isp_device *isp = to_isp_device(ccp2);
	u32 val, format;

	switch (config->format) {
	case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
		format = ISPCCP2_LCx_CTRL_FORMAT_RAW8_DPCM10_VP;
		break;
	case MEDIA_BUS_FMT_SGRBG10_1X10:
	default:
		format = ISPCCP2_LCx_CTRL_FORMAT_RAW10_VP;	/* RAW10+VP */
		break;
	}
	/* ISPCCP2_LCx_CTRL logical channel #0 */
	val = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(0))
			    | (ISPCCP2_LCx_CTRL_REGION_EN); /* Region */

	if (isp->revision == ISP_REVISION_15_0) {
		/* CRC */
		BIT_SET(val, ISPCCP2_LCx_CTRL_CRC_SHIFT_15_0,
			ISPCCP2_LCx_CTRL_CRC_MASK,
			config->crc);
		/* Format = RAW10+VP or RAW8+DPCM10+VP*/
		BIT_SET(val, ISPCCP2_LCx_CTRL_FORMAT_SHIFT_15_0,
			ISPCCP2_LCx_CTRL_FORMAT_MASK_15_0, format);
	} else {
		BIT_SET(val, ISPCCP2_LCx_CTRL_CRC_SHIFT,
			ISPCCP2_LCx_CTRL_CRC_MASK,
			config->crc);

		BIT_SET(val, ISPCCP2_LCx_CTRL_FORMAT_SHIFT,
			ISPCCP2_LCx_CTRL_FORMAT_MASK, format);
	}

	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_CTRL(0));

	/* ISPCCP2_DAT_START for logical channel #0 */
	isp_reg_writel(isp, config->data_start << ISPCCP2_LCx_DAT_SHIFT,
		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_START(0));

	/* ISPCCP2_DAT_SIZE for logical channel #0 */
	isp_reg_writel(isp, config->data_size << ISPCCP2_LCx_DAT_SHIFT,
		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCx_DAT_SIZE(0));

	/* Enable error IRQs for logical channel #0 */
	val = ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ |
	      ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
	      ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
	      ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
	      ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
	      ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;

	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LC01_IRQSTATUS);
	isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LC01_IRQENABLE, val);
}

/*
 * ccp2_if_configure - Configure ccp2 with data from sensor
 * @ccp2: Pointer to ISP CCP2 device
 *
 * Return 0 on success or a negative error code
 */
static int ccp2_if_configure(struct isp_ccp2_device *ccp2)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
	const struct isp_bus_cfg *buscfg;
	struct v4l2_mbus_framefmt *format;
	struct media_pad *pad;
	struct v4l2_subdev *sensor;
	u32 lines = 0;
	int ret;

	ccp2_pwr_cfg(ccp2);

	pad = media_pad_remote_pad_first(&ccp2->pads[CCP2_PAD_SINK]);
	sensor = media_entity_to_v4l2_subdev(pad->entity);
	buscfg = v4l2_subdev_to_bus_cfg(pipe->external);
	if (WARN_ON(!buscfg))
		return -EPIPE;

	ret = ccp2_phyif_config(ccp2, &buscfg->bus.ccp2);
	if (ret < 0)
		return ret;

	ccp2_vp_config(ccp2, buscfg->bus.ccp2.vpclk_div + 1);

	v4l2_subdev_call(sensor, sensor, g_skip_top_lines, &lines);

	format = &ccp2->formats[CCP2_PAD_SINK];

	ccp2->if_cfg.data_start = lines;
	ccp2->if_cfg.crc = buscfg->bus.ccp2.crc;
	ccp2->if_cfg.format = format->code;
	ccp2->if_cfg.data_size = format->height;

	ccp2_lcx_config(ccp2, &ccp2->if_cfg);

	return 0;
}

static int ccp2_adjust_bandwidth(struct isp_ccp2_device *ccp2)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
	struct isp_device *isp = to_isp_device(ccp2);
	const struct v4l2_mbus_framefmt *ofmt = &ccp2->formats[CCP2_PAD_SOURCE];
	unsigned long l3_ick = pipe->l3_ick;
	struct v4l2_fract *timeperframe;
	unsigned int vpclk_div = 2;
	unsigned int value;
	u64 bound;
	u64 area;

	/* Compute the minimum clock divisor, based on the pipeline maximum
	 * data rate. This is an absolute lower bound if we don't want SBL
	 * overflows, so round the value up.
	 */
	vpclk_div = max_t(unsigned int, DIV_ROUND_UP(l3_ick, pipe->max_rate),
			  vpclk_div);

	/* Compute the maximum clock divisor, based on the requested frame rate.
	 * This is a soft lower bound to achieve a frame rate equal or higher
	 * than the requested value, so round the value down.
	 */
	timeperframe = &pipe->max_timeperframe;

	if (timeperframe->numerator) {
		area = ofmt->width * ofmt->height;
		bound = div_u64(area * timeperframe->denominator,
				timeperframe->numerator);
		value = min_t(u64, bound, l3_ick);
		vpclk_div = max_t(unsigned int, l3_ick / value, vpclk_div);
	}

	dev_dbg(isp->dev, "%s: minimum clock divisor = %u\n", __func__,
		vpclk_div);

	return vpclk_div;
}

/*
 * ccp2_mem_configure - Initialize CCP2 memory input/output interface
 * @ccp2: Pointer to ISP CCP2 device
 * @config: Pointer to ISP mem interface config structure
 *
 * This will analyze the parameters passed by the interface config
 * structure, and configure the respective registers for proper
 * CSI1/CCP2 memory input.
 */
static void ccp2_mem_configure(struct isp_ccp2_device *ccp2,
			       struct isp_interface_mem_config *config)
{
	struct isp_device *isp = to_isp_device(ccp2);
	u32 sink_pixcode = ccp2->formats[CCP2_PAD_SINK].code;
	u32 source_pixcode = ccp2->formats[CCP2_PAD_SOURCE].code;
	unsigned int dpcm_decompress = 0;
	u32 val, hwords;

	if (sink_pixcode != source_pixcode &&
	    sink_pixcode == MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8)
		dpcm_decompress = 1;

	ccp2_pwr_cfg(ccp2);

	/* Hsize, Skip */
	isp_reg_writel(isp, ISPCCP2_LCM_HSIZE_SKIP_MIN |
		       (config->hsize_count << ISPCCP2_LCM_HSIZE_SHIFT),
		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_HSIZE);

	/* Vsize, no. of lines */
	isp_reg_writel(isp, config->vsize_count << ISPCCP2_LCM_VSIZE_SHIFT,
		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_VSIZE);

	if (ccp2->video_in.bpl_padding == 0)
		config->src_ofst = 0;
	else
		config->src_ofst = ccp2->video_in.bpl_value;

	isp_reg_writel(isp, config->src_ofst, OMAP3_ISP_IOMEM_CCP2,
		       ISPCCP2_LCM_SRC_OFST);

	/* Source and Destination formats */
	val = ISPCCP2_LCM_CTRL_DST_FORMAT_RAW10 <<
	      ISPCCP2_LCM_CTRL_DST_FORMAT_SHIFT;

	if (dpcm_decompress) {
		/* source format is RAW8 */
		val |= ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW8 <<
		       ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT;

		/* RAW8 + DPCM10 - simple predictor */
		val |= ISPCCP2_LCM_CTRL_SRC_DPCM_PRED;

		/* enable source DPCM decompression */
		val |= ISPCCP2_LCM_CTRL_SRC_DECOMPR_DPCM10 <<
		       ISPCCP2_LCM_CTRL_SRC_DECOMPR_SHIFT;
	} else {
		/* source format is RAW10 */
		val |= ISPCCP2_LCM_CTRL_SRC_FORMAT_RAW10 <<
		       ISPCCP2_LCM_CTRL_SRC_FORMAT_SHIFT;
	}

	/* Burst size to 32x64 */
	val |= ISPCCP2_LCM_CTRL_BURST_SIZE_32X <<
	       ISPCCP2_LCM_CTRL_BURST_SIZE_SHIFT;

	isp_reg_writel(isp, val, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_CTRL);

	/* Prefetch setup */
	if (dpcm_decompress)
		hwords = (ISPCCP2_LCM_HSIZE_SKIP_MIN +
			  config->hsize_count) >> 3;
	else
		hwords = (ISPCCP2_LCM_HSIZE_SKIP_MIN +
			  config->hsize_count) >> 2;

	isp_reg_writel(isp, hwords << ISPCCP2_LCM_PREFETCH_SHIFT,
		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_PREFETCH);

	/* Video port */
	isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_CTRL,
		    ISPCCP2_CTRL_IO_OUT_SEL | ISPCCP2_CTRL_MODE);
	ccp2_vp_config(ccp2, ccp2_adjust_bandwidth(ccp2));

	/* Clear LCM interrupts */
	isp_reg_writel(isp, ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ |
		       ISPCCP2_LCM_IRQSTATUS_EOF_IRQ,
		       OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQSTATUS);

	/* Enable LCM interrupts */
	isp_reg_set(isp, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_IRQENABLE,
		    ISPCCP2_LCM_IRQSTATUS_EOF_IRQ |
		    ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ);
}

/*
 * ccp2_set_inaddr - Sets memory address of input frame.
 * @ccp2: Pointer to ISP CCP2 device
 * @addr: 32bit memory address aligned on 32byte boundary.
 *
 * Configures the memory address from which the input frame is to be read.
 */
static void ccp2_set_inaddr(struct isp_ccp2_device *ccp2, u32 addr)
{
	struct isp_device *isp = to_isp_device(ccp2);

	isp_reg_writel(isp, addr, OMAP3_ISP_IOMEM_CCP2, ISPCCP2_LCM_SRC_ADDR);
}

/* -----------------------------------------------------------------------------
 * Interrupt handling
 */

static void ccp2_isr_buffer(struct isp_ccp2_device *ccp2)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
	struct isp_buffer *buffer;

	buffer = omap3isp_video_buffer_next(&ccp2->video_in);
	if (buffer != NULL)
		ccp2_set_inaddr(ccp2, buffer->dma);

	pipe->state |= ISP_PIPELINE_IDLE_INPUT;

	if (ccp2->state == ISP_PIPELINE_STREAM_SINGLESHOT) {
		if (isp_pipeline_ready(pipe))
			omap3isp_pipeline_set_stream(pipe,
						ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}

/*
 * omap3isp_ccp2_isr - Handle ISP CCP2 interrupts
 * @ccp2: Pointer to ISP CCP2 device
 *
 * This will handle the CCP2 interrupts
 */
void omap3isp_ccp2_isr(struct isp_ccp2_device *ccp2)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&ccp2->subdev.entity);
	struct isp_device *isp = to_isp_device(ccp2);
	static const u32 ISPCCP2_LC01_ERROR =
		ISPCCP2_LC01_IRQSTATUS_LC0_FIFO_OVF_IRQ |
		ISPCCP2_LC01_IRQSTATUS_LC0_CRC_IRQ |
		ISPCCP2_LC01_IRQSTATUS_LC0_FSP_IRQ |
		ISPCCP2_LC01_IRQSTATUS_LC0_FW_IRQ |
		ISPCCP2_LC01_IRQSTATUS_LC0_FSC_IRQ |
		ISPCCP2_LC01_IRQSTATUS_LC0_SSC_IRQ;
	u32 lcx_irqstatus, lcm_irqstatus;

	/* First clear the interrupts */
	lcx_irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2,
				      ISPCCP2_LC01_IRQSTATUS);
	isp_reg_writel(isp, lcx_irqstatus, OMAP3_ISP_IOMEM_CCP2,
		       ISPCCP2_LC01_IRQSTATUS);

	lcm_irqstatus = isp_reg_readl(isp, OMAP3_ISP_IOMEM_CCP2,
				      ISPCCP2_LCM_IRQSTATUS);
	isp_reg_writel(isp, lcm_irqstatus, OMAP3_ISP_IOMEM_CCP2,
		       ISPCCP2_LCM_IRQSTATUS);
	/* Errors */
	if (lcx_irqstatus & ISPCCP2_LC01_ERROR) {
		pipe->error = true;
		dev_dbg(isp->dev, "CCP2 err:%x\n", lcx_irqstatus);
		return;
	}

	if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_OCPERROR_IRQ) {
		pipe->error = true;
		dev_dbg(isp->dev, "CCP2 OCP err:%x\n", lcm_irqstatus);
	}

	if (omap3isp_module_sync_is_stopping(&ccp2->wait, &ccp2->stopping))
		return;

	/* Handle queued buffers on frame end interrupts */
	if (lcm_irqstatus & ISPCCP2_LCM_IRQSTATUS_EOF_IRQ)
		ccp2_isr_buffer(ccp2);
}

/* -----------------------------------------------------------------------------
 * V4L2 subdev operations
 */

static const unsigned int ccp2_fmts[] = {
	MEDIA_BUS_FMT_SGRBG10_1X10,
	MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
};

/*
 * __ccp2_get_format - helper function for getting ccp2 format
 * @ccp2  : Pointer to ISP CCP2 device
 * @sd_state: V4L2 subdev state
 * @pad   : pad number
 * @which : wanted subdev format
 * return format structure or NULL on error
 */
static struct v4l2_mbus_framefmt *
__ccp2_get_format(struct isp_ccp2_device *ccp2,
		  struct v4l2_subdev_state *sd_state,
		  unsigned int pad, enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(&ccp2->subdev, sd_state,
						  pad);
	else
		return &ccp2->formats[pad];
}

/*
 * ccp2_try_format - Handle try format by pad subdev method
 * @ccp2  : Pointer to ISP CCP2 device
 * @sd_state: V4L2 subdev state
 * @pad   : pad num
 * @fmt   : pointer to v4l2 mbus format structure
 * @which : wanted subdev format
 */
static void ccp2_try_format(struct isp_ccp2_device *ccp2,
			    struct v4l2_subdev_state *sd_state,
			    unsigned int pad,
			    struct v4l2_mbus_framefmt *fmt,
			    enum v4l2_subdev_format_whence which)
{
	struct v4l2_mbus_framefmt *format;

	switch (pad) {
	case CCP2_PAD_SINK:
		if (fmt->code != MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8)
			fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;

		/* Size limits differ between the serial (sensor) input and
		 * the memory read channel.
		 */
		if (ccp2->input == CCP2_INPUT_SENSOR) {
			fmt->width = clamp_t(u32, fmt->width,
					     ISPCCP2_DAT_START_MIN,
					     ISPCCP2_DAT_START_MAX);
			fmt->height = clamp_t(u32, fmt->height,
					      ISPCCP2_DAT_SIZE_MIN,
					      ISPCCP2_DAT_SIZE_MAX);
		} else if (ccp2->input == CCP2_INPUT_MEMORY) {
			fmt->width = clamp_t(u32, fmt->width,
					     ISPCCP2_LCM_HSIZE_COUNT_MIN,
					     ISPCCP2_LCM_HSIZE_COUNT_MAX);
			fmt->height = clamp_t(u32, fmt->height,
					      ISPCCP2_LCM_VSIZE_MIN,
					      ISPCCP2_LCM_VSIZE_MAX);
		}
		break;

	case CCP2_PAD_SOURCE:
		/* Source format - copy sink format and change pixel code
		 * to SGRBG10_1X10 as we don't support CCP2 write to memory.
		 * When CCP2 write to memory feature will be added this
		 * should be changed properly.
		 */
		format = __ccp2_get_format(ccp2, sd_state, CCP2_PAD_SINK,
					   which);
		memcpy(fmt, format, sizeof(*fmt));
		fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
		break;
	}

	fmt->field = V4L2_FIELD_NONE;
	fmt->colorspace = V4L2_COLORSPACE_SRGB;
}

/*
 * ccp2_enum_mbus_code - Handle pixel format enumeration
 * @sd     : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @code   : pointer to v4l2_subdev_mbus_code_enum structure
 * return -EINVAL or zero on success
 */
static int ccp2_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	if (code->pad == CCP2_PAD_SINK) {
		if (code->index >= ARRAY_SIZE(ccp2_fmts))
			return -EINVAL;

		code->code = ccp2_fmts[code->index];
	} else {
		/* The source pad only exposes the current sink code. */
		if (code->index != 0)
			return -EINVAL;

		format = __ccp2_get_format(ccp2, sd_state, CCP2_PAD_SINK,
					   code->which);
		code->code = format->code;
	}

	return 0;
}

static int ccp2_enum_frame_size(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *sd_state,
				struct v4l2_subdev_frame_size_enum *fse)
{
	struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	/* Probe the minimum and maximum sizes by letting ccp2_try_format()
	 * clamp 1x1 and UINT_MAX x UINT_MAX.
	 */
	format.code = fse->code;
	format.width = 1;
	format.height = 1;
	ccp2_try_format(ccp2, sd_state, fse->pad, &format, fse->which);
	fse->min_width = format.width;
	fse->min_height = format.height;

	if (format.code != fse->code)
		return -EINVAL;

	format.code = fse->code;
	format.width = -1;
	format.height = -1;
	ccp2_try_format(ccp2, sd_state, fse->pad, &format, fse->which);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}

/*
 * ccp2_get_format - Handle get format by pads subdev method
 * @sd    : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt   : pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int ccp2_get_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __ccp2_get_format(ccp2, sd_state, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	fmt->format = *format;
	return 0;
}

/*
 * ccp2_set_format - Handle set format by pads subdev method
 * @sd    : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt   : pointer to v4l2 subdev format structure
 * returns zero
 */
static int ccp2_set_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __ccp2_get_format(ccp2, sd_state, fmt->pad, fmt->which);
	if (format == NULL)
		return -EINVAL;

	ccp2_try_format(ccp2, sd_state, fmt->pad, &fmt->format, fmt->which);
	*format = fmt->format;

	/* Propagate the format from sink to source */
	if (fmt->pad == CCP2_PAD_SINK) {
		format = __ccp2_get_format(ccp2, sd_state, CCP2_PAD_SOURCE,
					   fmt->which);
		*format = fmt->format;
		ccp2_try_format(ccp2, sd_state, CCP2_PAD_SOURCE, format,
				fmt->which);
} return 0; } /* * ccp2_init_formats - Initialize formats on all pads * @sd: ISP CCP2 V4L2 subdevice * @fh: V4L2 subdev file handle * * Initialize all pad formats with default values. If fh is not NULL, try * formats are initialized on the file handle. Otherwise active formats are * initialized on the device. */ static int ccp2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct v4l2_subdev_format format; memset(&format, 0, sizeof(format)); format.pad = CCP2_PAD_SINK; format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10; format.format.width = 4096; format.format.height = 4096; ccp2_set_format(sd, fh ? fh->state : NULL, &format); return 0; } /* * ccp2_s_stream - Enable/Disable streaming on ccp2 subdev * @sd : pointer to v4l2 subdev structure * @enable: 1 == Enable, 0 == Disable * return zero */ static int ccp2_s_stream(struct v4l2_subdev *sd, int enable) { struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd); struct isp_device *isp = to_isp_device(ccp2); struct device *dev = to_device(ccp2); int ret; if (ccp2->state == ISP_PIPELINE_STREAM_STOPPED) { if (enable == ISP_PIPELINE_STREAM_STOPPED) return 0; atomic_set(&ccp2->stopping, 0); } switch (enable) { case ISP_PIPELINE_STREAM_CONTINUOUS: if (ccp2->phy) { ret = omap3isp_csiphy_acquire(ccp2->phy, &sd->entity); if (ret < 0) return ret; } ccp2_if_configure(ccp2); ccp2_print_status(ccp2); /* Enable CSI1/CCP2 interface */ ret = ccp2_if_enable(ccp2, 1); if (ret < 0) { if (ccp2->phy) omap3isp_csiphy_release(ccp2->phy); return ret; } break; case ISP_PIPELINE_STREAM_SINGLESHOT: if (ccp2->state != ISP_PIPELINE_STREAM_SINGLESHOT) { struct v4l2_mbus_framefmt *format; format = &ccp2->formats[CCP2_PAD_SINK]; ccp2->mem_cfg.hsize_count = format->width; ccp2->mem_cfg.vsize_count = format->height; ccp2->mem_cfg.src_ofst = 0; ccp2_mem_configure(ccp2, &ccp2->mem_cfg); omap3isp_sbl_enable(isp, OMAP3_ISP_SBL_CSI1_READ); ccp2_print_status(ccp2); } 
ccp2_mem_enable(ccp2, 1); break; case ISP_PIPELINE_STREAM_STOPPED: if (omap3isp_module_sync_idle(&sd->entity, &ccp2->wait, &ccp2->stopping)) dev_dbg(dev, "%s: module stop timeout.\n", sd->name); if (ccp2->input == CCP2_INPUT_MEMORY) { ccp2_mem_enable(ccp2, 0); omap3isp_sbl_disable(isp, OMAP3_ISP_SBL_CSI1_READ); } else if (ccp2->input == CCP2_INPUT_SENSOR) { /* Disable CSI1/CCP2 interface */ ccp2_if_enable(ccp2, 0); if (ccp2->phy) omap3isp_csiphy_release(ccp2->phy); } break; } ccp2->state = enable; return 0; } /* subdev video operations */ static const struct v4l2_subdev_video_ops ccp2_sd_video_ops = { .s_stream = ccp2_s_stream, }; /* subdev pad operations */ static const struct v4l2_subdev_pad_ops ccp2_sd_pad_ops = { .enum_mbus_code = ccp2_enum_mbus_code, .enum_frame_size = ccp2_enum_frame_size, .get_fmt = ccp2_get_format, .set_fmt = ccp2_set_format, }; /* subdev operations */ static const struct v4l2_subdev_ops ccp2_sd_ops = { .video = &ccp2_sd_video_ops, .pad = &ccp2_sd_pad_ops, }; /* subdev internal operations */ static const struct v4l2_subdev_internal_ops ccp2_sd_internal_ops = { .open = ccp2_init_formats, }; /* -------------------------------------------------------------------------- * ISP ccp2 video device node */ /* * ccp2_video_queue - Queue video buffer. * @video : Pointer to isp video structure * @buffer: Pointer to isp_buffer structure * return -EIO or zero on success */ static int ccp2_video_queue(struct isp_video *video, struct isp_buffer *buffer) { struct isp_ccp2_device *ccp2 = &video->isp->isp_ccp2; ccp2_set_inaddr(ccp2, buffer->dma); return 0; } static const struct isp_video_operations ccp2_video_ops = { .queue = ccp2_video_queue, }; /* ----------------------------------------------------------------------------- * Media entity operations */ /* * ccp2_link_setup - Setup ccp2 connections. 
* @entity : Pointer to media entity structure * @local : Pointer to local pad array * @remote : Pointer to remote pad array * @flags : Link flags * return -EINVAL on error or zero on success */ static int ccp2_link_setup(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct isp_ccp2_device *ccp2 = v4l2_get_subdevdata(sd); unsigned int index = local->index; /* FIXME: this is actually a hack! */ if (is_media_entity_v4l2_subdev(remote->entity)) index |= 2 << 16; switch (index) { case CCP2_PAD_SINK: /* read from memory */ if (flags & MEDIA_LNK_FL_ENABLED) { if (ccp2->input == CCP2_INPUT_SENSOR) return -EBUSY; ccp2->input = CCP2_INPUT_MEMORY; } else { if (ccp2->input == CCP2_INPUT_MEMORY) ccp2->input = CCP2_INPUT_NONE; } break; case CCP2_PAD_SINK | 2 << 16: /* read from sensor/phy */ if (flags & MEDIA_LNK_FL_ENABLED) { if (ccp2->input == CCP2_INPUT_MEMORY) return -EBUSY; ccp2->input = CCP2_INPUT_SENSOR; } else { if (ccp2->input == CCP2_INPUT_SENSOR) ccp2->input = CCP2_INPUT_NONE; } break; case CCP2_PAD_SOURCE | 2 << 16: /* write to video port/ccdc */ if (flags & MEDIA_LNK_FL_ENABLED) ccp2->output = CCP2_OUTPUT_CCDC; else ccp2->output = CCP2_OUTPUT_NONE; break; default: return -EINVAL; } return 0; } /* media operations */ static const struct media_entity_operations ccp2_media_ops = { .link_setup = ccp2_link_setup, .link_validate = v4l2_subdev_link_validate, }; /* * omap3isp_ccp2_unregister_entities - Unregister media entities: subdev * @ccp2: Pointer to ISP CCP2 device */ void omap3isp_ccp2_unregister_entities(struct isp_ccp2_device *ccp2) { v4l2_device_unregister_subdev(&ccp2->subdev); omap3isp_video_unregister(&ccp2->video_in); } /* * omap3isp_ccp2_register_entities - Register the subdev media entity * @ccp2: Pointer to ISP CCP2 device * @vdev: Pointer to v4l device * return negative error code or zero on success */ int 
omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2, struct v4l2_device *vdev) { int ret; /* Register the subdev and video nodes. */ ccp2->subdev.dev = vdev->mdev->dev; ret = v4l2_device_register_subdev(vdev, &ccp2->subdev); if (ret < 0) goto error; ret = omap3isp_video_register(&ccp2->video_in, vdev); if (ret < 0) goto error; return 0; error: omap3isp_ccp2_unregister_entities(ccp2); return ret; } /* ----------------------------------------------------------------------------- * ISP ccp2 initialisation and cleanup */ /* * ccp2_init_entities - Initialize ccp2 subdev and media entity. * @ccp2: Pointer to ISP CCP2 device * return negative error code or zero on success */ static int ccp2_init_entities(struct isp_ccp2_device *ccp2) { struct v4l2_subdev *sd = &ccp2->subdev; struct media_pad *pads = ccp2->pads; struct media_entity *me = &sd->entity; int ret; ccp2->input = CCP2_INPUT_NONE; ccp2->output = CCP2_OUTPUT_NONE; v4l2_subdev_init(sd, &ccp2_sd_ops); sd->internal_ops = &ccp2_sd_internal_ops; strscpy(sd->name, "OMAP3 ISP CCP2", sizeof(sd->name)); sd->grp_id = 1 << 16; /* group ID for isp subdevs */ v4l2_set_subdevdata(sd, ccp2); sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; pads[CCP2_PAD_SINK].flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; pads[CCP2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; me->ops = &ccp2_media_ops; ret = media_entity_pads_init(me, CCP2_PADS_NUM, pads); if (ret < 0) return ret; ccp2_init_formats(sd, NULL); /* * The CCP2 has weird line alignment requirements, possibly caused by * DPCM8 decompression. Line length for data read from memory must be a * multiple of 128 bits (16 bytes) in continuous mode (when no padding * is present at end of lines). Additionally, if padding is used, the * padded line length must be a multiple of 32 bytes. To simplify the * implementation we use a fixed 32 bytes alignment regardless of the * input format and width. 
If strict 128 bits alignment support is * required ispvideo will need to be made aware of this special dual * alignment requirements. */ ccp2->video_in.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; ccp2->video_in.bpl_alignment = 32; ccp2->video_in.bpl_max = 0xffffffe0; ccp2->video_in.isp = to_isp_device(ccp2); ccp2->video_in.ops = &ccp2_video_ops; ccp2->video_in.capture_mem = PAGE_ALIGN(4096 * 4096) * 3; ret = omap3isp_video_init(&ccp2->video_in, "CCP2"); if (ret < 0) goto error; return 0; error: media_entity_cleanup(&ccp2->subdev.entity); return ret; } /* * omap3isp_ccp2_init - CCP2 initialization. * @isp : Pointer to ISP device * return negative error code or zero on success */ int omap3isp_ccp2_init(struct isp_device *isp) { struct isp_ccp2_device *ccp2 = &isp->isp_ccp2; int ret; init_waitqueue_head(&ccp2->wait); /* * On the OMAP34xx the CSI1 receiver is operated in the CSIb IO * complex, which is powered by vdds_csib power rail. Hence the * request for the regulator. * * On the OMAP36xx, the CCP2 uses the CSI PHY1 or PHY2, shared with * the CSI2c or CSI2a receivers. The PHY then needs to be explicitly * configured. * * TODO: Don't hardcode the usage of PHY1 (shared with CSI2c). 
*/ if (isp->revision == ISP_REVISION_2_0) { ccp2->vdds_csib = devm_regulator_get(isp->dev, "vdds_csib"); if (IS_ERR(ccp2->vdds_csib)) { if (PTR_ERR(ccp2->vdds_csib) == -EPROBE_DEFER) { dev_dbg(isp->dev, "Can't get regulator vdds_csib, deferring probing\n"); return -EPROBE_DEFER; } dev_dbg(isp->dev, "Could not get regulator vdds_csib\n"); ccp2->vdds_csib = NULL; } ccp2->phy = &isp->isp_csiphy2; } else if (isp->revision == ISP_REVISION_15_0) { ccp2->phy = &isp->isp_csiphy1; } ret = ccp2_init_entities(ccp2); if (ret < 0) return ret; ccp2_reset(ccp2); return 0; } /* * omap3isp_ccp2_cleanup - CCP2 un-initialization * @isp : Pointer to ISP device */ void omap3isp_ccp2_cleanup(struct isp_device *isp) { struct isp_ccp2_device *ccp2 = &isp->isp_ccp2; omap3isp_video_cleanup(&ccp2->video_in); media_entity_cleanup(&ccp2->subdev.entity); }
linux-master
drivers/media/platform/ti/omap3isp/ispccp2.c
// SPDX-License-Identifier: GPL-2.0-only /* * ispstat.c * * TI OMAP3 ISP - Statistics core * * Copyright (C) 2010 Nokia Corporation * Copyright (C) 2009 Texas Instruments, Inc * * Contacts: David Cohen <[email protected]> * Laurent Pinchart <[email protected]> * Sakari Ailus <[email protected]> */ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/timekeeping.h> #include <linux/uaccess.h> #include "isp.h" #define ISP_STAT_USES_DMAENGINE(stat) ((stat)->dma_ch != NULL) /* * MAGIC_SIZE must always be the greatest common divisor of * AEWB_PACKET_SIZE and AF_PAXEL_SIZE. */ #define MAGIC_SIZE 16 #define MAGIC_NUM 0x55 /* HACK: AF module seems to be writing one more paxel data than it should. */ #define AF_EXTRA_DATA OMAP3ISP_AF_PAXEL_SIZE /* * HACK: H3A modules go to an invalid state after have a SBL overflow. It makes * the next buffer to start to be written in the same point where the overflow * occurred instead of the configured address. The only known way to make it to * go back to a valid state is having a valid buffer processing. Of course it * requires at least a doubled buffer size to avoid an access to invalid memory * region. But it does not fix everything. It may happen more than one * consecutive SBL overflows. In that case, it might be unpredictable how many * buffers the allocated memory should fit. For that case, a recover * configuration was created. It produces the minimum buffer size for each H3A * module and decrease the change for more SBL overflows. This recover state * will be enabled every time a SBL overflow occur. As the output buffer size * isn't big, it's possible to have an extra size able to fit many recover * buffers making it extreamily unlikely to have an access to invalid memory * region. */ #define NUM_H3A_RECOVER_BUFS 10 /* * HACK: Because of HW issues the generic layer sometimes need to have * different behaviour for different statistic modules. 
*/ #define IS_H3A_AF(stat) ((stat) == &(stat)->isp->isp_af) #define IS_H3A_AEWB(stat) ((stat) == &(stat)->isp->isp_aewb) #define IS_H3A(stat) (IS_H3A_AF(stat) || IS_H3A_AEWB(stat)) static void __isp_stat_buf_sync_magic(struct ispstat *stat, struct ispstat_buffer *buf, u32 buf_size, enum dma_data_direction dir, void (*dma_sync)(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction)) { /* Sync the initial and final magic words. */ dma_sync(stat->isp->dev, buf->dma_addr, 0, MAGIC_SIZE, dir); dma_sync(stat->isp->dev, buf->dma_addr + (buf_size & PAGE_MASK), buf_size & ~PAGE_MASK, MAGIC_SIZE, dir); } static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat, struct ispstat_buffer *buf, u32 buf_size, enum dma_data_direction dir) { if (ISP_STAT_USES_DMAENGINE(stat)) return; __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, dma_sync_single_range_for_device); } static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat, struct ispstat_buffer *buf, u32 buf_size, enum dma_data_direction dir) { if (ISP_STAT_USES_DMAENGINE(stat)) return; __isp_stat_buf_sync_magic(stat, buf, buf_size, dir, dma_sync_single_range_for_cpu); } static int isp_stat_buf_check_magic(struct ispstat *stat, struct ispstat_buffer *buf) { const u32 buf_size = IS_H3A_AF(stat) ? buf->buf_size + AF_EXTRA_DATA : buf->buf_size; u8 *w; u8 *end; int ret = -EINVAL; isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE); /* Checking initial magic numbers. They shouldn't be here anymore. */ for (w = buf->virt_addr, end = w + MAGIC_SIZE; w < end; w++) if (likely(*w != MAGIC_NUM)) ret = 0; if (ret) { dev_dbg(stat->isp->dev, "%s: beginning magic check does not match.\n", stat->subdev.name); return ret; } /* Checking magic numbers at the end. They must be still here. 
*/ for (w = buf->virt_addr + buf_size, end = w + MAGIC_SIZE; w < end; w++) { if (unlikely(*w != MAGIC_NUM)) { dev_dbg(stat->isp->dev, "%s: ending magic check does not match.\n", stat->subdev.name); return -EINVAL; } } isp_stat_buf_sync_magic_for_device(stat, buf, buf_size, DMA_FROM_DEVICE); return 0; } static void isp_stat_buf_insert_magic(struct ispstat *stat, struct ispstat_buffer *buf) { const u32 buf_size = IS_H3A_AF(stat) ? stat->buf_size + AF_EXTRA_DATA : stat->buf_size; isp_stat_buf_sync_magic_for_cpu(stat, buf, buf_size, DMA_FROM_DEVICE); /* * Inserting MAGIC_NUM at the beginning and end of the buffer. * buf->buf_size is set only after the buffer is queued. For now the * right buf_size for the current configuration is pointed by * stat->buf_size. */ memset(buf->virt_addr, MAGIC_NUM, MAGIC_SIZE); memset(buf->virt_addr + buf_size, MAGIC_NUM, MAGIC_SIZE); isp_stat_buf_sync_magic_for_device(stat, buf, buf_size, DMA_BIDIRECTIONAL); } static void isp_stat_buf_sync_for_device(struct ispstat *stat, struct ispstat_buffer *buf) { if (ISP_STAT_USES_DMAENGINE(stat)) return; dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl, buf->sgt.nents, DMA_FROM_DEVICE); } static void isp_stat_buf_sync_for_cpu(struct ispstat *stat, struct ispstat_buffer *buf) { if (ISP_STAT_USES_DMAENGINE(stat)) return; dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl, buf->sgt.nents, DMA_FROM_DEVICE); } static void isp_stat_buf_clear(struct ispstat *stat) { int i; for (i = 0; i < STAT_MAX_BUFS; i++) stat->buf[i].empty = 1; } static struct ispstat_buffer * __isp_stat_buf_find(struct ispstat *stat, int look_empty) { struct ispstat_buffer *found = NULL; int i; for (i = 0; i < STAT_MAX_BUFS; i++) { struct ispstat_buffer *curr = &stat->buf[i]; /* * Don't select the buffer which is being copied to * userspace or used by the module. 
*/ if (curr == stat->locked_buf || curr == stat->active_buf) continue; /* Don't select uninitialised buffers if it's not required */ if (!look_empty && curr->empty) continue; /* Pick uninitialised buffer over anything else if look_empty */ if (curr->empty) { found = curr; break; } /* Choose the oldest buffer */ if (!found || (s32)curr->frame_number - (s32)found->frame_number < 0) found = curr; } return found; } static inline struct ispstat_buffer * isp_stat_buf_find_oldest(struct ispstat *stat) { return __isp_stat_buf_find(stat, 0); } static inline struct ispstat_buffer * isp_stat_buf_find_oldest_or_empty(struct ispstat *stat) { return __isp_stat_buf_find(stat, 1); } static int isp_stat_buf_queue(struct ispstat *stat) { if (!stat->active_buf) return STAT_NO_BUF; ktime_get_ts64(&stat->active_buf->ts); stat->active_buf->buf_size = stat->buf_size; if (isp_stat_buf_check_magic(stat, stat->active_buf)) { dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n", stat->subdev.name); return STAT_NO_BUF; } stat->active_buf->config_counter = stat->config_counter; stat->active_buf->frame_number = stat->frame_number; stat->active_buf->empty = 0; stat->active_buf = NULL; return STAT_BUF_DONE; } /* Get next free buffer to write the statistics to and mark it active. */ static void isp_stat_buf_next(struct ispstat *stat) { if (unlikely(stat->active_buf)) /* Overwriting unused active buffer */ dev_dbg(stat->isp->dev, "%s: new buffer requested without queuing active one.\n", stat->subdev.name); else stat->active_buf = isp_stat_buf_find_oldest_or_empty(stat); } static void isp_stat_buf_release(struct ispstat *stat) { unsigned long flags; isp_stat_buf_sync_for_device(stat, stat->locked_buf); spin_lock_irqsave(&stat->isp->stat_lock, flags); stat->locked_buf = NULL; spin_unlock_irqrestore(&stat->isp->stat_lock, flags); } /* Get buffer to userspace. 
*/ static struct ispstat_buffer *isp_stat_buf_get(struct ispstat *stat, struct omap3isp_stat_data *data) { int rval = 0; unsigned long flags; struct ispstat_buffer *buf; spin_lock_irqsave(&stat->isp->stat_lock, flags); while (1) { buf = isp_stat_buf_find_oldest(stat); if (!buf) { spin_unlock_irqrestore(&stat->isp->stat_lock, flags); dev_dbg(stat->isp->dev, "%s: cannot find a buffer.\n", stat->subdev.name); return ERR_PTR(-EBUSY); } if (isp_stat_buf_check_magic(stat, buf)) { dev_dbg(stat->isp->dev, "%s: current buffer has corrupted data\n.", stat->subdev.name); /* Mark empty because it doesn't have valid data. */ buf->empty = 1; } else { /* Buffer isn't corrupted. */ break; } } stat->locked_buf = buf; spin_unlock_irqrestore(&stat->isp->stat_lock, flags); if (buf->buf_size > data->buf_size) { dev_warn(stat->isp->dev, "%s: userspace's buffer size is not enough.\n", stat->subdev.name); isp_stat_buf_release(stat); return ERR_PTR(-EINVAL); } isp_stat_buf_sync_for_cpu(stat, buf); rval = copy_to_user(data->buf, buf->virt_addr, buf->buf_size); if (rval) { dev_info(stat->isp->dev, "%s: failed copying %d bytes of stat data\n", stat->subdev.name, rval); buf = ERR_PTR(-EFAULT); isp_stat_buf_release(stat); } return buf; } static void isp_stat_bufs_free(struct ispstat *stat) { struct device *dev = ISP_STAT_USES_DMAENGINE(stat) ? 
NULL : stat->isp->dev; unsigned int i; for (i = 0; i < STAT_MAX_BUFS; i++) { struct ispstat_buffer *buf = &stat->buf[i]; if (!buf->virt_addr) continue; sg_free_table(&buf->sgt); dma_free_coherent(dev, stat->buf_alloc_size, buf->virt_addr, buf->dma_addr); buf->dma_addr = 0; buf->virt_addr = NULL; buf->empty = 1; } dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n", stat->subdev.name); stat->buf_alloc_size = 0; stat->active_buf = NULL; } static int isp_stat_bufs_alloc_one(struct device *dev, struct ispstat_buffer *buf, unsigned int size) { int ret; buf->virt_addr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL); if (!buf->virt_addr) return -ENOMEM; ret = dma_get_sgtable(dev, &buf->sgt, buf->virt_addr, buf->dma_addr, size); if (ret < 0) { dma_free_coherent(dev, size, buf->virt_addr, buf->dma_addr); buf->virt_addr = NULL; buf->dma_addr = 0; return ret; } return 0; } /* * The device passed to the DMA API depends on whether the statistics block uses * ISP DMA, external DMA or PIO to transfer data. * * The first case (for the AEWB and AF engines) passes the ISP device, resulting * in the DMA buffers being mapped through the ISP IOMMU. * * The second case (for the histogram engine) should pass the DMA engine device. * As that device isn't accessible through the OMAP DMA engine API the driver * passes NULL instead, resulting in the buffers being mapped directly as * physical pages. * * The third case (for the histogram engine) doesn't require any mapping. The * buffers could be allocated with kmalloc/vmalloc, but we still use * dma_alloc_coherent() for consistency purpose. */ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size) { struct device *dev = ISP_STAT_USES_DMAENGINE(stat) ? NULL : stat->isp->dev; unsigned long flags; unsigned int i; spin_lock_irqsave(&stat->isp->stat_lock, flags); BUG_ON(stat->locked_buf != NULL); /* Are the old buffers big enough? 
*/ if (stat->buf_alloc_size >= size) { spin_unlock_irqrestore(&stat->isp->stat_lock, flags); return 0; } if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) { dev_info(stat->isp->dev, "%s: trying to allocate memory when busy\n", stat->subdev.name); spin_unlock_irqrestore(&stat->isp->stat_lock, flags); return -EBUSY; } spin_unlock_irqrestore(&stat->isp->stat_lock, flags); isp_stat_bufs_free(stat); stat->buf_alloc_size = size; for (i = 0; i < STAT_MAX_BUFS; i++) { struct ispstat_buffer *buf = &stat->buf[i]; int ret; ret = isp_stat_bufs_alloc_one(dev, buf, size); if (ret < 0) { dev_err(stat->isp->dev, "%s: Failed to allocate DMA buffer %u\n", stat->subdev.name, i); isp_stat_bufs_free(stat); return ret; } buf->empty = 1; dev_dbg(stat->isp->dev, "%s: buffer[%u] allocated. dma=%pad virt=%p", stat->subdev.name, i, &buf->dma_addr, buf->virt_addr); } return 0; } static void isp_stat_queue_event(struct ispstat *stat, int err) { struct video_device *vdev = stat->subdev.devnode; struct v4l2_event event; struct omap3isp_stat_event_status *status = (void *)event.u.data; memset(&event, 0, sizeof(event)); if (!err) { status->frame_number = stat->frame_number; status->config_counter = stat->config_counter; } else { status->buf_err = 1; } event.type = stat->event_type; v4l2_event_queue(vdev, &event); } /* * omap3isp_stat_request_statistics - Request statistics. * @data: Pointer to return statistics data. * * Returns 0 if successful. 
*/ int omap3isp_stat_request_statistics(struct ispstat *stat, struct omap3isp_stat_data *data) { struct ispstat_buffer *buf; if (stat->state != ISPSTAT_ENABLED) { dev_dbg(stat->isp->dev, "%s: engine not enabled.\n", stat->subdev.name); return -EINVAL; } mutex_lock(&stat->ioctl_lock); buf = isp_stat_buf_get(stat, data); if (IS_ERR(buf)) { mutex_unlock(&stat->ioctl_lock); return PTR_ERR(buf); } data->ts.tv_sec = buf->ts.tv_sec; data->ts.tv_usec = buf->ts.tv_nsec / NSEC_PER_USEC; data->config_counter = buf->config_counter; data->frame_number = buf->frame_number; data->buf_size = buf->buf_size; buf->empty = 1; isp_stat_buf_release(stat); mutex_unlock(&stat->ioctl_lock); return 0; } int omap3isp_stat_request_statistics_time32(struct ispstat *stat, struct omap3isp_stat_data_time32 *data) { struct omap3isp_stat_data data64 = { }; int ret; ret = omap3isp_stat_request_statistics(stat, &data64); if (ret) return ret; data->ts.tv_sec = data64.ts.tv_sec; data->ts.tv_usec = data64.ts.tv_usec; data->buf = (uintptr_t)data64.buf; memcpy(&data->frame, &data64.frame, sizeof(data->frame)); return 0; } /* * omap3isp_stat_config - Receives new statistic engine configuration. * @new_conf: Pointer to config structure. * * Returns 0 if successful, -EINVAL if new_conf pointer is NULL, -ENOMEM if * was unable to allocate memory for the buffer, or other errors if parameters * are invalid. 
*/ int omap3isp_stat_config(struct ispstat *stat, void *new_conf) { int ret; unsigned long irqflags; struct ispstat_generic_config *user_cfg = new_conf; u32 buf_size = user_cfg->buf_size; mutex_lock(&stat->ioctl_lock); dev_dbg(stat->isp->dev, "%s: configuring module with buffer size=0x%08lx\n", stat->subdev.name, (unsigned long)buf_size); ret = stat->ops->validate_params(stat, new_conf); if (ret) { mutex_unlock(&stat->ioctl_lock); dev_dbg(stat->isp->dev, "%s: configuration values are invalid.\n", stat->subdev.name); return ret; } if (buf_size != user_cfg->buf_size) dev_dbg(stat->isp->dev, "%s: driver has corrected buffer size request to 0x%08lx\n", stat->subdev.name, (unsigned long)user_cfg->buf_size); /* * Hack: H3A modules may need a doubled buffer size to avoid access * to a invalid memory address after a SBL overflow. * The buffer size is always PAGE_ALIGNED. * Hack 2: MAGIC_SIZE is added to buf_size so a magic word can be * inserted at the end to data integrity check purpose. * Hack 3: AF module writes one paxel data more than it should, so * the buffer allocation must consider it to avoid invalid memory * access. * Hack 4: H3A need to allocate extra space for the recover state. */ if (IS_H3A(stat)) { buf_size = user_cfg->buf_size * 2 + MAGIC_SIZE; if (IS_H3A_AF(stat)) /* * Adding one extra paxel data size for each recover * buffer + 2 regular ones. 
*/ buf_size += AF_EXTRA_DATA * (NUM_H3A_RECOVER_BUFS + 2); if (stat->recover_priv) { struct ispstat_generic_config *recover_cfg = stat->recover_priv; buf_size += recover_cfg->buf_size * NUM_H3A_RECOVER_BUFS; } buf_size = PAGE_ALIGN(buf_size); } else { /* Histogram */ buf_size = PAGE_ALIGN(user_cfg->buf_size + MAGIC_SIZE); } ret = isp_stat_bufs_alloc(stat, buf_size); if (ret) { mutex_unlock(&stat->ioctl_lock); return ret; } spin_lock_irqsave(&stat->isp->stat_lock, irqflags); stat->ops->set_params(stat, new_conf); spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); /* * Returning the right future config_counter for this setup, so * userspace can *know* when it has been applied. */ user_cfg->config_counter = stat->config_counter + stat->inc_config; /* Module has a valid configuration. */ stat->configured = 1; dev_dbg(stat->isp->dev, "%s: module has been successfully configured.\n", stat->subdev.name); mutex_unlock(&stat->ioctl_lock); return 0; } /* * isp_stat_buf_process - Process statistic buffers. * @buf_state: points out if buffer is ready to be processed. It's necessary * because histogram needs to copy the data from internal memory * before be able to process the buffer. */ static int isp_stat_buf_process(struct ispstat *stat, int buf_state) { int ret = STAT_NO_BUF; if (!atomic_add_unless(&stat->buf_err, -1, 0) && buf_state == STAT_BUF_DONE && stat->state == ISPSTAT_ENABLED) { ret = isp_stat_buf_queue(stat); isp_stat_buf_next(stat); } return ret; } int omap3isp_stat_pcr_busy(struct ispstat *stat) { return stat->ops->busy(stat); } int omap3isp_stat_busy(struct ispstat *stat) { return omap3isp_stat_pcr_busy(stat) | stat->buf_processing | (stat->state != ISPSTAT_DISABLED); } /* * isp_stat_pcr_enable - Disables/Enables statistic engines. * @pcr_enable: 0/1 - Disables/Enables the engine. * * Must be called from ISP driver when the module is idle and synchronized * with CCDC. 
*/ static void isp_stat_pcr_enable(struct ispstat *stat, u8 pcr_enable) { if ((stat->state != ISPSTAT_ENABLING && stat->state != ISPSTAT_ENABLED) && pcr_enable) /* Userspace has disabled the module. Aborting. */ return; stat->ops->enable(stat, pcr_enable); if (stat->state == ISPSTAT_DISABLING && !pcr_enable) stat->state = ISPSTAT_DISABLED; else if (stat->state == ISPSTAT_ENABLING && pcr_enable) stat->state = ISPSTAT_ENABLED; } void omap3isp_stat_suspend(struct ispstat *stat) { unsigned long flags; spin_lock_irqsave(&stat->isp->stat_lock, flags); if (stat->state != ISPSTAT_DISABLED) stat->ops->enable(stat, 0); if (stat->state == ISPSTAT_ENABLED) stat->state = ISPSTAT_SUSPENDED; spin_unlock_irqrestore(&stat->isp->stat_lock, flags); } void omap3isp_stat_resume(struct ispstat *stat) { /* Module will be re-enabled with its pipeline */ if (stat->state == ISPSTAT_SUSPENDED) stat->state = ISPSTAT_ENABLING; } static void isp_stat_try_enable(struct ispstat *stat) { unsigned long irqflags; if (stat->priv == NULL) /* driver wasn't initialised */ return; spin_lock_irqsave(&stat->isp->stat_lock, irqflags); if (stat->state == ISPSTAT_ENABLING && !stat->buf_processing && stat->buf_alloc_size) { /* * Userspace's requested to enable the engine but it wasn't yet. * Let's do that now. */ stat->update = 1; isp_stat_buf_next(stat); stat->ops->setup_regs(stat, stat->priv); isp_stat_buf_insert_magic(stat, stat->active_buf); /* * H3A module has some hw issues which forces the driver to * ignore next buffers even if it was disabled in the meantime. * On the other hand, Histogram shouldn't ignore buffers anymore * if it's being enabled. 
*/ if (!IS_H3A(stat)) atomic_set(&stat->buf_err, 0); isp_stat_pcr_enable(stat, 1); spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); dev_dbg(stat->isp->dev, "%s: module is enabled.\n", stat->subdev.name); } else { spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); } } void omap3isp_stat_isr_frame_sync(struct ispstat *stat) { isp_stat_try_enable(stat); } void omap3isp_stat_sbl_overflow(struct ispstat *stat) { unsigned long irqflags; spin_lock_irqsave(&stat->isp->stat_lock, irqflags); /* * Due to a H3A hw issue which prevents the next buffer to start from * the correct memory address, 2 buffers must be ignored. */ atomic_set(&stat->buf_err, 2); /* * If more than one SBL overflow happen in a row, H3A module may access * invalid memory region. * stat->sbl_ovl_recover is set to tell to the driver to temporarily use * a soft configuration which helps to avoid consecutive overflows. */ if (stat->recover_priv) stat->sbl_ovl_recover = 1; spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); } /* * omap3isp_stat_enable - Disable/Enable statistic engine as soon as possible * @enable: 0/1 - Disables/Enables the engine. * * Client should configure all the module registers before this. * This function can be called from a userspace request. */ int omap3isp_stat_enable(struct ispstat *stat, u8 enable) { unsigned long irqflags; dev_dbg(stat->isp->dev, "%s: user wants to %s module.\n", stat->subdev.name, enable ? 
"enable" : "disable"); /* Prevent enabling while configuring */ mutex_lock(&stat->ioctl_lock); spin_lock_irqsave(&stat->isp->stat_lock, irqflags); if (!stat->configured && enable) { spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); mutex_unlock(&stat->ioctl_lock); dev_dbg(stat->isp->dev, "%s: cannot enable module as it's never been successfully configured so far.\n", stat->subdev.name); return -EINVAL; } if (enable) { if (stat->state == ISPSTAT_DISABLING) /* Previous disabling request wasn't done yet */ stat->state = ISPSTAT_ENABLED; else if (stat->state == ISPSTAT_DISABLED) /* Module is now being enabled */ stat->state = ISPSTAT_ENABLING; } else { if (stat->state == ISPSTAT_ENABLING) { /* Previous enabling request wasn't done yet */ stat->state = ISPSTAT_DISABLED; } else if (stat->state == ISPSTAT_ENABLED) { /* Module is now being disabled */ stat->state = ISPSTAT_DISABLING; isp_stat_buf_clear(stat); } } spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); mutex_unlock(&stat->ioctl_lock); return 0; } int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable) { struct ispstat *stat = v4l2_get_subdevdata(subdev); if (enable) { /* * Only set enable PCR bit if the module was previously * enabled through ioctl. */ isp_stat_try_enable(stat); } else { unsigned long flags; /* Disable PCR bit and config enable field */ omap3isp_stat_enable(stat, 0); spin_lock_irqsave(&stat->isp->stat_lock, flags); stat->ops->enable(stat, 0); spin_unlock_irqrestore(&stat->isp->stat_lock, flags); /* * If module isn't busy, a new interrupt may come or not to * set the state to DISABLED. As Histogram needs to read its * internal memory to clear it, let interrupt handler * responsible of changing state to DISABLED. If the last * interrupt is coming, it's still safe as the handler will * ignore the second time when state is already set to DISABLED. 
* It's necessary to synchronize Histogram with streamoff, once * the module may be considered idle before last SDMA transfer * starts if we return here. */ if (!omap3isp_stat_pcr_busy(stat)) omap3isp_stat_isr(stat); dev_dbg(stat->isp->dev, "%s: module is being disabled\n", stat->subdev.name); } return 0; } /* * __stat_isr - Interrupt handler for statistic drivers */ static void __stat_isr(struct ispstat *stat, int from_dma) { int ret = STAT_BUF_DONE; int buf_processing; unsigned long irqflags; struct isp_pipeline *pipe; /* * stat->buf_processing must be set before disable module. It's * necessary to not inform too early the buffers aren't busy in case * of SDMA is going to be used. */ spin_lock_irqsave(&stat->isp->stat_lock, irqflags); if (stat->state == ISPSTAT_DISABLED) { spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); return; } buf_processing = stat->buf_processing; stat->buf_processing = 1; stat->ops->enable(stat, 0); if (buf_processing && !from_dma) { if (stat->state == ISPSTAT_ENABLED) { spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); dev_err(stat->isp->dev, "%s: interrupt occurred when module was still processing a buffer.\n", stat->subdev.name); ret = STAT_NO_BUF; goto out; } else { /* * Interrupt handler was called from streamoff when * the module wasn't busy anymore to ensure it is being * disabled after process last buffer. If such buffer * processing has already started, no need to do * anything else. */ spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); return; } } spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); /* If it's busy we can't process this buffer anymore */ if (!omap3isp_stat_pcr_busy(stat)) { if (!from_dma && stat->ops->buf_process) /* Module still need to copy data to buffer. 
*/ ret = stat->ops->buf_process(stat); if (ret == STAT_BUF_WAITING_DMA) /* Buffer is not ready yet */ return; spin_lock_irqsave(&stat->isp->stat_lock, irqflags); /* * Histogram needs to read its internal memory to clear it * before be disabled. For that reason, common statistic layer * can return only after call stat's buf_process() operator. */ if (stat->state == ISPSTAT_DISABLING) { stat->state = ISPSTAT_DISABLED; spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); stat->buf_processing = 0; return; } pipe = to_isp_pipeline(&stat->subdev.entity); stat->frame_number = atomic_read(&pipe->frame_number); /* * Before this point, 'ret' stores the buffer's status if it's * ready to be processed. Afterwards, it holds the status if * it was processed successfully. */ ret = isp_stat_buf_process(stat, ret); if (likely(!stat->sbl_ovl_recover)) { stat->ops->setup_regs(stat, stat->priv); } else { /* * Using recover config to increase the chance to have * a good buffer processing and make the H3A module to * go back to a valid state. */ stat->update = 1; stat->ops->setup_regs(stat, stat->recover_priv); stat->sbl_ovl_recover = 0; /* * Set 'update' in case of the module needs to use * regular configuration after next buffer. */ stat->update = 1; } isp_stat_buf_insert_magic(stat, stat->active_buf); /* * Hack: H3A modules may access invalid memory address or send * corrupted data to userspace if more than 1 SBL overflow * happens in a row without re-writing its buffer's start memory * address in the meantime. Such situation is avoided if the * module is not immediately re-enabled when the ISR misses the * timing to process the buffer and to setup the registers. * Because of that, pcr_enable(1) was moved to inside this 'if' * block. But the next interruption will still happen as during * pcr_enable(0) the module was busy. 
*/ isp_stat_pcr_enable(stat, 1); spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags); } else { /* * If a SBL overflow occurs and the H3A driver misses the timing * to process the buffer, stat->buf_err is set and won't be * cleared now. So the next buffer will be correctly ignored. * It's necessary due to a hw issue which makes the next H3A * buffer to start from the memory address where the previous * one stopped, instead of start where it was configured to. * Do not "stat->buf_err = 0" here. */ if (stat->ops->buf_process) /* * Driver may need to erase current data prior to * process a new buffer. If it misses the timing, the * next buffer might be wrong. So should be ignored. * It happens only for Histogram. */ atomic_set(&stat->buf_err, 1); ret = STAT_NO_BUF; dev_dbg(stat->isp->dev, "%s: cannot process buffer, device is busy.\n", stat->subdev.name); } out: stat->buf_processing = 0; isp_stat_queue_event(stat, ret != STAT_BUF_DONE); } void omap3isp_stat_isr(struct ispstat *stat) { __stat_isr(stat, 0); } void omap3isp_stat_dma_isr(struct ispstat *stat) { __stat_isr(stat, 1); } int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev, struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { struct ispstat *stat = v4l2_get_subdevdata(subdev); if (sub->type != stat->event_type) return -EINVAL; return v4l2_event_subscribe(fh, sub, STAT_NEVENTS, NULL); } int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev, struct v4l2_fh *fh, struct v4l2_event_subscription *sub) { return v4l2_event_unsubscribe(fh, sub); } void omap3isp_stat_unregister_entities(struct ispstat *stat) { v4l2_device_unregister_subdev(&stat->subdev); } int omap3isp_stat_register_entities(struct ispstat *stat, struct v4l2_device *vdev) { stat->subdev.dev = vdev->mdev->dev; return v4l2_device_register_subdev(vdev, &stat->subdev); } static int isp_stat_init_entities(struct ispstat *stat, const char *name, const struct v4l2_subdev_ops *sd_ops) { struct v4l2_subdev *subdev = &stat->subdev; 
struct media_entity *me = &subdev->entity; v4l2_subdev_init(subdev, sd_ops); snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name); subdev->grp_id = BIT(16); /* group ID for isp subdevs */ subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE; v4l2_set_subdevdata(subdev, stat); stat->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; me->ops = NULL; return media_entity_pads_init(me, 1, &stat->pad); } int omap3isp_stat_init(struct ispstat *stat, const char *name, const struct v4l2_subdev_ops *sd_ops) { int ret; stat->buf = kcalloc(STAT_MAX_BUFS, sizeof(*stat->buf), GFP_KERNEL); if (!stat->buf) return -ENOMEM; isp_stat_buf_clear(stat); mutex_init(&stat->ioctl_lock); atomic_set(&stat->buf_err, 0); ret = isp_stat_init_entities(stat, name, sd_ops); if (ret < 0) { mutex_destroy(&stat->ioctl_lock); kfree(stat->buf); } return ret; } void omap3isp_stat_cleanup(struct ispstat *stat) { media_entity_cleanup(&stat->subdev.entity); mutex_destroy(&stat->ioctl_lock); isp_stat_bufs_free(stat); kfree(stat->buf); kfree(stat->priv); kfree(stat->recover_priv); }
linux-master
drivers/media/platform/ti/omap3isp/ispstat.c
// SPDX-License-Identifier: GPL-2.0-only /* * TI Camera Access Layer (CAL) - Driver * * Copyright (c) 2015-2020 Texas Instruments Inc. * * Authors: * Benoit Parrot <[email protected]> * Laurent Pinchart <[email protected]> */ #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <media/media-device.h> #include <media/v4l2-async.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> #include <media/videobuf2-core.h> #include <media/videobuf2-dma-contig.h> #include "cal.h" #include "cal_regs.h" MODULE_DESCRIPTION("TI CAL driver"); MODULE_AUTHOR("Benoit Parrot, <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.1.0"); int cal_video_nr = -1; module_param_named(video_nr, cal_video_nr, uint, 0644); MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect"); unsigned int cal_debug; module_param_named(debug, cal_debug, uint, 0644); MODULE_PARM_DESC(debug, "activates debug info"); #ifdef CONFIG_VIDEO_TI_CAL_MC #define CAL_MC_API_DEFAULT 1 #else #define CAL_MC_API_DEFAULT 0 #endif bool cal_mc_api = CAL_MC_API_DEFAULT; module_param_named(mc_api, cal_mc_api, bool, 0444); MODULE_PARM_DESC(mc_api, "activates the MC API"); /* ------------------------------------------------------------------ * Format Handling * ------------------------------------------------------------------ */ const struct cal_format_info cal_formats[] = { { .fourcc = V4L2_PIX_FMT_YUYV, .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, }, { .fourcc = V4L2_PIX_FMT_UYVY, .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, }, { .fourcc = V4L2_PIX_FMT_YVYU, .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, }, { .fourcc = V4L2_PIX_FMT_VYUY, .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, }, { .fourcc = V4L2_PIX_FMT_RGB565, .code = MEDIA_BUS_FMT_RGB565_1X16, .bpp = 16, }, { 
.fourcc = V4L2_PIX_FMT_SBGGR8, .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, }, { .fourcc = V4L2_PIX_FMT_SGBRG8, .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, }, { .fourcc = V4L2_PIX_FMT_SGRBG8, .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, }, { .fourcc = V4L2_PIX_FMT_SRGGB8, .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, }, { .fourcc = V4L2_PIX_FMT_SBGGR10, .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, }, { .fourcc = V4L2_PIX_FMT_SGBRG10, .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, }, { .fourcc = V4L2_PIX_FMT_SGRBG10, .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, }, { .fourcc = V4L2_PIX_FMT_SRGGB10, .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, }, { .fourcc = V4L2_PIX_FMT_SBGGR12, .code = MEDIA_BUS_FMT_SBGGR12_1X12, .bpp = 12, }, { .fourcc = V4L2_PIX_FMT_SGBRG12, .code = MEDIA_BUS_FMT_SGBRG12_1X12, .bpp = 12, }, { .fourcc = V4L2_PIX_FMT_SGRBG12, .code = MEDIA_BUS_FMT_SGRBG12_1X12, .bpp = 12, }, { .fourcc = V4L2_PIX_FMT_SRGGB12, .code = MEDIA_BUS_FMT_SRGGB12_1X12, .bpp = 12, }, }; const unsigned int cal_num_formats = ARRAY_SIZE(cal_formats); const struct cal_format_info *cal_format_by_fourcc(u32 fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) { if (cal_formats[i].fourcc == fourcc) return &cal_formats[i]; } return NULL; } const struct cal_format_info *cal_format_by_code(u32 code) { unsigned int i; for (i = 0; i < ARRAY_SIZE(cal_formats); ++i) { if (cal_formats[i].code == code) return &cal_formats[i]; } return NULL; } /* ------------------------------------------------------------------ * Platform Data * ------------------------------------------------------------------ */ static const struct cal_camerarx_data dra72x_cal_camerarx[] = { { .fields = { [F_CTRLCLKEN] = { 10, 10 }, [F_CAMMODE] = { 11, 12 }, [F_LANEENABLE] = { 13, 16 }, [F_CSI_MODE] = { 17, 17 }, }, .num_lanes = 4, }, { .fields = { [F_CTRLCLKEN] = { 0, 0 }, [F_CAMMODE] = { 1, 2 }, [F_LANEENABLE] = { 3, 4 }, [F_CSI_MODE] = { 5, 5 }, }, .num_lanes = 2, }, }; static const struct cal_data 
dra72x_cal_data = { .camerarx = dra72x_cal_camerarx, .num_csi2_phy = ARRAY_SIZE(dra72x_cal_camerarx), }; static const struct cal_data dra72x_es1_cal_data = { .camerarx = dra72x_cal_camerarx, .num_csi2_phy = ARRAY_SIZE(dra72x_cal_camerarx), .flags = DRA72_CAL_PRE_ES2_LDO_DISABLE, }; static const struct cal_camerarx_data dra76x_cal_csi_phy[] = { { .fields = { [F_CTRLCLKEN] = { 8, 8 }, [F_CAMMODE] = { 9, 10 }, [F_CSI_MODE] = { 11, 11 }, [F_LANEENABLE] = { 27, 31 }, }, .num_lanes = 5, }, { .fields = { [F_CTRLCLKEN] = { 0, 0 }, [F_CAMMODE] = { 1, 2 }, [F_CSI_MODE] = { 3, 3 }, [F_LANEENABLE] = { 24, 26 }, }, .num_lanes = 3, }, }; static const struct cal_data dra76x_cal_data = { .camerarx = dra76x_cal_csi_phy, .num_csi2_phy = ARRAY_SIZE(dra76x_cal_csi_phy), }; static const struct cal_camerarx_data am654_cal_csi_phy[] = { { .fields = { [F_CTRLCLKEN] = { 15, 15 }, [F_CAMMODE] = { 24, 25 }, [F_LANEENABLE] = { 0, 4 }, }, .num_lanes = 5, }, }; static const struct cal_data am654_cal_data = { .camerarx = am654_cal_csi_phy, .num_csi2_phy = ARRAY_SIZE(am654_cal_csi_phy), }; /* ------------------------------------------------------------------ * I/O Register Accessors * ------------------------------------------------------------------ */ void cal_quickdump_regs(struct cal_dev *cal) { unsigned int i; cal_info(cal, "CAL Registers @ 0x%pa:\n", &cal->res->start); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, (__force const void *)cal->base, resource_size(cal->res), false); for (i = 0; i < cal->data->num_csi2_phy; ++i) { struct cal_camerarx *phy = cal->phy[i]; cal_info(cal, "CSI2 Core %u Registers @ %pa:\n", i, &phy->res->start); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4, (__force const void *)phy->base, resource_size(phy->res), false); } } /* ------------------------------------------------------------------ * Context Management * ------------------------------------------------------------------ */ #define CAL_MAX_PIX_PROC 4 static int 
cal_reserve_pix_proc(struct cal_dev *cal) { unsigned long ret; spin_lock(&cal->v4l2_dev.lock); ret = find_first_zero_bit(&cal->reserved_pix_proc_mask, CAL_MAX_PIX_PROC); if (ret == CAL_MAX_PIX_PROC) { spin_unlock(&cal->v4l2_dev.lock); return -ENOSPC; } cal->reserved_pix_proc_mask |= BIT(ret); spin_unlock(&cal->v4l2_dev.lock); return ret; } static void cal_release_pix_proc(struct cal_dev *cal, unsigned int pix_proc_num) { spin_lock(&cal->v4l2_dev.lock); cal->reserved_pix_proc_mask &= ~BIT(pix_proc_num); spin_unlock(&cal->v4l2_dev.lock); } static void cal_ctx_csi2_config(struct cal_ctx *ctx) { u32 val; val = cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx)); cal_set_field(&val, ctx->cport, CAL_CSI2_CTX_CPORT_MASK); /* * DT type: MIPI CSI-2 Specs * 0x1: All - DT filter is disabled * 0x24: RGB888 1 pixel = 3 bytes * 0x2B: RAW10 4 pixels = 5 bytes * 0x2A: RAW8 1 pixel = 1 byte * 0x1E: YUV422 2 pixels = 4 bytes */ cal_set_field(&val, ctx->datatype, CAL_CSI2_CTX_DT_MASK); cal_set_field(&val, ctx->vc, CAL_CSI2_CTX_VC_MASK); cal_set_field(&val, ctx->v_fmt.fmt.pix.height, CAL_CSI2_CTX_LINES_MASK); cal_set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK); cal_set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE, CAL_CSI2_CTX_PACK_MODE_MASK); cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), val); ctx_dbg(3, ctx, "CAL_CSI2_CTX(%u, %u) = 0x%08x\n", ctx->phy->instance, ctx->csi2_ctx, cal_read(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx))); } static void cal_ctx_pix_proc_config(struct cal_ctx *ctx) { u32 val, extract, pack; switch (ctx->fmtinfo->bpp) { case 8: extract = CAL_PIX_PROC_EXTRACT_B8; pack = CAL_PIX_PROC_PACK_B8; break; case 10: extract = CAL_PIX_PROC_EXTRACT_B10_MIPI; pack = CAL_PIX_PROC_PACK_B16; break; case 12: extract = CAL_PIX_PROC_EXTRACT_B12_MIPI; pack = CAL_PIX_PROC_PACK_B16; break; case 16: extract = CAL_PIX_PROC_EXTRACT_B16_LE; pack = CAL_PIX_PROC_PACK_B16; break; default: /* * If you see this warning then it 
means that you added * some new entry in the cal_formats[] array with a different * bit per pixel values then the one supported below. * Either add support for the new bpp value below or adjust * the new entry to use one of the value below. * * Instead of failing here just use 8 bpp as a default. */ dev_warn_once(ctx->cal->dev, "%s:%d:%s: bpp:%d unsupported! Overwritten with 8.\n", __FILE__, __LINE__, __func__, ctx->fmtinfo->bpp); extract = CAL_PIX_PROC_EXTRACT_B8; pack = CAL_PIX_PROC_PACK_B8; break; } val = cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc)); cal_set_field(&val, extract, CAL_PIX_PROC_EXTRACT_MASK); cal_set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK); cal_set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK); cal_set_field(&val, pack, CAL_PIX_PROC_PACK_MASK); cal_set_field(&val, ctx->cport, CAL_PIX_PROC_CPORT_MASK); cal_set_field(&val, 1, CAL_PIX_PROC_EN_MASK); cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), val); ctx_dbg(3, ctx, "CAL_PIX_PROC(%u) = 0x%08x\n", ctx->pix_proc, cal_read(ctx->cal, CAL_PIX_PROC(ctx->pix_proc))); } static void cal_ctx_wr_dma_config(struct cal_ctx *ctx) { unsigned int stride = ctx->v_fmt.fmt.pix.bytesperline; u32 val; val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)); cal_set_field(&val, ctx->cport, CAL_WR_DMA_CTRL_CPORT_MASK); cal_set_field(&val, ctx->v_fmt.fmt.pix.height, CAL_WR_DMA_CTRL_YSIZE_MASK); cal_set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT, CAL_WR_DMA_CTRL_DTAG_MASK); cal_set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR, CAL_WR_DMA_CTRL_PATTERN_MASK); cal_set_field(&val, 1, CAL_WR_DMA_CTRL_STALL_RD_MASK); cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val); ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->dma_ctx, cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx))); cal_write_field(ctx->cal, CAL_WR_DMA_OFST(ctx->dma_ctx), stride / 16, CAL_WR_DMA_OFST_MASK); ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->dma_ctx, cal_read(ctx->cal, 
CAL_WR_DMA_OFST(ctx->dma_ctx))); val = cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx)); /* 64 bit word means no skipping */ cal_set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK); /* * The XSIZE field is expressed in 64-bit units and prevents overflows * in case of synchronization issues by limiting the number of bytes * written per line. */ cal_set_field(&val, stride / 8, CAL_WR_DMA_XSIZE_MASK); cal_write(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx), val); ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->dma_ctx, cal_read(ctx->cal, CAL_WR_DMA_XSIZE(ctx->dma_ctx))); } void cal_ctx_set_dma_addr(struct cal_ctx *ctx, dma_addr_t addr) { cal_write(ctx->cal, CAL_WR_DMA_ADDR(ctx->dma_ctx), addr); } static void cal_ctx_wr_dma_enable(struct cal_ctx *ctx) { u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)); cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST, CAL_WR_DMA_CTRL_MODE_MASK); cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val); } static void cal_ctx_wr_dma_disable(struct cal_ctx *ctx) { u32 val = cal_read(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx)); cal_set_field(&val, CAL_WR_DMA_CTRL_MODE_DIS, CAL_WR_DMA_CTRL_MODE_MASK); cal_write(ctx->cal, CAL_WR_DMA_CTRL(ctx->dma_ctx), val); } static bool cal_ctx_wr_dma_stopped(struct cal_ctx *ctx) { bool stopped; spin_lock_irq(&ctx->dma.lock); stopped = ctx->dma.state == CAL_DMA_STOPPED; spin_unlock_irq(&ctx->dma.lock); return stopped; } static int cal_get_remote_frame_desc_entry(struct cal_ctx *ctx, struct v4l2_mbus_frame_desc_entry *entry) { struct v4l2_mbus_frame_desc fd; struct media_pad *phy_source_pad; int ret; phy_source_pad = media_pad_remote_pad_first(&ctx->pad); if (!phy_source_pad) return -ENODEV; ret = v4l2_subdev_call(&ctx->phy->subdev, pad, get_frame_desc, phy_source_pad->index, &fd); if (ret) return ret; if (fd.num_entries != 1) return -EINVAL; *entry = fd.entry[0]; return 0; } int cal_ctx_prepare(struct cal_ctx *ctx) { struct v4l2_mbus_frame_desc_entry entry; int ret; ret = 
cal_get_remote_frame_desc_entry(ctx, &entry); if (ret == -ENOIOCTLCMD) { ctx->vc = 0; ctx->datatype = CAL_CSI2_CTX_DT_ANY; } else if (!ret) { ctx_dbg(2, ctx, "Framedesc: len %u, vc %u, dt %#x\n", entry.length, entry.bus.csi2.vc, entry.bus.csi2.dt); ctx->vc = entry.bus.csi2.vc; ctx->datatype = entry.bus.csi2.dt; } else { return ret; } ctx->use_pix_proc = !ctx->fmtinfo->meta; if (ctx->use_pix_proc) { ret = cal_reserve_pix_proc(ctx->cal); if (ret < 0) { ctx_err(ctx, "Failed to reserve pix proc: %d\n", ret); return ret; } ctx->pix_proc = ret; } return 0; } void cal_ctx_unprepare(struct cal_ctx *ctx) { if (ctx->use_pix_proc) cal_release_pix_proc(ctx->cal, ctx->pix_proc); } void cal_ctx_start(struct cal_ctx *ctx) { struct cal_camerarx *phy = ctx->phy; /* * Reset the frame number & sequence number, but only if the * virtual channel is not already in use. */ spin_lock(&phy->vc_lock); if (phy->vc_enable_count[ctx->vc]++ == 0) { phy->vc_frame_number[ctx->vc] = 0; phy->vc_sequence[ctx->vc] = 0; } spin_unlock(&phy->vc_lock); ctx->dma.state = CAL_DMA_RUNNING; /* Configure the CSI-2, pixel processing and write DMA contexts. */ cal_ctx_csi2_config(ctx); if (ctx->use_pix_proc) cal_ctx_pix_proc_config(ctx); cal_ctx_wr_dma_config(ctx); /* Enable IRQ_WDMA_END and IRQ_WDMA_START. */ cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(1), CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx)); cal_write(ctx->cal, CAL_HL_IRQENABLE_SET(2), CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx)); cal_ctx_wr_dma_enable(ctx); } void cal_ctx_stop(struct cal_ctx *ctx) { struct cal_camerarx *phy = ctx->phy; long timeout; WARN_ON(phy->vc_enable_count[ctx->vc] == 0); spin_lock(&phy->vc_lock); phy->vc_enable_count[ctx->vc]--; spin_unlock(&phy->vc_lock); /* * Request DMA stop and wait until it completes. If completion times * out, forcefully disable the DMA. 
*/ spin_lock_irq(&ctx->dma.lock); ctx->dma.state = CAL_DMA_STOP_REQUESTED; spin_unlock_irq(&ctx->dma.lock); timeout = wait_event_timeout(ctx->dma.wait, cal_ctx_wr_dma_stopped(ctx), msecs_to_jiffies(500)); if (!timeout) { ctx_err(ctx, "failed to disable dma cleanly\n"); cal_ctx_wr_dma_disable(ctx); } /* Disable IRQ_WDMA_END and IRQ_WDMA_START. */ cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(1), CAL_HL_IRQ_WDMA_END_MASK(ctx->dma_ctx)); cal_write(ctx->cal, CAL_HL_IRQENABLE_CLR(2), CAL_HL_IRQ_WDMA_START_MASK(ctx->dma_ctx)); ctx->dma.state = CAL_DMA_STOPPED; /* Disable CSI2 context */ cal_write(ctx->cal, CAL_CSI2_CTX(ctx->phy->instance, ctx->csi2_ctx), 0); /* Disable pix proc */ if (ctx->use_pix_proc) cal_write(ctx->cal, CAL_PIX_PROC(ctx->pix_proc), 0); } /* ------------------------------------------------------------------ * IRQ Handling * ------------------------------------------------------------------ */ /* * Track a sequence number for each virtual channel, which is shared by * all contexts using the same virtual channel. This is done using the * CSI-2 frame number as a base. */ static void cal_update_seq_number(struct cal_ctx *ctx) { struct cal_dev *cal = ctx->cal; struct cal_camerarx *phy = ctx->phy; u16 prev_frame_num, frame_num; u8 vc = ctx->vc; frame_num = cal_read(cal, CAL_CSI2_STATUS(phy->instance, ctx->csi2_ctx)) & 0xffff; if (phy->vc_frame_number[vc] != frame_num) { prev_frame_num = phy->vc_frame_number[vc]; if (prev_frame_num >= frame_num) phy->vc_sequence[vc] += 1; else phy->vc_sequence[vc] += frame_num - prev_frame_num; phy->vc_frame_number[vc] = frame_num; } } static inline void cal_irq_wdma_start(struct cal_ctx *ctx) { spin_lock(&ctx->dma.lock); if (ctx->dma.state == CAL_DMA_STOP_REQUESTED) { /* * If a stop is requested, disable the write DMA context * immediately. The CAL_WR_DMA_CTRL_j.MODE field is shadowed, * the current frame will complete and the DMA will then stop. 
*/ cal_ctx_wr_dma_disable(ctx); ctx->dma.state = CAL_DMA_STOP_PENDING; } else if (!list_empty(&ctx->dma.queue) && !ctx->dma.pending) { /* * Otherwise, if a new buffer is available, queue it to the * hardware. */ struct cal_buffer *buf; dma_addr_t addr; buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list); addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0); cal_ctx_set_dma_addr(ctx, addr); ctx->dma.pending = buf; list_del(&buf->list); } spin_unlock(&ctx->dma.lock); cal_update_seq_number(ctx); } static inline void cal_irq_wdma_end(struct cal_ctx *ctx) { struct cal_buffer *buf = NULL; spin_lock(&ctx->dma.lock); /* If the DMA context was stopping, it is now stopped. */ if (ctx->dma.state == CAL_DMA_STOP_PENDING) { ctx->dma.state = CAL_DMA_STOPPED; wake_up(&ctx->dma.wait); } /* If a new buffer was queued, complete the current buffer. */ if (ctx->dma.pending) { buf = ctx->dma.active; ctx->dma.active = ctx->dma.pending; ctx->dma.pending = NULL; } spin_unlock(&ctx->dma.lock); if (buf) { buf->vb.vb2_buf.timestamp = ktime_get_ns(); buf->vb.field = ctx->v_fmt.fmt.pix.field; buf->vb.sequence = ctx->phy->vc_sequence[ctx->vc]; vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); } } static void cal_irq_handle_wdma(struct cal_ctx *ctx, bool start, bool end) { /* * CAL HW interrupts are inherently racy. If we get both start and end * interrupts, we don't know what has happened: did the DMA for a single * frame start and end, or did one frame end and a new frame start? * * Usually for normal pixel frames we get the interrupts separately. If * we do get both, we have to guess. The assumption in the code below is * that the active vertical area is larger than the blanking vertical * area, and thus it is more likely that we get the end of the old frame * and the start of a new frame. * * However, for embedded data, which is only a few lines high, we always * get both interrupts. Here the assumption is that we get both for the * same frame. 
*/ if (ctx->v_fmt.fmt.pix.height < 10) { if (start) cal_irq_wdma_start(ctx); if (end) cal_irq_wdma_end(ctx); } else { if (end) cal_irq_wdma_end(ctx); if (start) cal_irq_wdma_start(ctx); } } static irqreturn_t cal_irq(int irq_cal, void *data) { struct cal_dev *cal = data; u32 status[3]; unsigned int i; for (i = 0; i < 3; ++i) { status[i] = cal_read(cal, CAL_HL_IRQSTATUS(i)); if (status[i]) cal_write(cal, CAL_HL_IRQSTATUS(i), status[i]); } if (status[0]) { if (status[0] & CAL_HL_IRQ_OCPO_ERR_MASK) dev_err_ratelimited(cal->dev, "OCPO ERROR\n"); for (i = 0; i < cal->data->num_csi2_phy; ++i) { if (status[0] & CAL_HL_IRQ_CIO_MASK(i)) { u32 cio_stat = cal_read(cal, CAL_CSI2_COMPLEXIO_IRQSTATUS(i)); dev_err_ratelimited(cal->dev, "CIO%u error: %#08x\n", i, cio_stat); cal_write(cal, CAL_CSI2_COMPLEXIO_IRQSTATUS(i), cio_stat); } if (status[0] & CAL_HL_IRQ_VC_MASK(i)) { u32 vc_stat = cal_read(cal, CAL_CSI2_VC_IRQSTATUS(i)); dev_err_ratelimited(cal->dev, "CIO%u VC error: %#08x\n", i, vc_stat); cal_write(cal, CAL_CSI2_VC_IRQSTATUS(i), vc_stat); } } } for (i = 0; i < cal->num_contexts; ++i) { bool end = !!(status[1] & CAL_HL_IRQ_WDMA_END_MASK(i)); bool start = !!(status[2] & CAL_HL_IRQ_WDMA_START_MASK(i)); if (start || end) cal_irq_handle_wdma(cal->ctx[i], start, end); } return IRQ_HANDLED; } /* ------------------------------------------------------------------ * Asynchronous V4L2 subdev binding * ------------------------------------------------------------------ */ struct cal_v4l2_async_subdev { struct v4l2_async_connection asd; /* Must be first */ struct cal_camerarx *phy; }; static inline struct cal_v4l2_async_subdev * to_cal_asd(struct v4l2_async_connection *asd) { return container_of(asd, struct cal_v4l2_async_subdev, asd); } static int cal_async_notifier_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asd) { struct cal_camerarx *phy = to_cal_asd(asd)->phy; int pad; int ret; if (phy->source) { phy_info(phy, "Rejecting 
subdev %s (Already set!!)", subdev->name); return 0; } phy->source = subdev; phy_dbg(1, phy, "Using source %s for capture\n", subdev->name); pad = media_entity_get_fwnode_pad(&subdev->entity, of_fwnode_handle(phy->source_ep_node), MEDIA_PAD_FL_SOURCE); if (pad < 0) { phy_err(phy, "Source %s has no connected source pad\n", subdev->name); return pad; } ret = media_create_pad_link(&subdev->entity, pad, &phy->subdev.entity, CAL_CAMERARX_PAD_SINK, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) { phy_err(phy, "Failed to create media link for source %s\n", subdev->name); return ret; } return 0; } static int cal_async_notifier_complete(struct v4l2_async_notifier *notifier) { struct cal_dev *cal = container_of(notifier, struct cal_dev, notifier); unsigned int i; int ret; for (i = 0; i < cal->num_contexts; ++i) { ret = cal_ctx_v4l2_register(cal->ctx[i]); if (ret) goto err_ctx_unreg; } if (!cal_mc_api) return 0; ret = v4l2_device_register_subdev_nodes(&cal->v4l2_dev); if (ret) goto err_ctx_unreg; return 0; err_ctx_unreg: for (; i > 0; --i) { if (!cal->ctx[i - 1]) continue; cal_ctx_v4l2_unregister(cal->ctx[i - 1]); } return ret; } static const struct v4l2_async_notifier_operations cal_async_notifier_ops = { .bound = cal_async_notifier_bound, .complete = cal_async_notifier_complete, }; static int cal_async_notifier_register(struct cal_dev *cal) { unsigned int i; int ret; v4l2_async_nf_init(&cal->notifier, &cal->v4l2_dev); cal->notifier.ops = &cal_async_notifier_ops; for (i = 0; i < cal->data->num_csi2_phy; ++i) { struct cal_camerarx *phy = cal->phy[i]; struct cal_v4l2_async_subdev *casd; struct fwnode_handle *fwnode; if (!phy->source_node) continue; fwnode = of_fwnode_handle(phy->source_node); casd = v4l2_async_nf_add_fwnode(&cal->notifier, fwnode, struct cal_v4l2_async_subdev); if (IS_ERR(casd)) { phy_err(phy, "Failed to add subdev to notifier\n"); ret = PTR_ERR(casd); goto error; } casd->phy = phy; } ret = v4l2_async_nf_register(&cal->notifier); if (ret) { 
cal_err(cal, "Error registering async notifier\n"); goto error; } return 0; error: v4l2_async_nf_cleanup(&cal->notifier); return ret; } static void cal_async_notifier_unregister(struct cal_dev *cal) { v4l2_async_nf_unregister(&cal->notifier); v4l2_async_nf_cleanup(&cal->notifier); } /* ------------------------------------------------------------------ * Media and V4L2 device handling * ------------------------------------------------------------------ */ /* * Register user-facing devices. To be called at the end of the probe function * when all resources are initialized and ready. */ static int cal_media_register(struct cal_dev *cal) { int ret; ret = media_device_register(&cal->mdev); if (ret) { cal_err(cal, "Failed to register media device\n"); return ret; } /* * Register the async notifier. This may trigger registration of the * V4L2 video devices if all subdevs are ready. */ ret = cal_async_notifier_register(cal); if (ret) { media_device_unregister(&cal->mdev); return ret; } return 0; } /* * Unregister the user-facing devices, but don't free memory yet. To be called * at the beginning of the remove function, to disallow access from userspace. */ static void cal_media_unregister(struct cal_dev *cal) { unsigned int i; /* Unregister all the V4L2 video devices. */ for (i = 0; i < cal->num_contexts; i++) cal_ctx_v4l2_unregister(cal->ctx[i]); cal_async_notifier_unregister(cal); media_device_unregister(&cal->mdev); } /* * Initialize the in-kernel objects. To be called at the beginning of the probe * function, before the V4L2 device is used by the driver. */ static int cal_media_init(struct cal_dev *cal) { struct media_device *mdev = &cal->mdev; int ret; mdev->dev = cal->dev; mdev->hw_revision = cal->revision; strscpy(mdev->model, "CAL", sizeof(mdev->model)); media_device_init(mdev); /* * Initialize the V4L2 device (despite the function name, this performs * initialization, not registration). 
*/ cal->v4l2_dev.mdev = mdev; ret = v4l2_device_register(cal->dev, &cal->v4l2_dev); if (ret) { cal_err(cal, "Failed to register V4L2 device\n"); return ret; } vb2_dma_contig_set_max_seg_size(cal->dev, DMA_BIT_MASK(32)); return 0; } /* * Cleanup the in-kernel objects, freeing memory. To be called at the very end * of the remove sequence, when nothing (including userspace) can access the * objects anymore. */ static void cal_media_cleanup(struct cal_dev *cal) { v4l2_device_unregister(&cal->v4l2_dev); media_device_cleanup(&cal->mdev); vb2_dma_contig_clear_max_seg_size(cal->dev); } /* ------------------------------------------------------------------ * Initialization and module stuff * ------------------------------------------------------------------ */ static struct cal_ctx *cal_ctx_create(struct cal_dev *cal, int inst) { struct cal_ctx *ctx; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return NULL; ctx->cal = cal; ctx->phy = cal->phy[inst]; ctx->dma_ctx = inst; ctx->csi2_ctx = inst; ctx->cport = inst; ret = cal_ctx_v4l2_init(ctx); if (ret) { kfree(ctx); return NULL; } return ctx; } static void cal_ctx_destroy(struct cal_ctx *ctx) { cal_ctx_v4l2_cleanup(ctx); kfree(ctx); } static const struct of_device_id cal_of_match[] = { { .compatible = "ti,dra72-cal", .data = (void *)&dra72x_cal_data, }, { .compatible = "ti,dra72-pre-es2-cal", .data = (void *)&dra72x_es1_cal_data, }, { .compatible = "ti,dra76-cal", .data = (void *)&dra76x_cal_data, }, { .compatible = "ti,am654-cal", .data = (void *)&am654_cal_data, }, {}, }; MODULE_DEVICE_TABLE(of, cal_of_match); /* Get hardware revision and info. 
*/ #define CAL_HL_HWINFO_VALUE 0xa3c90469 static void cal_get_hwinfo(struct cal_dev *cal) { u32 hwinfo; cal->revision = cal_read(cal, CAL_HL_REVISION); switch (FIELD_GET(CAL_HL_REVISION_SCHEME_MASK, cal->revision)) { case CAL_HL_REVISION_SCHEME_H08: cal_dbg(3, cal, "CAL HW revision %lu.%lu.%lu (0x%08x)\n", FIELD_GET(CAL_HL_REVISION_MAJOR_MASK, cal->revision), FIELD_GET(CAL_HL_REVISION_MINOR_MASK, cal->revision), FIELD_GET(CAL_HL_REVISION_RTL_MASK, cal->revision), cal->revision); break; case CAL_HL_REVISION_SCHEME_LEGACY: default: cal_info(cal, "Unexpected CAL HW revision 0x%08x\n", cal->revision); break; } hwinfo = cal_read(cal, CAL_HL_HWINFO); if (hwinfo != CAL_HL_HWINFO_VALUE) cal_info(cal, "CAL_HL_HWINFO = 0x%08x, expected 0x%08x\n", hwinfo, CAL_HL_HWINFO_VALUE); } static int cal_init_camerarx_regmap(struct cal_dev *cal) { struct platform_device *pdev = to_platform_device(cal->dev); struct device_node *np = cal->dev->of_node; struct regmap_config config = { }; struct regmap *syscon; struct resource *res; unsigned int offset; void __iomem *base; syscon = syscon_regmap_lookup_by_phandle_args(np, "ti,camerrx-control", 1, &offset); if (!IS_ERR(syscon)) { cal->syscon_camerrx = syscon; cal->syscon_camerrx_offset = offset; return 0; } dev_warn(cal->dev, "failed to get ti,camerrx-control: %ld\n", PTR_ERR(syscon)); /* * Backward DTS compatibility. If syscon entry is not present then * check if the camerrx_control resource is present. 
*/ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "camerrx_control"); base = devm_ioremap_resource(cal->dev, res); if (IS_ERR(base)) { cal_err(cal, "failed to ioremap camerrx_control\n"); return PTR_ERR(base); } cal_dbg(1, cal, "ioresource %s at %pa - %pa\n", res->name, &res->start, &res->end); config.reg_bits = 32; config.reg_stride = 4; config.val_bits = 32; config.max_register = resource_size(res) - 4; syscon = regmap_init_mmio(NULL, base, &config); if (IS_ERR(syscon)) { pr_err("regmap init failed\n"); return PTR_ERR(syscon); } /* * In this case the base already point to the direct CM register so no * need for an offset. */ cal->syscon_camerrx = syscon; cal->syscon_camerrx_offset = 0; return 0; } static int cal_probe(struct platform_device *pdev) { struct cal_dev *cal; bool connected = false; unsigned int i; int ret; int irq; cal = devm_kzalloc(&pdev->dev, sizeof(*cal), GFP_KERNEL); if (!cal) return -ENOMEM; cal->data = of_device_get_match_data(&pdev->dev); if (!cal->data) { dev_err(&pdev->dev, "Could not get feature data based on compatible version\n"); return -ENODEV; } cal->dev = &pdev->dev; platform_set_drvdata(pdev, cal); /* Acquire resources: clocks, CAMERARX regmap, I/O memory and IRQ. */ cal->fclk = devm_clk_get(&pdev->dev, "fck"); if (IS_ERR(cal->fclk)) { dev_err(&pdev->dev, "cannot get CAL fclk\n"); return PTR_ERR(cal->fclk); } ret = cal_init_camerarx_regmap(cal); if (ret < 0) return ret; cal->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cal_top"); cal->base = devm_ioremap_resource(&pdev->dev, cal->res); if (IS_ERR(cal->base)) return PTR_ERR(cal->base); cal_dbg(1, cal, "ioresource %s at %pa - %pa\n", cal->res->name, &cal->res->start, &cal->res->end); irq = platform_get_irq(pdev, 0); cal_dbg(1, cal, "got irq# %d\n", irq); ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME, cal); if (ret) return ret; /* Read the revision and hardware info to verify hardware access. 
*/ pm_runtime_enable(&pdev->dev); ret = pm_runtime_resume_and_get(&pdev->dev); if (ret) goto error_pm_runtime; cal_get_hwinfo(cal); pm_runtime_put_sync(&pdev->dev); /* Initialize the media device. */ ret = cal_media_init(cal); if (ret < 0) goto error_pm_runtime; /* Create CAMERARX PHYs. */ for (i = 0; i < cal->data->num_csi2_phy; ++i) { cal->phy[i] = cal_camerarx_create(cal, i); if (IS_ERR(cal->phy[i])) { ret = PTR_ERR(cal->phy[i]); cal->phy[i] = NULL; goto error_camerarx; } if (cal->phy[i]->source_node) connected = true; } if (!connected) { cal_err(cal, "Neither port is configured, no point in staying up\n"); ret = -ENODEV; goto error_camerarx; } /* Create contexts. */ for (i = 0; i < cal->data->num_csi2_phy; ++i) { if (!cal->phy[i]->source_node) continue; cal->ctx[cal->num_contexts] = cal_ctx_create(cal, i); if (!cal->ctx[cal->num_contexts]) { cal_err(cal, "Failed to create context %u\n", cal->num_contexts); ret = -ENODEV; goto error_context; } cal->num_contexts++; } /* Register the media device. 
*/ ret = cal_media_register(cal); if (ret) goto error_context; return 0; error_context: for (i = 0; i < cal->num_contexts; i++) cal_ctx_destroy(cal->ctx[i]); error_camerarx: for (i = 0; i < cal->data->num_csi2_phy; i++) cal_camerarx_destroy(cal->phy[i]); cal_media_cleanup(cal); error_pm_runtime: pm_runtime_disable(&pdev->dev); return ret; } static void cal_remove(struct platform_device *pdev) { struct cal_dev *cal = platform_get_drvdata(pdev); unsigned int i; int ret; cal_dbg(1, cal, "Removing %s\n", CAL_MODULE_NAME); ret = pm_runtime_resume_and_get(&pdev->dev); cal_media_unregister(cal); for (i = 0; i < cal->data->num_csi2_phy; i++) cal_camerarx_disable(cal->phy[i]); for (i = 0; i < cal->num_contexts; i++) cal_ctx_destroy(cal->ctx[i]); for (i = 0; i < cal->data->num_csi2_phy; i++) cal_camerarx_destroy(cal->phy[i]); cal_media_cleanup(cal); if (ret >= 0) pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); } static int cal_runtime_resume(struct device *dev) { struct cal_dev *cal = dev_get_drvdata(dev); unsigned int i; u32 val; if (cal->data->flags & DRA72_CAL_PRE_ES2_LDO_DISABLE) { /* * Apply errata on both port everytime we (re-)enable * the clock */ for (i = 0; i < cal->data->num_csi2_phy; i++) cal_camerarx_i913_errata(cal->phy[i]); } /* * Enable global interrupts that are not related to a particular * CAMERARAX or context. 
*/ cal_write(cal, CAL_HL_IRQENABLE_SET(0), CAL_HL_IRQ_OCPO_ERR_MASK); val = cal_read(cal, CAL_CTRL); cal_set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK); cal_set_field(&val, 0xf, CAL_CTRL_TAGCNT_MASK); cal_set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED, CAL_CTRL_POSTED_WRITES_MASK); cal_set_field(&val, 0xff, CAL_CTRL_MFLAGL_MASK); cal_set_field(&val, 0xff, CAL_CTRL_MFLAGH_MASK); cal_write(cal, CAL_CTRL, val); cal_dbg(3, cal, "CAL_CTRL = 0x%08x\n", cal_read(cal, CAL_CTRL)); return 0; } static const struct dev_pm_ops cal_pm_ops = { .runtime_resume = cal_runtime_resume, }; static struct platform_driver cal_pdrv = { .probe = cal_probe, .remove_new = cal_remove, .driver = { .name = CAL_MODULE_NAME, .pm = &cal_pm_ops, .of_match_table = cal_of_match, }, }; module_platform_driver(cal_pdrv);
linux-master
drivers/media/platform/ti/cal/cal.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI Camera Access Layer (CAL) - Video Device
 *
 * Copyright (c) 2015-2020 Texas Instruments Inc.
 *
 * Authors:
 *	Benoit Parrot <[email protected]>
 *	Laurent Pinchart <[email protected]>
 */

#include <linux/ioctl.h>
#include <linux/pm_runtime.h>
#include <linux/videodev2.h>

#include <media/media-device.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>

#include "cal.h"

/*
 * Print Four-character-code (FOURCC).
 *
 * NOTE(review): returns a pointer to a static buffer, so the result is only
 * valid until the next call and the helper is not reentrant. It is only used
 * in debug prints here.
 */
static char *fourcc_to_str(u32 fmt)
{
	static char code[5];

	code[0] = (unsigned char)(fmt & 0xff);
	code[1] = (unsigned char)((fmt >> 8) & 0xff);
	code[2] = (unsigned char)((fmt >> 16) & 0xff);
	code[3] = (unsigned char)((fmt >> 24) & 0xff);
	code[4] = '\0';

	return code;
}

/* ------------------------------------------------------------------
 *	V4L2 Common IOCTLs
 * ------------------------------------------------------------------
 */

/* VIDIOC_QUERYCAP: report driver and card names. */
static int cal_querycap(struct file *file, void *priv,
			struct v4l2_capability *cap)
{
	strscpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
	strscpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));

	return 0;
}

/* VIDIOC_G_FMT: return the currently configured capture format. */
static int cal_g_fmt_vid_cap(struct file *file, void *priv,
			     struct v4l2_format *f)
{
	struct cal_ctx *ctx = video_drvdata(file);

	*f = ctx->v_fmt;

	return 0;
}

/* ------------------------------------------------------------------
 *	V4L2 Video Node Centric IOCTLs
 * ------------------------------------------------------------------
 */

/* Look up an active format entry by V4L2 pixel format (FOURCC). */
static const struct cal_format_info *find_format_by_pix(struct cal_ctx *ctx,
							u32 pixelformat)
{
	const struct cal_format_info *fmtinfo;
	unsigned int k;

	for (k = 0; k < ctx->num_active_fmt; k++) {
		fmtinfo = ctx->active_fmt[k];
		if (fmtinfo->fourcc == pixelformat)
			return fmtinfo;
	}

	return NULL;
}

/* Look up an active format entry by media bus code. */
static const struct cal_format_info *find_format_by_code(struct cal_ctx *ctx,
							 u32 code)
{
	const struct cal_format_info *fmtinfo;
	unsigned int k;

	for (k = 0; k < ctx->num_active_fmt; k++) {
		fmtinfo = ctx->active_fmt[k];
		if (fmtinfo->code == code)
			return fmtinfo;
	}

	return NULL;
}

/* VIDIOC_ENUM_FMT (legacy API): enumerate the negotiated active formats. */
static int cal_legacy_enum_fmt_vid_cap(struct file *file, void *priv,
				       struct v4l2_fmtdesc *f)
{
	struct cal_ctx *ctx = video_drvdata(file);
	const struct cal_format_info *fmtinfo;

	if (f->index >= ctx->num_active_fmt)
		return -EINVAL;

	fmtinfo = ctx->active_fmt[f->index];

	f->pixelformat = fmtinfo->fourcc;
	f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	return 0;
}

/* Read the current active format from the source subdev (pad 0). */
static int __subdev_get_format(struct cal_ctx *ctx,
			       struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_subdev_format sd_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 0,
	};
	struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
	int ret;

	ret = v4l2_subdev_call(ctx->phy->source, pad, get_fmt, NULL, &sd_fmt);
	if (ret)
		return ret;

	*fmt = *mbus_fmt;

	ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
		fmt->width, fmt->height, fmt->code);

	return 0;
}

/* Apply an active format on the source subdev (pad 0). */
static int __subdev_set_format(struct cal_ctx *ctx,
			       struct v4l2_mbus_framefmt *fmt)
{
	struct v4l2_subdev_format sd_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = 0,
	};
	struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
	int ret;

	*mbus_fmt = *fmt;

	ret = v4l2_subdev_call(ctx->phy->source, pad, set_fmt, NULL, &sd_fmt);
	if (ret)
		return ret;

	ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
		fmt->width, fmt->height, fmt->code);

	return 0;
}

/*
 * Align width/height to hardware limits and derive bytesperline and
 * sizeimage for the given format.
 */
static void cal_calc_format_size(struct cal_ctx *ctx,
				 const struct cal_format_info *fmtinfo,
				 struct v4l2_format *f)
{
	u32 bpl, max_width;

	/*
	 * Maximum width is bound by the DMA max width in bytes.
	 * We need to recalculate the actual max width depending on the
	 * number of bytes per pixels required.
	 */
	max_width = CAL_MAX_WIDTH_BYTES / (ALIGN(fmtinfo->bpp, 8) >> 3);
	v4l_bound_align_image(&f->fmt.pix.width, 48, max_width, 2,
			      &f->fmt.pix.height, 32, CAL_MAX_HEIGHT_LINES,
			      0, 0);

	bpl = (f->fmt.pix.width * ALIGN(fmtinfo->bpp, 8)) >> 3;
	f->fmt.pix.bytesperline = ALIGN(bpl, 16);

	f->fmt.pix.sizeimage = f->fmt.pix.height *
			       f->fmt.pix.bytesperline;

	ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
		__func__, fourcc_to_str(f->fmt.pix.pixelformat),
		f->fmt.pix.width, f->fmt.pix.height,
		f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
}

/*
 * VIDIOC_TRY_FMT (legacy API): validate a format against the source subdev's
 * enumerated frame sizes, falling back to the current format when no match
 * is found.
 */
static int cal_legacy_try_fmt_vid_cap(struct file *file, void *priv,
				      struct v4l2_format *f)
{
	struct cal_ctx *ctx = video_drvdata(file);
	const struct cal_format_info *fmtinfo;
	struct v4l2_subdev_frame_size_enum fse = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int found;

	fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
	if (!fmtinfo) {
		ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
			f->fmt.pix.pixelformat);

		/* Just get the first one enumerated */
		fmtinfo = ctx->active_fmt[0];
		f->fmt.pix.pixelformat = fmtinfo->fourcc;
	}

	f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;

	/* check for/find a valid width/height */
	found = false;
	fse.pad = 0;
	fse.code = fmtinfo->code;
	for (fse.index = 0; ; fse.index++) {
		int ret;

		ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_size,
				       NULL, &fse);
		if (ret)
			break;

		if ((f->fmt.pix.width == fse.max_width) &&
		    (f->fmt.pix.height == fse.max_height)) {
			found = true;
			break;
		} else if ((f->fmt.pix.width >= fse.min_width) &&
			   (f->fmt.pix.width <= fse.max_width) &&
			   (f->fmt.pix.height >= fse.min_height) &&
			   (f->fmt.pix.height <= fse.max_height)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* use existing values as default */
		f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
		f->fmt.pix.height = ctx->v_fmt.fmt.pix.height;
	}

	/*
	 * Use current colorspace for now, it will get
	 * updated properly during s_fmt
	 */
	f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
	cal_calc_format_size(ctx, fmtinfo, f);
	return 0;
}

/*
 * VIDIOC_S_FMT (legacy API): try the format, propagate it to the source
 * subdev and the CAMERARX, then store it as the active capture format.
 */
static int cal_legacy_s_fmt_vid_cap(struct file *file, void *priv,
				    struct v4l2_format *f)
{
	struct cal_ctx *ctx = video_drvdata(file);
	struct vb2_queue *q = &ctx->vb_vidq;
	struct v4l2_subdev_format sd_fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.pad = CAL_CAMERARX_PAD_SINK,
	};
	const struct cal_format_info *fmtinfo;
	int ret;

	if (vb2_is_busy(q)) {
		ctx_dbg(3, ctx, "%s device busy\n", __func__);
		return -EBUSY;
	}

	ret = cal_legacy_try_fmt_vid_cap(file, priv, f);
	if (ret < 0)
		return ret;

	/* try_fmt guarantees the pixel format is one of the active set. */
	fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat);

	v4l2_fill_mbus_format(&sd_fmt.format, &f->fmt.pix, fmtinfo->code);

	ret = __subdev_set_format(ctx, &sd_fmt.format);
	if (ret)
		return ret;

	/* Just double check nothing has gone wrong */
	if (sd_fmt.format.code != fmtinfo->code) {
		ctx_dbg(3, ctx,
			"%s subdev changed format on us, this should not happen\n",
			__func__);
		return -EINVAL;
	}

	v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &sd_fmt.format);
	ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ctx->v_fmt.fmt.pix.pixelformat = fmtinfo->fourcc;
	ctx->v_fmt.fmt.pix.field = sd_fmt.format.field;
	cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt);

	/* Propagate the format to the CAMERARX sink pad as well. */
	v4l2_subdev_call(&ctx->phy->subdev, pad, set_fmt, NULL, &sd_fmt);

	ctx->fmtinfo = fmtinfo;
	*f = ctx->v_fmt;

	return 0;
}

/* VIDIOC_ENUM_FRAMESIZES (legacy API): forward to the source subdev. */
static int cal_legacy_enum_framesizes(struct file *file, void *fh,
				      struct v4l2_frmsizeenum *fsize)
{
	struct cal_ctx *ctx = video_drvdata(file);
	const struct cal_format_info *fmtinfo;
	struct v4l2_subdev_frame_size_enum fse = {
		.index = fsize->index,
		.pad = 0,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	/* check for valid format */
	fmtinfo = find_format_by_pix(ctx, fsize->pixel_format);
	if (!fmtinfo) {
		ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
			fsize->pixel_format);
		return -EINVAL;
	}

	fse.code = fmtinfo->code;

	ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_size, NULL,
			       &fse);
	if (ret)
		return ret;

	ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
		__func__, fse.index, fse.code, fse.min_width, fse.max_width,
		fse.min_height, fse.max_height);

	fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
	fsize->discrete.width = fse.max_width;
	fsize->discrete.height = fse.max_height;

	return 0;
}

/* VIDIOC_ENUM_INPUT: a single camera input is exposed. */
static int cal_legacy_enum_input(struct file *file, void *priv,
				 struct v4l2_input *inp)
{
	if (inp->index > 0)
		return -EINVAL;

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	sprintf(inp->name, "Camera %u", inp->index);
	return 0;
}

static int cal_legacy_g_input(struct file *file, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}

static int cal_legacy_s_input(struct file *file, void *priv, unsigned int i)
{
	return i > 0 ? -EINVAL : 0;
}

/* timeperframe is arbitrary and continuous */
static int cal_legacy_enum_frameintervals(struct file *file, void *priv,
					  struct v4l2_frmivalenum *fival)
{
	struct cal_ctx *ctx = video_drvdata(file);
	const struct cal_format_info *fmtinfo;
	struct v4l2_subdev_frame_interval_enum fie = {
		.index = fival->index,
		.width = fival->width,
		.height = fival->height,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	fmtinfo = find_format_by_pix(ctx, fival->pixel_format);
	if (!fmtinfo)
		return -EINVAL;

	fie.code = fmtinfo->code;

	ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_interval,
			       NULL, &fie);
	if (ret)
		return ret;

	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = fie.interval;

	return 0;
}

/* VIDIOC_G_PARM: delegate to the source subdev. */
static int cal_legacy_g_parm(struct file *file, void *fh,
			     struct v4l2_streamparm *a)
{
	struct cal_ctx *ctx = video_drvdata(file);

	return v4l2_g_parm_cap(video_devdata(file), ctx->phy->source, a);
}

/* VIDIOC_S_PARM: delegate to the source subdev. */
static int cal_legacy_s_parm(struct file *file, void *fh,
			     struct v4l2_streamparm *a)
{
	struct cal_ctx *ctx = video_drvdata(file);

	return v4l2_s_parm_cap(video_devdata(file), ctx->phy->source, a);
}

/* ioctl set used when the media controller API is disabled. */
static const struct v4l2_ioctl_ops cal_ioctl_legacy_ops = {
	.vidioc_querycap      = cal_querycap,
	.vidioc_enum_fmt_vid_cap  = cal_legacy_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap     = cal_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap   = cal_legacy_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap     = cal_legacy_s_fmt_vid_cap,
	.vidioc_enum_framesizes   = cal_legacy_enum_framesizes,
	.vidioc_reqbufs       = vb2_ioctl_reqbufs,
	.vidioc_create_bufs   = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
	.vidioc_querybuf      = vb2_ioctl_querybuf,
	.vidioc_qbuf          = vb2_ioctl_qbuf,
	.vidioc_dqbuf         = vb2_ioctl_dqbuf,
	.vidioc_expbuf        = vb2_ioctl_expbuf,
	.vidioc_enum_input    = cal_legacy_enum_input,
	.vidioc_g_input       = cal_legacy_g_input,
	.vidioc_s_input       = cal_legacy_s_input,
	.vidioc_enum_frameintervals = cal_legacy_enum_frameintervals,
	.vidioc_streamon      = vb2_ioctl_streamon,
	.vidioc_streamoff     = vb2_ioctl_streamoff,
	.vidioc_log_status    = v4l2_ctrl_log_status,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
	.vidioc_g_parm        = cal_legacy_g_parm,
	.vidioc_s_parm        = cal_legacy_s_parm,
};

/* ------------------------------------------------------------------
 *	V4L2 Media Controller Centric IOCTLs
 * ------------------------------------------------------------------
 */

/*
 * VIDIOC_ENUM_FMT (MC API): enumerate all driver formats, optionally
 * filtered by media bus code.
 */
static int cal_mc_enum_fmt_vid_cap(struct file *file, void  *priv,
				   struct v4l2_fmtdesc *f)
{
	unsigned int i;
	unsigned int idx;

	if (f->index >= cal_num_formats)
		return -EINVAL;

	idx = 0;

	for (i = 0; i < cal_num_formats; ++i) {
		if (f->mbus_code && cal_formats[i].code != f->mbus_code)
			continue;

		if (idx == f->index) {
			f->pixelformat = cal_formats[i].fourcc;
			f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
			return 0;
		}

		idx++;
	}

	return -EINVAL;
}

/* Clamp and adjust a requested format to hardware constraints (MC API). */
static void cal_mc_try_fmt(struct cal_ctx *ctx, struct v4l2_format *f,
			   const struct cal_format_info **info)
{
	struct v4l2_pix_format *format = &f->fmt.pix;
	const struct cal_format_info *fmtinfo;
	unsigned int bpp;

	/*
	 * Default to the first format if the requested pixel format code isn't
	 * supported.
	 */
	fmtinfo = cal_format_by_fourcc(f->fmt.pix.pixelformat);
	if (!fmtinfo)
		fmtinfo = &cal_formats[0];

	/*
	 * Clamp the size, update the pixel format. The field and colorspace are
	 * accepted as-is, except for V4L2_FIELD_ANY that is turned into
	 * V4L2_FIELD_NONE.
	 */
	bpp = ALIGN(fmtinfo->bpp, 8);

	format->width = clamp_t(unsigned int, format->width,
				CAL_MIN_WIDTH_BYTES * 8 / bpp,
				CAL_MAX_WIDTH_BYTES * 8 / bpp);
	format->height = clamp_t(unsigned int, format->height,
				 CAL_MIN_HEIGHT_LINES, CAL_MAX_HEIGHT_LINES);
	format->pixelformat = fmtinfo->fourcc;

	if (format->field == V4L2_FIELD_ANY)
		format->field = V4L2_FIELD_NONE;

	/*
	 * Calculate the number of bytes per line and the image size. The
	 * hardware stores the stride as a number of 16 bytes words, in a
	 * signed 15-bit value. Only 14 bits are thus usable.
	 */
	format->bytesperline = ALIGN(clamp(format->bytesperline,
					   format->width * bpp / 8,
					   ((1U << 14) - 1) * 16), 16);

	format->sizeimage = format->height * format->bytesperline;

	format->colorspace = ctx->v_fmt.fmt.pix.colorspace;

	if (info)
		*info = fmtinfo;

	ctx_dbg(3, ctx, "%s: %s %ux%u (bytesperline %u sizeimage %u)\n",
		__func__, fourcc_to_str(format->pixelformat),
		format->width, format->height,
		format->bytesperline, format->sizeimage);
}

/* VIDIOC_TRY_FMT (MC API). */
static int cal_mc_try_fmt_vid_cap(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	struct cal_ctx *ctx = video_drvdata(file);

	cal_mc_try_fmt(ctx, f, NULL);
	return 0;
}

/* VIDIOC_S_FMT (MC API): store the adjusted format as the active one. */
static int cal_mc_s_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct cal_ctx *ctx = video_drvdata(file);
	const struct cal_format_info *fmtinfo;

	if (vb2_is_busy(&ctx->vb_vidq)) {
		ctx_dbg(3, ctx, "%s device busy\n", __func__);
		return -EBUSY;
	}

	cal_mc_try_fmt(ctx, f, &fmtinfo);

	ctx->v_fmt = *f;
	ctx->fmtinfo = fmtinfo;

	return 0;
}

/* VIDIOC_ENUM_FRAMESIZES (MC API): report the stepwise hardware limits. */
static int cal_mc_enum_framesizes(struct file *file, void *fh,
				  struct v4l2_frmsizeenum *fsize)
{
	struct cal_ctx *ctx = video_drvdata(file);
	const struct cal_format_info *fmtinfo;
	unsigned int bpp;

	if (fsize->index > 0)
		return -EINVAL;

	fmtinfo = cal_format_by_fourcc(fsize->pixel_format);
	if (!fmtinfo) {
		ctx_dbg(3, ctx, "Invalid pixel format 0x%08x\n",
			fsize->pixel_format);
		return -EINVAL;
	}

	bpp = ALIGN(fmtinfo->bpp, 8);

	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = CAL_MIN_WIDTH_BYTES * 8 / bpp;
	fsize->stepwise.max_width = CAL_MAX_WIDTH_BYTES * 8 / bpp;
	fsize->stepwise.step_width = 64 / bpp;
	fsize->stepwise.min_height = CAL_MIN_HEIGHT_LINES;
	fsize->stepwise.max_height = CAL_MAX_HEIGHT_LINES;
	fsize->stepwise.step_height = 1;

	return 0;
}

/* ioctl set used when the media controller API is enabled. */
static const struct v4l2_ioctl_ops cal_ioctl_mc_ops = {
	.vidioc_querycap      = cal_querycap,
	.vidioc_enum_fmt_vid_cap  = cal_mc_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap     = cal_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap   = cal_mc_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap     = cal_mc_s_fmt_vid_cap,
	.vidioc_enum_framesizes   = cal_mc_enum_framesizes,
	.vidioc_reqbufs       = vb2_ioctl_reqbufs,
	.vidioc_create_bufs   = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
	.vidioc_querybuf      = vb2_ioctl_querybuf,
	.vidioc_qbuf          = vb2_ioctl_qbuf,
	.vidioc_dqbuf         = vb2_ioctl_dqbuf,
	.vidioc_expbuf        = vb2_ioctl_expbuf,
	.vidioc_streamon      = vb2_ioctl_streamon,
	.vidioc_streamoff     = vb2_ioctl_streamoff,
	.vidioc_log_status    = v4l2_ctrl_log_status,
};

/* ------------------------------------------------------------------
 *	videobuf2 Common Operations
 * ------------------------------------------------------------------
 */

/* vb2 queue_setup: require at least 3 buffers of sizeimage bytes. */
static int cal_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct cal_ctx *ctx = vb2_get_drv_priv(vq);
	unsigned int size = ctx->v_fmt.fmt.pix.sizeimage;

	if (vq->num_buffers + *nbuffers < 3)
		*nbuffers = 3 - vq->num_buffers;

	if (*nplanes) {
		if (sizes[0] < size)
			return -EINVAL;
		size = sizes[0];
	}

	*nplanes = 1;
	sizes[0] = size;

	ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);

	return 0;
}

/* vb2 buf_prepare: verify the plane can hold a full image. */
static int cal_buffer_prepare(struct vb2_buffer *vb)
{
	struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct cal_buffer *buf = container_of(vb, struct cal_buffer,
					      vb.vb2_buf);
	unsigned long size;

	size = ctx->v_fmt.fmt.pix.sizeimage;
	if (vb2_plane_size(vb, 0) < size) {
		ctx_err(ctx,
			"data will not fit into plane (%lu < %lu)\n",
			vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
	return 0;
}

/* vb2 buf_queue: add the buffer to the DMA pending queue. */
static void cal_buffer_queue(struct vb2_buffer *vb)
{
	struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct cal_buffer *buf = container_of(vb, struct cal_buffer,
					      vb.vb2_buf);
	unsigned long flags;

	/* recheck locking */
	spin_lock_irqsave(&ctx->dma.lock, flags);
	list_add_tail(&buf->list, &ctx->dma.queue);
	spin_unlock_irqrestore(&ctx->dma.lock, flags);
}

/* Return all queued, pending and active buffers to vb2 in 'state'. */
static void cal_release_buffers(struct cal_ctx *ctx,
				enum vb2_buffer_state state)
{
	struct cal_buffer *buf, *tmp;

	/* Release all queued buffers. */
	spin_lock_irq(&ctx->dma.lock);

	list_for_each_entry_safe(buf, tmp, &ctx->dma.queue, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, state);
	}

	if (ctx->dma.pending) {
		vb2_buffer_done(&ctx->dma.pending->vb.vb2_buf, state);
		ctx->dma.pending = NULL;
	}

	if (ctx->dma.active) {
		vb2_buffer_done(&ctx->dma.active->vb.vb2_buf, state);
		ctx->dma.active = NULL;
	}

	spin_unlock_irq(&ctx->dma.lock);
}

/* ------------------------------------------------------------------
 *	videobuf2 Operations
 * ------------------------------------------------------------------
 */

/*
 * Verify that the video node format matches the format currently configured
 * on the connected CAMERARX pad. Returns -EPIPE on mismatch.
 */
static int cal_video_check_format(struct cal_ctx *ctx)
{
	const struct v4l2_mbus_framefmt *format;
	struct v4l2_subdev_state *state;
	struct media_pad *remote_pad;
	int ret = 0;

	remote_pad = media_pad_remote_pad_first(&ctx->pad);
	if (!remote_pad)
		return -ENODEV;

	state = v4l2_subdev_lock_and_get_active_state(&ctx->phy->subdev);

	format = v4l2_subdev_get_pad_format(&ctx->phy->subdev, state,
					    remote_pad->index);
	if (!format) {
		ret = -EINVAL;
		goto out;
	}

	if (ctx->fmtinfo->code != format->code ||
	    ctx->v_fmt.fmt.pix.height != format->height ||
	    ctx->v_fmt.fmt.pix.width != format->width ||
	    ctx->v_fmt.fmt.pix.field != format->field) {
		ret = -EPIPE;
		goto out;
	}

out:
	v4l2_subdev_unlock_state(state);
	return ret;
}

/*
 * vb2 start_streaming: start the media pipeline, validate the format,
 * program the first buffer's DMA address and kick off the CAMERARX stream.
 * On failure all buffers are returned in QUEUED state.
 */
static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cal_ctx *ctx = vb2_get_drv_priv(vq);
	struct cal_buffer *buf;
	dma_addr_t addr;
	int ret;

	ret = video_device_pipeline_alloc_start(&ctx->vdev);
	if (ret < 0) {
		ctx_err(ctx, "Failed to start media pipeline: %d\n", ret);
		goto error_release_buffers;
	}

	/*
	 * Verify that the currently configured format matches the output of
	 * the connected CAMERARX.
	 */
	ret = cal_video_check_format(ctx);
	if (ret < 0) {
		ctx_dbg(3, ctx,
			"Format mismatch between CAMERARX and video node\n");
		goto error_pipeline;
	}

	ret = cal_ctx_prepare(ctx);
	if (ret) {
		ctx_err(ctx, "Failed to prepare context: %d\n", ret);
		goto error_pipeline;
	}

	spin_lock_irq(&ctx->dma.lock);
	buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list);
	ctx->dma.active = buf;
	list_del(&buf->list);
	spin_unlock_irq(&ctx->dma.lock);

	addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);

	ret = pm_runtime_resume_and_get(ctx->cal->dev);
	if (ret < 0)
		goto error_pipeline;

	cal_ctx_set_dma_addr(ctx, addr);
	cal_ctx_start(ctx);

	ret = v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 1);
	if (ret)
		goto error_stop;

	if (cal_debug >= 4)
		cal_quickdump_regs(ctx->cal);

	return 0;

error_stop:
	cal_ctx_stop(ctx);
	pm_runtime_put_sync(ctx->cal->dev);
	cal_ctx_unprepare(ctx);

error_pipeline:
	video_device_pipeline_stop(&ctx->vdev);
error_release_buffers:
	cal_release_buffers(ctx, VB2_BUF_STATE_QUEUED);
	return ret;
}

/* vb2 stop_streaming: mirror of cal_start_streaming teardown. */
static void cal_stop_streaming(struct vb2_queue *vq)
{
	struct cal_ctx *ctx = vb2_get_drv_priv(vq);

	cal_ctx_stop(ctx);

	v4l2_subdev_call(&ctx->phy->subdev, video, s_stream, 0);

	pm_runtime_put_sync(ctx->cal->dev);

	cal_ctx_unprepare(ctx);

	cal_release_buffers(ctx, VB2_BUF_STATE_ERROR);

	video_device_pipeline_stop(&ctx->vdev);
}

static const struct vb2_ops cal_video_qops = {
	.queue_setup		= cal_queue_setup,
	.buf_prepare		= cal_buffer_prepare,
	.buf_queue		= cal_buffer_queue,
	.start_streaming	= cal_start_streaming,
	.stop_streaming		= cal_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};

/* ------------------------------------------------------------------
 *	V4L2 Initialization and Registration
 * ------------------------------------------------------------------
 */

static const struct v4l2_file_operations cal_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.poll		= vb2_fop_poll,
	.unlocked_ioctl	= video_ioctl2, /* V4L2 ioctl handler */
	.mmap		= vb2_fop_mmap,
};

/*
 * Legacy API only: enumerate the source subdev's media bus codes, build the
 * list of matching driver formats, and initialize the active format from the
 * subdev's current format.
 */
static int cal_ctx_v4l2_init_formats(struct cal_ctx *ctx)
{
	struct v4l2_mbus_framefmt mbus_fmt;
	const struct cal_format_info *fmtinfo;
	unsigned int i, j, k;
	int ret = 0;

	/* Enumerate sub device formats and enable all matching local formats */
	ctx->active_fmt = devm_kcalloc(ctx->cal->dev, cal_num_formats,
				       sizeof(*ctx->active_fmt), GFP_KERNEL);
	if (!ctx->active_fmt)
		return -ENOMEM;

	ctx->num_active_fmt = 0;

	for (j = 0, i = 0; ; ++j) {
		struct v4l2_subdev_mbus_code_enum mbus_code = {
			.index = j,
			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		};

		ret = v4l2_subdev_call(ctx->phy->source, pad, enum_mbus_code,
				       NULL, &mbus_code);
		if (ret == -EINVAL)
			break;

		if (ret) {
			ctx_err(ctx, "Error enumerating mbus codes in subdev %s: %d\n",
				ctx->phy->source->name, ret);
			return ret;
		}

		ctx_dbg(2, ctx,
			"subdev %s: code: %04x idx: %u\n",
			ctx->phy->source->name, mbus_code.code, j);

		for (k = 0; k < cal_num_formats; k++) {
			fmtinfo = &cal_formats[k];

			if (mbus_code.code == fmtinfo->code) {
				ctx->active_fmt[i] = fmtinfo;
				ctx_dbg(2, ctx,
					"matched fourcc: %s: code: %04x idx: %u\n",
					fourcc_to_str(fmtinfo->fourcc),
					fmtinfo->code, i);
				ctx->num_active_fmt = ++i;
			}
		}
	}

	if (i == 0) {
		ctx_err(ctx, "No suitable format reported by subdev %s\n",
			ctx->phy->source->name);
		return -EINVAL;
	}

	ret = __subdev_get_format(ctx, &mbus_fmt);
	if (ret)
		return ret;

	fmtinfo = find_format_by_code(ctx, mbus_fmt.code);
	if (!fmtinfo) {
		ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
			mbus_fmt.code);
		return -EINVAL;
	}

	/* Save current format */
	v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
	ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ctx->v_fmt.fmt.pix.pixelformat = fmtinfo->fourcc;
	cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt);
	ctx->fmtinfo = fmtinfo;

	return 0;
}

/* MC API only: set a fixed 640x480 UYVY default format. */
static int cal_ctx_v4l2_init_mc_format(struct cal_ctx *ctx)
{
	const struct cal_format_info *fmtinfo;
	struct v4l2_pix_format *pix_fmt = &ctx->v_fmt.fmt.pix;

	fmtinfo = cal_format_by_code(MEDIA_BUS_FMT_UYVY8_1X16);
	if (!fmtinfo)
		return -EINVAL;

	pix_fmt->width = 640;
	pix_fmt->height = 480;
	pix_fmt->field = V4L2_FIELD_NONE;
	pix_fmt->colorspace = V4L2_COLORSPACE_SRGB;
	pix_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
	pix_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
	pix_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
	pix_fmt->pixelformat = fmtinfo->fourcc;

	ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

	/* Save current format */
	cal_calc_format_size(ctx, fmtinfo, &ctx->v_fmt);
	ctx->fmtinfo = fmtinfo;

	return 0;
}

/*
 * Register the context's video device and link it to its CAMERARX source
 * pad. In legacy mode the source subdev's controls are also inherited.
 */
int cal_ctx_v4l2_register(struct cal_ctx *ctx)
{
	struct video_device *vfd = &ctx->vdev;
	int ret;

	if (!cal_mc_api) {
		struct v4l2_ctrl_handler *hdl = &ctx->ctrl_handler;

		ret = cal_ctx_v4l2_init_formats(ctx);
		if (ret) {
			ctx_err(ctx, "Failed to init formats: %d\n", ret);
			return ret;
		}

		ret = v4l2_ctrl_add_handler(hdl, ctx->phy->source->ctrl_handler,
					    NULL, true);
		if (ret < 0) {
			ctx_err(ctx, "Failed to add source ctrl handler\n");
			return ret;
		}
	} else {
		ret = cal_ctx_v4l2_init_mc_format(ctx);
		if (ret) {
			ctx_err(ctx, "Failed to init format: %d\n", ret);
			return ret;
		}
	}

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, cal_video_nr);
	if (ret < 0) {
		ctx_err(ctx, "Failed to register video device\n");
		return ret;
	}

	ret = media_create_pad_link(&ctx->phy->subdev.entity,
				    CAL_CAMERARX_PAD_FIRST_SOURCE,
				    &vfd->entity, 0,
				    MEDIA_LNK_FL_IMMUTABLE |
				    MEDIA_LNK_FL_ENABLED);
	if (ret) {
		ctx_err(ctx, "Failed to create media link for context %u\n",
			ctx->dma_ctx);
		video_unregister_device(vfd);
		return ret;
	}

	ctx_info(ctx, "V4L2 device registered as %s\n",
		 video_device_node_name(vfd));

	return 0;
}

void cal_ctx_v4l2_unregister(struct cal_ctx *ctx)
{
	ctx_dbg(1, ctx, "unregistering %s\n",
		video_device_node_name(&ctx->vdev));

	video_unregister_device(&ctx->vdev);
}

/*
 * Initialize the context's vb2 queue, video device, media pad and (legacy
 * mode) control handler. Counterpart of cal_ctx_v4l2_cleanup().
 */
int cal_ctx_v4l2_init(struct cal_ctx *ctx)
{
	struct video_device *vfd = &ctx->vdev;
	struct vb2_queue *q = &ctx->vb_vidq;
	int ret;

	INIT_LIST_HEAD(&ctx->dma.queue);
	spin_lock_init(&ctx->dma.lock);
	mutex_init(&ctx->mutex);
	init_waitqueue_head(&ctx->dma.wait);

	/* Initialize the vb2 queue. */
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = ctx;
	q->buf_struct_size = sizeof(struct cal_buffer);
	q->ops = &cal_video_qops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = &ctx->mutex;
	q->min_buffers_needed = 3;
	q->dev = ctx->cal->dev;

	ret = vb2_queue_init(q);
	if (ret)
		return ret;

	/* Initialize the video device and media entity. */
	vfd->fops = &cal_fops;
	vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
			   (cal_mc_api ? V4L2_CAP_IO_MC : 0);
	vfd->v4l2_dev = &ctx->cal->v4l2_dev;
	vfd->queue = q;
	snprintf(vfd->name, sizeof(vfd->name), "CAL output %u", ctx->dma_ctx);
	vfd->release = video_device_release_empty;
	vfd->ioctl_ops = cal_mc_api ? &cal_ioctl_mc_ops
				    : &cal_ioctl_legacy_ops;
	vfd->lock = &ctx->mutex;
	video_set_drvdata(vfd, ctx);

	ctx->pad.flags = MEDIA_PAD_FL_SINK;
	ret = media_entity_pads_init(&vfd->entity, 1, &ctx->pad);
	if (ret < 0)
		return ret;

	if (!cal_mc_api) {
		/* Initialize the control handler. */
		struct v4l2_ctrl_handler *hdl = &ctx->ctrl_handler;

		ret = v4l2_ctrl_handler_init(hdl, 11);
		if (ret < 0) {
			ctx_err(ctx, "Failed to init ctrl handler\n");
			goto error;
		}

		vfd->ctrl_handler = hdl;
	}

	return 0;

error:
	media_entity_cleanup(&vfd->entity);
	return ret;
}

void cal_ctx_v4l2_cleanup(struct cal_ctx *ctx)
{
	if (!cal_mc_api)
		v4l2_ctrl_handler_free(&ctx->ctrl_handler);

	media_entity_cleanup(&ctx->vdev.entity);
}
linux-master
drivers/media/platform/ti/cal/cal-video.c
// SPDX-License-Identifier: GPL-2.0-only /* * TI Camera Access Layer (CAL) - CAMERARX * * Copyright (c) 2015-2020 Texas Instruments Inc. * * Authors: * Benoit Parrot <[email protected]> * Laurent Pinchart <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/slab.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-subdev.h> #include "cal.h" #include "cal_regs.h" /* ------------------------------------------------------------------ * I/O Register Accessors * ------------------------------------------------------------------ */ static inline u32 camerarx_read(struct cal_camerarx *phy, u32 offset) { return ioread32(phy->base + offset); } static inline void camerarx_write(struct cal_camerarx *phy, u32 offset, u32 val) { iowrite32(val, phy->base + offset); } /* ------------------------------------------------------------------ * CAMERARX Management * ------------------------------------------------------------------ */ static s64 cal_camerarx_get_ext_link_freq(struct cal_camerarx *phy) { struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 = &phy->endpoint.bus.mipi_csi2; u32 num_lanes = mipi_csi2->num_data_lanes; const struct cal_format_info *fmtinfo; struct v4l2_subdev_state *state; struct v4l2_mbus_framefmt *fmt; u32 bpp; s64 freq; state = v4l2_subdev_get_locked_active_state(&phy->subdev); fmt = v4l2_subdev_get_pad_format(&phy->subdev, state, CAL_CAMERARX_PAD_SINK); fmtinfo = cal_format_by_code(fmt->code); if (!fmtinfo) return -EINVAL; bpp = fmtinfo->bpp; freq = v4l2_get_link_freq(phy->source->ctrl_handler, bpp, 2 * num_lanes); if (freq < 0) { phy_err(phy, "failed to get link freq for subdev '%s'\n", phy->source->name); return freq; } phy_dbg(3, phy, "Source Link Freq: %llu\n", freq); return freq; } static void cal_camerarx_lane_config(struct cal_camerarx *phy) { u32 
val = cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance)); u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK; u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK; struct v4l2_mbus_config_mipi_csi2 *mipi_csi2 = &phy->endpoint.bus.mipi_csi2; int lane; cal_set_field(&val, mipi_csi2->clock_lane + 1, lane_mask); cal_set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask); for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) { /* * Every lane are one nibble apart starting with the * clock followed by the data lanes so shift masks by 4. */ lane_mask <<= 4; polarity_mask <<= 4; cal_set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask); cal_set_field(&val, mipi_csi2->lane_polarities[lane + 1], polarity_mask); } cal_write(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), val); phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", phy->instance, val); } static void cal_camerarx_enable(struct cal_camerarx *phy) { u32 num_lanes = phy->cal->data->camerarx[phy->instance].num_lanes; regmap_field_write(phy->fields[F_CAMMODE], 0); /* Always enable all lanes at the phy control level */ regmap_field_write(phy->fields[F_LANEENABLE], (1 << num_lanes) - 1); /* F_CSI_MODE is not present on every architecture */ if (phy->fields[F_CSI_MODE]) regmap_field_write(phy->fields[F_CSI_MODE], 1); regmap_field_write(phy->fields[F_CTRLCLKEN], 1); } void cal_camerarx_disable(struct cal_camerarx *phy) { regmap_field_write(phy->fields[F_CTRLCLKEN], 0); } /* * TCLK values are OK at their reset values */ #define TCLK_TERM 0 #define TCLK_MISS 1 #define TCLK_SETTLE 14 static void cal_camerarx_config(struct cal_camerarx *phy, s64 link_freq) { unsigned int reg0, reg1; unsigned int ths_term, ths_settle; /* DPHY timing configuration */ /* THS_TERM: Programmed value = floor(20 ns/DDRClk period) */ ths_term = div_s64(20 * link_freq, 1000 * 1000 * 1000); phy_dbg(1, phy, "ths_term: %d (0x%02x)\n", ths_term, ths_term); /* THS_SETTLE: Programmed value = floor(105 ns/DDRClk 
period) + 4 */ ths_settle = div_s64(105 * link_freq, 1000 * 1000 * 1000) + 4; phy_dbg(1, phy, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle); reg0 = camerarx_read(phy, CAL_CSI2_PHY_REG0); cal_set_field(&reg0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK); cal_set_field(&reg0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK); cal_set_field(&reg0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK); phy_dbg(1, phy, "CSI2_%d_REG0 = 0x%08x\n", phy->instance, reg0); camerarx_write(phy, CAL_CSI2_PHY_REG0, reg0); reg1 = camerarx_read(phy, CAL_CSI2_PHY_REG1); cal_set_field(&reg1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK); cal_set_field(&reg1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK); cal_set_field(&reg1, TCLK_MISS, CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK); cal_set_field(&reg1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK); phy_dbg(1, phy, "CSI2_%d_REG1 = 0x%08x\n", phy->instance, reg1); camerarx_write(phy, CAL_CSI2_PHY_REG1, reg1); } static void cal_camerarx_power(struct cal_camerarx *phy, bool enable) { u32 target_state; unsigned int i; target_state = enable ? CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON : CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF; cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), target_state, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK); for (i = 0; i < 10; i++) { u32 current_state; current_state = cal_read_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK); if (current_state == target_state) break; usleep_range(1000, 1100); } if (i == 10) phy_err(phy, "Failed to power %s complexio\n", enable ? 
"up" : "down"); } static void cal_camerarx_wait_reset(struct cal_camerarx *phy) { unsigned long timeout; timeout = jiffies + msecs_to_jiffies(750); while (time_before(jiffies, timeout)) { if (cal_read_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) == CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED) break; usleep_range(500, 5000); } if (cal_read_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK) != CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED) phy_err(phy, "Timeout waiting for Complex IO reset done\n"); } static void cal_camerarx_wait_stop_state(struct cal_camerarx *phy) { unsigned long timeout; timeout = jiffies + msecs_to_jiffies(750); while (time_before(jiffies, timeout)) { if (cal_read_field(phy->cal, CAL_CSI2_TIMING(phy->instance), CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) == 0) break; usleep_range(500, 5000); } if (cal_read_field(phy->cal, CAL_CSI2_TIMING(phy->instance), CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK) != 0) phy_err(phy, "Timeout waiting for stop state\n"); } static void cal_camerarx_enable_irqs(struct cal_camerarx *phy) { const u32 cio_err_mask = CAL_CSI2_COMPLEXIO_IRQ_LANE_ERRORS_MASK | CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK | CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK | CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK; const u32 vc_err_mask = CAL_CSI2_VC_IRQ_CS_IRQ_MASK(0) | CAL_CSI2_VC_IRQ_CS_IRQ_MASK(1) | CAL_CSI2_VC_IRQ_CS_IRQ_MASK(2) | CAL_CSI2_VC_IRQ_CS_IRQ_MASK(3) | CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(0) | CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(1) | CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(2) | CAL_CSI2_VC_IRQ_ECC_CORRECTION_IRQ_MASK(3); /* Enable CIO & VC error IRQs. 
*/ cal_write(phy->cal, CAL_HL_IRQENABLE_SET(0), CAL_HL_IRQ_CIO_MASK(phy->instance) | CAL_HL_IRQ_VC_MASK(phy->instance)); cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance), cio_err_mask); cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(phy->instance), vc_err_mask); } static void cal_camerarx_disable_irqs(struct cal_camerarx *phy) { /* Disable CIO error irqs */ cal_write(phy->cal, CAL_HL_IRQENABLE_CLR(0), CAL_HL_IRQ_CIO_MASK(phy->instance) | CAL_HL_IRQ_VC_MASK(phy->instance)); cal_write(phy->cal, CAL_CSI2_COMPLEXIO_IRQENABLE(phy->instance), 0); cal_write(phy->cal, CAL_CSI2_VC_IRQENABLE(phy->instance), 0); } static void cal_camerarx_ppi_enable(struct cal_camerarx *phy) { cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance), 1, CAL_CSI2_PPI_CTRL_ECC_EN_MASK); cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance), 1, CAL_CSI2_PPI_CTRL_IF_EN_MASK); } static void cal_camerarx_ppi_disable(struct cal_camerarx *phy) { cal_write_field(phy->cal, CAL_CSI2_PPI_CTRL(phy->instance), 0, CAL_CSI2_PPI_CTRL_IF_EN_MASK); } static int cal_camerarx_start(struct cal_camerarx *phy) { s64 link_freq; u32 sscounter; u32 val; int ret; if (phy->enable_count > 0) { phy->enable_count++; return 0; } link_freq = cal_camerarx_get_ext_link_freq(phy); if (link_freq < 0) return link_freq; ret = v4l2_subdev_call(phy->source, core, s_power, 1); if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) { phy_err(phy, "power on failed in subdev\n"); return ret; } cal_camerarx_enable_irqs(phy); /* * CSI-2 PHY Link Initialization Sequence, according to the DRA74xP / * DRA75xP / DRA76xP / DRA77xP TRM. The DRA71x / DRA72x and the AM65x / * DRA80xM TRMs have a slightly simplified sequence. */ /* * 1. Configure all CSI-2 low level protocol registers to be ready to * receive signals/data from the CSI-2 PHY. * * i.-v. Configure the lanes position and polarity. */ cal_camerarx_lane_config(phy); /* * vi.-vii. Configure D-PHY mode, enable the required lanes and * enable the CAMERARX clock. 
*/ cal_camerarx_enable(phy); /* * 2. CSI PHY and link initialization sequence. * * a. Deassert the CSI-2 PHY reset. Do not wait for reset completion * at this point, as it requires the external source to send the * CSI-2 HS clock. */ cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK); phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x De-assert Complex IO Reset\n", phy->instance, cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance))); /* Dummy read to allow SCP reset to complete. */ camerarx_read(phy, CAL_CSI2_PHY_REG0); /* Program the PHY timing parameters. */ cal_camerarx_config(phy, link_freq); /* * b. Assert the FORCERXMODE signal. * * The stop-state-counter is based on fclk cycles, and we always use * the x16 and x4 settings, so stop-state-timeout = * fclk-cycle * 16 * 4 * counter. * * Stop-state-timeout must be more than 100us as per CSI-2 spec, so we * calculate a timeout that's 100us (rounding up). */ sscounter = DIV_ROUND_UP(clk_get_rate(phy->cal->fclk), 10000 * 16 * 4); val = cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance)); cal_set_field(&val, 1, CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK); cal_set_field(&val, 1, CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK); cal_set_field(&val, sscounter, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK); cal_write(phy->cal, CAL_CSI2_TIMING(phy->instance), val); phy_dbg(3, phy, "CAL_CSI2_TIMING(%d) = 0x%08x Stop States\n", phy->instance, cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance))); /* Assert the FORCERXMODE signal. */ cal_write_field(phy->cal, CAL_CSI2_TIMING(phy->instance), 1, CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK); phy_dbg(3, phy, "CAL_CSI2_TIMING(%d) = 0x%08x Force RXMODE\n", phy->instance, cal_read(phy->cal, CAL_CSI2_TIMING(phy->instance))); /* * c. Connect pull-down on CSI-2 PHY link (using pad control). * * This is not required on DRA71x, DRA72x, AM65x and DRA80xM. Not * implemented. */ /* * d. 
Power up the CSI-2 PHY. * e. Check whether the state status reaches the ON state. */ cal_camerarx_power(phy, true); /* * Start the source to enable the CSI-2 HS clock. We can now wait for * CSI-2 PHY reset to complete. */ ret = v4l2_subdev_call(phy->source, video, s_stream, 1); if (ret) { v4l2_subdev_call(phy->source, core, s_power, 0); cal_camerarx_disable_irqs(phy); phy_err(phy, "stream on failed in subdev\n"); return ret; } cal_camerarx_wait_reset(phy); /* f. Wait for STOPSTATE=1 for all enabled lane modules. */ cal_camerarx_wait_stop_state(phy); phy_dbg(1, phy, "CSI2_%u_REG1 = 0x%08x (bits 31-28 should be set)\n", phy->instance, camerarx_read(phy, CAL_CSI2_PHY_REG1)); /* * g. Disable pull-down on CSI-2 PHY link (using pad control). * * This is not required on DRA71x, DRA72x, AM65x and DRA80xM. Not * implemented. */ /* Finally, enable the PHY Protocol Interface (PPI). */ cal_camerarx_ppi_enable(phy); phy->enable_count++; return 0; } static void cal_camerarx_stop(struct cal_camerarx *phy) { int ret; if (--phy->enable_count > 0) return; cal_camerarx_ppi_disable(phy); cal_camerarx_disable_irqs(phy); cal_camerarx_power(phy, false); /* Assert Complex IO Reset */ cal_write_field(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance), CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK); phy_dbg(3, phy, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x Complex IO in Reset\n", phy->instance, cal_read(phy->cal, CAL_CSI2_COMPLEXIO_CFG(phy->instance))); /* Disable the phy */ cal_camerarx_disable(phy); if (v4l2_subdev_call(phy->source, video, s_stream, 0)) phy_err(phy, "stream off failed in subdev\n"); ret = v4l2_subdev_call(phy->source, core, s_power, 0); if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) phy_err(phy, "power off failed in subdev\n"); } /* * Errata i913: CSI2 LDO Needs to be disabled when module is powered on * * Enabling CSI2 LDO shorts it to core supply. 
It is crucial the 2 CSI2 * LDOs on the device are disabled if CSI-2 module is powered on * (0x4845 B304 | 0x4845 B384 [28:27] = 0x1) or in ULPS (0x4845 B304 * | 0x4845 B384 [28:27] = 0x2) mode. Common concerns include: high * current draw on the module supply in active mode. * * Errata does not apply when CSI-2 module is powered off * (0x4845 B304 | 0x4845 B384 [28:27] = 0x0). * * SW Workaround: * Set the following register bits to disable the LDO, * which is essentially CSI2 REG10 bit 6: * * Core 0: 0x4845 B828 = 0x0000 0040 * Core 1: 0x4845 B928 = 0x0000 0040 */ void cal_camerarx_i913_errata(struct cal_camerarx *phy) { u32 reg10 = camerarx_read(phy, CAL_CSI2_PHY_REG10); cal_set_field(&reg10, 1, CAL_CSI2_PHY_REG10_I933_LDO_DISABLE_MASK); phy_dbg(1, phy, "CSI2_%d_REG10 = 0x%08x\n", phy->instance, reg10); camerarx_write(phy, CAL_CSI2_PHY_REG10, reg10); } static int cal_camerarx_regmap_init(struct cal_dev *cal, struct cal_camerarx *phy) { const struct cal_camerarx_data *phy_data; unsigned int i; if (!cal->data) return -EINVAL; phy_data = &cal->data->camerarx[phy->instance]; for (i = 0; i < F_MAX_FIELDS; i++) { struct reg_field field = { .reg = cal->syscon_camerrx_offset, .lsb = phy_data->fields[i].lsb, .msb = phy_data->fields[i].msb, }; /* * Here we update the reg offset with the * value found in DT */ phy->fields[i] = devm_regmap_field_alloc(cal->dev, cal->syscon_camerrx, field); if (IS_ERR(phy->fields[i])) { cal_err(cal, "Unable to allocate regmap fields\n"); return PTR_ERR(phy->fields[i]); } } return 0; } static int cal_camerarx_parse_dt(struct cal_camerarx *phy) { struct v4l2_fwnode_endpoint *endpoint = &phy->endpoint; char data_lanes[V4L2_MBUS_CSI2_MAX_DATA_LANES * 2]; struct device_node *ep_node; unsigned int i; int ret; /* * Find the endpoint node for the port corresponding to the PHY * instance, and parse its CSI-2-related properties. 
*/ ep_node = of_graph_get_endpoint_by_regs(phy->cal->dev->of_node, phy->instance, 0); if (!ep_node) { /* * The endpoint is not mandatory, not all PHY instances need to * be connected in DT. */ phy_dbg(3, phy, "Port has no endpoint\n"); return 0; } endpoint->bus_type = V4L2_MBUS_CSI2_DPHY; ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep_node), endpoint); if (ret < 0) { phy_err(phy, "Failed to parse endpoint\n"); goto done; } for (i = 0; i < endpoint->bus.mipi_csi2.num_data_lanes; i++) { unsigned int lane = endpoint->bus.mipi_csi2.data_lanes[i]; if (lane > 4) { phy_err(phy, "Invalid position %u for data lane %u\n", lane, i); ret = -EINVAL; goto done; } data_lanes[i*2] = '0' + lane; data_lanes[i*2+1] = ' '; } data_lanes[i*2-1] = '\0'; phy_dbg(3, phy, "CSI-2 bus: clock lane <%u>, data lanes <%s>, flags 0x%08x\n", endpoint->bus.mipi_csi2.clock_lane, data_lanes, endpoint->bus.mipi_csi2.flags); /* Retrieve the connected device and store it for later use. */ phy->source_ep_node = of_graph_get_remote_endpoint(ep_node); phy->source_node = of_graph_get_port_parent(phy->source_ep_node); if (!phy->source_node) { phy_dbg(3, phy, "Can't get remote parent\n"); of_node_put(phy->source_ep_node); ret = -EINVAL; goto done; } phy_dbg(1, phy, "Found connected device %pOFn\n", phy->source_node); done: of_node_put(ep_node); return ret; } /* ------------------------------------------------------------------ * V4L2 Subdev Operations * ------------------------------------------------------------------ */ static inline struct cal_camerarx *to_cal_camerarx(struct v4l2_subdev *sd) { return container_of(sd, struct cal_camerarx, subdev); } static int cal_camerarx_sd_s_stream(struct v4l2_subdev *sd, int enable) { struct cal_camerarx *phy = to_cal_camerarx(sd); struct v4l2_subdev_state *state; int ret = 0; state = v4l2_subdev_lock_and_get_active_state(sd); if (enable) ret = cal_camerarx_start(phy); else cal_camerarx_stop(phy); v4l2_subdev_unlock_state(state); return ret; } static int 
cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_mbus_code_enum *code) { struct cal_camerarx *phy = to_cal_camerarx(sd); /* No transcoding, source and sink codes must match. */ if (cal_rx_pad_is_source(code->pad)) { struct v4l2_mbus_framefmt *fmt; if (code->index > 0) return -EINVAL; fmt = v4l2_subdev_get_pad_format(&phy->subdev, state, CAL_CAMERARX_PAD_SINK); code->code = fmt->code; } else { if (code->index >= cal_num_formats) return -EINVAL; code->code = cal_formats[code->index].code; } return 0; } static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_frame_size_enum *fse) { const struct cal_format_info *fmtinfo; if (fse->index > 0) return -EINVAL; /* No transcoding, source and sink formats must match. */ if (cal_rx_pad_is_source(fse->pad)) { struct v4l2_mbus_framefmt *fmt; fmt = v4l2_subdev_get_pad_format(sd, state, CAL_CAMERARX_PAD_SINK); if (fse->code != fmt->code) return -EINVAL; fse->min_width = fmt->width; fse->max_width = fmt->width; fse->min_height = fmt->height; fse->max_height = fmt->height; } else { fmtinfo = cal_format_by_code(fse->code); if (!fmtinfo) return -EINVAL; fse->min_width = CAL_MIN_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8); fse->max_width = CAL_MAX_WIDTH_BYTES * 8 / ALIGN(fmtinfo->bpp, 8); fse->min_height = CAL_MIN_HEIGHT_LINES; fse->max_height = CAL_MAX_HEIGHT_LINES; } return 0; } static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *format) { const struct cal_format_info *fmtinfo; struct v4l2_mbus_framefmt *fmt; unsigned int bpp; /* No transcoding, source and sink formats must match. */ if (cal_rx_pad_is_source(format->pad)) return v4l2_subdev_get_fmt(sd, state, format); /* * Default to the first format if the requested media bus code isn't * supported. 
*/ fmtinfo = cal_format_by_code(format->format.code); if (!fmtinfo) fmtinfo = &cal_formats[0]; /* Clamp the size, update the code. The colorspace is accepted as-is. */ bpp = ALIGN(fmtinfo->bpp, 8); format->format.width = clamp_t(unsigned int, format->format.width, CAL_MIN_WIDTH_BYTES * 8 / bpp, CAL_MAX_WIDTH_BYTES * 8 / bpp); format->format.height = clamp_t(unsigned int, format->format.height, CAL_MIN_HEIGHT_LINES, CAL_MAX_HEIGHT_LINES); format->format.code = fmtinfo->code; format->format.field = V4L2_FIELD_NONE; /* Store the format and propagate it to the source pad. */ fmt = v4l2_subdev_get_pad_format(sd, state, CAL_CAMERARX_PAD_SINK); *fmt = format->format; fmt = v4l2_subdev_get_pad_format(sd, state, CAL_CAMERARX_PAD_FIRST_SOURCE); *fmt = format->format; return 0; } static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd, struct v4l2_subdev_state *state) { struct v4l2_subdev_format format = { .which = state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE, .pad = CAL_CAMERARX_PAD_SINK, .format = { .width = 640, .height = 480, .code = MEDIA_BUS_FMT_UYVY8_1X16, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_SRGB, .ycbcr_enc = V4L2_YCBCR_ENC_601, .quantization = V4L2_QUANTIZATION_LIM_RANGE, .xfer_func = V4L2_XFER_FUNC_SRGB, }, }; return cal_camerarx_sd_set_fmt(sd, state, &format); } static int cal_camerarx_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad, struct v4l2_mbus_frame_desc *fd) { struct cal_camerarx *phy = to_cal_camerarx(sd); struct v4l2_mbus_frame_desc remote_desc; const struct media_pad *remote_pad; int ret; remote_pad = media_pad_remote_pad_first(&phy->pads[CAL_CAMERARX_PAD_SINK]); if (!remote_pad) return -EPIPE; ret = v4l2_subdev_call(phy->source, pad, get_frame_desc, remote_pad->index, &remote_desc); if (ret) return ret; if (remote_desc.type != V4L2_MBUS_FRAME_DESC_TYPE_CSI2) { cal_err(phy->cal, "Frame descriptor does not describe CSI-2 link"); return -EINVAL; } if (remote_desc.num_entries > 1) cal_err(phy->cal, "Multiple 
streams not supported in remote frame descriptor, using the first one\n"); fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2; fd->num_entries = 1; fd->entry[0] = remote_desc.entry[0]; return 0; } static const struct v4l2_subdev_video_ops cal_camerarx_video_ops = { .s_stream = cal_camerarx_sd_s_stream, }; static const struct v4l2_subdev_pad_ops cal_camerarx_pad_ops = { .init_cfg = cal_camerarx_sd_init_cfg, .enum_mbus_code = cal_camerarx_sd_enum_mbus_code, .enum_frame_size = cal_camerarx_sd_enum_frame_size, .get_fmt = v4l2_subdev_get_fmt, .set_fmt = cal_camerarx_sd_set_fmt, .get_frame_desc = cal_camerarx_get_frame_desc, }; static const struct v4l2_subdev_ops cal_camerarx_subdev_ops = { .video = &cal_camerarx_video_ops, .pad = &cal_camerarx_pad_ops, }; static struct media_entity_operations cal_camerarx_media_ops = { .link_validate = v4l2_subdev_link_validate, }; /* ------------------------------------------------------------------ * Create and Destroy * ------------------------------------------------------------------ */ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal, unsigned int instance) { struct platform_device *pdev = to_platform_device(cal->dev); struct cal_camerarx *phy; struct v4l2_subdev *sd; unsigned int i; int ret; phy = devm_kzalloc(cal->dev, sizeof(*phy), GFP_KERNEL); if (!phy) return ERR_PTR(-ENOMEM); phy->cal = cal; phy->instance = instance; spin_lock_init(&phy->vc_lock); phy->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, (instance == 0) ? "cal_rx_core0" : "cal_rx_core1"); phy->base = devm_ioremap_resource(cal->dev, phy->res); if (IS_ERR(phy->base)) { cal_err(cal, "failed to ioremap\n"); return ERR_CAST(phy->base); } cal_dbg(1, cal, "ioresource %s at %pa - %pa\n", phy->res->name, &phy->res->start, &phy->res->end); ret = cal_camerarx_regmap_init(cal, phy); if (ret) return ERR_PTR(ret); ret = cal_camerarx_parse_dt(phy); if (ret) return ERR_PTR(ret); /* Initialize the V4L2 subdev and media entity. 
*/ sd = &phy->subdev; v4l2_subdev_init(sd, &cal_camerarx_subdev_ops); sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE; sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(sd->name, sizeof(sd->name), "CAMERARX%u", instance); sd->dev = cal->dev; phy->pads[CAL_CAMERARX_PAD_SINK].flags = MEDIA_PAD_FL_SINK; for (i = CAL_CAMERARX_PAD_FIRST_SOURCE; i < CAL_CAMERARX_NUM_PADS; ++i) phy->pads[i].flags = MEDIA_PAD_FL_SOURCE; sd->entity.ops = &cal_camerarx_media_ops; ret = media_entity_pads_init(&sd->entity, ARRAY_SIZE(phy->pads), phy->pads); if (ret) goto err_node_put; ret = v4l2_subdev_init_finalize(sd); if (ret) goto err_entity_cleanup; ret = v4l2_device_register_subdev(&cal->v4l2_dev, sd); if (ret) goto err_free_state; return phy; err_free_state: v4l2_subdev_cleanup(sd); err_entity_cleanup: media_entity_cleanup(&phy->subdev.entity); err_node_put: of_node_put(phy->source_ep_node); of_node_put(phy->source_node); return ERR_PTR(ret); } void cal_camerarx_destroy(struct cal_camerarx *phy) { if (!phy) return; v4l2_device_unregister_subdev(&phy->subdev); v4l2_subdev_cleanup(&phy->subdev); media_entity_cleanup(&phy->subdev.entity); of_node_put(phy->source_ep_node); of_node_put(phy->source_node); }
linux-master
drivers/media/platform/ti/cal/cal-camerarx.c
// SPDX-License-Identifier: GPL-2.0-only /* * TI VPFE capture Driver * * Copyright (C) 2013 - 2014 Texas Instruments, Inc. * * Benoit Parrot <[email protected]> * Lad, Prabhakar <[email protected]> */ #include <linux/delay.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-rect.h> #include "am437x-vpfe.h" #define VPFE_MODULE_NAME "vpfe" #define VPFE_VERSION "0.1.0" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level 0-8"); #define vpfe_dbg(level, dev, fmt, arg...) \ v4l2_dbg(level, debug, &dev->v4l2_dev, fmt, ##arg) #define vpfe_info(dev, fmt, arg...) \ v4l2_info(&dev->v4l2_dev, fmt, ##arg) #define vpfe_err(dev, fmt, arg...) 
\ v4l2_err(&dev->v4l2_dev, fmt, ##arg) /* standard information */ struct vpfe_standard { v4l2_std_id std_id; unsigned int width; unsigned int height; struct v4l2_fract pixelaspect; int frame_format; }; static const struct vpfe_standard vpfe_standards[] = { {V4L2_STD_525_60, 720, 480, {11, 10}, 1}, {V4L2_STD_625_50, 720, 576, {54, 59}, 1}, }; static struct vpfe_fmt formats[VPFE_NUM_FORMATS] = { { .fourcc = V4L2_PIX_FMT_YUYV, .code = MEDIA_BUS_FMT_YUYV8_2X8, .bitsperpixel = 16, }, { .fourcc = V4L2_PIX_FMT_UYVY, .code = MEDIA_BUS_FMT_UYVY8_2X8, .bitsperpixel = 16, }, { .fourcc = V4L2_PIX_FMT_YVYU, .code = MEDIA_BUS_FMT_YVYU8_2X8, .bitsperpixel = 16, }, { .fourcc = V4L2_PIX_FMT_VYUY, .code = MEDIA_BUS_FMT_VYUY8_2X8, .bitsperpixel = 16, }, { .fourcc = V4L2_PIX_FMT_SBGGR8, .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bitsperpixel = 8, }, { .fourcc = V4L2_PIX_FMT_SGBRG8, .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bitsperpixel = 8, }, { .fourcc = V4L2_PIX_FMT_SGRBG8, .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bitsperpixel = 8, }, { .fourcc = V4L2_PIX_FMT_SRGGB8, .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bitsperpixel = 8, }, { .fourcc = V4L2_PIX_FMT_RGB565, .code = MEDIA_BUS_FMT_RGB565_2X8_LE, .bitsperpixel = 16, }, { .fourcc = V4L2_PIX_FMT_RGB565X, .code = MEDIA_BUS_FMT_RGB565_2X8_BE, .bitsperpixel = 16, }, }; static int __subdev_get_format(struct vpfe_device *vpfe, struct v4l2_mbus_framefmt *fmt); static int vpfe_calc_format_size(struct vpfe_device *vpfe, const struct vpfe_fmt *fmt, struct v4l2_format *f); static struct vpfe_fmt *find_format_by_code(struct vpfe_device *vpfe, unsigned int code) { struct vpfe_fmt *fmt; unsigned int k; for (k = 0; k < vpfe->num_active_fmt; k++) { fmt = vpfe->active_fmt[k]; if (fmt->code == code) return fmt; } return NULL; } static struct vpfe_fmt *find_format_by_pix(struct vpfe_device *vpfe, unsigned int pixelformat) { struct vpfe_fmt *fmt; unsigned int k; for (k = 0; k < vpfe->num_active_fmt; k++) { fmt = vpfe->active_fmt[k]; if (fmt->fourcc == pixelformat) return fmt; } 
return NULL; } static unsigned int __get_bytesperpixel(struct vpfe_device *vpfe, const struct vpfe_fmt *fmt) { struct vpfe_subdev_info *sdinfo = vpfe->current_subdev; unsigned int bus_width = sdinfo->vpfe_param.bus_width; u32 bpp, bus_width_bytes, clocksperpixel; bus_width_bytes = ALIGN(bus_width, 8) >> 3; clocksperpixel = DIV_ROUND_UP(fmt->bitsperpixel, bus_width); bpp = clocksperpixel * bus_width_bytes; return bpp; } /* Print Four-character-code (FOURCC) */ static char *print_fourcc(u32 fmt) { static char code[5]; code[0] = (unsigned char)(fmt & 0xff); code[1] = (unsigned char)((fmt >> 8) & 0xff); code[2] = (unsigned char)((fmt >> 16) & 0xff); code[3] = (unsigned char)((fmt >> 24) & 0xff); code[4] = '\0'; return code; } static inline u32 vpfe_reg_read(struct vpfe_ccdc *ccdc, u32 offset) { return ioread32(ccdc->ccdc_cfg.base_addr + offset); } static inline void vpfe_reg_write(struct vpfe_ccdc *ccdc, u32 val, u32 offset) { iowrite32(val, ccdc->ccdc_cfg.base_addr + offset); } static inline struct vpfe_device *to_vpfe(struct vpfe_ccdc *ccdc) { return container_of(ccdc, struct vpfe_device, ccdc); } static inline struct vpfe_cap_buffer *to_vpfe_buffer(struct vb2_v4l2_buffer *vb) { return container_of(vb, struct vpfe_cap_buffer, vb); } static inline void vpfe_pcr_enable(struct vpfe_ccdc *ccdc, int flag) { vpfe_reg_write(ccdc, !!flag, VPFE_PCR); } static void vpfe_config_enable(struct vpfe_ccdc *ccdc, int flag) { unsigned int cfg; if (!flag) { cfg = vpfe_reg_read(ccdc, VPFE_CONFIG); cfg &= ~(VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT); } else { cfg = VPFE_CONFIG_EN_ENABLE << VPFE_CONFIG_EN_SHIFT; } vpfe_reg_write(ccdc, cfg, VPFE_CONFIG); } static void vpfe_ccdc_setwin(struct vpfe_ccdc *ccdc, struct v4l2_rect *image_win, enum ccdc_frmfmt frm_fmt, int bpp) { int horz_start, horz_nr_pixels; int vert_start, vert_nr_lines; int val, mid_img; /* * ppc - per pixel count. indicates how many pixels per cell * output to SDRAM. example, for ycbcr, it is one y and one c, so 2. 
* raw capture this is 1 */ horz_start = image_win->left * bpp; horz_nr_pixels = (image_win->width * bpp) - 1; vpfe_reg_write(ccdc, (horz_start << VPFE_HORZ_INFO_SPH_SHIFT) | horz_nr_pixels, VPFE_HORZ_INFO); vert_start = image_win->top; if (frm_fmt == CCDC_FRMFMT_INTERLACED) { vert_nr_lines = (image_win->height >> 1) - 1; vert_start >>= 1; /* configure VDINT0 */ val = (vert_start << VPFE_VDINT_VDINT0_SHIFT); } else { vert_nr_lines = image_win->height - 1; /* * configure VDINT0 and VDINT1. VDINT1 will be at half * of image height */ mid_img = vert_start + (image_win->height / 2); val = (vert_start << VPFE_VDINT_VDINT0_SHIFT) | (mid_img & VPFE_VDINT_VDINT1_MASK); } vpfe_reg_write(ccdc, val, VPFE_VDINT); vpfe_reg_write(ccdc, (vert_start << VPFE_VERT_START_SLV0_SHIFT) | vert_start, VPFE_VERT_START); vpfe_reg_write(ccdc, vert_nr_lines, VPFE_VERT_LINES); } static void vpfe_reg_dump(struct vpfe_ccdc *ccdc) { struct vpfe_device *vpfe = to_vpfe(ccdc); vpfe_dbg(3, vpfe, "ALAW: 0x%x\n", vpfe_reg_read(ccdc, VPFE_ALAW)); vpfe_dbg(3, vpfe, "CLAMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_CLAMP)); vpfe_dbg(3, vpfe, "DCSUB: 0x%x\n", vpfe_reg_read(ccdc, VPFE_DCSUB)); vpfe_dbg(3, vpfe, "BLKCMP: 0x%x\n", vpfe_reg_read(ccdc, VPFE_BLKCMP)); vpfe_dbg(3, vpfe, "COLPTN: 0x%x\n", vpfe_reg_read(ccdc, VPFE_COLPTN)); vpfe_dbg(3, vpfe, "SDOFST: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SDOFST)); vpfe_dbg(3, vpfe, "SYN_MODE: 0x%x\n", vpfe_reg_read(ccdc, VPFE_SYNMODE)); vpfe_dbg(3, vpfe, "HSIZE_OFF: 0x%x\n", vpfe_reg_read(ccdc, VPFE_HSIZE_OFF)); vpfe_dbg(3, vpfe, "HORZ_INFO: 0x%x\n", vpfe_reg_read(ccdc, VPFE_HORZ_INFO)); vpfe_dbg(3, vpfe, "VERT_START: 0x%x\n", vpfe_reg_read(ccdc, VPFE_VERT_START)); vpfe_dbg(3, vpfe, "VERT_LINES: 0x%x\n", vpfe_reg_read(ccdc, VPFE_VERT_LINES)); } static int vpfe_ccdc_validate_param(struct vpfe_ccdc *ccdc, struct vpfe_ccdc_config_params_raw *ccdcparam) { struct vpfe_device *vpfe = to_vpfe(ccdc); u8 max_gamma, max_data; if (!ccdcparam->alaw.enable) return 0; max_gamma = 
ccdc_gamma_width_max_bit(ccdcparam->alaw.gamma_wd); max_data = ccdc_data_size_max_bit(ccdcparam->data_sz); if (ccdcparam->alaw.gamma_wd > VPFE_CCDC_GAMMA_BITS_09_0 || ccdcparam->data_sz > VPFE_CCDC_DATA_8BITS || max_gamma > max_data) { vpfe_dbg(1, vpfe, "Invalid data line select\n"); return -EINVAL; } return 0; } static void vpfe_ccdc_update_raw_params(struct vpfe_ccdc *ccdc, struct vpfe_ccdc_config_params_raw *raw_params) { struct vpfe_ccdc_config_params_raw *config_params = &ccdc->ccdc_cfg.bayer.config_params; *config_params = *raw_params; } /* * vpfe_ccdc_restore_defaults() * This function will write defaults to all CCDC registers */ static void vpfe_ccdc_restore_defaults(struct vpfe_ccdc *ccdc) { int i; /* Disable CCDC */ vpfe_pcr_enable(ccdc, 0); /* set all registers to default value */ for (i = 4; i <= 0x94; i += 4) vpfe_reg_write(ccdc, 0, i); vpfe_reg_write(ccdc, VPFE_NO_CULLING, VPFE_CULLING); vpfe_reg_write(ccdc, VPFE_CCDC_GAMMA_BITS_11_2, VPFE_ALAW); } static int vpfe_ccdc_close(struct vpfe_ccdc *ccdc, struct device *dev) { struct vpfe_device *vpfe = to_vpfe(ccdc); u32 dma_cntl, pcr; pcr = vpfe_reg_read(ccdc, VPFE_PCR); if (pcr) vpfe_dbg(1, vpfe, "VPFE_PCR is still set (%x)", pcr); dma_cntl = vpfe_reg_read(ccdc, VPFE_DMA_CNTL); if ((dma_cntl & VPFE_DMA_CNTL_OVERFLOW)) vpfe_dbg(1, vpfe, "VPFE_DMA_CNTL_OVERFLOW is still set (%x)", dma_cntl); /* Disable CCDC by resetting all register to default POR values */ vpfe_ccdc_restore_defaults(ccdc); /* Disabled the module at the CONFIG level */ vpfe_config_enable(ccdc, 0); pm_runtime_put_sync(dev); return 0; } static int vpfe_ccdc_set_params(struct vpfe_ccdc *ccdc, void __user *params) { struct vpfe_device *vpfe = to_vpfe(ccdc); struct vpfe_ccdc_config_params_raw raw_params; int x; if (ccdc->ccdc_cfg.if_type != VPFE_RAW_BAYER) return -EINVAL; x = copy_from_user(&raw_params, params, sizeof(raw_params)); if (x) { vpfe_dbg(1, vpfe, "%s: error in copying ccdc params, %d\n", __func__, x); return -EFAULT; } if 
(!vpfe_ccdc_validate_param(ccdc, &raw_params)) { vpfe_ccdc_update_raw_params(ccdc, &raw_params); return 0; } return -EINVAL; } /* * vpfe_ccdc_config_ycbcr() * This function will configure CCDC for YCbCr video capture */ static void vpfe_ccdc_config_ycbcr(struct vpfe_ccdc *ccdc) { struct ccdc_params_ycbcr *params = &ccdc->ccdc_cfg.ycbcr; u32 syn_mode; /* * first restore the CCDC registers to default values * This is important since we assume default values to be set in * a lot of registers that we didn't touch */ vpfe_ccdc_restore_defaults(ccdc); /* * configure pixel format, frame format, configure video frame * format, enable output to SDRAM, enable internal timing generator * and 8bit pack mode */ syn_mode = (((params->pix_fmt & VPFE_SYN_MODE_INPMOD_MASK) << VPFE_SYN_MODE_INPMOD_SHIFT) | ((params->frm_fmt & VPFE_SYN_FLDMODE_MASK) << VPFE_SYN_FLDMODE_SHIFT) | VPFE_VDHDEN_ENABLE | VPFE_WEN_ENABLE | VPFE_DATA_PACK_ENABLE); /* setup BT.656 sync mode */ if (params->bt656_enable) { vpfe_reg_write(ccdc, VPFE_REC656IF_BT656_EN, VPFE_REC656IF); /* * configure the FID, VD, HD pin polarity, * fld,hd pol positive, vd negative, 8-bit data */ syn_mode |= VPFE_SYN_MODE_VD_POL_NEGATIVE; if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT) syn_mode |= VPFE_SYN_MODE_10BITS; else syn_mode |= VPFE_SYN_MODE_8BITS; } else { /* y/c external sync mode */ syn_mode |= (((params->fid_pol & VPFE_FID_POL_MASK) << VPFE_FID_POL_SHIFT) | ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) | ((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT)); } vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE); /* configure video window */ vpfe_ccdc_setwin(ccdc, &params->win, params->frm_fmt, params->bytesperpixel); /* * configure the order of y cb cr in SDRAM, and disable latch * internal register on vsync */ if (ccdc->ccdc_cfg.if_type == VPFE_BT656_10BIT) vpfe_reg_write(ccdc, (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) | VPFE_LATCH_ON_VSYNC_DISABLE | VPFE_CCDCFG_BW656_10BIT, VPFE_CCDCFG); else 
vpfe_reg_write(ccdc, (params->pix_order << VPFE_CCDCFG_Y8POS_SHIFT) | VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG); /* * configure the horizontal line offset. This should be a * on 32 byte boundary. So clear LSB 5 bits */ vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF); /* configure the memory line offset */ if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) /* two fields are interleaved in memory */ vpfe_reg_write(ccdc, VPFE_SDOFST_FIELD_INTERLEAVED, VPFE_SDOFST); } static void vpfe_ccdc_config_black_clamp(struct vpfe_ccdc *ccdc, struct vpfe_ccdc_black_clamp *bclamp) { u32 val; if (!bclamp->enable) { /* configure DCSub */ val = (bclamp->dc_sub) & VPFE_BLK_DC_SUB_MASK; vpfe_reg_write(ccdc, val, VPFE_DCSUB); vpfe_reg_write(ccdc, VPFE_CLAMP_DEFAULT_VAL, VPFE_CLAMP); return; } /* * Configure gain, Start pixel, No of line to be avg, * No of pixel/line to be avg, & Enable the Black clamping */ val = ((bclamp->sgain & VPFE_BLK_SGAIN_MASK) | ((bclamp->start_pixel & VPFE_BLK_ST_PXL_MASK) << VPFE_BLK_ST_PXL_SHIFT) | ((bclamp->sample_ln & VPFE_BLK_SAMPLE_LINE_MASK) << VPFE_BLK_SAMPLE_LINE_SHIFT) | ((bclamp->sample_pixel & VPFE_BLK_SAMPLE_LN_MASK) << VPFE_BLK_SAMPLE_LN_SHIFT) | VPFE_BLK_CLAMP_ENABLE); vpfe_reg_write(ccdc, val, VPFE_CLAMP); /* If Black clamping is enable then make dcsub 0 */ vpfe_reg_write(ccdc, VPFE_DCSUB_DEFAULT_VAL, VPFE_DCSUB); } static void vpfe_ccdc_config_black_compense(struct vpfe_ccdc *ccdc, struct vpfe_ccdc_black_compensation *bcomp) { u32 val; val = ((bcomp->b & VPFE_BLK_COMP_MASK) | ((bcomp->gb & VPFE_BLK_COMP_MASK) << VPFE_BLK_COMP_GB_COMP_SHIFT) | ((bcomp->gr & VPFE_BLK_COMP_MASK) << VPFE_BLK_COMP_GR_COMP_SHIFT) | ((bcomp->r & VPFE_BLK_COMP_MASK) << VPFE_BLK_COMP_R_COMP_SHIFT)); vpfe_reg_write(ccdc, val, VPFE_BLKCMP); } /* * vpfe_ccdc_config_raw() * This function will configure CCDC for Raw capture mode */ static void vpfe_ccdc_config_raw(struct vpfe_ccdc *ccdc) { struct vpfe_device *vpfe = to_vpfe(ccdc); struct 
vpfe_ccdc_config_params_raw *config_params = &ccdc->ccdc_cfg.bayer.config_params; struct ccdc_params_raw *params = &ccdc->ccdc_cfg.bayer; unsigned int syn_mode; unsigned int val; /* Reset CCDC */ vpfe_ccdc_restore_defaults(ccdc); /* Disable latching function registers on VSYNC */ vpfe_reg_write(ccdc, VPFE_LATCH_ON_VSYNC_DISABLE, VPFE_CCDCFG); /* * Configure the vertical sync polarity(SYN_MODE.VDPOL), * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity * (SYN_MODE.FLDPOL), frame format(progressive or interlace), * data size(SYNMODE.DATSIZ), &pixel format (Input mode), output * SDRAM, enable internal timing generator */ syn_mode = (((params->vd_pol & VPFE_VD_POL_MASK) << VPFE_VD_POL_SHIFT) | ((params->hd_pol & VPFE_HD_POL_MASK) << VPFE_HD_POL_SHIFT) | ((params->fid_pol & VPFE_FID_POL_MASK) << VPFE_FID_POL_SHIFT) | ((params->frm_fmt & VPFE_FRM_FMT_MASK) << VPFE_FRM_FMT_SHIFT) | ((config_params->data_sz & VPFE_DATA_SZ_MASK) << VPFE_DATA_SZ_SHIFT) | ((params->pix_fmt & VPFE_PIX_FMT_MASK) << VPFE_PIX_FMT_SHIFT) | VPFE_WEN_ENABLE | VPFE_VDHDEN_ENABLE); /* Enable and configure aLaw register if needed */ if (config_params->alaw.enable) { val = ((config_params->alaw.gamma_wd & VPFE_ALAW_GAMMA_WD_MASK) | VPFE_ALAW_ENABLE); vpfe_reg_write(ccdc, val, VPFE_ALAW); vpfe_dbg(3, vpfe, "\nWriting 0x%x to ALAW...\n", val); } /* Configure video window */ vpfe_ccdc_setwin(ccdc, &params->win, params->frm_fmt, params->bytesperpixel); /* Configure Black Clamp */ vpfe_ccdc_config_black_clamp(ccdc, &config_params->blk_clamp); /* Configure Black level compensation */ vpfe_ccdc_config_black_compense(ccdc, &config_params->blk_comp); /* If data size is 8 bit then pack the data */ if ((config_params->data_sz == VPFE_CCDC_DATA_8BITS) || config_params->alaw.enable) syn_mode |= VPFE_DATA_PACK_ENABLE; /* * Configure Horizontal offset register. 
If pack 8 is enabled then * 1 pixel will take 1 byte */ vpfe_reg_write(ccdc, params->bytesperline, VPFE_HSIZE_OFF); vpfe_dbg(3, vpfe, "Writing %d (%x) to HSIZE_OFF\n", params->bytesperline, params->bytesperline); /* Set value for SDOFST */ if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) { if (params->image_invert_enable) { /* For interlace inverse mode */ vpfe_reg_write(ccdc, VPFE_INTERLACED_IMAGE_INVERT, VPFE_SDOFST); } else { /* For interlace non inverse mode */ vpfe_reg_write(ccdc, VPFE_INTERLACED_NO_IMAGE_INVERT, VPFE_SDOFST); } } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) { vpfe_reg_write(ccdc, VPFE_PROGRESSIVE_NO_IMAGE_INVERT, VPFE_SDOFST); } vpfe_reg_write(ccdc, syn_mode, VPFE_SYNMODE); vpfe_reg_dump(ccdc); } static inline int vpfe_ccdc_set_buftype(struct vpfe_ccdc *ccdc, enum ccdc_buftype buf_type) { if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) ccdc->ccdc_cfg.bayer.buf_type = buf_type; else ccdc->ccdc_cfg.ycbcr.buf_type = buf_type; return 0; } static inline enum ccdc_buftype vpfe_ccdc_get_buftype(struct vpfe_ccdc *ccdc) { if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) return ccdc->ccdc_cfg.bayer.buf_type; return ccdc->ccdc_cfg.ycbcr.buf_type; } static int vpfe_ccdc_set_pixel_format(struct vpfe_ccdc *ccdc, u32 pixfmt) { struct vpfe_device *vpfe = to_vpfe(ccdc); vpfe_dbg(1, vpfe, "%s: if_type: %d, pixfmt:%s\n", __func__, ccdc->ccdc_cfg.if_type, print_fourcc(pixfmt)); if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) { ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW; /* * Need to clear it in case it was left on * after the last capture. 
*/ ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 0; switch (pixfmt) { case V4L2_PIX_FMT_SBGGR8: ccdc->ccdc_cfg.bayer.config_params.alaw.enable = 1; break; case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_YUV420: case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_RGB565X: break; case V4L2_PIX_FMT_SBGGR16: default: return -EINVAL; } } else { switch (pixfmt) { case V4L2_PIX_FMT_YUYV: ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR; break; case V4L2_PIX_FMT_UYVY: ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY; break; default: return -EINVAL; } } return 0; } static u32 vpfe_ccdc_get_pixel_format(struct vpfe_ccdc *ccdc) { u32 pixfmt; if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) { pixfmt = V4L2_PIX_FMT_YUYV; } else { if (ccdc->ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR) pixfmt = V4L2_PIX_FMT_YUYV; else pixfmt = V4L2_PIX_FMT_UYVY; } return pixfmt; } static int vpfe_ccdc_set_image_window(struct vpfe_ccdc *ccdc, struct v4l2_rect *win, unsigned int bpp) { if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) { ccdc->ccdc_cfg.bayer.win = *win; ccdc->ccdc_cfg.bayer.bytesperpixel = bpp; ccdc->ccdc_cfg.bayer.bytesperline = ALIGN(win->width * bpp, 32); } else { ccdc->ccdc_cfg.ycbcr.win = *win; ccdc->ccdc_cfg.ycbcr.bytesperpixel = bpp; ccdc->ccdc_cfg.ycbcr.bytesperline = ALIGN(win->width * bpp, 32); } return 0; } static inline void vpfe_ccdc_get_image_window(struct vpfe_ccdc *ccdc, struct v4l2_rect *win) { if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) *win = ccdc->ccdc_cfg.bayer.win; else *win = ccdc->ccdc_cfg.ycbcr.win; } static inline unsigned int vpfe_ccdc_get_line_length(struct vpfe_ccdc *ccdc) { if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) return ccdc->ccdc_cfg.bayer.bytesperline; return ccdc->ccdc_cfg.ycbcr.bytesperline; } static inline int vpfe_ccdc_set_frame_format(struct vpfe_ccdc *ccdc, enum ccdc_frmfmt frm_fmt) { if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) ccdc->ccdc_cfg.bayer.frm_fmt = frm_fmt; else ccdc->ccdc_cfg.ycbcr.frm_fmt = frm_fmt; 
return 0; } static inline enum ccdc_frmfmt vpfe_ccdc_get_frame_format(struct vpfe_ccdc *ccdc) { if (ccdc->ccdc_cfg.if_type == VPFE_RAW_BAYER) return ccdc->ccdc_cfg.bayer.frm_fmt; return ccdc->ccdc_cfg.ycbcr.frm_fmt; } static inline int vpfe_ccdc_getfid(struct vpfe_ccdc *ccdc) { return (vpfe_reg_read(ccdc, VPFE_SYNMODE) >> 15) & 1; } static inline void vpfe_set_sdr_addr(struct vpfe_ccdc *ccdc, unsigned long addr) { vpfe_reg_write(ccdc, addr & 0xffffffe0, VPFE_SDR_ADDR); } static int vpfe_ccdc_set_hw_if_params(struct vpfe_ccdc *ccdc, struct vpfe_hw_if_param *params) { struct vpfe_device *vpfe = to_vpfe(ccdc); ccdc->ccdc_cfg.if_type = params->if_type; switch (params->if_type) { case VPFE_BT656: case VPFE_YCBCR_SYNC_16: case VPFE_YCBCR_SYNC_8: case VPFE_BT656_10BIT: ccdc->ccdc_cfg.ycbcr.vd_pol = params->vdpol; ccdc->ccdc_cfg.ycbcr.hd_pol = params->hdpol; break; case VPFE_RAW_BAYER: ccdc->ccdc_cfg.bayer.vd_pol = params->vdpol; ccdc->ccdc_cfg.bayer.hd_pol = params->hdpol; if (params->bus_width == 10) ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_10BITS; else ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS; vpfe_dbg(1, vpfe, "params.bus_width: %d\n", params->bus_width); vpfe_dbg(1, vpfe, "config_params.data_sz: %d\n", ccdc->ccdc_cfg.bayer.config_params.data_sz); break; default: return -EINVAL; } return 0; } static void vpfe_clear_intr(struct vpfe_ccdc *ccdc, int vdint) { unsigned int vpfe_int_status; vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS); switch (vdint) { /* VD0 interrupt */ case VPFE_VDINT0: vpfe_int_status &= ~VPFE_VDINT0; vpfe_int_status |= VPFE_VDINT0; break; /* VD1 interrupt */ case VPFE_VDINT1: vpfe_int_status &= ~VPFE_VDINT1; vpfe_int_status |= VPFE_VDINT1; break; /* VD2 interrupt */ case VPFE_VDINT2: vpfe_int_status &= ~VPFE_VDINT2; vpfe_int_status |= VPFE_VDINT2; break; /* Clear all interrupts */ default: vpfe_int_status &= ~(VPFE_VDINT0 | VPFE_VDINT1 | VPFE_VDINT2); vpfe_int_status |= (VPFE_VDINT0 | VPFE_VDINT1 | 
VPFE_VDINT2); break; } /* Clear specific VDINT from the status register */ vpfe_reg_write(ccdc, vpfe_int_status, VPFE_IRQ_STS); vpfe_int_status = vpfe_reg_read(ccdc, VPFE_IRQ_STS); /* Acknowledge that we are done with all interrupts */ vpfe_reg_write(ccdc, 1, VPFE_IRQ_EOI); } static void vpfe_ccdc_config_defaults(struct vpfe_ccdc *ccdc) { ccdc->ccdc_cfg.if_type = VPFE_RAW_BAYER; ccdc->ccdc_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT; ccdc->ccdc_cfg.ycbcr.frm_fmt = CCDC_FRMFMT_INTERLACED; ccdc->ccdc_cfg.ycbcr.fid_pol = VPFE_PINPOL_POSITIVE; ccdc->ccdc_cfg.ycbcr.vd_pol = VPFE_PINPOL_POSITIVE; ccdc->ccdc_cfg.ycbcr.hd_pol = VPFE_PINPOL_POSITIVE; ccdc->ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY; ccdc->ccdc_cfg.ycbcr.buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED; ccdc->ccdc_cfg.ycbcr.win.left = 0; ccdc->ccdc_cfg.ycbcr.win.top = 0; ccdc->ccdc_cfg.ycbcr.win.width = 720; ccdc->ccdc_cfg.ycbcr.win.height = 576; ccdc->ccdc_cfg.ycbcr.bt656_enable = 1; ccdc->ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW; ccdc->ccdc_cfg.bayer.frm_fmt = CCDC_FRMFMT_PROGRESSIVE; ccdc->ccdc_cfg.bayer.fid_pol = VPFE_PINPOL_POSITIVE; ccdc->ccdc_cfg.bayer.vd_pol = VPFE_PINPOL_POSITIVE; ccdc->ccdc_cfg.bayer.hd_pol = VPFE_PINPOL_POSITIVE; ccdc->ccdc_cfg.bayer.win.left = 0; ccdc->ccdc_cfg.bayer.win.top = 0; ccdc->ccdc_cfg.bayer.win.width = 800; ccdc->ccdc_cfg.bayer.win.height = 600; ccdc->ccdc_cfg.bayer.config_params.data_sz = VPFE_CCDC_DATA_8BITS; ccdc->ccdc_cfg.bayer.config_params.alaw.gamma_wd = VPFE_CCDC_GAMMA_BITS_09_0; } /* * vpfe_get_ccdc_image_format - Get image parameters based on CCDC settings */ static int vpfe_get_ccdc_image_format(struct vpfe_device *vpfe, struct v4l2_format *f) { struct v4l2_rect image_win; enum ccdc_buftype buf_type; enum ccdc_frmfmt frm_fmt; memset(f, 0, sizeof(*f)); f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; vpfe_ccdc_get_image_window(&vpfe->ccdc, &image_win); f->fmt.pix.width = image_win.width; f->fmt.pix.height = image_win.height; f->fmt.pix.bytesperline = 
vpfe_ccdc_get_line_length(&vpfe->ccdc); f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height; buf_type = vpfe_ccdc_get_buftype(&vpfe->ccdc); f->fmt.pix.pixelformat = vpfe_ccdc_get_pixel_format(&vpfe->ccdc); frm_fmt = vpfe_ccdc_get_frame_format(&vpfe->ccdc); if (frm_fmt == CCDC_FRMFMT_PROGRESSIVE) { f->fmt.pix.field = V4L2_FIELD_NONE; } else if (frm_fmt == CCDC_FRMFMT_INTERLACED) { if (buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) { f->fmt.pix.field = V4L2_FIELD_INTERLACED; } else if (buf_type == CCDC_BUFTYPE_FLD_SEPARATED) { f->fmt.pix.field = V4L2_FIELD_SEQ_TB; } else { vpfe_err(vpfe, "Invalid buf_type\n"); return -EINVAL; } } else { vpfe_err(vpfe, "Invalid frm_fmt\n"); return -EINVAL; } return 0; } static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe) { enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED; u32 bpp; int ret = 0; vpfe_dbg(1, vpfe, "pixelformat: %s\n", print_fourcc(vpfe->fmt.fmt.pix.pixelformat)); if (vpfe_ccdc_set_pixel_format(&vpfe->ccdc, vpfe->fmt.fmt.pix.pixelformat) < 0) { vpfe_err(vpfe, "couldn't set pix format in ccdc\n"); return -EINVAL; } /* configure the image window */ bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt); vpfe_ccdc_set_image_window(&vpfe->ccdc, &vpfe->crop, bpp); switch (vpfe->fmt.fmt.pix.field) { case V4L2_FIELD_INTERLACED: /* do nothing, since it is default */ ret = vpfe_ccdc_set_buftype( &vpfe->ccdc, CCDC_BUFTYPE_FLD_INTERLEAVED); break; case V4L2_FIELD_NONE: frm_fmt = CCDC_FRMFMT_PROGRESSIVE; /* buffer type only applicable for interlaced scan */ break; case V4L2_FIELD_SEQ_TB: ret = vpfe_ccdc_set_buftype( &vpfe->ccdc, CCDC_BUFTYPE_FLD_SEPARATED); break; default: return -EINVAL; } if (ret) return ret; return vpfe_ccdc_set_frame_format(&vpfe->ccdc, frm_fmt); } /* * vpfe_config_image_format() * For a given standard, this functions sets up the default * pix format & crop values in the vpfe device and ccdc. It first * starts with defaults based values from the standard table. 
* It then checks if sub device supports get_fmt and then override the * values based on that.Sets crop values to match with scan resolution * starting at 0,0. It calls vpfe_config_ccdc_image_format() set the * values in ccdc */ static int vpfe_config_image_format(struct vpfe_device *vpfe, v4l2_std_id std_id) { struct vpfe_fmt *fmt; struct v4l2_mbus_framefmt mbus_fmt; int i, ret; for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) { if (vpfe_standards[i].std_id & std_id) { vpfe->std_info.active_pixels = vpfe_standards[i].width; vpfe->std_info.active_lines = vpfe_standards[i].height; vpfe->std_info.frame_format = vpfe_standards[i].frame_format; vpfe->std_index = i; break; } } if (i == ARRAY_SIZE(vpfe_standards)) { vpfe_err(vpfe, "standard not supported\n"); return -EINVAL; } ret = __subdev_get_format(vpfe, &mbus_fmt); if (ret) return ret; fmt = find_format_by_code(vpfe, mbus_fmt.code); if (!fmt) { vpfe_dbg(3, vpfe, "mbus code format (0x%08x) not found.\n", mbus_fmt.code); return -EINVAL; } /* Save current subdev format */ v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt); vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; vpfe->fmt.fmt.pix.pixelformat = fmt->fourcc; vpfe_calc_format_size(vpfe, fmt, &vpfe->fmt); vpfe->current_vpfe_fmt = fmt; /* Update the crop window based on found values */ vpfe->crop.top = 0; vpfe->crop.left = 0; vpfe->crop.width = mbus_fmt.width; vpfe->crop.height = mbus_fmt.height; return vpfe_config_ccdc_image_format(vpfe); } static int vpfe_initialize_device(struct vpfe_device *vpfe) { struct vpfe_subdev_info *sdinfo; int ret; sdinfo = &vpfe->cfg->sub_devs[0]; sdinfo->sd = vpfe->sd[0]; vpfe->current_input = 0; vpfe->std_index = 0; /* Configure the default format information */ ret = vpfe_config_image_format(vpfe, vpfe_standards[vpfe->std_index].std_id); if (ret) return ret; ret = pm_runtime_resume_and_get(vpfe->pdev); if (ret < 0) return ret; vpfe_config_enable(&vpfe->ccdc, 1); vpfe_ccdc_restore_defaults(&vpfe->ccdc); /* Clear all VPFE interrupts */ 
vpfe_clear_intr(&vpfe->ccdc, -1); return ret; } /* * vpfe_release : This function is based on the vb2_fop_release * helper function. * It has been augmented to handle module power management, * by disabling/enabling h/w module fcntl clock when necessary. */ static int vpfe_release(struct file *file) { struct vpfe_device *vpfe = video_drvdata(file); bool fh_singular; int ret; mutex_lock(&vpfe->lock); /* Save the singular status before we call the clean-up helper */ fh_singular = v4l2_fh_is_singular_file(file); /* the release helper will cleanup any on-going streaming */ ret = _vb2_fop_release(file, NULL); /* * If this was the last open file. * Then de-initialize hw module. */ if (fh_singular) vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev); mutex_unlock(&vpfe->lock); return ret; } /* * vpfe_open : This function is based on the v4l2_fh_open helper function. * It has been augmented to handle module power management, * by disabling/enabling h/w module fcntl clock when necessary. */ static int vpfe_open(struct file *file) { struct vpfe_device *vpfe = video_drvdata(file); int ret; mutex_lock(&vpfe->lock); ret = v4l2_fh_open(file); if (ret) { vpfe_err(vpfe, "v4l2_fh_open failed\n"); goto unlock; } if (!v4l2_fh_is_singular_file(file)) goto unlock; if (vpfe_initialize_device(vpfe)) { v4l2_fh_release(file); ret = -ENODEV; } unlock: mutex_unlock(&vpfe->lock); return ret; } /** * vpfe_schedule_next_buffer: set next buffer address for capture * @vpfe : ptr to vpfe device * * This function will get next buffer from the dma queue and * set the buffer address in the vpfe register for capture. 
* the buffer is marked active */ static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe) { dma_addr_t addr; spin_lock(&vpfe->dma_queue_lock); if (list_empty(&vpfe->dma_queue)) { spin_unlock(&vpfe->dma_queue_lock); return; } vpfe->next_frm = list_entry(vpfe->dma_queue.next, struct vpfe_cap_buffer, list); list_del(&vpfe->next_frm->list); spin_unlock(&vpfe->dma_queue_lock); addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0); vpfe_set_sdr_addr(&vpfe->ccdc, addr); } static inline void vpfe_schedule_bottom_field(struct vpfe_device *vpfe) { dma_addr_t addr; addr = vb2_dma_contig_plane_dma_addr(&vpfe->next_frm->vb.vb2_buf, 0) + vpfe->field_off; vpfe_set_sdr_addr(&vpfe->ccdc, addr); } /* * vpfe_process_buffer_complete: process a completed buffer * @vpfe : ptr to vpfe device * * This function time stamp the buffer and mark it as DONE. It also * wake up any process waiting on the QUEUE and set the next buffer * as current */ static inline void vpfe_process_buffer_complete(struct vpfe_device *vpfe) { vpfe->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); vpfe->cur_frm->vb.field = vpfe->fmt.fmt.pix.field; vpfe->cur_frm->vb.sequence = vpfe->sequence++; vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); vpfe->cur_frm = vpfe->next_frm; } static void vpfe_handle_interlaced_irq(struct vpfe_device *vpfe, enum v4l2_field field) { int fid; /* interlaced or TB capture check which field * we are in hardware */ fid = vpfe_ccdc_getfid(&vpfe->ccdc); /* switch the software maintained field id */ vpfe->field ^= 1; if (fid == vpfe->field) { /* we are in-sync here,continue */ if (fid == 0) { /* * One frame is just being captured. 
If the * next frame is available, release the * current frame and move on */ if (vpfe->cur_frm != vpfe->next_frm) vpfe_process_buffer_complete(vpfe); if (vpfe->stopping) return; /* * based on whether the two fields are stored * interleave or separately in memory, * reconfigure the CCDC memory address */ if (field == V4L2_FIELD_SEQ_TB) vpfe_schedule_bottom_field(vpfe); } else { /* * if one field is just being captured configure * the next frame get the next frame from the empty * queue if no frame is available hold on to the * current buffer */ if (vpfe->cur_frm == vpfe->next_frm) vpfe_schedule_next_buffer(vpfe); } } else if (fid == 0) { /* * out of sync. Recover from any hardware out-of-sync. * May loose one frame */ vpfe->field = fid; } } /* * vpfe_isr : ISR handler for vpfe capture (VINT0) * @irq: irq number * @dev_id: dev_id ptr * * It changes status of the captured buffer, takes next buffer from the queue * and sets its address in VPFE registers */ static irqreturn_t vpfe_isr(int irq, void *dev) { struct vpfe_device *vpfe = (struct vpfe_device *)dev; enum v4l2_field field = vpfe->fmt.fmt.pix.field; int intr_status, stopping = vpfe->stopping; intr_status = vpfe_reg_read(&vpfe->ccdc, VPFE_IRQ_STS); if (intr_status & VPFE_VDINT0) { if (field == V4L2_FIELD_NONE) { if (vpfe->cur_frm != vpfe->next_frm) vpfe_process_buffer_complete(vpfe); } else { vpfe_handle_interlaced_irq(vpfe, field); } if (stopping) { vpfe->stopping = false; complete(&vpfe->capture_stop); } } if (intr_status & VPFE_VDINT1 && !stopping) { if (field == V4L2_FIELD_NONE && vpfe->cur_frm == vpfe->next_frm) vpfe_schedule_next_buffer(vpfe); } vpfe_clear_intr(&vpfe->ccdc, intr_status); return IRQ_HANDLED; } static inline void vpfe_detach_irq(struct vpfe_device *vpfe) { unsigned int intr = VPFE_VDINT0; enum ccdc_frmfmt frame_format; frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc); if (frame_format == CCDC_FRMFMT_PROGRESSIVE) intr |= VPFE_VDINT1; vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_CLR); } 
static inline void vpfe_attach_irq(struct vpfe_device *vpfe) { unsigned int intr = VPFE_VDINT0; enum ccdc_frmfmt frame_format; frame_format = vpfe_ccdc_get_frame_format(&vpfe->ccdc); if (frame_format == CCDC_FRMFMT_PROGRESSIVE) intr |= VPFE_VDINT1; vpfe_reg_write(&vpfe->ccdc, intr, VPFE_IRQ_EN_SET); } static int vpfe_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct vpfe_device *vpfe = video_drvdata(file); strscpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver)); strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card)); snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", vpfe->v4l2_dev.name); return 0; } /* get the format set at output pad of the adjacent subdev */ static int __subdev_get_format(struct vpfe_device *vpfe, struct v4l2_mbus_framefmt *fmt) { struct v4l2_subdev *sd = vpfe->current_subdev->sd; struct v4l2_subdev_format sd_fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .pad = 0, }; struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format; int ret; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt); if (ret) return ret; *fmt = *mbus_fmt; vpfe_dbg(1, vpfe, "%s: %dx%d code:%04X\n", __func__, fmt->width, fmt->height, fmt->code); return 0; } /* set the format at output pad of the adjacent subdev */ static int __subdev_set_format(struct vpfe_device *vpfe, struct v4l2_mbus_framefmt *fmt) { struct v4l2_subdev *sd = vpfe->current_subdev->sd; struct v4l2_subdev_format sd_fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .pad = 0, }; struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format; int ret; *mbus_fmt = *fmt; ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sd_fmt); if (ret) return ret; vpfe_dbg(1, vpfe, "%s %dx%d code:%04X\n", __func__, fmt->width, fmt->height, fmt->code); return 0; } static int vpfe_calc_format_size(struct vpfe_device *vpfe, const struct vpfe_fmt *fmt, struct v4l2_format *f) { u32 bpp; if (!fmt) { vpfe_dbg(3, vpfe, "No vpfe_fmt provided!\n"); return -EINVAL; } bpp = __get_bytesperpixel(vpfe, fmt); /* pitch should 
be 32 bytes aligned */ f->fmt.pix.bytesperline = ALIGN(f->fmt.pix.width * bpp, 32); f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height; vpfe_dbg(3, vpfe, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n", __func__, print_fourcc(f->fmt.pix.pixelformat), f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.bytesperline, f->fmt.pix.sizeimage); return 0; } static int vpfe_g_fmt(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpfe_device *vpfe = video_drvdata(file); *fmt = vpfe->fmt; return 0; } static int vpfe_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct vpfe_device *vpfe = video_drvdata(file); struct vpfe_subdev_info *sdinfo; struct vpfe_fmt *fmt; sdinfo = vpfe->current_subdev; if (!sdinfo->sd) return -EINVAL; if (f->index >= vpfe->num_active_fmt) return -EINVAL; fmt = vpfe->active_fmt[f->index]; f->pixelformat = fmt->fourcc; vpfe_dbg(1, vpfe, "%s: mbus index: %d code: %x pixelformat: %s\n", __func__, f->index, fmt->code, print_fourcc(fmt->fourcc)); return 0; } static int vpfe_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct vpfe_device *vpfe = video_drvdata(file); struct v4l2_subdev *sd = vpfe->current_subdev->sd; const struct vpfe_fmt *fmt; struct v4l2_subdev_frame_size_enum fse = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; int ret, found; fmt = find_format_by_pix(vpfe, f->fmt.pix.pixelformat); if (!fmt) { /* default to first entry */ vpfe_dbg(3, vpfe, "Invalid pixel code: %x, default used instead\n", f->fmt.pix.pixelformat); fmt = vpfe->active_fmt[0]; f->fmt.pix.pixelformat = fmt->fourcc; } f->fmt.pix.field = vpfe->fmt.fmt.pix.field; /* check for/find a valid width/height */ ret = 0; found = false; fse.pad = 0; fse.code = fmt->code; for (fse.index = 0; ; fse.index++) { ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse); if (ret) break; if (f->fmt.pix.width == fse.max_width && f->fmt.pix.height == fse.max_height) { found = true; break; } else if (f->fmt.pix.width >= fse.min_width && 
f->fmt.pix.width <= fse.max_width && f->fmt.pix.height >= fse.min_height && f->fmt.pix.height <= fse.max_height) { found = true; break; } } if (!found) { /* use existing values as default */ f->fmt.pix.width = vpfe->fmt.fmt.pix.width; f->fmt.pix.height = vpfe->fmt.fmt.pix.height; } /* * Use current colorspace for now, it will get * updated properly during s_fmt */ f->fmt.pix.colorspace = vpfe->fmt.fmt.pix.colorspace; return vpfe_calc_format_size(vpfe, fmt, f); } static int vpfe_s_fmt(struct file *file, void *priv, struct v4l2_format *fmt) { struct vpfe_device *vpfe = video_drvdata(file); struct vpfe_fmt *f; struct v4l2_mbus_framefmt mbus_fmt; int ret; /* If streaming is started, return error */ if (vb2_is_busy(&vpfe->buffer_queue)) { vpfe_err(vpfe, "%s device busy\n", __func__); return -EBUSY; } ret = vpfe_try_fmt(file, priv, fmt); if (ret < 0) return ret; f = find_format_by_pix(vpfe, fmt->fmt.pix.pixelformat); v4l2_fill_mbus_format(&mbus_fmt, &fmt->fmt.pix, f->code); ret = __subdev_set_format(vpfe, &mbus_fmt); if (ret) return ret; /* Just double check nothing has gone wrong */ if (mbus_fmt.code != f->code) { vpfe_dbg(3, vpfe, "%s subdev changed format on us, this should not happen\n", __func__); return -EINVAL; } v4l2_fill_pix_format(&vpfe->fmt.fmt.pix, &mbus_fmt); vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; vpfe->fmt.fmt.pix.pixelformat = f->fourcc; vpfe_calc_format_size(vpfe, f, &vpfe->fmt); *fmt = vpfe->fmt; vpfe->current_vpfe_fmt = f; /* Update the crop window based on found values */ vpfe->crop.width = fmt->fmt.pix.width; vpfe->crop.height = fmt->fmt.pix.height; /* set image capture parameters in the ccdc */ return vpfe_config_ccdc_image_format(vpfe); } static int vpfe_enum_size(struct file *file, void *priv, struct v4l2_frmsizeenum *fsize) { struct vpfe_device *vpfe = video_drvdata(file); struct v4l2_subdev_frame_size_enum fse = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct v4l2_subdev *sd = vpfe->current_subdev->sd; struct vpfe_fmt *fmt; int ret; /* 
check for valid format */ fmt = find_format_by_pix(vpfe, fsize->pixel_format); if (!fmt) { vpfe_dbg(3, vpfe, "Invalid pixel code: %x\n", fsize->pixel_format); return -EINVAL; } memset(fsize->reserved, 0x0, sizeof(fsize->reserved)); fse.index = fsize->index; fse.pad = 0; fse.code = fmt->code; ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse); if (ret) return ret; vpfe_dbg(1, vpfe, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n", __func__, fse.index, fse.code, fse.min_width, fse.max_width, fse.min_height, fse.max_height); fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; fsize->discrete.width = fse.max_width; fsize->discrete.height = fse.max_height; vpfe_dbg(1, vpfe, "%s: index: %d pixformat: %s size: %dx%d\n", __func__, fsize->index, print_fourcc(fsize->pixel_format), fsize->discrete.width, fsize->discrete.height); return 0; } /* * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a * given app input index */ static int vpfe_get_subdev_input_index(struct vpfe_device *vpfe, int *subdev_index, int *subdev_input_index, int app_input_index) { int i, j = 0; for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) { if (app_input_index < (j + 1)) { *subdev_index = i; *subdev_input_index = app_input_index - j; return 0; } j++; } return -EINVAL; } /* * vpfe_get_app_input - Get app input index for a given subdev input index * driver stores the input index of the current sub device and translate it * when application request the current input */ static int vpfe_get_app_input_index(struct vpfe_device *vpfe, int *app_input_index) { struct vpfe_config *cfg = vpfe->cfg; struct vpfe_subdev_info *sdinfo; struct i2c_client *client; struct i2c_client *curr_client; int i, j = 0; curr_client = v4l2_get_subdevdata(vpfe->current_subdev->sd); for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) { sdinfo = &cfg->sub_devs[i]; client = v4l2_get_subdevdata(sdinfo->sd); if (client->addr == curr_client->addr && client->adapter->nr == curr_client->adapter->nr) { if 
(vpfe->current_input >= 1) return -1; *app_input_index = j + vpfe->current_input; return 0; } j++; } return -EINVAL; } static int vpfe_enum_input(struct file *file, void *priv, struct v4l2_input *inp) { struct vpfe_device *vpfe = video_drvdata(file); struct vpfe_subdev_info *sdinfo; int subdev, index; if (vpfe_get_subdev_input_index(vpfe, &subdev, &index, inp->index) < 0) { vpfe_dbg(1, vpfe, "input information not found for the subdev\n"); return -EINVAL; } sdinfo = &vpfe->cfg->sub_devs[subdev]; *inp = sdinfo->inputs[index]; return 0; } static int vpfe_g_input(struct file *file, void *priv, unsigned int *index) { struct vpfe_device *vpfe = video_drvdata(file); return vpfe_get_app_input_index(vpfe, index); } /* Assumes caller is holding vpfe_dev->lock */ static int vpfe_set_input(struct vpfe_device *vpfe, unsigned int index) { int subdev_index = 0, inp_index = 0; struct vpfe_subdev_info *sdinfo; struct vpfe_route *route; u32 input, output; int ret; /* If streaming is started, return error */ if (vb2_is_busy(&vpfe->buffer_queue)) { vpfe_err(vpfe, "%s device busy\n", __func__); return -EBUSY; } ret = vpfe_get_subdev_input_index(vpfe, &subdev_index, &inp_index, index); if (ret < 0) { vpfe_err(vpfe, "invalid input index: %d\n", index); goto get_out; } sdinfo = &vpfe->cfg->sub_devs[subdev_index]; sdinfo->sd = vpfe->sd[subdev_index]; route = &sdinfo->routes[inp_index]; if (route && sdinfo->can_route) { input = route->input; output = route->output; if (sdinfo->sd) { ret = v4l2_subdev_call(sdinfo->sd, video, s_routing, input, output, 0); if (ret) { vpfe_err(vpfe, "s_routing failed\n"); ret = -EINVAL; goto get_out; } } } vpfe->current_subdev = sdinfo; if (sdinfo->sd) vpfe->v4l2_dev.ctrl_handler = sdinfo->sd->ctrl_handler; vpfe->current_input = index; vpfe->std_index = 0; /* set the bus/interface parameter for the sub device in ccdc */ ret = vpfe_ccdc_set_hw_if_params(&vpfe->ccdc, &sdinfo->vpfe_param); if (ret) return ret; /* set the default image parameters in the device */ 
return vpfe_config_image_format(vpfe, vpfe_standards[vpfe->std_index].std_id); get_out: return ret; } static int vpfe_s_input(struct file *file, void *priv, unsigned int index) { struct vpfe_device *vpfe = video_drvdata(file); return vpfe_set_input(vpfe, index); } static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id) { struct vpfe_device *vpfe = video_drvdata(file); struct vpfe_subdev_info *sdinfo; sdinfo = vpfe->current_subdev; if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD)) return -ENODATA; /* Call querystd function of decoder device */ return v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id, video, querystd, std_id); } static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id) { struct vpfe_device *vpfe = video_drvdata(file); struct vpfe_subdev_info *sdinfo; int ret; sdinfo = vpfe->current_subdev; if (!(sdinfo->inputs[0].capabilities & V4L2_IN_CAP_STD)) return -ENODATA; /* if trying to set the same std then nothing to do */ if (vpfe_standards[vpfe->std_index].std_id == std_id) return 0; /* If streaming is started, return error */ if (vb2_is_busy(&vpfe->buffer_queue)) { vpfe_err(vpfe, "%s device busy\n", __func__); ret = -EBUSY; return ret; } ret = v4l2_device_call_until_err(&vpfe->v4l2_dev, sdinfo->grp_id, video, s_std, std_id); if (ret < 0) { vpfe_err(vpfe, "Failed to set standard\n"); return ret; } ret = vpfe_config_image_format(vpfe, std_id); return ret; } static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id) { struct vpfe_device *vpfe = video_drvdata(file); struct vpfe_subdev_info *sdinfo; sdinfo = vpfe->current_subdev; if (sdinfo->inputs[0].capabilities != V4L2_IN_CAP_STD) return -ENODATA; *std_id = vpfe_standards[vpfe->std_index].std_id; return 0; } /* * vpfe_calculate_offsets : This function calculates buffers offset * for top and bottom field */ static void vpfe_calculate_offsets(struct vpfe_device *vpfe) { struct v4l2_rect image_win; vpfe_ccdc_get_image_window(&vpfe->ccdc, 
&image_win); vpfe->field_off = image_win.height * image_win.width; } /* * vpfe_queue_setup - Callback function for buffer setup. * @vq: vb2_queue ptr * @nbuffers: ptr to number of buffers requested by application * @nplanes:: contains number of distinct video planes needed to hold a frame * @sizes[]: contains the size (in bytes) of each plane. * @alloc_devs: ptr to allocation context * * This callback function is called when reqbuf() is called to adjust * the buffer count and buffer size */ static int vpfe_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct vpfe_device *vpfe = vb2_get_drv_priv(vq); unsigned size = vpfe->fmt.fmt.pix.sizeimage; if (vq->num_buffers + *nbuffers < 3) *nbuffers = 3 - vq->num_buffers; if (*nplanes) { if (sizes[0] < size) return -EINVAL; size = sizes[0]; } *nplanes = 1; sizes[0] = size; vpfe_dbg(1, vpfe, "nbuffers=%d, size=%u\n", *nbuffers, sizes[0]); /* Calculate field offset */ vpfe_calculate_offsets(vpfe); return 0; } /* * vpfe_buffer_prepare : callback function for buffer prepare * @vb: ptr to vb2_buffer * * This is the callback function for buffer prepare when vb2_qbuf() * function is called. 
The buffer is prepared and user space virtual address * or user address is converted into physical address */ static int vpfe_buffer_prepare(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue); vb2_set_plane_payload(vb, 0, vpfe->fmt.fmt.pix.sizeimage); if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) return -EINVAL; vbuf->field = vpfe->fmt.fmt.pix.field; return 0; } /* * vpfe_buffer_queue : Callback function to add buffer to DMA queue * @vb: ptr to vb2_buffer */ static void vpfe_buffer_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vpfe_device *vpfe = vb2_get_drv_priv(vb->vb2_queue); struct vpfe_cap_buffer *buf = to_vpfe_buffer(vbuf); unsigned long flags = 0; /* add the buffer to the DMA queue */ spin_lock_irqsave(&vpfe->dma_queue_lock, flags); list_add_tail(&buf->list, &vpfe->dma_queue); spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags); } static void vpfe_return_all_buffers(struct vpfe_device *vpfe, enum vb2_buffer_state state) { struct vpfe_cap_buffer *buf, *node; unsigned long flags; spin_lock_irqsave(&vpfe->dma_queue_lock, flags); list_for_each_entry_safe(buf, node, &vpfe->dma_queue, list) { vb2_buffer_done(&buf->vb.vb2_buf, state); list_del(&buf->list); } if (vpfe->cur_frm) vb2_buffer_done(&vpfe->cur_frm->vb.vb2_buf, state); if (vpfe->next_frm && vpfe->next_frm != vpfe->cur_frm) vb2_buffer_done(&vpfe->next_frm->vb.vb2_buf, state); vpfe->cur_frm = NULL; vpfe->next_frm = NULL; spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags); } /* * vpfe_start_streaming : Starts the DMA engine for streaming * @vb: ptr to vb2_buffer * @count: number of buffers */ static int vpfe_start_streaming(struct vb2_queue *vq, unsigned int count) { struct vpfe_device *vpfe = vb2_get_drv_priv(vq); struct vpfe_subdev_info *sdinfo; unsigned long flags; unsigned long addr; int ret; spin_lock_irqsave(&vpfe->dma_queue_lock, flags); vpfe->field = 0; 
vpfe->sequence = 0; sdinfo = vpfe->current_subdev; vpfe_attach_irq(vpfe); vpfe->stopping = false; init_completion(&vpfe->capture_stop); if (vpfe->ccdc.ccdc_cfg.if_type == VPFE_RAW_BAYER) vpfe_ccdc_config_raw(&vpfe->ccdc); else vpfe_ccdc_config_ycbcr(&vpfe->ccdc); /* Get the next frame from the buffer queue */ vpfe->next_frm = list_entry(vpfe->dma_queue.next, struct vpfe_cap_buffer, list); vpfe->cur_frm = vpfe->next_frm; /* Remove buffer from the buffer queue */ list_del(&vpfe->cur_frm->list); spin_unlock_irqrestore(&vpfe->dma_queue_lock, flags); addr = vb2_dma_contig_plane_dma_addr(&vpfe->cur_frm->vb.vb2_buf, 0); vpfe_set_sdr_addr(&vpfe->ccdc, (unsigned long)(addr)); vpfe_pcr_enable(&vpfe->ccdc, 1); ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 1); if (ret < 0) { vpfe_err(vpfe, "Error in attaching interrupt handle\n"); goto err; } return 0; err: vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_QUEUED); vpfe_pcr_enable(&vpfe->ccdc, 0); return ret; } /* * vpfe_stop_streaming : Stop the DMA engine * @vq: ptr to vb2_queue * * This callback stops the DMA engine and any remaining buffers * in the DMA queue are released. 
*/ static void vpfe_stop_streaming(struct vb2_queue *vq) { struct vpfe_device *vpfe = vb2_get_drv_priv(vq); struct vpfe_subdev_info *sdinfo; int ret; vpfe_pcr_enable(&vpfe->ccdc, 0); /* Wait for the last frame to be captured */ vpfe->stopping = true; wait_for_completion_timeout(&vpfe->capture_stop, msecs_to_jiffies(250)); vpfe_detach_irq(vpfe); sdinfo = vpfe->current_subdev; ret = v4l2_subdev_call(sdinfo->sd, video, s_stream, 0); if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) vpfe_dbg(1, vpfe, "stream off failed in subdev\n"); /* release all active buffers */ vpfe_return_all_buffers(vpfe, VB2_BUF_STATE_ERROR); } static int vpfe_g_pixelaspect(struct file *file, void *priv, int type, struct v4l2_fract *f) { struct vpfe_device *vpfe = video_drvdata(file); if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE || vpfe->std_index >= ARRAY_SIZE(vpfe_standards)) return -EINVAL; *f = vpfe_standards[vpfe->std_index].pixelaspect; return 0; } static int vpfe_g_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct vpfe_device *vpfe = video_drvdata(file); if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || vpfe->std_index >= ARRAY_SIZE(vpfe_standards)) return -EINVAL; switch (s->target) { case V4L2_SEL_TGT_CROP_BOUNDS: case V4L2_SEL_TGT_CROP_DEFAULT: s->r.left = 0; s->r.top = 0; s->r.width = vpfe_standards[vpfe->std_index].width; s->r.height = vpfe_standards[vpfe->std_index].height; break; case V4L2_SEL_TGT_CROP: s->r = vpfe->crop; break; default: return -EINVAL; } return 0; } static int vpfe_s_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct vpfe_device *vpfe = video_drvdata(file); struct v4l2_rect cr = vpfe->crop; struct v4l2_rect r = s->r; u32 bpp; /* If streaming is started, return error */ if (vb2_is_busy(&vpfe->buffer_queue)) { vpfe_err(vpfe, "%s device busy\n", __func__); return -EBUSY; } if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || s->target != V4L2_SEL_TGT_CROP) return -EINVAL; v4l_bound_align_image(&r.width, 0, cr.width, 0, &r.height, 0, 
cr.height, 0, 0); r.left = clamp_t(unsigned int, r.left, 0, cr.width - r.width); r.top = clamp_t(unsigned int, r.top, 0, cr.height - r.height); if (s->flags & V4L2_SEL_FLAG_LE && !v4l2_rect_enclosed(&r, &s->r)) return -ERANGE; if (s->flags & V4L2_SEL_FLAG_GE && !v4l2_rect_enclosed(&s->r, &r)) return -ERANGE; s->r = vpfe->crop = r; bpp = __get_bytesperpixel(vpfe, vpfe->current_vpfe_fmt); vpfe_ccdc_set_image_window(&vpfe->ccdc, &r, bpp); vpfe->fmt.fmt.pix.width = r.width; vpfe->fmt.fmt.pix.height = r.height; vpfe->fmt.fmt.pix.bytesperline = vpfe_ccdc_get_line_length(&vpfe->ccdc); vpfe->fmt.fmt.pix.sizeimage = vpfe->fmt.fmt.pix.bytesperline * vpfe->fmt.fmt.pix.height; vpfe_dbg(1, vpfe, "cropped (%d,%d)/%dx%d of %dx%d\n", r.left, r.top, r.width, r.height, cr.width, cr.height); return 0; } static long vpfe_ioctl_default(struct file *file, void *priv, bool valid_prio, unsigned int cmd, void *param) { struct vpfe_device *vpfe = video_drvdata(file); int ret; if (!valid_prio) { vpfe_err(vpfe, "%s device busy\n", __func__); return -EBUSY; } /* If streaming is started, return error */ if (vb2_is_busy(&vpfe->buffer_queue)) { vpfe_err(vpfe, "%s device busy\n", __func__); return -EBUSY; } switch (cmd) { case VIDIOC_AM437X_CCDC_CFG: ret = vpfe_ccdc_set_params(&vpfe->ccdc, (void __user *)param); if (ret) { vpfe_dbg(2, vpfe, "Error setting parameters in CCDC\n"); return ret; } ret = vpfe_get_ccdc_image_format(vpfe, &vpfe->fmt); if (ret < 0) { vpfe_dbg(2, vpfe, "Invalid image format at CCDC\n"); return ret; } break; default: ret = -ENOTTY; break; } return ret; } static const struct vb2_ops vpfe_video_qops = { .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, .queue_setup = vpfe_queue_setup, .buf_prepare = vpfe_buffer_prepare, .buf_queue = vpfe_buffer_queue, .start_streaming = vpfe_start_streaming, .stop_streaming = vpfe_stop_streaming, }; /* vpfe capture driver file operations */ static const struct v4l2_file_operations vpfe_fops = { .owner = THIS_MODULE, 
.open = vpfe_open, .release = vpfe_release, .read = vb2_fop_read, .poll = vb2_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = vb2_fop_mmap, }; /* vpfe capture ioctl operations */ static const struct v4l2_ioctl_ops vpfe_ioctl_ops = { .vidioc_querycap = vpfe_querycap, .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt, .vidioc_g_fmt_vid_cap = vpfe_g_fmt, .vidioc_s_fmt_vid_cap = vpfe_s_fmt, .vidioc_try_fmt_vid_cap = vpfe_try_fmt, .vidioc_enum_framesizes = vpfe_enum_size, .vidioc_enum_input = vpfe_enum_input, .vidioc_g_input = vpfe_g_input, .vidioc_s_input = vpfe_s_input, .vidioc_querystd = vpfe_querystd, .vidioc_s_std = vpfe_s_std, .vidioc_g_std = vpfe_g_std, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, .vidioc_g_pixelaspect = vpfe_g_pixelaspect, .vidioc_g_selection = vpfe_g_selection, .vidioc_s_selection = vpfe_s_selection, .vidioc_default = vpfe_ioctl_default, }; static int vpfe_async_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asd) { struct vpfe_device *vpfe = container_of(notifier->v4l2_dev, struct vpfe_device, v4l2_dev); struct vpfe_subdev_info *sdinfo; struct vpfe_fmt *fmt; int ret = 0; bool found = false; int i, j, k; for (i = 0; i < ARRAY_SIZE(vpfe->cfg->asd); i++) { if (vpfe->cfg->asd[i]->match.fwnode == asd[i].match.fwnode) { sdinfo = &vpfe->cfg->sub_devs[i]; vpfe->sd[i] = subdev; vpfe->sd[i]->grp_id = sdinfo->grp_id; found = true; break; } } if (!found) { vpfe_info(vpfe, "sub device (%s) not matched\n", subdev->name); return -EINVAL; } vpfe->video_dev.tvnorms |= 
sdinfo->inputs[0].std; vpfe->num_active_fmt = 0; for (j = 0, i = 0; (ret != -EINVAL); ++j) { struct v4l2_subdev_mbus_code_enum mbus_code = { .index = j, .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; ret = v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &mbus_code); if (ret) continue; vpfe_dbg(3, vpfe, "subdev %s: code: %04x idx: %d\n", subdev->name, mbus_code.code, j); for (k = 0; k < ARRAY_SIZE(formats); k++) { fmt = &formats[k]; if (mbus_code.code != fmt->code) continue; vpfe->active_fmt[i] = fmt; vpfe_dbg(3, vpfe, "matched fourcc: %s code: %04x idx: %d\n", print_fourcc(fmt->fourcc), mbus_code.code, i); vpfe->num_active_fmt = ++i; } } if (!i) { vpfe_err(vpfe, "No suitable format reported by subdev %s\n", subdev->name); return -EINVAL; } return 0; } static int vpfe_probe_complete(struct vpfe_device *vpfe) { struct video_device *vdev; struct vb2_queue *q; int err; spin_lock_init(&vpfe->dma_queue_lock); mutex_init(&vpfe->lock); vpfe->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* set first sub device as current one */ vpfe->current_subdev = &vpfe->cfg->sub_devs[0]; vpfe->v4l2_dev.ctrl_handler = vpfe->sd[0]->ctrl_handler; err = vpfe_set_input(vpfe, 0); if (err) goto probe_out; /* Initialize videobuf2 queue as per the buffer type */ q = &vpfe->buffer_queue; q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; q->drv_priv = vpfe; q->ops = &vpfe_video_qops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct vpfe_cap_buffer); q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->lock = &vpfe->lock; q->min_buffers_needed = 1; q->dev = vpfe->pdev; err = vb2_queue_init(q); if (err) { vpfe_err(vpfe, "vb2_queue_init() failed\n"); goto probe_out; } INIT_LIST_HEAD(&vpfe->dma_queue); vdev = &vpfe->video_dev; strscpy(vdev->name, VPFE_MODULE_NAME, sizeof(vdev->name)); vdev->release = video_device_release_empty; vdev->fops = &vpfe_fops; vdev->ioctl_ops = &vpfe_ioctl_ops; vdev->v4l2_dev = &vpfe->v4l2_dev; vdev->vfl_dir = 
VFL_DIR_RX; vdev->queue = q; vdev->lock = &vpfe->lock; vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; video_set_drvdata(vdev, vpfe); err = video_register_device(&vpfe->video_dev, VFL_TYPE_VIDEO, -1); if (err) { vpfe_err(vpfe, "Unable to register video device.\n"); goto probe_out; } return 0; probe_out: v4l2_device_unregister(&vpfe->v4l2_dev); return err; } static int vpfe_async_complete(struct v4l2_async_notifier *notifier) { struct vpfe_device *vpfe = container_of(notifier->v4l2_dev, struct vpfe_device, v4l2_dev); return vpfe_probe_complete(vpfe); } static const struct v4l2_async_notifier_operations vpfe_async_ops = { .bound = vpfe_async_bound, .complete = vpfe_async_complete, }; static struct vpfe_config * vpfe_get_pdata(struct vpfe_device *vpfe) { struct device_node *endpoint = NULL; struct device *dev = vpfe->pdev; struct vpfe_subdev_info *sdinfo; struct vpfe_config *pdata; unsigned int flags; unsigned int i; int err; dev_dbg(dev, "vpfe_get_pdata\n"); v4l2_async_nf_init(&vpfe->notifier, &vpfe->v4l2_dev); if (!IS_ENABLED(CONFIG_OF) || !dev->of_node) return dev->platform_data; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return NULL; for (i = 0; ; i++) { struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 }; struct device_node *rem; endpoint = of_graph_get_next_endpoint(dev->of_node, endpoint); if (!endpoint) break; sdinfo = &pdata->sub_devs[i]; sdinfo->grp_id = 0; /* we only support camera */ sdinfo->inputs[0].index = i; strscpy(sdinfo->inputs[0].name, "Camera", sizeof(sdinfo->inputs[0].name)); sdinfo->inputs[0].type = V4L2_INPUT_TYPE_CAMERA; sdinfo->inputs[0].std = V4L2_STD_ALL; sdinfo->inputs[0].capabilities = V4L2_IN_CAP_STD; sdinfo->can_route = 0; sdinfo->routes = NULL; of_property_read_u32(endpoint, "ti,am437x-vpfe-interface", &sdinfo->vpfe_param.if_type); if (sdinfo->vpfe_param.if_type < 0 || sdinfo->vpfe_param.if_type > 4) { sdinfo->vpfe_param.if_type = VPFE_RAW_BAYER; } err = 
v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint), &bus_cfg); if (err) { dev_err(dev, "Could not parse the endpoint\n"); goto cleanup; } sdinfo->vpfe_param.bus_width = bus_cfg.bus.parallel.bus_width; if (sdinfo->vpfe_param.bus_width < 8 || sdinfo->vpfe_param.bus_width > 16) { dev_err(dev, "Invalid bus width.\n"); goto cleanup; } flags = bus_cfg.bus.parallel.flags; if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) sdinfo->vpfe_param.hdpol = 1; if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) sdinfo->vpfe_param.vdpol = 1; rem = of_graph_get_remote_port_parent(endpoint); if (!rem) { dev_err(dev, "Remote device at %pOF not found\n", endpoint); goto cleanup; } pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpfe->notifier, of_fwnode_handle(rem), struct v4l2_async_connection); of_node_put(rem); if (IS_ERR(pdata->asd[i])) goto cleanup; } of_node_put(endpoint); return pdata; cleanup: v4l2_async_nf_cleanup(&vpfe->notifier); of_node_put(endpoint); return NULL; } /* * vpfe_probe : This function creates device entries by register * itself to the V4L2 driver and initializes fields of each * device objects */ static int vpfe_probe(struct platform_device *pdev) { struct vpfe_config *vpfe_cfg; struct vpfe_device *vpfe; struct vpfe_ccdc *ccdc; int ret; vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL); if (!vpfe) return -ENOMEM; vpfe->pdev = &pdev->dev; ret = v4l2_device_register(&pdev->dev, &vpfe->v4l2_dev); if (ret) { vpfe_err(vpfe, "Unable to register v4l2 device.\n"); return ret; } vpfe_cfg = vpfe_get_pdata(vpfe); if (!vpfe_cfg) { dev_err(&pdev->dev, "No platform data\n"); ret = -EINVAL; goto probe_out_cleanup; } vpfe->cfg = vpfe_cfg; ccdc = &vpfe->ccdc; ccdc->ccdc_cfg.base_addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ccdc->ccdc_cfg.base_addr)) { ret = PTR_ERR(ccdc->ccdc_cfg.base_addr); goto probe_out_cleanup; } ret = platform_get_irq(pdev, 0); if (ret < 0) goto probe_out_cleanup; vpfe->irq = ret; ret = devm_request_irq(vpfe->pdev, vpfe->irq, vpfe_isr, 0, "vpfe_capture0", 
vpfe); if (ret) { dev_err(&pdev->dev, "Unable to request interrupt\n"); ret = -EINVAL; goto probe_out_cleanup; } /* set the driver data in platform device */ platform_set_drvdata(pdev, vpfe); /* Enabling module functional clock */ pm_runtime_enable(&pdev->dev); /* for now just enable it here instead of waiting for the open */ ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) { vpfe_err(vpfe, "Unable to resume device.\n"); goto probe_out_cleanup; } vpfe_ccdc_config_defaults(ccdc); pm_runtime_put_sync(&pdev->dev); vpfe->sd = devm_kcalloc(&pdev->dev, ARRAY_SIZE(vpfe->cfg->asd), sizeof(struct v4l2_subdev *), GFP_KERNEL); if (!vpfe->sd) { ret = -ENOMEM; goto probe_out_cleanup; } vpfe->notifier.ops = &vpfe_async_ops; ret = v4l2_async_nf_register(&vpfe->notifier); if (ret) { vpfe_err(vpfe, "Error registering async notifier\n"); ret = -EINVAL; goto probe_out_cleanup; } return 0; probe_out_cleanup: v4l2_async_nf_cleanup(&vpfe->notifier); v4l2_device_unregister(&vpfe->v4l2_dev); return ret; } /* * vpfe_remove : It un-register device from V4L2 driver */ static void vpfe_remove(struct platform_device *pdev) { struct vpfe_device *vpfe = platform_get_drvdata(pdev); pm_runtime_disable(&pdev->dev); v4l2_async_nf_unregister(&vpfe->notifier); v4l2_async_nf_cleanup(&vpfe->notifier); video_unregister_device(&vpfe->video_dev); v4l2_device_unregister(&vpfe->v4l2_dev); } #ifdef CONFIG_PM_SLEEP static void vpfe_save_context(struct vpfe_ccdc *ccdc) { ccdc->ccdc_ctx[VPFE_PCR >> 2] = vpfe_reg_read(ccdc, VPFE_PCR); ccdc->ccdc_ctx[VPFE_SYNMODE >> 2] = vpfe_reg_read(ccdc, VPFE_SYNMODE); ccdc->ccdc_ctx[VPFE_SDOFST >> 2] = vpfe_reg_read(ccdc, VPFE_SDOFST); ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2] = vpfe_reg_read(ccdc, VPFE_SDR_ADDR); ccdc->ccdc_ctx[VPFE_CLAMP >> 2] = vpfe_reg_read(ccdc, VPFE_CLAMP); ccdc->ccdc_ctx[VPFE_DCSUB >> 2] = vpfe_reg_read(ccdc, VPFE_DCSUB); ccdc->ccdc_ctx[VPFE_COLPTN >> 2] = vpfe_reg_read(ccdc, VPFE_COLPTN); ccdc->ccdc_ctx[VPFE_BLKCMP >> 2] = vpfe_reg_read(ccdc, 
VPFE_BLKCMP); ccdc->ccdc_ctx[VPFE_VDINT >> 2] = vpfe_reg_read(ccdc, VPFE_VDINT); ccdc->ccdc_ctx[VPFE_ALAW >> 2] = vpfe_reg_read(ccdc, VPFE_ALAW); ccdc->ccdc_ctx[VPFE_REC656IF >> 2] = vpfe_reg_read(ccdc, VPFE_REC656IF); ccdc->ccdc_ctx[VPFE_CCDCFG >> 2] = vpfe_reg_read(ccdc, VPFE_CCDCFG); ccdc->ccdc_ctx[VPFE_CULLING >> 2] = vpfe_reg_read(ccdc, VPFE_CULLING); ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2] = vpfe_reg_read(ccdc, VPFE_HD_VD_WID); ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2] = vpfe_reg_read(ccdc, VPFE_PIX_LINES); ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2] = vpfe_reg_read(ccdc, VPFE_HORZ_INFO); ccdc->ccdc_ctx[VPFE_VERT_START >> 2] = vpfe_reg_read(ccdc, VPFE_VERT_START); ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2] = vpfe_reg_read(ccdc, VPFE_VERT_LINES); ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2] = vpfe_reg_read(ccdc, VPFE_HSIZE_OFF); } static int vpfe_suspend(struct device *dev) { struct vpfe_device *vpfe = dev_get_drvdata(dev); struct vpfe_ccdc *ccdc = &vpfe->ccdc; /* only do full suspend if streaming has started */ if (vb2_start_streaming_called(&vpfe->buffer_queue)) { /* * ignore RPM resume errors here, as it is already too late. * A check like that should happen earlier, either at * open() or just before start streaming. 
*/ pm_runtime_get_sync(dev); vpfe_config_enable(ccdc, 1); /* Save VPFE context */ vpfe_save_context(ccdc); /* Disable CCDC */ vpfe_pcr_enable(ccdc, 0); vpfe_config_enable(ccdc, 0); /* Disable both master and slave clock */ pm_runtime_put_sync(dev); } /* Select sleep pin state */ pinctrl_pm_select_sleep_state(dev); return 0; } static void vpfe_restore_context(struct vpfe_ccdc *ccdc) { vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SYNMODE >> 2], VPFE_SYNMODE); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CULLING >> 2], VPFE_CULLING); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDOFST >> 2], VPFE_SDOFST); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_SDR_ADDR >> 2], VPFE_SDR_ADDR); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CLAMP >> 2], VPFE_CLAMP); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_DCSUB >> 2], VPFE_DCSUB); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_COLPTN >> 2], VPFE_COLPTN); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_BLKCMP >> 2], VPFE_BLKCMP); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VDINT >> 2], VPFE_VDINT); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_ALAW >> 2], VPFE_ALAW); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_REC656IF >> 2], VPFE_REC656IF); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_CCDCFG >> 2], VPFE_CCDCFG); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PCR >> 2], VPFE_PCR); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HD_VD_WID >> 2], VPFE_HD_VD_WID); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_PIX_LINES >> 2], VPFE_PIX_LINES); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HORZ_INFO >> 2], VPFE_HORZ_INFO); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_START >> 2], VPFE_VERT_START); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_VERT_LINES >> 2], VPFE_VERT_LINES); vpfe_reg_write(ccdc, ccdc->ccdc_ctx[VPFE_HSIZE_OFF >> 2], VPFE_HSIZE_OFF); } static int vpfe_resume(struct device *dev) { struct vpfe_device *vpfe = dev_get_drvdata(dev); struct vpfe_ccdc *ccdc = &vpfe->ccdc; /* only do full resume if streaming has started */ if (vb2_start_streaming_called(&vpfe->buffer_queue)) { /* Enable both master and 
slave clock */ pm_runtime_get_sync(dev); vpfe_config_enable(ccdc, 1); /* Restore VPFE context */ vpfe_restore_context(ccdc); vpfe_config_enable(ccdc, 0); pm_runtime_put_sync(dev); } /* Select default pin state */ pinctrl_pm_select_default_state(dev); return 0; } #endif static SIMPLE_DEV_PM_OPS(vpfe_pm_ops, vpfe_suspend, vpfe_resume); static const struct of_device_id vpfe_of_match[] = { { .compatible = "ti,am437x-vpfe", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, vpfe_of_match); static struct platform_driver vpfe_driver = { .probe = vpfe_probe, .remove_new = vpfe_remove, .driver = { .name = VPFE_MODULE_NAME, .pm = &vpfe_pm_ops, .of_match_table = vpfe_of_match, }, }; module_platform_driver(vpfe_driver); MODULE_AUTHOR("Texas Instruments"); MODULE_DESCRIPTION("TI AM437x VPFE driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(VPFE_VERSION);
/*
 * Extraction artifact preserved as a comment: repository metadata
 * "linux-master" / "drivers/media/platform/ti/am437x/am437x-vpfe.c"
 * marks the end of the AM437x VPFE driver; the DaVinci VPIF capture
 * driver source follows below.
 */
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2009 Texas Instruments Inc * Copyright (C) 2014 Lad, Prabhakar <[email protected]> * * TODO : add support for VBI & HBI data service * add static buffer allocation */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-ioctl.h> #include <media/i2c/tvp514x.h> #include <media/v4l2-mediabus.h> #include <linux/videodev2.h> #include "vpif.h" #include "vpif_capture.h" MODULE_DESCRIPTION("TI DaVinci VPIF Capture driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(VPIF_CAPTURE_VERSION); #define vpif_err(fmt, arg...) v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg) #define vpif_dbg(level, debug, fmt, arg...) \ v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg) static int debug = 1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level 0-1"); #define VPIF_DRIVER_NAME "vpif_capture" MODULE_ALIAS("platform:" VPIF_DRIVER_NAME); /* global variables */ static struct vpif_device vpif_obj = { {NULL} }; static struct device *vpif_dev; static void vpif_calculate_offsets(struct channel_obj *ch); static void vpif_config_addr(struct channel_obj *ch, int muxmode); static u8 channel_first_int[VPIF_NUMBER_OF_OBJECTS][2] = { {1, 1} }; /* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */ static int ycmux_mode; static inline struct vpif_cap_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb) { return container_of(vb, struct vpif_cap_buffer, vb); } /** * vpif_buffer_prepare : callback function for buffer prepare * @vb: ptr to vb2_buffer * * This is the callback function for buffer prepare when vb2_qbuf() * function is called. 
The buffer is prepared and user space virtual address * or user address is converted into physical address */ static int vpif_buffer_prepare(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vb2_queue *q = vb->vb2_queue; struct channel_obj *ch = vb2_get_drv_priv(q); struct common_obj *common; unsigned long addr; vpif_dbg(2, debug, "vpif_buffer_prepare\n"); common = &ch->common[VPIF_VIDEO_INDEX]; vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage); if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) return -EINVAL; vbuf->field = common->fmt.fmt.pix.field; addr = vb2_dma_contig_plane_dma_addr(vb, 0); if (!IS_ALIGNED((addr + common->ytop_off), 8) || !IS_ALIGNED((addr + common->ybtm_off), 8) || !IS_ALIGNED((addr + common->ctop_off), 8) || !IS_ALIGNED((addr + common->cbtm_off), 8)) { vpif_dbg(1, debug, "offset is not aligned\n"); return -EINVAL; } return 0; } /** * vpif_buffer_queue_setup : Callback function for buffer setup. * @vq: vb2_queue ptr * @nbuffers: ptr to number of buffers requested by application * @nplanes: contains number of distinct video planes needed to hold a frame * @sizes: contains the size (in bytes) of each plane. 
* @alloc_devs: ptr to allocation context * * This callback function is called when reqbuf() is called to adjust * the buffer count and buffer size */ static int vpif_buffer_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct channel_obj *ch = vb2_get_drv_priv(vq); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; unsigned size = common->fmt.fmt.pix.sizeimage; vpif_dbg(2, debug, "vpif_buffer_setup\n"); if (*nplanes) { if (sizes[0] < size) return -EINVAL; size = sizes[0]; } if (vq->num_buffers + *nbuffers < 3) *nbuffers = 3 - vq->num_buffers; *nplanes = 1; sizes[0] = size; /* Calculate the offset for Y and C data in the buffer */ vpif_calculate_offsets(ch); return 0; } /** * vpif_buffer_queue : Callback function to add buffer to DMA queue * @vb: ptr to vb2_buffer */ static void vpif_buffer_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue); struct vpif_cap_buffer *buf = to_vpif_buffer(vbuf); struct common_obj *common; unsigned long flags; common = &ch->common[VPIF_VIDEO_INDEX]; vpif_dbg(2, debug, "vpif_buffer_queue\n"); spin_lock_irqsave(&common->irqlock, flags); /* add the buffer to the DMA queue */ list_add_tail(&buf->list, &common->dma_queue); spin_unlock_irqrestore(&common->irqlock, flags); } /** * vpif_start_streaming : Starts the DMA engine for streaming * @vq: ptr to vb2_buffer * @count: number of buffers */ static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count) { struct vpif_capture_config *vpif_config_data = vpif_dev->platform_data; struct channel_obj *ch = vb2_get_drv_priv(vq); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_params *vpif = &ch->vpifparams; struct vpif_cap_buffer *buf, *tmp; unsigned long addr, flags; int ret; /* Initialize field_id */ ch->field_id = 0; /* configure 1 or 2 channel mode */ if 
(vpif_config_data->setup_input_channel_mode) { ret = vpif_config_data-> setup_input_channel_mode(vpif->std_info.ycmux_mode); if (ret < 0) { vpif_dbg(1, debug, "can't set vpif channel mode\n"); goto err; } } ret = v4l2_subdev_call(ch->sd, video, s_stream, 1); if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) { vpif_dbg(1, debug, "stream on failed in subdev\n"); goto err; } /* Call vpif_set_params function to set the parameters and addresses */ ret = vpif_set_video_params(vpif, ch->channel_id); if (ret < 0) { vpif_dbg(1, debug, "can't set video params\n"); goto err; } ycmux_mode = ret; vpif_config_addr(ch, ret); /* Get the next frame from the buffer queue */ spin_lock_irqsave(&common->irqlock, flags); common->cur_frm = common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); /* Remove buffer from the buffer queue */ list_del(&common->cur_frm->list); spin_unlock_irqrestore(&common->irqlock, flags); addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0); common->set_addr(addr + common->ytop_off, addr + common->ybtm_off, addr + common->ctop_off, addr + common->cbtm_off); /** * Set interrupt for both the fields in VPIF Register enable channel in * VPIF register */ channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1; if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { channel0_intr_assert(); channel0_intr_enable(1); enable_channel0(1); } if (VPIF_CHANNEL1_VIDEO == ch->channel_id || ycmux_mode == 2) { channel1_intr_assert(); channel1_intr_enable(1); enable_channel1(1); } return 0; err: spin_lock_irqsave(&common->irqlock, flags); list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) { list_del(&buf->list); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); } spin_unlock_irqrestore(&common->irqlock, flags); return ret; } /** * vpif_stop_streaming : Stop the DMA engine * @vq: ptr to vb2_queue * * This callback stops the DMA engine and any remaining buffers * in the DMA queue are released. 
*/ static void vpif_stop_streaming(struct vb2_queue *vq) { struct channel_obj *ch = vb2_get_drv_priv(vq); struct common_obj *common; unsigned long flags; int ret; common = &ch->common[VPIF_VIDEO_INDEX]; /* Disable channel as per its device type and channel id */ if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { enable_channel0(0); channel0_intr_enable(0); } if (VPIF_CHANNEL1_VIDEO == ch->channel_id || ycmux_mode == 2) { enable_channel1(0); channel1_intr_enable(0); } ycmux_mode = 0; ret = v4l2_subdev_call(ch->sd, video, s_stream, 0); if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) vpif_dbg(1, debug, "stream off failed in subdev\n"); /* release all active buffers */ if (common->cur_frm == common->next_frm) { vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR); } else { if (common->cur_frm) vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR); if (common->next_frm) vb2_buffer_done(&common->next_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR); } spin_lock_irqsave(&common->irqlock, flags); while (!list_empty(&common->dma_queue)) { common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); list_del(&common->next_frm->list); vb2_buffer_done(&common->next_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR); } spin_unlock_irqrestore(&common->irqlock, flags); } static const struct vb2_ops video_qops = { .queue_setup = vpif_buffer_queue_setup, .buf_prepare = vpif_buffer_prepare, .start_streaming = vpif_start_streaming, .stop_streaming = vpif_stop_streaming, .buf_queue = vpif_buffer_queue, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; /** * vpif_process_buffer_complete: process a completed buffer * @common: ptr to common channel object * * This function time stamp the buffer and mark it as DONE. 
It also * wake up any process waiting on the QUEUE and set the next buffer * as current */ static void vpif_process_buffer_complete(struct common_obj *common) { common->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&common->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE); /* Make curFrm pointing to nextFrm */ common->cur_frm = common->next_frm; } /** * vpif_schedule_next_buffer: set next buffer address for capture * @common : ptr to common channel object * * This function will get next buffer from the dma queue and * set the buffer address in the vpif register for capture. * the buffer is marked active */ static void vpif_schedule_next_buffer(struct common_obj *common) { unsigned long addr = 0; spin_lock(&common->irqlock); common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); /* Remove that buffer from the buffer queue */ list_del(&common->next_frm->list); spin_unlock(&common->irqlock); addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0); /* Set top and bottom field addresses in VPIF registers */ common->set_addr(addr + common->ytop_off, addr + common->ybtm_off, addr + common->ctop_off, addr + common->cbtm_off); } /** * vpif_channel_isr : ISR handler for vpif capture * @irq: irq number * @dev_id: dev_id ptr * * It changes status of the captured buffer, takes next buffer from the queue * and sets its address in VPIF registers */ static irqreturn_t vpif_channel_isr(int irq, void *dev_id) { struct vpif_device *dev = &vpif_obj; struct common_obj *common; struct channel_obj *ch; int channel_id; int fid = -1, i; channel_id = *(int *)(dev_id); if (!vpif_intr_status(channel_id)) return IRQ_NONE; ch = dev->dev[channel_id]; for (i = 0; i < VPIF_NUMBER_OF_OBJECTS; i++) { common = &ch->common[i]; /* skip If streaming is not started in this channel */ /* Check the field format */ if (1 == ch->vpifparams.std_info.frm_fmt || common->fmt.fmt.pix.field == V4L2_FIELD_NONE) { /* Progressive mode */ 
spin_lock(&common->irqlock); if (list_empty(&common->dma_queue)) { spin_unlock(&common->irqlock); continue; } spin_unlock(&common->irqlock); if (!channel_first_int[i][channel_id]) vpif_process_buffer_complete(common); channel_first_int[i][channel_id] = 0; vpif_schedule_next_buffer(common); channel_first_int[i][channel_id] = 0; } else { /** * Interlaced mode. If it is first interrupt, ignore * it */ if (channel_first_int[i][channel_id]) { channel_first_int[i][channel_id] = 0; continue; } if (0 == i) { ch->field_id ^= 1; /* Get field id from VPIF registers */ fid = vpif_channel_getfid(ch->channel_id); if (fid != ch->field_id) { /** * If field id does not match stored * field id, make them in sync */ if (0 == fid) ch->field_id = fid; return IRQ_HANDLED; } } /* device field id and local field id are in sync */ if (0 == fid) { /* this is even field */ if (common->cur_frm == common->next_frm) continue; /* mark the current buffer as done */ vpif_process_buffer_complete(common); } else if (1 == fid) { /* odd field */ spin_lock(&common->irqlock); if (list_empty(&common->dma_queue) || (common->cur_frm != common->next_frm)) { spin_unlock(&common->irqlock); continue; } spin_unlock(&common->irqlock); vpif_schedule_next_buffer(common); } } } return IRQ_HANDLED; } /** * vpif_update_std_info() - update standard related info * @ch: ptr to channel object * * For a given standard selected by application, update values * in the device data structures */ static int vpif_update_std_info(struct channel_obj *ch) { struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_params *vpifparams = &ch->vpifparams; const struct vpif_channel_config_params *config; struct vpif_channel_config_params *std_info = &vpifparams->std_info; struct video_obj *vid_ch = &ch->video; int index; struct v4l2_pix_format *pixfmt = &common->fmt.fmt.pix; vpif_dbg(2, debug, "vpif_update_std_info\n"); /* * if called after try_fmt or g_fmt, there will already be a size * so use that by default. 
 */
	if (pixfmt->width && pixfmt->height) {
		if (pixfmt->field == V4L2_FIELD_ANY ||
		    pixfmt->field == V4L2_FIELD_NONE)
			pixfmt->field = V4L2_FIELD_NONE;

		vpifparams->iface.if_type = VPIF_IF_BT656;
		if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10 ||
		    pixfmt->pixelformat == V4L2_PIX_FMT_SBGGR8)
			vpifparams->iface.if_type = VPIF_IF_RAW_BAYER;

		if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10)
			vpifparams->params.data_sz = 1; /* 10 bits/pixel.  */

		/*
		 * For raw formats from camera sensors, we don't need
		 * the std_info from table lookup, so nothing else to do here.
		 */
		if (vpifparams->iface.if_type == VPIF_IF_RAW_BAYER) {
			memset(std_info, 0,
			       sizeof(struct vpif_channel_config_params));
			vpifparams->std_info.capture_format = 1; /* CCD/raw mode */
			return 0;
		}
	}

	/* Look up the standard/timing table for BT.656/1120 interfaces */
	for (index = 0; index < vpif_ch_params_count; index++) {
		config = &vpif_ch_params[index];
		if (config->hd_sd == 0) {
			vpif_dbg(2, debug, "SD format\n");
			if (config->stdid & vid_ch->stdid) {
				memcpy(std_info, config, sizeof(*config));
				break;
			}
		} else {
			vpif_dbg(2, debug, "HD format\n");
			if (!memcmp(&config->dv_timings, &vid_ch->dv_timings,
				sizeof(vid_ch->dv_timings))) {
				memcpy(std_info, config, sizeof(*config));
				break;
			}
		}
	}

	/* standard not found */
	if (index == vpif_ch_params_count)
		return -EINVAL;

	/* Propagate the table entry into the cached pixel format */
	common->fmt.fmt.pix.width = std_info->width;
	common->width = std_info->width;
	common->fmt.fmt.pix.height = std_info->height;
	common->height = std_info->height;
	common->fmt.fmt.pix.sizeimage = common->height * common->width * 2;
	common->fmt.fmt.pix.bytesperline = std_info->width;
	vpifparams->video_params.hpitch = std_info->width;
	vpifparams->video_params.storage_mode = std_info->frm_fmt;

	if (vid_ch->stdid)
		common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
	else
		common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709;

	/* frm_fmt == 1 means progressive capture */
	if (ch->vpifparams.std_info.frm_fmt)
		common->fmt.fmt.pix.field = V4L2_FIELD_NONE;
	else
		common->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;

	if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER)
		common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SBGGR8;
	else
		common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_NV16;

	common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	return 0;
}

/**
 * vpif_calculate_offsets : This function calculates buffers offsets
 * @ch : ptr to channel object
 *
 * This function calculates buffer offsets for Y and C in the top and
 * bottom field
 */
static void vpif_calculate_offsets(struct channel_obj *ch)
{
	unsigned int hpitch, sizeimage;
	struct video_obj *vid_ch = &(ch->video);
	struct vpif_params *vpifparams = &ch->vpifparams;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	enum v4l2_field field = common->fmt.fmt.pix.field;

	vpif_dbg(2, debug, "vpif_calculate_offsets\n");

	if (V4L2_FIELD_ANY == field) {
		if (vpifparams->std_info.frm_fmt)
			vid_ch->buf_field = V4L2_FIELD_NONE;
		else
			vid_ch->buf_field = V4L2_FIELD_INTERLACED;
	} else
		vid_ch->buf_field = common->fmt.fmt.pix.field;

	sizeimage = common->fmt.fmt.pix.sizeimage;

	hpitch = common->fmt.fmt.pix.bytesperline;

	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
		common->ytop_off = 0;
		common->ybtm_off = hpitch;
		common->ctop_off = sizeimage / 2;
		common->cbtm_off = sizeimage / 2 + hpitch;
	} else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
		common->ytop_off = 0;
		common->ybtm_off = sizeimage / 4;
		common->ctop_off = sizeimage / 2;
		common->cbtm_off = common->ctop_off + sizeimage / 4;
	} else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) {
		/* Calculate offsets for Y top, Y Bottom, C top and C Bottom */
		common->ybtm_off = 0;
		common->ytop_off = sizeimage / 4;
		common->cbtm_off = sizeimage / 2;
		common->ctop_off = common->cbtm_off + sizeimage / 4;
	}
	if ((V4L2_FIELD_NONE == vid_ch->buf_field) ||
	    (V4L2_FIELD_INTERLACED == vid_ch->buf_field))
		vpifparams->video_params.storage_mode = 1;
	else
		vpifparams->video_params.storage_mode = 0;

	/* Progressive frames use single-line pitch; interlaced doubles it */
	if (1 == vpifparams->std_info.frm_fmt)
		vpifparams->video_params.hpitch =
		    common->fmt.fmt.pix.bytesperline;
	else {
		if ((field == V4L2_FIELD_ANY) ||
		    (field == V4L2_FIELD_INTERLACED))
			vpifparams->video_params.hpitch =
			    common->fmt.fmt.pix.bytesperline * 2;
		else
			vpifparams->video_params.hpitch =
			    common->fmt.fmt.pix.bytesperline;
	}

	ch->vpifparams.video_params.stdid = vpifparams->std_info.stdid;
}

/**
 * vpif_config_addr() - function to configure buffer address in vpif
 * @ch: channel ptr
 * @muxmode: channel mux mode
 */
static void vpif_config_addr(struct channel_obj *ch, int muxmode)
{
	struct common_obj *common;

	vpif_dbg(2, debug, "vpif_config_addr\n");

	common = &(ch->common[VPIF_VIDEO_INDEX]);

	if (VPIF_CHANNEL1_VIDEO == ch->channel_id)
		common->set_addr = ch1_set_video_buf_addr;
	else if (2 == muxmode)
		common->set_addr = ch0_set_video_buf_addr_yc_nmux;
	else
		common->set_addr = ch0_set_video_buf_addr;
}

/**
 * vpif_input_to_subdev() - Maps input to sub device
 * @vpif_cfg: global config ptr
 * @chan_cfg: channel config ptr
 * @input_index: Given input index from application
 *
 * lookup the sub device information for a given input index.
 * we report all the inputs to application.
inputs table also
 * has sub device name for the each input
 */
static int vpif_input_to_subdev(
		struct vpif_capture_config *vpif_cfg,
		struct vpif_capture_chan_config *chan_cfg,
		int input_index)
{
	struct vpif_subdev_info *subdev_info;
	const char *subdev_name;
	int i;

	vpif_dbg(2, debug, "vpif_input_to_subdev\n");

	/* -1 means "no subdevice backs this input" */
	if (!chan_cfg)
		return -1;
	if (input_index >= chan_cfg->input_count)
		return -1;
	subdev_name = chan_cfg->inputs[input_index].subdev_name;
	if (!subdev_name)
		return -1;

	/* loop through the sub device list to get the sub device info */
	for (i = 0; i < vpif_cfg->subdev_count; i++) {
		subdev_info = &vpif_cfg->subdev_info[i];
		if (subdev_info && !strcmp(subdev_info->name, subdev_name))
			return i;
	}
	return -1;
}

/**
 * vpif_set_input() - Select an input
 * @vpif_cfg: global config ptr
 * @ch: channel
 * @index: Given input index from application
 *
 * Select the given input.
 */
static int vpif_set_input(
		struct vpif_capture_config *vpif_cfg,
		struct channel_obj *ch,
		int index)
{
	struct vpif_capture_chan_config *chan_cfg =
			&vpif_cfg->chan_config[ch->channel_id];
	struct vpif_subdev_info *subdev_info = NULL;
	struct v4l2_subdev *sd = NULL;
	u32 input = 0, output = 0;
	int sd_index;
	int ret;

	sd_index = vpif_input_to_subdev(vpif_cfg, chan_cfg, index);
	if (sd_index >= 0) {
		sd = vpif_obj.sd[sd_index];
		subdev_info = &vpif_cfg->subdev_info[sd_index];
	} else {
		/* no subdevice, no input to setup */
		return 0;
	}

	/* first setup input path from sub device to vpif */
	if (sd && vpif_cfg->setup_input_path) {
		ret = vpif_cfg->setup_input_path(ch->channel_id,
						 subdev_info->name);
		if (ret < 0) {
			vpif_dbg(1, debug, "couldn't setup input path for the" \
				 " sub device %s, for input index %d\n",
				 subdev_info->name, index);
			return ret;
		}
	}

	if (sd) {
		input = chan_cfg->inputs[index].input_route;
		output = chan_cfg->inputs[index].output_route;
		ret = v4l2_subdev_call(sd, video, s_routing,
				       input, output, 0);
		/* -ENOIOCTLCMD just means the subdev has no routing op */
		if (ret < 0 && ret != -ENOIOCTLCMD) {
			vpif_dbg(1, debug, "Failed to set input\n");
			return ret;
		}
	}
	ch->input_idx = index;
	ch->sd = sd;
	/* copy interface parameters to vpif */
	ch->vpifparams.iface = chan_cfg->vpif_if;

	/* update tvnorms from the sub device input info */
	ch->video_dev.tvnorms = chan_cfg->inputs[index].input.std;
	return 0;
}

/**
 * vpif_querystd() - querystd handler
 * @file: file ptr
 * @priv: file handle
 * @std_id: ptr to std id
 *
 * This function is called to detect standard at the selected input
 */
static int vpif_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	int ret;

	vpif_dbg(2, debug, "vpif_querystd\n");

	/* Call querystd function of decoder device */
	ret = v4l2_subdev_call(ch->sd, video, querystd, std_id);
	if (ret == -ENOIOCTLCMD || ret == -ENODEV)
		return -ENODATA;
	if (ret) {
		vpif_dbg(1, debug, "Failed to query standard for sub devices\n");
		return ret;
	}

	return 0;
}

/**
 * vpif_g_std() - get STD handler
 * @file: file ptr
 * @priv: file handle
 * @std: ptr to std id
 */
static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;

	vpif_dbg(2, debug, "vpif_g_std\n");

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_STD)
		return -ENODATA;

	*std = ch->video.stdid;
	return 0;
}

/**
 * vpif_s_std() - set STD handler
 * @file: file ptr
 * @priv: file handle
 * @std_id: ptr to std id
 */
static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;
	int ret;

	vpif_dbg(2, debug, "vpif_s_std\n");

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_STD)
		return -ENODATA;

	/* Changing the standard is not allowed while buffers are queued */
	if (vb2_is_busy(&common->buffer_queue))
		return -EBUSY;

	/* Call encoder subdevice function to set the standard */
	ch->video.stdid = std_id;
	memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));

	/* Get the information about the standard */
	if (vpif_update_std_info(ch)) {
		vpif_err("Error getting the standard info\n");
		return -EINVAL;
	}

	/* set standard in the sub device */
	ret = v4l2_subdev_call(ch->sd, video, s_std, std_id);
	if (ret && ret != -ENOIOCTLCMD && ret != -ENODEV) {
		vpif_dbg(1, debug, "Failed to set standard for sub devices\n");
		return ret;
	}
	return 0;
}

/**
 * vpif_enum_input() - ENUMINPUT handler
 * @file: file ptr
 * @priv: file handle
 * @input: ptr to input structure
 */
static int vpif_enum_input(struct file *file, void *priv,
			   struct v4l2_input *input)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_capture_chan_config *chan_cfg;

	chan_cfg = &config->chan_config[ch->channel_id];

	if (input->index >= chan_cfg->input_count)
		return -EINVAL;

	memcpy(input, &chan_cfg->inputs[input->index].input, sizeof(*input));
	return 0;
}

/**
 * vpif_g_input() - Get INPUT handler
 * @file: file ptr
 * @priv: file handle
 * @index: ptr to input index
 */
static int vpif_g_input(struct file *file, void *priv, unsigned int *index)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);

	*index = ch->input_idx;
	return 0;
}

/**
 * vpif_s_input() - Set INPUT handler
 * @file: file ptr
 * @priv: file handle
 * @index: input index
 */
static int
vpif_s_input(struct file *file, void *priv, unsigned int index)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct vpif_capture_chan_config *chan_cfg;

	chan_cfg = &config->chan_config[ch->channel_id];

	if (index >= chan_cfg->input_count)
		return -EINVAL;

	/* Input switch is not allowed while buffers are queued */
	if (vb2_is_busy(&common->buffer_queue))
		return -EBUSY;

	return vpif_set_input(config, ch, index);
}

/**
 * vpif_enum_fmt_vid_cap() - ENUM_FMT handler
 * @file: file ptr
 * @priv: file handle
 * @fmt: ptr to V4L2 format descriptor
 */
static int vpif_enum_fmt_vid_cap(struct file *file, void *priv,
				 struct v4l2_fmtdesc *fmt)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);

	/* Only one format is supported per interface type */
	if (fmt->index != 0) {
		vpif_dbg(1, debug, "Invalid format index\n");
		return -EINVAL;
	}

	/* Fill in the information about format */
	if (ch->vpifparams.iface.if_type == VPIF_IF_RAW_BAYER)
		fmt->pixelformat = V4L2_PIX_FMT_SBGGR8;
	else
		fmt->pixelformat = V4L2_PIX_FMT_NV16;
	return 0;
}

/**
 * vpif_try_fmt_vid_cap() - TRY_FMT handler
 * @file: file ptr
 * @priv: file handle
 * @fmt: ptr to v4l2 format structure
 */
static int vpif_try_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *fmt)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
	struct common_obj *common = &(ch->common[VPIF_VIDEO_INDEX]);

	common->fmt = *fmt;
	vpif_update_std_info(ch);

	/* Report back the values the hardware actually supports */
	pixfmt->field = common->fmt.fmt.pix.field;
	pixfmt->colorspace = common->fmt.fmt.pix.colorspace;
	pixfmt->bytesperline = common->fmt.fmt.pix.width;
	pixfmt->width = common->fmt.fmt.pix.width;
	pixfmt->height = common->fmt.fmt.pix.height;
	pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height * 2;
	if (pixfmt->pixelformat == V4L2_PIX_FMT_SGRBG10) {
		/* 10-bit raw Bayer is stored as 16 bits per pixel */
		pixfmt->bytesperline = common->fmt.fmt.pix.width * 2;
		pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
	}

	dev_dbg(vpif_dev, "%s: %d x %d; pitch=%d pixelformat=0x%08x, field=%d, size=%d\n",
		__func__, pixfmt->width, pixfmt->height,
		pixfmt->bytesperline, pixfmt->pixelformat,
		pixfmt->field, pixfmt->sizeimage);

	return 0;
}

/**
 * vpif_g_fmt_vid_cap() - Get FMT handler
 * @file: file ptr
 * @priv: file handle
 * @fmt: ptr to v4l2 format structure
 */
static int vpif_g_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *fmt)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct v4l2_pix_format *pix_fmt = &fmt->fmt.pix;
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_mbus_framefmt *mbus_fmt = &format.format;
	int ret;

	/* Check the validity of the buffer type */
	if (common->fmt.type != fmt->type)
		return -EINVAL;

	/* By default, use currently set fmt */
	*fmt = common->fmt;

	/* If subdev has get_fmt, use that to override */
	ret = v4l2_subdev_call(ch->sd, pad, get_fmt, NULL, &format);
	if (!ret && mbus_fmt->code) {
		v4l2_fill_pix_format(pix_fmt, mbus_fmt);
		pix_fmt->bytesperline = pix_fmt->width;
		if (mbus_fmt->code == MEDIA_BUS_FMT_SGRBG10_1X10) {
			/* e.g. mt9v032 */
			pix_fmt->pixelformat = V4L2_PIX_FMT_SGRBG10;
			pix_fmt->bytesperline = pix_fmt->width * 2;
		} else if (mbus_fmt->code == MEDIA_BUS_FMT_UYVY8_2X8) {
			/* e.g. tvp514x */
			pix_fmt->pixelformat = V4L2_PIX_FMT_NV16;
			pix_fmt->bytesperline = pix_fmt->width * 2;
		} else {
			dev_warn(vpif_dev, "%s: Unhandled media-bus format 0x%x\n",
				 __func__, mbus_fmt->code);
		}
		pix_fmt->sizeimage = pix_fmt->bytesperline * pix_fmt->height;
		dev_dbg(vpif_dev, "%s: %d x %d; pitch=%d, pixelformat=0x%08x, code=0x%x, field=%d, size=%d\n",
			__func__, pix_fmt->width, pix_fmt->height,
			pix_fmt->bytesperline, pix_fmt->pixelformat,
			mbus_fmt->code, pix_fmt->field, pix_fmt->sizeimage);

		/* Cache the subdev-provided format as the current one */
		common->fmt = *fmt;
		vpif_update_std_info(ch);
	}

	return 0;
}

/**
 * vpif_s_fmt_vid_cap() - Set FMT handler
 * @file: file ptr
 * @priv: file handle
 * @fmt: ptr to v4l2 format structure
 */
static int vpif_s_fmt_vid_cap(struct file *file, void *priv,
			      struct v4l2_format *fmt)
{
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	int ret;

	vpif_dbg(2, debug, "%s\n", __func__);

	if (vb2_is_busy(&common->buffer_queue))
		return -EBUSY;

	ret = vpif_try_fmt_vid_cap(file, priv, fmt);
	if (ret)
		return ret;

	/* store the format in the channel object */
	common->fmt = *fmt;
	return 0;
}

/**
 * vpif_querycap() - QUERYCAP handler
 * @file: file ptr
 * @priv: file handle
 * @cap: ptr to v4l2_capability structure
 */
static int vpif_querycap(struct file *file, void  *priv,
			 struct v4l2_capability *cap)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;

	strscpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver));
	strscpy(cap->card, config->card_name, sizeof(cap->card));

	return 0;
}

/**
 * vpif_enum_dv_timings() - ENUM_DV_TIMINGS handler
 * @file: file ptr
 * @priv: file handle
 * @timings: input timings
 */
static int
vpif_enum_dv_timings(struct file *file, void *priv,
		     struct v4l2_enum_dv_timings *timings)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_capture_chan_config
*chan_cfg;
	struct v4l2_input input;
	int ret;

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
		return -ENODATA;

	timings->pad = 0;

	/* Enumeration is delegated to the connected subdevice */
	ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings);
	if (ret == -ENOIOCTLCMD || ret == -ENODEV)
		return -EINVAL;
	return ret;
}

/**
 * vpif_query_dv_timings() - QUERY_DV_TIMINGS handler
 * @file: file ptr
 * @priv: file handle
 * @timings: input timings
 */
static int vpif_query_dv_timings(struct file *file, void *priv,
				 struct v4l2_dv_timings *timings)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;
	int ret;

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
		return -ENODATA;

	/* Timing detection is delegated to the connected subdevice */
	ret = v4l2_subdev_call(ch->sd, video, query_dv_timings, timings);
	if (ret == -ENOIOCTLCMD || ret == -ENODEV)
		return -ENODATA;
	return ret;
}

/**
 * vpif_s_dv_timings() - S_DV_TIMINGS handler
 * @file: file ptr
 * @priv: file handle
 * @timings: digital video timings
 */
static int vpif_s_dv_timings(struct file *file, void *priv,
			     struct v4l2_dv_timings *timings)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct vpif_params *vpifparams = &ch->vpifparams;
	struct vpif_channel_config_params *std_info = &vpifparams->std_info;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct video_obj *vid_ch = &ch->video;
	struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt;
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;
	int ret;

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
		return -ENODATA;

	if (timings->type != V4L2_DV_BT_656_1120) {
		vpif_dbg(2, debug, "Timing type not defined\n");
		return -EINVAL;
	}

	/* Timings cannot change while buffers are queued */
	if (vb2_is_busy(&common->buffer_queue))
		return -EBUSY;

	/* Configure subdevice timings, if any */
	ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings);
	if (ret == -ENOIOCTLCMD || ret == -ENODEV)
		ret = 0;
	if (ret < 0) {
		vpif_dbg(2, debug, "Error setting custom DV timings\n");
		return ret;
	}

	/* Sanity-check the minimum set of timing fields the port needs */
	if (!(timings->bt.width && timings->bt.height &&
	      (timings->bt.hbackporch ||
	       timings->bt.hfrontporch ||
	       timings->bt.hsync) &&
	      timings->bt.vfrontporch &&
	      (timings->bt.vbackporch ||
	       timings->bt.vsync))) {
		vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n");
		return -EINVAL;
	}

	vid_ch->dv_timings = *timings;

	/* Configure video port timings */

	std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8;
	std_info->sav2eav = bt->width;

	std_info->l1 = 1;
	std_info->l3 = bt->vsync + bt->vbackporch + 1;

	std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt);
	if (bt->interlaced) {
		if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
			std_info->l5 = std_info->vsize/2 -
				(bt->vfrontporch - 1);
			std_info->l7 = std_info->vsize/2 + 1;
			std_info->l9 = std_info->l7 + bt->il_vsync +
				bt->il_vbackporch + 1;
			std_info->l11 = std_info->vsize -
				(bt->il_vfrontporch - 1);
		} else {
			vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n");
			return -EINVAL;
		}
	} else {
		std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
	}
	strscpy(std_info->name, "Custom timings BT656/1120",
		sizeof(std_info->name));
	std_info->width = bt->width;
	std_info->height = bt->height;
	/* frm_fmt: 1 = progressive, 0 = interlaced */
	std_info->frm_fmt = bt->interlaced ? 0 : 1;
	std_info->ycmux_mode = 0;
	std_info->capture_format = 0;
	std_info->vbi_supported = 0;
	std_info->hd_sd = 1;
	std_info->stdid = 0;
	vid_ch->stdid = 0;

	return 0;
}

/**
 * vpif_g_dv_timings() - G_DV_TIMINGS handler
 * @file: file ptr
 * @priv: file handle
 * @timings: digital video timings
 */
static int vpif_g_dv_timings(struct file *file, void *priv,
			     struct v4l2_dv_timings *timings)
{
	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct video_device *vdev = video_devdata(file);
	struct channel_obj *ch = video_get_drvdata(vdev);
	struct video_obj *vid_ch = &ch->video;
	struct vpif_capture_chan_config *chan_cfg;
	struct v4l2_input input;

	if (!config->chan_config[ch->channel_id].inputs)
		return -ENODATA;

	chan_cfg = &config->chan_config[ch->channel_id];
	input = chan_cfg->inputs[ch->input_idx].input;
	if (input.capabilities != V4L2_IN_CAP_DV_TIMINGS)
		return -ENODATA;

	*timings = vid_ch->dv_timings;

	return 0;
}

/*
 * vpif_log_status() - Status information
 * @filep: file ptr
 * @priv: file handle
 *
 * Returns zero.
 */
static int vpif_log_status(struct file *filep, void *priv)
{
	/* status for sub devices */
	v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);

	return 0;
}

/* vpif capture ioctl operations */
static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
	.vidioc_querycap		= vpif_querycap,
	.vidioc_enum_fmt_vid_cap	= vpif_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap		= vpif_g_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= vpif_s_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap		= vpif_try_fmt_vid_cap,
	.vidioc_enum_input		= vpif_enum_input,
	.vidioc_s_input			= vpif_s_input,
	.vidioc_g_input			= vpif_g_input,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
	.vidioc_querystd		= vpif_querystd,
	.vidioc_s_std			= vpif_s_std,
	.vidioc_g_std			= vpif_g_std,
	.vidioc_enum_dv_timings		= vpif_enum_dv_timings,
	.vidioc_query_dv_timings	= vpif_query_dv_timings,
	.vidioc_s_dv_timings		= vpif_s_dv_timings,
	.vidioc_g_dv_timings		= vpif_g_dv_timings,
	.vidioc_log_status		= vpif_log_status,
};

/* vpif file operations */
static const struct v4l2_file_operations vpif_fops = {
	.owner = THIS_MODULE,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.unlocked_ioctl = video_ioctl2,
	.mmap = vb2_fop_mmap,
	.poll = vb2_fop_poll
};

/**
 * initialize_vpif() - Initialize vpif data structures
 *
 * Allocate memory for data structures and initialize them
 */
static int initialize_vpif(void)
{
	int err, i, j;
	int free_channel_objects_index;

	/* Allocate memory for six channel objects */
	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
		vpif_obj.dev[i] =
		    kzalloc(sizeof(*vpif_obj.dev[i]), GFP_KERNEL);
		/* If memory allocation fails, return error */
		if (!vpif_obj.dev[i]) {
			free_channel_objects_index = i;
			err = -ENOMEM;
			goto vpif_init_free_channel_objects;
		}
	}

	return 0;

vpif_init_free_channel_objects:
	/* Roll back the objects allocated before the failure */
	for (j = 0; j < free_channel_objects_index; j++)
		kfree(vpif_obj.dev[j]);
	return err;
}

/* Free all channel objects allocated by initialize_vpif() */
static inline void free_vpif_objs(void)
{
	int i;

	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++)
		kfree(vpif_obj.dev[i]);
}

/*
 * Async notifier "bound" callback: match the subdev either by fwnode
 * (DT boot) or by name (platform-data boot) and record it in vpif_obj.sd.
 */
static int vpif_async_bound(struct v4l2_async_notifier *notifier,
			    struct v4l2_subdev *subdev,
			    struct v4l2_async_connection *asd)
{
	int i;

	for (i = 0; i < vpif_obj.config->asd_sizes[0]; i++) {
		struct v4l2_async_connection *_asd = vpif_obj.config->asd[i];
		const struct fwnode_handle *fwnode = _asd->match.fwnode;

		if (fwnode == subdev->fwnode) {
			vpif_obj.sd[i] = subdev;
			vpif_obj.config->chan_config->inputs[i].subdev_name =
				(char *)to_of_node(subdev->fwnode)->full_name;
			vpif_dbg(2, debug,
				 "%s: setting input %d subdev_name = %s\n",
				 __func__, i,
				 vpif_obj.config->chan_config->inputs[i].subdev_name);
			return 0;
		}
	}

	for (i = 0; i < vpif_obj.config->subdev_count; i++)
		if (!strcmp(vpif_obj.config->subdev_info[i].name,
			    subdev->name)) {
			vpif_obj.sd[i] = subdev;
			return 0;
		}

	return -EINVAL;
}

/*
 * Final per-channel setup: select input 0, program a default format and
 * register the vb2 queue and video device for every capture channel.
 */
static int vpif_probe_complete(void)
{
	struct common_obj *common;
	struct video_device *vdev;
	struct channel_obj *ch;
	struct vb2_queue *q;
	int j, err, k;

	for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
		ch = vpif_obj.dev[j];
		ch->channel_id = j;
		common = &(ch->common[VPIF_VIDEO_INDEX]);
		spin_lock_init(&common->irqlock);
		mutex_init(&common->lock);

		/* select input 0 */
		err = vpif_set_input(vpif_obj.config, ch, 0);
		if (err)
			goto probe_out;

		/* set initial format */
		ch->video.stdid = V4L2_STD_525_60;
		memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings));
		common->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		vpif_update_std_info(ch);

		/* Initialize vb2 queue */
		q = &common->buffer_queue;
		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
		q->drv_priv = ch;
		q->ops = &video_qops;
		q->mem_ops = &vb2_dma_contig_memops;
		q->buf_struct_size = sizeof(struct vpif_cap_buffer);
		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
		q->min_buffers_needed = 1;
		q->lock =
&common->lock;
		q->dev = vpif_dev;

		err = vb2_queue_init(q);
		if (err) {
			vpif_err("vpif_capture: vb2_queue_init() failed\n");
			goto probe_out;
		}

		INIT_LIST_HEAD(&common->dma_queue);

		/* Initialize the video_device structure */
		vdev = &ch->video_dev;
		strscpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name));
		vdev->release = video_device_release_empty;
		vdev->fops = &vpif_fops;
		vdev->ioctl_ops = &vpif_ioctl_ops;
		vdev->v4l2_dev = &vpif_obj.v4l2_dev;
		vdev->vfl_dir = VFL_DIR_RX;
		vdev->queue = q;
		vdev->lock = &common->lock;
		vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
		video_set_drvdata(&ch->video_dev, ch);
		err = video_register_device(vdev,
					    VFL_TYPE_VIDEO, (j ? 1 : 0));
		if (err)
			goto probe_out;
	}

	v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n");
	return 0;

probe_out:
	/* Unwind only the devices registered before the failure */
	for (k = 0; k < j; k++) {
		/* Get the pointer to the channel object */
		ch = vpif_obj.dev[k];
		/* Unregister video device */
		video_unregister_device(&ch->video_dev);
	}
	return err;
}

/* Async notifier "complete" callback: all subdevs bound, finish probe */
static int vpif_async_complete(struct v4l2_async_notifier *notifier)
{
	return vpif_probe_complete();
}

static const struct v4l2_async_notifier_operations vpif_async_ops = {
	.bound = vpif_async_bound,
	.complete = vpif_async_complete,
};

/*
 * Build platform data from the device tree graph (DT boot) or fall back
 * to the board-supplied platform_data (legacy boot).
 */
static struct vpif_capture_config *
vpif_capture_get_pdata(struct platform_device *pdev,
		       struct v4l2_device *v4l2_dev)
{
	struct device_node *endpoint = NULL;
	struct device_node *rem = NULL;
	struct vpif_capture_config *pdata;
	struct vpif_subdev_info *sdinfo;
	struct vpif_capture_chan_config *chan;
	unsigned int i;

	v4l2_async_nf_init(&vpif_obj.notifier, v4l2_dev);

	/*
	 * DT boot: OF node from parent device contains
	 * video ports & endpoints data.
	 */
	if (pdev->dev.parent && pdev->dev.parent->of_node)
		pdev->dev.of_node = pdev->dev.parent->of_node;
	if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
		return pdev->dev.platform_data;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;
	pdata->subdev_info =
		devm_kcalloc(&pdev->dev,
			     VPIF_CAPTURE_NUM_CHANNELS,
			     sizeof(*pdata->subdev_info), GFP_KERNEL);
	if (!pdata->subdev_info)
		return NULL;

	for (i = 0; i < VPIF_CAPTURE_NUM_CHANNELS; i++) {
		struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = 0 };
		unsigned int flags;
		int err;

		endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
						      endpoint);
		if (!endpoint)
			break;

		rem = of_graph_get_remote_port_parent(endpoint);
		if (!rem) {
			dev_dbg(&pdev->dev, "Remote device at %pOF not found\n",
				endpoint);
			goto done;
		}

		sdinfo = &pdata->subdev_info[i];
		chan = &pdata->chan_config[i];
		chan->inputs = devm_kcalloc(&pdev->dev,
					    VPIF_CAPTURE_NUM_CHANNELS,
					    sizeof(*chan->inputs),
					    GFP_KERNEL);
		if (!chan->inputs)
			goto err_cleanup;

		chan->input_count++;
		chan->inputs[i].input.type = V4L2_INPUT_TYPE_CAMERA;
		chan->inputs[i].input.std = V4L2_STD_ALL;
		chan->inputs[i].input.capabilities = V4L2_IN_CAP_STD;

		err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(endpoint),
						 &bus_cfg);
		if (err) {
			dev_err(&pdev->dev, "Could not parse the endpoint\n");
			of_node_put(rem);
			goto done;
		}

		dev_dbg(&pdev->dev, "Endpoint %pOF, bus_width = %d\n",
			endpoint, bus_cfg.bus.parallel.bus_width);

		flags = bus_cfg.bus.parallel.flags;

		if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
			chan->vpif_if.hd_pol = 1;

		if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
			chan->vpif_if.vd_pol = 1;

		dev_dbg(&pdev->dev, "Remote device %pOF found\n", rem);
		sdinfo->name = rem->full_name;

		pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpif_obj.notifier,
							 of_fwnode_handle(rem),
							 struct v4l2_async_connection);
		if (IS_ERR(pdata->asd[i]))
			goto err_cleanup;

		of_node_put(rem);
	}

done:
	of_node_put(endpoint);
	pdata->asd_sizes[0] = i;
	pdata->subdev_count = i;
	pdata->card_name = "DA850/OMAP-L138 Video Capture";

	return pdata;

err_cleanup:
	of_node_put(rem);
	of_node_put(endpoint);
	v4l2_async_nf_cleanup(&vpif_obj.notifier);

	return NULL;
}

/**
 * vpif_probe : This function probes the vpif capture driver
 * @pdev: platform device pointer
 *
 * This creates device entries by register itself to the V4L2 driver and
 * initializes fields of each channel objects
 */
static __init int vpif_probe(struct platform_device *pdev)
{
	struct vpif_subdev_info *subdevdata;
	struct i2c_adapter *i2c_adap;
	int subdev_count;
	int res_idx = 0;
	int i, err;

	vpif_dev = &pdev->dev;

	err = initialize_vpif();
	if (err) {
		v4l2_err(vpif_dev->driver, "Error initializing vpif\n");
		return err;
	}

	err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
	if (err) {
		v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
		goto vpif_free;
	}

	/* Request every IRQ the platform provides, one per resource index */
	do {
		int irq;

		err = platform_get_irq_optional(pdev, res_idx);
		if (err < 0 && err != -ENXIO)
			goto vpif_unregister;
		if (err > 0)
			irq = err;
		else
			break;

		err = devm_request_irq(&pdev->dev, irq, vpif_channel_isr,
				       IRQF_SHARED, VPIF_DRIVER_NAME,
				       (void *)(&vpif_obj.dev[res_idx]->channel_id));
		if (err)
			goto vpif_unregister;
	} while (++res_idx);

	pdev->dev.platform_data =
		vpif_capture_get_pdata(pdev, &vpif_obj.v4l2_dev);
	if (!pdev->dev.platform_data) {
		err = -EINVAL;
		dev_warn(&pdev->dev, "Missing platform data.  Giving up.\n");
		goto vpif_unregister;
	}

	vpif_obj.config = pdev->dev.platform_data;

	subdev_count = vpif_obj.config->subdev_count;
	vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL);
	if (!vpif_obj.sd) {
		err = -ENOMEM;
		goto probe_subdev_out;
	}

	/* Legacy (non-DT) boot: register I2C subdevs synchronously */
	if (!vpif_obj.config->asd_sizes[0]) {
		int i2c_id = vpif_obj.config->i2c_adapter_id;

		i2c_adap = i2c_get_adapter(i2c_id);
		WARN_ON(!i2c_adap);
		for (i = 0; i < subdev_count; i++) {
			subdevdata = &vpif_obj.config->subdev_info[i];
			vpif_obj.sd[i] =
				v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
							  i2c_adap,
							  &subdevdata->
							  board_info,
							  NULL);

			if (!vpif_obj.sd[i]) {
				vpif_err("Error registering v4l2 subdevice\n");
				err = -ENODEV;
				goto probe_subdev_out;
			}
			v4l2_info(&vpif_obj.v4l2_dev,
				  "registered sub device %s\n",
				   subdevdata->name);
		}
		err = vpif_probe_complete();
		if (err)
			goto probe_subdev_out;
	} else {
		/* DT boot: defer completion until all subdevs are bound */
		vpif_obj.notifier.ops = &vpif_async_ops;
		err = v4l2_async_nf_register(&vpif_obj.notifier);
		if (err) {
			vpif_err("Error registering async notifier\n");
			err = -EINVAL;
			goto probe_subdev_out;
		}
	}

	return 0;

probe_subdev_out:
	v4l2_async_nf_cleanup(&vpif_obj.notifier);
	/* free sub devices memory */
	kfree(vpif_obj.sd);
vpif_unregister:
	v4l2_device_unregister(&vpif_obj.v4l2_dev);
vpif_free:
	free_vpif_objs();

	return err;
}

/**
 * vpif_remove() - driver remove handler
 * @device: ptr to platform device structure
 *
 * The video device is unregistered
 */
static void vpif_remove(struct platform_device *device)
{
	struct channel_obj *ch;
	int i;

	v4l2_async_nf_unregister(&vpif_obj.notifier);
	v4l2_async_nf_cleanup(&vpif_obj.notifier);
	v4l2_device_unregister(&vpif_obj.v4l2_dev);

	kfree(vpif_obj.sd);
	/* un-register device */
	for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
		/* Get the pointer to the channel object */
		ch = vpif_obj.dev[i];
		/* Unregister video device */
		video_unregister_device(&ch->video_dev);
		kfree(vpif_obj.dev[i]);
	}
}

#ifdef CONFIG_PM_SLEEP
/**
 * vpif_suspend: vpif device suspend
 * @dev: pointer to &struct device
 */
static int
vpif_suspend(struct device *dev) { struct common_obj *common; struct channel_obj *ch; int i; for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; if (!vb2_start_streaming_called(&common->buffer_queue)) continue; mutex_lock(&common->lock); /* Disable channel */ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { enable_channel0(0); channel0_intr_enable(0); } if (ch->channel_id == VPIF_CHANNEL1_VIDEO || ycmux_mode == 2) { enable_channel1(0); channel1_intr_enable(0); } mutex_unlock(&common->lock); } return 0; } /* * vpif_resume: vpif device suspend */ static int vpif_resume(struct device *dev) { struct common_obj *common; struct channel_obj *ch; int i; for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; if (!vb2_start_streaming_called(&common->buffer_queue)) continue; mutex_lock(&common->lock); /* Enable channel */ if (ch->channel_id == VPIF_CHANNEL0_VIDEO) { enable_channel0(1); channel0_intr_enable(1); } if (ch->channel_id == VPIF_CHANNEL1_VIDEO || ycmux_mode == 2) { enable_channel1(1); channel1_intr_enable(1); } mutex_unlock(&common->lock); } return 0; } #endif static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume); static __refdata struct platform_driver vpif_driver = { .driver = { .name = VPIF_DRIVER_NAME, .pm = &vpif_pm_ops, }, .probe = vpif_probe, .remove_new = vpif_remove, }; module_platform_driver(vpif_driver);
linux-master
drivers/media/platform/ti/davinci/vpif_capture.c
/*
 * vpif - Video Port Interface driver
 * VPIF is a receiver and transmitter for video data. It has two channels(0, 1)
 * that receiving video byte stream and two channels(2, 3) for video output.
 * The hardware supports SDTV, HDTV formats, raw data capture.
 * Currently, the driver supports NTSC and PAL standards.
 *
 * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/v4l2-dv-timings.h>
#include <linux/of_graph.h>

#include "vpif.h"

MODULE_DESCRIPTION("TI DaVinci Video Port Interface driver");
MODULE_LICENSE("GPL");

#define VPIF_DRIVER_NAME	"vpif"
MODULE_ALIAS("platform:" VPIF_DRIVER_NAME);

/* Number of mode entries supported per channel (see vpifregs[].max_modes) */
#define VPIF_CH0_MAX_MODES	22
#define VPIF_CH1_MAX_MODES	2
#define VPIF_CH2_MAX_MODES	15
#define VPIF_CH3_MAX_MODES	2

/*
 * Child platform devices created by the DT probe path so the legacy
 * (non-DT) capture/display platform_drivers can bind to them.
 */
struct vpif_data {
	struct platform_device *capture;
	struct platform_device *display;
};

/* Serializes VPIF register accesses shared by capture and display drivers */
DEFINE_SPINLOCK(vpif_lock);
EXPORT_SYMBOL_GPL(vpif_lock);

/* Ioremapped base of the VPIF register block, used by regr()/regw() */
void __iomem *vpif_base;
EXPORT_SYMBOL_GPL(vpif_base);

/*
 * vpif_ch_params: video standard configuration parameters for vpif
 *
 * The table must include all presets from supported subdevices.
 */
const struct vpif_channel_config_params vpif_ch_params[] = {
	/* HDTV formats */
	{
		.name = "480p59_94",
		.width = 720,
		.height = 480,
		.frm_fmt = 1,
		.ycmux_mode = 0,
		.eav2sav = 138-8,
		.sav2eav = 720,
		.l1 = 1,
		.l3 = 43,
		.l5 = 523,
		.vsize = 525,
		.capture_format = 0,
		.vbi_supported = 0,
		.hd_sd = 1,
		.dv_timings = V4L2_DV_BT_CEA_720X480P59_94,
	},
	{
		.name = "576p50",
		.width = 720,
		.height = 576,
		.frm_fmt = 1,
		.ycmux_mode = 0,
		.eav2sav = 144-8,
		.sav2eav = 720,
		.l1 = 1,
		.l3 = 45,
		.l5 = 621,
		.vsize = 625,
		.capture_format = 0,
		.vbi_supported = 0,
		.hd_sd = 1,
		.dv_timings = V4L2_DV_BT_CEA_720X576P50,
	},
	{
		.name = "720p50",
		.width = 1280,
		.height = 720,
		.frm_fmt = 1,
		.ycmux_mode = 0,
		.eav2sav = 700-8,
		.sav2eav = 1280,
		.l1 = 1,
		.l3 = 26,
		.l5 = 746,
		.vsize = 750,
		.capture_format = 0,
		.vbi_supported = 0,
		.hd_sd = 1,
		.dv_timings = V4L2_DV_BT_CEA_1280X720P50,
	},
	{
		.name = "720p60",
		.width = 1280,
		.height = 720,
		.frm_fmt = 1,
		.ycmux_mode = 0,
		.eav2sav = 370 - 8,
		.sav2eav = 1280,
		.l1 = 1,
		.l3 = 26,
		.l5 = 746,
		.vsize = 750,
		.capture_format = 0,
		.vbi_supported = 0,
		.hd_sd = 1,
		.dv_timings = V4L2_DV_BT_CEA_1280X720P60,
	},
	{
		.name = "1080I50",
		.width = 1920,
		.height = 1080,
		.frm_fmt = 0,
		.ycmux_mode = 0,
		.eav2sav = 720 - 8,
		.sav2eav = 1920,
		.l1 = 1,
		.l3 = 21,
		.l5 = 561,
		.l7 = 563,
		.l9 = 584,
		.l11 = 1124,
		.vsize = 1125,
		.capture_format = 0,
		.vbi_supported = 0,
		.hd_sd = 1,
		.dv_timings = V4L2_DV_BT_CEA_1920X1080I50,
	},
	{
		.name = "1080I60",
		.width = 1920,
		.height = 1080,
		.frm_fmt = 0,
		.ycmux_mode = 0,
		.eav2sav = 280 - 8,
		.sav2eav = 1920,
		.l1 = 1,
		.l3 = 21,
		.l5 = 561,
		.l7 = 563,
		.l9 = 584,
		.l11 = 1124,
		.vsize = 1125,
		.capture_format = 0,
		.vbi_supported = 0,
		.hd_sd = 1,
		.dv_timings = V4L2_DV_BT_CEA_1920X1080I60,
	},
	{
		.name = "1080p60",
		.width = 1920,
		.height = 1080,
		.frm_fmt = 1,
		.ycmux_mode = 0,
		.eav2sav = 280 - 8,
		.sav2eav = 1920,
		.l1 = 1,
		.l3 = 42,
		.l5 = 1122,
		.vsize = 1125,
		.capture_format = 0,
		.vbi_supported = 0,
		.hd_sd = 1,
		.dv_timings = V4L2_DV_BT_CEA_1920X1080P60,
	},

	/* SDTV formats */
	{
		.name = "NTSC_M",
		.width = 720,
		.height = 480,
		.frm_fmt = 0,
		.ycmux_mode = 1,
		.eav2sav = 268,
		.sav2eav = 1440,
		.l1 = 1,
		.l3 = 23,
		.l5 = 263,
		.l7 = 266,
		.l9 = 286,
		.l11 = 525,
		.vsize = 525,
		.capture_format = 0,
		.vbi_supported = 1,
		.hd_sd = 0,
		.stdid = V4L2_STD_525_60,
	},
	{
		.name = "PAL_BDGHIK",
		.width = 720,
		.height = 576,
		.frm_fmt = 0,
		.ycmux_mode = 1,
		.eav2sav = 280,
		.sav2eav = 1440,
		.l1 = 1,
		.l3 = 23,
		.l5 = 311,
		.l7 = 313,
		.l9 = 336,
		.l11 = 624,
		.vsize = 625,
		.capture_format = 0,
		.vbi_supported = 1,
		.hd_sd = 0,
		.stdid = V4L2_STD_625_50,
	},
};
EXPORT_SYMBOL_GPL(vpif_ch_params);

const unsigned int vpif_ch_params_count = ARRAY_SIZE(vpif_ch_params);
EXPORT_SYMBOL_GPL(vpif_ch_params_count);

/* Write a single bit in a VPIF register: set it when val is non-zero */
static inline void vpif_wr_bit(u32 reg, u32 bit, u32 val)
{
	if (val)
		vpif_set_bit(reg, bit);
	else
		vpif_clr_bit(reg, bit);
}

/* This structure is used to keep track of VPIF size register's offsets */
struct vpif_registers {
	u32 h_cfg, v_cfg_00, v_cfg_01, v_cfg_02, v_cfg, ch_ctrl;
	u32 line_offset, vanc0_strt, vanc0_size, vanc1_strt;
	u32 vanc1_size, width_mask, len_mask;
	u8 max_modes;
};

static const struct vpif_registers vpifregs[VPIF_NUM_CHANNELS] = {
	/* Channel0 */
	{
		VPIF_CH0_H_CFG, VPIF_CH0_V_CFG_00, VPIF_CH0_V_CFG_01,
		VPIF_CH0_V_CFG_02, VPIF_CH0_V_CFG_03, VPIF_CH0_CTRL,
		VPIF_CH0_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF,
		VPIF_CH0_MAX_MODES,
	},
	/* Channel1 */
	{
		VPIF_CH1_H_CFG, VPIF_CH1_V_CFG_00, VPIF_CH1_V_CFG_01,
		VPIF_CH1_V_CFG_02, VPIF_CH1_V_CFG_03, VPIF_CH1_CTRL,
		VPIF_CH1_IMG_ADD_OFST, 0, 0, 0, 0, 0x1FFF, 0xFFF,
		VPIF_CH1_MAX_MODES,
	},
	/* Channel2 */
	{
		VPIF_CH2_H_CFG, VPIF_CH2_V_CFG_00, VPIF_CH2_V_CFG_01,
		VPIF_CH2_V_CFG_02, VPIF_CH2_V_CFG_03, VPIF_CH2_CTRL,
		VPIF_CH2_IMG_ADD_OFST, VPIF_CH2_VANC0_STRT, VPIF_CH2_VANC0_SIZE,
		VPIF_CH2_VANC1_STRT, VPIF_CH2_VANC1_SIZE, 0x7FF, 0x7FF,
		VPIF_CH2_MAX_MODES
	},
	/* Channel3 */
	{
		VPIF_CH3_H_CFG, VPIF_CH3_V_CFG_00, VPIF_CH3_V_CFG_01,
		VPIF_CH3_V_CFG_02, VPIF_CH3_V_CFG_03, VPIF_CH3_CTRL,
		VPIF_CH3_IMG_ADD_OFST, VPIF_CH3_VANC0_STRT, VPIF_CH3_VANC0_SIZE,
		VPIF_CH3_VANC1_STRT, VPIF_CH3_VANC1_SIZE, 0x7FF, 0x7FF,
		VPIF_CH3_MAX_MODES
	},
};

/* vpif_set_mode_info:
 * This function is used to set horizontal and vertical config parameters
 * As per the standard in the channel, configure the values of L1, L3,
 * L5, L7 L9, L11 in VPIF Register , also write width and height
 *
 * Note: masks are taken from vpifregs[config_channel_id] while the writes
 * go to vpifregs[channel_id]; the two differ in the non-ycmux (HDTV) case
 * where one standard programs two consecutive channels.
 */
static void vpif_set_mode_info(const struct vpif_channel_config_params *config,
				u8 channel_id, u8 config_channel_id)
{
	u32 value;

	/* EAV-to-SAV / SAV-to-EAV horizontal timing, packed as two fields */
	value = (config->eav2sav & vpifregs[config_channel_id].width_mask);
	value <<= VPIF_CH_LEN_SHIFT;
	value |= (config->sav2eav & vpifregs[config_channel_id].width_mask);
	regw(value, vpifregs[channel_id].h_cfg);

	/* Vertical line positions L1/L3, L5/L7, L9/L11, pairwise packed */
	value = (config->l1 & vpifregs[config_channel_id].len_mask);
	value <<= VPIF_CH_LEN_SHIFT;
	value |= (config->l3 & vpifregs[config_channel_id].len_mask);
	regw(value, vpifregs[channel_id].v_cfg_00);

	value = (config->l5 & vpifregs[config_channel_id].len_mask);
	value <<= VPIF_CH_LEN_SHIFT;
	value |= (config->l7 & vpifregs[config_channel_id].len_mask);
	regw(value, vpifregs[channel_id].v_cfg_01);

	value = (config->l9 & vpifregs[config_channel_id].len_mask);
	value <<= VPIF_CH_LEN_SHIFT;
	value |= (config->l11 & vpifregs[config_channel_id].len_mask);
	regw(value, vpifregs[channel_id].v_cfg_02);

	value = (config->vsize & vpifregs[config_channel_id].len_mask);
	regw(value, vpifregs[channel_id].v_cfg);
}

/* config_vpif_params
 * Function to set the parameters of a channel
 * Mainly modifies the channel ciontrol register
 * It sets frame format, yc mux mode
 *
 * @found: number of consecutive channels to program (1 when Y/C are muxed
 *	   on one channel, 2 when they travel on separate channels)
 */
static void config_vpif_params(struct vpif_params *vpifparams,
				u8 channel_id, u8 found)
{
	const struct vpif_channel_config_params *config = &vpifparams->std_info;
	u32 value, ch_nip, reg;
	u8 start, end;
	int i;

	start = channel_id;
	end = channel_id + found;

	for (i = start; i < end; i++) {
		reg = vpifregs[i].ch_ctrl;
		/* Channels 0/1 are capture, 2/3 are display */
		if (channel_id < 2)
			ch_nip = VPIF_CAPTURE_CH_NIP;
		else
			ch_nip = VPIF_DISPLAY_CH_NIP;

		vpif_wr_bit(reg, ch_nip, config->frm_fmt);
		vpif_wr_bit(reg, VPIF_CH_YC_MUX_BIT, config->ycmux_mode);
		vpif_wr_bit(reg, VPIF_CH_INPUT_FIELD_FRAME_BIT,
					vpifparams->video_params.storage_mode);

		/* Set raster scanning SDR Format */
		vpif_clr_bit(reg, VPIF_CH_SDR_FMT_BIT);
		vpif_wr_bit(reg, VPIF_CH_DATA_MODE_BIT, config->capture_format);

		if (channel_id > 1)	/* Set the Pixel enable bit */
			vpif_set_bit(reg, VPIF_DISPLAY_PIX_EN_BIT);
		else if (config->capture_format) {
			/* Set the polarity of various pins */
			vpif_wr_bit(reg, VPIF_CH_FID_POLARITY_BIT,
					vpifparams->iface.fid_pol);
			vpif_wr_bit(reg, VPIF_CH_V_VALID_POLARITY_BIT,
					vpifparams->iface.vd_pol);
			vpif_wr_bit(reg, VPIF_CH_H_VALID_POLARITY_BIT,
					vpifparams->iface.hd_pol);

			value = regr(reg);
			/* Set data width */
			value &= ~(0x3u << VPIF_CH_DATA_WIDTH_BIT);
			value |= ((vpifparams->params.data_sz) <<
						VPIF_CH_DATA_WIDTH_BIT);
			regw(value, reg);
		}

		/* Write the pitch in the driver */
		regw((vpifparams->video_params.hpitch),
						vpifregs[i].line_offset);
	}
}

/* vpif_set_video_params
 * This function is used to set video parameters in VPIF register
 *
 * Returns the number of channels programmed (1 or 2), which callers use
 * as the ycmux mode indicator.
 */
int vpif_set_video_params(struct vpif_params *vpifparams, u8 channel_id)
{
	const struct vpif_channel_config_params *config = &vpifparams->std_info;
	int found = 1;

	vpif_set_mode_info(config, channel_id, channel_id);
	if (!config->ycmux_mode) {
		/* YC are on separate channels (HDTV formats) */
		vpif_set_mode_info(config, channel_id + 1, channel_id);
		found = 2;
	}

	config_vpif_params(vpifparams, channel_id, found);

	regw(0x80, VPIF_REQ_SIZE);
	regw(0x01, VPIF_EMULATION_CTRL);

	return found;
}
EXPORT_SYMBOL(vpif_set_video_params);

/*
 * Program the VBI (vertical ancillary data) window start/size registers
 * for a display channel. Horizontal values are masked to 0x3F8 and the
 * vertical values occupy the upper half-word.
 */
void vpif_set_vbi_display_params(struct vpif_vbi_params *vbiparams,
				u8 channel_id)
{
	u32 value;

	value = 0x3F8 & (vbiparams->hstart0);
	value |= 0x3FFFFFF & ((vbiparams->vstart0) << 16);
	regw(value, vpifregs[channel_id].vanc0_strt);

	value = 0x3F8 & (vbiparams->hstart1);
	value |= 0x3FFFFFF & ((vbiparams->vstart1) << 16);
	regw(value, vpifregs[channel_id].vanc1_strt);

	value = 0x3F8 & (vbiparams->hsize0);
	value |= 0x3FFFFFF & ((vbiparams->vsize0) << 16);
	regw(value, vpifregs[channel_id].vanc0_size);

	value = 0x3F8 & (vbiparams->hsize1);
	value |= 0x3FFFFFF & ((vbiparams->vsize1) << 16);
	regw(value, vpifregs[channel_id].vanc1_size);
}
EXPORT_SYMBOL(vpif_set_vbi_display_params);

/* Read the current field id bit from the channel control register */
int vpif_channel_getfid(u8 channel_id)
{
	return (regr(vpifregs[channel_id].ch_ctrl) & VPIF_CH_FID_MASK)
					>> VPIF_CH_FID_SHIFT;
}
EXPORT_SYMBOL(vpif_channel_getfid);

/*
 * Release callback for the manually-created child platform devices;
 * frees the pdev allocated in vpif_probe().
 */
static void vpif_pdev_release(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	kfree(pdev);
}

static int vpif_probe(struct platform_device *pdev)
{
	/* static: shared as the IRQ resource of both child pdevs below */
	static struct resource	res_irq;
	struct platform_device *pdev_capture, *pdev_display;
	struct device_node *endpoint = NULL;
	struct vpif_data *data;
	int ret;
	int irq;

	vpif_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vpif_base))
		return PTR_ERR(vpif_base);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get(&pdev->dev);

	/*
	 * If VPIF Node has endpoints, assume "new" DT support,
	 * where capture and display drivers don't have DT nodes
	 * so their devices need to be registered manually here
	 * for their legacy platform_drivers to work.
	 */
	endpoint = of_graph_get_next_endpoint(pdev->dev.of_node,
					      endpoint);
	if (!endpoint)
		return 0;
	of_node_put(endpoint);

	/*
	 * For DT platforms, manually create platform_devices for
	 * capture/display drivers.
	 */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_put_rpm;
	}
	res_irq = DEFINE_RES_IRQ_NAMED(irq, of_node_full_name(pdev->dev.of_node));
	res_irq.flags |= irq_get_trigger_type(irq);

	pdev_capture = kzalloc(sizeof(*pdev_capture), GFP_KERNEL);
	if (!pdev_capture) {
		ret = -ENOMEM;
		goto err_put_rpm;
	}

	pdev_capture->name = "vpif_capture";
	pdev_capture->id = -1;
	pdev_capture->resource = &res_irq;
	pdev_capture->num_resources = 1;
	pdev_capture->dev.dma_mask = pdev->dev.dma_mask;
	pdev_capture->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
	pdev_capture->dev.parent = &pdev->dev;
	pdev_capture->dev.release = vpif_pdev_release;

	ret = platform_device_register(pdev_capture);
	if (ret)
		goto err_put_pdev_capture;

	pdev_display = kzalloc(sizeof(*pdev_display), GFP_KERNEL);
	if (!pdev_display) {
		ret = -ENOMEM;
		goto err_put_pdev_capture;
	}

	pdev_display->name = "vpif_display";
	pdev_display->id = -1;
	pdev_display->resource = &res_irq;
	pdev_display->num_resources = 1;
	pdev_display->dev.dma_mask = pdev->dev.dma_mask;
	pdev_display->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
	pdev_display->dev.parent = &pdev->dev;
	pdev_display->dev.release = vpif_pdev_release;

	ret = platform_device_register(pdev_display);
	if (ret)
		goto err_put_pdev_display;

	data->capture = pdev_capture;
	data->display = pdev_display;

	return 0;

err_put_pdev_display:
	platform_device_put(pdev_display);
err_put_pdev_capture:
	platform_device_put(pdev_capture);
err_put_rpm:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	kfree(data);

	return ret;
}

static void vpif_remove(struct platform_device *pdev)
{
	struct vpif_data *data = platform_get_drvdata(pdev);

	/* Child pdevs exist only on the DT path; their release frees them */
	if (data->capture)
		platform_device_unregister(data->capture);
	if (data->display)
		platform_device_unregister(data->display);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	kfree(data);
}

#ifdef CONFIG_PM
static int vpif_suspend(struct device *dev)
{
	pm_runtime_put(dev);
	return 0;
}

static int vpif_resume(struct device *dev)
{
	pm_runtime_get(dev);
	return 0;
}

static const struct dev_pm_ops vpif_pm = {
	.suspend        = vpif_suspend,
	.resume         = vpif_resume,
};

#define vpif_pm_ops (&vpif_pm)
#else
#define vpif_pm_ops NULL
#endif

#if IS_ENABLED(CONFIG_OF)
static const struct of_device_id vpif_of_match[] = {
	{ .compatible = "ti,da850-vpif", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, vpif_of_match);
#endif

static struct platform_driver vpif_driver = {
	.driver = {
		.of_match_table = of_match_ptr(vpif_of_match),
		.name	= VPIF_DRIVER_NAME,
		.pm	= vpif_pm_ops,
	},
	.remove_new = vpif_remove,
	.probe = vpif_probe,
};

static void vpif_exit(void)
{
	platform_driver_unregister(&vpif_driver);
}

static int __init vpif_init(void)
{
	return platform_driver_register(&vpif_driver);
}
subsys_initcall(vpif_init);
module_exit(vpif_exit);
linux-master
drivers/media/platform/ti/davinci/vpif.c
/*
 * vpif-display - VPIF display driver
 * Display driver for TI DaVinci VPIF
 *
 * Copyright (C) 2009 Texas Instruments Incorporated - https://www.ti.com/
 * Copyright (C) 2014 Lad, Prabhakar <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <media/v4l2-ioctl.h>

#include "vpif.h"
#include "vpif_display.h"

MODULE_DESCRIPTION("TI DaVinci VPIF Display driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(VPIF_DISPLAY_VERSION);

#define VPIF_V4L2_STD	(V4L2_STD_525_60 | V4L2_STD_625_50)

#define vpif_err(fmt, arg...)	v4l2_err(&vpif_obj.v4l2_dev, fmt, ## arg)
#define vpif_dbg(level, debug, fmt, arg...)	\
		v4l2_dbg(level, debug, &vpif_obj.v4l2_dev, fmt, ## arg)

static int debug = 1;

module_param(debug, int, 0644);

MODULE_PARM_DESC(debug, "Debug level 0-1");

#define VPIF_DRIVER_NAME	"vpif_display"
MODULE_ALIAS("platform:" VPIF_DRIVER_NAME);

/* Is set to 1 in case of SDTV formats, 2 in case of HDTV formats. */
static int ycmux_mode;

/* Per-object/per-channel "first interrupt" flags, set at stream start */
static u8 channel_first_int[VPIF_NUMOBJECTS][2] = { {1, 1} };

static struct vpif_device vpif_obj = { {NULL} };
static struct device *vpif_dev;
static void vpif_calculate_offsets(struct channel_obj *ch);
static void vpif_config_addr(struct channel_obj *ch, int muxmode);

static inline
struct vpif_disp_buffer *to_vpif_buffer(struct vb2_v4l2_buffer *vb)
{
	return container_of(vb, struct vpif_disp_buffer, vb);
}

/**
 * vpif_buffer_prepare :  callback function for buffer prepare
 * @vb: ptr to vb2_buffer
 *
 * This is the callback function for buffer prepare when vb2_qbuf()
 * function is called. The buffer is prepared and user space virtual address
 * or user address is converted into  physical address
 */
static int vpif_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
	struct common_obj *common;

	common = &ch->common[VPIF_VIDEO_INDEX];

	vb2_set_plane_payload(vb, 0, common->fmt.fmt.pix.sizeimage);
	if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
		return -EINVAL;

	vbuf->field = common->fmt.fmt.pix.field;

	if (vb->vb2_queue->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
		unsigned long addr = vb2_dma_contig_plane_dma_addr(vb, 0);

		/* All four Y/C field offsets must meet the HW alignment */
		if (!ISALIGNED(addr + common->ytop_off) ||
			!ISALIGNED(addr + common->ybtm_off) ||
			!ISALIGNED(addr + common->ctop_off) ||
			!ISALIGNED(addr + common->cbtm_off)) {
			vpif_err("buffer offset not aligned to 8 bytes\n");
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * vpif_buffer_queue_setup : Callback function for buffer setup.
 * @vq: vb2_queue ptr
 * @nbuffers: ptr to number of buffers requested by application
 * @nplanes: contains number of distinct video planes needed to hold a frame
 * @sizes: contains the size (in bytes) of each plane.
 * @alloc_devs: ptr to allocation context
 *
 * This callback function is called when reqbuf() is called to adjust
 * the buffer count and buffer size
 */
static int vpif_buffer_queue_setup(struct vb2_queue *vq,
				unsigned int *nbuffers, unsigned int *nplanes,
				unsigned int sizes[], struct device *alloc_devs[])
{
	struct channel_obj *ch = vb2_get_drv_priv(vq);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	unsigned size = common->fmt.fmt.pix.sizeimage;

	if (*nplanes) {
		if (sizes[0] < size)
			return -EINVAL;
		size = sizes[0];
	}

	/* Keep at least 3 buffers queued for continuous display */
	if (vq->num_buffers + *nbuffers < 3)
		*nbuffers = 3 - vq->num_buffers;

	*nplanes = 1;
	sizes[0] = size;

	/* Calculate the offset for Y and C data  in the buffer */
	vpif_calculate_offsets(ch);

	return 0;
}

/**
 * vpif_buffer_queue : Callback function to add buffer to DMA queue
 * @vb: ptr to vb2_buffer
 *
 * This callback function queues the buffer to DMA engine
 */
static void vpif_buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpif_disp_buffer *buf = to_vpif_buffer(vbuf);
	struct channel_obj *ch = vb2_get_drv_priv(vb->vb2_queue);
	struct common_obj *common;
	unsigned long flags;

	common = &ch->common[VPIF_VIDEO_INDEX];

	/* add the buffer to the DMA queue */
	spin_lock_irqsave(&common->irqlock, flags);
	list_add_tail(&buf->list, &common->dma_queue);
	spin_unlock_irqrestore(&common->irqlock, flags);
}

/**
 * vpif_start_streaming : Starts the DMA engine for streaming
 * @vq: ptr to vb2_buffer
 * @count: number of buffers
 */
static int vpif_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct vpif_display_config *vpif_config_data =
					vpif_dev->platform_data;
	struct channel_obj *ch = vb2_get_drv_priv(vq);
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct vpif_params *vpif = &ch->vpifparams;
	struct vpif_disp_buffer *buf, *tmp;
	unsigned long addr, flags;
	int ret;

	spin_lock_irqsave(&common->irqlock, flags);

	/* Initialize field_id */
	ch->field_id = 0;

	/* clock settings */
	if (vpif_config_data->set_clock) {
		ret = vpif_config_data->set_clock(ch->vpifparams.std_info.
		ycmux_mode, ch->vpifparams.std_info.hd_sd);
		if (ret < 0) {
			vpif_err("can't set clock\n");
			goto err;
		}
	}

	/* set the parameters and addresses */
	ret = vpif_set_video_params(vpif, ch->channel_id + 2);
	if (ret < 0)
		goto err;

	/* 1 for SDTV (muxed), 2 for HDTV (Y/C on separate channels) */
	ycmux_mode = ret;
	vpif_config_addr(ch, ret);
	/* Get the next frame from the buffer queue */
	common->next_frm = common->cur_frm =
			    list_entry(common->dma_queue.next,
				       struct vpif_disp_buffer, list);

	list_del(&common->cur_frm->list);
	spin_unlock_irqrestore(&common->irqlock, flags);

	addr = vb2_dma_contig_plane_dma_addr(&common->cur_frm->vb.vb2_buf, 0);
	common->set_addr((addr + common->ytop_off),
			    (addr + common->ybtm_off),
			    (addr + common->ctop_off),
			    (addr + common->cbtm_off));

	/*
	 * Set interrupt for both the fields in VPIF
	 * Register enable channel in VPIF register
	 */
	channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
	if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
		channel2_intr_assert();
		channel2_intr_enable(1);
		enable_channel2(1);
		if (vpif_config_data->chan_config[VPIF_CHANNEL2_VIDEO].clip_en)
			channel2_clipping_enable(1);
	}

	/* Channel 3 is also driven when Y/C travel on separate channels */
	if (VPIF_CHANNEL3_VIDEO == ch->channel_id || ycmux_mode == 2) {
		channel3_intr_assert();
		channel3_intr_enable(1);
		enable_channel3(1);
		if (vpif_config_data->chan_config[VPIF_CHANNEL3_VIDEO].clip_en)
			channel3_clipping_enable(1);
	}

	return 0;

err:
	/* Return all queued buffers to vb2 in the QUEUED state */
	list_for_each_entry_safe(buf, tmp, &common->dma_queue, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
	}
	spin_unlock_irqrestore(&common->irqlock, flags);

	return ret;
}

/**
 * vpif_stop_streaming : Stop the DMA engine
 * @vq: ptr to vb2_queue
 *
 * This callback stops the DMA engine and any remaining buffers
 * in the DMA queue are released.
 */
static void vpif_stop_streaming(struct vb2_queue *vq)
{
	struct channel_obj *ch = vb2_get_drv_priv(vq);
	struct common_obj *common;
	unsigned long flags;

	common = &ch->common[VPIF_VIDEO_INDEX];

	/* Disable channel */
	if (VPIF_CHANNEL2_VIDEO == ch->channel_id) {
		enable_channel2(0);
		channel2_intr_enable(0);
	}
	if (VPIF_CHANNEL3_VIDEO == ch->channel_id || ycmux_mode == 2) {
		enable_channel3(0);
		channel3_intr_enable(0);
	}

	/* release all active buffers */
	spin_lock_irqsave(&common->irqlock, flags);
	if (common->cur_frm == common->next_frm) {
		/* Same buffer: complete it only once */
		vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
				VB2_BUF_STATE_ERROR);
	} else {
		if (common->cur_frm)
			vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
					VB2_BUF_STATE_ERROR);
		if (common->next_frm)
			vb2_buffer_done(&common->next_frm->vb.vb2_buf,
					VB2_BUF_STATE_ERROR);
	}

	while (!list_empty(&common->dma_queue)) {
		common->next_frm = list_entry(common->dma_queue.next,
						struct vpif_disp_buffer, list);
		list_del(&common->next_frm->list);
		vb2_buffer_done(&common->next_frm->vb.vb2_buf,
				VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&common->irqlock, flags);
}

static const struct vb2_ops video_qops = {
	.queue_setup		= vpif_buffer_queue_setup,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
	.buf_prepare		= vpif_buffer_prepare,
	.start_streaming	= vpif_start_streaming,
	.stop_streaming		= vpif_stop_streaming,
	.buf_queue		= vpif_buffer_queue,
};

/*
 * Dequeue the next buffer from the DMA queue and program its Y/C
 * top/bottom field addresses into the VPIF registers. Caller must
 * guarantee the queue is non-empty.
 */
static void process_progressive_mode(struct common_obj *common)
{
	unsigned long addr;

	spin_lock(&common->irqlock);
	/* Get the next buffer from buffer queue */
	common->next_frm = list_entry(common->dma_queue.next,
				struct vpif_disp_buffer, list);
	/* Remove that buffer from the buffer queue */
	list_del(&common->next_frm->list);
	spin_unlock(&common->irqlock);

	/* Set top and bottom field addrs in VPIF registers */
	addr = vb2_dma_contig_plane_dma_addr(&common->next_frm->vb.vb2_buf, 0);
	common->set_addr(addr + common->ytop_off,
				 addr + common->ybtm_off,
				 addr + common->ctop_off,
				 addr + common->cbtm_off);
}
/*
 * process_interlaced_mode - per-field bookkeeping for interlaced display
 * @fid: field id just displayed (0 = even field, 1 = odd field)
 * @common: per-channel common object
 *
 * On the even field, completes the current frame (if a next frame has
 * been set up) and advances cur_frm; on the odd field, schedules the
 * next queued frame via process_progressive_mode() if one is available.
 */
static void process_interlaced_mode(int fid, struct common_obj *common)
{
	/* device field id and local field id are in sync */
	/* If this is even field */
	if (0 == fid) {
		if (common->cur_frm == common->next_frm)
			return;
		/* one frame is displayed If next frame is
		 * available, release cur_frm and move on */
		/* Copy frame display time */
		common->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
		/* Change status of the cur_frm */
		vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
				VB2_BUF_STATE_DONE);
		/* Make cur_frm pointing to next_frm */
		common->cur_frm = common->next_frm;

	} else if (1 == fid) {	/* odd field */
		spin_lock(&common->irqlock);
		if (list_empty(&common->dma_queue) ||
		    (common->cur_frm != common->next_frm)) {
			spin_unlock(&common->irqlock);
			return;
		}
		spin_unlock(&common->irqlock);
		/* one field is displayed configure the next
		 * frame if it is available else hold on current
		 * frame */
		/* Get next from the buffer queue */
		process_progressive_mode(common);
	}
}

/*
 * vpif_channel_isr: It changes status of the displayed buffer, takes next
 * buffer from the queue and sets its address in VPIF registers
 */
static irqreturn_t vpif_channel_isr(int irq, void *dev_id)
{
	struct vpif_device *dev = &vpif_obj;
	struct channel_obj *ch;
	struct common_obj *common;
	int fid = -1, i;
	int channel_id;

	/* dev_id is the address of the channel_id field (see vpif_probe) */
	channel_id = *(int *)(dev_id);
	/* display uses VPIF channels 2 and 3, hence the +2 offset */
	if (!vpif_intr_status(channel_id + 2))
		return IRQ_NONE;

	ch = dev->dev[channel_id];
	for (i = 0; i < VPIF_NUMOBJECTS; i++) {
		common = &ch->common[i];
		/* If streaming is started in this channel */

		/* frm_fmt == 1 selects the progressive-mode path */
		if (1 == ch->vpifparams.std_info.frm_fmt) {
			spin_lock(&common->irqlock);
			if (list_empty(&common->dma_queue)) {
				spin_unlock(&common->irqlock);
				continue;
			}
			spin_unlock(&common->irqlock);

			/* Progressive mode */
			if (!channel_first_int[i][channel_id]) {
				/* Mark status of the cur_frm to
				 * done and unlock semaphore on it */
				common->cur_frm->vb.vb2_buf.timestamp =
						ktime_get_ns();
				vb2_buffer_done(&common->cur_frm->vb.vb2_buf,
						VB2_BUF_STATE_DONE);
				/* Make cur_frm pointing to next_frm
*/ common->cur_frm = common->next_frm; } channel_first_int[i][channel_id] = 0; process_progressive_mode(common); } else { /* Interlaced mode */ /* If it is first interrupt, ignore it */ if (channel_first_int[i][channel_id]) { channel_first_int[i][channel_id] = 0; continue; } if (0 == i) { ch->field_id ^= 1; /* Get field id from VPIF registers */ fid = vpif_channel_getfid(ch->channel_id + 2); /* If fid does not match with stored field id */ if (fid != ch->field_id) { /* Make them in sync */ if (0 == fid) ch->field_id = fid; return IRQ_HANDLED; } } process_interlaced_mode(fid, common); } } return IRQ_HANDLED; } static int vpif_update_std_info(struct channel_obj *ch) { struct video_obj *vid_ch = &ch->video; struct vpif_params *vpifparams = &ch->vpifparams; struct vpif_channel_config_params *std_info = &vpifparams->std_info; const struct vpif_channel_config_params *config; int i; for (i = 0; i < vpif_ch_params_count; i++) { config = &vpif_ch_params[i]; if (config->hd_sd == 0) { vpif_dbg(2, debug, "SD format\n"); if (config->stdid & vid_ch->stdid) { memcpy(std_info, config, sizeof(*config)); break; } } } if (i == vpif_ch_params_count) { vpif_dbg(1, debug, "Format not found\n"); return -EINVAL; } return 0; } static int vpif_update_resolution(struct channel_obj *ch) { struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct video_obj *vid_ch = &ch->video; struct vpif_params *vpifparams = &ch->vpifparams; struct vpif_channel_config_params *std_info = &vpifparams->std_info; if (!vid_ch->stdid && !vid_ch->dv_timings.bt.height) return -EINVAL; if (vid_ch->stdid) { if (vpif_update_std_info(ch)) return -EINVAL; } common->fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV422P; common->fmt.fmt.pix.width = std_info->width; common->fmt.fmt.pix.height = std_info->height; vpif_dbg(1, debug, "Pixel details: Width = %d,Height = %d\n", common->fmt.fmt.pix.width, common->fmt.fmt.pix.height); /* Set height and width paramateres */ common->height = std_info->height; common->width = 
std_info->width; common->fmt.fmt.pix.sizeimage = common->height * common->width * 2; if (vid_ch->stdid) common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; else common->fmt.fmt.pix.colorspace = V4L2_COLORSPACE_REC709; if (ch->vpifparams.std_info.frm_fmt) common->fmt.fmt.pix.field = V4L2_FIELD_NONE; else common->fmt.fmt.pix.field = V4L2_FIELD_INTERLACED; return 0; } /* * vpif_calculate_offsets: This function calculates buffers offset for Y and C * in the top and bottom field */ static void vpif_calculate_offsets(struct channel_obj *ch) { struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_params *vpifparams = &ch->vpifparams; enum v4l2_field field = common->fmt.fmt.pix.field; struct video_obj *vid_ch = &ch->video; unsigned int hpitch, sizeimage; if (V4L2_FIELD_ANY == common->fmt.fmt.pix.field) { if (ch->vpifparams.std_info.frm_fmt) vid_ch->buf_field = V4L2_FIELD_NONE; else vid_ch->buf_field = V4L2_FIELD_INTERLACED; } else { vid_ch->buf_field = common->fmt.fmt.pix.field; } sizeimage = common->fmt.fmt.pix.sizeimage; hpitch = common->fmt.fmt.pix.bytesperline; if ((V4L2_FIELD_NONE == vid_ch->buf_field) || (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) { common->ytop_off = 0; common->ybtm_off = hpitch; common->ctop_off = sizeimage / 2; common->cbtm_off = sizeimage / 2 + hpitch; } else if (V4L2_FIELD_SEQ_TB == vid_ch->buf_field) { common->ytop_off = 0; common->ybtm_off = sizeimage / 4; common->ctop_off = sizeimage / 2; common->cbtm_off = common->ctop_off + sizeimage / 4; } else if (V4L2_FIELD_SEQ_BT == vid_ch->buf_field) { common->ybtm_off = 0; common->ytop_off = sizeimage / 4; common->cbtm_off = sizeimage / 2; common->ctop_off = common->cbtm_off + sizeimage / 4; } if ((V4L2_FIELD_NONE == vid_ch->buf_field) || (V4L2_FIELD_INTERLACED == vid_ch->buf_field)) { vpifparams->video_params.storage_mode = 1; } else { vpifparams->video_params.storage_mode = 0; } if (ch->vpifparams.std_info.frm_fmt == 1) { vpifparams->video_params.hpitch = 
common->fmt.fmt.pix.bytesperline; } else { if ((field == V4L2_FIELD_ANY) || (field == V4L2_FIELD_INTERLACED)) vpifparams->video_params.hpitch = common->fmt.fmt.pix.bytesperline * 2; else vpifparams->video_params.hpitch = common->fmt.fmt.pix.bytesperline; } ch->vpifparams.video_params.stdid = ch->vpifparams.std_info.stdid; } static void vpif_config_addr(struct channel_obj *ch, int muxmode) { struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; if (VPIF_CHANNEL3_VIDEO == ch->channel_id) { common->set_addr = ch3_set_video_buf_addr; } else { if (2 == muxmode) common->set_addr = ch2_set_video_buf_addr_yc_nmux; else common->set_addr = ch2_set_video_buf_addr; } } /* functions implementing ioctls */ /** * vpif_querycap() - QUERYCAP handler * @file: file ptr * @priv: file handle * @cap: ptr to v4l2_capability structure */ static int vpif_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct vpif_display_config *config = vpif_dev->platform_data; strscpy(cap->driver, VPIF_DRIVER_NAME, sizeof(cap->driver)); strscpy(cap->card, config->card_name, sizeof(cap->card)); return 0; } static int vpif_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *fmt) { if (fmt->index != 0) return -EINVAL; /* Fill in the information about format */ fmt->pixelformat = V4L2_PIX_FMT_YUV422P; return 0; } static int vpif_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *fmt) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; /* Check the validity of the buffer type */ if (common->fmt.type != fmt->type) return -EINVAL; if (vpif_update_resolution(ch)) return -EINVAL; *fmt = common->fmt; return 0; } static int vpif_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *fmt) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct common_obj *common = 
&ch->common[VPIF_VIDEO_INDEX]; struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; /* * to suppress v4l-compliance warnings silently correct * the pixelformat */ if (pixfmt->pixelformat != V4L2_PIX_FMT_YUV422P) pixfmt->pixelformat = common->fmt.fmt.pix.pixelformat; if (vpif_update_resolution(ch)) return -EINVAL; pixfmt->colorspace = common->fmt.fmt.pix.colorspace; pixfmt->field = common->fmt.fmt.pix.field; pixfmt->bytesperline = common->fmt.fmt.pix.width; pixfmt->width = common->fmt.fmt.pix.width; pixfmt->height = common->fmt.fmt.pix.height; pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height * 2; return 0; } static int vpif_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *fmt) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct v4l2_pix_format *pixfmt = &fmt->fmt.pix; int ret; if (vb2_is_busy(&common->buffer_queue)) return -EBUSY; ret = vpif_try_fmt_vid_out(file, priv, fmt); if (ret) return ret; /* store the pix format in the channel object */ common->fmt.fmt.pix = *pixfmt; /* store the format in the channel object */ common->fmt = *fmt; return 0; } static int vpif_s_std(struct file *file, void *priv, v4l2_std_id std_id) { struct vpif_display_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_display_chan_config *chan_cfg; struct v4l2_output output; int ret; if (!config->chan_config[ch->channel_id].outputs) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; output = chan_cfg->outputs[ch->output_idx].output; if (output.capabilities != V4L2_OUT_CAP_STD) return -ENODATA; if (vb2_is_busy(&common->buffer_queue)) return -EBUSY; if (!(std_id & VPIF_V4L2_STD)) return -EINVAL; /* Call encoder subdevice function to set the standard */ ch->video.stdid = std_id; 
memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); /* Get the information about the standard */ if (vpif_update_resolution(ch)) return -EINVAL; common->fmt.fmt.pix.bytesperline = common->fmt.fmt.pix.width; ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video, s_std_output, std_id); if (ret < 0) { vpif_err("Failed to set output standard\n"); return ret; } ret = v4l2_device_call_until_err(&vpif_obj.v4l2_dev, 1, video, s_std, std_id); if (ret < 0) vpif_err("Failed to set standard for sub devices\n"); return ret; } static int vpif_g_std(struct file *file, void *priv, v4l2_std_id *std) { struct vpif_display_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct vpif_display_chan_config *chan_cfg; struct v4l2_output output; if (!config->chan_config[ch->channel_id].outputs) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; output = chan_cfg->outputs[ch->output_idx].output; if (output.capabilities != V4L2_OUT_CAP_STD) return -ENODATA; *std = ch->video.stdid; return 0; } static int vpif_enum_output(struct file *file, void *fh, struct v4l2_output *output) { struct vpif_display_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct vpif_display_chan_config *chan_cfg; chan_cfg = &config->chan_config[ch->channel_id]; if (output->index >= chan_cfg->output_count) { vpif_dbg(1, debug, "Invalid output index\n"); return -EINVAL; } *output = chan_cfg->outputs[output->index].output; return 0; } /** * vpif_output_to_subdev() - Maps output to sub device * @vpif_cfg: global config ptr * @chan_cfg: channel config ptr * @index: Given output index from application * * lookup the sub device information for a given output index. * we report all the output to application. 
output table also * has sub device name for the each output */ static int vpif_output_to_subdev(struct vpif_display_config *vpif_cfg, struct vpif_display_chan_config *chan_cfg, int index) { struct vpif_subdev_info *subdev_info; const char *subdev_name; int i; vpif_dbg(2, debug, "vpif_output_to_subdev\n"); if (!chan_cfg->outputs) return -1; subdev_name = chan_cfg->outputs[index].subdev_name; if (!subdev_name) return -1; /* loop through the sub device list to get the sub device info */ for (i = 0; i < vpif_cfg->subdev_count; i++) { subdev_info = &vpif_cfg->subdevinfo[i]; if (!strcmp(subdev_info->name, subdev_name)) return i; } return -1; } /** * vpif_set_output() - Select an output * @vpif_cfg: global config ptr * @ch: channel * @index: Given output index from application * * Select the given output. */ static int vpif_set_output(struct vpif_display_config *vpif_cfg, struct channel_obj *ch, int index) { struct vpif_display_chan_config *chan_cfg = &vpif_cfg->chan_config[ch->channel_id]; struct v4l2_subdev *sd = NULL; u32 input = 0, output = 0; int sd_index; int ret; sd_index = vpif_output_to_subdev(vpif_cfg, chan_cfg, index); if (sd_index >= 0) sd = vpif_obj.sd[sd_index]; if (sd) { input = chan_cfg->outputs[index].input_route; output = chan_cfg->outputs[index].output_route; ret = v4l2_subdev_call(sd, video, s_routing, input, output, 0); if (ret < 0 && ret != -ENOIOCTLCMD) { vpif_err("Failed to set output\n"); return ret; } } ch->output_idx = index; ch->sd = sd; if (chan_cfg->outputs) /* update tvnorms from the sub device output info */ ch->video_dev.tvnorms = chan_cfg->outputs[index].output.std; return 0; } static int vpif_s_output(struct file *file, void *priv, unsigned int i) { struct vpif_display_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct vpif_display_chan_config *chan_cfg; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; if 
(vb2_is_busy(&common->buffer_queue)) return -EBUSY; chan_cfg = &config->chan_config[ch->channel_id]; if (i >= chan_cfg->output_count) return -EINVAL; return vpif_set_output(config, ch, i); } static int vpif_g_output(struct file *file, void *priv, unsigned int *i) { struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); *i = ch->output_idx; return 0; } /** * vpif_enum_dv_timings() - ENUM_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: input timings */ static int vpif_enum_dv_timings(struct file *file, void *priv, struct v4l2_enum_dv_timings *timings) { struct vpif_display_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct vpif_display_chan_config *chan_cfg; struct v4l2_output output; int ret; if (!config->chan_config[ch->channel_id].outputs) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; output = chan_cfg->outputs[ch->output_idx].output; if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS) return -ENODATA; timings->pad = 0; ret = v4l2_subdev_call(ch->sd, pad, enum_dv_timings, timings); if (ret == -ENOIOCTLCMD || ret == -ENODEV) return -EINVAL; return ret; } /** * vpif_s_dv_timings() - S_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: digital video timings */ static int vpif_s_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_display_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct vpif_params *vpifparams = &ch->vpifparams; struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX]; struct vpif_channel_config_params *std_info = &vpifparams->std_info; struct video_obj *vid_ch = &ch->video; struct v4l2_bt_timings *bt = &vid_ch->dv_timings.bt; struct vpif_display_chan_config *chan_cfg; struct v4l2_output output; int ret; if 
(!config->chan_config[ch->channel_id].outputs) return -ENODATA; chan_cfg = &config->chan_config[ch->channel_id]; output = chan_cfg->outputs[ch->output_idx].output; if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS) return -ENODATA; if (vb2_is_busy(&common->buffer_queue)) return -EBUSY; if (timings->type != V4L2_DV_BT_656_1120) { vpif_dbg(2, debug, "Timing type not defined\n"); return -EINVAL; } /* Configure subdevice timings, if any */ ret = v4l2_subdev_call(ch->sd, video, s_dv_timings, timings); if (ret == -ENOIOCTLCMD || ret == -ENODEV) ret = 0; if (ret < 0) { vpif_dbg(2, debug, "Error setting custom DV timings\n"); return ret; } if (!(timings->bt.width && timings->bt.height && (timings->bt.hbackporch || timings->bt.hfrontporch || timings->bt.hsync) && timings->bt.vfrontporch && (timings->bt.vbackporch || timings->bt.vsync))) { vpif_dbg(2, debug, "Timings for width, height, horizontal back porch, horizontal sync, horizontal front porch, vertical back porch, vertical sync and vertical back porch must be defined\n"); return -EINVAL; } vid_ch->dv_timings = *timings; /* Configure video port timings */ std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8; std_info->sav2eav = bt->width; std_info->l1 = 1; std_info->l3 = bt->vsync + bt->vbackporch + 1; std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt); if (bt->interlaced) { if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) { std_info->l5 = std_info->vsize/2 - (bt->vfrontporch - 1); std_info->l7 = std_info->vsize/2 + 1; std_info->l9 = std_info->l7 + bt->il_vsync + bt->il_vbackporch + 1; std_info->l11 = std_info->vsize - (bt->il_vfrontporch - 1); } else { vpif_dbg(2, debug, "Required timing values for interlaced BT format missing\n"); return -EINVAL; } } else { std_info->l5 = std_info->vsize - (bt->vfrontporch - 1); } strscpy(std_info->name, "Custom timings BT656/1120", sizeof(std_info->name)); std_info->width = bt->width; std_info->height = bt->height; std_info->frm_fmt = bt->interlaced ? 
0 : 1; std_info->ycmux_mode = 0; std_info->capture_format = 0; std_info->vbi_supported = 0; std_info->hd_sd = 1; std_info->stdid = 0; vid_ch->stdid = 0; return 0; } /** * vpif_g_dv_timings() - G_DV_TIMINGS handler * @file: file ptr * @priv: file handle * @timings: digital video timings */ static int vpif_g_dv_timings(struct file *file, void *priv, struct v4l2_dv_timings *timings) { struct vpif_display_config *config = vpif_dev->platform_data; struct video_device *vdev = video_devdata(file); struct channel_obj *ch = video_get_drvdata(vdev); struct vpif_display_chan_config *chan_cfg; struct video_obj *vid_ch = &ch->video; struct v4l2_output output; if (!config->chan_config[ch->channel_id].outputs) goto error; chan_cfg = &config->chan_config[ch->channel_id]; output = chan_cfg->outputs[ch->output_idx].output; if (output.capabilities != V4L2_OUT_CAP_DV_TIMINGS) goto error; *timings = vid_ch->dv_timings; return 0; error: return -ENODATA; } /* * vpif_log_status() - Status information * @file: file ptr * @priv: file handle * * Returns zero. 
 */
static int vpif_log_status(struct file *filep, void *priv)
{
	/* status for sub devices */
	v4l2_device_call_all(&vpif_obj.v4l2_dev, 0, core, log_status);

	return 0;
}

/* vpif display ioctl operations */
static const struct v4l2_ioctl_ops vpif_ioctl_ops = {
	.vidioc_querycap		= vpif_querycap,
	.vidioc_enum_fmt_vid_out	= vpif_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out		= vpif_g_fmt_vid_out,
	.vidioc_s_fmt_vid_out		= vpif_s_fmt_vid_out,
	.vidioc_try_fmt_vid_out		= vpif_try_fmt_vid_out,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
	.vidioc_s_std			= vpif_s_std,
	.vidioc_g_std			= vpif_g_std,
	.vidioc_enum_output		= vpif_enum_output,
	.vidioc_s_output		= vpif_s_output,
	.vidioc_g_output		= vpif_g_output,
	.vidioc_enum_dv_timings		= vpif_enum_dv_timings,
	.vidioc_s_dv_timings		= vpif_s_dv_timings,
	.vidioc_g_dv_timings		= vpif_g_dv_timings,
	.vidioc_log_status		= vpif_log_status,
};

/* V4L2 file operations; buffer handling is delegated to vb2 helpers */
static const struct v4l2_file_operations vpif_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= vb2_fop_mmap,
	.poll		= vb2_fop_poll
};

/*
 * initialize_vpif - allocate the per-channel objects
 *
 * Allocates VPIF_DISPLAY_MAX_DEVICES channel objects; on allocation
 * failure, frees the ones already allocated and returns -ENOMEM.
 */
static int initialize_vpif(void)
{
	int free_channel_objects_index;
	int err, i, j;

	/* Allocate memory for the channel objects */
	for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) {
		vpif_obj.dev[i] =
		    kzalloc(sizeof(struct channel_obj), GFP_KERNEL);
		/* If memory allocation fails, return error */
		if (!vpif_obj.dev[i]) {
			/* remember how many were allocated before failing */
			free_channel_objects_index = i;
			err = -ENOMEM;
			goto vpif_init_free_channel_objects;
		}
	}

	return 0;

vpif_init_free_channel_objects:
	for (j = 0; j < free_channel_objects_index; j++)
		kfree(vpif_obj.dev[j]);
	return err;
}

/* free_vpif_objs - release all channel objects (kfree(NULL) is a no-op) */
static void free_vpif_objs(void)
{
	int i;

	for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
kfree(vpif_obj.dev[i]); } static int vpif_probe_complete(void) { struct common_obj *common; struct video_device *vdev; struct channel_obj *ch; struct vb2_queue *q; int j, err, k; for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) { ch = vpif_obj.dev[j]; /* Initialize field of the channel objects */ for (k = 0; k < VPIF_NUMOBJECTS; k++) { common = &ch->common[k]; spin_lock_init(&common->irqlock); mutex_init(&common->lock); common->set_addr = NULL; common->ytop_off = 0; common->ybtm_off = 0; common->ctop_off = 0; common->cbtm_off = 0; common->cur_frm = NULL; common->next_frm = NULL; memset(&common->fmt, 0, sizeof(common->fmt)); } ch->initialized = 0; if (vpif_obj.config->subdev_count) ch->sd = vpif_obj.sd[0]; ch->channel_id = j; memset(&ch->vpifparams, 0, sizeof(ch->vpifparams)); ch->common[VPIF_VIDEO_INDEX].fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; /* select output 0 */ err = vpif_set_output(vpif_obj.config, ch, 0); if (err) goto probe_out; /* set initial format */ ch->video.stdid = V4L2_STD_525_60; memset(&ch->video.dv_timings, 0, sizeof(ch->video.dv_timings)); vpif_update_resolution(ch); /* Initialize vb2 queue */ q = &common->buffer_queue; q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; q->drv_priv = ch; q->ops = &video_qops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct vpif_disp_buffer); q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->min_buffers_needed = 1; q->lock = &common->lock; q->dev = vpif_dev; err = vb2_queue_init(q); if (err) { vpif_err("vpif_display: vb2_queue_init() failed\n"); goto probe_out; } INIT_LIST_HEAD(&common->dma_queue); /* register video device */ vpif_dbg(1, debug, "channel=%p,channel->video_dev=%p\n", ch, &ch->video_dev); /* Initialize the video_device structure */ vdev = &ch->video_dev; strscpy(vdev->name, VPIF_DRIVER_NAME, sizeof(vdev->name)); vdev->release = video_device_release_empty; vdev->fops = &vpif_fops; vdev->ioctl_ops = &vpif_ioctl_ops; vdev->v4l2_dev = 
&vpif_obj.v4l2_dev; vdev->vfl_dir = VFL_DIR_TX; vdev->queue = q; vdev->lock = &common->lock; vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; video_set_drvdata(&ch->video_dev, ch); err = video_register_device(vdev, VFL_TYPE_VIDEO, (j ? 3 : 2)); if (err < 0) goto probe_out; } return 0; probe_out: for (k = 0; k < j; k++) { ch = vpif_obj.dev[k]; video_unregister_device(&ch->video_dev); } return err; } /* * vpif_probe: This function creates device entries by register itself to the * V4L2 driver and initializes fields of each channel objects */ static __init int vpif_probe(struct platform_device *pdev) { struct vpif_subdev_info *subdevdata; struct i2c_adapter *i2c_adap; int subdev_count; int res_idx = 0; int i, err; if (!pdev->dev.platform_data) { dev_warn(&pdev->dev, "Missing platform data. Giving up.\n"); return -EINVAL; } vpif_dev = &pdev->dev; err = initialize_vpif(); if (err) { v4l2_err(vpif_dev->driver, "Error initializing vpif\n"); return err; } err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev); if (err) { v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n"); goto vpif_free; } do { int irq; err = platform_get_irq_optional(pdev, res_idx); if (err < 0 && err != -ENXIO) goto vpif_unregister; if (err > 0) irq = err; else break; err = devm_request_irq(&pdev->dev, irq, vpif_channel_isr, IRQF_SHARED, VPIF_DRIVER_NAME, (void *)(&vpif_obj.dev[res_idx]->channel_id)); if (err) { vpif_err("VPIF IRQ request failed\n"); goto vpif_unregister; } } while (++res_idx); vpif_obj.config = pdev->dev.platform_data; subdev_count = vpif_obj.config->subdev_count; subdevdata = vpif_obj.config->subdevinfo; vpif_obj.sd = kcalloc(subdev_count, sizeof(*vpif_obj.sd), GFP_KERNEL); if (!vpif_obj.sd) { err = -ENOMEM; goto vpif_unregister; } i2c_adap = i2c_get_adapter(vpif_obj.config->i2c_adapter_id); for (i = 0; i < subdev_count; i++) { vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev, i2c_adap, &subdevdata[i].board_info, NULL); if (!vpif_obj.sd[i]) { 
vpif_err("Error registering v4l2 subdevice\n"); err = -ENODEV; goto probe_subdev_out; } vpif_obj.sd[i]->grp_id = 1 << i; } err = vpif_probe_complete(); if (err) goto probe_subdev_out; return 0; probe_subdev_out: kfree(vpif_obj.sd); vpif_unregister: v4l2_device_unregister(&vpif_obj.v4l2_dev); vpif_free: free_vpif_objs(); return err; } /* * vpif_remove: It un-register channels from V4L2 driver */ static void vpif_remove(struct platform_device *device) { struct channel_obj *ch; int i; v4l2_device_unregister(&vpif_obj.v4l2_dev); kfree(vpif_obj.sd); /* un-register device */ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; /* Unregister video device */ video_unregister_device(&ch->video_dev); } free_vpif_objs(); } #ifdef CONFIG_PM_SLEEP static int vpif_suspend(struct device *dev) { struct common_obj *common; struct channel_obj *ch; int i; for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; if (!vb2_start_streaming_called(&common->buffer_queue)) continue; mutex_lock(&common->lock); /* Disable channel */ if (ch->channel_id == VPIF_CHANNEL2_VIDEO) { enable_channel2(0); channel2_intr_enable(0); } if (ch->channel_id == VPIF_CHANNEL3_VIDEO || ycmux_mode == 2) { enable_channel3(0); channel3_intr_enable(0); } mutex_unlock(&common->lock); } return 0; } static int vpif_resume(struct device *dev) { struct common_obj *common; struct channel_obj *ch; int i; for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++) { /* Get the pointer to the channel object */ ch = vpif_obj.dev[i]; common = &ch->common[VPIF_VIDEO_INDEX]; if (!vb2_start_streaming_called(&common->buffer_queue)) continue; mutex_lock(&common->lock); /* Enable channel */ if (ch->channel_id == VPIF_CHANNEL2_VIDEO) { enable_channel2(1); channel2_intr_enable(1); } if (ch->channel_id == VPIF_CHANNEL3_VIDEO || ycmux_mode == 2) { enable_channel3(1); channel3_intr_enable(1); 
		}
		mutex_unlock(&common->lock);
	}

	return 0;
}
#endif

/* suspend/resume only defined under CONFIG_PM_SLEEP (see #ifdef above) */
static SIMPLE_DEV_PM_OPS(vpif_pm_ops, vpif_suspend, vpif_resume);

/*
 * NOTE(review): __refdata is presumably needed because vpif_probe is
 * __init while the driver struct is not — confirm against the section
 * mismatch warnings this would otherwise trigger.
 */
static __refdata struct platform_driver vpif_driver = {
	.driver	= {
			.name	= VPIF_DRIVER_NAME,
			.pm	= &vpif_pm_ops,
	},
	.probe	= vpif_probe,
	.remove_new = vpif_remove,
};

module_platform_driver(vpif_driver);
linux-master
drivers/media/platform/ti/davinci/vpif_display.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Scaler library
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 *
 * David Griego, <[email protected]>
 * Dale Farnsworth, <[email protected]>
 * Archit Taneja, <[email protected]>
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "sc.h"
#include "sc_coeff.h"

/* Dump all CFG_SC* registers of the scaler block via dev_dbg (debug aid). */
void sc_dump_regs(struct sc_data *sc)
{
	struct device *dev = &sc->pdev->dev;

#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
	ioread32(sc->base + CFG_##r))

	dev_dbg(dev, "SC Registers @ %pa:\n", &sc->res->start);
	DUMPREG(SC0);
	DUMPREG(SC1);
	DUMPREG(SC2);
	DUMPREG(SC3);
	DUMPREG(SC4);
	DUMPREG(SC5);
	DUMPREG(SC6);
	DUMPREG(SC8);
	DUMPREG(SC9);
	DUMPREG(SC10);
	DUMPREG(SC11);
	DUMPREG(SC12);
	DUMPREG(SC13);
	DUMPREG(SC17);
	DUMPREG(SC18);
	DUMPREG(SC19);
	DUMPREG(SC20);
	DUMPREG(SC21);
	DUMPREG(SC22);
	DUMPREG(SC23);
	DUMPREG(SC24);
	DUMPREG(SC25);

#undef DUMPREG
}
EXPORT_SYMBOL(sc_dump_regs);

/*
 * set the horizontal scaler coefficients according to the ratio of output to
 * input widths, after accounting for up to two levels of decimation
 */
void sc_set_hs_coeffs(struct sc_data *sc, void *addr, unsigned int src_w,
		unsigned int dst_w)
{
	int sixteenths;
	int idx;
	int i, j;
	u16 *coeff_h = addr;
	const u16 *cp;

	if (dst_w > src_w) {
		idx = HS_UP_SCALE;
	} else {
		if ((dst_w << 1) < src_w)
			dst_w <<= 1;	/* first level decimation */
		if ((dst_w << 1) < src_w)
			dst_w <<= 1;	/* second level decimation */

		if (dst_w == src_w) {
			idx = HS_LE_16_16_SCALE;
		} else {
			/* ratio in 1/16th steps, clamped at a minimum of 8/16 */
			sixteenths = (dst_w << 4) / src_w;
			if (sixteenths < 8)
				sixteenths = 8;
			idx = HS_LT_9_16_SCALE + sixteenths - 8;
		}
	}

	cp = scaler_hs_coeffs[idx];

	for (i = 0; i < SC_NUM_PHASES * 2; i++) {
		for (j = 0; j < SC_H_NUM_TAPS; j++)
			*coeff_h++ = *cp++;
		/*
		 * for each phase, the scaler expects space for 8 coefficients
		 * in its memory. For the horizontal scaler, we copy the first
		 * 7 coefficients and skip the last slot to move to the next
		 * row to hold coefficients for the next phase
		 */
		coeff_h += SC_NUM_TAPS_MEM_ALIGN - SC_H_NUM_TAPS;
	}

	sc->load_coeff_h = true;
}
EXPORT_SYMBOL(sc_set_hs_coeffs);

/*
 * set the vertical scaler coefficients according to the ratio of output to
 * input heights
 */
void sc_set_vs_coeffs(struct sc_data *sc, void *addr, unsigned int src_h,
		unsigned int dst_h)
{
	int sixteenths;
	int idx;
	int i, j;
	u16 *coeff_v = addr;
	const u16 *cp;

	if (dst_h > src_h) {
		idx = VS_UP_SCALE;
	} else if (dst_h == src_h) {
		idx = VS_1_TO_1_SCALE;
	} else {
		sixteenths = (dst_h << 4) / src_h;
		if (sixteenths < 8)
			sixteenths = 8;
		idx = VS_LT_9_16_SCALE + sixteenths - 8;
	}

	cp = scaler_vs_coeffs[idx];

	for (i = 0; i < SC_NUM_PHASES * 2; i++) {
		for (j = 0; j < SC_V_NUM_TAPS; j++)
			*coeff_v++ = *cp++;
		/*
		 * for the vertical scaler, we copy the first 5 coefficients and
		 * skip the last 3 slots to move to the next row to hold
		 * coefficients for the next phase
		 */
		coeff_v += SC_NUM_TAPS_MEM_ALIGN - SC_V_NUM_TAPS;
	}

	sc->load_coeff_v = true;
}
EXPORT_SYMBOL(sc_set_vs_coeffs);

/*
 * Fill the scaler shadow-register words (sc_reg0/sc_reg8/sc_reg17 arrays in
 * the caller's MMR address/data block) for scaling src_w x src_h to
 * dst_w x dst_h. When the sizes are identical the scaler is put in bypass.
 * Vertical scaling uses RAV (running average) for >4x downscaling, otherwise
 * the polyphase filter.
 */
void sc_config_scaler(struct sc_data *sc, u32 *sc_reg0, u32 *sc_reg8,
		u32 *sc_reg17, unsigned int src_w, unsigned int src_h,
		unsigned int dst_w, unsigned int dst_h)
{
	struct device *dev = &sc->pdev->dev;
	u32 val;
	int dcm_x, dcm_shift;
	bool use_rav;
	unsigned long lltmp;
	u32 lin_acc_inc, lin_acc_inc_u;
	u32 col_acc_offset;
	u16 factor = 0;
	int row_acc_init_rav = 0, row_acc_init_rav_b = 0;
	u32 row_acc_inc = 0, row_acc_offset = 0, row_acc_offset_b = 0;
	/*
	 * location of SC register in payload memory with respect to the first
	 * register in the mmr address data block
	 */
	u32 *sc_reg9 = sc_reg8 + 1;
	u32 *sc_reg12 = sc_reg8 + 4;
	u32 *sc_reg13 = sc_reg8 + 5;
	u32 *sc_reg24 = sc_reg17 + 7;

	val = sc_reg0[0];

	/* clear all the features(they may get enabled elsewhere later) */
	val &= ~(CFG_SELFGEN_FID | CFG_TRIM | CFG_ENABLE_SIN2_VER_INTP |
		CFG_INTERLACE_I | CFG_DCM_4X | CFG_DCM_2X | CFG_AUTO_HS |
		CFG_ENABLE_EV | CFG_USE_RAV | CFG_INVT_FID | CFG_SC_BYPASS |
		CFG_INTERLACE_O | CFG_Y_PK_EN | CFG_HP_BYPASS | CFG_LINEAR);

	if (src_w == dst_w && src_h == dst_h) {
		/* 1:1 in both dimensions: bypass the scaler entirely */
		val |= CFG_SC_BYPASS;
		sc_reg0[0] = val;
		return;
	}

	/* we only support linear scaling for now */
	val |= CFG_LINEAR;

	/* configure horizontal scaler */

	/* enable 2X or 4X decimation */
	dcm_x = src_w / dst_w;
	if (dcm_x > 4) {
		val |= CFG_DCM_4X;
		dcm_shift = 2;
	} else if (dcm_x > 2) {
		val |= CFG_DCM_2X;
		dcm_shift = 1;
	} else {
		dcm_shift = 0;
	}

	/* linear accumulator increment in 8.24 fixed point */
	lltmp = dst_w - 1;
	lin_acc_inc = div64_u64(((u64)(src_w >> dcm_shift) - 1) << 24, lltmp);
	lin_acc_inc_u = 0;
	col_acc_offset = 0;

	dev_dbg(dev, "hs config: src_w = %d, dst_w = %d, decimation = %s, lin_acc_inc = %08x\n",
		src_w, dst_w, dcm_shift == 2 ? "4x" :
		(dcm_shift == 1 ? "2x" : "none"), lin_acc_inc);

	/* configure vertical scaler */

	/* use RAV for vertical scaler if vertical downscaling is > 4x */
	if (dst_h < (src_h >> 2)) {
		use_rav = true;
		val |= CFG_USE_RAV;
	} else {
		use_rav = false;
	}

	if (use_rav) {
		/* use RAV */
		/* downscale factor in 0.10 fixed point */
		factor = (u16) ((dst_h << 10) / src_h);

		row_acc_init_rav = factor + ((1 + factor) >> 1);
		if (row_acc_init_rav >= 1024)
			row_acc_init_rav -= 1024;

		row_acc_init_rav_b = row_acc_init_rav +
				(1 + (row_acc_init_rav >> 1)) -
				(1024 >> 1);

		if (row_acc_init_rav_b < 0) {
			row_acc_init_rav_b += row_acc_init_rav;
			row_acc_init_rav *= 2;
		}

		dev_dbg(dev, "vs config(RAV): src_h = %d, dst_h = %d, factor = %d, acc_init = %08x, acc_init_b = %08x\n",
			src_h, dst_h, factor, row_acc_init_rav,
			row_acc_init_rav_b);
	} else {
		/* use polyphase */
		/* row accumulator increment in 16.16 fixed point */
		row_acc_inc = ((src_h - 1) << 16) / (dst_h - 1);
		row_acc_offset = 0;
		row_acc_offset_b = 0;

		dev_dbg(dev, "vs config(POLY): src_h = %d, dst_h = %d,row_acc_inc = %08x\n",
			src_h, dst_h, row_acc_inc);
	}

	sc_reg0[0] = val;
	sc_reg0[1] = row_acc_inc;
	sc_reg0[2] = row_acc_offset;
	sc_reg0[3] = row_acc_offset_b;

	sc_reg0[4] = ((lin_acc_inc_u & CFG_LIN_ACC_INC_U_MASK) <<
			CFG_LIN_ACC_INC_U_SHIFT) | (dst_w << CFG_TAR_W_SHIFT) |
			(dst_h << CFG_TAR_H_SHIFT);

	sc_reg0[5] = (src_w << CFG_SRC_W_SHIFT) | (src_h << CFG_SRC_H_SHIFT);

	sc_reg0[6] = (row_acc_init_rav_b << CFG_ROW_ACC_INIT_RAV_B_SHIFT) |
		(row_acc_init_rav << CFG_ROW_ACC_INIT_RAV_SHIFT);

	*sc_reg9 = lin_acc_inc;

	*sc_reg12 = col_acc_offset << CFG_COL_ACC_OFFSET_SHIFT;

	*sc_reg13 = factor;

	*sc_reg24 = (src_w << CFG_ORG_W_SHIFT) | (src_h << CFG_ORG_H_SHIFT);
}
EXPORT_SYMBOL(sc_config_scaler);

/*
 * Allocate an sc_data handle (devm-managed) and map the scaler's MEM
 * resource named @res_name. Returns an ERR_PTR on allocation, missing
 * resource, or ioremap failure.
 */
struct sc_data *sc_create(struct platform_device *pdev, const char *res_name)
{
	struct sc_data *sc;

	dev_dbg(&pdev->dev, "sc_create\n");

	sc = devm_kzalloc(&pdev->dev, sizeof(*sc), GFP_KERNEL);
	if (!sc) {
		dev_err(&pdev->dev, "couldn't alloc sc_data\n");
		return ERR_PTR(-ENOMEM);
	}

	sc->pdev = pdev;

	sc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
	if (!sc->res) {
		dev_err(&pdev->dev, "missing '%s' platform resources data\n",
			res_name);
		return ERR_PTR(-ENODEV);
	}

	sc->base = devm_ioremap_resource(&pdev->dev, sc->res);
	if (IS_ERR(sc->base))
		return ERR_CAST(sc->base);

	return sc;
}
EXPORT_SYMBOL(sc_create);

MODULE_DESCRIPTION("TI VIP/VPE Scaler");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_LICENSE("GPL v2");
/* ---- dataset separator: repo "linux-master" ---- */
/* ---- end of file: drivers/media/platform/ti/vpe/sc.c ---- */
// SPDX-License-Identifier: GPL-2.0-only /* * TI VPE mem2mem driver, based on the virtual v4l2-mem2mem example driver * * Copyright (c) 2013 Texas Instruments Inc. * David Griego, <[email protected]> * Dale Farnsworth, <[email protected]> * Archit Taneja, <[email protected]> * * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. * Pawel Osciak, <[email protected]> * Marek Szyprowski, <[email protected]> * * Based on the virtual v4l2-mem2mem example device */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioctl.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/log2.h> #include <linux/sizes.h> #include <media/v4l2-common.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-event.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mem2mem.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-dma-contig.h> #include "vpdma.h" #include "vpdma_priv.h" #include "vpe_regs.h" #include "sc.h" #include "csc.h" #define VPE_MODULE_NAME "vpe" /* minimum and maximum frame sizes */ #define MIN_W 32 #define MIN_H 32 #define MAX_W 2048 #define MAX_H 2048 /* required alignments */ #define S_ALIGN 0 /* multiple of 1 */ #define H_ALIGN 1 /* multiple of 2 */ /* flags that indicate a format can be used for capture/output */ #define VPE_FMT_TYPE_CAPTURE (1 << 0) #define VPE_FMT_TYPE_OUTPUT (1 << 1) /* used as plane indices */ #define VPE_MAX_PLANES 2 #define VPE_LUMA 0 #define VPE_CHROMA 1 /* per m2m context info */ #define VPE_MAX_SRC_BUFS 3 /* need 3 src fields to de-interlace */ #define VPE_DEF_BUFS_PER_JOB 1 /* default one buffer per batch job */ /* * each VPE context can need up to 3 config descriptors, 7 input descriptors, * 3 output descriptors, and 10 control 
descriptors */ #define VPE_DESC_LIST_SIZE (10 * VPDMA_DTD_DESC_SIZE + \ 13 * VPDMA_CFD_CTD_DESC_SIZE) #define vpe_dbg(vpedev, fmt, arg...) \ dev_dbg((vpedev)->v4l2_dev.dev, fmt, ##arg) #define vpe_err(vpedev, fmt, arg...) \ dev_err((vpedev)->v4l2_dev.dev, fmt, ##arg) struct vpe_us_coeffs { unsigned short anchor_fid0_c0; unsigned short anchor_fid0_c1; unsigned short anchor_fid0_c2; unsigned short anchor_fid0_c3; unsigned short interp_fid0_c0; unsigned short interp_fid0_c1; unsigned short interp_fid0_c2; unsigned short interp_fid0_c3; unsigned short anchor_fid1_c0; unsigned short anchor_fid1_c1; unsigned short anchor_fid1_c2; unsigned short anchor_fid1_c3; unsigned short interp_fid1_c0; unsigned short interp_fid1_c1; unsigned short interp_fid1_c2; unsigned short interp_fid1_c3; }; /* * Default upsampler coefficients */ static const struct vpe_us_coeffs us_coeffs[] = { { /* Coefficients for progressive input */ 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8, 0x00C8, 0x0348, 0x0018, 0x3FD8, 0x3FB8, 0x0378, 0x00E8, 0x3FE8, }, { /* Coefficients for Top Field Interlaced input */ 0x0051, 0x03D5, 0x3FE3, 0x3FF7, 0x3FB5, 0x02E9, 0x018F, 0x3FD3, /* Coefficients for Bottom Field Interlaced input */ 0x016B, 0x0247, 0x00B1, 0x3F9D, 0x3FCF, 0x03DB, 0x005D, 0x3FF9, }, }; /* * the following registers are for configuring some of the parameters of the * motion and edge detection blocks inside DEI, these generally remain the same, * these could be passed later via userspace if some one needs to tweak these. */ struct vpe_dei_regs { unsigned long mdt_spacial_freq_thr_reg; /* VPE_DEI_REG2 */ unsigned long edi_config_reg; /* VPE_DEI_REG3 */ unsigned long edi_lut_reg0; /* VPE_DEI_REG4 */ unsigned long edi_lut_reg1; /* VPE_DEI_REG5 */ unsigned long edi_lut_reg2; /* VPE_DEI_REG6 */ unsigned long edi_lut_reg3; /* VPE_DEI_REG7 */ }; /* * default expert DEI register values, unlikely to be modified. 
*/ static const struct vpe_dei_regs dei_regs = { .mdt_spacial_freq_thr_reg = 0x020C0804u, .edi_config_reg = 0x0118100Cu, .edi_lut_reg0 = 0x08040200u, .edi_lut_reg1 = 0x1010100Cu, .edi_lut_reg2 = 0x10101010u, .edi_lut_reg3 = 0x10101010u, }; /* * The port_data structure contains per-port data. */ struct vpe_port_data { enum vpdma_channel channel; /* VPDMA channel */ u8 vb_index; /* input frame f, f-1, f-2 index */ u8 vb_part; /* plane index for co-panar formats */ }; /* * Define indices into the port_data tables */ #define VPE_PORT_LUMA1_IN 0 #define VPE_PORT_CHROMA1_IN 1 #define VPE_PORT_LUMA2_IN 2 #define VPE_PORT_CHROMA2_IN 3 #define VPE_PORT_LUMA3_IN 4 #define VPE_PORT_CHROMA3_IN 5 #define VPE_PORT_MV_IN 6 #define VPE_PORT_MV_OUT 7 #define VPE_PORT_LUMA_OUT 8 #define VPE_PORT_CHROMA_OUT 9 #define VPE_PORT_RGB_OUT 10 static const struct vpe_port_data port_data[11] = { [VPE_PORT_LUMA1_IN] = { .channel = VPE_CHAN_LUMA1_IN, .vb_index = 0, .vb_part = VPE_LUMA, }, [VPE_PORT_CHROMA1_IN] = { .channel = VPE_CHAN_CHROMA1_IN, .vb_index = 0, .vb_part = VPE_CHROMA, }, [VPE_PORT_LUMA2_IN] = { .channel = VPE_CHAN_LUMA2_IN, .vb_index = 1, .vb_part = VPE_LUMA, }, [VPE_PORT_CHROMA2_IN] = { .channel = VPE_CHAN_CHROMA2_IN, .vb_index = 1, .vb_part = VPE_CHROMA, }, [VPE_PORT_LUMA3_IN] = { .channel = VPE_CHAN_LUMA3_IN, .vb_index = 2, .vb_part = VPE_LUMA, }, [VPE_PORT_CHROMA3_IN] = { .channel = VPE_CHAN_CHROMA3_IN, .vb_index = 2, .vb_part = VPE_CHROMA, }, [VPE_PORT_MV_IN] = { .channel = VPE_CHAN_MV_IN, }, [VPE_PORT_MV_OUT] = { .channel = VPE_CHAN_MV_OUT, }, [VPE_PORT_LUMA_OUT] = { .channel = VPE_CHAN_LUMA_OUT, .vb_part = VPE_LUMA, }, [VPE_PORT_CHROMA_OUT] = { .channel = VPE_CHAN_CHROMA_OUT, .vb_part = VPE_CHROMA, }, [VPE_PORT_RGB_OUT] = { .channel = VPE_CHAN_RGB_OUT, .vb_part = VPE_LUMA, }, }; /* driver info for each of the supported video formats */ struct vpe_fmt { u32 fourcc; /* standard format identifier */ u8 types; /* CAPTURE and/or OUTPUT */ u8 coplanar; /* set for unpacked Luma 
and Chroma */ /* vpdma format info for each plane */ struct vpdma_data_format const *vpdma_fmt[VPE_MAX_PLANES]; }; static struct vpe_fmt vpe_formats[] = { { .fourcc = V4L2_PIX_FMT_NV16, .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, .coplanar = 1, .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y444], &vpdma_yuv_fmts[VPDMA_DATA_FMT_C444], }, }, { .fourcc = V4L2_PIX_FMT_NV12, .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, .coplanar = 1, .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420], &vpdma_yuv_fmts[VPDMA_DATA_FMT_C420], }, }, { .fourcc = V4L2_PIX_FMT_NV21, .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, .coplanar = 1, .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_Y420], &vpdma_yuv_fmts[VPDMA_DATA_FMT_CB420], }, }, { .fourcc = V4L2_PIX_FMT_YUYV, .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, .coplanar = 0, .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_YCB422], }, }, { .fourcc = V4L2_PIX_FMT_UYVY, .types = VPE_FMT_TYPE_CAPTURE | VPE_FMT_TYPE_OUTPUT, .coplanar = 0, .vpdma_fmt = { &vpdma_yuv_fmts[VPDMA_DATA_FMT_CBY422], }, }, { .fourcc = V4L2_PIX_FMT_RGB24, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB24], }, }, { .fourcc = V4L2_PIX_FMT_RGB32, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ARGB32], }, }, { .fourcc = V4L2_PIX_FMT_BGR24, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_BGR24], }, }, { .fourcc = V4L2_PIX_FMT_BGR32, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_ABGR32], }, }, { .fourcc = V4L2_PIX_FMT_RGB565, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGB565], }, }, { .fourcc = V4L2_PIX_FMT_RGB555, .types = VPE_FMT_TYPE_CAPTURE, .coplanar = 0, .vpdma_fmt = { &vpdma_rgb_fmts[VPDMA_DATA_FMT_RGBA16_5551], }, }, }; /* * per-queue, driver-specific private data. 
* there is one source queue and one destination queue for each m2m context. */ struct vpe_q_data { /* current v4l2 format info */ struct v4l2_format format; unsigned int flags; struct v4l2_rect c_rect; /* crop/compose rectangle */ struct vpe_fmt *fmt; /* format info */ }; /* vpe_q_data flag bits */ #define Q_DATA_FRAME_1D BIT(0) #define Q_DATA_MODE_TILED BIT(1) #define Q_DATA_INTERLACED_ALTERNATE BIT(2) #define Q_DATA_INTERLACED_SEQ_TB BIT(3) #define Q_DATA_INTERLACED_SEQ_BT BIT(4) #define Q_IS_SEQ_XX (Q_DATA_INTERLACED_SEQ_TB | \ Q_DATA_INTERLACED_SEQ_BT) #define Q_IS_INTERLACED (Q_DATA_INTERLACED_ALTERNATE | \ Q_DATA_INTERLACED_SEQ_TB | \ Q_DATA_INTERLACED_SEQ_BT) enum { Q_DATA_SRC = 0, Q_DATA_DST = 1, }; /* find our format description corresponding to the passed v4l2_format */ static struct vpe_fmt *__find_format(u32 fourcc) { struct vpe_fmt *fmt; unsigned int k; for (k = 0; k < ARRAY_SIZE(vpe_formats); k++) { fmt = &vpe_formats[k]; if (fmt->fourcc == fourcc) return fmt; } return NULL; } static struct vpe_fmt *find_format(struct v4l2_format *f) { return __find_format(f->fmt.pix.pixelformat); } /* * there is one vpe_dev structure in the driver, it is shared by * all instances. */ struct vpe_dev { struct v4l2_device v4l2_dev; struct video_device vfd; struct v4l2_m2m_dev *m2m_dev; atomic_t num_instances; /* count of driver instances */ dma_addr_t loaded_mmrs; /* shadow mmrs in device */ struct mutex dev_mutex; spinlock_t lock; int irq; void __iomem *base; struct resource *res; struct vpdma_data vpdma_data; struct vpdma_data *vpdma; /* vpdma data handle */ struct sc_data *sc; /* scaler data handle */ struct csc_data *csc; /* csc data handle */ }; /* * There is one vpe_ctx structure for each m2m context. 
*/ struct vpe_ctx { struct v4l2_fh fh; struct vpe_dev *dev; struct v4l2_ctrl_handler hdl; unsigned int field; /* current field */ unsigned int sequence; /* current frame/field seq */ unsigned int aborting; /* abort after next irq */ unsigned int bufs_per_job; /* input buffers per batch */ unsigned int bufs_completed; /* bufs done in this batch */ struct vpe_q_data q_data[2]; /* src & dst queue data */ struct vb2_v4l2_buffer *src_vbs[VPE_MAX_SRC_BUFS]; struct vb2_v4l2_buffer *dst_vb; dma_addr_t mv_buf_dma[2]; /* dma addrs of motion vector in/out bufs */ void *mv_buf[2]; /* virtual addrs of motion vector bufs */ size_t mv_buf_size; /* current motion vector buffer size */ struct vpdma_buf mmr_adb; /* shadow reg addr/data block */ struct vpdma_buf sc_coeff_h; /* h coeff buffer */ struct vpdma_buf sc_coeff_v; /* v coeff buffer */ struct vpdma_desc_list desc_list; /* DMA descriptor list */ bool deinterlacing; /* using de-interlacer */ bool load_mmrs; /* have new shadow reg values */ unsigned int src_mv_buf_selector; }; /* * M2M devices get 2 queues. * Return the queue given the type. 
*/ static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx, enum v4l2_buf_type type) { switch (type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: return &ctx->q_data[Q_DATA_SRC]; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: case V4L2_BUF_TYPE_VIDEO_CAPTURE: return &ctx->q_data[Q_DATA_DST]; default: return NULL; } return NULL; } static u32 read_reg(struct vpe_dev *dev, int offset) { return ioread32(dev->base + offset); } static void write_reg(struct vpe_dev *dev, int offset, u32 value) { iowrite32(value, dev->base + offset); } /* register field read/write helpers */ static int get_field(u32 value, u32 mask, int shift) { return (value & (mask << shift)) >> shift; } static int read_field_reg(struct vpe_dev *dev, int offset, u32 mask, int shift) { return get_field(read_reg(dev, offset), mask, shift); } static void write_field(u32 *valp, u32 field, u32 mask, int shift) { u32 val = *valp; val &= ~(mask << shift); val |= (field & mask) << shift; *valp = val; } static void write_field_reg(struct vpe_dev *dev, int offset, u32 field, u32 mask, int shift) { u32 val = read_reg(dev, offset); write_field(&val, field, mask, shift); write_reg(dev, offset, val); } /* * DMA address/data block for the shadow registers */ struct vpe_mmr_adb { struct vpdma_adb_hdr out_fmt_hdr; u32 out_fmt_reg[1]; u32 out_fmt_pad[3]; struct vpdma_adb_hdr us1_hdr; u32 us1_regs[8]; struct vpdma_adb_hdr us2_hdr; u32 us2_regs[8]; struct vpdma_adb_hdr us3_hdr; u32 us3_regs[8]; struct vpdma_adb_hdr dei_hdr; u32 dei_regs[8]; struct vpdma_adb_hdr sc_hdr0; u32 sc_regs0[7]; u32 sc_pad0[1]; struct vpdma_adb_hdr sc_hdr8; u32 sc_regs8[6]; u32 sc_pad8[2]; struct vpdma_adb_hdr sc_hdr17; u32 sc_regs17[9]; u32 sc_pad17[3]; struct vpdma_adb_hdr csc_hdr; u32 csc_regs[6]; u32 csc_pad[2]; }; #define GET_OFFSET_TOP(ctx, obj, reg) \ ((obj)->res->start - ctx->dev->res->start + reg) #define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \ VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, 
offset_a) /* * Set the headers for all of the address/data block structures. */ static void init_adb_hdrs(struct vpe_ctx *ctx) { VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT); VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0); VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0); VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0); VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE); VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0, GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0)); VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8, GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8)); VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17, GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17)); VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00)); }; /* * Allocate or re-allocate the motion vector DMA buffers * There are two buffers, one for input and one for output. * However, the roles are reversed after each field is processed. * In other words, after each field is processed, the previous * output (dst) MV buffer becomes the new input (src) MV buffer. 
*/
/*
 * realloc_mv_buffers - (re)size the two coherent MV DMA buffers.
 * A no-op when the size is unchanged; size == 0 frees both buffers
 * (this is how free_mv_buffers() works). Resets the src buffer selector.
 */
static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size)
{
	struct device *dev = ctx->dev->v4l2_dev.dev;

	if (ctx->mv_buf_size == size)
		return 0;

	if (ctx->mv_buf[0])
		dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0],
			ctx->mv_buf_dma[0]);

	if (ctx->mv_buf[1])
		dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1],
			ctx->mv_buf_dma[1]);

	if (size == 0)
		return 0;

	ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0],
				GFP_KERNEL);
	if (!ctx->mv_buf[0]) {
		vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
		return -ENOMEM;
	}

	ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1],
				GFP_KERNEL);
	if (!ctx->mv_buf[1]) {
		vpe_err(ctx->dev, "failed to allocate motion vector buffer\n");
		/* roll back the first allocation on partial failure */
		dma_free_coherent(dev, size, ctx->mv_buf[0],
			ctx->mv_buf_dma[0]);

		return -ENOMEM;
	}

	ctx->mv_buf_size = size;
	ctx->src_mv_buf_selector = 0;

	return 0;
}

static void free_mv_buffers(struct vpe_ctx *ctx)
{
	realloc_mv_buffers(ctx, 0);
}

/*
 * While de-interlacing, we keep the two most recent input buffers
 * around. This function frees those two buffers when we have
 * finished processing the current stream.
 */
static void free_vbs(struct vpe_ctx *ctx)
{
	struct vpe_dev *dev = ctx->dev;
	unsigned long flags;

	if (ctx->src_vbs[2] == NULL)
		return;

	spin_lock_irqsave(&dev->lock, flags);
	if (ctx->src_vbs[2]) {
		v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE);
		/* src_vbs[1] may alias src_vbs[2]; only complete it once */
		if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
			v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE);
		ctx->src_vbs[2] = NULL;
		ctx->src_vbs[1] = NULL;
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

/*
 * Enable or disable the VPE clocks
 */
static void vpe_set_clock_enable(struct vpe_dev *dev, bool on)
{
	u32 val = 0;

	if (on)
		val = VPE_DATA_PATH_CLK_ENABLE | VPE_VPEDMA_CLK_ENABLE;
	write_reg(dev, VPE_CLK_ENABLE, val);
}

/* Pulse the data-path reset bit in VPE_CLK_RESET (assert, wait, deassert). */
static void vpe_top_reset(struct vpe_dev *dev)
{
	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_DATA_PATH_CLK_RESET_MASK,
		VPE_DATA_PATH_CLK_RESET_SHIFT);

	usleep_range(100, 150);

	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_DATA_PATH_CLK_RESET_MASK,
		VPE_DATA_PATH_CLK_RESET_SHIFT);
}

/* Pulse the VPDMA reset bit in VPE_CLK_RESET (assert, wait, deassert). */
static void vpe_top_vpdma_reset(struct vpe_dev *dev)
{
	write_field_reg(dev, VPE_CLK_RESET, 1, VPE_VPDMA_CLK_RESET_MASK,
		VPE_VPDMA_CLK_RESET_SHIFT);

	usleep_range(100, 150);

	write_field_reg(dev, VPE_CLK_RESET, 0, VPE_VPDMA_CLK_RESET_MASK,
		VPE_VPDMA_CLK_RESET_SHIFT);
}

/*
 * Load the correct set of upsampler coefficients into the shadow MMRs
 * (progressive vs interlaced table is chosen from the source queue flags;
 * US2 and US3 mirror the values written to US1).
 */
static void set_us_coefficients(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	u32 *us1_reg = &mmr_adb->us1_regs[0];
	u32 *us2_reg = &mmr_adb->us2_regs[0];
	u32 *us3_reg = &mmr_adb->us3_regs[0];
	const unsigned short *cp, *end_cp;

	cp = &us_coeffs[0].anchor_fid0_c0;

	if (s_q_data->flags & Q_IS_INTERLACED)		/* interlaced */
		cp += sizeof(us_coeffs[0]) / sizeof(*cp);

	end_cp = cp + sizeof(us_coeffs[0]) / sizeof(*cp);

	while (cp < end_cp) {
		write_field(us1_reg, *cp++, VPE_US_C0_MASK, VPE_US_C0_SHIFT);
		write_field(us1_reg, *cp++, VPE_US_C1_MASK, VPE_US_C1_SHIFT);
		*us2_reg++ = *us1_reg;
		*us3_reg++ = *us1_reg++;
	}
ctx->load_mmrs = true; } /* * Set the upsampler config mode and the VPDMA line mode in the shadow MMRs. */ static void set_cfg_modes(struct vpe_ctx *ctx) { struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt; struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; u32 *us1_reg0 = &mmr_adb->us1_regs[0]; u32 *us2_reg0 = &mmr_adb->us2_regs[0]; u32 *us3_reg0 = &mmr_adb->us3_regs[0]; int cfg_mode = 1; /* * Cfg Mode 0: YUV420 source, enable upsampler, DEI is de-interlacing. * Cfg Mode 1: YUV422 source, disable upsampler, DEI is de-interlacing. */ if (fmt->fourcc == V4L2_PIX_FMT_NV12 || fmt->fourcc == V4L2_PIX_FMT_NV21) cfg_mode = 0; write_field(us1_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); write_field(us2_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); write_field(us3_reg0, cfg_mode, VPE_US_MODE_MASK, VPE_US_MODE_SHIFT); ctx->load_mmrs = true; } static void set_line_modes(struct vpe_ctx *ctx) { struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt; int line_mode = 1; if (fmt->fourcc == V4L2_PIX_FMT_NV12 || fmt->fourcc == V4L2_PIX_FMT_NV21) line_mode = 0; /* double lines to line buffer */ /* regs for now */ vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN); vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN); vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN); /* frame start for input luma */ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, VPE_CHAN_LUMA1_IN); vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, VPE_CHAN_LUMA2_IN); vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, VPE_CHAN_LUMA3_IN); /* frame start for input chroma */ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, VPE_CHAN_CHROMA1_IN); vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, VPE_CHAN_CHROMA2_IN); vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, VPE_CHAN_CHROMA3_IN); /* frame start for MV 
in client */ vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, VPE_CHAN_MV_IN); } /* * Set the shadow registers that are modified when the source * format changes. */ static void set_src_registers(struct vpe_ctx *ctx) { set_us_coefficients(ctx); } /* * Set the shadow registers that are modified when the destination * format changes. */ static void set_dst_registers(struct vpe_ctx *ctx) { struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt; const struct v4l2_format_info *finfo; u32 val = 0; finfo = v4l2_format_info(fmt->fourcc); if (v4l2_is_format_rgb(finfo)) { val |= VPE_RGB_OUT_SELECT; vpdma_set_bg_color(ctx->dev->vpdma, (struct vpdma_data_format *)fmt->vpdma_fmt[0], 0xff); } else if (fmt->fourcc == V4L2_PIX_FMT_NV16) val |= VPE_COLOR_SEPARATE_422; /* * the source of CHR_DS and CSC is always the scaler, irrespective of * whether it's used or not */ val |= VPE_DS_SRC_DEI_SCALER | VPE_CSC_SRC_DEI_SCALER; if (fmt->fourcc != V4L2_PIX_FMT_NV12 && fmt->fourcc != V4L2_PIX_FMT_NV21) val |= VPE_DS_BYPASS; mmr_adb->out_fmt_reg[0] = val; ctx->load_mmrs = true; } /* * Set the de-interlacer shadow register values */ static void set_dei_regs(struct vpe_ctx *ctx) { struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; unsigned int src_h = s_q_data->c_rect.height; unsigned int src_w = s_q_data->c_rect.width; u32 *dei_mmr0 = &mmr_adb->dei_regs[0]; bool deinterlace = true; u32 val = 0; /* * according to TRM, we should set DEI in progressive bypass mode when * the input content is progressive, however, DEI is bypassed correctly * for both progressive and interlace content in interlace bypass mode. * It has been recommended not to use progressive bypass mode. */ if (!(s_q_data->flags & Q_IS_INTERLACED) || !ctx->deinterlacing) { deinterlace = false; val = VPE_DEI_INTERLACE_BYPASS; } src_h = deinterlace ? 
src_h * 2 : src_h;	/* de-interlacing doubles the output field height */

	val |= (src_h << VPE_DEI_HEIGHT_SHIFT) |
		(src_w << VPE_DEI_WIDTH_SHIFT) |
		VPE_DEI_FIELD_FLUSH;

	*dei_mmr0 = val;

	ctx->load_mmrs = true;
}

/*
 * Program the expert DEI shadow MMRs (dei_regs[2..7], i.e. VPE_DEI_REG2..7)
 * from the default dei_regs tuning table.
 */
static void set_dei_shadow_registers(struct vpe_ctx *ctx)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	u32 *dei_mmr = &mmr_adb->dei_regs[0];
	const struct vpe_dei_regs *cur = &dei_regs;

	dei_mmr[2] = cur->mdt_spacial_freq_thr_reg;
	dei_mmr[3] = cur->edi_config_reg;
	dei_mmr[4] = cur->edi_lut_reg0;
	dei_mmr[5] = cur->edi_lut_reg1;
	dei_mmr[6] = cur->edi_lut_reg2;
	dei_mmr[7] = cur->edi_lut_reg3;

	ctx->load_mmrs = true;
}

/* Set the EDI input mode bits in the shadow copy of the EDI config register. */
static void config_edi_input_mode(struct vpe_ctx *ctx, int mode)
{
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	u32 *edi_config_reg = &mmr_adb->dei_regs[3];

	if (mode & 0x2)
		write_field(edi_config_reg, 1, 1, 2);	/* EDI_ENABLE_3D */

	/*
	 * NOTE(review): "mode & 0x3" is true whenever either low bit is set,
	 * which makes this condition a superset of the one above — possibly
	 * intended as a distinct bit test; confirm against the TRM.
	 */
	if (mode & 0x3)
		write_field(edi_config_reg, 1, 1, 3);	/* EDI_CHROMA_3D */

	write_field(edi_config_reg, mode, VPE_EDI_INP_MODE_MASK,
		VPE_EDI_INP_MODE_SHIFT);

	ctx->load_mmrs = true;
}

/*
 * Set the shadow registers whose values are modified when either the
 * source or destination format is changed.
 */
static int set_srcdst_params(struct vpe_ctx *ctx)
{
	struct vpe_q_data *s_q_data =  &ctx->q_data[Q_DATA_SRC];
	struct vpe_q_data *d_q_data =  &ctx->q_data[Q_DATA_DST];
	struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr;
	unsigned int src_w = s_q_data->c_rect.width;
	unsigned int src_h = s_q_data->c_rect.height;
	unsigned int dst_w = d_q_data->c_rect.width;
	unsigned int dst_h = d_q_data->c_rect.height;
	struct v4l2_pix_format_mplane *spix;
	size_t mv_buf_size;
	int ret;

	ctx->sequence = 0;
	ctx->field = V4L2_FIELD_TOP;

	spix = &s_q_data->format.fmt.pix_mp;

	/* de-interlacing needed only for interlaced src + progressive dst */
	if ((s_q_data->flags & Q_IS_INTERLACED) &&
			!(d_q_data->flags & Q_IS_INTERLACED)) {
		int bytes_per_line;
		const struct vpdma_data_format *mv =
			&vpdma_misc_fmts[VPDMA_DATA_FMT_MV];

		/*
		 * we make sure that the source image has a 16 byte aligned
		 * stride, we need to do the same for the motion vector buffer
		 * by aligning it's stride to the next 16 byte boundary. this
		 * extra space will not be used by the de-interlacer, but will
		 * ensure that vpdma operates correctly
		 */
		bytes_per_line = ALIGN((spix->width * mv->depth) >> 3,
				       VPDMA_STRIDE_ALIGN);
		mv_buf_size = bytes_per_line * spix->height;

		ctx->deinterlacing = true;
		src_h <<= 1;
	} else {
		ctx->deinterlacing = false;
		mv_buf_size = 0;
	}

	free_vbs(ctx);
	ctx->src_vbs[2] = ctx->src_vbs[1] = ctx->src_vbs[0] = NULL;

	ret = realloc_mv_buffers(ctx, mv_buf_size);
	if (ret)
		return ret;

	set_cfg_modes(ctx);
	set_dei_regs(ctx);

	csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0],
		      &s_q_data->format, &d_q_data->format);

	sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w);
	sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h);

	sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0],
		&mmr_adb->sc_regs8[0], &mmr_adb->sc_regs17[0],
		src_w, src_h, dst_w, dst_h);

	return 0;
}

/*
 * mem2mem callbacks
 */

/*
 * job_ready() - check whether an instance is ready to be scheduled to run
 */
static int job_ready(void *priv)
{
	struct vpe_ctx *ctx = priv;

	/*
	 * This check is needed as this might be called
directly from driver * When called by m2m framework, this will always satisfy, but when * called from vpe_irq, this might fail. (src stream with zero buffers) */ if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) <= 0 || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) <= 0) return 0; return 1; } static void job_abort(void *priv) { struct vpe_ctx *ctx = priv; /* Will cancel the transaction in the next interrupt handler */ ctx->aborting = 1; } static void vpe_dump_regs(struct vpe_dev *dev) { #define DUMPREG(r) vpe_dbg(dev, "%-35s %08x\n", #r, read_reg(dev, VPE_##r)) vpe_dbg(dev, "VPE Registers:\n"); DUMPREG(PID); DUMPREG(SYSCONFIG); DUMPREG(INT0_STATUS0_RAW); DUMPREG(INT0_STATUS0); DUMPREG(INT0_ENABLE0); DUMPREG(INT0_STATUS1_RAW); DUMPREG(INT0_STATUS1); DUMPREG(INT0_ENABLE1); DUMPREG(CLK_ENABLE); DUMPREG(CLK_RESET); DUMPREG(CLK_FORMAT_SELECT); DUMPREG(CLK_RANGE_MAP); DUMPREG(US1_R0); DUMPREG(US1_R1); DUMPREG(US1_R2); DUMPREG(US1_R3); DUMPREG(US1_R4); DUMPREG(US1_R5); DUMPREG(US1_R6); DUMPREG(US1_R7); DUMPREG(US2_R0); DUMPREG(US2_R1); DUMPREG(US2_R2); DUMPREG(US2_R3); DUMPREG(US2_R4); DUMPREG(US2_R5); DUMPREG(US2_R6); DUMPREG(US2_R7); DUMPREG(US3_R0); DUMPREG(US3_R1); DUMPREG(US3_R2); DUMPREG(US3_R3); DUMPREG(US3_R4); DUMPREG(US3_R5); DUMPREG(US3_R6); DUMPREG(US3_R7); DUMPREG(DEI_FRAME_SIZE); DUMPREG(MDT_BYPASS); DUMPREG(MDT_SF_THRESHOLD); DUMPREG(EDI_CONFIG); DUMPREG(DEI_EDI_LUT_R0); DUMPREG(DEI_EDI_LUT_R1); DUMPREG(DEI_EDI_LUT_R2); DUMPREG(DEI_EDI_LUT_R3); DUMPREG(DEI_FMD_WINDOW_R0); DUMPREG(DEI_FMD_WINDOW_R1); DUMPREG(DEI_FMD_CONTROL_R0); DUMPREG(DEI_FMD_CONTROL_R1); DUMPREG(DEI_FMD_STATUS_R0); DUMPREG(DEI_FMD_STATUS_R1); DUMPREG(DEI_FMD_STATUS_R2); #undef DUMPREG sc_dump_regs(dev->sc); csc_dump_regs(dev->csc); } static void add_out_dtd(struct vpe_ctx *ctx, int port) { struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST]; const struct vpe_port_data *p_data = &port_data[port]; struct vb2_buffer *vb = &ctx->dst_vb->vb2_buf; struct vpe_fmt *fmt = q_data->fmt; const struct 
vpdma_data_format *vpdma_fmt; int mv_buf_selector = !ctx->src_mv_buf_selector; struct v4l2_pix_format_mplane *pix; dma_addr_t dma_addr; u32 flags = 0; u32 offset = 0; u32 stride; if (port == VPE_PORT_MV_OUT) { vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; dma_addr = ctx->mv_buf_dma[mv_buf_selector]; q_data = &ctx->q_data[Q_DATA_SRC]; pix = &q_data->format.fmt.pix_mp; stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3, VPDMA_STRIDE_ALIGN); } else { /* to incorporate interleaved formats */ int plane = fmt->coplanar ? p_data->vb_part : 0; pix = &q_data->format.fmt.pix_mp; vpdma_fmt = fmt->vpdma_fmt[plane]; /* * If we are using a single plane buffer and * we need to set a separate vpdma chroma channel. */ if (pix->num_planes == 1 && plane) { dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0); /* Compute required offset */ offset = pix->plane_fmt[0].bytesperline * pix->height; } else { dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane); /* Use address as is, no offset */ offset = 0; } if (!dma_addr) { vpe_err(ctx->dev, "acquiring output buffer(%d) dma_addr failed\n", port); return; } /* Apply the offset */ dma_addr += offset; stride = pix->plane_fmt[VPE_LUMA].bytesperline; } if (q_data->flags & Q_DATA_FRAME_1D) flags |= VPDMA_DATA_FRAME_1D; if (q_data->flags & Q_DATA_MODE_TILED) flags |= VPDMA_DATA_MODE_TILED; vpdma_set_max_size(ctx->dev->vpdma, VPDMA_MAX_SIZE1, MAX_W, MAX_H); vpdma_add_out_dtd(&ctx->desc_list, pix->width, stride, &q_data->c_rect, vpdma_fmt, dma_addr, MAX_OUT_WIDTH_REG1, MAX_OUT_HEIGHT_REG1, p_data->channel, flags); } static void add_in_dtd(struct vpe_ctx *ctx, int port) { struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC]; const struct vpe_port_data *p_data = &port_data[port]; struct vb2_buffer *vb = &ctx->src_vbs[p_data->vb_index]->vb2_buf; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vpe_fmt *fmt = q_data->fmt; struct v4l2_pix_format_mplane *pix; const struct vpdma_data_format *vpdma_fmt; int mv_buf_selector = 
ctx->src_mv_buf_selector; int field = vbuf->field == V4L2_FIELD_BOTTOM; int frame_width, frame_height; dma_addr_t dma_addr; u32 flags = 0; u32 offset = 0; u32 stride; pix = &q_data->format.fmt.pix_mp; if (port == VPE_PORT_MV_IN) { vpdma_fmt = &vpdma_misc_fmts[VPDMA_DATA_FMT_MV]; dma_addr = ctx->mv_buf_dma[mv_buf_selector]; stride = ALIGN((pix->width * vpdma_fmt->depth) >> 3, VPDMA_STRIDE_ALIGN); } else { /* to incorporate interleaved formats */ int plane = fmt->coplanar ? p_data->vb_part : 0; vpdma_fmt = fmt->vpdma_fmt[plane]; /* * If we are using a single plane buffer and * we need to set a separate vpdma chroma channel. */ if (pix->num_planes == 1 && plane) { dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0); /* Compute required offset */ offset = pix->plane_fmt[0].bytesperline * pix->height; } else { dma_addr = vb2_dma_contig_plane_dma_addr(vb, plane); /* Use address as is, no offset */ offset = 0; } if (!dma_addr) { vpe_err(ctx->dev, "acquiring output buffer(%d) dma_addr failed\n", port); return; } /* Apply the offset */ dma_addr += offset; stride = pix->plane_fmt[VPE_LUMA].bytesperline; /* * field used in VPDMA desc = 0 (top) / 1 (bottom) * Use top or bottom field from same vb alternately * For each de-interlacing operation, f,f-1,f-2 should be one * of TBT or BTB */ if (q_data->flags & Q_DATA_INTERLACED_SEQ_TB || q_data->flags & Q_DATA_INTERLACED_SEQ_BT) { /* Select initial value based on format */ if (q_data->flags & Q_DATA_INTERLACED_SEQ_BT) field = 1; else field = 0; /* Toggle for each vb_index and each operation */ field = (field + p_data->vb_index + ctx->sequence) % 2; if (field) { int height = pix->height / 2; int bpp; if (fmt->fourcc == V4L2_PIX_FMT_NV12 || fmt->fourcc == V4L2_PIX_FMT_NV21) bpp = 1; else bpp = vpdma_fmt->depth >> 3; if (plane) height /= 2; dma_addr += pix->width * height * bpp; } } } if (q_data->flags & Q_DATA_FRAME_1D) flags |= VPDMA_DATA_FRAME_1D; if (q_data->flags & Q_DATA_MODE_TILED) flags |= VPDMA_DATA_MODE_TILED; frame_width = 
q_data->c_rect.width;
	frame_height = q_data->c_rect.height;

	/* NV12/NV21 chroma plane carries half the lines of the luma plane */
	if (p_data->vb_part && (fmt->fourcc == V4L2_PIX_FMT_NV12 ||
				fmt->fourcc == V4L2_PIX_FMT_NV21))
		frame_height /= 2;

	vpdma_add_in_dtd(&ctx->desc_list, pix->width, stride,
			 &q_data->c_rect, vpdma_fmt, dma_addr,
			 p_data->channel, field, flags, frame_width,
			 frame_height, 0, 0);
}

/*
 * Enable the expected IRQ sources
 */
static void enable_irqs(struct vpe_ctx *ctx)
{
	write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE);
	write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT |
				VPE_DS1_UV_ERROR_INT);

	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, true);
}

/* Mask every VPE interrupt source and the VPDMA list-complete IRQ */
static void disable_irqs(struct vpe_ctx *ctx)
{
	write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff);
	write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff);

	vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, 0, false);
}

/* device_run() - prepares and starts the device
 *
 * This function is only called when both the source and destination
 * buffers are in place.
 */
static void device_run(void *priv)
{
	struct vpe_ctx *ctx = priv;
	struct sc_data *sc = ctx->dev->sc;
	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	const struct v4l2_format_info *d_finfo;

	d_finfo = v4l2_format_info(d_q_data->fmt->fourcc);

	if (ctx->deinterlacing && s_q_data->flags & Q_IS_SEQ_XX &&
	    ctx->sequence % 2 == 0) {
		/* When using SEQ_XX type buffers, each buffer has two fields
		 * (top & bottom), so removing one buffer actually consumes
		 * two fields.  Alternate between two operations:
		 * Even : consume one field but DO NOT REMOVE from queue
		 * Odd  : consume other field and REMOVE from queue
		 */
		ctx->src_vbs[0] = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
		WARN_ON(ctx->src_vbs[0] == NULL);
	} else {
		ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		WARN_ON(ctx->src_vbs[0] == NULL);
	}

	ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
	WARN_ON(ctx->dst_vb == NULL);

	if
(ctx->deinterlacing) { if (ctx->src_vbs[2] == NULL) { ctx->src_vbs[2] = ctx->src_vbs[0]; WARN_ON(ctx->src_vbs[2] == NULL); ctx->src_vbs[1] = ctx->src_vbs[0]; WARN_ON(ctx->src_vbs[1] == NULL); } /* * we have output the first 2 frames through line average, we * now switch to EDI de-interlacer */ if (ctx->sequence == 2) config_edi_input_mode(ctx, 0x3); /* EDI (Y + UV) */ } /* config descriptors */ if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) { vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb); vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb); set_line_modes(ctx); ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr; ctx->load_mmrs = false; } if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr || sc->load_coeff_h) { vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h); vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT, &ctx->sc_coeff_h, 0); sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr; sc->load_coeff_h = false; } if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr || sc->load_coeff_v) { vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v); vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT, &ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4); sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr; sc->load_coeff_v = false; } /* output data descriptors */ if (ctx->deinterlacing) add_out_dtd(ctx, VPE_PORT_MV_OUT); if (v4l2_is_format_rgb(d_finfo)) { add_out_dtd(ctx, VPE_PORT_RGB_OUT); } else { add_out_dtd(ctx, VPE_PORT_LUMA_OUT); if (d_q_data->fmt->coplanar) add_out_dtd(ctx, VPE_PORT_CHROMA_OUT); } /* input data descriptors */ if (ctx->deinterlacing) { add_in_dtd(ctx, VPE_PORT_LUMA3_IN); add_in_dtd(ctx, VPE_PORT_CHROMA3_IN); add_in_dtd(ctx, VPE_PORT_LUMA2_IN); add_in_dtd(ctx, VPE_PORT_CHROMA2_IN); } add_in_dtd(ctx, VPE_PORT_LUMA1_IN); add_in_dtd(ctx, VPE_PORT_CHROMA1_IN); if (ctx->deinterlacing) add_in_dtd(ctx, VPE_PORT_MV_IN); /* sync on channel control descriptors for input ports */ vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN); 
vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN); if (ctx->deinterlacing) { vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA2_IN); vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA2_IN); vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA3_IN); vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA3_IN); vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN); } /* sync on channel control descriptors for output ports */ if (v4l2_is_format_rgb(d_finfo)) { vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_RGB_OUT); } else { vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA_OUT); if (d_q_data->fmt->coplanar) vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA_OUT); } if (ctx->deinterlacing) vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT); enable_irqs(ctx); vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf); vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list, 0); } static void dei_error(struct vpe_ctx *ctx) { dev_warn(ctx->dev->v4l2_dev.dev, "received DEI error interrupt\n"); } static void ds1_uv_error(struct vpe_ctx *ctx) { dev_warn(ctx->dev->v4l2_dev.dev, "received downsampler error interrupt\n"); } static irqreturn_t vpe_irq(int irq_vpe, void *data) { struct vpe_dev *dev = (struct vpe_dev *)data; struct vpe_ctx *ctx; struct vpe_q_data *d_q_data; struct vb2_v4l2_buffer *s_vb, *d_vb; unsigned long flags; u32 irqst0, irqst1; bool list_complete = false; irqst0 = read_reg(dev, VPE_INT0_STATUS0); if (irqst0) { write_reg(dev, VPE_INT0_STATUS0_CLR, irqst0); vpe_dbg(dev, "INT0_STATUS0 = 0x%08x\n", irqst0); } irqst1 = read_reg(dev, VPE_INT0_STATUS1); if (irqst1) { write_reg(dev, VPE_INT0_STATUS1_CLR, irqst1); vpe_dbg(dev, "INT0_STATUS1 = 0x%08x\n", irqst1); } ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev); if (!ctx) { vpe_err(dev, "instance released before end of transaction\n"); goto handled; } if (irqst1) { if (irqst1 & VPE_DEI_ERROR_INT) { irqst1 &= 
~VPE_DEI_ERROR_INT; dei_error(ctx); } if (irqst1 & VPE_DS1_UV_ERROR_INT) { irqst1 &= ~VPE_DS1_UV_ERROR_INT; ds1_uv_error(ctx); } } if (irqst0) { if (irqst0 & VPE_INT0_LIST0_COMPLETE) vpdma_clear_list_stat(ctx->dev->vpdma, 0, 0); irqst0 &= ~(VPE_INT0_LIST0_COMPLETE); list_complete = true; } if (irqst0 | irqst1) { dev_warn(dev->v4l2_dev.dev, "Unexpected interrupt: INT0_STATUS0 = 0x%08x, INT0_STATUS1 = 0x%08x\n", irqst0, irqst1); } /* * Setup next operation only when list complete IRQ occurs * otherwise, skip the following code */ if (!list_complete) goto handled; disable_irqs(ctx); vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf); vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb); vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h); vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v); vpdma_reset_desc_list(&ctx->desc_list); /* the previous dst mv buffer becomes the next src mv buffer */ ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector; s_vb = ctx->src_vbs[0]; d_vb = ctx->dst_vb; d_vb->flags = s_vb->flags; d_vb->vb2_buf.timestamp = s_vb->vb2_buf.timestamp; if (s_vb->flags & V4L2_BUF_FLAG_TIMECODE) d_vb->timecode = s_vb->timecode; d_vb->sequence = ctx->sequence; s_vb->sequence = ctx->sequence; d_q_data = &ctx->q_data[Q_DATA_DST]; if (d_q_data->flags & Q_IS_INTERLACED) { d_vb->field = ctx->field; if (ctx->field == V4L2_FIELD_BOTTOM) { ctx->sequence++; ctx->field = V4L2_FIELD_TOP; } else { WARN_ON(ctx->field != V4L2_FIELD_TOP); ctx->field = V4L2_FIELD_BOTTOM; } } else { d_vb->field = V4L2_FIELD_NONE; ctx->sequence++; } if (ctx->deinterlacing) { /* * Allow source buffer to be dequeued only if it won't be used * in the next iteration. All vbs are initialized to first * buffer and we are shifting buffers every iteration, for the * first two iterations, no buffer will be dequeued. 
 * This ensures that driver will keep (n-2)th (n-1)th and (n)th
		 * field when deinterlacing is enabled
		 */
		if (ctx->src_vbs[2] != ctx->src_vbs[1])
			s_vb = ctx->src_vbs[2];
		else
			s_vb = NULL;
	}

	spin_lock_irqsave(&dev->lock, flags);

	if (s_vb)
		v4l2_m2m_buf_done(s_vb, VB2_BUF_STATE_DONE);

	v4l2_m2m_buf_done(d_vb, VB2_BUF_STATE_DONE);

	spin_unlock_irqrestore(&dev->lock, flags);

	/* shift the field history window by one source buffer */
	if (ctx->deinterlacing) {
		ctx->src_vbs[2] = ctx->src_vbs[1];
		ctx->src_vbs[1] = ctx->src_vbs[0];
	}

	/*
	 * Since vb2_buf_done has already been called for these buffers we
	 * can now NULL them out so that we won't try to clean out stray
	 * pointers later on.
	 */
	ctx->src_vbs[0] = NULL;
	ctx->dst_vb = NULL;

	if (ctx->aborting)
		goto finished;

	ctx->bufs_completed++;
	if (ctx->bufs_completed < ctx->bufs_per_job && job_ready(ctx)) {
		device_run(ctx);
		goto handled;
	}

finished:
	vpe_dbg(ctx->dev, "finishing transaction\n");
	ctx->bufs_completed = 0;
	v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
handled:
	return IRQ_HANDLED;
}

/*
 * video ioctls
 */
static int vpe_querycap(struct file *file, void *priv,
			struct v4l2_capability *cap)
{
	strscpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver));
	strscpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 VPE_MODULE_NAME);
	return 0;
}

/*
 * Walk the format table and return the f->index'th entry whose type mask
 * matches @type; -EINVAL when the index is past the last match.
 */
static int __enum_fmt(struct v4l2_fmtdesc *f, u32 type)
{
	int i, index;
	struct vpe_fmt *fmt = NULL;

	index = 0;
	for (i = 0; i < ARRAY_SIZE(vpe_formats); ++i) {
		if (vpe_formats[i].types & type) {
			if (index == f->index) {
				fmt = &vpe_formats[i];
				break;
			}
			index++;
		}
	}

	if (!fmt)
		return -EINVAL;

	f->pixelformat = fmt->fourcc;
	return 0;
}

static int vpe_enum_fmt(struct file *file, void *priv,
			struct v4l2_fmtdesc *f)
{
	if (V4L2_TYPE_IS_OUTPUT(f->type))
		return __enum_fmt(f, VPE_FMT_TYPE_OUTPUT);

	return __enum_fmt(f, VPE_FMT_TYPE_CAPTURE);
}

static int vpe_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
	struct vpe_ctx *ctx =
file->private_data; struct vb2_queue *vq; struct vpe_q_data *q_data; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); if (!vq) return -EINVAL; q_data = get_q_data(ctx, f->type); if (!q_data) return -EINVAL; *f = q_data->format; if (V4L2_TYPE_IS_CAPTURE(f->type)) { struct vpe_q_data *s_q_data; struct v4l2_pix_format_mplane *spix; /* get colorimetry from the source queue */ s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); spix = &s_q_data->format.fmt.pix_mp; pix->colorspace = spix->colorspace; pix->xfer_func = spix->xfer_func; pix->ycbcr_enc = spix->ycbcr_enc; pix->quantization = spix->quantization; } return 0; } static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f, struct vpe_fmt *fmt, int type) { struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; struct v4l2_plane_pix_format *plane_fmt; unsigned int w_align; int i, depth, depth_bytes, height; unsigned int stride = 0; const struct v4l2_format_info *finfo; if (!fmt || !(fmt->types & type)) { vpe_dbg(ctx->dev, "Fourcc format (0x%08x) invalid.\n", pix->pixelformat); fmt = __find_format(V4L2_PIX_FMT_YUYV); } if (pix->field != V4L2_FIELD_NONE && pix->field != V4L2_FIELD_ALTERNATE && pix->field != V4L2_FIELD_SEQ_TB && pix->field != V4L2_FIELD_SEQ_BT) pix->field = V4L2_FIELD_NONE; depth = fmt->vpdma_fmt[VPE_LUMA]->depth; /* * the line stride should 16 byte aligned for VPDMA to work, based on * the bytes per pixel, figure out how much the width should be aligned * to make sure line stride is 16 byte aligned */ depth_bytes = depth >> 3; if (depth_bytes == 3) { /* * if bpp is 3(as in some RGB formats), the pixel width doesn't * really help in ensuring line stride is 16 byte aligned */ w_align = 4; } else { /* * for the remainder bpp(4, 2 and 1), the pixel width alignment * can ensure a line stride alignment of 16 bytes. 
For example, * if bpp is 2, then the line stride can be 16 byte aligned if * the width is 8 byte aligned */ /* * HACK: using order_base_2() here causes lots of asm output * errors with smatch, on i386: * ./arch/x86/include/asm/bitops.h:457:22: * warning: asm output is not an lvalue * Perhaps some gcc optimization is doing the wrong thing * there. * Let's get rid of them by doing the calculus on two steps */ w_align = roundup_pow_of_two(VPDMA_DESC_ALIGN / depth_bytes); w_align = ilog2(w_align); } v4l_bound_align_image(&pix->width, MIN_W, MAX_W, w_align, &pix->height, MIN_H, MAX_H, H_ALIGN, S_ALIGN); if (!pix->num_planes || pix->num_planes > 2) pix->num_planes = fmt->coplanar ? 2 : 1; else if (pix->num_planes > 1 && !fmt->coplanar) pix->num_planes = 1; pix->pixelformat = fmt->fourcc; finfo = v4l2_format_info(fmt->fourcc); /* * For the actual image parameters, we need to consider the field * height of the image for SEQ_XX buffers. */ if (pix->field == V4L2_FIELD_SEQ_TB || pix->field == V4L2_FIELD_SEQ_BT) height = pix->height / 2; else height = pix->height; if (!pix->colorspace) { if (v4l2_is_format_rgb(finfo)) { pix->colorspace = V4L2_COLORSPACE_SRGB; } else { if (height > 1280) /* HD */ pix->colorspace = V4L2_COLORSPACE_REC709; else /* SD */ pix->colorspace = V4L2_COLORSPACE_SMPTE170M; } } for (i = 0; i < pix->num_planes; i++) { plane_fmt = &pix->plane_fmt[i]; depth = fmt->vpdma_fmt[i]->depth; stride = (pix->width * fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3; if (stride > plane_fmt->bytesperline) plane_fmt->bytesperline = stride; plane_fmt->bytesperline = clamp_t(u32, plane_fmt->bytesperline, stride, VPDMA_MAX_STRIDE); plane_fmt->bytesperline = ALIGN(plane_fmt->bytesperline, VPDMA_STRIDE_ALIGN); if (i == VPE_LUMA) { plane_fmt->sizeimage = pix->height * plane_fmt->bytesperline; if (pix->num_planes == 1 && fmt->coplanar) plane_fmt->sizeimage += pix->height * plane_fmt->bytesperline * fmt->vpdma_fmt[VPE_CHROMA]->depth >> 3; } else { /* i == VIP_CHROMA */ plane_fmt->sizeimage 
= (pix->height * plane_fmt->bytesperline * depth) >> 3; } } return 0; } static int vpe_try_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct vpe_ctx *ctx = file->private_data; struct vpe_fmt *fmt = find_format(f); if (V4L2_TYPE_IS_OUTPUT(f->type)) return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT); else return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE); } static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f) { struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; struct v4l2_pix_format_mplane *qpix; struct vpe_q_data *q_data; struct vb2_queue *vq; vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); if (!vq) return -EINVAL; if (vb2_is_busy(vq)) { vpe_err(ctx->dev, "queue busy\n"); return -EBUSY; } q_data = get_q_data(ctx, f->type); if (!q_data) return -EINVAL; qpix = &q_data->format.fmt.pix_mp; q_data->fmt = find_format(f); q_data->format = *f; q_data->c_rect.left = 0; q_data->c_rect.top = 0; q_data->c_rect.width = pix->width; q_data->c_rect.height = pix->height; if (qpix->field == V4L2_FIELD_ALTERNATE) q_data->flags |= Q_DATA_INTERLACED_ALTERNATE; else if (qpix->field == V4L2_FIELD_SEQ_TB) q_data->flags |= Q_DATA_INTERLACED_SEQ_TB; else if (qpix->field == V4L2_FIELD_SEQ_BT) q_data->flags |= Q_DATA_INTERLACED_SEQ_BT; else q_data->flags &= ~Q_IS_INTERLACED; /* the crop height is halved for the case of SEQ_XX buffers */ if (q_data->flags & Q_IS_SEQ_XX) q_data->c_rect.height /= 2; vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d", f->type, pix->width, pix->height, pix->pixelformat, pix->plane_fmt[0].bytesperline); if (pix->num_planes == 2) vpe_dbg(ctx->dev, " bpl_uv %d\n", pix->plane_fmt[1].bytesperline); return 0; } static int vpe_s_fmt(struct file *file, void *priv, struct v4l2_format *f) { int ret; struct vpe_ctx *ctx = file->private_data; ret = vpe_try_fmt(file, priv, f); if (ret) return ret; ret = __vpe_s_fmt(ctx, f); if (ret) return ret; if (V4L2_TYPE_IS_OUTPUT(f->type)) set_src_registers(ctx); else 
set_dst_registers(ctx); return set_srcdst_params(ctx); } static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s) { struct vpe_q_data *q_data; struct v4l2_pix_format_mplane *pix; int height; if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)) return -EINVAL; q_data = get_q_data(ctx, s->type); if (!q_data) return -EINVAL; pix = &q_data->format.fmt.pix_mp; switch (s->target) { case V4L2_SEL_TGT_COMPOSE: /* * COMPOSE target is only valid for capture buffer type, return * error for output buffer type */ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; break; case V4L2_SEL_TGT_CROP: /* * CROP target is only valid for output buffer type, return * error for capture buffer type */ if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; break; /* * bound and default crop/compose targets are invalid targets to * try/set */ default: return -EINVAL; } /* * For SEQ_XX buffers, crop height should be less than the height of * the field height, not the buffer height */ if (q_data->flags & Q_IS_SEQ_XX) height = pix->height / 2; else height = pix->height; if (s->r.top < 0 || s->r.left < 0) { vpe_err(ctx->dev, "negative values for top and left\n"); s->r.top = s->r.left = 0; } v4l_bound_align_image(&s->r.width, MIN_W, pix->width, 1, &s->r.height, MIN_H, height, H_ALIGN, S_ALIGN); /* adjust left/top if cropping rectangle is out of bounds */ if (s->r.left + s->r.width > pix->width) s->r.left = pix->width - s->r.width; if (s->r.top + s->r.height > pix->height) s->r.top = pix->height - s->r.height; return 0; } static int vpe_g_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct vpe_ctx *ctx = file->private_data; struct vpe_q_data *q_data; struct v4l2_pix_format_mplane *pix; bool use_c_rect = false; if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) && (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)) return -EINVAL; q_data = get_q_data(ctx, s->type); if (!q_data) return -EINVAL; pix = 
&q_data->format.fmt.pix_mp; switch (s->target) { case V4L2_SEL_TGT_COMPOSE_DEFAULT: case V4L2_SEL_TGT_COMPOSE_BOUNDS: if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; break; case V4L2_SEL_TGT_CROP_BOUNDS: case V4L2_SEL_TGT_CROP_DEFAULT: if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; break; case V4L2_SEL_TGT_COMPOSE: if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; use_c_rect = true; break; case V4L2_SEL_TGT_CROP: if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; use_c_rect = true; break; default: return -EINVAL; } if (use_c_rect) { /* * for CROP/COMPOSE target type, return c_rect params from the * respective buffer type */ s->r = q_data->c_rect; } else { /* * for DEFAULT/BOUNDS target type, return width and height from * S_FMT of the respective buffer type */ s->r.left = 0; s->r.top = 0; s->r.width = pix->width; s->r.height = pix->height; } return 0; } static int vpe_s_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct vpe_ctx *ctx = file->private_data; struct vpe_q_data *q_data; struct v4l2_selection sel = *s; int ret; ret = __vpe_try_selection(ctx, &sel); if (ret) return ret; q_data = get_q_data(ctx, sel.type); if (!q_data) return -EINVAL; if ((q_data->c_rect.left == sel.r.left) && (q_data->c_rect.top == sel.r.top) && (q_data->c_rect.width == sel.r.width) && (q_data->c_rect.height == sel.r.height)) { vpe_dbg(ctx->dev, "requested crop/compose values are already set\n"); return 0; } q_data->c_rect = sel.r; return set_srcdst_params(ctx); } /* * defines number of buffers/frames a context can process with VPE before * switching to a different context. 
default value is 1 buffer per context */ #define V4L2_CID_VPE_BUFS_PER_JOB (V4L2_CID_USER_TI_VPE_BASE + 0) static int vpe_s_ctrl(struct v4l2_ctrl *ctrl) { struct vpe_ctx *ctx = container_of(ctrl->handler, struct vpe_ctx, hdl); switch (ctrl->id) { case V4L2_CID_VPE_BUFS_PER_JOB: ctx->bufs_per_job = ctrl->val; break; default: vpe_err(ctx->dev, "Invalid control\n"); return -EINVAL; } return 0; } static const struct v4l2_ctrl_ops vpe_ctrl_ops = { .s_ctrl = vpe_s_ctrl, }; static const struct v4l2_ioctl_ops vpe_ioctl_ops = { .vidioc_querycap = vpe_querycap, .vidioc_enum_fmt_vid_cap = vpe_enum_fmt, .vidioc_g_fmt_vid_cap_mplane = vpe_g_fmt, .vidioc_try_fmt_vid_cap_mplane = vpe_try_fmt, .vidioc_s_fmt_vid_cap_mplane = vpe_s_fmt, .vidioc_enum_fmt_vid_out = vpe_enum_fmt, .vidioc_g_fmt_vid_out_mplane = vpe_g_fmt, .vidioc_try_fmt_vid_out_mplane = vpe_try_fmt, .vidioc_s_fmt_vid_out_mplane = vpe_s_fmt, .vidioc_g_selection = vpe_g_selection, .vidioc_s_selection = vpe_s_selection, .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs, .vidioc_querybuf = v4l2_m2m_ioctl_querybuf, .vidioc_qbuf = v4l2_m2m_ioctl_qbuf, .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf, .vidioc_expbuf = v4l2_m2m_ioctl_expbuf, .vidioc_streamon = v4l2_m2m_ioctl_streamon, .vidioc_streamoff = v4l2_m2m_ioctl_streamoff, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; /* * Queue operations */ static int vpe_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { int i; struct vpe_ctx *ctx = vb2_get_drv_priv(vq); struct vpe_q_data *q_data; struct v4l2_pix_format_mplane *pix; q_data = get_q_data(ctx, vq->type); if (!q_data) return -EINVAL; pix = &q_data->format.fmt.pix_mp; *nplanes = pix->num_planes; for (i = 0; i < *nplanes; i++) sizes[i] = pix->plane_fmt[i].sizeimage; vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers, sizes[VPE_LUMA]); if (*nplanes == 2) vpe_dbg(ctx->dev, " and %d\n", 
sizes[VPE_CHROMA]);

	return 0;
}

static int vpe_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct vpe_q_data *q_data;
	struct v4l2_pix_format_mplane *pix;
	int i;

	vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type);

	q_data = get_q_data(ctx, vb->vb2_queue->type);
	if (!q_data)
		return -EINVAL;

	pix = &q_data->format.fmt.pix_mp;

	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
		if (!(q_data->flags & Q_IS_INTERLACED)) {
			vbuf->field = V4L2_FIELD_NONE;
		} else {
			/* interlaced queues only accept these field values */
			if (vbuf->field != V4L2_FIELD_TOP &&
			    vbuf->field != V4L2_FIELD_BOTTOM &&
			    vbuf->field != V4L2_FIELD_SEQ_TB &&
			    vbuf->field != V4L2_FIELD_SEQ_BT)
				return -EINVAL;
		}
	}

	/* every plane must be large enough for the negotiated format */
	for (i = 0; i < pix->num_planes; i++) {
		if (vb2_plane_size(vb, i) < pix->plane_fmt[i].sizeimage) {
			vpe_err(ctx->dev,
				"data will not fit into plane (%lu < %lu)\n",
				vb2_plane_size(vb, i),
				(long)pix->plane_fmt[i].sizeimage);
			return -EINVAL;
		}
	}

	for (i = 0; i < pix->num_planes; i++)
		vb2_set_plane_payload(vb, i, pix->plane_fmt[i].sizeimage);

	return 0;
}

static void vpe_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

/*
 * Returns 0 when no scaling is required, or when both crop rectangles fit
 * within the scaler's pixel limits; -1 when the scaler cannot handle the
 * requested conversion.
 */
static int check_srcdst_sizes(struct vpe_ctx *ctx)
{
	struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC];
	struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST];
	unsigned int src_w = s_q_data->c_rect.width;
	unsigned int src_h = s_q_data->c_rect.height;
	unsigned int dst_w = d_q_data->c_rect.width;
	unsigned int dst_h = d_q_data->c_rect.height;

	if (src_w == dst_w && src_h == dst_h)
		return 0;

	if (src_h <= SC_MAX_PIXEL_HEIGHT &&
	    src_w <= SC_MAX_PIXEL_WIDTH &&
	    dst_h <= SC_MAX_PIXEL_HEIGHT &&
	    dst_w <= SC_MAX_PIXEL_WIDTH)
		return 0;

	return -1;
}

static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
				   enum vb2_buffer_state state)
{
	struct vb2_v4l2_buffer *vb;
	unsigned long flags;
	for (;;) {
		if (V4L2_TYPE_IS_OUTPUT(q->type))
			vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		else
			vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (!vb)
			break;
		spin_lock_irqsave(&ctx->dev->lock, flags);
		v4l2_m2m_buf_done(vb, state);
		spin_unlock_irqrestore(&ctx->dev->lock, flags);
	}

	/*
	 * Cleanup the in-transit vb2 buffers that have been
	 * removed from their respective queue already but for
	 * which processing has not been completed yet.
	 */
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		spin_lock_irqsave(&ctx->dev->lock, flags);

		/* the three slots may alias each other; complete each once */
		if (ctx->src_vbs[2])
			v4l2_m2m_buf_done(ctx->src_vbs[2], state);

		if (ctx->src_vbs[1] && (ctx->src_vbs[1] != ctx->src_vbs[2]))
			v4l2_m2m_buf_done(ctx->src_vbs[1], state);

		if (ctx->src_vbs[0] &&
		    (ctx->src_vbs[0] != ctx->src_vbs[1]) &&
		    (ctx->src_vbs[0] != ctx->src_vbs[2]))
			v4l2_m2m_buf_done(ctx->src_vbs[0], state);

		ctx->src_vbs[2] = NULL;
		ctx->src_vbs[1] = NULL;
		ctx->src_vbs[0] = NULL;

		spin_unlock_irqrestore(&ctx->dev->lock, flags);
	} else {
		if (ctx->dst_vb) {
			spin_lock_irqsave(&ctx->dev->lock, flags);

			v4l2_m2m_buf_done(ctx->dst_vb, state);
			ctx->dst_vb = NULL;
			spin_unlock_irqrestore(&ctx->dev->lock, flags);
		}
	}
}

static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);

	/* Check any of the size exceed maximum scaling sizes */
	if (check_srcdst_sizes(ctx)) {
		vpe_err(ctx->dev,
			"Conversion setup failed, check source and destination parameters\n"
			);
		vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_QUEUED);
		return -EINVAL;
	}

	if (ctx->deinterlacing)
		config_edi_input_mode(ctx, 0x0);

	if (ctx->sequence != 0)
		set_srcdst_params(ctx);

	return 0;
}

static void vpe_stop_streaming(struct vb2_queue *q)
{
	struct vpe_ctx *ctx = vb2_get_drv_priv(q);

	vpe_dump_regs(ctx->dev);
	vpdma_dump_regs(ctx->dev->vpdma);

	vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
}

static const struct vb2_ops vpe_qops = {
	.queue_setup	 = vpe_queue_setup,
	.buf_prepare	 = vpe_buf_prepare,
	.buf_queue	 = vpe_buf_queue,
	.wait_prepare	 =
vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, .start_streaming = vpe_start_streaming, .stop_streaming = vpe_stop_streaming, }; static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct vpe_ctx *ctx = priv; struct vpe_dev *dev = ctx->dev; int ret; memset(src_vq, 0, sizeof(*src_vq)); src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; src_vq->io_modes = VB2_MMAP | VB2_DMABUF; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); src_vq->ops = &vpe_qops; src_vq->mem_ops = &vb2_dma_contig_memops; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; src_vq->lock = &dev->dev_mutex; src_vq->dev = dev->v4l2_dev.dev; ret = vb2_queue_init(src_vq); if (ret) return ret; memset(dst_vq, 0, sizeof(*dst_vq)); dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; dst_vq->io_modes = VB2_MMAP | VB2_DMABUF; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); dst_vq->ops = &vpe_qops; dst_vq->mem_ops = &vb2_dma_contig_memops; dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; dst_vq->lock = &dev->dev_mutex; dst_vq->dev = dev->v4l2_dev.dev; return vb2_queue_init(dst_vq); } static const struct v4l2_ctrl_config vpe_bufs_per_job = { .ops = &vpe_ctrl_ops, .id = V4L2_CID_VPE_BUFS_PER_JOB, .name = "Buffers Per Transaction", .type = V4L2_CTRL_TYPE_INTEGER, .def = VPE_DEF_BUFS_PER_JOB, .min = 1, .max = VIDEO_MAX_FRAME, .step = 1, }; /* * File operations */ static int vpe_open(struct file *file) { struct vpe_dev *dev = video_drvdata(file); struct vpe_q_data *s_q_data; struct v4l2_ctrl_handler *hdl; struct vpe_ctx *ctx; struct v4l2_pix_format_mplane *pix; int ret; vpe_dbg(dev, "vpe_open\n"); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev = dev; if (mutex_lock_interruptible(&dev->dev_mutex)) { ret = -ERESTARTSYS; goto free_ctx; } ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE, VPDMA_LIST_TYPE_NORMAL); if (ret != 0) goto unlock; ret = 
vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb)); if (ret != 0) goto free_desc_list; ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE); if (ret != 0) goto free_mmr_adb; ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE); if (ret != 0) goto free_sc_h; init_adb_hdrs(ctx); v4l2_fh_init(&ctx->fh, video_devdata(file)); file->private_data = ctx; hdl = &ctx->hdl; v4l2_ctrl_handler_init(hdl, 1); v4l2_ctrl_new_custom(hdl, &vpe_bufs_per_job, NULL); if (hdl->error) { ret = hdl->error; goto exit_fh; } ctx->fh.ctrl_handler = hdl; v4l2_ctrl_handler_setup(hdl); s_q_data = &ctx->q_data[Q_DATA_SRC]; pix = &s_q_data->format.fmt.pix_mp; s_q_data->fmt = __find_format(V4L2_PIX_FMT_YUYV); pix->pixelformat = s_q_data->fmt->fourcc; s_q_data->format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; pix->width = 1920; pix->height = 1080; pix->num_planes = 1; pix->plane_fmt[VPE_LUMA].bytesperline = (pix->width * s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3; pix->plane_fmt[VPE_LUMA].sizeimage = pix->plane_fmt[VPE_LUMA].bytesperline * pix->height; pix->colorspace = V4L2_COLORSPACE_REC709; pix->xfer_func = V4L2_XFER_FUNC_DEFAULT; pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; pix->quantization = V4L2_QUANTIZATION_DEFAULT; pix->field = V4L2_FIELD_NONE; s_q_data->c_rect.left = 0; s_q_data->c_rect.top = 0; s_q_data->c_rect.width = pix->width; s_q_data->c_rect.height = pix->height; s_q_data->flags = 0; ctx->q_data[Q_DATA_DST] = *s_q_data; ctx->q_data[Q_DATA_DST].format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; set_dei_shadow_registers(ctx); set_src_registers(ctx); set_dst_registers(ctx); ret = set_srcdst_params(ctx); if (ret) goto exit_fh; ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init); if (IS_ERR(ctx->fh.m2m_ctx)) { ret = PTR_ERR(ctx->fh.m2m_ctx); goto exit_fh; } v4l2_fh_add(&ctx->fh); /* * for now, just report the creation of the first instance, we can later * optimize the driver to enable or disable clocks when the first * instance is created 
or the last instance released */ if (atomic_inc_return(&dev->num_instances) == 1) vpe_dbg(dev, "first instance created\n"); ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB; ctx->load_mmrs = true; vpe_dbg(dev, "created instance %p, m2m_ctx: %p\n", ctx, ctx->fh.m2m_ctx); mutex_unlock(&dev->dev_mutex); return 0; exit_fh: v4l2_ctrl_handler_free(hdl); v4l2_fh_exit(&ctx->fh); vpdma_free_desc_buf(&ctx->sc_coeff_v); free_sc_h: vpdma_free_desc_buf(&ctx->sc_coeff_h); free_mmr_adb: vpdma_free_desc_buf(&ctx->mmr_adb); free_desc_list: vpdma_free_desc_list(&ctx->desc_list); unlock: mutex_unlock(&dev->dev_mutex); free_ctx: kfree(ctx); return ret; } static int vpe_release(struct file *file) { struct vpe_dev *dev = video_drvdata(file); struct vpe_ctx *ctx = file->private_data; vpe_dbg(dev, "releasing instance %p\n", ctx); mutex_lock(&dev->dev_mutex); free_mv_buffers(ctx); vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf); vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb); vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h); vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v); vpdma_free_desc_list(&ctx->desc_list); vpdma_free_desc_buf(&ctx->mmr_adb); vpdma_free_desc_buf(&ctx->sc_coeff_v); vpdma_free_desc_buf(&ctx->sc_coeff_h); v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); v4l2_ctrl_handler_free(&ctx->hdl); v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); kfree(ctx); /* * for now, just report the release of the last instance, we can later * optimize the driver to enable or disable clocks when the first * instance is created or the last instance released */ if (atomic_dec_return(&dev->num_instances) == 0) vpe_dbg(dev, "last instance released\n"); mutex_unlock(&dev->dev_mutex); return 0; } static const struct v4l2_file_operations vpe_fops = { .owner = THIS_MODULE, .open = vpe_open, .release = vpe_release, .poll = v4l2_m2m_fop_poll, .unlocked_ioctl = video_ioctl2, .mmap = v4l2_m2m_fop_mmap, }; static const struct video_device vpe_videodev = { .name = VPE_MODULE_NAME, .fops = &vpe_fops, .ioctl_ops = 
&vpe_ioctl_ops, .minor = -1, .release = video_device_release_empty, .vfl_dir = VFL_DIR_M2M, .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING, }; static const struct v4l2_m2m_ops m2m_ops = { .device_run = device_run, .job_ready = job_ready, .job_abort = job_abort, }; static int vpe_runtime_get(struct platform_device *pdev) { int r; dev_dbg(&pdev->dev, "vpe_runtime_get\n"); r = pm_runtime_resume_and_get(&pdev->dev); WARN_ON(r < 0); return r; } static void vpe_runtime_put(struct platform_device *pdev) { int r; dev_dbg(&pdev->dev, "vpe_runtime_put\n"); r = pm_runtime_put_sync(&pdev->dev); WARN_ON(r < 0 && r != -ENOSYS); } static void vpe_fw_cb(struct platform_device *pdev) { struct vpe_dev *dev = platform_get_drvdata(pdev); struct video_device *vfd; int ret; vfd = &dev->vfd; *vfd = vpe_videodev; vfd->lock = &dev->dev_mutex; vfd->v4l2_dev = &dev->v4l2_dev; ret = video_register_device(vfd, VFL_TYPE_VIDEO, 0); if (ret) { vpe_err(dev, "Failed to register video device\n"); vpe_set_clock_enable(dev, 0); vpe_runtime_put(pdev); pm_runtime_disable(&pdev->dev); v4l2_m2m_release(dev->m2m_dev); v4l2_device_unregister(&dev->v4l2_dev); return; } video_set_drvdata(vfd, dev); dev_info(dev->v4l2_dev.dev, "Device registered as /dev/video%d\n", vfd->num); } static int vpe_probe(struct platform_device *pdev) { struct vpe_dev *dev; int ret, irq, func; ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); return ret; } dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_init(&dev->lock); ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev); if (ret) return ret; atomic_set(&dev->num_instances, 0); mutex_init(&dev->dev_mutex); dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpe_top"); if (!dev->res) { dev_err(&pdev->dev, "missing 'vpe_top' resources data\n"); return -ENODEV; } /* * HACK: we get resource info from device tree in the form 
of a list of * VPE sub blocks, the driver currently uses only the base of vpe_top * for register access, the driver should be changed later to access * registers based on the sub block base addresses */ dev->base = devm_ioremap(&pdev->dev, dev->res->start, SZ_32K); if (!dev->base) { ret = -ENOMEM; goto v4l2_dev_unreg; } irq = platform_get_irq(pdev, 0); ret = devm_request_irq(&pdev->dev, irq, vpe_irq, 0, VPE_MODULE_NAME, dev); if (ret) goto v4l2_dev_unreg; platform_set_drvdata(pdev, dev); dev->m2m_dev = v4l2_m2m_init(&m2m_ops); if (IS_ERR(dev->m2m_dev)) { vpe_err(dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(dev->m2m_dev); goto v4l2_dev_unreg; } pm_runtime_enable(&pdev->dev); ret = vpe_runtime_get(pdev); if (ret < 0) goto rel_m2m; /* Perform clk enable followed by reset */ vpe_set_clock_enable(dev, 1); vpe_top_reset(dev); func = read_field_reg(dev, VPE_PID, VPE_PID_FUNC_MASK, VPE_PID_FUNC_SHIFT); vpe_dbg(dev, "VPE PID function %x\n", func); vpe_top_vpdma_reset(dev); dev->sc = sc_create(pdev, "sc"); if (IS_ERR(dev->sc)) { ret = PTR_ERR(dev->sc); goto runtime_put; } dev->csc = csc_create(pdev, "csc"); if (IS_ERR(dev->csc)) { ret = PTR_ERR(dev->csc); goto runtime_put; } dev->vpdma = &dev->vpdma_data; ret = vpdma_create(pdev, dev->vpdma, vpe_fw_cb); if (ret) goto runtime_put; return 0; runtime_put: vpe_runtime_put(pdev); rel_m2m: pm_runtime_disable(&pdev->dev); v4l2_m2m_release(dev->m2m_dev); v4l2_dev_unreg: v4l2_device_unregister(&dev->v4l2_dev); return ret; } static void vpe_remove(struct platform_device *pdev) { struct vpe_dev *dev = platform_get_drvdata(pdev); v4l2_info(&dev->v4l2_dev, "Removing " VPE_MODULE_NAME); v4l2_m2m_release(dev->m2m_dev); video_unregister_device(&dev->vfd); v4l2_device_unregister(&dev->v4l2_dev); vpe_set_clock_enable(dev, 0); vpe_runtime_put(pdev); pm_runtime_disable(&pdev->dev); } #if defined(CONFIG_OF) static const struct of_device_id vpe_of_match[] = { { .compatible = "ti,dra7-vpe", }, {}, }; MODULE_DEVICE_TABLE(of, 
vpe_of_match); #endif static struct platform_driver vpe_pdrv = { .probe = vpe_probe, .remove_new = vpe_remove, .driver = { .name = VPE_MODULE_NAME, .of_match_table = of_match_ptr(vpe_of_match), }, }; module_platform_driver(vpe_pdrv); MODULE_DESCRIPTION("TI VPE driver"); MODULE_AUTHOR("Dale Farnsworth, <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/media/platform/ti/vpe/vpe.c
// SPDX-License-Identifier: GPL-2.0-only /* * VPDMA helper library * * Copyright (c) 2013 Texas Instruments Inc. * * David Griego, <[email protected]> * Dale Farnsworth, <[email protected]> * Archit Taneja, <[email protected]> */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/firmware.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/videodev2.h> #include "vpdma.h" #include "vpdma_priv.h" #define VPDMA_FIRMWARE "vpdma-1b8.bin" const struct vpdma_data_format vpdma_yuv_fmts[] = { [VPDMA_DATA_FMT_Y444] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_Y444, .depth = 8, }, [VPDMA_DATA_FMT_Y422] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_Y422, .depth = 8, }, [VPDMA_DATA_FMT_Y420] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_Y420, .depth = 8, }, [VPDMA_DATA_FMT_C444] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_C444, .depth = 8, }, [VPDMA_DATA_FMT_C422] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_C422, .depth = 8, }, [VPDMA_DATA_FMT_C420] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_C420, .depth = 4, }, [VPDMA_DATA_FMT_CB420] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_CB420, .depth = 4, }, [VPDMA_DATA_FMT_YCR422] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_YCR422, .depth = 16, }, [VPDMA_DATA_FMT_YC444] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_YC444, .depth = 24, }, [VPDMA_DATA_FMT_CRY422] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_CRY422, .depth = 16, }, [VPDMA_DATA_FMT_CBY422] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_CBY422, .depth = 16, }, [VPDMA_DATA_FMT_YCB422] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_YCB422, .depth = 16, }, }; EXPORT_SYMBOL(vpdma_yuv_fmts); const struct vpdma_data_format vpdma_rgb_fmts[] = { 
[VPDMA_DATA_FMT_RGB565] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_RGB16_565, .depth = 16, }, [VPDMA_DATA_FMT_ARGB16_1555] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_ARGB_1555, .depth = 16, }, [VPDMA_DATA_FMT_ARGB16] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_ARGB_4444, .depth = 16, }, [VPDMA_DATA_FMT_RGBA16_5551] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_RGBA_5551, .depth = 16, }, [VPDMA_DATA_FMT_RGBA16] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_RGBA_4444, .depth = 16, }, [VPDMA_DATA_FMT_ARGB24] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_ARGB24_6666, .depth = 24, }, [VPDMA_DATA_FMT_RGB24] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_RGB24_888, .depth = 24, }, [VPDMA_DATA_FMT_ARGB32] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_ARGB32_8888, .depth = 32, }, [VPDMA_DATA_FMT_RGBA24] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_RGBA24_6666, .depth = 24, }, [VPDMA_DATA_FMT_RGBA32] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_RGBA32_8888, .depth = 32, }, [VPDMA_DATA_FMT_BGR565] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_BGR16_565, .depth = 16, }, [VPDMA_DATA_FMT_ABGR16_1555] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_ABGR_1555, .depth = 16, }, [VPDMA_DATA_FMT_ABGR16] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_ABGR_4444, .depth = 16, }, [VPDMA_DATA_FMT_BGRA16_5551] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_BGRA_5551, .depth = 16, }, [VPDMA_DATA_FMT_BGRA16] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_BGRA_4444, .depth = 16, }, [VPDMA_DATA_FMT_ABGR24] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_ABGR24_6666, .depth = 24, }, [VPDMA_DATA_FMT_BGR24] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_BGR24_888, .depth = 24, }, [VPDMA_DATA_FMT_ABGR32] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = 
DATA_TYPE_ABGR32_8888, .depth = 32, }, [VPDMA_DATA_FMT_BGRA24] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_BGRA24_6666, .depth = 24, }, [VPDMA_DATA_FMT_BGRA32] = { .type = VPDMA_DATA_FMT_TYPE_RGB, .data_type = DATA_TYPE_BGRA32_8888, .depth = 32, }, }; EXPORT_SYMBOL(vpdma_rgb_fmts); /* * To handle RAW format we are re-using the CBY422 * vpdma data type so that we use the vpdma to re-order * the incoming bytes, as the parser assumes that the * first byte presented on the bus is the MSB of a 2 * bytes value. * RAW8 handles from 1 to 8 bits * RAW16 handles from 9 to 16 bits */ const struct vpdma_data_format vpdma_raw_fmts[] = { [VPDMA_DATA_FMT_RAW8] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_CBY422, .depth = 8, }, [VPDMA_DATA_FMT_RAW16] = { .type = VPDMA_DATA_FMT_TYPE_YUV, .data_type = DATA_TYPE_CBY422, .depth = 16, }, }; EXPORT_SYMBOL(vpdma_raw_fmts); const struct vpdma_data_format vpdma_misc_fmts[] = { [VPDMA_DATA_FMT_MV] = { .type = VPDMA_DATA_FMT_TYPE_MISC, .data_type = DATA_TYPE_MV, .depth = 4, }, }; EXPORT_SYMBOL(vpdma_misc_fmts); struct vpdma_channel_info { int num; /* VPDMA channel number */ int cstat_offset; /* client CSTAT register offset */ }; static const struct vpdma_channel_info chan_info[] = { [VPE_CHAN_LUMA1_IN] = { .num = VPE_CHAN_NUM_LUMA1_IN, .cstat_offset = VPDMA_DEI_LUMA1_CSTAT, }, [VPE_CHAN_CHROMA1_IN] = { .num = VPE_CHAN_NUM_CHROMA1_IN, .cstat_offset = VPDMA_DEI_CHROMA1_CSTAT, }, [VPE_CHAN_LUMA2_IN] = { .num = VPE_CHAN_NUM_LUMA2_IN, .cstat_offset = VPDMA_DEI_LUMA2_CSTAT, }, [VPE_CHAN_CHROMA2_IN] = { .num = VPE_CHAN_NUM_CHROMA2_IN, .cstat_offset = VPDMA_DEI_CHROMA2_CSTAT, }, [VPE_CHAN_LUMA3_IN] = { .num = VPE_CHAN_NUM_LUMA3_IN, .cstat_offset = VPDMA_DEI_LUMA3_CSTAT, }, [VPE_CHAN_CHROMA3_IN] = { .num = VPE_CHAN_NUM_CHROMA3_IN, .cstat_offset = VPDMA_DEI_CHROMA3_CSTAT, }, [VPE_CHAN_MV_IN] = { .num = VPE_CHAN_NUM_MV_IN, .cstat_offset = VPDMA_DEI_MV_IN_CSTAT, }, [VPE_CHAN_MV_OUT] = { .num = VPE_CHAN_NUM_MV_OUT, 
.cstat_offset = VPDMA_DEI_MV_OUT_CSTAT, }, [VPE_CHAN_LUMA_OUT] = { .num = VPE_CHAN_NUM_LUMA_OUT, .cstat_offset = VPDMA_VIP_UP_Y_CSTAT, }, [VPE_CHAN_CHROMA_OUT] = { .num = VPE_CHAN_NUM_CHROMA_OUT, .cstat_offset = VPDMA_VIP_UP_UV_CSTAT, }, [VPE_CHAN_RGB_OUT] = { .num = VPE_CHAN_NUM_RGB_OUT, .cstat_offset = VPDMA_VIP_UP_Y_CSTAT, }, }; static u32 read_reg(struct vpdma_data *vpdma, int offset) { return ioread32(vpdma->base + offset); } static void write_reg(struct vpdma_data *vpdma, int offset, u32 value) { iowrite32(value, vpdma->base + offset); } static int read_field_reg(struct vpdma_data *vpdma, int offset, u32 mask, int shift) { return (read_reg(vpdma, offset) & (mask << shift)) >> shift; } static void write_field_reg(struct vpdma_data *vpdma, int offset, u32 field, u32 mask, int shift) { u32 val = read_reg(vpdma, offset); val &= ~(mask << shift); val |= (field & mask) << shift; write_reg(vpdma, offset, val); } void vpdma_dump_regs(struct vpdma_data *vpdma) { struct device *dev = &vpdma->pdev->dev; #define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, read_reg(vpdma, VPDMA_##r)) dev_dbg(dev, "VPDMA Registers:\n"); DUMPREG(PID); DUMPREG(LIST_ADDR); DUMPREG(LIST_ATTR); DUMPREG(LIST_STAT_SYNC); DUMPREG(BG_RGB); DUMPREG(BG_YUV); DUMPREG(SETUP); DUMPREG(MAX_SIZE1); DUMPREG(MAX_SIZE2); DUMPREG(MAX_SIZE3); /* * dumping registers of only group0 and group3, because VPE channels * lie within group0 and group3 registers */ DUMPREG(INT_CHAN_STAT(0)); DUMPREG(INT_CHAN_MASK(0)); DUMPREG(INT_CHAN_STAT(3)); DUMPREG(INT_CHAN_MASK(3)); DUMPREG(INT_CLIENT0_STAT); DUMPREG(INT_CLIENT0_MASK); DUMPREG(INT_CLIENT1_STAT); DUMPREG(INT_CLIENT1_MASK); DUMPREG(INT_LIST0_STAT); DUMPREG(INT_LIST0_MASK); /* * these are registers specific to VPE clients, we can make this * function dump client registers specific to VPE or VIP based on * who is using it */ DUMPREG(DEI_CHROMA1_CSTAT); DUMPREG(DEI_LUMA1_CSTAT); DUMPREG(DEI_CHROMA2_CSTAT); DUMPREG(DEI_LUMA2_CSTAT); DUMPREG(DEI_CHROMA3_CSTAT); 
DUMPREG(DEI_LUMA3_CSTAT); DUMPREG(DEI_MV_IN_CSTAT); DUMPREG(DEI_MV_OUT_CSTAT); DUMPREG(VIP_UP_Y_CSTAT); DUMPREG(VIP_UP_UV_CSTAT); DUMPREG(VPI_CTL_CSTAT); } EXPORT_SYMBOL(vpdma_dump_regs); /* * Allocate a DMA buffer */ int vpdma_alloc_desc_buf(struct vpdma_buf *buf, size_t size) { buf->size = size; buf->mapped = false; buf->addr = kzalloc(size, GFP_KERNEL); if (!buf->addr) return -ENOMEM; WARN_ON(((unsigned long)buf->addr & VPDMA_DESC_ALIGN) != 0); return 0; } EXPORT_SYMBOL(vpdma_alloc_desc_buf); void vpdma_free_desc_buf(struct vpdma_buf *buf) { WARN_ON(buf->mapped); kfree(buf->addr); buf->addr = NULL; buf->size = 0; } EXPORT_SYMBOL(vpdma_free_desc_buf); /* * map descriptor/payload DMA buffer, enabling DMA access */ int vpdma_map_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf) { struct device *dev = &vpdma->pdev->dev; WARN_ON(buf->mapped); buf->dma_addr = dma_map_single(dev, buf->addr, buf->size, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, buf->dma_addr)) { dev_err(dev, "failed to map buffer\n"); return -EINVAL; } buf->mapped = true; return 0; } EXPORT_SYMBOL(vpdma_map_desc_buf); /* * unmap descriptor/payload DMA buffer, disabling DMA access and * allowing the main processor to access the data */ void vpdma_unmap_desc_buf(struct vpdma_data *vpdma, struct vpdma_buf *buf) { struct device *dev = &vpdma->pdev->dev; if (buf->mapped) dma_unmap_single(dev, buf->dma_addr, buf->size, DMA_BIDIRECTIONAL); buf->mapped = false; } EXPORT_SYMBOL(vpdma_unmap_desc_buf); /* * Cleanup all pending descriptors of a list * First, stop the current list being processed. * If the VPDMA was busy, this step makes vpdma to accept post lists. * To cleanup the internal FSM, post abort list descriptor for all the * channels from @channels array of size @size. 
*/ int vpdma_list_cleanup(struct vpdma_data *vpdma, int list_num, int *channels, int size) { struct vpdma_desc_list abort_list; int i, ret, timeout = 500; write_reg(vpdma, VPDMA_LIST_ATTR, (list_num << VPDMA_LIST_NUM_SHFT) | (1 << VPDMA_LIST_STOP_SHFT)); if (size <= 0 || !channels) return 0; ret = vpdma_create_desc_list(&abort_list, size * sizeof(struct vpdma_dtd), VPDMA_LIST_TYPE_NORMAL); if (ret) return ret; for (i = 0; i < size; i++) vpdma_add_abort_channel_ctd(&abort_list, channels[i]); ret = vpdma_map_desc_buf(vpdma, &abort_list.buf); if (ret) goto free_desc; ret = vpdma_submit_descs(vpdma, &abort_list, list_num); if (ret) goto unmap_desc; while (vpdma_list_busy(vpdma, list_num) && --timeout) ; if (timeout == 0) { dev_err(&vpdma->pdev->dev, "Timed out cleaning up VPDMA list\n"); ret = -EBUSY; } unmap_desc: vpdma_unmap_desc_buf(vpdma, &abort_list.buf); free_desc: vpdma_free_desc_buf(&abort_list.buf); return ret; } EXPORT_SYMBOL(vpdma_list_cleanup); /* * create a descriptor list, the user of this list will append configuration, * control and data descriptors to this list, this list will be submitted to * VPDMA. VPDMA's list parser will go through each descriptor and perform the * required DMA operations */ int vpdma_create_desc_list(struct vpdma_desc_list *list, size_t size, int type) { int r; r = vpdma_alloc_desc_buf(&list->buf, size); if (r) return r; list->next = list->buf.addr; list->type = type; return 0; } EXPORT_SYMBOL(vpdma_create_desc_list); /* * once a descriptor list is parsed by VPDMA, we reset the list by emptying it, * to allow new descriptors to be added to the list. */ void vpdma_reset_desc_list(struct vpdma_desc_list *list) { list->next = list->buf.addr; } EXPORT_SYMBOL(vpdma_reset_desc_list); /* * free the buffer allocated for the VPDMA descriptor list, this should be * called when the user doesn't want to use VPDMA any more. 
*/ void vpdma_free_desc_list(struct vpdma_desc_list *list) { vpdma_free_desc_buf(&list->buf); list->next = NULL; } EXPORT_SYMBOL(vpdma_free_desc_list); bool vpdma_list_busy(struct vpdma_data *vpdma, int list_num) { return read_reg(vpdma, VPDMA_LIST_STAT_SYNC) & BIT(list_num + 16); } EXPORT_SYMBOL(vpdma_list_busy); /* * submit a list of DMA descriptors to the VPE VPDMA, do not wait for completion */ int vpdma_submit_descs(struct vpdma_data *vpdma, struct vpdma_desc_list *list, int list_num) { int list_size; unsigned long flags; if (vpdma_list_busy(vpdma, list_num)) return -EBUSY; /* 16-byte granularity */ list_size = (list->next - list->buf.addr) >> 4; spin_lock_irqsave(&vpdma->lock, flags); write_reg(vpdma, VPDMA_LIST_ADDR, (u32) list->buf.dma_addr); write_reg(vpdma, VPDMA_LIST_ATTR, (list_num << VPDMA_LIST_NUM_SHFT) | (list->type << VPDMA_LIST_TYPE_SHFT) | list_size); spin_unlock_irqrestore(&vpdma->lock, flags); return 0; } EXPORT_SYMBOL(vpdma_submit_descs); static void dump_dtd(struct vpdma_dtd *dtd); void vpdma_update_dma_addr(struct vpdma_data *vpdma, struct vpdma_desc_list *list, dma_addr_t dma_addr, void *write_dtd, int drop, int idx) { struct vpdma_dtd *dtd = list->buf.addr; dma_addr_t write_desc_addr; int offset; dtd += idx; vpdma_unmap_desc_buf(vpdma, &list->buf); dtd->start_addr = dma_addr; /* Calculate write address from the offset of write_dtd from start * of the list->buf */ offset = (void *)write_dtd - list->buf.addr; write_desc_addr = list->buf.dma_addr + offset; if (drop) dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr, 1, 1, 0); else dtd->desc_write_addr = dtd_desc_write_addr(write_desc_addr, 1, 0, 0); vpdma_map_desc_buf(vpdma, &list->buf); dump_dtd(dtd); } EXPORT_SYMBOL(vpdma_update_dma_addr); void vpdma_set_max_size(struct vpdma_data *vpdma, int reg_addr, u32 width, u32 height) { if (reg_addr != VPDMA_MAX_SIZE1 && reg_addr != VPDMA_MAX_SIZE2 && reg_addr != VPDMA_MAX_SIZE3) reg_addr = VPDMA_MAX_SIZE1; write_field_reg(vpdma, reg_addr, 
width - 1, VPDMA_MAX_SIZE_WIDTH_MASK, VPDMA_MAX_SIZE_WIDTH_SHFT); write_field_reg(vpdma, reg_addr, height - 1, VPDMA_MAX_SIZE_HEIGHT_MASK, VPDMA_MAX_SIZE_HEIGHT_SHFT); } EXPORT_SYMBOL(vpdma_set_max_size); static void dump_cfd(struct vpdma_cfd *cfd) { int class; class = cfd_get_class(cfd); pr_debug("config descriptor of payload class: %s\n", class == CFD_CLS_BLOCK ? "simple block" : "address data block"); if (class == CFD_CLS_BLOCK) pr_debug("word0: dst_addr_offset = 0x%08x\n", cfd->dest_addr_offset); if (class == CFD_CLS_BLOCK) pr_debug("word1: num_data_wrds = %d\n", cfd->block_len); pr_debug("word2: payload_addr = 0x%08x\n", cfd->payload_addr); pr_debug("word3: pkt_type = %d, direct = %d, class = %d, dest = %d, payload_len = %d\n", cfd_get_pkt_type(cfd), cfd_get_direct(cfd), class, cfd_get_dest(cfd), cfd_get_payload_len(cfd)); } /* * append a configuration descriptor to the given descriptor list, where the * payload is in the form of a simple data block specified in the descriptor * header, this is used to upload scaler coefficients to the scaler module */ void vpdma_add_cfd_block(struct vpdma_desc_list *list, int client, struct vpdma_buf *blk, u32 dest_offset) { struct vpdma_cfd *cfd; int len = blk->size; WARN_ON(blk->dma_addr & VPDMA_DESC_ALIGN); cfd = list->next; WARN_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size)); cfd->dest_addr_offset = dest_offset; cfd->block_len = len; cfd->payload_addr = (u32) blk->dma_addr; cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_BLOCK, client, len >> 4); list->next = cfd + 1; dump_cfd(cfd); } EXPORT_SYMBOL(vpdma_add_cfd_block); /* * append a configuration descriptor to the given descriptor list, where the * payload is in the address data block format, this is used to a configure a * discontiguous set of MMRs */ void vpdma_add_cfd_adb(struct vpdma_desc_list *list, int client, struct vpdma_buf *adb) { struct vpdma_cfd *cfd; unsigned int len = adb->size; WARN_ON(len & VPDMA_ADB_SIZE_ALIGN); 
WARN_ON(adb->dma_addr & VPDMA_DESC_ALIGN); cfd = list->next; BUG_ON((void *)(cfd + 1) > (list->buf.addr + list->buf.size)); cfd->w0 = 0; cfd->w1 = 0; cfd->payload_addr = (u32) adb->dma_addr; cfd->ctl_payload_len = cfd_pkt_payload_len(CFD_INDIRECT, CFD_CLS_ADB, client, len >> 4); list->next = cfd + 1; dump_cfd(cfd); }; EXPORT_SYMBOL(vpdma_add_cfd_adb); /* * control descriptor format change based on what type of control descriptor it * is, we only use 'sync on channel' control descriptors for now, so assume it's * that */ static void dump_ctd(struct vpdma_ctd *ctd) { pr_debug("control descriptor\n"); pr_debug("word3: pkt_type = %d, source = %d, ctl_type = %d\n", ctd_get_pkt_type(ctd), ctd_get_source(ctd), ctd_get_ctl(ctd)); } /* * append a 'sync on channel' type control descriptor to the given descriptor * list, this descriptor stalls the VPDMA list till the time DMA is completed * on the specified channel */ void vpdma_add_sync_on_channel_ctd(struct vpdma_desc_list *list, enum vpdma_channel chan) { struct vpdma_ctd *ctd; ctd = list->next; WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size)); ctd->w0 = 0; ctd->w1 = 0; ctd->w2 = 0; ctd->type_source_ctl = ctd_type_source_ctl(chan_info[chan].num, CTD_TYPE_SYNC_ON_CHANNEL); list->next = ctd + 1; dump_ctd(ctd); } EXPORT_SYMBOL(vpdma_add_sync_on_channel_ctd); /* * append an 'abort_channel' type control descriptor to the given descriptor * list, this descriptor aborts any DMA transaction happening using the * specified channel */ void vpdma_add_abort_channel_ctd(struct vpdma_desc_list *list, int chan_num) { struct vpdma_ctd *ctd; ctd = list->next; WARN_ON((void *)(ctd + 1) > (list->buf.addr + list->buf.size)); ctd->w0 = 0; ctd->w1 = 0; ctd->w2 = 0; ctd->type_source_ctl = ctd_type_source_ctl(chan_num, CTD_TYPE_ABORT_CHANNEL); list->next = ctd + 1; dump_ctd(ctd); } EXPORT_SYMBOL(vpdma_add_abort_channel_ctd); static void dump_dtd(struct vpdma_dtd *dtd) { int dir, chan; dir = dtd_get_dir(dtd); chan = 
dtd_get_chan(dtd); pr_debug("%s data transfer descriptor for channel %d\n", dir == DTD_DIR_OUT ? "outbound" : "inbound", chan); pr_debug("word0: data_type = %d, notify = %d, field = %d, 1D = %d, even_ln_skp = %d, odd_ln_skp = %d, line_stride = %d\n", dtd_get_data_type(dtd), dtd_get_notify(dtd), dtd_get_field(dtd), dtd_get_1d(dtd), dtd_get_even_line_skip(dtd), dtd_get_odd_line_skip(dtd), dtd_get_line_stride(dtd)); if (dir == DTD_DIR_IN) pr_debug("word1: line_length = %d, xfer_height = %d\n", dtd_get_line_length(dtd), dtd_get_xfer_height(dtd)); pr_debug("word2: start_addr = %x\n", dtd->start_addr); pr_debug("word3: pkt_type = %d, mode = %d, dir = %d, chan = %d, pri = %d, next_chan = %d\n", dtd_get_pkt_type(dtd), dtd_get_mode(dtd), dir, chan, dtd_get_priority(dtd), dtd_get_next_chan(dtd)); if (dir == DTD_DIR_IN) pr_debug("word4: frame_width = %d, frame_height = %d\n", dtd_get_frame_width(dtd), dtd_get_frame_height(dtd)); else pr_debug("word4: desc_write_addr = 0x%08x, write_desc = %d, drp_data = %d, use_desc_reg = %d\n", dtd_get_desc_write_addr(dtd), dtd_get_write_desc(dtd), dtd_get_drop_data(dtd), dtd_get_use_desc(dtd)); if (dir == DTD_DIR_IN) pr_debug("word5: hor_start = %d, ver_start = %d\n", dtd_get_h_start(dtd), dtd_get_v_start(dtd)); else pr_debug("word5: max_width %d, max_height %d\n", dtd_get_max_width(dtd), dtd_get_max_height(dtd)); pr_debug("word6: client specific attr0 = 0x%08x\n", dtd->client_attr0); pr_debug("word7: client specific attr1 = 0x%08x\n", dtd->client_attr1); } /* * append an outbound data transfer descriptor to the given descriptor list, * this sets up a 'client to memory' VPDMA transfer for the given VPDMA channel * * @list: vpdma desc list to which we add this descriptor * @width: width of the image in pixels in memory * @c_rect: compose params of output image * @fmt: vpdma data format of the buffer * dma_addr: dma address as seen by VPDMA * max_width: enum for maximum width of data transfer * max_height: enum for maximum height of data 
transfer * chan: VPDMA channel * flags: VPDMA flags to configure some descriptor fields */ void vpdma_add_out_dtd(struct vpdma_desc_list *list, int width, int stride, const struct v4l2_rect *c_rect, const struct vpdma_data_format *fmt, dma_addr_t dma_addr, int max_w, int max_h, enum vpdma_channel chan, u32 flags) { vpdma_rawchan_add_out_dtd(list, width, stride, c_rect, fmt, dma_addr, max_w, max_h, chan_info[chan].num, flags); } EXPORT_SYMBOL(vpdma_add_out_dtd); void vpdma_rawchan_add_out_dtd(struct vpdma_desc_list *list, int width, int stride, const struct v4l2_rect *c_rect, const struct vpdma_data_format *fmt, dma_addr_t dma_addr, int max_w, int max_h, int raw_vpdma_chan, u32 flags) { int priority = 0; int field = 0; int notify = 1; int channel, next_chan; struct v4l2_rect rect = *c_rect; int depth = fmt->depth; struct vpdma_dtd *dtd; channel = next_chan = raw_vpdma_chan; if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV && (fmt->data_type == DATA_TYPE_C420 || fmt->data_type == DATA_TYPE_CB420)) { rect.height >>= 1; rect.top >>= 1; depth = 8; } dma_addr += rect.top * stride + (rect.left * depth >> 3); dtd = list->next; WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size)); dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type, notify, field, !!(flags & VPDMA_DATA_FRAME_1D), !!(flags & VPDMA_DATA_EVEN_LINE_SKIP), !!(flags & VPDMA_DATA_ODD_LINE_SKIP), stride); dtd->w1 = 0; dtd->start_addr = (u32) dma_addr; dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED), DTD_DIR_OUT, channel, priority, next_chan); dtd->desc_write_addr = dtd_desc_write_addr(0, 0, 0, 0); dtd->max_width_height = dtd_max_width_height(max_w, max_h); dtd->client_attr0 = 0; dtd->client_attr1 = 0; list->next = dtd + 1; dump_dtd(dtd); } EXPORT_SYMBOL(vpdma_rawchan_add_out_dtd); /* * append an inbound data transfer descriptor to the given descriptor list, * this sets up a 'memory to client' VPDMA transfer for the given VPDMA channel * * @list: vpdma desc list to which we add this descriptor * 
@width: width of the image in pixels in memory(not the cropped width) * @c_rect: crop params of input image * @fmt: vpdma data format of the buffer * dma_addr: dma address as seen by VPDMA * chan: VPDMA channel * field: top or bottom field info of the input image * flags: VPDMA flags to configure some descriptor fields * frame_width/height: the complete width/height of the image presented to the * client (this makes sense when multiple channels are * connected to the same client, forming a larger frame) * start_h, start_v: position where the given channel starts providing pixel * data to the client (makes sense when multiple channels * contribute to the client) */ void vpdma_add_in_dtd(struct vpdma_desc_list *list, int width, int stride, const struct v4l2_rect *c_rect, const struct vpdma_data_format *fmt, dma_addr_t dma_addr, enum vpdma_channel chan, int field, u32 flags, int frame_width, int frame_height, int start_h, int start_v) { int priority = 0; int notify = 1; int depth = fmt->depth; int channel, next_chan; struct v4l2_rect rect = *c_rect; struct vpdma_dtd *dtd; channel = next_chan = chan_info[chan].num; if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV && (fmt->data_type == DATA_TYPE_C420 || fmt->data_type == DATA_TYPE_CB420)) { rect.height >>= 1; rect.top >>= 1; depth = 8; } dma_addr += rect.top * stride + (rect.left * depth >> 3); dtd = list->next; WARN_ON((void *)(dtd + 1) > (list->buf.addr + list->buf.size)); dtd->type_ctl_stride = dtd_type_ctl_stride(fmt->data_type, notify, field, !!(flags & VPDMA_DATA_FRAME_1D), !!(flags & VPDMA_DATA_EVEN_LINE_SKIP), !!(flags & VPDMA_DATA_ODD_LINE_SKIP), stride); dtd->xfer_length_height = dtd_xfer_length_height(rect.width, rect.height); dtd->start_addr = (u32) dma_addr; dtd->pkt_ctl = dtd_pkt_ctl(!!(flags & VPDMA_DATA_MODE_TILED), DTD_DIR_IN, channel, priority, next_chan); dtd->frame_width_height = dtd_frame_width_height(frame_width, frame_height); dtd->start_h_v = dtd_start_h_v(start_h, start_v); dtd->client_attr0 = 0; 
dtd->client_attr1 = 0; list->next = dtd + 1; dump_dtd(dtd); } EXPORT_SYMBOL(vpdma_add_in_dtd); int vpdma_hwlist_alloc(struct vpdma_data *vpdma, void *priv) { int i, list_num = -1; unsigned long flags; spin_lock_irqsave(&vpdma->lock, flags); for (i = 0; i < VPDMA_MAX_NUM_LIST && vpdma->hwlist_used[i]; i++) ; if (i < VPDMA_MAX_NUM_LIST) { list_num = i; vpdma->hwlist_used[i] = true; vpdma->hwlist_priv[i] = priv; } spin_unlock_irqrestore(&vpdma->lock, flags); return list_num; } EXPORT_SYMBOL(vpdma_hwlist_alloc); void *vpdma_hwlist_get_priv(struct vpdma_data *vpdma, int list_num) { if (!vpdma || list_num >= VPDMA_MAX_NUM_LIST) return NULL; return vpdma->hwlist_priv[list_num]; } EXPORT_SYMBOL(vpdma_hwlist_get_priv); void *vpdma_hwlist_release(struct vpdma_data *vpdma, int list_num) { void *priv; unsigned long flags; spin_lock_irqsave(&vpdma->lock, flags); vpdma->hwlist_used[list_num] = false; priv = vpdma->hwlist_priv; spin_unlock_irqrestore(&vpdma->lock, flags); return priv; } EXPORT_SYMBOL(vpdma_hwlist_release); /* set or clear the mask for list complete interrupt */ void vpdma_enable_list_complete_irq(struct vpdma_data *vpdma, int irq_num, int list_num, bool enable) { u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num; u32 val; val = read_reg(vpdma, reg_addr); if (enable) val |= (1 << (list_num * 2)); else val &= ~(1 << (list_num * 2)); write_reg(vpdma, reg_addr, val); } EXPORT_SYMBOL(vpdma_enable_list_complete_irq); /* get the LIST_STAT register */ unsigned int vpdma_get_list_stat(struct vpdma_data *vpdma, int irq_num) { u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num; return read_reg(vpdma, reg_addr); } EXPORT_SYMBOL(vpdma_get_list_stat); /* get the LIST_MASK register */ unsigned int vpdma_get_list_mask(struct vpdma_data *vpdma, int irq_num) { u32 reg_addr = VPDMA_INT_LIST0_MASK + VPDMA_INTX_OFFSET * irq_num; return read_reg(vpdma, reg_addr); } EXPORT_SYMBOL(vpdma_get_list_mask); /* clear previously occurred list interrupts in the 
LIST_STAT register */ void vpdma_clear_list_stat(struct vpdma_data *vpdma, int irq_num, int list_num) { u32 reg_addr = VPDMA_INT_LIST0_STAT + VPDMA_INTX_OFFSET * irq_num; write_reg(vpdma, reg_addr, 3 << (list_num * 2)); } EXPORT_SYMBOL(vpdma_clear_list_stat); void vpdma_set_bg_color(struct vpdma_data *vpdma, struct vpdma_data_format *fmt, u32 color) { if (fmt->type == VPDMA_DATA_FMT_TYPE_RGB) write_reg(vpdma, VPDMA_BG_RGB, color); else if (fmt->type == VPDMA_DATA_FMT_TYPE_YUV) write_reg(vpdma, VPDMA_BG_YUV, color); } EXPORT_SYMBOL(vpdma_set_bg_color); /* * configures the output mode of the line buffer for the given client, the * line buffer content can either be mirrored(each line repeated twice) or * passed to the client as is */ void vpdma_set_line_mode(struct vpdma_data *vpdma, int line_mode, enum vpdma_channel chan) { int client_cstat = chan_info[chan].cstat_offset; write_field_reg(vpdma, client_cstat, line_mode, VPDMA_CSTAT_LINE_MODE_MASK, VPDMA_CSTAT_LINE_MODE_SHIFT); } EXPORT_SYMBOL(vpdma_set_line_mode); /* * configures the event which should trigger VPDMA transfer for the given * client */ void vpdma_set_frame_start_event(struct vpdma_data *vpdma, enum vpdma_frame_start_event fs_event, enum vpdma_channel chan) { int client_cstat = chan_info[chan].cstat_offset; write_field_reg(vpdma, client_cstat, fs_event, VPDMA_CSTAT_FRAME_START_MASK, VPDMA_CSTAT_FRAME_START_SHIFT); } EXPORT_SYMBOL(vpdma_set_frame_start_event); static void vpdma_firmware_cb(const struct firmware *f, void *context) { struct vpdma_data *vpdma = context; struct vpdma_buf fw_dma_buf; int i, r; dev_dbg(&vpdma->pdev->dev, "firmware callback\n"); if (!f || !f->data) { dev_err(&vpdma->pdev->dev, "couldn't get firmware\n"); return; } /* already initialized */ if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK, VPDMA_LIST_RDY_SHFT)) { vpdma->cb(vpdma->pdev); return; } r = vpdma_alloc_desc_buf(&fw_dma_buf, f->size); if (r) { dev_err(&vpdma->pdev->dev, "failed to allocate dma buffer for 
firmware\n"); goto rel_fw; } memcpy(fw_dma_buf.addr, f->data, f->size); vpdma_map_desc_buf(vpdma, &fw_dma_buf); write_reg(vpdma, VPDMA_LIST_ADDR, (u32) fw_dma_buf.dma_addr); for (i = 0; i < 100; i++) { /* max 1 second */ msleep_interruptible(10); if (read_field_reg(vpdma, VPDMA_LIST_ATTR, VPDMA_LIST_RDY_MASK, VPDMA_LIST_RDY_SHFT)) break; } if (i == 100) { dev_err(&vpdma->pdev->dev, "firmware upload failed\n"); goto free_buf; } vpdma->cb(vpdma->pdev); free_buf: vpdma_unmap_desc_buf(vpdma, &fw_dma_buf); vpdma_free_desc_buf(&fw_dma_buf); rel_fw: release_firmware(f); } static int vpdma_load_firmware(struct vpdma_data *vpdma) { int r; struct device *dev = &vpdma->pdev->dev; r = request_firmware_nowait(THIS_MODULE, 1, (const char *) VPDMA_FIRMWARE, dev, GFP_KERNEL, vpdma, vpdma_firmware_cb); if (r) { dev_err(dev, "firmware not available %s\n", VPDMA_FIRMWARE); return r; } else { dev_info(dev, "loading firmware %s\n", VPDMA_FIRMWARE); } return 0; } int vpdma_create(struct platform_device *pdev, struct vpdma_data *vpdma, void (*cb)(struct platform_device *pdev)) { struct resource *res; int r; dev_dbg(&pdev->dev, "vpdma_create\n"); vpdma->pdev = pdev; vpdma->cb = cb; spin_lock_init(&vpdma->lock); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpdma"); if (res == NULL) { dev_err(&pdev->dev, "missing platform resources data\n"); return -ENODEV; } vpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!vpdma->base) { dev_err(&pdev->dev, "failed to ioremap\n"); return -ENOMEM; } r = vpdma_load_firmware(vpdma); if (r) { pr_err("failed to load firmware %s\n", VPDMA_FIRMWARE); return r; } return 0; } EXPORT_SYMBOL(vpdma_create); MODULE_AUTHOR("Texas Instruments Inc."); MODULE_FIRMWARE(VPDMA_FIRMWARE); MODULE_LICENSE("GPL v2");
linux-master
drivers/media/platform/ti/vpe/vpdma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Color space converter library
 *
 * Copyright (c) 2013 Texas Instruments Inc.
 *
 * David Griego, <[email protected]>
 * Dale Farnsworth, <[email protected]>
 * Archit Taneja, <[email protected]>
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/videodev2.h>

#include <media/v4l2-common.h>

#include "csc.h"

/*
 * 12 coefficients in the order:
 * a0, b0, c0, a1, b1, c1, a2, b2, c2, d0, d1, d2
 */
struct quantization {
	u16	coeff[12];
};

struct colorspace {
	struct quantization limited;
	struct quantization full;
};

struct encoding_direction {
	struct colorspace r601;
	struct colorspace r709;
};

struct csc_coeffs {
	struct encoding_direction y2r;
	struct encoding_direction r2y;
};

/*
 * default colorspace coefficients
 *
 * NOTE(review): values are in the CSC hardware's raw register format;
 * entries >= 0x1000 presumably encode negative coefficients in the
 * hardware's two's-complement fixed-point representation — confirm
 * against the CSC section of the SoC TRM before touching these tables.
 */
static struct csc_coeffs csc_coeffs = {
	.y2r = {
		.r601 = {
			.limited = {
				{	/* SDTV */
				0x0400, 0x0000, 0x057D, 0x0400, 0x1EA7, 0x1D35,
				0x0400, 0x06EF, 0x1FFE, 0x0D40, 0x0210, 0x0C88,
				}
			},
			.full = {
				{	/* SDTV */
				0x04A8, 0x1FFE, 0x0662, 0x04A8, 0x1E6F, 0x1CBF,
				0x04A8, 0x0812, 0x1FFF, 0x0C84, 0x0220, 0x0BAC,
				}
			},
		},
		.r709 = {
			.limited = {
				{	/* HDTV */
				0x0400, 0x0000, 0x0629, 0x0400, 0x1F45, 0x1E2B,
				0x0400, 0x0742, 0x0000, 0x0CEC, 0x0148, 0x0C60,
				}
			},
			.full = {
				{	/* HDTV */
				0x04A8, 0x0000, 0x072C, 0x04A8, 0x1F26, 0x1DDE,
				0x04A8, 0x0873, 0x0000, 0x0C20, 0x0134, 0x0B7C,
				}
			},
		},
	},
	.r2y = {
		.r601 = {
			.limited = {
				{	/* SDTV */
				0x0132, 0x0259, 0x0075, 0x1F50, 0x1EA5, 0x020B,
				0x020B, 0x1E4A, 0x1FAB, 0x0000, 0x0200, 0x0200,
				}
			},
			.full = {
				{	/* SDTV */
				0x0107, 0x0204, 0x0064, 0x1F68, 0x1ED6, 0x01C2,
				0x01C2, 0x1E87, 0x1FB7, 0x0040, 0x0200, 0x0200,
				}
			},
		},
		.r709 = {
			.limited = {
				{	/* HDTV */
				0x00DA, 0x02DC, 0x004A, 0x1F88, 0x1E6C, 0x020C,
				0x020C, 0x1E24, 0x1FD0, 0x0000, 0x0200, 0x0200,
				}
			},
			.full = {
				{	/* HDTV */
				0x00bb, 0x0275, 0x003f, 0x1f99, 0x1ea5, 0x01c2,
				0x01c2, 0x1e67, 0x1fd7, 0x0040, 0x0200, 0x0200,
				}
			},
		},
	},

};

/* Dump the six CSC coefficient/control registers at debug log level. */
void csc_dump_regs(struct csc_data *csc)
{
	struct device *dev = &csc->pdev->dev;

#define DUMPREG(r) dev_dbg(dev, "%-35s %08x\n", #r, \
	ioread32(csc->base + CSC_##r))

	dev_dbg(dev, "CSC Registers @ %pa:\n", &csc->res->start);

	DUMPREG(CSC00);
	DUMPREG(CSC01);
	DUMPREG(CSC02);
	DUMPREG(CSC03);
	DUMPREG(CSC04);
	DUMPREG(CSC05);

#undef DUMPREG
}
EXPORT_SYMBOL(csc_dump_regs);

/* Set the bypass bit in the shadow copy of register CSC05 (no conversion). */
void csc_set_coeff_bypass(struct csc_data *csc, u32 *csc_reg5)
{
	*csc_reg5 |= CSC_BYPASS;
}
EXPORT_SYMBOL(csc_set_coeff_bypass);

/*
 * set the color space converter coefficient shadow register values
 *
 * Picks a coefficient table based on the conversion direction (YUV->RGB or
 * RGB->YUV), the encoding (BT.601/BT.709) and quantization (full/limited)
 * of the YUV side, then packs the 12 u16 coefficients pairwise into the six
 * 32-bit shadow registers starting at csc_reg0. If neither direction
 * applies, the converter is put in bypass instead.
 */
void csc_set_coeff(struct csc_data *csc, u32 *csc_reg0,
		struct v4l2_format *src_fmt, struct v4l2_format *dst_fmt)
{
	u32 *csc_reg5 = csc_reg0 + 5;
	u32 *shadow_csc = csc_reg0;
	u16 *coeff, *end_coeff;
	const struct v4l2_pix_format *pix;
	const struct v4l2_pix_format_mplane *mp;
	const struct v4l2_format_info *src_finfo, *dst_finfo;
	enum v4l2_ycbcr_encoding src_ycbcr_enc, dst_ycbcr_enc;
	enum v4l2_quantization src_quantization, dst_quantization;
	u32 src_pixelformat, dst_pixelformat;

	/* extract pixelformat/encoding/quantization for either format type */
	if (V4L2_TYPE_IS_MULTIPLANAR(src_fmt->type)) {
		mp = &src_fmt->fmt.pix_mp;
		src_pixelformat = mp->pixelformat;
		src_ycbcr_enc = mp->ycbcr_enc;
		src_quantization = mp->quantization;
	} else {
		pix = &src_fmt->fmt.pix;
		src_pixelformat = pix->pixelformat;
		src_ycbcr_enc = pix->ycbcr_enc;
		src_quantization = pix->quantization;
	}

	if (V4L2_TYPE_IS_MULTIPLANAR(dst_fmt->type)) {
		mp = &dst_fmt->fmt.pix_mp;
		dst_pixelformat = mp->pixelformat;
		dst_ycbcr_enc = mp->ycbcr_enc;
		dst_quantization = mp->quantization;
	} else {
		pix = &dst_fmt->fmt.pix;
		dst_pixelformat = pix->pixelformat;
		dst_ycbcr_enc = pix->ycbcr_enc;
		dst_quantization = pix->quantization;
	}

	src_finfo = v4l2_format_info(src_pixelformat);
	dst_finfo = v4l2_format_info(dst_pixelformat);

	if (v4l2_is_format_yuv(src_finfo) &&
	    v4l2_is_format_rgb(dst_finfo)) {
		/* Y2R */

		/*
		 * These are not the standard default values but are
		 * set this way for historical compatibility
		 */
		if (src_ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
			src_ycbcr_enc = V4L2_YCBCR_ENC_601;

		if (src_quantization == V4L2_QUANTIZATION_DEFAULT)
			src_quantization = V4L2_QUANTIZATION_FULL_RANGE;

		if (src_ycbcr_enc == V4L2_YCBCR_ENC_601) {
			if (src_quantization == V4L2_QUANTIZATION_FULL_RANGE)
				coeff = csc_coeffs.y2r.r601.full.coeff;
			else
				coeff = csc_coeffs.y2r.r601.limited.coeff;
		} else if (src_ycbcr_enc == V4L2_YCBCR_ENC_709) {
			if (src_quantization == V4L2_QUANTIZATION_FULL_RANGE)
				coeff = csc_coeffs.y2r.r709.full.coeff;
			else
				coeff = csc_coeffs.y2r.r709.limited.coeff;
		} else {
			/* Should never reach this, but it keeps gcc happy */
			coeff = csc_coeffs.y2r.r601.full.coeff;
		}
	} else if (v4l2_is_format_rgb(src_finfo) &&
		   v4l2_is_format_yuv(dst_finfo)) {
		/* R2Y */

		/*
		 * These are not the standard default values but are
		 * set this way for historical compatibility
		 */
		if (dst_ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
			dst_ycbcr_enc = V4L2_YCBCR_ENC_601;

		if (dst_quantization == V4L2_QUANTIZATION_DEFAULT)
			dst_quantization = V4L2_QUANTIZATION_FULL_RANGE;

		if (dst_ycbcr_enc == V4L2_YCBCR_ENC_601) {
			if (dst_quantization == V4L2_QUANTIZATION_FULL_RANGE)
				coeff = csc_coeffs.r2y.r601.full.coeff;
			else
				coeff = csc_coeffs.r2y.r601.limited.coeff;
		} else if (dst_ycbcr_enc == V4L2_YCBCR_ENC_709) {
			if (dst_quantization == V4L2_QUANTIZATION_FULL_RANGE)
				coeff = csc_coeffs.r2y.r709.full.coeff;
			else
				coeff = csc_coeffs.r2y.r709.limited.coeff;
		} else {
			/* Should never reach this, but it keeps gcc happy */
			coeff = csc_coeffs.r2y.r601.full.coeff;
		}
	} else {
		/* neither Y2R nor R2Y: no conversion needed, bypass the CSC */
		*csc_reg5 |= CSC_BYPASS;
		return;
	}

	/* two u16 coefficients per 32-bit shadow register, low half first */
	end_coeff = coeff + 12;
	for (; coeff < end_coeff; coeff += 2)
		*shadow_csc++ = (*(coeff + 1) << 16) | *coeff;
}
EXPORT_SYMBOL(csc_set_coeff);

/*
 * Allocate a csc_data instance and map the named MMIO resource.
 * Returns the instance or an ERR_PTR on failure (devm-managed, no
 * explicit teardown needed by the caller).
 */
struct csc_data *csc_create(struct platform_device *pdev, const char *res_name)
{
	struct csc_data *csc;

	dev_dbg(&pdev->dev, "csc_create\n");

	csc = devm_kzalloc(&pdev->dev, sizeof(*csc), GFP_KERNEL);
	if (!csc) {
		dev_err(&pdev->dev, "couldn't alloc csc_data\n");
		return ERR_PTR(-ENOMEM);
	}

	csc->pdev = pdev;

	csc->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
			res_name);
	if (csc->res == NULL) {
		dev_err(&pdev->dev, "missing '%s' platform resources data\n",
			res_name);
		return ERR_PTR(-ENODEV);
	}

	csc->base = devm_ioremap_resource(&pdev->dev, csc->res);
	if (IS_ERR(csc->base))
		return ERR_CAST(csc->base);

	return csc;
}
EXPORT_SYMBOL(csc_create);

MODULE_DESCRIPTION("TI VIP/VPE Color Space Converter");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_LICENSE("GPL v2");
linux-master
drivers/media/platform/ti/vpe/csc.c
/*
 * omap_vout_vrfb.c
 *
 * Copyright (C) 2010 Texas Instruments.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */

#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <linux/slab.h>

#include <media/v4l2-device.h>

#include <video/omapvrfb.h>

#include "omap_voutdef.h"
#include "omap_voutlib.h"
#include "omap_vout_vrfb.h"

#define OMAP_DMA_NO_DEVICE	0

/*
 * Function for allocating video buffers
 *
 * Allocates up to *count shadow (VRFB) buffers; on partial MMAP-time
 * allocation failure past startindex the loop simply stops, on any other
 * failure everything allocated so far is freed, *count is zeroed and
 * -ENOMEM returned.
 */
static int omap_vout_allocate_vrfb_buffers(struct omap_vout_device *vout,
		unsigned int *count, int startindex)
{
	int i, j;

	for (i = 0; i < *count; i++) {
		if (!vout->smsshado_virt_addr[i]) {
			vout->smsshado_virt_addr[i] =
				omap_vout_alloc_buffer(vout->smsshado_size,
						&vout->smsshado_phy_addr[i]);
		}
		if (!vout->smsshado_virt_addr[i] && startindex != -1) {
			/* for MMAP, buffers beyond startindex are optional */
			if (vout->vq.memory == V4L2_MEMORY_MMAP &&
			    i >= startindex)
				break;
		}
		if (!vout->smsshado_virt_addr[i]) {
			/* hard failure: roll back all earlier allocations */
			for (j = 0; j < i; j++) {
				omap_vout_free_buffer(
						vout->smsshado_virt_addr[j],
						vout->smsshado_size);
				vout->smsshado_virt_addr[j] = 0;
				vout->smsshado_phy_addr[j] = 0;
			}
			*count = 0;
			return -ENOMEM;
		}
		memset((void *)(long)vout->smsshado_virt_addr[i], 0,
				vout->smsshado_size);
	}
	return 0;
}

/*
 * Wakes up the application once the DMA transfer to VRFB space is completed.
 */
static void omap_vout_vrfb_dma_tx_callback(void *data)
{
	struct vid_vrfb_dma *t = (struct vid_vrfb_dma *) data;

	t->tx_status = 1;
	wake_up_interruptible(&t->wait);
}

/*
 * Free VRFB buffers
 */
void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
{
	int j;

	for (j = 0; j < VRFB_NUM_BUFS; j++) {
		if (vout->smsshado_virt_addr[j]) {
			omap_vout_free_buffer(vout->smsshado_virt_addr[j],
					      vout->smsshado_size);
			vout->smsshado_virt_addr[j] = 0;
			vout->smsshado_phy_addr[j] = 0;
		}
	}
}

/*
 * Request VRFB contexts, size the shadow buffers for the worst case
 * (VID_MAX_WIDTH x VID_MAX_HEIGHT rounded up to tile boundaries), grab an
 * interleaved-capable DMA channel and optionally pre-allocate the buffers.
 */
int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
			      bool static_vrfb_allocation)
{
	int ret = 0, i, j;
	struct omap_vout_device *vout;
	struct video_device *vfd;
	dma_cap_mask_t mask;
	int image_width, image_height;
	int vrfb_num_bufs = VRFB_NUM_BUFS;
	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
	struct omap2video_device *vid_dev =
		container_of(v4l2_dev, struct omap2video_device, v4l2_dev);

	vout = vid_dev->vouts[vid_num];
	vfd = vout->vfd;

	for (i = 0; i < VRFB_NUM_BUFS; i++) {
		if (omap_vrfb_request_ctx(&vout->vrfb_context[i])) {
			dev_info(&pdev->dev, ": VRFB allocation failed\n");
			for (j = 0; j < i; j++)
				omap_vrfb_release_ctx(&vout->vrfb_context[j]);
			return -ENOMEM;
		}
	}

	/* Calculate VRFB memory size */
	/* allocate for worst case size */
	image_width = VID_MAX_WIDTH / TILE_SIZE;
	if (VID_MAX_WIDTH % TILE_SIZE)
		image_width++;

	image_width = image_width * TILE_SIZE;
	image_height = VID_MAX_HEIGHT / TILE_SIZE;

	if (VID_MAX_HEIGHT % TILE_SIZE)
		image_height++;

	image_height = image_height * TILE_SIZE;
	vout->smsshado_size = PAGE_ALIGN(image_width * image_height * 2 * 2);

	/*
	 * Request and Initialize DMA, for DMA based VRFB transfer
	 *
	 * NOTE(review): req_status is only ever set to DMA_CHAN_NOT_ALLOTED
	 * in this file; presumably it defaults to DMA_CHAN_ALLOTED via the
	 * device struct initialization elsewhere — confirm in omap_vout.c.
	 */
	dma_cap_zero(mask);
	dma_cap_set(DMA_INTERLEAVE, mask);
	vout->vrfb_dma_tx.chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(vout->vrfb_dma_tx.chan)) {
		vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
	} else {
		size_t xt_size = sizeof(struct dma_interleaved_template) +
				 sizeof(struct data_chunk);

		vout->vrfb_dma_tx.xt = kzalloc(xt_size, GFP_KERNEL);
		if (!vout->vrfb_dma_tx.xt) {
			dma_release_channel(vout->vrfb_dma_tx.chan);
			vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
		}
	}

	if (vout->vrfb_dma_tx.req_status == DMA_CHAN_NOT_ALLOTED)
		dev_info(&pdev->dev,
			 ": failed to allocate DMA Channel for video%d\n",
			 vfd->minor);

	init_waitqueue_head(&vout->vrfb_dma_tx.wait);

	/*
	 * statically allocated the VRFB buffer is done through
	 * command line arguments
	 */
	if (static_vrfb_allocation) {
		if (omap_vout_allocate_vrfb_buffers(vout, &vrfb_num_bufs, -1)) {
			ret = -ENOMEM;
			goto release_vrfb_ctx;
		}
		vout->vrfb_static_allocation = true;
	}
	return 0;

release_vrfb_ctx:
	for (j = 0; j < VRFB_NUM_BUFS; j++)
		omap_vrfb_release_ctx(&vout->vrfb_context[j]);
	return ret;
}

/*
 * Release the VRFB context once the module exits
 */
void omap_vout_release_vrfb(struct omap_vout_device *vout)
{
	int i;

	for (i = 0; i < VRFB_NUM_BUFS; i++)
		omap_vrfb_release_ctx(&vout->vrfb_context[i]);

	if (vout->vrfb_dma_tx.req_status == DMA_CHAN_ALLOTED) {
		vout->vrfb_dma_tx.req_status = DMA_CHAN_NOT_ALLOTED;
		kfree(vout->vrfb_dma_tx.xt);
		dmaengine_terminate_sync(vout->vrfb_dma_tx.chan);
		dma_release_channel(vout->vrfb_dma_tx.chan);
	}
}

/*
 * Allocate the buffers for the VRFB space.  Data is copied from V4L2
 * buffers to the VRFB buffers using the DMA engine.
 */
int omap_vout_vrfb_buffer_setup(struct omap_vout_device *vout,
			  unsigned int *count, unsigned int startindex)
{
	int i;
	bool yuv_mode;

	if (!is_rotation_enabled(vout))
		return 0;

	/* If rotation is enabled, allocate memory for VRFB space also */
	*count = *count > VRFB_NUM_BUFS ? VRFB_NUM_BUFS : *count;

	/* Allocate the VRFB buffers only if the buffers are not
	 * allocated during init time.
	 */
	if (!vout->vrfb_static_allocation)
		if (omap_vout_allocate_vrfb_buffers(vout, count, startindex))
			return -ENOMEM;

	if (vout->dss_mode == OMAP_DSS_COLOR_YUV2 ||
			vout->dss_mode == OMAP_DSS_COLOR_UYVY)
		yuv_mode = true;
	else
		yuv_mode = false;

	for (i = 0; i < *count; i++)
		omap_vrfb_setup(&vout->vrfb_context[i],
				vout->smsshado_phy_addr[i], vout->pix.width,
				vout->pix.height, vout->bpp, yuv_mode);

	return 0;
}

/*
 * Copy the incoming V4L2 buffer into VRFB space via an interleaved DMA
 * transfer, then record the rotated-view physical address for DSS.
 * Returns 0 when rotation is disabled (nothing to do) or on success,
 * -EINVAL on DMA preparation/submission/completion failure.
 */
int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
			   struct vb2_buffer *vb)
{
	struct dma_async_tx_descriptor *tx;
	enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	struct dma_chan *chan = vout->vrfb_dma_tx.chan;
	struct dma_interleaved_template *xt = vout->vrfb_dma_tx.xt;
	dma_cookie_t cookie;
	dma_addr_t buf_phy_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	enum dma_status status;
	enum dss_rotation rotation;
	size_t dst_icg;
	u32 pixsize;

	if (!is_rotation_enabled(vout))
		return 0;

	/* If rotation is enabled, copy input buffer into VRFB
	 * memory space using DMA. We are copying input buffer
	 * into VRFB memory space of desired angle and DSS will
	 * read image VRFB memory for 0 degree angle
	 */

	pixsize = vout->bpp * vout->vrfb_bpp;
	/* gap between lines in the VRFB destination (in bytes) */
	dst_icg = MAX_PIXELS_PER_LINE * pixsize -
		  vout->pix.width * vout->bpp;

	xt->src_start = buf_phy_addr;
	xt->dst_start = vout->vrfb_context[vb->index].paddr[0];

	xt->numf = vout->pix.height;
	xt->frame_size = 1;
	xt->sgl[0].size = vout->pix.width * vout->bpp;
	xt->sgl[0].icg = dst_icg;

	xt->dir = DMA_MEM_TO_MEM;
	xt->src_sgl = false;
	xt->src_inc = true;
	xt->dst_sgl = true;
	xt->dst_inc = true;

	tx = dmaengine_prep_interleaved_dma(chan, xt, flags);
	if (tx == NULL) {
		pr_err("%s: DMA interleaved prep error\n", __func__);
		return -EINVAL;
	}

	tx->callback = omap_vout_vrfb_dma_tx_callback;
	tx->callback_param = &vout->vrfb_dma_tx;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		pr_err("%s: dmaengine_submit failed (%d)\n", __func__, cookie);
		return -EINVAL;
	}

	vout->vrfb_dma_tx.tx_status = 0;
	dma_async_issue_pending(chan);

	wait_event_interruptible_timeout(vout->vrfb_dma_tx.wait,
					 vout->vrfb_dma_tx.tx_status == 1,
					 VRFB_TX_TIMEOUT);

	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);

	if (vout->vrfb_dma_tx.tx_status == 0) {
		pr_err("%s: Timeout while waiting for DMA\n", __func__);
		dmaengine_terminate_sync(chan);
		return -EINVAL;
	} else if (status != DMA_COMPLETE) {
		pr_err("%s: DMA completion %s status\n", __func__,
		       status == DMA_ERROR ? "error" : "busy");
		dmaengine_terminate_sync(chan);
		return -EINVAL;
	}

	/* Store buffers physical address into an array. Addresses
	 * from this array will be used to configure DSS */
	rotation = calc_rotation(vout);
	vout->queued_buf_addr[vb->index] =
		vout->vrfb_context[vb->index].paddr[rotation];
	return 0;
}

/*
 * Calculate the buffer offsets from which the streaming should
 * start. This offset calculation is mainly required because of
 * the VRFB 32 pixels alignment with rotation.
 */
void omap_vout_calculate_vrfb_offset(struct omap_vout_device *vout)
{
	enum dss_rotation rotation;
	bool mirroring = vout->mirror;
	struct v4l2_rect *crop = &vout->crop;
	struct v4l2_pix_format *pix = &vout->pix;
	int *cropped_offset = &vout->cropped_offset;
	int vr_ps = 1, ps = 2, temp_ps = 2;
	int offset = 0, ctop = 0, cleft = 0, line_length = 0;

	rotation = calc_rotation(vout);

	if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
			V4L2_PIX_FMT_UYVY == pix->pixelformat) {
		if (is_rotation_enabled(vout)) {
			/*
			 * ps    - Actual pixel size for YUYV/UYVY for
			 *         VRFB/Mirroring is 4 bytes
			 * vr_ps - Virtually pixel size for YUYV/UYVY is
			 *         2 bytes
			 */
			ps = 4;
			vr_ps = 2;
		} else {
			ps = 2;	/* otherwise the pixel size is 2 byte */
		}
	} else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat) {
		ps = 4;
	} else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat) {
		ps = 3;
	}
	vout->ps = ps;
	vout->vr_ps = vr_ps;

	if (is_rotation_enabled(vout)) {
		line_length = MAX_PIXELS_PER_LINE;
		ctop = (pix->height - crop->height) - crop->top;
		cleft = (pix->width - crop->width) - crop->left;
	} else {
		line_length = pix->width;
	}
	vout->line_length = line_length;
	switch (rotation) {
	case dss_rotation_90_degree:
		offset = vout->vrfb_context[0].yoffset *
			vout->vrfb_context[0].bytespp;
		temp_ps = ps / vr_ps;
		if (!mirroring) {
			*cropped_offset = offset + line_length *
				temp_ps * cleft + crop->top * temp_ps;
		} else {
			*cropped_offset = offset + line_length * temp_ps *
				cleft + crop->top * temp_ps + (line_length *
				((crop->width / (vr_ps)) - 1) * ps);
		}
		break;
	case dss_rotation_180_degree:
		offset = ((MAX_PIXELS_PER_LINE * vout->vrfb_context[0].yoffset *
			vout->vrfb_context[0].bytespp) +
			(vout->vrfb_context[0].xoffset *
			vout->vrfb_context[0].bytespp));
		if (!mirroring) {
			*cropped_offset = offset + (line_length * ps * ctop) +
				(cleft / vr_ps) * ps;

		} else {
			*cropped_offset = offset + (line_length * ps * ctop) +
				(cleft / vr_ps) * ps + (line_length *
				(crop->height - 1) * ps);
		}
		break;
	case dss_rotation_270_degree:
		offset = MAX_PIXELS_PER_LINE * vout->vrfb_context[0].xoffset *
			vout->vrfb_context[0].bytespp;
		temp_ps = ps / vr_ps;
		if (!mirroring) {
			*cropped_offset = offset + line_length *
			    temp_ps * crop->left + ctop * ps;
		} else {
			*cropped_offset = offset + line_length *
				temp_ps * crop->left + ctop * ps +
				(line_length * ((crop->width / vr_ps) - 1) *
				 ps);
		}
		break;
	case dss_rotation_0_degree:
		if (!mirroring) {
			*cropped_offset = (line_length * ps) *
				crop->top + (crop->left / vr_ps) * ps;
		} else {
			*cropped_offset = (line_length * ps) *
				crop->top + (crop->left / vr_ps) * ps +
				(line_length * (crop->height - 1) * ps);
		}
		break;
	default:
		*cropped_offset = (line_length * ps * crop->top) / vr_ps +
			(crop->left * ps) / vr_ps +
			((crop->width / vr_ps) - 1) * ps;
		break;
	}
}
linux-master
drivers/media/platform/ti/omap/omap_vout_vrfb.c
/* * omap_voutlib.c * * Copyright (C) 2005-2010 Texas Instruments. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * * Based on the OMAP2 camera driver * Video-for-Linux (Version 2) camera capture driver for * the OMAP24xx camera controller. * * Author: Andy Lowe ([email protected]) * * Copyright (C) 2004 MontaVista Software, Inc. * Copyright (C) 2010 Texas Instruments. * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/videodev2.h> #include <linux/dma-mapping.h> #include <video/omapfb_dss.h> #include "omap_voutlib.h" MODULE_AUTHOR("Texas Instruments"); MODULE_DESCRIPTION("OMAP Video library"); MODULE_LICENSE("GPL"); /* Return the default overlay cropping rectangle in crop given the image * size in pix and the video display size in fbuf. The default * cropping rectangle is the largest rectangle no larger than the capture size * that will fit on the display. The default cropping rectangle is centered in * the image. All dimensions and offsets are rounded down to even numbers. */ void omap_vout_default_crop(struct v4l2_pix_format *pix, struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop) { crop->width = (pix->width < fbuf->fmt.width) ? pix->width : fbuf->fmt.width; crop->height = (pix->height < fbuf->fmt.height) ? pix->height : fbuf->fmt.height; crop->width &= ~1; crop->height &= ~1; crop->left = ((pix->width - crop->width) >> 1) & ~1; crop->top = ((pix->height - crop->height) >> 1) & ~1; } EXPORT_SYMBOL_GPL(omap_vout_default_crop); /* Given a new render window in new_win, adjust the window to the * nearest supported configuration. The adjusted window parameters are * returned in new_win. * Returns zero if successful, or -EINVAL if the requested window is * impossible and cannot reasonably be adjusted. 
*/ int omap_vout_try_window(struct v4l2_framebuffer *fbuf, struct v4l2_window *new_win) { struct v4l2_rect try_win; /* make a working copy of the new_win rectangle */ try_win = new_win->w; /* adjust the preview window so it fits on the display by clipping any * offscreen areas */ if (try_win.left < 0) { try_win.width += try_win.left; try_win.left = 0; } if (try_win.top < 0) { try_win.height += try_win.top; try_win.top = 0; } try_win.width = (try_win.width < fbuf->fmt.width) ? try_win.width : fbuf->fmt.width; try_win.height = (try_win.height < fbuf->fmt.height) ? try_win.height : fbuf->fmt.height; if (try_win.left + try_win.width > fbuf->fmt.width) try_win.width = fbuf->fmt.width - try_win.left; if (try_win.top + try_win.height > fbuf->fmt.height) try_win.height = fbuf->fmt.height - try_win.top; try_win.width &= ~1; try_win.height &= ~1; if (try_win.width <= 0 || try_win.height <= 0) return -EINVAL; /* We now have a valid preview window, so go with it */ new_win->w = try_win; new_win->field = V4L2_FIELD_NONE; new_win->clips = NULL; new_win->clipcount = 0; new_win->bitmap = NULL; return 0; } EXPORT_SYMBOL_GPL(omap_vout_try_window); /* Given a new render window in new_win, adjust the window to the * nearest supported configuration. The image cropping window in crop * will also be adjusted if necessary. Preference is given to keeping the * window as close to the requested configuration as possible. If * successful, new_win, vout->win, and crop are updated. * Returns zero if successful, or -EINVAL if the requested preview window is * impossible and cannot reasonably be adjusted. 
*/ int omap_vout_new_window(struct v4l2_rect *crop, struct v4l2_window *win, struct v4l2_framebuffer *fbuf, struct v4l2_window *new_win) { int err; err = omap_vout_try_window(fbuf, new_win); if (err) return err; /* update our preview window */ win->w = new_win->w; win->field = new_win->field; win->chromakey = new_win->chromakey; /* Adjust the cropping window to allow for resizing limitation */ if (omap_vout_dss_omap24xx()) { /* For 24xx limit is 8x to 1/2x scaling. */ if ((crop->height/win->w.height) >= 2) crop->height = win->w.height * 2; if ((crop->width/win->w.width) >= 2) crop->width = win->w.width * 2; if (crop->width > 768) { /* The OMAP2420 vertical resizing line buffer is 768 * pixels wide. If the cropped image is wider than * 768 pixels then it cannot be vertically resized. */ if (crop->height != win->w.height) crop->width = 768; } } else if (omap_vout_dss_omap34xx()) { /* For 34xx limit is 8x to 1/4x scaling. */ if ((crop->height/win->w.height) >= 4) crop->height = win->w.height * 4; if ((crop->width/win->w.width) >= 4) crop->width = win->w.width * 4; } return 0; } EXPORT_SYMBOL_GPL(omap_vout_new_window); /* Given a new cropping rectangle in new_crop, adjust the cropping rectangle to * the nearest supported configuration. The image render window in win will * also be adjusted if necessary. The preview window is adjusted such that the * horizontal and vertical rescaling ratios stay constant. If the render * window would fall outside the display boundaries, the cropping rectangle * will also be adjusted to maintain the rescaling ratios. If successful, crop * and win are updated. * Returns zero if successful, or -EINVAL if the requested cropping rectangle is * impossible and cannot reasonably be adjusted. 
*/ int omap_vout_new_crop(struct v4l2_pix_format *pix, struct v4l2_rect *crop, struct v4l2_window *win, struct v4l2_framebuffer *fbuf, const struct v4l2_rect *new_crop) { struct v4l2_rect try_crop; unsigned long vresize, hresize; /* make a working copy of the new_crop rectangle */ try_crop = *new_crop; /* adjust the cropping rectangle so it fits in the image */ if (try_crop.left < 0) { try_crop.width += try_crop.left; try_crop.left = 0; } if (try_crop.top < 0) { try_crop.height += try_crop.top; try_crop.top = 0; } try_crop.width = (try_crop.width < pix->width) ? try_crop.width : pix->width; try_crop.height = (try_crop.height < pix->height) ? try_crop.height : pix->height; if (try_crop.left + try_crop.width > pix->width) try_crop.width = pix->width - try_crop.left; if (try_crop.top + try_crop.height > pix->height) try_crop.height = pix->height - try_crop.top; try_crop.width &= ~1; try_crop.height &= ~1; if (try_crop.width <= 0 || try_crop.height <= 0) return -EINVAL; if (omap_vout_dss_omap24xx()) { if (try_crop.height != win->w.height) { /* If we're resizing vertically, we can't support a * crop width wider than 768 pixels. */ if (try_crop.width > 768) try_crop.width = 768; } } /* vertical resizing */ vresize = (1024 * try_crop.height) / win->w.height; if (omap_vout_dss_omap24xx() && (vresize > 2048)) vresize = 2048; else if (omap_vout_dss_omap34xx() && (vresize > 4096)) vresize = 4096; win->w.height = ((1024 * try_crop.height) / vresize) & ~1; if (win->w.height == 0) win->w.height = 2; if (win->w.height + win->w.top > fbuf->fmt.height) { /* We made the preview window extend below the bottom of the * display, so clip it to the display boundary and resize the * cropping height to maintain the vertical resizing ratio. 
*/ win->w.height = (fbuf->fmt.height - win->w.top) & ~1; if (try_crop.height == 0) try_crop.height = 2; } /* horizontal resizing */ hresize = (1024 * try_crop.width) / win->w.width; if (omap_vout_dss_omap24xx() && (hresize > 2048)) hresize = 2048; else if (omap_vout_dss_omap34xx() && (hresize > 4096)) hresize = 4096; win->w.width = ((1024 * try_crop.width) / hresize) & ~1; if (win->w.width == 0) win->w.width = 2; if (win->w.width + win->w.left > fbuf->fmt.width) { /* We made the preview window extend past the right side of the * display, so clip it to the display boundary and resize the * cropping width to maintain the horizontal resizing ratio. */ win->w.width = (fbuf->fmt.width - win->w.left) & ~1; if (try_crop.width == 0) try_crop.width = 2; } if (omap_vout_dss_omap24xx()) { if ((try_crop.height/win->w.height) >= 2) try_crop.height = win->w.height * 2; if ((try_crop.width/win->w.width) >= 2) try_crop.width = win->w.width * 2; if (try_crop.width > 768) { /* The OMAP2420 vertical resizing line buffer is * 768 pixels wide. If the cropped image is wider * than 768 pixels then it cannot be vertically resized. */ if (try_crop.height != win->w.height) try_crop.width = 768; } } else if (omap_vout_dss_omap34xx()) { if ((try_crop.height/win->w.height) >= 4) try_crop.height = win->w.height * 4; if ((try_crop.width/win->w.width) >= 4) try_crop.width = win->w.width * 4; } /* update our cropping rectangle and we're done */ *crop = try_crop; return 0; } EXPORT_SYMBOL_GPL(omap_vout_new_crop); /* Given a new format in pix and fbuf, crop and win * structures are initialized to default values. crop * is initialized to the largest window size that will fit on the display. The * crop window is centered in the image. win is initialized to * the same size as crop and is centered on the display. * All sizes and offsets are constrained to be even numbers. 
*/ void omap_vout_new_format(struct v4l2_pix_format *pix, struct v4l2_framebuffer *fbuf, struct v4l2_rect *crop, struct v4l2_window *win) { /* crop defines the preview source window in the image capture * buffer */ omap_vout_default_crop(pix, fbuf, crop); /* win defines the preview target window on the display */ win->w.width = crop->width; win->w.height = crop->height; win->w.left = ((fbuf->fmt.width - win->w.width) >> 1) & ~1; win->w.top = ((fbuf->fmt.height - win->w.height) >> 1) & ~1; } EXPORT_SYMBOL_GPL(omap_vout_new_format); /* * Allocate buffers */ unsigned long omap_vout_alloc_buffer(u32 buf_size, u32 *phys_addr) { u32 order, size; unsigned long virt_addr, addr; size = PAGE_ALIGN(buf_size); order = get_order(size); virt_addr = __get_free_pages(GFP_KERNEL, order); addr = virt_addr; if (virt_addr) { while (size > 0) { SetPageReserved(virt_to_page((void *)addr)); addr += PAGE_SIZE; size -= PAGE_SIZE; } } *phys_addr = (u32) virt_to_phys((void *) virt_addr); return virt_addr; } /* * Free buffers */ void omap_vout_free_buffer(unsigned long virtaddr, u32 buf_size) { u32 order, size; unsigned long addr = virtaddr; size = PAGE_ALIGN(buf_size); order = get_order(size); while (size > 0) { ClearPageReserved(virt_to_page((void *)addr)); addr += PAGE_SIZE; size -= PAGE_SIZE; } free_pages((unsigned long) virtaddr, order); } bool omap_vout_dss_omap24xx(void) { return omapdss_get_version() == OMAPDSS_VER_OMAP24xx; } bool omap_vout_dss_omap34xx(void) { switch (omapdss_get_version()) { case OMAPDSS_VER_OMAP34xx_ES1: case OMAPDSS_VER_OMAP34xx_ES3: case OMAPDSS_VER_OMAP3630: case OMAPDSS_VER_AM35xx: return true; default: return false; } }
/*
 * ==== concatenation boundary ====
 * The code below is a separate source file:
 *   linux-master
 *   drivers/media/platform/ti/omap/omap_vout.c
 */
/* * omap_vout.c * * Copyright (C) 2005-2010 Texas Instruments. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * * Leveraged code from the OMAP2 camera driver * Video-for-Linux (Version 2) camera capture driver for * the OMAP24xx camera controller. * * Author: Andy Lowe ([email protected]) * * Copyright (C) 2004 MontaVista Software, Inc. * Copyright (C) 2010 Texas Instruments. * * History: * 20-APR-2006 Khasim Modified VRFB based Rotation, * The image data is always read from 0 degree * view and written * to the virtual space of desired rotation angle * 4-DEC-2006 Jian Changed to support better memory management * * 17-Nov-2008 Hardik Changed driver to use video_ioctl2 * * 23-Feb-2010 Vaibhav H Modified to use new DSS2 interface * */ #include <linux/init.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/videodev2.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> #include <video/omapvrfb.h> #include <video/omapfb_dss.h> #include "omap_voutlib.h" #include "omap_voutdef.h" #include "omap_vout_vrfb.h" MODULE_AUTHOR("Texas Instruments"); MODULE_DESCRIPTION("OMAP Video for Linux Video out driver"); MODULE_LICENSE("GPL"); /* Driver Configuration macros */ #define VOUT_NAME "omap_vout" enum omap_vout_channels { OMAP_VIDEO1, OMAP_VIDEO2, }; /* Variables configurable through module params*/ static bool vid1_static_vrfb_alloc; static bool vid2_static_vrfb_alloc; static bool debug; /* Module parameters */ module_param(vid1_static_vrfb_alloc, bool, S_IRUGO); MODULE_PARM_DESC(vid1_static_vrfb_alloc, "Static allocation of the VRFB buffer for video1 device"); module_param(vid2_static_vrfb_alloc, bool, S_IRUGO); 
MODULE_PARM_DESC(vid2_static_vrfb_alloc,
	"Static allocation of the VRFB buffer for video2 device");

module_param(debug, bool, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0-1)");

/* list of image formats supported by OMAP2 video pipelines */
static const struct v4l2_fmtdesc omap_formats[] = {
	{
		/* Note: V4L2 defines RGB565 as:
		 *
		 *	Byte 0			  Byte 1
		 *	g2 g1 g0 r4 r3 r2 r1 r0	  b4 b3 b2 b1 b0 g5 g4 g3
		 *
		 * We interpret RGB565 as:
		 *
		 *	Byte 0			  Byte 1
		 *	g2 g1 g0 b4 b3 b2 b1 b0	  r4 r3 r2 r1 r0 g5 g4 g3
		 */
		.pixelformat = V4L2_PIX_FMT_RGB565,
	},
	{
		/* Note: V4L2 defines RGB32 as: RGB-8-8-8-8  we use
		 *  this for RGB24 unpack mode, the last 8 bits are ignored
		 * */
		.pixelformat = V4L2_PIX_FMT_RGB32,
	},
	{
		/* Note: V4L2 defines RGB24 as: RGB-8-8-8  we use
		 *	this for RGB24 packed mode
		 *
		 * */
		.pixelformat = V4L2_PIX_FMT_RGB24,
	},
	{
		.pixelformat = V4L2_PIX_FMT_YUYV,
	},
	{
		.pixelformat = V4L2_PIX_FMT_UYVY,
	},
};

#define NUM_OUTPUT_FORMATS (ARRAY_SIZE(omap_formats))

/*
 * Try format: clamp width/height to the driver limits, fall back to the
 * first supported pixel format, and fill in the derived fields
 * (colorspace, bytesperline, sizeimage).  Returns the bytes-per-pixel of
 * the chosen format.
 */
static int omap_vout_try_format(struct v4l2_pix_format *pix)
{
	int ifmt, bpp = 0;

	pix->height = clamp(pix->height, (u32)VID_MIN_HEIGHT,
						(u32)VID_MAX_HEIGHT);
	pix->width = clamp(pix->width, (u32)VID_MIN_WIDTH, (u32)VID_MAX_WIDTH);

	for (ifmt = 0; ifmt < NUM_OUTPUT_FORMATS; ifmt++) {
		if (pix->pixelformat == omap_formats[ifmt].pixelformat)
			break;
	}

	/* Unknown format: default to the first table entry (RGB565). */
	if (ifmt == NUM_OUTPUT_FORMATS)
		ifmt = 0;

	pix->pixelformat = omap_formats[ifmt].pixelformat;
	pix->field = V4L2_FIELD_NONE;

	switch (pix->pixelformat) {
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_UYVY:
	default:
		pix->colorspace = V4L2_COLORSPACE_SRGB;
		bpp = YUYV_BPP;
		break;
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_RGB565X:
		pix->colorspace = V4L2_COLORSPACE_SRGB;
		bpp = RGB565_BPP;
		break;
	case V4L2_PIX_FMT_RGB24:
		pix->colorspace = V4L2_COLORSPACE_SRGB;
		bpp = RGB24_BPP;
		break;
	case V4L2_PIX_FMT_RGB32:
	case V4L2_PIX_FMT_BGR32:
		pix->colorspace = V4L2_COLORSPACE_SRGB;
		bpp = RGB32_BPP;
		break;
	}
	pix->bytesperline = pix->width * bpp;
	pix->sizeimage = pix->bytesperline * pix->height;

	return bpp;
}

/*
 * Convert V4L2 rotation to DSS rotation
 *	V4L2 understand 0, 90, 180, 270.
 *	Convert to 0, 1, 2 and 3 respectively for DSS
 */
static int v4l2_rot_to_dss_rot(int v4l2_rotation,
			enum dss_rotation *rotation, bool mirror)
{
	int ret = 0;

	switch (v4l2_rotation) {
	case 90:
		*rotation = dss_rotation_90_degree;
		break;
	case 180:
		*rotation = dss_rotation_180_degree;
		break;
	case 270:
		*rotation = dss_rotation_270_degree;
		break;
	case 0:
		*rotation = dss_rotation_0_degree;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

/*
 * Compute the byte offset of the crop rectangle inside the frame buffer
 * (VRFB rotation has its own offset calculation).
 */
static int omap_vout_calculate_offset(struct omap_vout_device *vout)
{
	struct omapvideo_info *ovid;
	struct v4l2_rect *crop = &vout->crop;
	struct v4l2_pix_format *pix = &vout->pix;
	int *cropped_offset = &vout->cropped_offset;
	int ps = 2, line_length = 0;

	ovid = &vout->vid_info;

	if (ovid->rotation_type == VOUT_ROT_VRFB) {
		omap_vout_calculate_vrfb_offset(vout);
	} else {
		vout->line_length = line_length = pix->width;

		/* ps = bytes per pixel of the current format */
		if (V4L2_PIX_FMT_YUYV == pix->pixelformat ||
			V4L2_PIX_FMT_UYVY == pix->pixelformat)
			ps = 2;
		else if (V4L2_PIX_FMT_RGB32 == pix->pixelformat)
			ps = 4;
		else if (V4L2_PIX_FMT_RGB24 == pix->pixelformat)
			ps = 3;

		vout->ps = ps;

		*cropped_offset = (line_length * ps) *
			crop->top + crop->left * ps;
	}

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "%s Offset:%x\n",
			__func__, vout->cropped_offset);

	return 0;
}

/*
 * Convert V4L2 pixel format to DSS pixel format
 */
static int video_mode_to_dss_mode(struct omap_vout_device *vout)
{
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct v4l2_pix_format *pix = &vout->pix;
	enum omap_color_mode mode;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	switch (pix->pixelformat) {
	case V4L2_PIX_FMT_YUYV:
		mode = OMAP_DSS_COLOR_YUV2;
		break;
	case V4L2_PIX_FMT_UYVY:
		mode = OMAP_DSS_COLOR_UYVY;
		break;
	case V4L2_PIX_FMT_RGB565:
		mode = OMAP_DSS_COLOR_RGB16;
		break;
	case V4L2_PIX_FMT_RGB24:
		mode = OMAP_DSS_COLOR_RGB24P;
		break;
	case V4L2_PIX_FMT_RGB32:
		/* RGB32 maps differently on the first video overlay */
		mode = (ovl->id == OMAP_DSS_VIDEO1) ?
			OMAP_DSS_COLOR_RGB24U : OMAP_DSS_COLOR_ARGB32;
		break;
	case V4L2_PIX_FMT_BGR32:
		mode = OMAP_DSS_COLOR_RGBX32;
		break;
	default:
		mode = -EINVAL;
		break;
	}
	return mode;
}

/*
 * Setup the overlay
 */
static int omapvid_setup_overlay(struct omap_vout_device *vout,
		struct omap_overlay *ovl, int posx, int posy,
		int outw, int outh, dma_addr_t addr)
{
	int ret = 0;
	struct omap_overlay_info info;
	int cropheight, cropwidth, pixwidth;

	/* Overlays without scaling capability must match the pixel size. */
	if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0 &&
			(outw != vout->pix.width || outh != vout->pix.height)) {
		ret = -EINVAL;
		goto setup_ovl_err;
	}

	vout->dss_mode = video_mode_to_dss_mode(vout);
	if (vout->dss_mode == -EINVAL) {
		ret = -EINVAL;
		goto setup_ovl_err;
	}

	/* Setup the input plane parameters according to
	 * rotation value selected.
	 */
	if (is_rotation_90_or_270(vout)) {
		cropheight = vout->crop.width;
		cropwidth = vout->crop.height;
		pixwidth = vout->pix.height;
	} else {
		cropheight = vout->crop.height;
		cropwidth = vout->crop.width;
		pixwidth = vout->pix.width;
	}

	ovl->get_overlay_info(ovl, &info);
	info.paddr = addr;
	info.width = cropwidth;
	info.height = cropheight;
	info.color_mode = vout->dss_mode;
	info.mirror = vout->mirror;
	info.pos_x = posx;
	info.pos_y = posy;
	info.out_width = outw;
	info.out_height = outh;
	info.global_alpha = vout->win.global_alpha;
	if (!is_rotation_enabled(vout)) {
		info.rotation = 0;
		info.rotation_type = OMAP_DSS_ROT_DMA;
		info.screen_width = pixwidth;
	} else {
		info.rotation = vout->rotation;
		info.rotation_type = OMAP_DSS_ROT_VRFB;
		info.screen_width = 2048;
	}

	v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
		"%s enable=%d addr=%pad width=%d\n height=%d color_mode=%d\n"
		"rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
		"out_height=%d rotation_type=%d screen_width=%d\n", __func__,
		ovl->is_enabled(ovl), &info.paddr, info.width, info.height,
		info.color_mode, info.rotation, info.mirror, info.pos_x,
		info.pos_y, info.out_width, info.out_height, info.rotation_type,
		info.screen_width);

	ret = ovl->set_overlay_info(ovl, &info);
	if (ret)
		goto setup_ovl_err;

	return 0;

setup_ovl_err:
	v4l2_warn(&vout->vid_dev->v4l2_dev, "setup_overlay failed\n");
	return ret;
}

/*
 * Initialize the overlay structure
 */
static int omapvid_init(struct omap_vout_device *vout, dma_addr_t addr)
{
	int ret = 0, i;
	struct v4l2_window *win;
	struct omap_overlay *ovl;
	int posx, posy, outw, outh;
	struct omap_video_timings *timing;
	struct omapvideo_info *ovid = &vout->vid_info;

	win = &vout->win;
	for (i = 0; i < ovid->num_overlays; i++) {
		struct omap_dss_device *dssdev;

		ovl = ovid->overlays[i];
		dssdev = ovl->get_device(ovl);

		if (!dssdev)
			return -EINVAL;

		timing = &dssdev->panel.timings;

		outw = win->w.width;
		outh = win->w.height;
		switch (vout->rotation) {
		case dss_rotation_90_degree:
			/* Invert the height and width for 90
			 * and 270 degree rotation
			 */
			swap(outw, outh);
			posy = (timing->y_res - win->w.width) - win->w.left;
			posx = win->w.top;
			break;

		case dss_rotation_180_degree:
			posx = (timing->x_res - win->w.width) - win->w.left;
			posy = (timing->y_res - win->w.height) - win->w.top;
			break;

		case dss_rotation_270_degree:
			swap(outw, outh);
			posy = win->w.left;
			posx = (timing->x_res - win->w.height) - win->w.top;
			break;

		default:
			posx = win->w.left;
			posy = win->w.top;
			break;
		}

		ret = omapvid_setup_overlay(vout, ovl, posx, posy,
				outw, outh, addr);
		if (ret)
			goto omapvid_init_err;
	}
	return 0;

omapvid_init_err:
	v4l2_warn(&vout->vid_dev->v4l2_dev, "apply_changes failed\n");
	return ret;
}

/*
 * Apply the changes set the go bit of DSS
 */
static int omapvid_apply_changes(struct omap_vout_device *vout)
{
	int i;
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid = &vout->vid_info;

	for (i = 0; i < ovid->num_overlays; i++) {
		struct omap_dss_device *dssdev;

		ovl = ovid->overlays[i];
		dssdev = ovl->get_device(ovl);
		if (!dssdev)
			return -EINVAL;
		ovl->manager->apply(ovl->manager);
	}

	return 0;
}

/*
 * Handle field completion on an interlaced (VENC) display.  Returns the
 * current field id, or 0 when no buffer switch should happen yet.
 */
static int omapvid_handle_interlace_display(struct omap_vout_device *vout,
		unsigned int irqstatus, u64 ts)
{
	u32 fid;

	if (vout->first_int) {
		vout->first_int = 0;
		goto err;
	}

	if (irqstatus & DISPC_IRQ_EVSYNC_ODD)
		fid = 1;
	else if (irqstatus & DISPC_IRQ_EVSYNC_EVEN)
		fid = 0;
	else
		goto err;

	vout->field_id ^= 1;
	if (fid != vout->field_id) {
		/* Resynchronize on an even field. */
		if (fid == 0)
			vout->field_id = fid;
	} else if (0 == fid) {
		if (vout->cur_frm == vout->next_frm)
			goto err;

		/* Both fields displayed: complete the current buffer. */
		vout->cur_frm->vbuf.vb2_buf.timestamp = ts;
		vout->cur_frm->vbuf.sequence = vout->sequence++;
		vb2_buffer_done(&vout->cur_frm->vbuf.vb2_buf,
				VB2_BUF_STATE_DONE);
		vout->cur_frm = vout->next_frm;
	} else {
		if (list_empty(&vout->dma_queue) ||
				(vout->cur_frm != vout->next_frm))
			goto err;
	}

	return vout->field_id;
err:
	return 0;
}

/*
 * Display controller interrupt handler: complete the displayed buffer and
 * program the next queued buffer into the overlay(s).
 */
static void omap_vout_isr(void *arg, unsigned int irqstatus)
{
	int ret, fid, mgr_id;
	dma_addr_t addr;
	u32 irq;
	struct omap_overlay *ovl;
	u64 ts;
	struct omapvideo_info *ovid;
	struct omap_dss_device *cur_display;
	struct omap_vout_device *vout = (struct omap_vout_device *)arg;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	mgr_id = ovl->manager->id;

	/* get the display device attached to the overlay */
	cur_display = ovl->get_device(ovl);

	if (!cur_display)
		return;

	spin_lock(&vout->vbq_lock);
	ts = ktime_get_ns();

	/* Only react to the vsync of the display this overlay drives. */
	switch (cur_display->type) {
	case OMAP_DISPLAY_TYPE_DSI:
	case OMAP_DISPLAY_TYPE_DPI:
	case OMAP_DISPLAY_TYPE_DVI:
		if (mgr_id == OMAP_DSS_CHANNEL_LCD)
			irq = DISPC_IRQ_VSYNC;
		else if (mgr_id == OMAP_DSS_CHANNEL_LCD2)
			irq = DISPC_IRQ_VSYNC2;
		else
			goto vout_isr_err;

		if (!(irqstatus & irq))
			goto vout_isr_err;
		break;
	case OMAP_DISPLAY_TYPE_VENC:
		fid = omapvid_handle_interlace_display(vout, irqstatus, ts);
		if (!fid)
			goto vout_isr_err;
		break;
	case OMAP_DISPLAY_TYPE_HDMI:
		if (!(irqstatus & DISPC_IRQ_EVSYNC_EVEN))
			goto vout_isr_err;
		break;
	default:
		goto vout_isr_err;
	}

	if (!vout->first_int && (vout->cur_frm != vout->next_frm)) {
		vout->cur_frm->vbuf.vb2_buf.timestamp = ts;
		vout->cur_frm->vbuf.sequence = vout->sequence++;
		vb2_buffer_done(&vout->cur_frm->vbuf.vb2_buf,
				VB2_BUF_STATE_DONE);
		vout->cur_frm = vout->next_frm;
	}

	vout->first_int = 0;
	if (list_empty(&vout->dma_queue))
		goto vout_isr_err;

	vout->next_frm = list_entry(vout->dma_queue.next,
			struct omap_vout_buffer, queue);
	list_del(&vout->next_frm->queue);

	addr = vout->queued_buf_addr[vout->next_frm->vbuf.vb2_buf.index]
		+ vout->cropped_offset;

	/* First save the configuration in overlay structure */
	ret = omapvid_init(vout, addr);
	if (ret) {
		printk(KERN_ERR VOUT_NAME
			"failed to set overlay info\n");
		goto vout_isr_err;
	}

	/* Enable the pipeline and set the Go bit */
	ret = omapvid_apply_changes(vout);
	if (ret)
		printk(KERN_ERR VOUT_NAME "failed to change mode\n");

vout_isr_err:
	spin_unlock(&vout->vbq_lock);
}

/*
 * V4L2 ioctls
 */
static int vidioc_querycap(struct file *file, void *fh,
		struct v4l2_capability *cap)
{
	struct omap_vout_device *vout = video_drvdata(file);

	strscpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
	strscpy(cap->card, vout->vfd->name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "platform:%s.%d", VOUT_NAME, vout->vid);

	return 0;
}

static int vidioc_enum_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_fmtdesc *fmt)
{
	int index = fmt->index;

	if (index >= NUM_OUTPUT_FORMATS)
		return -EINVAL;

	fmt->flags = omap_formats[index].flags;
	fmt->pixelformat = omap_formats[index].pixelformat;

	return 0;
}

static int vidioc_g_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_format *f)
{
	struct omap_vout_device *vout = video_drvdata(file);

	f->fmt.pix = vout->pix;
	return 0;
}

static int vidioc_try_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_format *f)
{
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_video_timings *timing;
	struct omap_vout_device *vout = video_drvdata(file);
	struct omap_dss_device *dssdev;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];
	/* get the display device attached to the overlay */
	dssdev = ovl->get_device(ovl);

	if (!dssdev)
		return -EINVAL;

	timing = &dssdev->panel.timings;

	vout->fbuf.fmt.height = timing->y_res;
	vout->fbuf.fmt.width = timing->x_res;

	omap_vout_try_format(&f->fmt.pix);
	return 0;
}

static int vidioc_s_fmt_vid_out(struct file *file, void *fh,
			struct v4l2_format *f)
{
	int ret, bpp;
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_video_timings *timing;
	struct omap_vout_device *vout = video_drvdata(file);
	struct omap_dss_device *dssdev;

	if (vb2_is_busy(&vout->vq))
		return -EBUSY;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];
	dssdev = ovl->get_device(ovl);

	/* get the display device attached to the overlay */
	if (!dssdev) {
		ret = -EINVAL;
		goto s_fmt_vid_out_exit;
	}
	timing = &dssdev->panel.timings;

	/* We don't support RGB24-packed mode if vrfb rotation
	 * is enabled*/
	if ((is_rotation_enabled(vout)) &&
			f->fmt.pix.pixelformat == V4L2_PIX_FMT_RGB24) {
		ret = -EINVAL;
		goto s_fmt_vid_out_exit;
	}

	/* get the framebuffer parameters */
	if (is_rotation_90_or_270(vout)) {
		vout->fbuf.fmt.height = timing->x_res;
		vout->fbuf.fmt.width = timing->y_res;
	} else {
		vout->fbuf.fmt.height = timing->y_res;
		vout->fbuf.fmt.width = timing->x_res;
	}

	/* change to smaller size is OK */
	bpp = omap_vout_try_format(&f->fmt.pix);
	f->fmt.pix.sizeimage = f->fmt.pix.width * f->fmt.pix.height * bpp;

	/* try & set the new output format */
	vout->bpp = bpp;
	vout->pix = f->fmt.pix;
	vout->vrfb_bpp = 1;

	/* If YUYV then vrfb bpp is 2, for others its 1 */
	if (V4L2_PIX_FMT_YUYV == vout->pix.pixelformat ||
			V4L2_PIX_FMT_UYVY == vout->pix.pixelformat)
		vout->vrfb_bpp = 2;

	/* set default crop and win */
	omap_vout_new_format(&vout->pix, &vout->fbuf, &vout->crop, &vout->win);

	ret = 0;

s_fmt_vid_out_exit:
	return ret;
}

static int vidioc_try_fmt_vid_overlay(struct file *file, void *fh,
			struct v4l2_format *f)
{
	int ret = 0;
	struct omap_vout_device *vout = video_drvdata(file);
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct v4l2_window *win = &f->fmt.win;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	ret = omap_vout_try_window(&vout->fbuf, win);

	if (!ret && !(ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA))
		win->global_alpha = 0;

	return ret;
}

static int vidioc_s_fmt_vid_overlay(struct file *file, void *fh,
			struct v4l2_format *f)
{
	int ret = 0;
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = video_drvdata(file);
	struct v4l2_window *win = &f->fmt.win;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	ret = omap_vout_new_window(&vout->crop, &vout->win, &vout->fbuf, win);
	if (!ret) {
		enum omap_dss_trans_key_type key_type =
			OMAP_DSS_COLOR_KEY_GFX_DST;
		int enable;

		/* Video1 plane does not support global alpha on OMAP3 */
		if (ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA)
			vout->win.global_alpha = win->global_alpha;
		else
			win->global_alpha = 0;
		if (vout->fbuf.flags & (V4L2_FBUF_FLAG_CHROMAKEY |
					V4L2_FBUF_FLAG_SRC_CHROMAKEY))
			enable = 1;
		else
			enable = 0;
		if (vout->fbuf.flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)
			key_type = OMAP_DSS_COLOR_KEY_VID_SRC;

		if (ovl->manager && ovl->manager->get_manager_info &&
				ovl->manager->set_manager_info) {
			struct omap_overlay_manager_info info;

			ovl->manager->get_manager_info(ovl->manager, &info);
			info.trans_enabled = enable;
			info.trans_key_type = key_type;
			info.trans_key = vout->win.chromakey;

			if (ovl->manager->set_manager_info(ovl->manager, &info))
				return -EINVAL;
		}
	}
	return ret;
}

static int vidioc_g_fmt_vid_overlay(struct file *file, void *fh,
			struct v4l2_format *f)
{
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = video_drvdata(file);
	struct v4l2_window *win = &f->fmt.win;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	win->w = vout->win.w;
	win->field = vout->win.field;
	win->chromakey = vout->win.chromakey;
	if (ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA)
		win->global_alpha = vout->win.global_alpha;
	else
		win->global_alpha = 0;
	win->clips = NULL;
	win->clipcount = 0;
	win->bitmap = NULL;

	return 0;
}

static int vidioc_g_selection(struct file *file, void *fh,
		struct v4l2_selection *sel)
{
	struct omap_vout_device *vout = video_drvdata(file);
	struct v4l2_pix_format *pix = &vout->pix;

	if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		sel->r = vout->crop;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
		omap_vout_default_crop(&vout->pix, &vout->fbuf, &sel->r);
		break;
	case V4L2_SEL_TGT_CROP_BOUNDS:
		/* Width and height are always even */
		sel->r.width = pix->width & ~1;
		sel->r.height = pix->height & ~1;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int vidioc_s_selection(struct file *file, void *fh,
		struct v4l2_selection *sel)
{
	int ret = -EINVAL;
	struct omap_vout_device *vout = video_drvdata(file);
	struct omapvideo_info *ovid;
	struct omap_overlay *ovl;
	struct omap_video_timings *timing;
	struct omap_dss_device *dssdev;

	if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;

	if (sel->target != V4L2_SEL_TGT_CROP)
		return -EINVAL;

	if (vb2_is_busy(&vout->vq))
		return -EBUSY;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];
	/* get the display device attached to the overlay */
	dssdev = ovl->get_device(ovl);

	if (!dssdev) {
		ret = -EINVAL;
		goto s_crop_err;
	}

	timing = &dssdev->panel.timings;

	if (is_rotation_90_or_270(vout)) {
		vout->fbuf.fmt.height = timing->x_res;
		vout->fbuf.fmt.width = timing->y_res;
	} else {
		vout->fbuf.fmt.height = timing->y_res;
		vout->fbuf.fmt.width = timing->x_res;
	}

	ret = omap_vout_new_crop(&vout->pix, &vout->crop, &vout->win,
				 &vout->fbuf, &sel->r);

s_crop_err:
	return ret;
}

/* Control handler: rotation, background color and mirroring. */
static int omap_vout_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct omap_vout_device *vout =
		container_of(ctrl->handler, struct omap_vout_device,
			     ctrl_handler);
	int ret = 0;

	switch (ctrl->id) {
	case V4L2_CID_ROTATE: {
		struct omapvideo_info *ovid;
		int rotation = ctrl->val;

		ovid = &vout->vid_info;

		if (rotation && ovid->rotation_type == VOUT_ROT_NONE) {
			ret = -ERANGE;
			break;
		}

		/* RGB24 packed is not supported with rotation */
		if (rotation && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
			ret = -EINVAL;
			break;
		}

		if (v4l2_rot_to_dss_rot(rotation, &vout->rotation,
							vout->mirror)) {
			ret = -EINVAL;
			break;
		}
		break;
	}
	case V4L2_CID_BG_COLOR: {
		struct omap_overlay *ovl;
		unsigned int color = ctrl->val;
		struct omap_overlay_manager_info info;

		ovl = vout->vid_info.overlays[0];

		if (!ovl->manager || !ovl->manager->get_manager_info) {
			ret = -EINVAL;
			break;
		}

		ovl->manager->get_manager_info(ovl->manager, &info);
		info.default_color = color;
		if (ovl->manager->set_manager_info(ovl->manager, &info)) {
			ret = -EINVAL;
			break;
		}
		break;
	}
	case V4L2_CID_VFLIP: {
		struct omapvideo_info *ovid;
		unsigned int mirror = ctrl->val;

		ovid = &vout->vid_info;

		if (mirror && ovid->rotation_type == VOUT_ROT_NONE) {
			ret = -ERANGE;
			break;
		}

		if (mirror && vout->pix.pixelformat == V4L2_PIX_FMT_RGB24) {
			ret = -EINVAL;
			break;
		}
		vout->mirror = mirror;
		break;
	}
	default:
		return -EINVAL;
	}
	return ret;
}

static const struct v4l2_ctrl_ops omap_vout_ctrl_ops = {
	.s_ctrl = omap_vout_s_ctrl,
};

/* vb2 queue_setup: cap the buffer count under VRFB rotation and report
 * the single plane size. */
static int omap_vout_vb2_queue_setup(struct vb2_queue *vq,
				     unsigned int *nbufs,
				     unsigned int *num_planes,
				     unsigned int sizes[],
				     struct device *alloc_devs[])
{
	struct omap_vout_device *vout = vb2_get_drv_priv(vq);
	int size = vout->pix.sizeimage;

	if (is_rotation_enabled(vout) &&
	    vq->num_buffers + *nbufs > VRFB_NUM_BUFS) {
		*nbufs = VRFB_NUM_BUFS - vq->num_buffers;
		if (*nbufs == 0)
			return -EINVAL;
	}

	if (*num_planes)
		return sizes[0] < size ? -EINVAL : 0;

	*num_planes = 1;
	sizes[0] = size;

	return 0;
}

/* vb2 buf_prepare: validate the plane size and record the DMA address. */
static int omap_vout_vb2_prepare(struct vb2_buffer *vb)
{
	struct omap_vout_device *vout = vb2_get_drv_priv(vb->vb2_queue);
	struct omapvideo_info *ovid = &vout->vid_info;
	struct omap_vout_buffer *voutbuf = vb2_to_omap_vout_buffer(vb);
	dma_addr_t buf_phy_addr = vb2_dma_contig_plane_dma_addr(vb, 0);

	if (vb2_plane_size(vb, 0) < vout->pix.sizeimage) {
		v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev,
			 "%s data will not fit into plane (%lu < %u)\n",
			 __func__, vb2_plane_size(vb, 0), vout->pix.sizeimage);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, vout->pix.sizeimage);
	voutbuf->vbuf.field = V4L2_FIELD_NONE;

	vout->queued_buf_addr[vb->index] = buf_phy_addr;
	if (ovid->rotation_type == VOUT_ROT_VRFB)
		return omap_vout_prepare_vrfb(vout, vb);
	return 0;
}

/* vb2 buf_queue: append the buffer to the DMA queue. */
static void omap_vout_vb2_queue(struct vb2_buffer *vb)
{
	struct omap_vout_device *vout = vb2_get_drv_priv(vb->vb2_queue);
	struct omap_vout_buffer *voutbuf = vb2_to_omap_vout_buffer(vb);

	list_add_tail(&voutbuf->queue, &vout->dma_queue);
}

/* vb2 start_streaming: program the first buffer, register the DISPC ISR
 * and enable the overlays.  On failure all queued buffers are returned
 * in the QUEUED state. */
static int omap_vout_vb2_start_streaming(struct vb2_queue *vq,
		unsigned int count)
{
	struct omap_vout_device *vout = vb2_get_drv_priv(vq);
	struct omapvideo_info *ovid = &vout->vid_info;
	struct omap_vout_buffer *buf, *tmp;
	dma_addr_t addr = 0;
	u32 mask = 0;
	int ret, j;

	/* Get the next frame from the buffer queue */
	vout->next_frm = vout->cur_frm = list_entry(vout->dma_queue.next,
			struct omap_vout_buffer, queue);
	/* Remove buffer from the buffer queue */
	list_del(&vout->cur_frm->queue);
	/* Initialize field_id and started member */
	vout->field_id = 0;
	vout->first_int = 1;
	vout->sequence = 0;

	if (omap_vout_calculate_offset(vout)) {
		ret = -EINVAL;
		goto out;
	}
	if (ovid->rotation_type == VOUT_ROT_VRFB)
		if (omap_vout_vrfb_buffer_setup(vout, &count, 0)) {
			ret = -ENOMEM;
			goto out;
		}

	addr = vout->queued_buf_addr[vout->cur_frm->vbuf.vb2_buf.index]
		+ vout->cropped_offset;

	mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
		| DISPC_IRQ_VSYNC2;

	/* First save the configuration in overlay structure */
	ret = omapvid_init(vout, addr);
	if (ret) {
		v4l2_err(&vout->vid_dev->v4l2_dev,
			 "failed to set overlay info\n");
		goto streamon_err1;
	}

	omap_dispc_register_isr(omap_vout_isr, vout, mask);

	/* Enable the pipeline and set the Go bit */
	ret = omapvid_apply_changes(vout);
	if (ret)
		v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");

	for (j = 0; j < ovid->num_overlays; j++) {
		struct omap_overlay *ovl = ovid->overlays[j];
		struct omap_dss_device *dssdev = ovl->get_device(ovl);

		if (dssdev) {
			ret = ovl->enable(ovl);
			if (ret)
				goto streamon_err1;
		}
	}
	return 0;

streamon_err1:
	mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
		| DISPC_IRQ_VSYNC2;

	omap_dispc_unregister_isr(omap_vout_isr, vout, mask);

	for (j = 0; j < ovid->num_overlays; j++) {
		struct omap_overlay *ovl = ovid->overlays[j];
		struct omap_dss_device *dssdev = ovl->get_device(ovl);

		if (dssdev)
			ovl->disable(ovl);
	}
	/* Turn of the pipeline */
	if (omapvid_apply_changes(vout))
		v4l2_err(&vout->vid_dev->v4l2_dev,
			 "failed to change mode in streamoff\n");
out:
	vb2_buffer_done(&vout->cur_frm->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
	list_for_each_entry_safe(buf, tmp, &vout->dma_queue, queue) {
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
	}
	return ret;
}

/* vb2 stop_streaming: unregister the ISR, disable the overlays and
 * return every in-flight buffer in the ERROR state. */
static void omap_vout_vb2_stop_streaming(struct vb2_queue *vq)
{
	struct omap_vout_device *vout = vb2_get_drv_priv(vq);
	struct omapvideo_info *ovid = &vout->vid_info;
	struct omap_vout_buffer *buf, *tmp;
	u32 mask = 0;
	int j;

	mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
		| DISPC_IRQ_VSYNC2;

	omap_dispc_unregister_isr(omap_vout_isr, vout, mask);

	for (j = 0; j < ovid->num_overlays; j++) {
		struct omap_overlay *ovl = ovid->overlays[j];
		struct omap_dss_device *dssdev = ovl->get_device(ovl);

		if (dssdev)
			ovl->disable(ovl);
	}
	/* Turn of the pipeline */
	if (omapvid_apply_changes(vout))
		v4l2_err(&vout->vid_dev->v4l2_dev,
			 "failed to change mode in streamoff\n");

	if (vout->next_frm != vout->cur_frm)
		vb2_buffer_done(&vout->next_frm->vbuf.vb2_buf,
				VB2_BUF_STATE_ERROR);
	vb2_buffer_done(&vout->cur_frm->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
	list_for_each_entry_safe(buf, tmp, &vout->dma_queue, queue) {
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
	}
}

static int vidioc_s_fbuf(struct file *file, void *fh,
		const struct v4l2_framebuffer *a)
{
	int enable = 0;
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = video_drvdata(file);
	struct omap_overlay_manager_info info;
	enum omap_dss_trans_key_type key_type = OMAP_DSS_COLOR_KEY_GFX_DST;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];

	/* OMAP DSS doesn't support Source and Destination color
	   key together */
	if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY) &&
			(a->flags & V4L2_FBUF_FLAG_CHROMAKEY))
		return -EINVAL;
	/* OMAP DSS Doesn't support the Destination color key
	   and alpha blending together */
	if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY) &&
			(a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA))
		return -EINVAL;

	if ((a->flags & V4L2_FBUF_FLAG_SRC_CHROMAKEY)) {
		vout->fbuf.flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
		key_type = OMAP_DSS_COLOR_KEY_VID_SRC;
	} else
		vout->fbuf.flags &= ~V4L2_FBUF_FLAG_SRC_CHROMAKEY;

	if ((a->flags & V4L2_FBUF_FLAG_CHROMAKEY)) {
		vout->fbuf.flags |= V4L2_FBUF_FLAG_CHROMAKEY;
		key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
	} else
		vout->fbuf.flags &= ~V4L2_FBUF_FLAG_CHROMAKEY;

	if (a->flags & (V4L2_FBUF_FLAG_CHROMAKEY |
				V4L2_FBUF_FLAG_SRC_CHROMAKEY))
		enable = 1;
	else
		enable = 0;
	if (ovl->manager && ovl->manager->get_manager_info &&
			ovl->manager->set_manager_info) {
		ovl->manager->get_manager_info(ovl->manager, &info);
		info.trans_enabled = enable;
		info.trans_key_type = key_type;
		info.trans_key = vout->win.chromakey;

		if (ovl->manager->set_manager_info(ovl->manager, &info))
			return -EINVAL;
	}
	if (a->flags & V4L2_FBUF_FLAG_LOCAL_ALPHA) {
		vout->fbuf.flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
		enable = 1;
	} else {
		vout->fbuf.flags &= ~V4L2_FBUF_FLAG_LOCAL_ALPHA;
		enable = 0;
	}
	if (ovl->manager && ovl->manager->get_manager_info &&
			ovl->manager->set_manager_info) {
		ovl->manager->get_manager_info(ovl->manager, &info);
		/* enable this only if there is no zorder cap */
		if ((ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
			info.partial_alpha_enabled = enable;
		if (ovl->manager->set_manager_info(ovl->manager, &info))
			return -EINVAL;
	}

	return 0;
}

static int vidioc_g_fbuf(struct file *file, void *fh,
		struct v4l2_framebuffer *a)
{
	struct omap_overlay *ovl;
	struct omapvideo_info *ovid;
	struct omap_vout_device *vout = video_drvdata(file);
	struct omap_overlay_manager_info info;
	struct omap_video_timings *timing;
	struct omap_dss_device *dssdev;

	ovid = &vout->vid_info;
	ovl = ovid->overlays[0];
	/* get the display device attached to the overlay */
	dssdev = ovl->get_device(ovl);

	if (!dssdev)
		return -EINVAL;

	timing = &dssdev->panel.timings;

	vout->fbuf.fmt.height = timing->y_res;
	vout->fbuf.fmt.width = timing->x_res;
	a->fmt.field = V4L2_FIELD_NONE;
	a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
	a->fmt.pixelformat = V4L2_PIX_FMT_RGBA32;
	a->fmt.height = vout->fbuf.fmt.height;
	a->fmt.width = vout->fbuf.fmt.width;
	a->fmt.bytesperline = vout->fbuf.fmt.width * 4;
	a->fmt.sizeimage = a->fmt.height * a->fmt.bytesperline;
	a->base = vout->fbuf.base;

	a->flags = vout->fbuf.flags;
	a->capability = vout->fbuf.capability;
	/* Re-derive the chromakey/alpha flags from the manager state. */
	a->flags &= ~(V4L2_FBUF_FLAG_SRC_CHROMAKEY | V4L2_FBUF_FLAG_CHROMAKEY |
		      V4L2_FBUF_FLAG_LOCAL_ALPHA);

	if (ovl->manager && ovl->manager->get_manager_info) {
		ovl->manager->get_manager_info(ovl->manager, &info);
		if (info.trans_key_type == OMAP_DSS_COLOR_KEY_VID_SRC)
			a->flags |= V4L2_FBUF_FLAG_SRC_CHROMAKEY;
		if (info.trans_key_type == OMAP_DSS_COLOR_KEY_GFX_DST)
			a->flags |= V4L2_FBUF_FLAG_CHROMAKEY;
		if (info.partial_alpha_enabled)
			a->flags |= V4L2_FBUF_FLAG_LOCAL_ALPHA;
	}

	return 0;
}

/* Single fixed output. */
static int vidioc_enum_output(struct file *file, void *priv_fh,
			      struct v4l2_output *out)
{
	if (out->index)
		return -EINVAL;
	snprintf(out->name, sizeof(out->name), "Overlay");
	out->type = V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY;
	return 0;
}

static int vidioc_g_output(struct file *file, void *priv_fh, unsigned int *i)
{
	*i = 0;
	return 0;
}

static int vidioc_s_output(struct file *file, void *priv_fh, unsigned int i)
{
	return i ? -EINVAL : 0;
}

static const struct v4l2_ioctl_ops vout_ioctl_ops = {
	.vidioc_querycap		= vidioc_querycap,
	.vidioc_enum_fmt_vid_out	= vidioc_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out		= vidioc_g_fmt_vid_out,
	.vidioc_try_fmt_vid_out		= vidioc_try_fmt_vid_out,
	.vidioc_s_fmt_vid_out		= vidioc_s_fmt_vid_out,
	.vidioc_s_fbuf			= vidioc_s_fbuf,
	.vidioc_g_fbuf			= vidioc_g_fbuf,
	.vidioc_try_fmt_vid_out_overlay	= vidioc_try_fmt_vid_overlay,
	.vidioc_s_fmt_vid_out_overlay	= vidioc_s_fmt_vid_overlay,
	.vidioc_g_fmt_vid_out_overlay	= vidioc_g_fmt_vid_overlay,
	.vidioc_g_selection		= vidioc_g_selection,
	.vidioc_s_selection		= vidioc_s_selection,
	.vidioc_enum_output		= vidioc_enum_output,
	.vidioc_g_output		= vidioc_g_output,
	.vidioc_s_output		= vidioc_s_output,
	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,
	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};

static const struct v4l2_file_operations omap_vout_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
	.poll		= vb2_fop_poll,
	.mmap		= vb2_fop_mmap,
	.open		= v4l2_fh_open,
	.release	= vb2_fop_release,
};

static const struct vb2_ops omap_vout_vb2_ops = {
	.queue_setup		= omap_vout_vb2_queue_setup,
	.buf_queue		= omap_vout_vb2_queue,
	.buf_prepare		= omap_vout_vb2_prepare,
	.start_streaming	= omap_vout_vb2_start_streaming,
	.stop_streaming		= omap_vout_vb2_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};

/*
Init functions used during driver initialization */ /* Initial setup of video_data */ static int __init omap_vout_setup_video_data(struct omap_vout_device *vout) { struct video_device *vfd; struct v4l2_pix_format *pix; struct omap_overlay *ovl = vout->vid_info.overlays[0]; struct omap_dss_device *display = ovl->get_device(ovl); struct v4l2_ctrl_handler *hdl; struct vb2_queue *vq; int ret; /* set the default pix */ pix = &vout->pix; /* Set the default picture of QVGA */ pix->width = QQVGA_WIDTH; pix->height = QQVGA_HEIGHT; /* Default pixel format is RGB 5-6-5 */ pix->pixelformat = V4L2_PIX_FMT_RGB565; pix->field = V4L2_FIELD_NONE; pix->bytesperline = pix->width * 2; pix->sizeimage = pix->bytesperline * pix->height; pix->colorspace = V4L2_COLORSPACE_SRGB; vout->bpp = RGB565_BPP; vout->fbuf.fmt.width = display->panel.timings.x_res; vout->fbuf.fmt.height = display->panel.timings.y_res; vout->cropped_offset = 0; /* Set the data structures for the overlay parameters*/ vout->fbuf.flags = V4L2_FBUF_FLAG_OVERLAY; vout->fbuf.capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_SRC_CHROMAKEY | V4L2_FBUF_CAP_CHROMAKEY | V4L2_FBUF_CAP_EXTERNOVERLAY; if (ovl->caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) { vout->win.global_alpha = 255; vout->fbuf.capability |= V4L2_FBUF_CAP_GLOBAL_ALPHA; vout->fbuf.flags |= V4L2_FBUF_FLAG_GLOBAL_ALPHA; } else { vout->win.global_alpha = 0; } vout->win.field = V4L2_FIELD_NONE; omap_vout_new_format(pix, &vout->fbuf, &vout->crop, &vout->win); hdl = &vout->ctrl_handler; v4l2_ctrl_handler_init(hdl, 3); if (vout->vid_info.rotation_type == VOUT_ROT_VRFB) { v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops, V4L2_CID_ROTATE, 0, 270, 90, 0); v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); } v4l2_ctrl_new_std(hdl, &omap_vout_ctrl_ops, V4L2_CID_BG_COLOR, 0, 0xffffff, 1, 0); if (hdl->error) return hdl->error; vout->rotation = 0; vout->mirror = false; INIT_LIST_HEAD(&vout->dma_queue); if (vout->vid_info.rotation_type == VOUT_ROT_VRFB) 
vout->vrfb_bpp = 2; /* initialize the video_device struct */ vfd = vout->vfd = video_device_alloc(); if (!vfd) { printk(KERN_ERR VOUT_NAME ": could not allocate video device struct\n"); v4l2_ctrl_handler_free(hdl); return -ENOMEM; } vfd->ctrl_handler = hdl; vfd->release = video_device_release; vfd->ioctl_ops = &vout_ioctl_ops; strscpy(vfd->name, VOUT_NAME, sizeof(vfd->name)); vfd->fops = &omap_vout_fops; vfd->v4l2_dev = &vout->vid_dev->v4l2_dev; vfd->vfl_dir = VFL_DIR_TX; vfd->minor = -1; vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_VIDEO_OUTPUT_OVERLAY; mutex_init(&vout->lock); vq = &vout->vq; vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; vq->io_modes = VB2_MMAP | VB2_DMABUF; vq->drv_priv = vout; vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; vq->buf_struct_size = sizeof(struct omap_vout_buffer); vq->dev = vfd->v4l2_dev->dev; vq->ops = &omap_vout_vb2_ops; vq->mem_ops = &vb2_dma_contig_memops; vq->lock = &vout->lock; vq->min_buffers_needed = 1; vfd->queue = vq; ret = vb2_queue_init(vq); if (ret) { v4l2_ctrl_handler_free(hdl); video_device_release(vfd); } return ret; } /* Setup video buffers */ static int __init omap_vout_setup_video_bufs(struct platform_device *pdev, int vid_num) { struct omapvideo_info *ovid; struct omap_vout_device *vout; struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); struct omap2video_device *vid_dev = container_of(v4l2_dev, struct omap2video_device, v4l2_dev); int ret = 0; vout = vid_dev->vouts[vid_num]; ovid = &vout->vid_info; if (ovid->rotation_type == VOUT_ROT_VRFB) { bool static_vrfb_allocation = (vid_num == 0) ? 
vid1_static_vrfb_alloc : vid2_static_vrfb_alloc; ret = omap_vout_setup_vrfb_bufs(pdev, vid_num, static_vrfb_allocation); } return ret; } /* Create video out devices */ static int __init omap_vout_create_video_devices(struct platform_device *pdev) { int ret = 0, k; struct omap_vout_device *vout; struct video_device *vfd = NULL; struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); struct omap2video_device *vid_dev = container_of(v4l2_dev, struct omap2video_device, v4l2_dev); struct omap_overlay *ovl = vid_dev->overlays[0]; struct omap_overlay_info info; ovl->get_overlay_info(ovl, &info); for (k = 0; k < pdev->num_resources; k++) { vout = kzalloc(sizeof(struct omap_vout_device), GFP_KERNEL); if (!vout) { dev_err(&pdev->dev, ": could not allocate memory\n"); return -ENOMEM; } vout->vid = k; vid_dev->vouts[k] = vout; vout->vid_dev = vid_dev; /* Select video2 if only 1 overlay is controlled by V4L2 */ if (pdev->num_resources == 1) vout->vid_info.overlays[0] = vid_dev->overlays[k + 2]; else /* Else select video1 and video2 one by one. */ vout->vid_info.overlays[0] = vid_dev->overlays[k + 1]; vout->vid_info.num_overlays = 1; vout->vid_info.id = k + 1; spin_lock_init(&vout->vbq_lock); /* * Set the framebuffer base, this allows applications to find * the fb corresponding to this overlay. * * To be precise: fbuf.base should match smem_start of * struct fb_fix_screeninfo. 
*/ vout->fbuf.base = (void *)(uintptr_t)info.paddr; /* Set VRFB as rotation_type for omap2 and omap3 */ if (omap_vout_dss_omap24xx() || omap_vout_dss_omap34xx()) vout->vid_info.rotation_type = VOUT_ROT_VRFB; /* Setup the default configuration for the video devices */ if (omap_vout_setup_video_data(vout) != 0) { ret = -ENOMEM; goto error; } /* Allocate default number of buffers for the video streaming * and reserve the VRFB space for rotation */ if (omap_vout_setup_video_bufs(pdev, k) != 0) { ret = -ENOMEM; goto error1; } /* Register the Video device with V4L2 */ vfd = vout->vfd; if (video_register_device(vfd, VFL_TYPE_VIDEO, -1) < 0) { dev_err(&pdev->dev, ": Could not register Video for Linux device\n"); vfd->minor = -1; ret = -ENODEV; goto error2; } video_set_drvdata(vfd, vout); dev_info(&pdev->dev, ": registered and initialized video device %d\n", vfd->minor); if (k == (pdev->num_resources - 1)) return 0; continue; error2: if (vout->vid_info.rotation_type == VOUT_ROT_VRFB) omap_vout_release_vrfb(vout); error1: video_device_release(vfd); error: kfree(vout); return ret; } return -ENODEV; } /* Driver functions */ static void omap_vout_cleanup_device(struct omap_vout_device *vout) { struct video_device *vfd; struct omapvideo_info *ovid; if (!vout) return; vfd = vout->vfd; ovid = &vout->vid_info; if (vfd) { if (!video_is_registered(vfd)) { /* * The device was never registered, so release the * video_device struct directly. */ video_device_release(vfd); } else { /* * The unregister function will release the video_device * struct as well as unregistering it. 
*/ video_unregister_device(vfd); } } v4l2_ctrl_handler_free(&vout->ctrl_handler); if (ovid->rotation_type == VOUT_ROT_VRFB) { omap_vout_release_vrfb(vout); /* Free the VRFB buffer if allocated * init time */ if (vout->vrfb_static_allocation) omap_vout_free_vrfb_buffers(vout); } kfree(vout); } static void omap_vout_remove(struct platform_device *pdev) { int k; struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev); struct omap2video_device *vid_dev = container_of(v4l2_dev, struct omap2video_device, v4l2_dev); v4l2_device_unregister(v4l2_dev); for (k = 0; k < pdev->num_resources; k++) omap_vout_cleanup_device(vid_dev->vouts[k]); for (k = 0; k < vid_dev->num_displays; k++) { if (vid_dev->displays[k]->state != OMAP_DSS_DISPLAY_DISABLED) vid_dev->displays[k]->driver->disable(vid_dev->displays[k]); omap_dss_put_device(vid_dev->displays[k]); } kfree(vid_dev); } static int __init omap_vout_probe(struct platform_device *pdev) { int ret = 0, i; struct omap_overlay *ovl; struct omap_dss_device *dssdev = NULL; struct omap_dss_device *def_display; struct omap2video_device *vid_dev = NULL; if (omapdss_is_initialized() == false) return -EPROBE_DEFER; ret = omapdss_compat_init(); if (ret) { dev_err(&pdev->dev, "failed to init dss\n"); return ret; } if (pdev->num_resources == 0) { dev_err(&pdev->dev, "probed for an unknown device\n"); ret = -ENODEV; goto err_dss_init; } vid_dev = kzalloc(sizeof(struct omap2video_device), GFP_KERNEL); if (vid_dev == NULL) { ret = -ENOMEM; goto err_dss_init; } vid_dev->num_displays = 0; for_each_dss_dev(dssdev) { omap_dss_get_device(dssdev); if (!dssdev->driver) { dev_warn(&pdev->dev, "no driver for display: %s\n", dssdev->name); omap_dss_put_device(dssdev); continue; } vid_dev->displays[vid_dev->num_displays++] = dssdev; } if (vid_dev->num_displays == 0) { dev_err(&pdev->dev, "no displays\n"); ret = -EINVAL; goto probe_err0; } vid_dev->num_overlays = omap_dss_get_num_overlays(); for (i = 0; i < vid_dev->num_overlays; i++) vid_dev->overlays[i] = 
omap_dss_get_overlay(i); vid_dev->num_managers = omap_dss_get_num_overlay_managers(); for (i = 0; i < vid_dev->num_managers; i++) vid_dev->managers[i] = omap_dss_get_overlay_manager(i); /* Get the Video1 overlay and video2 overlay. * Setup the Display attached to that overlays */ for (i = 1; i < vid_dev->num_overlays; i++) { ovl = omap_dss_get_overlay(i); dssdev = ovl->get_device(ovl); if (dssdev) { def_display = dssdev; } else { dev_warn(&pdev->dev, "cannot find display\n"); def_display = NULL; } if (def_display) { struct omap_dss_driver *dssdrv = def_display->driver; ret = dssdrv->enable(def_display); if (ret) { /* Here we are not considering a error * as display may be enabled by frame * buffer driver */ dev_warn(&pdev->dev, "'%s' Display already enabled\n", def_display->name); } } } if (v4l2_device_register(&pdev->dev, &vid_dev->v4l2_dev) < 0) { dev_err(&pdev->dev, "v4l2_device_register failed\n"); ret = -ENODEV; goto probe_err1; } ret = omap_vout_create_video_devices(pdev); if (ret) goto probe_err2; for (i = 0; i < vid_dev->num_displays; i++) { struct omap_dss_device *display = vid_dev->displays[i]; if (display->driver->update) display->driver->update(display, 0, 0, display->panel.timings.x_res, display->panel.timings.y_res); } return 0; probe_err2: v4l2_device_unregister(&vid_dev->v4l2_dev); probe_err1: for (i = 1; i < vid_dev->num_overlays; i++) { def_display = NULL; ovl = omap_dss_get_overlay(i); dssdev = ovl->get_device(ovl); if (dssdev) def_display = dssdev; if (def_display && def_display->driver) def_display->driver->disable(def_display); } probe_err0: kfree(vid_dev); err_dss_init: omapdss_compat_uninit(); return ret; } static struct platform_driver omap_vout_driver = { .driver = { .name = VOUT_NAME, }, .remove_new = omap_vout_remove, }; static int __init omap_vout_init(void) { if (platform_driver_probe(&omap_vout_driver, omap_vout_probe) != 0) { printk(KERN_ERR VOUT_NAME ":Could not register Video driver\n"); return -EINVAL; } return 0; } static void 
omap_vout_cleanup(void) { platform_driver_unregister(&omap_vout_driver); } late_initcall(omap_vout_init); module_exit(omap_vout_cleanup);
linux-master
drivers/media/platform/ti/omap/omap_vout.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2011 Atmel Corporation * Josh Wu, <[email protected]> * * Based on previous work by Lars Haring, <[email protected]> * and Sedji Gaouaou * Based on the bttv driver for Bt848 with respective copyright holders */ #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/videodev2.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-dev.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> #include <media/v4l2-fwnode.h> #include <media/videobuf2-dma-contig.h> #include <media/v4l2-image-sizes.h> #include "atmel-isi.h" #define MAX_SUPPORT_WIDTH 2048U #define MAX_SUPPORT_HEIGHT 2048U #define MIN_FRAME_RATE 15 #define FRAME_INTERVAL_MILLI_SEC (1000 / MIN_FRAME_RATE) /* Frame buffer descriptor */ struct fbd { /* Physical address of the frame buffer */ u32 fb_address; /* DMA Control Register(only in HISI2) */ u32 dma_ctrl; /* Physical address of the next fbd */ u32 next_fbd_address; }; static void set_dma_ctrl(struct fbd *fb_desc, u32 ctrl) { fb_desc->dma_ctrl = ctrl; } struct isi_dma_desc { struct list_head list; struct fbd *p_fbd; dma_addr_t fbd_phys; }; /* Frame buffer data */ struct frame_buffer { struct vb2_v4l2_buffer vb; struct isi_dma_desc *p_dma_desc; struct list_head list; }; struct isi_graph_entity { struct device_node *node; struct v4l2_subdev *subdev; }; /* * struct isi_format - ISI media bus format information * @fourcc: Fourcc code for this format * @mbus_code: V4L2 media bus format code. 
* @bpp: Bytes per pixel (when stored in memory) * @swap: Byte swap configuration value * @support: Indicates format supported by subdev * @skip: Skip duplicate format supported by subdev */ struct isi_format { u32 fourcc; u32 mbus_code; u8 bpp; u32 swap; }; struct atmel_isi { /* Protects the access of variables shared with the ISR */ spinlock_t irqlock; struct device *dev; void __iomem *regs; int sequence; /* Allocate descriptors for dma buffer use */ struct fbd *p_fb_descriptors; dma_addr_t fb_descriptors_phys; struct list_head dma_desc_head; struct isi_dma_desc dma_desc[VIDEO_MAX_FRAME]; bool enable_preview_path; struct completion complete; /* ISI peripheral clock */ struct clk *pclk; unsigned int irq; struct isi_platform_data pdata; u16 width_flags; /* max 12 bits */ struct list_head video_buffer_list; struct frame_buffer *active; struct v4l2_device v4l2_dev; struct video_device *vdev; struct v4l2_async_notifier notifier; struct isi_graph_entity entity; struct v4l2_format fmt; const struct isi_format **user_formats; unsigned int num_user_formats; const struct isi_format *current_fmt; struct mutex lock; struct vb2_queue queue; }; #define notifier_to_isi(n) container_of(n, struct atmel_isi, notifier) static void isi_writel(struct atmel_isi *isi, u32 reg, u32 val) { writel(val, isi->regs + reg); } static u32 isi_readl(struct atmel_isi *isi, u32 reg) { return readl(isi->regs + reg); } static void configure_geometry(struct atmel_isi *isi) { u32 cfg2, psize; u32 fourcc = isi->current_fmt->fourcc; isi->enable_preview_path = fourcc == V4L2_PIX_FMT_RGB565 || fourcc == V4L2_PIX_FMT_RGB32 || fourcc == V4L2_PIX_FMT_Y16; /* According to sensor's output format to set cfg2 */ cfg2 = isi->current_fmt->swap; isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS); /* Set width */ cfg2 |= ((isi->fmt.fmt.pix.width - 1) << ISI_CFG2_IM_HSIZE_OFFSET) & ISI_CFG2_IM_HSIZE_MASK; /* Set height */ cfg2 |= ((isi->fmt.fmt.pix.height - 1) << ISI_CFG2_IM_VSIZE_OFFSET) & ISI_CFG2_IM_VSIZE_MASK; 
isi_writel(isi, ISI_CFG2, cfg2); /* No down sampling, preview size equal to sensor output size */ psize = ((isi->fmt.fmt.pix.width - 1) << ISI_PSIZE_PREV_HSIZE_OFFSET) & ISI_PSIZE_PREV_HSIZE_MASK; psize |= ((isi->fmt.fmt.pix.height - 1) << ISI_PSIZE_PREV_VSIZE_OFFSET) & ISI_PSIZE_PREV_VSIZE_MASK; isi_writel(isi, ISI_PSIZE, psize); isi_writel(isi, ISI_PDECF, ISI_PDECF_NO_SAMPLING); } static irqreturn_t atmel_isi_handle_streaming(struct atmel_isi *isi) { if (isi->active) { struct vb2_v4l2_buffer *vbuf = &isi->active->vb; struct frame_buffer *buf = isi->active; list_del_init(&buf->list); vbuf->vb2_buf.timestamp = ktime_get_ns(); vbuf->sequence = isi->sequence++; vbuf->field = V4L2_FIELD_NONE; vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); } if (list_empty(&isi->video_buffer_list)) { isi->active = NULL; } else { /* start next dma frame. */ isi->active = list_entry(isi->video_buffer_list.next, struct frame_buffer, list); if (!isi->enable_preview_path) { isi_writel(isi, ISI_DMA_C_DSCR, (u32)isi->active->p_dma_desc->fbd_phys); isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH); } else { isi_writel(isi, ISI_DMA_P_DSCR, (u32)isi->active->p_dma_desc->fbd_phys); isi_writel(isi, ISI_DMA_P_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_P_CH); } } return IRQ_HANDLED; } /* ISI interrupt service routine */ static irqreturn_t isi_interrupt(int irq, void *dev_id) { struct atmel_isi *isi = dev_id; u32 status, mask, pending; irqreturn_t ret = IRQ_NONE; spin_lock(&isi->irqlock); status = isi_readl(isi, ISI_STATUS); mask = isi_readl(isi, ISI_INTMASK); pending = status & mask; if (pending & ISI_CTRL_SRST) { complete(&isi->complete); isi_writel(isi, ISI_INTDIS, ISI_CTRL_SRST); ret = IRQ_HANDLED; } else if (pending & ISI_CTRL_DIS) { complete(&isi->complete); isi_writel(isi, ISI_INTDIS, ISI_CTRL_DIS); ret = IRQ_HANDLED; } else { if (likely(pending & ISI_SR_CXFR_DONE) || 
likely(pending & ISI_SR_PXFR_DONE)) ret = atmel_isi_handle_streaming(isi); } spin_unlock(&isi->irqlock); return ret; } #define WAIT_ISI_RESET 1 #define WAIT_ISI_DISABLE 0 static int atmel_isi_wait_status(struct atmel_isi *isi, int wait_reset) { unsigned long timeout; /* * The reset or disable will only succeed if we have a * pixel clock from the camera. */ init_completion(&isi->complete); if (wait_reset) { isi_writel(isi, ISI_INTEN, ISI_CTRL_SRST); isi_writel(isi, ISI_CTRL, ISI_CTRL_SRST); } else { isi_writel(isi, ISI_INTEN, ISI_CTRL_DIS); isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS); } timeout = wait_for_completion_timeout(&isi->complete, msecs_to_jiffies(500)); if (timeout == 0) return -ETIMEDOUT; return 0; } /* ------------------------------------------------------------------ Videobuf operations ------------------------------------------------------------------*/ static int queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct atmel_isi *isi = vb2_get_drv_priv(vq); unsigned long size; size = isi->fmt.fmt.pix.sizeimage; /* Make sure the image size is large enough. */ if (*nplanes) return sizes[0] < size ? 
-EINVAL : 0; *nplanes = 1; sizes[0] = size; isi->active = NULL; dev_dbg(isi->dev, "%s, count=%d, size=%ld\n", __func__, *nbuffers, size); return 0; } static int buffer_init(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb); buf->p_dma_desc = NULL; INIT_LIST_HEAD(&buf->list); return 0; } static int buffer_prepare(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb); struct atmel_isi *isi = vb2_get_drv_priv(vb->vb2_queue); unsigned long size; struct isi_dma_desc *desc; size = isi->fmt.fmt.pix.sizeimage; if (vb2_plane_size(vb, 0) < size) { dev_err(isi->dev, "%s data will not fit into plane (%lu < %lu)\n", __func__, vb2_plane_size(vb, 0), size); return -EINVAL; } vb2_set_plane_payload(vb, 0, size); if (!buf->p_dma_desc) { if (list_empty(&isi->dma_desc_head)) { dev_err(isi->dev, "Not enough dma descriptors.\n"); return -EINVAL; } else { /* Get an available descriptor */ desc = list_entry(isi->dma_desc_head.next, struct isi_dma_desc, list); /* Delete the descriptor since now it is used */ list_del_init(&desc->list); /* Initialize the dma descriptor */ desc->p_fbd->fb_address = vb2_dma_contig_plane_dma_addr(vb, 0); desc->p_fbd->next_fbd_address = 0; set_dma_ctrl(desc->p_fbd, ISI_DMA_CTRL_WB); buf->p_dma_desc = desc; } } return 0; } static void buffer_cleanup(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct atmel_isi *isi = vb2_get_drv_priv(vb->vb2_queue); struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb); /* This descriptor is available now and we add to head list */ if (buf->p_dma_desc) list_add(&buf->p_dma_desc->list, &isi->dma_desc_head); } static void start_dma(struct atmel_isi *isi, struct frame_buffer *buffer) { u32 ctrl, cfg1; cfg1 = isi_readl(isi, ISI_CFG1); /* Enable irq: cxfr for the codec path, pxfr for the 
preview path */ isi_writel(isi, ISI_INTEN, ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE); /* Check if already in a frame */ if (!isi->enable_preview_path) { if (isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) { dev_err(isi->dev, "Already in frame handling.\n"); return; } isi_writel(isi, ISI_DMA_C_DSCR, (u32)buffer->p_dma_desc->fbd_phys); isi_writel(isi, ISI_DMA_C_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_C_CH); } else { isi_writel(isi, ISI_DMA_P_DSCR, (u32)buffer->p_dma_desc->fbd_phys); isi_writel(isi, ISI_DMA_P_CTRL, ISI_DMA_CTRL_FETCH | ISI_DMA_CTRL_DONE); isi_writel(isi, ISI_DMA_CHER, ISI_DMA_CHSR_P_CH); } cfg1 &= ~ISI_CFG1_FRATE_DIV_MASK; /* Enable linked list */ cfg1 |= isi->pdata.frate | ISI_CFG1_DISCR; /* Enable ISI */ ctrl = ISI_CTRL_EN; if (!isi->enable_preview_path) ctrl |= ISI_CTRL_CDC; isi_writel(isi, ISI_CTRL, ctrl); isi_writel(isi, ISI_CFG1, cfg1); } static void buffer_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct atmel_isi *isi = vb2_get_drv_priv(vb->vb2_queue); struct frame_buffer *buf = container_of(vbuf, struct frame_buffer, vb); unsigned long flags = 0; spin_lock_irqsave(&isi->irqlock, flags); list_add_tail(&buf->list, &isi->video_buffer_list); if (!isi->active) { isi->active = buf; if (vb2_is_streaming(vb->vb2_queue)) start_dma(isi, buf); } spin_unlock_irqrestore(&isi->irqlock, flags); } static int start_streaming(struct vb2_queue *vq, unsigned int count) { struct atmel_isi *isi = vb2_get_drv_priv(vq); struct frame_buffer *buf, *node; int ret; ret = pm_runtime_resume_and_get(isi->dev); if (ret < 0) return ret; /* Enable stream on the sub device */ ret = v4l2_subdev_call(isi->entity.subdev, video, s_stream, 1); if (ret && ret != -ENOIOCTLCMD) { dev_err(isi->dev, "stream on failed in subdev\n"); goto err_start_stream; } /* Reset ISI */ ret = atmel_isi_wait_status(isi, WAIT_ISI_RESET); if (ret < 0) { dev_err(isi->dev, "Reset ISI timed out\n"); goto err_reset; } /* 
Disable all interrupts */ isi_writel(isi, ISI_INTDIS, (u32)~0UL); isi->sequence = 0; configure_geometry(isi); spin_lock_irq(&isi->irqlock); /* Clear any pending interrupt */ isi_readl(isi, ISI_STATUS); start_dma(isi, isi->active); spin_unlock_irq(&isi->irqlock); return 0; err_reset: v4l2_subdev_call(isi->entity.subdev, video, s_stream, 0); err_start_stream: pm_runtime_put(isi->dev); spin_lock_irq(&isi->irqlock); isi->active = NULL; /* Release all active buffers */ list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) { list_del_init(&buf->list); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); } spin_unlock_irq(&isi->irqlock); return ret; } /* abort streaming and wait for last buffer */ static void stop_streaming(struct vb2_queue *vq) { struct atmel_isi *isi = vb2_get_drv_priv(vq); struct frame_buffer *buf, *node; int ret = 0; unsigned long timeout; /* Disable stream on the sub device */ ret = v4l2_subdev_call(isi->entity.subdev, video, s_stream, 0); if (ret && ret != -ENOIOCTLCMD) dev_err(isi->dev, "stream off failed in subdev\n"); spin_lock_irq(&isi->irqlock); isi->active = NULL; /* Release all active buffers */ list_for_each_entry_safe(buf, node, &isi->video_buffer_list, list) { list_del_init(&buf->list); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); } spin_unlock_irq(&isi->irqlock); if (!isi->enable_preview_path) { timeout = jiffies + (FRAME_INTERVAL_MILLI_SEC * HZ) / 1000; /* Wait until the end of the current frame. 
*/ while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) && time_before(jiffies, timeout)) msleep(1); if (time_after(jiffies, timeout)) dev_err(isi->dev, "Timeout waiting for finishing codec request\n"); } /* Disable interrupts */ isi_writel(isi, ISI_INTDIS, ISI_SR_CXFR_DONE | ISI_SR_PXFR_DONE); /* Disable ISI and wait for it is done */ ret = atmel_isi_wait_status(isi, WAIT_ISI_DISABLE); if (ret < 0) dev_err(isi->dev, "Disable ISI timed out\n"); pm_runtime_put(isi->dev); } static const struct vb2_ops isi_video_qops = { .queue_setup = queue_setup, .buf_init = buffer_init, .buf_prepare = buffer_prepare, .buf_cleanup = buffer_cleanup, .buf_queue = buffer_queue, .start_streaming = start_streaming, .stop_streaming = stop_streaming, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; static int isi_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct atmel_isi *isi = video_drvdata(file); *fmt = isi->fmt; return 0; } static const struct isi_format *find_format_by_fourcc(struct atmel_isi *isi, unsigned int fourcc) { unsigned int num_formats = isi->num_user_formats; const struct isi_format *fmt; unsigned int i; for (i = 0; i < num_formats; i++) { fmt = isi->user_formats[i]; if (fmt->fourcc == fourcc) return fmt; } return NULL; } static void isi_try_fse(struct atmel_isi *isi, const struct isi_format *isi_fmt, struct v4l2_subdev_state *sd_state) { int ret; struct v4l2_subdev_frame_size_enum fse = { .code = isi_fmt->mbus_code, .which = V4L2_SUBDEV_FORMAT_TRY, }; ret = v4l2_subdev_call(isi->entity.subdev, pad, enum_frame_size, sd_state, &fse); /* * Attempt to obtain format size from subdev. If not available, * just use the maximum ISI can receive. 
*/ if (ret) { sd_state->pads->try_crop.width = MAX_SUPPORT_WIDTH; sd_state->pads->try_crop.height = MAX_SUPPORT_HEIGHT; } else { sd_state->pads->try_crop.width = fse.max_width; sd_state->pads->try_crop.height = fse.max_height; } } static int isi_try_fmt(struct atmel_isi *isi, struct v4l2_format *f, const struct isi_format **current_fmt) { const struct isi_format *isi_fmt; struct v4l2_pix_format *pixfmt = &f->fmt.pix; struct v4l2_subdev_pad_config pad_cfg = {}; struct v4l2_subdev_state pad_state = { .pads = &pad_cfg, }; struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_TRY, }; int ret; isi_fmt = find_format_by_fourcc(isi, pixfmt->pixelformat); if (!isi_fmt) { isi_fmt = isi->user_formats[isi->num_user_formats - 1]; pixfmt->pixelformat = isi_fmt->fourcc; } /* Limit to Atmel ISI hardware capabilities */ pixfmt->width = clamp(pixfmt->width, 0U, MAX_SUPPORT_WIDTH); pixfmt->height = clamp(pixfmt->height, 0U, MAX_SUPPORT_HEIGHT); v4l2_fill_mbus_format(&format.format, pixfmt, isi_fmt->mbus_code); isi_try_fse(isi, isi_fmt, &pad_state); ret = v4l2_subdev_call(isi->entity.subdev, pad, set_fmt, &pad_state, &format); if (ret < 0) return ret; v4l2_fill_pix_format(pixfmt, &format.format); pixfmt->field = V4L2_FIELD_NONE; pixfmt->bytesperline = pixfmt->width * isi_fmt->bpp; pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height; if (current_fmt) *current_fmt = isi_fmt; return 0; } static int isi_set_fmt(struct atmel_isi *isi, struct v4l2_format *f) { struct v4l2_subdev_format format = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; const struct isi_format *current_fmt; int ret; ret = isi_try_fmt(isi, f, &current_fmt); if (ret) return ret; v4l2_fill_mbus_format(&format.format, &f->fmt.pix, current_fmt->mbus_code); ret = v4l2_subdev_call(isi->entity.subdev, pad, set_fmt, NULL, &format); if (ret < 0) return ret; isi->fmt = *f; isi->current_fmt = current_fmt; return 0; } static int isi_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct atmel_isi 
*isi = video_drvdata(file); if (vb2_is_streaming(&isi->queue)) return -EBUSY; return isi_set_fmt(isi, f); } static int isi_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct atmel_isi *isi = video_drvdata(file); return isi_try_fmt(isi, f, NULL); } static int isi_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct atmel_isi *isi = video_drvdata(file); if (f->index >= isi->num_user_formats) return -EINVAL; f->pixelformat = isi->user_formats[f->index]->fourcc; return 0; } static int isi_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strscpy(cap->driver, "atmel-isi", sizeof(cap->driver)); strscpy(cap->card, "Atmel Image Sensor Interface", sizeof(cap->card)); strscpy(cap->bus_info, "platform:isi", sizeof(cap->bus_info)); return 0; } static int isi_enum_input(struct file *file, void *priv, struct v4l2_input *i) { if (i->index != 0) return -EINVAL; i->type = V4L2_INPUT_TYPE_CAMERA; strscpy(i->name, "Camera", sizeof(i->name)); return 0; } static int isi_g_input(struct file *file, void *priv, unsigned int *i) { *i = 0; return 0; } static int isi_s_input(struct file *file, void *priv, unsigned int i) { if (i > 0) return -EINVAL; return 0; } static int isi_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { struct atmel_isi *isi = video_drvdata(file); return v4l2_g_parm_cap(video_devdata(file), isi->entity.subdev, a); } static int isi_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { struct atmel_isi *isi = video_drvdata(file); return v4l2_s_parm_cap(video_devdata(file), isi->entity.subdev, a); } static int isi_enum_framesizes(struct file *file, void *fh, struct v4l2_frmsizeenum *fsize) { struct atmel_isi *isi = video_drvdata(file); const struct isi_format *isi_fmt; struct v4l2_subdev_frame_size_enum fse = { .index = fsize->index, .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; int ret; isi_fmt = find_format_by_fourcc(isi, fsize->pixel_format); if (!isi_fmt) return -EINVAL; 
fse.code = isi_fmt->mbus_code; ret = v4l2_subdev_call(isi->entity.subdev, pad, enum_frame_size, NULL, &fse); if (ret) return ret; fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; fsize->discrete.width = fse.max_width; fsize->discrete.height = fse.max_height; return 0; } static int isi_enum_frameintervals(struct file *file, void *fh, struct v4l2_frmivalenum *fival) { struct atmel_isi *isi = video_drvdata(file); const struct isi_format *isi_fmt; struct v4l2_subdev_frame_interval_enum fie = { .index = fival->index, .width = fival->width, .height = fival->height, .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; int ret; isi_fmt = find_format_by_fourcc(isi, fival->pixel_format); if (!isi_fmt) return -EINVAL; fie.code = isi_fmt->mbus_code; ret = v4l2_subdev_call(isi->entity.subdev, pad, enum_frame_interval, NULL, &fie); if (ret) return ret; fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; fival->discrete = fie.interval; return 0; } static int isi_camera_set_bus_param(struct atmel_isi *isi) { u32 cfg1 = 0; int ret; /* set bus param for ISI */ if (isi->pdata.hsync_act_low) cfg1 |= ISI_CFG1_HSYNC_POL_ACTIVE_LOW; if (isi->pdata.vsync_act_low) cfg1 |= ISI_CFG1_VSYNC_POL_ACTIVE_LOW; if (isi->pdata.pclk_act_falling) cfg1 |= ISI_CFG1_PIXCLK_POL_ACTIVE_FALLING; if (isi->pdata.has_emb_sync) cfg1 |= ISI_CFG1_EMB_SYNC; if (isi->pdata.full_mode) cfg1 |= ISI_CFG1_FULL_MODE; cfg1 |= ISI_CFG1_THMASK_BEATS_16; /* Enable PM and peripheral clock before operate isi registers */ ret = pm_runtime_resume_and_get(isi->dev); if (ret < 0) return ret; isi_writel(isi, ISI_CTRL, ISI_CTRL_DIS); isi_writel(isi, ISI_CFG1, cfg1); pm_runtime_put(isi->dev); return 0; } /* -----------------------------------------------------------------------*/ static int atmel_isi_parse_dt(struct atmel_isi *isi, struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct v4l2_fwnode_endpoint ep = { .bus_type = 0 }; int err; /* Default settings for ISI */ isi->pdata.full_mode = 1; isi->pdata.frate = 
ISI_CFG1_FRATE_CAPTURE_ALL; np = of_graph_get_next_endpoint(np, NULL); if (!np) { dev_err(&pdev->dev, "Could not find the endpoint\n"); return -EINVAL; } err = v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &ep); of_node_put(np); if (err) { dev_err(&pdev->dev, "Could not parse the endpoint\n"); return err; } switch (ep.bus.parallel.bus_width) { case 8: isi->pdata.data_width_flags = ISI_DATAWIDTH_8; break; case 10: isi->pdata.data_width_flags = ISI_DATAWIDTH_8 | ISI_DATAWIDTH_10; break; default: dev_err(&pdev->dev, "Unsupported bus width: %d\n", ep.bus.parallel.bus_width); return -EINVAL; } if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW) isi->pdata.hsync_act_low = true; if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW) isi->pdata.vsync_act_low = true; if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING) isi->pdata.pclk_act_falling = true; if (ep.bus_type == V4L2_MBUS_BT656) isi->pdata.has_emb_sync = true; return 0; } static int isi_open(struct file *file) { struct atmel_isi *isi = video_drvdata(file); struct v4l2_subdev *sd = isi->entity.subdev; int ret; if (mutex_lock_interruptible(&isi->lock)) return -ERESTARTSYS; ret = v4l2_fh_open(file); if (ret < 0) goto unlock; if (!v4l2_fh_is_singular_file(file)) goto fh_rel; ret = v4l2_subdev_call(sd, core, s_power, 1); if (ret < 0 && ret != -ENOIOCTLCMD) goto fh_rel; ret = isi_set_fmt(isi, &isi->fmt); if (ret) v4l2_subdev_call(sd, core, s_power, 0); fh_rel: if (ret) v4l2_fh_release(file); unlock: mutex_unlock(&isi->lock); return ret; } static int isi_release(struct file *file) { struct atmel_isi *isi = video_drvdata(file); struct v4l2_subdev *sd = isi->entity.subdev; bool fh_singular; int ret; mutex_lock(&isi->lock); fh_singular = v4l2_fh_is_singular_file(file); ret = _vb2_fop_release(file, NULL); if (fh_singular) v4l2_subdev_call(sd, core, s_power, 0); mutex_unlock(&isi->lock); return ret; } static const struct v4l2_ioctl_ops isi_ioctl_ops = { .vidioc_querycap = isi_querycap, .vidioc_try_fmt_vid_cap = 
isi_try_fmt_vid_cap, .vidioc_g_fmt_vid_cap = isi_g_fmt_vid_cap, .vidioc_s_fmt_vid_cap = isi_s_fmt_vid_cap, .vidioc_enum_fmt_vid_cap = isi_enum_fmt_vid_cap, .vidioc_enum_input = isi_enum_input, .vidioc_g_input = isi_g_input, .vidioc_s_input = isi_s_input, .vidioc_g_parm = isi_g_parm, .vidioc_s_parm = isi_s_parm, .vidioc_enum_framesizes = isi_enum_framesizes, .vidioc_enum_frameintervals = isi_enum_frameintervals, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; static const struct v4l2_file_operations isi_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, .open = isi_open, .release = isi_release, .poll = vb2_fop_poll, .mmap = vb2_fop_mmap, .read = vb2_fop_read, }; static int isi_set_default_fmt(struct atmel_isi *isi) { struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt.pix = { .width = VGA_WIDTH, .height = VGA_HEIGHT, .field = V4L2_FIELD_NONE, .pixelformat = isi->user_formats[0]->fourcc, }, }; int ret; ret = isi_try_fmt(isi, &f, NULL); if (ret) return ret; isi->current_fmt = isi->user_formats[0]; isi->fmt = f; return 0; } static const struct isi_format isi_formats[] = { { .fourcc = V4L2_PIX_FMT_YUYV, .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, .bpp = 2, .swap = ISI_CFG2_YCC_SWAP_DEFAULT, }, { .fourcc = V4L2_PIX_FMT_YUYV, .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8, .bpp = 2, .swap = ISI_CFG2_YCC_SWAP_MODE_1, }, { .fourcc = V4L2_PIX_FMT_YUYV, .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8, .bpp = 2, .swap = ISI_CFG2_YCC_SWAP_MODE_2, }, { .fourcc = V4L2_PIX_FMT_YUYV, .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8, .bpp = 2, .swap = 
ISI_CFG2_YCC_SWAP_MODE_3, }, { .fourcc = V4L2_PIX_FMT_RGB565, .mbus_code = MEDIA_BUS_FMT_YUYV8_2X8, .bpp = 2, .swap = ISI_CFG2_YCC_SWAP_MODE_2, }, { .fourcc = V4L2_PIX_FMT_RGB565, .mbus_code = MEDIA_BUS_FMT_YVYU8_2X8, .bpp = 2, .swap = ISI_CFG2_YCC_SWAP_MODE_3, }, { .fourcc = V4L2_PIX_FMT_RGB565, .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8, .bpp = 2, .swap = ISI_CFG2_YCC_SWAP_DEFAULT, }, { .fourcc = V4L2_PIX_FMT_RGB565, .mbus_code = MEDIA_BUS_FMT_VYUY8_2X8, .bpp = 2, .swap = ISI_CFG2_YCC_SWAP_MODE_1, }, { .fourcc = V4L2_PIX_FMT_GREY, .mbus_code = MEDIA_BUS_FMT_Y10_1X10, .bpp = 1, .swap = ISI_CFG2_GS_MODE_2_PIXEL | ISI_CFG2_GRAYSCALE, }, { .fourcc = V4L2_PIX_FMT_Y16, .mbus_code = MEDIA_BUS_FMT_Y10_1X10, .bpp = 2, .swap = ISI_CFG2_GS_MODE_2_PIXEL | ISI_CFG2_GRAYSCALE, }, }; static int isi_formats_init(struct atmel_isi *isi) { const struct isi_format *isi_fmts[ARRAY_SIZE(isi_formats)]; unsigned int num_fmts = 0, i, j; struct v4l2_subdev *subdev = isi->entity.subdev; struct v4l2_subdev_mbus_code_enum mbus_code = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; while (!v4l2_subdev_call(subdev, pad, enum_mbus_code, NULL, &mbus_code)) { for (i = 0; i < ARRAY_SIZE(isi_formats); i++) { if (isi_formats[i].mbus_code != mbus_code.code) continue; /* Code supported, have we got this fourcc yet? 
*/ for (j = 0; j < num_fmts; j++) if (isi_fmts[j]->fourcc == isi_formats[i].fourcc) /* Already available */ break; if (j == num_fmts) /* new */ isi_fmts[num_fmts++] = isi_formats + i; } mbus_code.index++; } if (!num_fmts) return -ENXIO; isi->num_user_formats = num_fmts; isi->user_formats = devm_kcalloc(isi->dev, num_fmts, sizeof(struct isi_format *), GFP_KERNEL); if (!isi->user_formats) return -ENOMEM; memcpy(isi->user_formats, isi_fmts, num_fmts * sizeof(struct isi_format *)); isi->current_fmt = isi->user_formats[0]; return 0; } static int isi_graph_notify_complete(struct v4l2_async_notifier *notifier) { struct atmel_isi *isi = notifier_to_isi(notifier); int ret; isi->vdev->ctrl_handler = isi->entity.subdev->ctrl_handler; ret = isi_formats_init(isi); if (ret) { dev_err(isi->dev, "No supported mediabus format found\n"); return ret; } ret = isi_camera_set_bus_param(isi); if (ret) { dev_err(isi->dev, "Can't wake up device\n"); return ret; } ret = isi_set_default_fmt(isi); if (ret) { dev_err(isi->dev, "Could not set default format\n"); return ret; } ret = video_register_device(isi->vdev, VFL_TYPE_VIDEO, -1); if (ret) { dev_err(isi->dev, "Failed to register video device\n"); return ret; } dev_dbg(isi->dev, "Device registered as %s\n", video_device_node_name(isi->vdev)); return 0; } static void isi_graph_notify_unbind(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd, struct v4l2_async_connection *asd) { struct atmel_isi *isi = notifier_to_isi(notifier); dev_dbg(isi->dev, "Removing %s\n", video_device_node_name(isi->vdev)); /* Checks internally if vdev have been init or not */ video_unregister_device(isi->vdev); } static int isi_graph_notify_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_connection *asd) { struct atmel_isi *isi = notifier_to_isi(notifier); dev_dbg(isi->dev, "subdev %s bound\n", subdev->name); isi->entity.subdev = subdev; return 0; } static const struct v4l2_async_notifier_operations 
isi_graph_notify_ops = { .bound = isi_graph_notify_bound, .unbind = isi_graph_notify_unbind, .complete = isi_graph_notify_complete, }; static int isi_graph_init(struct atmel_isi *isi) { struct v4l2_async_connection *asd; struct device_node *ep; int ret; ep = of_graph_get_next_endpoint(isi->dev->of_node, NULL); if (!ep) return -EINVAL; v4l2_async_nf_init(&isi->notifier, &isi->v4l2_dev); asd = v4l2_async_nf_add_fwnode_remote(&isi->notifier, of_fwnode_handle(ep), struct v4l2_async_connection); of_node_put(ep); if (IS_ERR(asd)) return PTR_ERR(asd); isi->notifier.ops = &isi_graph_notify_ops; ret = v4l2_async_nf_register(&isi->notifier); if (ret < 0) { dev_err(isi->dev, "Notifier registration failed\n"); v4l2_async_nf_cleanup(&isi->notifier); return ret; } return 0; } static int atmel_isi_probe(struct platform_device *pdev) { int irq; struct atmel_isi *isi; struct vb2_queue *q; int ret, i; isi = devm_kzalloc(&pdev->dev, sizeof(struct atmel_isi), GFP_KERNEL); if (!isi) return -ENOMEM; isi->pclk = devm_clk_get(&pdev->dev, "isi_clk"); if (IS_ERR(isi->pclk)) return PTR_ERR(isi->pclk); ret = atmel_isi_parse_dt(isi, pdev); if (ret) return ret; isi->active = NULL; isi->dev = &pdev->dev; mutex_init(&isi->lock); spin_lock_init(&isi->irqlock); INIT_LIST_HEAD(&isi->video_buffer_list); INIT_LIST_HEAD(&isi->dma_desc_head); q = &isi->queue; /* Initialize the top-level structure */ ret = v4l2_device_register(&pdev->dev, &isi->v4l2_dev); if (ret) return ret; isi->vdev = video_device_alloc(); if (!isi->vdev) { ret = -ENOMEM; goto err_vdev_alloc; } /* video node */ isi->vdev->fops = &isi_fops; isi->vdev->v4l2_dev = &isi->v4l2_dev; isi->vdev->queue = &isi->queue; strscpy(isi->vdev->name, KBUILD_MODNAME, sizeof(isi->vdev->name)); isi->vdev->release = video_device_release; isi->vdev->ioctl_ops = &isi_ioctl_ops; isi->vdev->lock = &isi->lock; isi->vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; video_set_drvdata(isi->vdev, isi); /* buffer queue */ q->type 
= V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF; q->lock = &isi->lock; q->drv_priv = isi; q->buf_struct_size = sizeof(struct frame_buffer); q->ops = &isi_video_qops; q->mem_ops = &vb2_dma_contig_memops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->min_buffers_needed = 2; q->dev = &pdev->dev; ret = vb2_queue_init(q); if (ret < 0) { dev_err(&pdev->dev, "failed to initialize VB2 queue\n"); goto err_vb2_queue; } isi->p_fb_descriptors = dma_alloc_coherent(&pdev->dev, sizeof(struct fbd) * VIDEO_MAX_FRAME, &isi->fb_descriptors_phys, GFP_KERNEL); if (!isi->p_fb_descriptors) { dev_err(&pdev->dev, "Can't allocate descriptors!\n"); ret = -ENOMEM; goto err_dma_alloc; } for (i = 0; i < VIDEO_MAX_FRAME; i++) { isi->dma_desc[i].p_fbd = isi->p_fb_descriptors + i; isi->dma_desc[i].fbd_phys = isi->fb_descriptors_phys + i * sizeof(struct fbd); list_add(&isi->dma_desc[i].list, &isi->dma_desc_head); } isi->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(isi->regs)) { ret = PTR_ERR(isi->regs); goto err_ioremap; } if (isi->pdata.data_width_flags & ISI_DATAWIDTH_8) isi->width_flags = 1 << 7; if (isi->pdata.data_width_flags & ISI_DATAWIDTH_10) isi->width_flags |= 1 << 9; irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto err_req_irq; } ret = devm_request_irq(&pdev->dev, irq, isi_interrupt, 0, "isi", isi); if (ret) { dev_err(&pdev->dev, "Unable to request irq %d\n", irq); goto err_req_irq; } isi->irq = irq; ret = isi_graph_init(isi); if (ret < 0) goto err_req_irq; pm_suspend_ignore_children(&pdev->dev, true); pm_runtime_enable(&pdev->dev); platform_set_drvdata(pdev, isi); return 0; err_req_irq: err_ioremap: dma_free_coherent(&pdev->dev, sizeof(struct fbd) * VIDEO_MAX_FRAME, isi->p_fb_descriptors, isi->fb_descriptors_phys); err_dma_alloc: err_vb2_queue: video_device_release(isi->vdev); err_vdev_alloc: v4l2_device_unregister(&isi->v4l2_dev); return ret; } static void atmel_isi_remove(struct platform_device *pdev) { struct 
atmel_isi *isi = platform_get_drvdata(pdev);

	/* Free the DMA frame-buffer descriptor array allocated in probe */
	dma_free_coherent(&pdev->dev,
			sizeof(struct fbd) * VIDEO_MAX_FRAME,
			isi->p_fb_descriptors,
			isi->fb_descriptors_phys);
	/*
	 * NOTE(review): the descriptor memory is released before the async
	 * notifier and v4l2 device are unregistered — confirm nothing can
	 * still be streaming at this point.
	 */
	pm_runtime_disable(&pdev->dev);
	v4l2_async_nf_unregister(&isi->notifier);
	v4l2_async_nf_cleanup(&isi->notifier);
	v4l2_device_unregister(&isi->v4l2_dev);
}

#ifdef CONFIG_PM
/* Runtime PM: gate the ISI peripheral clock while the device is idle. */
static int atmel_isi_runtime_suspend(struct device *dev)
{
	struct atmel_isi *isi = dev_get_drvdata(dev);

	clk_disable_unprepare(isi->pclk);

	return 0;
}

/* Runtime PM: re-enable the ISI peripheral clock on resume. */
static int atmel_isi_runtime_resume(struct device *dev)
{
	struct atmel_isi *isi = dev_get_drvdata(dev);

	return clk_prepare_enable(isi->pclk);
}
#endif /* CONFIG_PM */

static const struct dev_pm_ops atmel_isi_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(atmel_isi_runtime_suspend,
				atmel_isi_runtime_resume, NULL)
};

static const struct of_device_id atmel_isi_of_match[] = {
	{ .compatible = "atmel,at91sam9g45-isi" },
	{ }
};
MODULE_DEVICE_TABLE(of, atmel_isi_of_match);

static struct platform_driver atmel_isi_driver = {
	.driver		= {
		.name = "atmel_isi",
		.of_match_table = of_match_ptr(atmel_isi_of_match),
		.pm	= &atmel_isi_dev_pm_ops,
	},
	.probe		= atmel_isi_probe,
	.remove_new	= atmel_isi_remove,
};
module_platform_driver(atmel_isi_driver);

MODULE_AUTHOR("Josh Wu <[email protected]>");
MODULE_DESCRIPTION("The V4L2 driver for Atmel Linux");
MODULE_LICENSE("GPL");
linux-master
drivers/media/platform/atmel/atmel-isi.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * keyboard input driver for i2c IR remote controls * * Copyright (c) 2000-2003 Gerd Knorr <[email protected]> * modified for PixelView (BT878P+W/FM) by * Michal Kochanowicz <[email protected]> * Christoph Bartelmus <[email protected]> * modified for KNC ONE TV Station/Anubis Typhoon TView Tuner by * Ulrich Mueller <[email protected]> * modified for em2820 based USB TV tuners by * Markus Rechberger <[email protected]> * modified for DViCO Fusion HDTV 5 RT GOLD by * Chaogui Zhang <[email protected]> * modified for MSI TV@nywhere Plus by * Henry Wong <[email protected]> * Mark Schultz <[email protected]> * Brian Rogers <[email protected]> * modified for AVerMedia Cardbus by * Oldrich Jedlicka <[email protected]> * Zilog Transmitter portions/ideas were derived from GPLv2+ sources: * - drivers/char/pctv_zilogir.[ch] from Hauppauge Broadway product * Copyright 2011 Hauppauge Computer works * - drivers/staging/media/lirc/lirc_zilog.c * Copyright (c) 2000 Gerd Knorr <[email protected]> * Michal Kochanowicz <[email protected]> * Christoph Bartelmus <[email protected]> * Ulrich Mueller <[email protected]> * Stefan Jahn <[email protected]> * Jerome Brock <[email protected]> * Thomas Reitmayr ([email protected]) * Mark Weaver <[email protected]> * Jarod Wilson <[email protected]> * Copyright (C) 2011 Andy Walls <[email protected]> */ #include <asm/unaligned.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/workqueue.h> #include <media/rc-core.h> #include <media/i2c/ir-kbd-i2c.h> #define FLAG_TX 1 #define FLAG_HDPVR 2 static bool enable_hdpvr; module_param(enable_hdpvr, bool, 0644); static int get_key_haup_common(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *ptoggle, int size) { unsigned char buf[6]; int start, range, 
toggle, dev, code, ircode, vendor; /* poll IR chip */ if (size != i2c_master_recv(ir->c, buf, size)) return -EIO; if (buf[0] & 0x80) { int offset = (size == 6) ? 3 : 0; /* split rc5 data block ... */ start = (buf[offset] >> 7) & 1; range = (buf[offset] >> 6) & 1; toggle = (buf[offset] >> 5) & 1; dev = buf[offset] & 0x1f; code = (buf[offset+1] >> 2) & 0x3f; /* rc5 has two start bits * the first bit must be one * the second bit defines the command range: * 1 = 0-63, 0 = 64 - 127 */ if (!start) /* no key pressed */ return 0; /* filter out invalid key presses */ ircode = (start << 12) | (toggle << 11) | (dev << 6) | code; if ((ircode & 0x1fff) == 0x1fff) return 0; if (!range) code += 64; dev_dbg(&ir->rc->dev, "ir hauppauge (rc5): s%d r%d t%d dev=%d code=%d\n", start, range, toggle, dev, code); *protocol = RC_PROTO_RC5; *scancode = RC_SCANCODE_RC5(dev, code); *ptoggle = toggle; return 1; } else if (size == 6 && (buf[0] & 0x40)) { code = buf[4]; dev = buf[3]; vendor = get_unaligned_be16(buf + 1); if (vendor == 0x800f) { *ptoggle = (dev & 0x80) != 0; *protocol = RC_PROTO_RC6_MCE; dev &= 0x7f; dev_dbg(&ir->rc->dev, "ir hauppauge (rc6-mce): t%d vendor=%d dev=%d code=%d\n", *ptoggle, vendor, dev, code); } else { *ptoggle = 0; *protocol = RC_PROTO_RC6_6A_32; dev_dbg(&ir->rc->dev, "ir hauppauge (rc6-6a-32): vendor=%d dev=%d code=%d\n", vendor, dev, code); } *scancode = RC_SCANCODE_RC6_6A(vendor, dev, code); return 1; } return 0; } static int get_key_haup(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { return get_key_haup_common(ir, protocol, scancode, toggle, 3); } static int get_key_haup_xvr(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { int ret; unsigned char buf[1] = { 0 }; /* * This is the same apparent "are you ready?" poll command observed * watching Windows driver traffic and implemented in lirc_zilog. 
With * this added, we get far saner remote behavior with z8 chips on usb * connected devices, even with the default polling interval of 100ms. */ ret = i2c_master_send(ir->c, buf, 1); if (ret != 1) return (ret < 0) ? ret : -EINVAL; return get_key_haup_common(ir, protocol, scancode, toggle, 6); } static int get_key_pixelview(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { int rc; unsigned char b; /* poll IR chip */ rc = i2c_master_recv(ir->c, &b, 1); if (rc != 1) { dev_dbg(&ir->rc->dev, "read error\n"); if (rc < 0) return rc; return -EIO; } *protocol = RC_PROTO_OTHER; *scancode = b; *toggle = 0; return 1; } static int get_key_fusionhdtv(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { int rc; unsigned char buf[4]; /* poll IR chip */ rc = i2c_master_recv(ir->c, buf, 4); if (rc != 4) { dev_dbg(&ir->rc->dev, "read error\n"); if (rc < 0) return rc; return -EIO; } if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0 || buf[3] != 0) dev_dbg(&ir->rc->dev, "%s: %*ph\n", __func__, 4, buf); /* no key pressed or signal from other ir remote */ if(buf[0] != 0x1 || buf[1] != 0xfe) return 0; *protocol = RC_PROTO_UNKNOWN; *scancode = buf[2]; *toggle = 0; return 1; } static int get_key_knc1(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { int rc; unsigned char b; /* poll IR chip */ rc = i2c_master_recv(ir->c, &b, 1); if (rc != 1) { dev_dbg(&ir->rc->dev, "read error\n"); if (rc < 0) return rc; return -EIO; } /* it seems that 0xFE indicates that a button is still hold down, while 0xff indicates that no button is hold down. 
   0xfe sequences are sometimes interrupted by 0xFF */

	dev_dbg(&ir->rc->dev, "key %02x\n", b);

	if (b == 0xff)
		return 0;

	if (b == 0xfe)
		/* keep old data */
		return 1;

	*protocol = RC_PROTO_UNKNOWN;
	*scancode = b;
	*toggle = 0;
	return 1;
}

/*
 * Poll a Geniatech IR receiver: the single-byte read is retried up to
 * four times (20 ms apart) because the chip may NAK while idle.  The raw
 * byte carries the toggle in bit 7; the remaining bits are decoded into
 * an RC5 scancode.  Returns 1 on a new key, 0 for a repeat/no key, or a
 * negative errno on I2C failure.
 */
static int get_key_geniatech(struct IR_i2c *ir, enum rc_proto *protocol,
			     u32 *scancode, u8 *toggle)
{
	int i, rc;
	unsigned char b;

	/* poll IR chip */
	for (i = 0; i < 4; i++) {
		rc = i2c_master_recv(ir->c, &b, 1);
		if (rc == 1)
			break;
		msleep(20);
	}
	if (rc != 1) {
		dev_dbg(&ir->rc->dev, "read error\n");
		if (rc < 0)
			return rc;
		return -EIO;
	}

	/* don't repeat the key */
	if (ir->old == b)
		return 0;

	ir->old = b;
	/* decode to RC5 */
	b &= 0x7f;
	b = (b - 1) / 2;
	dev_dbg(&ir->rc->dev, "key %02x\n", b);

	*protocol = RC_PROTO_RC5;
	*scancode = b;
	/* toggle bit is the (stored) raw byte's MSB */
	*toggle = ir->old >> 7;
	return 1;
}

/*
 * Poll an AVerMedia Cardbus remote.  The key code lives at subaddress
 * 0x0d and the key group at 0x0b; each is fetched with a write/read
 * i2c_transfer pair.  0xff in either register means "no key".  Bit 0 of
 * the group is folded into the scancode; the EM78P153 variant (address
 * 0x41) additionally encodes the whole group in bits 8+.
 */
static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_proto *protocol,
				     u32 *scancode, u8 *toggle)
{
	unsigned char subaddr, key, keygroup;
	struct i2c_msg msg[] = {
		{ .addr = ir->c->addr, .flags = 0,
		  .buf = &subaddr, .len = 1},
		{ .addr = ir->c->addr, .flags = I2C_M_RD,
		  .buf = &key, .len = 1} };
	subaddr = 0x0d;
	if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
		dev_dbg(&ir->rc->dev, "read error\n");
		return -EIO;
	}

	if (key == 0xff)
		return 0;

	subaddr = 0x0b;
	msg[1].buf = &keygroup;
	if (2 != i2c_transfer(ir->c->adapter, msg, 2)) {
		dev_dbg(&ir->rc->dev, "read error\n");
		return -EIO;
	}

	if (keygroup == 0xff)
		return 0;

	dev_dbg(&ir->rc->dev, "read key 0x%02x/0x%02x\n", key, keygroup);
	if (keygroup < 2 || keygroup > 4) {
		dev_warn(&ir->rc->dev,
			 "warning: invalid key group 0x%02x for key 0x%02x\n",
			 keygroup, key);
	}
	key |= (keygroup & 1) << 6;

	*protocol = RC_PROTO_UNKNOWN;
	*scancode = key;
	if (ir->c->addr == 0x41) /* AVerMedia EM78P153 */
		*scancode |= keygroup << 8;
	*toggle = 0;
	return 1;
}

/* ----------------------------------------------------------------------- */

/*
 * Run one polling cycle: call the board-specific ->get_key() and, if a
 * key was decoded, report it to rc-core via rc_keydown().
 */
static int ir_key_poll(struct IR_i2c *ir)
{
	enum rc_proto protocol;
	u32 scancode;
	u8 toggle;
	int rc;
dev_dbg(&ir->rc->dev, "%s\n", __func__); rc = ir->get_key(ir, &protocol, &scancode, &toggle); if (rc < 0) { dev_warn(&ir->rc->dev, "error %d\n", rc); return rc; } if (rc) { dev_dbg(&ir->rc->dev, "%s: proto = 0x%04x, scancode = 0x%08x\n", __func__, protocol, scancode); rc_keydown(ir->rc, protocol, scancode, toggle); } return 0; } static void ir_work(struct work_struct *work) { int rc; struct IR_i2c *ir = container_of(work, struct IR_i2c, work.work); /* * If the transmit code is holding the lock, skip polling for * IR, we'll get it to it next time round */ if (mutex_trylock(&ir->lock)) { rc = ir_key_poll(ir); mutex_unlock(&ir->lock); if (rc == -ENODEV) { rc_unregister_device(ir->rc); ir->rc = NULL; return; } } schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling_interval)); } static int ir_open(struct rc_dev *dev) { struct IR_i2c *ir = dev->priv; schedule_delayed_work(&ir->work, 0); return 0; } static void ir_close(struct rc_dev *dev) { struct IR_i2c *ir = dev->priv; cancel_delayed_work_sync(&ir->work); } /* Zilog Transmit Interface */ #define XTAL_FREQ 18432000 #define ZILOG_SEND 0x80 #define ZILOG_UIR_END 0x40 #define ZILOG_INIT_END 0x20 #define ZILOG_LIR_END 0x10 #define ZILOG_STATUS_OK 0x80 #define ZILOG_STATUS_TX 0x40 #define ZILOG_STATUS_SET 0x20 /* * As you can see here, very few different lengths of pulse and space * can be encoded. This means that the hardware does not work well with * recorded IR. It's best to work with generated IR, like from ir-ctl or * the in-kernel encoders. 
*/ struct code_block { u8 length; u16 pulse[7]; /* not aligned */ u8 carrier_pulse; u8 carrier_space; u16 space[8]; /* not aligned */ u8 codes[61]; u8 csum[2]; } __packed; static int send_data_block(struct IR_i2c *ir, int cmd, struct code_block *code_block) { int i, j, ret; u8 buf[5], *p; p = &code_block->length; for (i = 0; p < code_block->csum; i++) code_block->csum[i & 1] ^= *p++; p = &code_block->length; for (i = 0; i < sizeof(*code_block);) { int tosend = sizeof(*code_block) - i; if (tosend > 4) tosend = 4; buf[0] = i + 1; for (j = 0; j < tosend; ++j) buf[1 + j] = p[i + j]; dev_dbg(&ir->rc->dev, "%*ph", tosend + 1, buf); ret = i2c_master_send(ir->tx_c, buf, tosend + 1); if (ret != tosend + 1) { dev_dbg(&ir->rc->dev, "i2c_master_send failed with %d\n", ret); return ret < 0 ? ret : -EIO; } i += tosend; } buf[0] = 0; buf[1] = cmd; ret = i2c_master_send(ir->tx_c, buf, 2); if (ret != 2) { dev_err(&ir->rc->dev, "i2c_master_send failed with %d\n", ret); return ret < 0 ? ret : -EIO; } usleep_range(2000, 5000); ret = i2c_master_send(ir->tx_c, buf, 1); if (ret != 1) { dev_err(&ir->rc->dev, "i2c_master_send failed with %d\n", ret); return ret < 0 ? ret : -EIO; } return 0; } static int zilog_init(struct IR_i2c *ir) { struct code_block code_block = { .length = sizeof(code_block) }; u8 buf[4]; int ret; put_unaligned_be16(0x1000, &code_block.pulse[3]); ret = send_data_block(ir, ZILOG_INIT_END, &code_block); if (ret) return ret; ret = i2c_master_recv(ir->tx_c, buf, 4); if (ret != 4) { dev_err(&ir->c->dev, "failed to retrieve firmware version: %d\n", ret); return ret < 0 ? ret : -EIO; } dev_info(&ir->c->dev, "Zilog/Hauppauge IR blaster firmware version %d.%d.%d\n", buf[1], buf[2], buf[3]); return 0; } /* * If the last slot for pulse is the same as the current slot for pulse, * then use slot no 7. 
*/ static void copy_codes(u8 *dst, u8 *src, unsigned int count) { u8 c, last = 0xff; while (count--) { c = *src++; if ((c & 0xf0) == last) { *dst++ = 0x70 | (c & 0xf); } else { *dst++ = c; last = c & 0xf0; } } } /* * When looking for repeats, we don't care about the trailing space. This * is set to the shortest possible anyway. */ static int cmp_no_trail(u8 *a, u8 *b, unsigned int count) { while (--count) { if (*a++ != *b++) return 1; } return (*a & 0xf0) - (*b & 0xf0); } static int find_slot(u16 *array, unsigned int size, u16 val) { int i; for (i = 0; i < size; i++) { if (get_unaligned_be16(&array[i]) == val) { return i; } else if (!array[i]) { put_unaligned_be16(val, &array[i]); return i; } } return -1; } static int zilog_ir_format(struct rc_dev *rcdev, unsigned int *txbuf, unsigned int count, struct code_block *code_block) { struct IR_i2c *ir = rcdev->priv; int rep, i, l, p = 0, s, c = 0; bool repeating; u8 codes[174]; code_block->carrier_pulse = DIV_ROUND_CLOSEST( ir->duty_cycle * XTAL_FREQ / 1000, ir->carrier); code_block->carrier_space = DIV_ROUND_CLOSEST( (100 - ir->duty_cycle) * XTAL_FREQ / 1000, ir->carrier); for (i = 0; i < count; i++) { if (c >= ARRAY_SIZE(codes) - 1) { dev_warn(&rcdev->dev, "IR too long, cannot transmit\n"); return -EINVAL; } /* * Lengths more than 142220us cannot be encoded; also * this checks for multiply overflow */ if (txbuf[i] > 142220) return -EINVAL; l = DIV_ROUND_CLOSEST((XTAL_FREQ / 1000) * txbuf[i], 40000); if (i & 1) { s = find_slot(code_block->space, ARRAY_SIZE(code_block->space), l); if (s == -1) { dev_warn(&rcdev->dev, "Too many different lengths spaces, cannot transmit"); return -EINVAL; } /* We have a pulse and space */ codes[c++] = (p << 4) | s; } else { p = find_slot(code_block->pulse, ARRAY_SIZE(code_block->pulse), l); if (p == -1) { dev_warn(&rcdev->dev, "Too many different lengths pulses, cannot transmit"); return -EINVAL; } } } /* We have to encode the trailing pulse. 
Find the shortest space */ s = 0; for (i = 1; i < ARRAY_SIZE(code_block->space); i++) { u16 d = get_unaligned_be16(&code_block->space[i]); if (get_unaligned_be16(&code_block->space[s]) > d) s = i; } codes[c++] = (p << 4) | s; dev_dbg(&rcdev->dev, "generated %d codes\n", c); /* * Are the last N codes (so pulse + space) repeating 3 times? * if so we can shorten the codes list and use code 0xc0 to repeat * them. */ repeating = false; for (rep = c / 3; rep >= 1; rep--) { if (!memcmp(&codes[c - rep * 3], &codes[c - rep * 2], rep) && !cmp_no_trail(&codes[c - rep], &codes[c - rep * 2], rep)) { repeating = true; break; } } if (repeating) { /* first copy any leading non-repeating */ int leading = c - rep * 3; if (leading >= ARRAY_SIZE(code_block->codes) - 3 - rep) { dev_warn(&rcdev->dev, "IR too long, cannot transmit\n"); return -EINVAL; } dev_dbg(&rcdev->dev, "found trailing %d repeat\n", rep); copy_codes(code_block->codes, codes, leading); code_block->codes[leading] = 0x82; copy_codes(code_block->codes + leading + 1, codes + leading, rep); c = leading + 1 + rep; code_block->codes[c++] = 0xc0; } else { if (c >= ARRAY_SIZE(code_block->codes) - 3) { dev_warn(&rcdev->dev, "IR too long, cannot transmit\n"); return -EINVAL; } dev_dbg(&rcdev->dev, "found no trailing repeat\n"); code_block->codes[0] = 0x82; copy_codes(code_block->codes + 1, codes, c); c++; code_block->codes[c++] = 0xc4; } while (c < ARRAY_SIZE(code_block->codes)) code_block->codes[c++] = 0x83; return 0; } static int zilog_tx(struct rc_dev *rcdev, unsigned int *txbuf, unsigned int count) { struct IR_i2c *ir = rcdev->priv; struct code_block code_block = { .length = sizeof(code_block) }; u8 buf[2]; int ret, i; ret = zilog_ir_format(rcdev, txbuf, count, &code_block); if (ret) return ret; ret = mutex_lock_interruptible(&ir->lock); if (ret) return ret; ret = send_data_block(ir, ZILOG_UIR_END, &code_block); if (ret) goto out_unlock; ret = i2c_master_recv(ir->tx_c, buf, 1); if (ret != 1) { dev_err(&ir->rc->dev, 
"i2c_master_recv failed with %d\n", ret); goto out_unlock; } dev_dbg(&ir->rc->dev, "code set status: %02x\n", buf[0]); if (buf[0] != (ZILOG_STATUS_OK | ZILOG_STATUS_SET)) { dev_err(&ir->rc->dev, "unexpected IR TX response %02x\n", buf[0]); ret = -EIO; goto out_unlock; } buf[0] = 0x00; buf[1] = ZILOG_SEND; ret = i2c_master_send(ir->tx_c, buf, 2); if (ret != 2) { dev_err(&ir->rc->dev, "i2c_master_send failed with %d\n", ret); if (ret >= 0) ret = -EIO; goto out_unlock; } dev_dbg(&ir->rc->dev, "send command sent\n"); /* * This bit NAKs until the device is ready, so we retry it * sleeping a bit each time. This seems to be what the windows * driver does, approximately. * Try for up to 1s. */ for (i = 0; i < 20; ++i) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(50)); ret = i2c_master_send(ir->tx_c, buf, 1); if (ret == 1) break; dev_dbg(&ir->rc->dev, "NAK expected: i2c_master_send failed with %d (try %d)\n", ret, i + 1); } if (ret != 1) { dev_err(&ir->rc->dev, "IR TX chip never got ready: last i2c_master_send failed with %d\n", ret); if (ret >= 0) ret = -EIO; goto out_unlock; } ret = i2c_master_recv(ir->tx_c, buf, 1); if (ret != 1) { dev_err(&ir->rc->dev, "i2c_master_recv failed with %d\n", ret); ret = -EIO; goto out_unlock; } else if (buf[0] != ZILOG_STATUS_OK) { dev_err(&ir->rc->dev, "unexpected IR TX response #2: %02x\n", buf[0]); ret = -EIO; goto out_unlock; } dev_dbg(&ir->rc->dev, "transmit complete\n"); /* Oh good, it worked */ ret = count; out_unlock: mutex_unlock(&ir->lock); return ret; } static int zilog_tx_carrier(struct rc_dev *dev, u32 carrier) { struct IR_i2c *ir = dev->priv; if (carrier > 500000 || carrier < 20000) return -EINVAL; ir->carrier = carrier; return 0; } static int zilog_tx_duty_cycle(struct rc_dev *dev, u32 duty_cycle) { struct IR_i2c *ir = dev->priv; ir->duty_cycle = duty_cycle; return 0; } static int ir_probe(struct i2c_client *client) { const struct i2c_device_id *id = i2c_client_get_device_id(client); char 
*ir_codes = NULL; const char *name = NULL; u64 rc_proto = RC_PROTO_BIT_UNKNOWN; struct IR_i2c *ir; struct rc_dev *rc = NULL; struct i2c_adapter *adap = client->adapter; unsigned short addr = client->addr; bool probe_tx = (id->driver_data & FLAG_TX) != 0; int err; if ((id->driver_data & FLAG_HDPVR) && !enable_hdpvr) { dev_err(&client->dev, "IR for HDPVR is known to cause problems during recording, use enable_hdpvr modparam to enable\n"); return -ENODEV; } ir = devm_kzalloc(&client->dev, sizeof(*ir), GFP_KERNEL); if (!ir) return -ENOMEM; ir->c = client; ir->polling_interval = DEFAULT_POLLING_INTERVAL; i2c_set_clientdata(client, ir); switch(addr) { case 0x64: name = "Pixelview"; ir->get_key = get_key_pixelview; rc_proto = RC_PROTO_BIT_OTHER; ir_codes = RC_MAP_EMPTY; break; case 0x18: case 0x1f: case 0x1a: name = "Hauppauge"; ir->get_key = get_key_haup; rc_proto = RC_PROTO_BIT_RC5; ir_codes = RC_MAP_HAUPPAUGE; break; case 0x30: name = "KNC One"; ir->get_key = get_key_knc1; rc_proto = RC_PROTO_BIT_OTHER; ir_codes = RC_MAP_EMPTY; break; case 0x33: name = "Geniatech"; ir->get_key = get_key_geniatech; rc_proto = RC_PROTO_BIT_RC5; ir_codes = RC_MAP_TOTAL_MEDIA_IN_HAND_02; ir->old = 0xfc; break; case 0x6b: name = "FusionHDTV"; ir->get_key = get_key_fusionhdtv; rc_proto = RC_PROTO_BIT_UNKNOWN; ir_codes = RC_MAP_FUSIONHDTV_MCE; break; case 0x40: name = "AVerMedia Cardbus remote"; ir->get_key = get_key_avermedia_cardbus; rc_proto = RC_PROTO_BIT_OTHER; ir_codes = RC_MAP_AVERMEDIA_CARDBUS; break; case 0x41: name = "AVerMedia EM78P153"; ir->get_key = get_key_avermedia_cardbus; rc_proto = RC_PROTO_BIT_OTHER; /* RM-KV remote, seems to be same as RM-K6 */ ir_codes = RC_MAP_AVERMEDIA_M733A_RM_K6; break; case 0x71: name = "Hauppauge/Zilog Z8"; ir->get_key = get_key_haup_xvr; rc_proto = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_RC6_6A_32; ir_codes = RC_MAP_HAUPPAUGE; ir->polling_interval = 125; probe_tx = true; break; } /* Let the caller override settings */ if 
(client->dev.platform_data) { const struct IR_i2c_init_data *init_data = client->dev.platform_data; ir_codes = init_data->ir_codes; rc = init_data->rc_dev; name = init_data->name; if (init_data->type) rc_proto = init_data->type; if (init_data->polling_interval) ir->polling_interval = init_data->polling_interval; switch (init_data->internal_get_key_func) { case IR_KBD_GET_KEY_CUSTOM: /* The bridge driver provided us its own function */ ir->get_key = init_data->get_key; break; case IR_KBD_GET_KEY_PIXELVIEW: ir->get_key = get_key_pixelview; break; case IR_KBD_GET_KEY_HAUP: ir->get_key = get_key_haup; break; case IR_KBD_GET_KEY_KNC1: ir->get_key = get_key_knc1; break; case IR_KBD_GET_KEY_GENIATECH: ir->get_key = get_key_geniatech; break; case IR_KBD_GET_KEY_FUSIONHDTV: ir->get_key = get_key_fusionhdtv; break; case IR_KBD_GET_KEY_HAUP_XVR: ir->get_key = get_key_haup_xvr; break; case IR_KBD_GET_KEY_AVERMEDIA_CARDBUS: ir->get_key = get_key_avermedia_cardbus; break; } } if (!rc) { /* * If platform_data doesn't specify rc_dev, initialize it * internally */ rc = rc_allocate_device(RC_DRIVER_SCANCODE); if (!rc) return -ENOMEM; } ir->rc = rc; /* Make sure we are all setup before going on */ if (!name || !ir->get_key || !rc_proto || !ir_codes) { dev_warn(&client->dev, "Unsupported device at address 0x%02x\n", addr); err = -ENODEV; goto err_out_free; } ir->ir_codes = ir_codes; snprintf(ir->phys, sizeof(ir->phys), "%s/%s", dev_name(&adap->dev), dev_name(&client->dev)); /* * Initialize input_dev fields * It doesn't make sense to allow overriding them via platform_data */ rc->input_id.bustype = BUS_I2C; rc->input_phys = ir->phys; rc->device_name = name; rc->dev.parent = &client->dev; rc->priv = ir; rc->open = ir_open; rc->close = ir_close; /* * Initialize the other fields of rc_dev */ rc->map_name = ir->ir_codes; rc->allowed_protocols = rc_proto; if (!rc->driver_name) rc->driver_name = KBUILD_MODNAME; mutex_init(&ir->lock); INIT_DELAYED_WORK(&ir->work, ir_work); if (probe_tx) { 
ir->tx_c = i2c_new_dummy_device(client->adapter, 0x70); if (IS_ERR(ir->tx_c)) { dev_err(&client->dev, "failed to setup tx i2c address"); err = PTR_ERR(ir->tx_c); goto err_out_free; } else if (!zilog_init(ir)) { ir->carrier = 38000; ir->duty_cycle = 40; rc->tx_ir = zilog_tx; rc->s_tx_carrier = zilog_tx_carrier; rc->s_tx_duty_cycle = zilog_tx_duty_cycle; } } err = rc_register_device(rc); if (err) goto err_out_free; return 0; err_out_free: if (!IS_ERR(ir->tx_c)) i2c_unregister_device(ir->tx_c); /* Only frees rc if it were allocated internally */ rc_free_device(rc); return err; } static void ir_remove(struct i2c_client *client) { struct IR_i2c *ir = i2c_get_clientdata(client); cancel_delayed_work_sync(&ir->work); i2c_unregister_device(ir->tx_c); rc_unregister_device(ir->rc); } static const struct i2c_device_id ir_kbd_id[] = { /* Generic entry for any IR receiver */ { "ir_video", 0 }, /* IR device specific entries should be added here */ { "ir_z8f0811_haup", FLAG_TX }, { "ir_z8f0811_hdpvr", FLAG_TX | FLAG_HDPVR }, { } }; MODULE_DEVICE_TABLE(i2c, ir_kbd_id); static struct i2c_driver ir_kbd_driver = { .driver = { .name = "ir-kbd-i2c", }, .probe = ir_probe, .remove = ir_remove, .id_table = ir_kbd_id, }; module_i2c_driver(ir_kbd_driver); /* ----------------------------------------------------------------------- */ MODULE_AUTHOR("Gerd Knorr, Michal Kochanowicz, Christoph Bartelmus, Ulrich Mueller"); MODULE_DESCRIPTION("input driver for i2c IR remote controls"); MODULE_LICENSE("GPL");
linux-master
drivers/media/i2c/ir-kbd-i2c.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2015--2017 Intel Corporation.

#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>

#define DW9714_NAME		"dw9714"
#define DW9714_MAX_FOCUS_POS	1023
/*
 * This sets the minimum granularity for the focus positions.
 * A value of 1 gives maximum accuracy for a desired focus position
 */
#define DW9714_FOCUS_STEPS	1
/*
 * This acts as the minimum granularity of lens movement.
 * Keep this value power of 2, so the control steps can be
 * uniformly adjusted for gradual lens movement, with desired
 * number of control steps.
 */
#define DW9714_CTRL_STEPS	16
#define DW9714_CTRL_DELAY_US	1000
/*
 * S[3:2] = 0x00, codes per step for "Linear Slope Control"
 * S[1:0] = 0x00, step period
 */
#define DW9714_DEFAULT_S 0x0
/* Pack a focus position and slope code into the 16-bit wire format */
#define DW9714_VAL(data, s) ((data) << 4 | (s))

/* dw9714 device structure */
struct dw9714_device {
	struct v4l2_ctrl_handler ctrls_vcm;	/* handler for the focus control */
	struct v4l2_subdev sd;			/* lens subdev exposed to userspace */
	u16 current_val;			/* last focus position written */
	struct regulator *vcc;			/* VCM power supply */
};

static inline struct dw9714_device *to_dw9714_vcm(struct v4l2_ctrl *ctrl)
{
	return container_of(ctrl->handler, struct dw9714_device, ctrls_vcm);
}

static inline struct dw9714_device *sd_to_dw9714_vcm(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct dw9714_device, sd);
}

/* Send one 16-bit big-endian word to the device; returns 0 or -EIO */
static int dw9714_i2c_write(struct i2c_client *client, u16 data)
{
	int ret;
	__be16 val = cpu_to_be16(data);

	ret = i2c_master_send(client, (const char *)&val, sizeof(val));
	if (ret != sizeof(val)) {
		dev_err(&client->dev, "I2C write fail\n");
		return -EIO;
	}
	return 0;
}

/* Move the lens to @val and remember it for later suspend/resume replay */
static int dw9714_t_focus_vcm(struct dw9714_device *dw9714_dev, u16 val)
{
	struct i2c_client *client = v4l2_get_subdevdata(&dw9714_dev->sd);

	dw9714_dev->current_val = val;

	return dw9714_i2c_write(client, DW9714_VAL(val, DW9714_DEFAULT_S));
}

/* s_ctrl handler: only V4L2_CID_FOCUS_ABSOLUTE is supported */
static int dw9714_set_ctrl(struct v4l2_ctrl *ctrl)
{
	struct dw9714_device *dev_vcm = to_dw9714_vcm(ctrl);

	if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE)
		return dw9714_t_focus_vcm(dev_vcm, ctrl->val);

	return -EINVAL;
}

static const struct v4l2_ctrl_ops dw9714_vcm_ctrl_ops = {
	.s_ctrl = dw9714_set_ctrl,
};

/* Keep the device powered while the device node is open */
static int dw9714_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	return pm_runtime_resume_and_get(sd->dev);
}

static int dw9714_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	pm_runtime_put(sd->dev);

	return 0;
}

static const struct v4l2_subdev_internal_ops dw9714_int_ops = {
	.open = dw9714_open,
	.close = dw9714_close,
};

static const struct v4l2_subdev_core_ops dw9714_core_ops = {
	.log_status = v4l2_ctrl_subdev_log_status,
	.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

static const struct v4l2_subdev_ops dw9714_ops = {
	.core = &dw9714_core_ops,
};

/* Undo subdev/control/media-entity registration (reverse of probe) */
static void dw9714_subdev_cleanup(struct dw9714_device *dw9714_dev)
{
	v4l2_async_unregister_subdev(&dw9714_dev->sd);
	v4l2_ctrl_handler_free(&dw9714_dev->ctrls_vcm);
	media_entity_cleanup(&dw9714_dev->sd.entity);
}

/* Register the single absolute-focus control; returns the handler error */
static int dw9714_init_controls(struct dw9714_device *dev_vcm)
{
	struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm;
	const struct v4l2_ctrl_ops *ops = &dw9714_vcm_ctrl_ops;

	v4l2_ctrl_handler_init(hdl, 1);

	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
			  0, DW9714_MAX_FOCUS_POS, DW9714_FOCUS_STEPS, 0);

	if (hdl->error)
		dev_err(dev_vcm->sd.dev, "%s fail error: 0x%x\n",
			__func__, hdl->error);
	dev_vcm->sd.ctrl_handler = hdl;

	return hdl->error;
}

static int dw9714_probe(struct i2c_client *client)
{
	struct dw9714_device *dw9714_dev;
	int rval;

	dw9714_dev = devm_kzalloc(&client->dev, sizeof(*dw9714_dev),
				  GFP_KERNEL);
	if (dw9714_dev == NULL)
		return -ENOMEM;

	dw9714_dev->vcc = devm_regulator_get(&client->dev, "vcc");
	if (IS_ERR(dw9714_dev->vcc))
		return PTR_ERR(dw9714_dev->vcc);

	rval = regulator_enable(dw9714_dev->vcc);
	if (rval < 0) {
		dev_err(&client->dev, "failed to enable vcc: %d\n", rval);
		return rval;
	}

	v4l2_i2c_subdev_init(&dw9714_dev->sd, client, &dw9714_ops);
	dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
				V4L2_SUBDEV_FL_HAS_EVENTS;
	dw9714_dev->sd.internal_ops = &dw9714_int_ops;

	rval = dw9714_init_controls(dw9714_dev);
	if (rval)
		goto err_cleanup;

	/* Lens entity: registered with zero pads */
	rval = media_entity_pads_init(&dw9714_dev->sd.entity, 0, NULL);
	if (rval < 0)
		goto err_cleanup;

	dw9714_dev->sd.entity.function = MEDIA_ENT_F_LENS;

	rval = v4l2_async_register_subdev(&dw9714_dev->sd);
	if (rval < 0)
		goto err_cleanup;

	/* Device is already powered (vcc on): mark runtime-PM active */
	pm_runtime_set_active(&client->dev);
	pm_runtime_enable(&client->dev);
	pm_runtime_idle(&client->dev);

	return 0;

err_cleanup:
	regulator_disable(dw9714_dev->vcc);
	v4l2_ctrl_handler_free(&dw9714_dev->ctrls_vcm);
	media_entity_cleanup(&dw9714_dev->sd.entity);

	return rval;
}

static void dw9714_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
	int ret;

	pm_runtime_disable(&client->dev);
	/* Only drop vcc if runtime PM had not already powered it off */
	if (!pm_runtime_status_suspended(&client->dev)) {
		ret = regulator_disable(dw9714_dev->vcc);
		if (ret) {
			dev_err(&client->dev,
				"Failed to disable vcc: %d\n", ret);
		}
	}
	pm_runtime_set_suspended(&client->dev);
	dw9714_subdev_cleanup(dw9714_dev);
}

/*
 * This function sets the vcm position, so it consumes least current
 * The lens position is gradually moved in units of DW9714_CTRL_STEPS,
 * to make the movements smoothly.
 */
static int __maybe_unused dw9714_vcm_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
	int ret, val;

	if (pm_runtime_suspended(&client->dev))
		return 0;

	/* Walk down from the current position to 0 in CTRL_STEPS units */
	for (val = dw9714_dev->current_val & ~(DW9714_CTRL_STEPS - 1);
	     val >= 0; val -= DW9714_CTRL_STEPS) {
		ret = dw9714_i2c_write(client,
				       DW9714_VAL(val, DW9714_DEFAULT_S));
		if (ret)
			dev_err_once(dev, "%s I2C failure: %d", __func__, ret);
		usleep_range(DW9714_CTRL_DELAY_US, DW9714_CTRL_DELAY_US + 10);
	}

	ret = regulator_disable(dw9714_dev->vcc);
	if (ret)
		dev_err(dev, "Failed to disable vcc: %d\n", ret);

	return ret;
}

/*
 * This function sets the vcm position to the value set by the user
 * through v4l2_ctrl_ops s_ctrl handler
 * The lens position is gradually moved in units of DW9714_CTRL_STEPS,
 * to make the movements smoothly.
 */
static int __maybe_unused dw9714_vcm_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
	int ret, val;

	if (pm_runtime_suspended(&client->dev))
		return 0;

	ret = regulator_enable(dw9714_dev->vcc);
	if (ret) {
		dev_err(dev, "Failed to enable vcc: %d\n", ret);
		return ret;
	}
	/* Give the supply time to settle before driving the lens */
	usleep_range(1000, 2000);

	/* Walk back up towards the last user-requested position */
	for (val = dw9714_dev->current_val % DW9714_CTRL_STEPS;
	     val < dw9714_dev->current_val + DW9714_CTRL_STEPS - 1;
	     val += DW9714_CTRL_STEPS) {
		ret = dw9714_i2c_write(client,
				       DW9714_VAL(val, DW9714_DEFAULT_S));
		if (ret)
			dev_err_ratelimited(dev, "%s I2C failure: %d",
					    __func__, ret);
		usleep_range(DW9714_CTRL_DELAY_US, DW9714_CTRL_DELAY_US + 10);
	}

	return 0;
}

static const struct i2c_device_id dw9714_id_table[] = {
	{ DW9714_NAME, 0 },
	{ { 0 } }
};
MODULE_DEVICE_TABLE(i2c, dw9714_id_table);

static const struct of_device_id dw9714_of_table[] = {
	{ .compatible = "dongwoon,dw9714" },
	{ { 0 } }
};
MODULE_DEVICE_TABLE(of, dw9714_of_table);

static const struct dev_pm_ops dw9714_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume)
	SET_RUNTIME_PM_OPS(dw9714_vcm_suspend, dw9714_vcm_resume, NULL)
};

static struct i2c_driver dw9714_i2c_driver = {
	.driver = {
		.name = DW9714_NAME,
		.pm = &dw9714_pm_ops,
		.of_match_table = dw9714_of_table,
	},
	.probe = dw9714_probe,
	.remove = dw9714_remove,
	.id_table = dw9714_id_table,
};

module_i2c_driver(dw9714_i2c_driver);

MODULE_AUTHOR("Tianshu Qiu <[email protected]>");
MODULE_AUTHOR("Jian Xu Zheng");
MODULE_AUTHOR("Yuning Pu <[email protected]>");
MODULE_AUTHOR("Jouni Ukkonen <[email protected]>");
MODULE_AUTHOR("Tommi Franttila <[email protected]>");
MODULE_DESCRIPTION("DW9714 VCM driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/media/i2c/dw9714.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Intel Corporation.

#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>

/* Register access widths, in bytes */
#define OV13858_REG_VALUE_08BIT		1
#define OV13858_REG_VALUE_16BIT		2
#define OV13858_REG_VALUE_24BIT		3

#define OV13858_REG_MODE_SELECT		0x0100
#define OV13858_MODE_STANDBY		0x00
#define OV13858_MODE_STREAMING		0x01

#define OV13858_REG_SOFTWARE_RST	0x0103
#define OV13858_SOFTWARE_RST		0x01

/* PLL1 generates PCLK and MIPI_PHY_CLK */
#define OV13858_REG_PLL1_CTRL_0		0x0300
#define OV13858_REG_PLL1_CTRL_1		0x0301
#define OV13858_REG_PLL1_CTRL_2		0x0302
#define OV13858_REG_PLL1_CTRL_3		0x0303
#define OV13858_REG_PLL1_CTRL_4		0x0304
#define OV13858_REG_PLL1_CTRL_5		0x0305

/* PLL2 generates DAC_CLK, SCLK and SRAM_CLK */
#define OV13858_REG_PLL2_CTRL_B		0x030b
#define OV13858_REG_PLL2_CTRL_C		0x030c
#define OV13858_REG_PLL2_CTRL_D		0x030d
#define OV13858_REG_PLL2_CTRL_E		0x030e
#define OV13858_REG_PLL2_CTRL_F		0x030f
#define OV13858_REG_PLL2_CTRL_12	0x0312
#define OV13858_REG_MIPI_SC_CTRL0	0x3016
#define OV13858_REG_MIPI_SC_CTRL1	0x3022

/* Chip ID */
#define OV13858_REG_CHIP_ID		0x300a
#define OV13858_CHIP_ID			0x00d855

/* V_TIMING internal */
#define OV13858_REG_VTS			0x380e
#define OV13858_VTS_30FPS		0x0c8e /* 30 fps */
#define OV13858_VTS_60FPS		0x0648 /* 60 fps */
#define OV13858_VTS_MAX			0x7fff

/* HBLANK control - read only */
#define OV13858_PPL_270MHZ		2244
#define OV13858_PPL_540MHZ		4488

/* Exposure control */
#define OV13858_REG_EXPOSURE		0x3500
#define OV13858_EXPOSURE_MIN		4
#define OV13858_EXPOSURE_STEP		1
#define OV13858_EXPOSURE_DEFAULT	0x640

/* Analog gain control */
#define OV13858_REG_ANALOG_GAIN		0x3508
#define OV13858_ANA_GAIN_MIN		0
#define OV13858_ANA_GAIN_MAX		0x1fff
#define OV13858_ANA_GAIN_STEP		1
#define OV13858_ANA_GAIN_DEFAULT	0x80

/* Digital gain control */
#define OV13858_REG_B_MWB_GAIN		0x5100
#define OV13858_REG_G_MWB_GAIN		0x5102
#define OV13858_REG_R_MWB_GAIN		0x5104
#define OV13858_DGTL_GAIN_MIN		0
#define OV13858_DGTL_GAIN_MAX		16384	/* Max = 16 X */
#define OV13858_DGTL_GAIN_DEFAULT	1024	/* Default gain = 1 X */
#define OV13858_DGTL_GAIN_STEP		1	/* Each step = 1/1024 */

/* Test Pattern Control */
#define OV13858_REG_TEST_PATTERN	0x4503
#define OV13858_TEST_PATTERN_ENABLE	BIT(7)
#define OV13858_TEST_PATTERN_MASK	0xfc

/* Number of frames to skip */
#define OV13858_NUM_OF_SKIP_FRAMES	2

/* One register address/value pair */
struct ov13858_reg {
	u16 address;
	u8 val;
};

/* A register table together with its length */
struct ov13858_reg_list {
	u32 num_of_regs;
	const struct ov13858_reg *regs;
};

/* Link frequency config */
struct ov13858_link_freq_config {
	u32 pixels_per_line;

	/* PLL registers for this link frequency */
	struct ov13858_reg_list reg_list;
};

/* Mode : resolution and related config&values */
struct ov13858_mode {
	/* Frame width */
	u32 width;
	/* Frame height */
	u32 height;

	/* V-timing */
	u32 vts_def;
	u32 vts_min;

	/* Index of Link frequency config to be used */
	u32 link_freq_index;
	/* Default register values */
	struct ov13858_reg_list reg_list;
};

/* 4224x3136 needs 1080Mbps/lane, 4 lanes */
static const struct ov13858_reg mipi_data_rate_1080mbps[] = {
	/* PLL1 registers */
	{OV13858_REG_PLL1_CTRL_0, 0x07},
	{OV13858_REG_PLL1_CTRL_1, 0x01},
	{OV13858_REG_PLL1_CTRL_2, 0xc2},
	{OV13858_REG_PLL1_CTRL_3, 0x00},
	{OV13858_REG_PLL1_CTRL_4, 0x00},
	{OV13858_REG_PLL1_CTRL_5, 0x01},

	/* PLL2 registers */
	{OV13858_REG_PLL2_CTRL_B, 0x05},
	{OV13858_REG_PLL2_CTRL_C, 0x01},
	{OV13858_REG_PLL2_CTRL_D, 0x0e},
	{OV13858_REG_PLL2_CTRL_E, 0x05},
	{OV13858_REG_PLL2_CTRL_F, 0x01},
	{OV13858_REG_PLL2_CTRL_12, 0x01},
	{OV13858_REG_MIPI_SC_CTRL0, 0x72},
	{OV13858_REG_MIPI_SC_CTRL1, 0x01},
};

/*
 * 2112x1568, 2112x1188, 1056x784 need 540Mbps/lane,
 * 4 lanes
 */
static const struct ov13858_reg mipi_data_rate_540mbps[] = {
	/* PLL1 registers */
	{OV13858_REG_PLL1_CTRL_0, 0x07},
	{OV13858_REG_PLL1_CTRL_1, 0x01},
	{OV13858_REG_PLL1_CTRL_2, 0xc2},
{OV13858_REG_PLL1_CTRL_3, 0x01}, {OV13858_REG_PLL1_CTRL_4, 0x00}, {OV13858_REG_PLL1_CTRL_5, 0x01}, /* PLL2 registers */ {OV13858_REG_PLL2_CTRL_B, 0x05}, {OV13858_REG_PLL2_CTRL_C, 0x01}, {OV13858_REG_PLL2_CTRL_D, 0x0e}, {OV13858_REG_PLL2_CTRL_E, 0x05}, {OV13858_REG_PLL2_CTRL_F, 0x01}, {OV13858_REG_PLL2_CTRL_12, 0x01}, {OV13858_REG_MIPI_SC_CTRL0, 0x72}, {OV13858_REG_MIPI_SC_CTRL1, 0x01}, }; static const struct ov13858_reg mode_4224x3136_regs[] = { {0x3013, 0x32}, {0x301b, 0xf0}, {0x301f, 0xd0}, {0x3106, 0x15}, {0x3107, 0x23}, {0x350a, 0x00}, {0x350e, 0x00}, {0x3510, 0x00}, {0x3511, 0x02}, {0x3512, 0x00}, {0x3600, 0x2b}, {0x3601, 0x52}, {0x3602, 0x60}, {0x3612, 0x05}, {0x3613, 0xa4}, {0x3620, 0x80}, {0x3621, 0x10}, {0x3622, 0x30}, {0x3624, 0x1c}, {0x3640, 0x10}, {0x3641, 0x70}, {0x3660, 0x04}, {0x3661, 0x80}, {0x3662, 0x12}, {0x3664, 0x73}, {0x3665, 0xa7}, {0x366e, 0xff}, {0x366f, 0xf4}, {0x3674, 0x00}, {0x3679, 0x0c}, {0x367f, 0x01}, {0x3680, 0x0c}, {0x3681, 0x50}, {0x3682, 0x50}, {0x3683, 0xa9}, {0x3684, 0xa9}, {0x3709, 0x5f}, {0x3714, 0x24}, {0x371a, 0x3e}, {0x3737, 0x04}, {0x3738, 0xcc}, {0x3739, 0x12}, {0x373d, 0x26}, {0x3764, 0x20}, {0x3765, 0x20}, {0x37a1, 0x36}, {0x37a8, 0x3b}, {0x37ab, 0x31}, {0x37c2, 0x04}, {0x37c3, 0xf1}, {0x37c5, 0x00}, {0x37d8, 0x03}, {0x37d9, 0x0c}, {0x37da, 0xc2}, {0x37dc, 0x02}, {0x37e0, 0x00}, {0x37e1, 0x0a}, {0x37e2, 0x14}, {0x37e3, 0x04}, {0x37e4, 0x2a}, {0x37e5, 0x03}, {0x37e6, 0x04}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x08}, {0x3804, 0x10}, {0x3805, 0x9f}, {0x3806, 0x0c}, {0x3807, 0x57}, {0x3808, 0x10}, {0x3809, 0x80}, {0x380a, 0x0c}, {0x380b, 0x40}, {0x380c, 0x04}, {0x380d, 0x62}, {0x380e, 0x0c}, {0x380f, 0x8e}, {0x3811, 0x04}, {0x3813, 0x05}, {0x3814, 0x01}, {0x3815, 0x01}, {0x3816, 0x01}, {0x3817, 0x01}, {0x3820, 0xa8}, {0x3821, 0x00}, {0x3822, 0xc2}, {0x3823, 0x18}, {0x3826, 0x11}, {0x3827, 0x1c}, {0x3829, 0x03}, {0x3832, 0x00}, {0x3c80, 0x00}, {0x3c87, 0x01}, {0x3c8c, 0x19}, {0x3c8d, 0x1c}, {0x3c90, 
0x00}, {0x3c91, 0x00}, {0x3c92, 0x00}, {0x3c93, 0x00}, {0x3c94, 0x40}, {0x3c95, 0x54}, {0x3c96, 0x34}, {0x3c97, 0x04}, {0x3c98, 0x00}, {0x3d8c, 0x73}, {0x3d8d, 0xc0}, {0x3f00, 0x0b}, {0x3f03, 0x00}, {0x4001, 0xe0}, {0x4008, 0x00}, {0x4009, 0x0f}, {0x4011, 0xf0}, {0x4017, 0x08}, {0x4050, 0x04}, {0x4051, 0x0b}, {0x4052, 0x00}, {0x4053, 0x80}, {0x4054, 0x00}, {0x4055, 0x80}, {0x4056, 0x00}, {0x4057, 0x80}, {0x4058, 0x00}, {0x4059, 0x80}, {0x405e, 0x20}, {0x4500, 0x07}, {0x4503, 0x00}, {0x450a, 0x04}, {0x4809, 0x04}, {0x480c, 0x12}, {0x481f, 0x30}, {0x4833, 0x10}, {0x4837, 0x0e}, {0x4902, 0x01}, {0x4d00, 0x03}, {0x4d01, 0xc9}, {0x4d02, 0xbc}, {0x4d03, 0xd7}, {0x4d04, 0xf0}, {0x4d05, 0xa2}, {0x5000, 0xfd}, {0x5001, 0x01}, {0x5040, 0x39}, {0x5041, 0x10}, {0x5042, 0x10}, {0x5043, 0x84}, {0x5044, 0x62}, {0x5180, 0x00}, {0x5181, 0x10}, {0x5182, 0x02}, {0x5183, 0x0f}, {0x5200, 0x1b}, {0x520b, 0x07}, {0x520c, 0x0f}, {0x5300, 0x04}, {0x5301, 0x0c}, {0x5302, 0x0c}, {0x5303, 0x0f}, {0x5304, 0x00}, {0x5305, 0x70}, {0x5306, 0x00}, {0x5307, 0x80}, {0x5308, 0x00}, {0x5309, 0xa5}, {0x530a, 0x00}, {0x530b, 0xd3}, {0x530c, 0x00}, {0x530d, 0xf0}, {0x530e, 0x01}, {0x530f, 0x10}, {0x5310, 0x01}, {0x5311, 0x20}, {0x5312, 0x01}, {0x5313, 0x20}, {0x5314, 0x01}, {0x5315, 0x20}, {0x5316, 0x08}, {0x5317, 0x08}, {0x5318, 0x10}, {0x5319, 0x88}, {0x531a, 0x88}, {0x531b, 0xa9}, {0x531c, 0xaa}, {0x531d, 0x0a}, {0x5405, 0x02}, {0x5406, 0x67}, {0x5407, 0x01}, {0x5408, 0x4a}, }; static const struct ov13858_reg mode_2112x1568_regs[] = { {0x3013, 0x32}, {0x301b, 0xf0}, {0x301f, 0xd0}, {0x3106, 0x15}, {0x3107, 0x23}, {0x350a, 0x00}, {0x350e, 0x00}, {0x3510, 0x00}, {0x3511, 0x02}, {0x3512, 0x00}, {0x3600, 0x2b}, {0x3601, 0x52}, {0x3602, 0x60}, {0x3612, 0x05}, {0x3613, 0xa4}, {0x3620, 0x80}, {0x3621, 0x10}, {0x3622, 0x30}, {0x3624, 0x1c}, {0x3640, 0x10}, {0x3641, 0x70}, {0x3660, 0x04}, {0x3661, 0x80}, {0x3662, 0x10}, {0x3664, 0x73}, {0x3665, 0xa7}, {0x366e, 0xff}, {0x366f, 0xf4}, {0x3674, 0x00}, {0x3679, 
0x0c}, {0x367f, 0x01}, {0x3680, 0x0c}, {0x3681, 0x50}, {0x3682, 0x50}, {0x3683, 0xa9}, {0x3684, 0xa9}, {0x3709, 0x5f}, {0x3714, 0x28}, {0x371a, 0x3e}, {0x3737, 0x08}, {0x3738, 0xcc}, {0x3739, 0x20}, {0x373d, 0x26}, {0x3764, 0x20}, {0x3765, 0x20}, {0x37a1, 0x36}, {0x37a8, 0x3b}, {0x37ab, 0x31}, {0x37c2, 0x14}, {0x37c3, 0xf1}, {0x37c5, 0x00}, {0x37d8, 0x03}, {0x37d9, 0x0c}, {0x37da, 0xc2}, {0x37dc, 0x02}, {0x37e0, 0x00}, {0x37e1, 0x0a}, {0x37e2, 0x14}, {0x37e3, 0x08}, {0x37e4, 0x38}, {0x37e5, 0x03}, {0x37e6, 0x08}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x10}, {0x3805, 0x9f}, {0x3806, 0x0c}, {0x3807, 0x5f}, {0x3808, 0x08}, {0x3809, 0x40}, {0x380a, 0x06}, {0x380b, 0x20}, {0x380c, 0x04}, {0x380d, 0x62}, {0x380e, 0x0c}, {0x380f, 0x8e}, {0x3811, 0x04}, {0x3813, 0x05}, {0x3814, 0x03}, {0x3815, 0x01}, {0x3816, 0x03}, {0x3817, 0x01}, {0x3820, 0xab}, {0x3821, 0x00}, {0x3822, 0xc2}, {0x3823, 0x18}, {0x3826, 0x04}, {0x3827, 0x90}, {0x3829, 0x07}, {0x3832, 0x00}, {0x3c80, 0x00}, {0x3c87, 0x01}, {0x3c8c, 0x19}, {0x3c8d, 0x1c}, {0x3c90, 0x00}, {0x3c91, 0x00}, {0x3c92, 0x00}, {0x3c93, 0x00}, {0x3c94, 0x40}, {0x3c95, 0x54}, {0x3c96, 0x34}, {0x3c97, 0x04}, {0x3c98, 0x00}, {0x3d8c, 0x73}, {0x3d8d, 0xc0}, {0x3f00, 0x0b}, {0x3f03, 0x00}, {0x4001, 0xe0}, {0x4008, 0x00}, {0x4009, 0x0d}, {0x4011, 0xf0}, {0x4017, 0x08}, {0x4050, 0x04}, {0x4051, 0x0b}, {0x4052, 0x00}, {0x4053, 0x80}, {0x4054, 0x00}, {0x4055, 0x80}, {0x4056, 0x00}, {0x4057, 0x80}, {0x4058, 0x00}, {0x4059, 0x80}, {0x405e, 0x20}, {0x4500, 0x07}, {0x4503, 0x00}, {0x450a, 0x04}, {0x4809, 0x04}, {0x480c, 0x12}, {0x481f, 0x30}, {0x4833, 0x10}, {0x4837, 0x1c}, {0x4902, 0x01}, {0x4d00, 0x03}, {0x4d01, 0xc9}, {0x4d02, 0xbc}, {0x4d03, 0xd7}, {0x4d04, 0xf0}, {0x4d05, 0xa2}, {0x5000, 0xfd}, {0x5001, 0x01}, {0x5040, 0x39}, {0x5041, 0x10}, {0x5042, 0x10}, {0x5043, 0x84}, {0x5044, 0x62}, {0x5180, 0x00}, {0x5181, 0x10}, {0x5182, 0x02}, {0x5183, 0x0f}, {0x5200, 0x1b}, {0x520b, 0x07}, {0x520c, 0x0f}, {0x5300, 
0x04}, {0x5301, 0x0c}, {0x5302, 0x0c}, {0x5303, 0x0f}, {0x5304, 0x00}, {0x5305, 0x70}, {0x5306, 0x00}, {0x5307, 0x80}, {0x5308, 0x00}, {0x5309, 0xa5}, {0x530a, 0x00}, {0x530b, 0xd3}, {0x530c, 0x00}, {0x530d, 0xf0}, {0x530e, 0x01}, {0x530f, 0x10}, {0x5310, 0x01}, {0x5311, 0x20}, {0x5312, 0x01}, {0x5313, 0x20}, {0x5314, 0x01}, {0x5315, 0x20}, {0x5316, 0x08}, {0x5317, 0x08}, {0x5318, 0x10}, {0x5319, 0x88}, {0x531a, 0x88}, {0x531b, 0xa9}, {0x531c, 0xaa}, {0x531d, 0x0a}, {0x5405, 0x02}, {0x5406, 0x67}, {0x5407, 0x01}, {0x5408, 0x4a}, }; static const struct ov13858_reg mode_2112x1188_regs[] = { {0x3013, 0x32}, {0x301b, 0xf0}, {0x301f, 0xd0}, {0x3106, 0x15}, {0x3107, 0x23}, {0x350a, 0x00}, {0x350e, 0x00}, {0x3510, 0x00}, {0x3511, 0x02}, {0x3512, 0x00}, {0x3600, 0x2b}, {0x3601, 0x52}, {0x3602, 0x60}, {0x3612, 0x05}, {0x3613, 0xa4}, {0x3620, 0x80}, {0x3621, 0x10}, {0x3622, 0x30}, {0x3624, 0x1c}, {0x3640, 0x10}, {0x3641, 0x70}, {0x3660, 0x04}, {0x3661, 0x80}, {0x3662, 0x10}, {0x3664, 0x73}, {0x3665, 0xa7}, {0x366e, 0xff}, {0x366f, 0xf4}, {0x3674, 0x00}, {0x3679, 0x0c}, {0x367f, 0x01}, {0x3680, 0x0c}, {0x3681, 0x50}, {0x3682, 0x50}, {0x3683, 0xa9}, {0x3684, 0xa9}, {0x3709, 0x5f}, {0x3714, 0x28}, {0x371a, 0x3e}, {0x3737, 0x08}, {0x3738, 0xcc}, {0x3739, 0x20}, {0x373d, 0x26}, {0x3764, 0x20}, {0x3765, 0x20}, {0x37a1, 0x36}, {0x37a8, 0x3b}, {0x37ab, 0x31}, {0x37c2, 0x14}, {0x37c3, 0xf1}, {0x37c5, 0x00}, {0x37d8, 0x03}, {0x37d9, 0x0c}, {0x37da, 0xc2}, {0x37dc, 0x02}, {0x37e0, 0x00}, {0x37e1, 0x0a}, {0x37e2, 0x14}, {0x37e3, 0x08}, {0x37e4, 0x38}, {0x37e5, 0x03}, {0x37e6, 0x08}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x01}, {0x3803, 0x84}, {0x3804, 0x10}, {0x3805, 0x9f}, {0x3806, 0x0a}, {0x3807, 0xd3}, {0x3808, 0x08}, {0x3809, 0x40}, {0x380a, 0x04}, {0x380b, 0xa4}, {0x380c, 0x04}, {0x380d, 0x62}, {0x380e, 0x0c}, {0x380f, 0x8e}, {0x3811, 0x08}, {0x3813, 0x03}, {0x3814, 0x03}, {0x3815, 0x01}, {0x3816, 0x03}, {0x3817, 0x01}, {0x3820, 0xab}, {0x3821, 0x00}, {0x3822, 0xc2}, {0x3823, 
0x18}, {0x3826, 0x04}, {0x3827, 0x90}, {0x3829, 0x07}, {0x3832, 0x00}, {0x3c80, 0x00}, {0x3c87, 0x01}, {0x3c8c, 0x19}, {0x3c8d, 0x1c}, {0x3c90, 0x00}, {0x3c91, 0x00}, {0x3c92, 0x00}, {0x3c93, 0x00}, {0x3c94, 0x40}, {0x3c95, 0x54}, {0x3c96, 0x34}, {0x3c97, 0x04}, {0x3c98, 0x00}, {0x3d8c, 0x73}, {0x3d8d, 0xc0}, {0x3f00, 0x0b}, {0x3f03, 0x00}, {0x4001, 0xe0}, {0x4008, 0x00}, {0x4009, 0x0d}, {0x4011, 0xf0}, {0x4017, 0x08}, {0x4050, 0x04}, {0x4051, 0x0b}, {0x4052, 0x00}, {0x4053, 0x80}, {0x4054, 0x00}, {0x4055, 0x80}, {0x4056, 0x00}, {0x4057, 0x80}, {0x4058, 0x00}, {0x4059, 0x80}, {0x405e, 0x20}, {0x4500, 0x07}, {0x4503, 0x00}, {0x450a, 0x04}, {0x4809, 0x04}, {0x480c, 0x12}, {0x481f, 0x30}, {0x4833, 0x10}, {0x4837, 0x1c}, {0x4902, 0x01}, {0x4d00, 0x03}, {0x4d01, 0xc9}, {0x4d02, 0xbc}, {0x4d03, 0xd7}, {0x4d04, 0xf0}, {0x4d05, 0xa2}, {0x5000, 0xfd}, {0x5001, 0x01}, {0x5040, 0x39}, {0x5041, 0x10}, {0x5042, 0x10}, {0x5043, 0x84}, {0x5044, 0x62}, {0x5180, 0x00}, {0x5181, 0x10}, {0x5182, 0x02}, {0x5183, 0x0f}, {0x5200, 0x1b}, {0x520b, 0x07}, {0x520c, 0x0f}, {0x5300, 0x04}, {0x5301, 0x0c}, {0x5302, 0x0c}, {0x5303, 0x0f}, {0x5304, 0x00}, {0x5305, 0x70}, {0x5306, 0x00}, {0x5307, 0x80}, {0x5308, 0x00}, {0x5309, 0xa5}, {0x530a, 0x00}, {0x530b, 0xd3}, {0x530c, 0x00}, {0x530d, 0xf0}, {0x530e, 0x01}, {0x530f, 0x10}, {0x5310, 0x01}, {0x5311, 0x20}, {0x5312, 0x01}, {0x5313, 0x20}, {0x5314, 0x01}, {0x5315, 0x20}, {0x5316, 0x08}, {0x5317, 0x08}, {0x5318, 0x10}, {0x5319, 0x88}, {0x531a, 0x88}, {0x531b, 0xa9}, {0x531c, 0xaa}, {0x531d, 0x0a}, {0x5405, 0x02}, {0x5406, 0x67}, {0x5407, 0x01}, {0x5408, 0x4a}, }; static const struct ov13858_reg mode_1056x784_regs[] = { {0x3013, 0x32}, {0x301b, 0xf0}, {0x301f, 0xd0}, {0x3106, 0x15}, {0x3107, 0x23}, {0x350a, 0x00}, {0x350e, 0x00}, {0x3510, 0x00}, {0x3511, 0x02}, {0x3512, 0x00}, {0x3600, 0x2b}, {0x3601, 0x52}, {0x3602, 0x60}, {0x3612, 0x05}, {0x3613, 0xa4}, {0x3620, 0x80}, {0x3621, 0x10}, {0x3622, 0x30}, {0x3624, 0x1c}, {0x3640, 0x10}, {0x3641, 
0x70}, {0x3660, 0x04}, {0x3661, 0x80}, {0x3662, 0x08}, {0x3664, 0x73}, {0x3665, 0xa7}, {0x366e, 0xff}, {0x366f, 0xf4}, {0x3674, 0x00}, {0x3679, 0x0c}, {0x367f, 0x01}, {0x3680, 0x0c}, {0x3681, 0x50}, {0x3682, 0x50}, {0x3683, 0xa9}, {0x3684, 0xa9}, {0x3709, 0x5f}, {0x3714, 0x30}, {0x371a, 0x3e}, {0x3737, 0x08}, {0x3738, 0xcc}, {0x3739, 0x20}, {0x373d, 0x26}, {0x3764, 0x20}, {0x3765, 0x20}, {0x37a1, 0x36}, {0x37a8, 0x3b}, {0x37ab, 0x31}, {0x37c2, 0x2c}, {0x37c3, 0xf1}, {0x37c5, 0x00}, {0x37d8, 0x03}, {0x37d9, 0x06}, {0x37da, 0xc2}, {0x37dc, 0x02}, {0x37e0, 0x00}, {0x37e1, 0x0a}, {0x37e2, 0x14}, {0x37e3, 0x08}, {0x37e4, 0x36}, {0x37e5, 0x03}, {0x37e6, 0x08}, {0x3800, 0x00}, {0x3801, 0x00}, {0x3802, 0x00}, {0x3803, 0x00}, {0x3804, 0x10}, {0x3805, 0x9f}, {0x3806, 0x0c}, {0x3807, 0x5f}, {0x3808, 0x04}, {0x3809, 0x20}, {0x380a, 0x03}, {0x380b, 0x10}, {0x380c, 0x04}, {0x380d, 0x62}, {0x380e, 0x0c}, {0x380f, 0x8e}, {0x3811, 0x04}, {0x3813, 0x05}, {0x3814, 0x07}, {0x3815, 0x01}, {0x3816, 0x07}, {0x3817, 0x01}, {0x3820, 0xac}, {0x3821, 0x00}, {0x3822, 0xc2}, {0x3823, 0x18}, {0x3826, 0x04}, {0x3827, 0x48}, {0x3829, 0x03}, {0x3832, 0x00}, {0x3c80, 0x00}, {0x3c87, 0x01}, {0x3c8c, 0x19}, {0x3c8d, 0x1c}, {0x3c90, 0x00}, {0x3c91, 0x00}, {0x3c92, 0x00}, {0x3c93, 0x00}, {0x3c94, 0x40}, {0x3c95, 0x54}, {0x3c96, 0x34}, {0x3c97, 0x04}, {0x3c98, 0x00}, {0x3d8c, 0x73}, {0x3d8d, 0xc0}, {0x3f00, 0x0b}, {0x3f03, 0x00}, {0x4001, 0xe0}, {0x4008, 0x00}, {0x4009, 0x05}, {0x4011, 0xf0}, {0x4017, 0x08}, {0x4050, 0x02}, {0x4051, 0x05}, {0x4052, 0x00}, {0x4053, 0x80}, {0x4054, 0x00}, {0x4055, 0x80}, {0x4056, 0x00}, {0x4057, 0x80}, {0x4058, 0x00}, {0x4059, 0x80}, {0x405e, 0x20}, {0x4500, 0x07}, {0x4503, 0x00}, {0x450a, 0x04}, {0x4809, 0x04}, {0x480c, 0x12}, {0x481f, 0x30}, {0x4833, 0x10}, {0x4837, 0x1e}, {0x4902, 0x02}, {0x4d00, 0x03}, {0x4d01, 0xc9}, {0x4d02, 0xbc}, {0x4d03, 0xd7}, {0x4d04, 0xf0}, {0x4d05, 0xa2}, {0x5000, 0xfd}, {0x5001, 0x01}, {0x5040, 0x39}, {0x5041, 0x10}, {0x5042, 0x10}, {0x5043, 
0x84}, {0x5044, 0x62}, {0x5180, 0x00}, {0x5181, 0x10}, {0x5182, 0x02}, {0x5183, 0x0f}, {0x5200, 0x1b}, {0x520b, 0x07}, {0x520c, 0x0f}, {0x5300, 0x04}, {0x5301, 0x0c}, {0x5302, 0x0c}, {0x5303, 0x0f}, {0x5304, 0x00}, {0x5305, 0x70}, {0x5306, 0x00}, {0x5307, 0x80}, {0x5308, 0x00}, {0x5309, 0xa5}, {0x530a, 0x00}, {0x530b, 0xd3}, {0x530c, 0x00}, {0x530d, 0xf0}, {0x530e, 0x01}, {0x530f, 0x10}, {0x5310, 0x01}, {0x5311, 0x20}, {0x5312, 0x01}, {0x5313, 0x20}, {0x5314, 0x01}, {0x5315, 0x20}, {0x5316, 0x08}, {0x5317, 0x08}, {0x5318, 0x10}, {0x5319, 0x88}, {0x531a, 0x88}, {0x531b, 0xa9}, {0x531c, 0xaa}, {0x531d, 0x0a}, {0x5405, 0x02}, {0x5406, 0x67}, {0x5407, 0x01}, {0x5408, 0x4a}, }; static const char * const ov13858_test_pattern_menu[] = { "Disabled", "Vertical Color Bar Type 1", "Vertical Color Bar Type 2", "Vertical Color Bar Type 3", "Vertical Color Bar Type 4" }; /* Configurations for supported link frequencies */ #define OV13858_NUM_OF_LINK_FREQS 2 #define OV13858_LINK_FREQ_540MHZ 540000000ULL #define OV13858_LINK_FREQ_270MHZ 270000000ULL #define OV13858_LINK_FREQ_INDEX_0 0 #define OV13858_LINK_FREQ_INDEX_1 1 /* * pixel_rate = link_freq * data-rate * nr_of_lanes / bits_per_sample * data rate => double data rate; number of lanes => 4; bits per pixel => 10 */ static u64 link_freq_to_pixel_rate(u64 f) { f *= 2 * 4; do_div(f, 10); return f; } /* Menu items for LINK_FREQ V4L2 control */ static const s64 link_freq_menu_items[OV13858_NUM_OF_LINK_FREQS] = { OV13858_LINK_FREQ_540MHZ, OV13858_LINK_FREQ_270MHZ }; /* Link frequency configs */ static const struct ov13858_link_freq_config link_freq_configs[OV13858_NUM_OF_LINK_FREQS] = { { .pixels_per_line = OV13858_PPL_540MHZ, .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_1080mbps), .regs = mipi_data_rate_1080mbps, } }, { .pixels_per_line = OV13858_PPL_270MHZ, .reg_list = { .num_of_regs = ARRAY_SIZE(mipi_data_rate_540mbps), .regs = mipi_data_rate_540mbps, } } }; /* Mode configs */ static const struct ov13858_mode 
supported_modes[] = { { .width = 4224, .height = 3136, .vts_def = OV13858_VTS_30FPS, .vts_min = OV13858_VTS_30FPS, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_4224x3136_regs), .regs = mode_4224x3136_regs, }, .link_freq_index = OV13858_LINK_FREQ_INDEX_0, }, { .width = 2112, .height = 1568, .vts_def = OV13858_VTS_30FPS, .vts_min = 1608, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_2112x1568_regs), .regs = mode_2112x1568_regs, }, .link_freq_index = OV13858_LINK_FREQ_INDEX_1, }, { .width = 2112, .height = 1188, .vts_def = OV13858_VTS_30FPS, .vts_min = 1608, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_2112x1188_regs), .regs = mode_2112x1188_regs, }, .link_freq_index = OV13858_LINK_FREQ_INDEX_1, }, { .width = 1056, .height = 784, .vts_def = OV13858_VTS_30FPS, .vts_min = 804, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_1056x784_regs), .regs = mode_1056x784_regs, }, .link_freq_index = OV13858_LINK_FREQ_INDEX_1, } }; struct ov13858 { struct v4l2_subdev sd; struct media_pad pad; struct v4l2_ctrl_handler ctrl_handler; /* V4L2 Controls */ struct v4l2_ctrl *link_freq; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *vblank; struct v4l2_ctrl *hblank; struct v4l2_ctrl *exposure; /* Current mode */ const struct ov13858_mode *cur_mode; /* Mutex for serialized access */ struct mutex mutex; /* Streaming on/off */ bool streaming; }; #define to_ov13858(_sd) container_of(_sd, struct ov13858, sd) /* Read registers up to 4 at a time */ static int ov13858_read_reg(struct ov13858 *ov13858, u16 reg, u32 len, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); struct i2c_msg msgs[2]; u8 *data_be_p; int ret; __be32 data_be = 0; __be16 reg_addr_be = cpu_to_be16(reg); if (len > 4) return -EINVAL; data_be_p = (u8 *)&data_be; /* Write register address */ msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = 2; msgs[0].buf = (u8 *)&reg_addr_be; /* Read data from register */ msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = 
&data_be_p[4 - len]; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return -EIO; *val = be32_to_cpu(data_be); return 0; } /* Write registers up to 4 at a time */ static int ov13858_write_reg(struct ov13858 *ov13858, u16 reg, u32 len, u32 __val) { struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); int buf_i, val_i; u8 buf[6], *val_p; __be32 val; if (len > 4) return -EINVAL; buf[0] = reg >> 8; buf[1] = reg & 0xff; val = cpu_to_be32(__val); val_p = (u8 *)&val; buf_i = 2; val_i = 4 - len; while (val_i < 4) buf[buf_i++] = val_p[val_i++]; if (i2c_master_send(client, buf, len + 2) != len + 2) return -EIO; return 0; } /* Write a list of registers */ static int ov13858_write_regs(struct ov13858 *ov13858, const struct ov13858_reg *regs, u32 len) { struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); int ret; u32 i; for (i = 0; i < len; i++) { ret = ov13858_write_reg(ov13858, regs[i].address, 1, regs[i].val); if (ret) { dev_err_ratelimited( &client->dev, "Failed to write reg 0x%4.4x. 
error = %d\n", regs[i].address, ret); return ret; } } return 0; } static int ov13858_write_reg_list(struct ov13858 *ov13858, const struct ov13858_reg_list *r_list) { return ov13858_write_regs(ov13858, r_list->regs, r_list->num_of_regs); } /* Open sub-device */ static int ov13858_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { struct ov13858 *ov13858 = to_ov13858(sd); struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd, fh->state, 0); mutex_lock(&ov13858->mutex); /* Initialize try_fmt */ try_fmt->width = ov13858->cur_mode->width; try_fmt->height = ov13858->cur_mode->height; try_fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; try_fmt->field = V4L2_FIELD_NONE; /* No crop or compose */ mutex_unlock(&ov13858->mutex); return 0; } static int ov13858_update_digital_gain(struct ov13858 *ov13858, u32 d_gain) { int ret; ret = ov13858_write_reg(ov13858, OV13858_REG_B_MWB_GAIN, OV13858_REG_VALUE_16BIT, d_gain); if (ret) return ret; ret = ov13858_write_reg(ov13858, OV13858_REG_G_MWB_GAIN, OV13858_REG_VALUE_16BIT, d_gain); if (ret) return ret; ret = ov13858_write_reg(ov13858, OV13858_REG_R_MWB_GAIN, OV13858_REG_VALUE_16BIT, d_gain); return ret; } static int ov13858_enable_test_pattern(struct ov13858 *ov13858, u32 pattern) { int ret; u32 val; ret = ov13858_read_reg(ov13858, OV13858_REG_TEST_PATTERN, OV13858_REG_VALUE_08BIT, &val); if (ret) return ret; if (pattern) { val &= OV13858_TEST_PATTERN_MASK; val |= (pattern - 1) | OV13858_TEST_PATTERN_ENABLE; } else { val &= ~OV13858_TEST_PATTERN_ENABLE; } return ov13858_write_reg(ov13858, OV13858_REG_TEST_PATTERN, OV13858_REG_VALUE_08BIT, val); } static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl) { struct ov13858 *ov13858 = container_of(ctrl->handler, struct ov13858, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); s64 max; int ret; /* Propagate change of current control to all related controls */ switch (ctrl->id) { case V4L2_CID_VBLANK: /* Update max exposure while meeting expected 
vblanking */ max = ov13858->cur_mode->height + ctrl->val - 8; __v4l2_ctrl_modify_range(ov13858->exposure, ov13858->exposure->minimum, max, ov13858->exposure->step, max); break; } /* * Applying V4L2 control value only happens * when power is up for streaming */ if (!pm_runtime_get_if_in_use(&client->dev)) return 0; ret = 0; switch (ctrl->id) { case V4L2_CID_ANALOGUE_GAIN: ret = ov13858_write_reg(ov13858, OV13858_REG_ANALOG_GAIN, OV13858_REG_VALUE_16BIT, ctrl->val); break; case V4L2_CID_DIGITAL_GAIN: ret = ov13858_update_digital_gain(ov13858, ctrl->val); break; case V4L2_CID_EXPOSURE: ret = ov13858_write_reg(ov13858, OV13858_REG_EXPOSURE, OV13858_REG_VALUE_24BIT, ctrl->val << 4); break; case V4L2_CID_VBLANK: /* Update VTS that meets expected vertical blanking */ ret = ov13858_write_reg(ov13858, OV13858_REG_VTS, OV13858_REG_VALUE_16BIT, ov13858->cur_mode->height + ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = ov13858_enable_test_pattern(ov13858, ctrl->val); break; default: dev_info(&client->dev, "ctrl(id:0x%x,val:0x%x) is not handled\n", ctrl->id, ctrl->val); break; } pm_runtime_put(&client->dev); return ret; } static const struct v4l2_ctrl_ops ov13858_ctrl_ops = { .s_ctrl = ov13858_set_ctrl, }; static int ov13858_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { /* Only one bayer order(GRBG) is supported */ if (code->index > 0) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int ov13858_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { if (fse->index >= ARRAY_SIZE(supported_modes)) return -EINVAL; if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10) return -EINVAL; fse->min_width = supported_modes[fse->index].width; fse->max_width = fse->min_width; fse->min_height = supported_modes[fse->index].height; fse->max_height = fse->min_height; return 0; } static void ov13858_update_pad_format(const struct 
ov13858_mode *mode,
				      struct v4l2_subdev_format *fmt)
{
	fmt->format.width = mode->width;
	fmt->format.height = mode->height;
	fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
	fmt->format.field = V4L2_FIELD_NONE;
}

/* Get pad format; caller must hold ov13858->mutex. */
static int ov13858_do_get_pad_format(struct ov13858 *ov13858,
				     struct v4l2_subdev_state *sd_state,
				     struct v4l2_subdev_format *fmt)
{
	struct v4l2_mbus_framefmt *framefmt;
	struct v4l2_subdev *sd = &ov13858->sd;

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
		fmt->format = *framefmt;
	} else {
		/* ACTIVE: report the currently configured mode */
		ov13858_update_pad_format(ov13858->cur_mode, fmt);
	}

	return 0;
}

/* Locked wrapper around ov13858_do_get_pad_format(). */
static int ov13858_get_pad_format(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_format *fmt)
{
	struct ov13858 *ov13858 = to_ov13858(sd);
	int ret;

	mutex_lock(&ov13858->mutex);
	ret = ov13858_do_get_pad_format(ov13858, sd_state, fmt);
	mutex_unlock(&ov13858->mutex);

	return ret;
}

/*
 * Set pad format: snap the request to the nearest supported mode. For the
 * ACTIVE format, also update link frequency, pixel rate and the
 * vblank/hblank control ranges derived from the new mode.
 */
static int ov13858_set_pad_format(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_format *fmt)
{
	struct ov13858 *ov13858 = to_ov13858(sd);
	const struct ov13858_mode *mode;
	struct v4l2_mbus_framefmt *framefmt;
	s32 vblank_def;
	s32 vblank_min;
	s64 h_blank;
	s64 pixel_rate;
	s64 link_freq;

	mutex_lock(&ov13858->mutex);

	/* Only one raw bayer(GRBG) order is supported */
	if (fmt->format.code != MEDIA_BUS_FMT_SGRBG10_1X10)
		fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;

	mode = v4l2_find_nearest_size(supported_modes,
				      ARRAY_SIZE(supported_modes),
				      width, height,
				      fmt->format.width, fmt->format.height);
	ov13858_update_pad_format(mode, fmt);
	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
		*framefmt = fmt->format;
	} else {
		ov13858->cur_mode = mode;
		__v4l2_ctrl_s_ctrl(ov13858->link_freq, mode->link_freq_index);
		link_freq = link_freq_menu_items[mode->link_freq_index];
		pixel_rate = link_freq_to_pixel_rate(link_freq);
		__v4l2_ctrl_s_ctrl_int64(ov13858->pixel_rate, pixel_rate);

		/* Update limits and set FPS to default */
		vblank_def = ov13858->cur_mode->vts_def -
			     ov13858->cur_mode->height;
		vblank_min = ov13858->cur_mode->vts_min -
			     ov13858->cur_mode->height;
		__v4l2_ctrl_modify_range(
			ov13858->vblank, vblank_min,
			OV13858_VTS_MAX - ov13858->cur_mode->height, 1,
			vblank_def);
		__v4l2_ctrl_s_ctrl(ov13858->vblank, vblank_def);
		h_blank =
			link_freq_configs[mode->link_freq_index].pixels_per_line
			- ov13858->cur_mode->width;
		__v4l2_ctrl_modify_range(ov13858->hblank, h_blank,
					 h_blank, 1, h_blank);
	}

	mutex_unlock(&ov13858->mutex);

	return 0;
}

/* Number of frames userspace should skip after stream start. */
static int ov13858_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
{
	*frames = OV13858_NUM_OF_SKIP_FRAMES;

	return 0;
}

/* Start streaming */
static int ov13858_start_streaming(struct ov13858 *ov13858)
{
	struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
	const struct ov13858_reg_list *reg_list;
	int ret, link_freq_index;

	/* Get out of from software reset */
	ret = ov13858_write_reg(ov13858, OV13858_REG_SOFTWARE_RST,
				OV13858_REG_VALUE_08BIT, OV13858_SOFTWARE_RST);
	if (ret) {
		dev_err(&client->dev, "%s failed to set powerup registers\n",
			__func__);
		return ret;
	}

	/* Setup PLL */
	link_freq_index = ov13858->cur_mode->link_freq_index;
	reg_list = &link_freq_configs[link_freq_index].reg_list;
	ret = ov13858_write_reg_list(ov13858, reg_list);
	if (ret) {
		dev_err(&client->dev, "%s failed to set plls\n", __func__);
		return ret;
	}

	/* Apply default values of current mode */
	reg_list = &ov13858->cur_mode->reg_list;
	ret = ov13858_write_reg_list(ov13858, reg_list);
	if (ret) {
		dev_err(&client->dev, "%s failed to set mode\n", __func__);
		return ret;
	}

	/* Apply customized values from user */
	ret = __v4l2_ctrl_handler_setup(ov13858->sd.ctrl_handler);
	if (ret)
		return ret;

	return ov13858_write_reg(ov13858, OV13858_REG_MODE_SELECT,
				 OV13858_REG_VALUE_08BIT,
				 OV13858_MODE_STREAMING);
}

/* Stop streaming */
static int ov13858_stop_streaming(struct ov13858 *ov13858)
{
	return ov13858_write_reg(ov13858, OV13858_REG_MODE_SELECT,
OV13858_REG_VALUE_08BIT, OV13858_MODE_STANDBY); } static int ov13858_set_stream(struct v4l2_subdev *sd, int enable) { struct ov13858 *ov13858 = to_ov13858(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; mutex_lock(&ov13858->mutex); if (ov13858->streaming == enable) { mutex_unlock(&ov13858->mutex); return 0; } if (enable) { ret = pm_runtime_resume_and_get(&client->dev); if (ret < 0) goto err_unlock; /* * Apply default & customized values * and then start streaming. */ ret = ov13858_start_streaming(ov13858); if (ret) goto err_rpm_put; } else { ov13858_stop_streaming(ov13858); pm_runtime_put(&client->dev); } ov13858->streaming = enable; mutex_unlock(&ov13858->mutex); return ret; err_rpm_put: pm_runtime_put(&client->dev); err_unlock: mutex_unlock(&ov13858->mutex); return ret; } static int __maybe_unused ov13858_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov13858 *ov13858 = to_ov13858(sd); if (ov13858->streaming) ov13858_stop_streaming(ov13858); return 0; } static int __maybe_unused ov13858_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ov13858 *ov13858 = to_ov13858(sd); int ret; if (ov13858->streaming) { ret = ov13858_start_streaming(ov13858); if (ret) goto error; } return 0; error: ov13858_stop_streaming(ov13858); ov13858->streaming = false; return ret; } /* Verify chip ID */ static int ov13858_identify_module(struct ov13858 *ov13858) { struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd); int ret; u32 val; ret = ov13858_read_reg(ov13858, OV13858_REG_CHIP_ID, OV13858_REG_VALUE_24BIT, &val); if (ret) return ret; if (val != OV13858_CHIP_ID) { dev_err(&client->dev, "chip id mismatch: %x!=%x\n", OV13858_CHIP_ID, val); return -EIO; } return 0; } static const struct v4l2_subdev_core_ops ov13858_core_ops = { .log_status = v4l2_ctrl_subdev_log_status, .subscribe_event = v4l2_ctrl_subdev_subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, }; static const 
struct v4l2_subdev_video_ops ov13858_video_ops = {
	.s_stream = ov13858_set_stream,
};

static const struct v4l2_subdev_pad_ops ov13858_pad_ops = {
	.enum_mbus_code = ov13858_enum_mbus_code,
	.get_fmt = ov13858_get_pad_format,
	.set_fmt = ov13858_set_pad_format,
	.enum_frame_size = ov13858_enum_frame_size,
};

static const struct v4l2_subdev_sensor_ops ov13858_sensor_ops = {
	.g_skip_frames = ov13858_get_skip_frames,
};

static const struct v4l2_subdev_ops ov13858_subdev_ops = {
	.core = &ov13858_core_ops,
	.video = &ov13858_video_ops,
	.pad = &ov13858_pad_ops,
	.sensor = &ov13858_sensor_ops,
};

static const struct media_entity_operations ov13858_subdev_entity_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

static const struct v4l2_subdev_internal_ops ov13858_internal_ops = {
	.open = ov13858_open,
};

/* Initialize control handlers */
static int ov13858_init_controls(struct ov13858 *ov13858)
{
	struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
	struct v4l2_fwnode_device_properties props;
	struct v4l2_ctrl_handler *ctrl_hdlr;
	s64 exposure_max;
	s64 vblank_def;
	s64 vblank_min;
	s64 hblank;
	s64 pixel_rate_min;
	s64 pixel_rate_max;
	const struct ov13858_mode *mode;
	int ret;

	ctrl_hdlr = &ov13858->ctrl_handler;
	ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
	if (ret)
		return ret;

	/* The mutex guards the handler and is destroyed on the error path */
	mutex_init(&ov13858->mutex);
	ctrl_hdlr->lock = &ov13858->mutex;
	ov13858->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
						    &ov13858_ctrl_ops,
						    V4L2_CID_LINK_FREQ,
						    OV13858_NUM_OF_LINK_FREQS
						    - 1,
						    0,
						    link_freq_menu_items);
	if (ov13858->link_freq)
		ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;

	pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
	pixel_rate_min = link_freq_to_pixel_rate(link_freq_menu_items[1]);
	/* By default, PIXEL_RATE is read only */
	ov13858->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops,
						V4L2_CID_PIXEL_RATE,
						pixel_rate_min, pixel_rate_max,
						1, pixel_rate_max);

	mode = ov13858->cur_mode;
	vblank_def = mode->vts_def - mode->height;
	vblank_min = mode->vts_min - mode->height;
	ov13858->vblank = v4l2_ctrl_new_std(
				ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_VBLANK,
				vblank_min, OV13858_VTS_MAX - mode->height, 1,
				vblank_def);

	hblank = link_freq_configs[mode->link_freq_index].pixels_per_line -
		 mode->width;
	ov13858->hblank = v4l2_ctrl_new_std(
				ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK,
				hblank, hblank, 1, hblank);
	if (ov13858->hblank)
		ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;

	/* Exposure ceiling tracks VTS minus the 8-line hardware margin */
	exposure_max = mode->vts_def - 8;
	ov13858->exposure = v4l2_ctrl_new_std(
				ctrl_hdlr, &ov13858_ctrl_ops,
				V4L2_CID_EXPOSURE, OV13858_EXPOSURE_MIN,
				exposure_max, OV13858_EXPOSURE_STEP,
				OV13858_EXPOSURE_DEFAULT);

	v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
			  OV13858_ANA_GAIN_MIN, OV13858_ANA_GAIN_MAX,
			  OV13858_ANA_GAIN_STEP, OV13858_ANA_GAIN_DEFAULT);

	/* Digital gain */
	v4l2_ctrl_new_std(ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
			  OV13858_DGTL_GAIN_MIN, OV13858_DGTL_GAIN_MAX,
			  OV13858_DGTL_GAIN_STEP, OV13858_DGTL_GAIN_DEFAULT);

	v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov13858_ctrl_ops,
				     V4L2_CID_TEST_PATTERN,
				     ARRAY_SIZE(ov13858_test_pattern_menu) - 1,
				     0, 0, ov13858_test_pattern_menu);
	if (ctrl_hdlr->error) {
		ret = ctrl_hdlr->error;
		dev_err(&client->dev, "%s control init failed (%d)\n",
			__func__, ret);
		goto error;
	}

	ret = v4l2_fwnode_device_parse(&client->dev, &props);
	if (ret)
		goto error;

	ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov13858_ctrl_ops,
					      &props);
	if (ret)
		goto error;

	ov13858->sd.ctrl_handler = ctrl_hdlr;

	return 0;

error:
	v4l2_ctrl_handler_free(ctrl_hdlr);
	mutex_destroy(&ov13858->mutex);

	return ret;
}

/* Free the control handler and the mutex created by init_controls(). */
static void ov13858_free_controls(struct ov13858 *ov13858)
{
	v4l2_ctrl_handler_free(ov13858->sd.ctrl_handler);
	mutex_destroy(&ov13858->mutex);
}

static int ov13858_probe(struct i2c_client *client)
{
	struct ov13858 *ov13858;
	int ret;
	u32 val = 0;

	/* Only a 19.2 MHz external clock is supported */
	device_property_read_u32(&client->dev, "clock-frequency", &val);
	if (val != 19200000)
		return -EINVAL;

	ov13858 = devm_kzalloc(&client->dev, sizeof(*ov13858),
			       GFP_KERNEL);
	if (!ov13858)
		return -ENOMEM;

	/* Initialize subdev */
	v4l2_i2c_subdev_init(&ov13858->sd, client, &ov13858_subdev_ops);

	/* Check module identity */
	ret = ov13858_identify_module(ov13858);
	if (ret) {
		dev_err(&client->dev, "failed to find sensor: %d\n", ret);
		return ret;
	}

	/* Set default mode to max resolution */
	ov13858->cur_mode = &supported_modes[0];

	ret = ov13858_init_controls(ov13858);
	if (ret)
		return ret;

	/* Initialize subdev */
	ov13858->sd.internal_ops = &ov13858_internal_ops;
	ov13858->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
			     V4L2_SUBDEV_FL_HAS_EVENTS;
	ov13858->sd.entity.ops = &ov13858_subdev_entity_ops;
	ov13858->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;

	/* Initialize source pad */
	ov13858->pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = media_entity_pads_init(&ov13858->sd.entity, 1, &ov13858->pad);
	if (ret) {
		dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
		goto error_handler_free;
	}

	ret = v4l2_async_register_subdev_sensor(&ov13858->sd);
	if (ret < 0)
		goto error_media_entity;

	/*
	 * Device is already turned on by i2c-core with ACPI domain PM.
	 * Enable runtime PM and turn off the device.
	 */
	pm_runtime_set_active(&client->dev);
	pm_runtime_enable(&client->dev);
	pm_runtime_idle(&client->dev);

	return 0;

error_media_entity:
	media_entity_cleanup(&ov13858->sd.entity);

error_handler_free:
	ov13858_free_controls(ov13858);
	dev_err(&client->dev, "%s failed:%d\n", __func__, ret);

	return ret;
}

/* Tear down everything set up by probe, in reverse order. */
static void ov13858_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct ov13858 *ov13858 = to_ov13858(sd);

	v4l2_async_unregister_subdev(sd);
	media_entity_cleanup(&sd->entity);
	ov13858_free_controls(ov13858);

	pm_runtime_disable(&client->dev);
}

static const struct i2c_device_id ov13858_id_table[] = {
	{"ov13858", 0},
	{},
};

MODULE_DEVICE_TABLE(i2c, ov13858_id_table);

static const struct dev_pm_ops ov13858_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ov13858_suspend, ov13858_resume)
};

#ifdef CONFIG_ACPI
static const struct acpi_device_id ov13858_acpi_ids[] = {
	{"OVTID858"},
	{ /* sentinel */ }
};

MODULE_DEVICE_TABLE(acpi, ov13858_acpi_ids);
#endif

static struct i2c_driver ov13858_i2c_driver = {
	.driver = {
		.name = "ov13858",
		.pm = &ov13858_pm_ops,
		.acpi_match_table = ACPI_PTR(ov13858_acpi_ids),
	},
	.probe = ov13858_probe,
	.remove = ov13858_remove,
	.id_table = ov13858_id_table,
};

module_i2c_driver(ov13858_i2c_driver);

MODULE_AUTHOR("Kan, Chris <[email protected]>");
MODULE_AUTHOR("Rapolu, Chiranjeevi");
MODULE_AUTHOR("Yang, Hyungwoo");
MODULE_DESCRIPTION("Omnivision ov13858 sensor driver");
MODULE_LICENSE("GPL v2");
/*
 * ==== end of drivers/media/i2c/ov13858.c (linux-master) ====
 * The Sony IMX296 sensor driver follows below.
 */
// SPDX-License-Identifier: GPL-2.0 /* * Driver for IMX296 CMOS Image Sensor from Sony * * Copyright 2019 Laurent Pinchart <[email protected]> */ #include <linux/clk.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-subdev.h> #define IMX296_PIXEL_ARRAY_WIDTH 1456 #define IMX296_PIXEL_ARRAY_HEIGHT 1088 #define IMX296_REG_8BIT(n) ((1 << 16) | (n)) #define IMX296_REG_16BIT(n) ((2 << 16) | (n)) #define IMX296_REG_24BIT(n) ((3 << 16) | (n)) #define IMX296_REG_SIZE_SHIFT 16 #define IMX296_REG_ADDR_MASK 0xffff #define IMX296_CTRL00 IMX296_REG_8BIT(0x3000) #define IMX296_CTRL00_STANDBY BIT(0) #define IMX296_CTRL08 IMX296_REG_8BIT(0x3008) #define IMX296_CTRL08_REGHOLD BIT(0) #define IMX296_CTRL0A IMX296_REG_8BIT(0x300a) #define IMX296_CTRL0A_XMSTA BIT(0) #define IMX296_CTRL0B IMX296_REG_8BIT(0x300b) #define IMX296_CTRL0B_TRIGEN BIT(0) #define IMX296_CTRL0D IMX296_REG_8BIT(0x300d) #define IMX296_CTRL0D_WINMODE_ALL (0 << 0) #define IMX296_CTRL0D_WINMODE_FD_BINNING (2 << 0) #define IMX296_CTRL0D_HADD_ON_BINNING BIT(5) #define IMX296_CTRL0D_SAT_CNT BIT(6) #define IMX296_CTRL0E IMX296_REG_8BIT(0x300e) #define IMX296_CTRL0E_VREVERSE BIT(0) #define IMX296_CTRL0E_HREVERSE BIT(1) #define IMX296_VMAX IMX296_REG_24BIT(0x3010) #define IMX296_HMAX IMX296_REG_16BIT(0x3014) #define IMX296_TMDCTRL IMX296_REG_8BIT(0x301d) #define IMX296_TMDCTRL_LATCH BIT(0) #define IMX296_TMDOUT IMX296_REG_16BIT(0x301e) #define IMX296_TMDOUT_MASK 0x3ff #define IMX296_WDSEL IMX296_REG_8BIT(0x3021) #define IMX296_WDSEL_NORMAL (0 << 0) #define IMX296_WDSEL_MULTI_2 (1 << 0) #define IMX296_WDSEL_MULTI_4 (3 << 0) #define IMX296_BLKLEVELAUTO IMX296_REG_8BIT(0x3022) #define IMX296_BLKLEVELAUTO_ON 0x01 #define IMX296_BLKLEVELAUTO_OFF 
0xf0 #define IMX296_SST IMX296_REG_8BIT(0x3024) #define IMX296_SST_EN BIT(0) #define IMX296_CTRLTOUT IMX296_REG_8BIT(0x3026) #define IMX296_CTRLTOUT_TOUT1SEL_LOW (0 << 0) #define IMX296_CTRLTOUT_TOUT1SEL_PULSE (3 << 0) #define IMX296_CTRLTOUT_TOUT2SEL_LOW (0 << 2) #define IMX296_CTRLTOUT_TOUT2SEL_PULSE (3 << 2) #define IMX296_CTRLTRIG IMX296_REG_8BIT(0x3029) #define IMX296_CTRLTRIG_TOUT1_SEL_LOW (0 << 0) #define IMX296_CTRLTRIG_TOUT1_SEL_PULSE1 (1 << 0) #define IMX296_CTRLTRIG_TOUT2_SEL_LOW (0 << 4) #define IMX296_CTRLTRIG_TOUT2_SEL_PULSE2 (2 << 4) #define IMX296_SYNCSEL IMX296_REG_8BIT(0x3036) #define IMX296_SYNCSEL_NORMAL 0xc0 #define IMX296_SYNCSEL_HIZ 0xf0 #define IMX296_PULSE1 IMX296_REG_8BIT(0x306d) #define IMX296_PULSE1_EN_NOR BIT(0) #define IMX296_PULSE1_EN_TRIG BIT(1) #define IMX296_PULSE1_POL_HIGH (0 << 2) #define IMX296_PULSE1_POL_LOW (1 << 2) #define IMX296_PULSE1_UP IMX296_REG_24BIT(0x3070) #define IMX296_PULSE1_DN IMX296_REG_24BIT(0x3074) #define IMX296_PULSE2 IMX296_REG_8BIT(0x3079) #define IMX296_PULSE2_EN_NOR BIT(0) #define IMX296_PULSE2_EN_TRIG BIT(1) #define IMX296_PULSE2_POL_HIGH (0 << 2) #define IMX296_PULSE2_POL_LOW (1 << 2) #define IMX296_PULSE2_UP IMX296_REG_24BIT(0x307c) #define IMX296_PULSE2_DN IMX296_REG_24BIT(0x3080) #define IMX296_INCKSEL(n) IMX296_REG_8BIT(0x3089 + (n)) #define IMX296_SHS1 IMX296_REG_24BIT(0x308d) #define IMX296_SHS2 IMX296_REG_24BIT(0x3090) #define IMX296_SHS3 IMX296_REG_24BIT(0x3094) #define IMX296_SHS4 IMX296_REG_24BIT(0x3098) #define IMX296_VBLANKLP IMX296_REG_8BIT(0x309c) #define IMX296_VBLANKLP_NORMAL 0x04 #define IMX296_VBLANKLP_LOW_POWER 0x2c #define IMX296_EXP_CNT IMX296_REG_8BIT(0x30a3) #define IMX296_EXP_CNT_RESET BIT(0) #define IMX296_EXP_MAX IMX296_REG_16BIT(0x30a6) #define IMX296_VINT IMX296_REG_8BIT(0x30aa) #define IMX296_VINT_EN BIT(0) #define IMX296_LOWLAGTRG IMX296_REG_8BIT(0x30ae) #define IMX296_LOWLAGTRG_FAST BIT(0) #define IMX296_I2CCTRL IMX296_REG_8BIT(0x30ef) #define IMX296_I2CCTRL_I2CACKEN 
BIT(0) #define IMX296_SENSOR_INFO IMX296_REG_16BIT(0x3148) #define IMX296_SENSOR_INFO_MONO BIT(15) #define IMX296_SENSOR_INFO_IMX296LQ 0x4a00 #define IMX296_SENSOR_INFO_IMX296LL 0xca00 #define IMX296_S_SHSA IMX296_REG_16BIT(0x31ca) #define IMX296_S_SHSB IMX296_REG_16BIT(0x31d2) /* * Registers 0x31c8 to 0x31cd, 0x31d0 to 0x31d5, 0x31e2, 0x31e3, 0x31ea and * 0x31eb are related to exposure mode but otherwise not documented. */ #define IMX296_GAINCTRL IMX296_REG_8BIT(0x3200) #define IMX296_GAINCTRL_WD_GAIN_MODE_NORMAL 0x01 #define IMX296_GAINCTRL_WD_GAIN_MODE_MULTI 0x41 #define IMX296_GAIN IMX296_REG_16BIT(0x3204) #define IMX296_GAIN_MIN 0 #define IMX296_GAIN_MAX 480 #define IMX296_GAIN1 IMX296_REG_16BIT(0x3208) #define IMX296_GAIN2 IMX296_REG_16BIT(0x320c) #define IMX296_GAIN3 IMX296_REG_16BIT(0x3210) #define IMX296_GAINDLY IMX296_REG_8BIT(0x3212) #define IMX296_GAINDLY_NONE 0x08 #define IMX296_GAINDLY_1FRAME 0x09 #define IMX296_PGCTRL IMX296_REG_8BIT(0x3238) #define IMX296_PGCTRL_REGEN BIT(0) #define IMX296_PGCTRL_THRU BIT(1) #define IMX296_PGCTRL_CLKEN BIT(2) #define IMX296_PGCTRL_MODE(n) ((n) << 3) #define IMX296_PGHPOS IMX296_REG_16BIT(0x3239) #define IMX296_PGVPOS IMX296_REG_16BIT(0x323c) #define IMX296_PGHPSTEP IMX296_REG_8BIT(0x323e) #define IMX296_PGVPSTEP IMX296_REG_8BIT(0x323f) #define IMX296_PGHPNUM IMX296_REG_8BIT(0x3240) #define IMX296_PGVPNUM IMX296_REG_8BIT(0x3241) #define IMX296_PGDATA1 IMX296_REG_16BIT(0x3244) #define IMX296_PGDATA2 IMX296_REG_16BIT(0x3246) #define IMX296_PGHGSTEP IMX296_REG_8BIT(0x3249) #define IMX296_BLKLEVEL IMX296_REG_16BIT(0x3254) #define IMX296_FID0_ROI IMX296_REG_8BIT(0x3300) #define IMX296_FID0_ROIH1ON BIT(0) #define IMX296_FID0_ROIV1ON BIT(1) #define IMX296_FID0_ROIPH1 IMX296_REG_16BIT(0x3310) #define IMX296_FID0_ROIPV1 IMX296_REG_16BIT(0x3312) #define IMX296_FID0_ROIWH1 IMX296_REG_16BIT(0x3314) #define IMX296_FID0_ROIWH1_MIN 80 #define IMX296_FID0_ROIWV1 IMX296_REG_16BIT(0x3316) #define IMX296_FID0_ROIWV1_MIN 4 #define 
IMX296_CM_HSST_STARTTMG			IMX296_REG_16BIT(0x4018)
#define IMX296_CM_HSST_ENDTMG			IMX296_REG_16BIT(0x401a)
#define IMX296_DA_HSST_STARTTMG			IMX296_REG_16BIT(0x404d)
#define IMX296_DA_HSST_ENDTMG			IMX296_REG_16BIT(0x4050)
#define IMX296_LM_HSST_STARTTMG			IMX296_REG_16BIT(0x4094)
#define IMX296_LM_HSST_ENDTMG			IMX296_REG_16BIT(0x4096)
#define IMX296_SST_SIEASTA1_SET			IMX296_REG_8BIT(0x40c9)
#define IMX296_SST_SIEASTA1PRE_1U		IMX296_REG_16BIT(0x40cc)
#define IMX296_SST_SIEASTA1PRE_1D		IMX296_REG_16BIT(0x40ce)
#define IMX296_SST_SIEASTA1PRE_2U		IMX296_REG_16BIT(0x40d0)
#define IMX296_SST_SIEASTA1PRE_2D		IMX296_REG_16BIT(0x40d2)
#define IMX296_HSST				IMX296_REG_8BIT(0x40dc)
#define IMX296_HSST_EN				BIT(2)

#define IMX296_CKREQSEL				IMX296_REG_8BIT(0x4101)
#define IMX296_CKREQSEL_HS			BIT(2)
#define IMX296_GTTABLENUM			IMX296_REG_8BIT(0x4114)
#define IMX296_CTRL418C				IMX296_REG_8BIT(0x418c)

/* Per-INCK-frequency PLL/clock configuration values. */
struct imx296_clk_params {
	unsigned int freq;	/* external clock frequency in Hz */
	u8 incksel[4];		/* values for the INCKSEL(0..3) registers */
	u8 ctrl418c;		/* value for the undocumented CTRL418C register */
};

static const struct imx296_clk_params imx296_clk_params[] = {
	{ 37125000, { 0x80, 0x0b, 0x80, 0x08 }, 116 },
	{ 54000000, { 0xb0, 0x0f, 0xb0, 0x0c }, 168 },
	{ 74250000, { 0x80, 0x0f, 0x80, 0x0c }, 232 },
};

static const char * const imx296_supply_names[] = {
	"dvdd",
	"ovdd",
	"avdd",
};

/* Driver instance state, embedded around the v4l2 subdev. */
struct imx296 {
	struct device *dev;
	struct clk *clk;
	struct regulator_bulk_data supplies[ARRAY_SIZE(imx296_supply_names)];
	struct gpio_desc *reset;
	struct regmap *regmap;
	const struct imx296_clk_params *clk_params;
	bool mono;		/* true for the monochrome sensor variant */
	bool streaming;

	struct v4l2_subdev subdev;
	struct media_pad pad;

	struct v4l2_ctrl_handler ctrls;
	struct v4l2_ctrl *hblank;
	struct v4l2_ctrl *vblank;
};

static inline struct imx296 *to_imx296(struct v4l2_subdev *sd)
{
	return container_of(sd, struct imx296, subdev);
}

/*
 * Read a 1/2/3-byte register. The access width is encoded in the upper bits
 * of @addr (IMX296_REG_*BIT); returns the value or a negative error code.
 */
static int imx296_read(struct imx296 *sensor, u32 addr)
{
	u8 data[3] = { 0, 0, 0 };
	int ret;

	ret = regmap_raw_read(sensor->regmap, addr & IMX296_REG_ADDR_MASK,
			      data, (addr >> IMX296_REG_SIZE_SHIFT) & 3);
	if (ret < 0)
		return ret;

	/* Registers are little-endian on the wire */
	return (data[2] << 16) | (data[1] << 8) |
	       data[0];
}

/*
 * Write a 1/2/3-byte register. If @err points to a non-zero value the write
 * is skipped, allowing call chains that only check the error once at the end.
 */
static int imx296_write(struct imx296 *sensor, u32 addr, u32 value, int *err)
{
	u8 data[3] = { value & 0xff, (value >> 8) & 0xff, value >> 16 };
	int ret;

	if (err && *err)
		return *err;

	ret = regmap_raw_write(sensor->regmap, addr & IMX296_REG_ADDR_MASK,
			       data, (addr >> IMX296_REG_SIZE_SHIFT) & 3);
	if (ret < 0) {
		dev_err(sensor->dev, "%u-bit write to 0x%04x failed: %d\n",
			((addr >> IMX296_REG_SIZE_SHIFT) & 3) * 8,
			addr & IMX296_REG_ADDR_MASK, ret);
		if (err)
			*err = ret;
	}

	return ret;
}

/* Power-up sequence: regulators, then reset release, then clock. */
static int imx296_power_on(struct imx296 *sensor)
{
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(sensor->supplies),
				    sensor->supplies);
	if (ret < 0)
		return ret;

	udelay(1);

	ret = gpiod_direction_output(sensor->reset, 0);
	if (ret < 0)
		goto err_supply;

	udelay(1);

	ret = clk_prepare_enable(sensor->clk);
	if (ret < 0)
		goto err_reset;

	/*
	 * The documentation doesn't explicitly say how much time is required
	 * after providing a clock and before starting I2C communication. It
	 * mentions a delay of 20µs in 4-wire mode, but tests showed that a
	 * delay of 100µs resulted in I2C communication failures, while 500µs
	 * seems to be enough. Be conservative.
	 */
	usleep_range(1000, 2000);

	return 0;

err_reset:
	gpiod_direction_output(sensor->reset, 1);
err_supply:
	regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
	return ret;
}

/* Power-down sequence: exact reverse of imx296_power_on(). */
static void imx296_power_off(struct imx296 *sensor)
{
	clk_disable_unprepare(sensor->clk);
	gpiod_direction_output(sensor->reset, 1);
	regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
}

/* -----------------------------------------------------------------------------
 * Controls
 */

static const char * const imx296_test_pattern_menu[] = {
	"Disabled",
	"Multiple Pixels",
	"Sequence 1",
	"Sequence 2",
	"Gradient",
	"Row",
	"Column",
	"Cross",
	"Stripe",
	"Checks",
};

/*
 * Control setter. Writes are deferred until the sensor is streaming (the
 * values are then applied by __v4l2_ctrl_handler_setup() at stream-on).
 */
static int imx296_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct imx296 *sensor = container_of(ctrl->handler, struct imx296,
					     ctrls);
	const struct v4l2_mbus_framefmt *format;
	struct v4l2_subdev_state *state;
	unsigned int vmax;
	int ret = 0;

	if (!sensor->streaming)
		return 0;

	state = v4l2_subdev_get_locked_active_state(&sensor->subdev);
	format = v4l2_subdev_get_pad_format(&sensor->subdev, state, 0);

	switch (ctrl->id) {
	case V4L2_CID_EXPOSURE:
		/* Clamp the exposure value to VMAX. */
		vmax = format->height + sensor->vblank->cur.val;
		ctrl->val = min_t(int, ctrl->val, vmax);
		/* SHS1 counts down from VMAX: larger SHS1 = shorter exposure */
		imx296_write(sensor, IMX296_SHS1, vmax - ctrl->val, &ret);
		break;

	case V4L2_CID_ANALOGUE_GAIN:
		imx296_write(sensor, IMX296_GAIN, ctrl->val, &ret);
		break;

	case V4L2_CID_VBLANK:
		imx296_write(sensor, IMX296_VMAX, format->height + ctrl->val,
			     &ret);
		break;

	case V4L2_CID_TEST_PATTERN:
		if (ctrl->val) {
			imx296_write(sensor, IMX296_PGHPOS, 8, &ret);
			imx296_write(sensor, IMX296_PGVPOS, 8, &ret);
			imx296_write(sensor, IMX296_PGHPSTEP, 8, &ret);
			imx296_write(sensor, IMX296_PGVPSTEP, 8, &ret);
			imx296_write(sensor, IMX296_PGHPNUM, 100, &ret);
			imx296_write(sensor, IMX296_PGVPNUM, 100, &ret);
			imx296_write(sensor, IMX296_PGDATA1, 0x300, &ret);
			imx296_write(sensor, IMX296_PGDATA2, 0x100, &ret);
			imx296_write(sensor, IMX296_PGHGSTEP, 0, &ret);
			imx296_write(sensor, IMX296_BLKLEVEL, 0, &ret);
			imx296_write(sensor, IMX296_BLKLEVELAUTO,
				     IMX296_BLKLEVELAUTO_OFF, &ret);
			imx296_write(sensor, IMX296_PGCTRL,
				     IMX296_PGCTRL_REGEN |
				     IMX296_PGCTRL_CLKEN |
				     IMX296_PGCTRL_MODE(ctrl->val - 1), &ret);
		} else {
			imx296_write(sensor, IMX296_PGCTRL,
				     IMX296_PGCTRL_CLKEN, &ret);
			imx296_write(sensor, IMX296_BLKLEVEL, 0x3c, &ret);
			imx296_write(sensor, IMX296_BLKLEVELAUTO,
				     IMX296_BLKLEVELAUTO_ON, &ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct v4l2_ctrl_ops imx296_ctrl_ops = {
	.s_ctrl = imx296_s_ctrl,
};

/* Create the control handler and all sensor controls. */
static int imx296_ctrls_init(struct imx296 *sensor)
{
	struct v4l2_fwnode_device_properties props;
	unsigned int hblank;
	int ret;

	ret = v4l2_fwnode_device_parse(sensor->dev, &props);
	if (ret < 0)
		return ret;

	v4l2_ctrl_handler_init(&sensor->ctrls, 9);

	v4l2_ctrl_new_std(&sensor->ctrls, &imx296_ctrl_ops,
			  V4L2_CID_EXPOSURE, 1, 1048575, 1, 1104);
	v4l2_ctrl_new_std(&sensor->ctrls, &imx296_ctrl_ops,
			  V4L2_CID_ANALOGUE_GAIN, IMX296_GAIN_MIN,
			  IMX296_GAIN_MAX, 1, IMX296_GAIN_MIN);

	/*
	 * Horizontal blanking is controlled through the HMAX register, which
	 * contains a line length in INCK clock units.
	 * The INCK frequency is
	 * fixed to 74.25 MHz. The HMAX value is currently fixed to 1100,
	 * convert it to a number of pixels based on the nominal pixel rate.
	 */
	hblank = 1100 * 1188000000ULL / 10 / 74250000
	       - IMX296_PIXEL_ARRAY_WIDTH;
	sensor->hblank = v4l2_ctrl_new_std(&sensor->ctrls, &imx296_ctrl_ops,
					   V4L2_CID_HBLANK, hblank, hblank, 1,
					   hblank);
	if (sensor->hblank)
		sensor->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;

	sensor->vblank = v4l2_ctrl_new_std(&sensor->ctrls, &imx296_ctrl_ops,
					   V4L2_CID_VBLANK, 30,
					   1048575 - IMX296_PIXEL_ARRAY_HEIGHT,
					   1, 30);

	/*
	 * The sensor calculates the MIPI timings internally to achieve a bit
	 * rate between 1122 and 1198 Mbps. The exact value is unfortunately not
	 * reported, at least according to the documentation. Report a nominal
	 * rate of 1188 Mbps as that is used by the datasheet in multiple
	 * examples.
	 */
	v4l2_ctrl_new_std(&sensor->ctrls, NULL, V4L2_CID_PIXEL_RATE,
			  1122000000 / 10, 1198000000 / 10, 1,
			  1188000000 / 10);
	v4l2_ctrl_new_std_menu_items(&sensor->ctrls, &imx296_ctrl_ops,
				     V4L2_CID_TEST_PATTERN,
				     ARRAY_SIZE(imx296_test_pattern_menu) - 1,
				     0, 0, imx296_test_pattern_menu);

	v4l2_ctrl_new_fwnode_properties(&sensor->ctrls, &imx296_ctrl_ops,
					&props);

	if (sensor->ctrls.error) {
		dev_err(sensor->dev, "failed to add controls (%d)\n",
			sensor->ctrls.error);
		v4l2_ctrl_handler_free(&sensor->ctrls);
		return sensor->ctrls.error;
	}

	sensor->subdev.ctrl_handler = &sensor->ctrls;

	return 0;
}

/* -----------------------------------------------------------------------------
 * V4L2 Subdev Operations
 */

/*
 * This table is extracted from vendor data that is entirely undocumented. The
 * first register write is required to activate the CSI-2 output. The other
 * entries may or may not be optional?
 */
static const struct {
	unsigned int reg;
	unsigned int value;
} imx296_init_table[] = {
	{ IMX296_REG_8BIT(0x3005), 0xf0 },
	{ IMX296_REG_8BIT(0x309e), 0x04 },
	{ IMX296_REG_8BIT(0x30a0), 0x04 },
	{ IMX296_REG_8BIT(0x30a1), 0x3c },
	{ IMX296_REG_8BIT(0x30a4), 0x5f },
	{ IMX296_REG_8BIT(0x30a8), 0x91 },
	{ IMX296_REG_8BIT(0x30ac), 0x28 },
	{ IMX296_REG_8BIT(0x30af), 0x09 },
	{ IMX296_REG_8BIT(0x30df), 0x00 },
	{ IMX296_REG_8BIT(0x3165), 0x00 },
	{ IMX296_REG_8BIT(0x3169), 0x10 },
	{ IMX296_REG_8BIT(0x316a), 0x02 },
	{ IMX296_REG_8BIT(0x31c8), 0xf3 },	/* Exposure-related */
	{ IMX296_REG_8BIT(0x31d0), 0xf4 },	/* Exposure-related */
	{ IMX296_REG_8BIT(0x321a), 0x00 },
	{ IMX296_REG_8BIT(0x3226), 0x02 },
	{ IMX296_REG_8BIT(0x3256), 0x01 },
	{ IMX296_REG_8BIT(0x3541), 0x72 },
	{ IMX296_REG_8BIT(0x3516), 0x77 },
	{ IMX296_REG_8BIT(0x350b), 0x7f },
	{ IMX296_REG_8BIT(0x3758), 0xa3 },
	{ IMX296_REG_8BIT(0x3759), 0x00 },
	{ IMX296_REG_8BIT(0x375a), 0x85 },
	{ IMX296_REG_8BIT(0x375b), 0x00 },
	{ IMX296_REG_8BIT(0x3832), 0xf5 },
	{ IMX296_REG_8BIT(0x3833), 0x00 },
	{ IMX296_REG_8BIT(0x38a2), 0xf6 },
	{ IMX296_REG_8BIT(0x38a3), 0x00 },
	{ IMX296_REG_8BIT(0x3a00), 0x80 },
	{ IMX296_REG_8BIT(0x3d48), 0xa3 },
	{ IMX296_REG_8BIT(0x3d49), 0x00 },
	{ IMX296_REG_8BIT(0x3d4a), 0x85 },
	{ IMX296_REG_8BIT(0x3d4b), 0x00 },
	{ IMX296_REG_8BIT(0x400e), 0x58 },
	{ IMX296_REG_8BIT(0x4014), 0x1c },
	{ IMX296_REG_8BIT(0x4041), 0x2a },
	{ IMX296_REG_8BIT(0x40a2), 0x06 },
	{ IMX296_REG_8BIT(0x40c1), 0xf6 },
	{ IMX296_REG_8BIT(0x40c7), 0x0f },
	{ IMX296_REG_8BIT(0x40c8), 0x00 },
	{ IMX296_REG_8BIT(0x4174), 0x00 },
};

/*
 * Program the full sensor configuration (init table, crop/binning window,
 * timings and clock parameters) from the active subdev state. Errors from
 * individual writes accumulate in @ret via imx296_write().
 */
static int imx296_setup(struct imx296 *sensor, struct v4l2_subdev_state *state)
{
	const struct v4l2_mbus_framefmt *format;
	const struct v4l2_rect *crop;
	unsigned int i;
	int ret = 0;

	format = v4l2_subdev_get_pad_format(&sensor->subdev, state, 0);
	crop = v4l2_subdev_get_pad_crop(&sensor->subdev, state, 0);

	for (i = 0; i < ARRAY_SIZE(imx296_init_table); ++i)
		imx296_write(sensor, imx296_init_table[i].reg,
			     imx296_init_table[i].value, &ret);

	/* Enable the ROI only when not capturing the full pixel array. */
	if (crop->width != IMX296_PIXEL_ARRAY_WIDTH ||
	    crop->height != IMX296_PIXEL_ARRAY_HEIGHT) {
		imx296_write(sensor, IMX296_FID0_ROI,
			     IMX296_FID0_ROIH1ON | IMX296_FID0_ROIV1ON, &ret);
		imx296_write(sensor, IMX296_FID0_ROIPH1, crop->left, &ret);
		imx296_write(sensor, IMX296_FID0_ROIPV1, crop->top, &ret);
		imx296_write(sensor, IMX296_FID0_ROIWH1, crop->width, &ret);
		imx296_write(sensor, IMX296_FID0_ROIWV1, crop->height, &ret);
	} else {
		imx296_write(sensor, IMX296_FID0_ROI, 0, &ret);
	}

	/* Binning flags are derived from crop vs. output size mismatch. */
	imx296_write(sensor, IMX296_CTRL0D,
		     (crop->width != format->width ?
		      IMX296_CTRL0D_HADD_ON_BINNING : 0) |
		     (crop->height != format->height ?
		      IMX296_CTRL0D_WINMODE_FD_BINNING : 0),
		     &ret);

	/*
	 * HMAX and VMAX configure horizontal and vertical blanking by
	 * specifying the total line time and frame time respectively. The line
	 * time is specified in operational clock units (which appears to be the
	 * output of an internal PLL, fixed at 74.25 MHz regardless of the
	 * external clock frequency), while the frame time is specified as a
	 * number of lines.
	 *
	 * In the vertical direction the sensor outputs the following:
	 *
	 * - one line for the FS packet
	 * - two lines of embedded data (DT 0x12)
	 * - six null lines (DT 0x10)
	 * - four lines of vertical effective optical black (DT 0x37)
	 * - 8 to 1088 lines of active image data (RAW10, DT 0x2b)
	 * - one line for the FE packet
	 * - 16 or more lines of vertical blanking
	 */
	imx296_write(sensor, IMX296_HMAX, 1100, &ret);
	imx296_write(sensor, IMX296_VMAX,
		     format->height + sensor->vblank->cur.val, &ret);

	for (i = 0; i < ARRAY_SIZE(sensor->clk_params->incksel); ++i)
		imx296_write(sensor, IMX296_INCKSEL(i),
			     sensor->clk_params->incksel[i], &ret);
	imx296_write(sensor, IMX296_GTTABLENUM, 0xc5, &ret);
	imx296_write(sensor, IMX296_CTRL418C, sensor->clk_params->ctrl418c,
		     &ret);

	imx296_write(sensor, IMX296_GAINDLY, IMX296_GAINDLY_NONE, &ret);
	imx296_write(sensor, IMX296_BLKLEVEL, 0x03c, &ret);

	return ret;
}

/* Leave standby, then release the master-start state to begin capture. */
static int imx296_stream_on(struct imx296 *sensor)
{
	int ret = 0;

	imx296_write(sensor, IMX296_CTRL00, 0, &ret);
	usleep_range(2000, 5000);
	imx296_write(sensor, IMX296_CTRL0A, 0, &ret);

	return ret;
}

/* Assert master-start stop, then enter standby. */
static int imx296_stream_off(struct imx296 *sensor)
{
	int ret = 0;

	imx296_write(sensor, IMX296_CTRL0A, IMX296_CTRL0A_XMSTA, &ret);
	imx296_write(sensor, IMX296_CTRL00, IMX296_CTRL00_STANDBY, &ret);

	return ret;
}

/*
 * s_stream handler: pairs runtime-PM get/put with stream start/stop and
 * programs the full configuration before enabling the stream.
 */
static int imx296_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct imx296 *sensor = to_imx296(sd);
	struct v4l2_subdev_state *state;
	int ret;

	state = v4l2_subdev_lock_and_get_active_state(sd);

	if (!enable) {
		ret = imx296_stream_off(sensor);

		pm_runtime_mark_last_busy(sensor->dev);
		pm_runtime_put_autosuspend(sensor->dev);

		sensor->streaming = false;

		goto unlock;
	}

	ret = pm_runtime_resume_and_get(sensor->dev);
	if (ret < 0)
		goto unlock;

	ret = imx296_setup(sensor, state);
	if (ret < 0)
		goto err_pm;

	/*
	 * Set streaming to true to ensure __v4l2_ctrl_handler_setup() will set
	 * the controls. The flag is reset to false further down if an error
	 * occurs.
*/ sensor->streaming = true; ret = __v4l2_ctrl_handler_setup(&sensor->ctrls); if (ret < 0) goto err_pm; ret = imx296_stream_on(sensor); if (ret) goto err_pm; unlock: v4l2_subdev_unlock_state(state); return ret; err_pm: /* * In case of error, turn the power off synchronously as the device * likely has no other chance to recover. */ pm_runtime_put_sync(sensor->dev); sensor->streaming = false; goto unlock; } static int imx296_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_mbus_code_enum *code) { struct imx296 *sensor = to_imx296(sd); if (code->index != 0) return -EINVAL; code->code = sensor->mono ? MEDIA_BUS_FMT_Y10_1X10 : MEDIA_BUS_FMT_SBGGR10_1X10; return 0; } static int imx296_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_frame_size_enum *fse) { const struct v4l2_mbus_framefmt *format; format = v4l2_subdev_get_pad_format(sd, state, fse->pad); if (fse->index >= 2 || fse->code != format->code) return -EINVAL; fse->min_width = IMX296_PIXEL_ARRAY_WIDTH / (fse->index + 1); fse->max_width = fse->min_width; fse->min_height = IMX296_PIXEL_ARRAY_HEIGHT / (fse->index + 1); fse->max_height = fse->min_height; return 0; } static int imx296_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *state, struct v4l2_subdev_format *fmt) { struct imx296 *sensor = to_imx296(sd); struct v4l2_mbus_framefmt *format; struct v4l2_rect *crop; crop = v4l2_subdev_get_pad_crop(sd, state, fmt->pad); format = v4l2_subdev_get_pad_format(sd, state, fmt->pad); /* * Binning is only allowed when cropping is disabled according to the * documentation. This should be double-checked. */ if (crop->width == IMX296_PIXEL_ARRAY_WIDTH && crop->height == IMX296_PIXEL_ARRAY_HEIGHT) { unsigned int width; unsigned int height; unsigned int hratio; unsigned int vratio; /* Clamp the width and height to avoid dividing by zero. 
                 */
                width = clamp_t(unsigned int, fmt->format.width,
                                crop->width / 2, crop->width);
                height = clamp_t(unsigned int, fmt->format.height,
                                 crop->height / 2, crop->height);

                /* Snap to the nearest supported binning ratio (1 or 2). */
                hratio = DIV_ROUND_CLOSEST(crop->width, width);
                vratio = DIV_ROUND_CLOSEST(crop->height, height);

                format->width = crop->width / hratio;
                format->height = crop->height / vratio;
        } else {
                /* With cropping active, output size equals the crop size. */
                format->width = crop->width;
                format->height = crop->height;
        }

        format->code = sensor->mono ? MEDIA_BUS_FMT_Y10_1X10
                                    : MEDIA_BUS_FMT_SBGGR10_1X10;
        format->field = V4L2_FIELD_NONE;
        format->colorspace = V4L2_COLORSPACE_RAW;
        format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
        format->quantization = V4L2_QUANTIZATION_FULL_RANGE;
        format->xfer_func = V4L2_XFER_FUNC_NONE;

        fmt->format = *format;

        return 0;
}

static int imx296_get_selection(struct v4l2_subdev *sd,
                                struct v4l2_subdev_state *state,
                                struct v4l2_subdev_selection *sel)
{
        switch (sel->target) {
        case V4L2_SEL_TGT_CROP:
                sel->r = *v4l2_subdev_get_pad_crop(sd, state, sel->pad);
                break;

        case V4L2_SEL_TGT_CROP_DEFAULT:
        case V4L2_SEL_TGT_CROP_BOUNDS:
        case V4L2_SEL_TGT_NATIVE_SIZE:
                /* Default, bounds and native size all cover the full array. */
                sel->r.left = 0;
                sel->r.top = 0;
                sel->r.width = IMX296_PIXEL_ARRAY_WIDTH;
                sel->r.height = IMX296_PIXEL_ARRAY_HEIGHT;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static int imx296_set_selection(struct v4l2_subdev *sd,
                                struct v4l2_subdev_state *state,
                                struct v4l2_subdev_selection *sel)
{
        struct v4l2_mbus_framefmt *format;
        struct v4l2_rect *crop;
        struct v4l2_rect rect;

        if (sel->target != V4L2_SEL_TGT_CROP)
                return -EINVAL;

        /*
         * Clamp the crop rectangle boundaries and align them to a multiple of 4
         * pixels to satisfy hardware requirements.
         */
        rect.left = clamp(ALIGN(sel->r.left, 4), 0,
                          IMX296_PIXEL_ARRAY_WIDTH - IMX296_FID0_ROIWH1_MIN);
        rect.top = clamp(ALIGN(sel->r.top, 4), 0,
                         IMX296_PIXEL_ARRAY_HEIGHT - IMX296_FID0_ROIWV1_MIN);
        rect.width = clamp_t(unsigned int, ALIGN(sel->r.width, 4),
                             IMX296_FID0_ROIWH1_MIN, IMX296_PIXEL_ARRAY_WIDTH);
        rect.height = clamp_t(unsigned int, ALIGN(sel->r.height, 4),
                              IMX296_FID0_ROIWV1_MIN, IMX296_PIXEL_ARRAY_HEIGHT);

        /* Keep the rectangle fully inside the pixel array. */
        rect.width = min_t(unsigned int, rect.width,
                           IMX296_PIXEL_ARRAY_WIDTH - rect.left);
        rect.height = min_t(unsigned int, rect.height,
                            IMX296_PIXEL_ARRAY_HEIGHT - rect.top);

        crop = v4l2_subdev_get_pad_crop(sd, state, sel->pad);

        if (rect.width != crop->width || rect.height != crop->height) {
                /*
                 * Reset the output image size if the crop rectangle size has
                 * been modified.
                 */
                format = v4l2_subdev_get_pad_format(sd, state, sel->pad);
                format->width = rect.width;
                format->height = rect.height;
        }

        *crop = rect;
        sel->r = rect;

        return 0;
}

/* Initialize the pad state: full pixel array crop and matching format. */
static int imx296_init_cfg(struct v4l2_subdev *sd,
                           struct v4l2_subdev_state *state)
{
        struct v4l2_subdev_selection sel = {
                .target = V4L2_SEL_TGT_CROP,
                .r.width = IMX296_PIXEL_ARRAY_WIDTH,
                .r.height = IMX296_PIXEL_ARRAY_HEIGHT,
        };
        struct v4l2_subdev_format format = {
                .format = {
                        .width = IMX296_PIXEL_ARRAY_WIDTH,
                        .height = IMX296_PIXEL_ARRAY_HEIGHT,
                },
        };

        imx296_set_selection(sd, state, &sel);
        imx296_set_format(sd, state, &format);

        return 0;
}

static const struct v4l2_subdev_video_ops imx296_subdev_video_ops = {
        .s_stream = imx296_s_stream,
};

static const struct v4l2_subdev_pad_ops imx296_subdev_pad_ops = {
        .enum_mbus_code = imx296_enum_mbus_code,
        .enum_frame_size = imx296_enum_frame_size,
        .get_fmt = v4l2_subdev_get_fmt,
        .set_fmt = imx296_set_format,
        .get_selection = imx296_get_selection,
        .set_selection = imx296_set_selection,
        .init_cfg = imx296_init_cfg,
};

static const struct v4l2_subdev_ops imx296_subdev_ops = {
        .video = &imx296_subdev_video_ops,
        .pad = &imx296_subdev_pad_ops,
};

static int imx296_subdev_init(struct imx296 *sensor)
{ struct i2c_client *client = to_i2c_client(sensor->dev); int ret; v4l2_i2c_subdev_init(&sensor->subdev, client, &imx296_subdev_ops); ret = imx296_ctrls_init(sensor); if (ret < 0) return ret; sensor->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; sensor->pad.flags = MEDIA_PAD_FL_SOURCE; sensor->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR; ret = media_entity_pads_init(&sensor->subdev.entity, 1, &sensor->pad); if (ret < 0) { v4l2_ctrl_handler_free(&sensor->ctrls); return ret; } sensor->subdev.state_lock = sensor->subdev.ctrl_handler->lock; v4l2_subdev_init_finalize(&sensor->subdev); return ret; } static void imx296_subdev_cleanup(struct imx296 *sensor) { media_entity_cleanup(&sensor->subdev.entity); v4l2_ctrl_handler_free(&sensor->ctrls); } /* ----------------------------------------------------------------------------- * Power management */ static int __maybe_unused imx296_runtime_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct imx296 *sensor = to_imx296(subdev); return imx296_power_on(sensor); } static int __maybe_unused imx296_runtime_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct imx296 *sensor = to_imx296(subdev); imx296_power_off(sensor); return 0; } static const struct dev_pm_ops imx296_pm_ops = { SET_RUNTIME_PM_OPS(imx296_runtime_suspend, imx296_runtime_resume, NULL) }; /* ----------------------------------------------------------------------------- * Probe & Remove */ static int imx296_read_temperature(struct imx296 *sensor, int *temp) { int tmdout; int ret; ret = imx296_write(sensor, IMX296_TMDCTRL, IMX296_TMDCTRL_LATCH, NULL); if (ret < 0) return ret; tmdout = imx296_read(sensor, IMX296_TMDOUT); if (tmdout < 0) return tmdout; tmdout &= IMX296_TMDOUT_MASK; /* T(°C) = 246.312 - 0.304 * TMDOUT */; *temp = 246312 - 304 * tmdout; return imx296_write(sensor, IMX296_TMDCTRL, 0, 
                            NULL);
}

/*
 * Identify the sensor model, either from OF match data (forced model, no
 * auto-detection) or by reading the sensor info register. Also reads and
 * logs the die temperature on successful detection.
 */
static int imx296_identify_model(struct imx296 *sensor)
{
        unsigned int model;
        int temp = 0;
        int ret;

        model = (uintptr_t)of_device_get_match_data(sensor->dev);
        if (model) {
                dev_dbg(sensor->dev,
                        "sensor model auto-detection disabled, forcing 0x%04x\n",
                        model);
                sensor->mono = model & IMX296_SENSOR_INFO_MONO;
                return 0;
        }

        /*
         * While most registers can be read when the sensor is in standby, this
         * is not the case of the sensor info register :-(
         */
        ret = imx296_write(sensor, IMX296_CTRL00, 0, NULL);
        if (ret < 0) {
                dev_err(sensor->dev,
                        "failed to get sensor out of standby (%d)\n", ret);
                return ret;
        }

        ret = imx296_read(sensor, IMX296_SENSOR_INFO);
        if (ret < 0) {
                dev_err(sensor->dev, "failed to read sensor information (%d)\n",
                        ret);
                goto done;
        }

        /* Model number lives in bits [14:6] of the sensor info register. */
        model = (ret >> 6) & 0x1ff;

        switch (model) {
        case 296:
                sensor->mono = ret & IMX296_SENSOR_INFO_MONO;
                break;
        /*
         * The IMX297 seems to share features with the IMX296, it may be
         * possible to support it in the same driver.
         */
        case 297:
        default:
                dev_err(sensor->dev, "invalid device model 0x%04x\n", ret);
                ret = -ENODEV;
                goto done;
        }

        ret = imx296_read_temperature(sensor, &temp);
        if (ret < 0)
                goto done;

        dev_info(sensor->dev, "found IMX%u%s (%u.%uC)\n", model, sensor->mono ?
"LL" : "LQ", temp / 1000, (temp / 100) % 10); done: imx296_write(sensor, IMX296_CTRL00, IMX296_CTRL00_STANDBY, NULL); return ret; } static const struct regmap_config imx296_regmap_config = { .reg_bits = 16, .val_bits = 8, .wr_table = &(const struct regmap_access_table) { .no_ranges = (const struct regmap_range[]) { { .range_min = IMX296_SENSOR_INFO & 0xffff, .range_max = (IMX296_SENSOR_INFO & 0xffff) + 1, }, }, .n_no_ranges = 1, }, }; static int imx296_probe(struct i2c_client *client) { struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); unsigned long clk_rate; struct imx296 *sensor; unsigned int i; int ret; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_warn(&adapter->dev, "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n"); return -EIO; } sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL); if (!sensor) return -ENOMEM; sensor->dev = &client->dev; /* Acquire resources. */ for (i = 0; i < ARRAY_SIZE(sensor->supplies); ++i) sensor->supplies[i].supply = imx296_supply_names[i]; ret = devm_regulator_bulk_get(sensor->dev, ARRAY_SIZE(sensor->supplies), sensor->supplies); if (ret) { dev_err_probe(sensor->dev, ret, "failed to get supplies\n"); return ret; } sensor->reset = devm_gpiod_get_optional(sensor->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(sensor->reset)) return dev_err_probe(sensor->dev, PTR_ERR(sensor->reset), "failed to get reset GPIO\n"); sensor->clk = devm_clk_get(sensor->dev, "inck"); if (IS_ERR(sensor->clk)) return dev_err_probe(sensor->dev, PTR_ERR(sensor->clk), "failed to get clock\n"); clk_rate = clk_get_rate(sensor->clk); for (i = 0; i < ARRAY_SIZE(imx296_clk_params); ++i) { if (clk_rate == imx296_clk_params[i].freq) { sensor->clk_params = &imx296_clk_params[i]; break; } } if (!sensor->clk_params) { dev_err(sensor->dev, "unsupported clock rate %lu\n", clk_rate); return -EINVAL; } sensor->regmap = devm_regmap_init_i2c(client, &imx296_regmap_config); if (IS_ERR(sensor->regmap)) return 
                       PTR_ERR(sensor->regmap);

        /*
         * Enable power management. The driver supports runtime PM, but needs to
         * work when runtime PM is disabled in the kernel. To that end, power
         * the sensor on manually here, identify it, and fully initialize it.
         */
        ret = imx296_power_on(sensor);
        if (ret < 0)
                return ret;

        ret = imx296_identify_model(sensor);
        if (ret < 0)
                goto err_power;

        /* Initialize the V4L2 subdev. */
        ret = imx296_subdev_init(sensor);
        if (ret < 0)
                goto err_power;

        /*
         * Enable runtime PM. As the device has been powered manually, mark it
         * as active, and increase the usage count without resuming the device.
         */
        pm_runtime_set_active(sensor->dev);
        pm_runtime_get_noresume(sensor->dev);
        pm_runtime_enable(sensor->dev);

        /* Register the V4L2 subdev. */
        ret = v4l2_async_register_subdev(&sensor->subdev);
        if (ret < 0)
                goto err_pm;

        /*
         * Finally, enable autosuspend and decrease the usage count. The device
         * will get suspended after the autosuspend delay, turning the power
         * off.
         */
        pm_runtime_set_autosuspend_delay(sensor->dev, 1000);
        pm_runtime_use_autosuspend(sensor->dev);
        pm_runtime_put_autosuspend(sensor->dev);

        return 0;

err_pm:
        pm_runtime_disable(sensor->dev);
        pm_runtime_put_noidle(sensor->dev);
        imx296_subdev_cleanup(sensor);
err_power:
        imx296_power_off(sensor);

        return ret;
}

static void imx296_remove(struct i2c_client *client)
{
        struct v4l2_subdev *subdev = i2c_get_clientdata(client);
        struct imx296 *sensor = to_imx296(subdev);

        v4l2_async_unregister_subdev(subdev);

        imx296_subdev_cleanup(sensor);

        /*
         * Disable runtime PM. In case runtime PM is disabled in the kernel,
         * make sure to turn power off manually.
         */
        pm_runtime_disable(sensor->dev);
        if (!pm_runtime_status_suspended(sensor->dev))
                imx296_power_off(sensor);
        pm_runtime_set_suspended(sensor->dev);
}

static const struct of_device_id imx296_of_match[] = {
        /* NULL data: auto-detect the model from the sensor info register. */
        { .compatible = "sony,imx296", .data = NULL },
        { .compatible = "sony,imx296ll", .data = (void *)IMX296_SENSOR_INFO_IMX296LL },
        { .compatible = "sony,imx296lq", .data = (void *)IMX296_SENSOR_INFO_IMX296LQ },
        { /* sentinel */ },
};

MODULE_DEVICE_TABLE(of, imx296_of_match);

static struct i2c_driver imx296_i2c_driver = {
        .driver = {
                .of_match_table = imx296_of_match,
                .name = "imx296",
                .pm = &imx296_pm_ops
        },
        .probe = imx296_probe,
        .remove = imx296_remove,
};

module_i2c_driver(imx296_i2c_driver);

MODULE_DESCRIPTION("Sony IMX296 Camera driver");
MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
MODULE_LICENSE("GPL");
/* Source: linux-master, drivers/media/i2c/imx296.c (end of file). */
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2021 Sieć Badawcza Łukasiewicz * - Przemysłowy Instytut Automatyki i Pomiarów PIAP * Written by Krzysztof Hałasa */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/pm_runtime.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-subdev.h> /* External clock (extclk) frequencies */ #define AR0521_EXTCLK_MIN (10 * 1000 * 1000) #define AR0521_EXTCLK_MAX (48 * 1000 * 1000) /* PLL and PLL2 */ #define AR0521_PLL_MIN (320 * 1000 * 1000) #define AR0521_PLL_MAX (1280 * 1000 * 1000) /* Effective pixel sample rate on the pixel array. */ #define AR0521_PIXEL_CLOCK_RATE (184 * 1000 * 1000) #define AR0521_PIXEL_CLOCK_MIN (168 * 1000 * 1000) #define AR0521_PIXEL_CLOCK_MAX (414 * 1000 * 1000) #define AR0521_NATIVE_WIDTH 2604u #define AR0521_NATIVE_HEIGHT 1964u #define AR0521_MIN_X_ADDR_START 0u #define AR0521_MIN_Y_ADDR_START 0u #define AR0521_MAX_X_ADDR_END 2603u #define AR0521_MAX_Y_ADDR_END 1955u #define AR0521_WIDTH_MIN 8u #define AR0521_WIDTH_MAX 2592u #define AR0521_HEIGHT_MIN 8u #define AR0521_HEIGHT_MAX 1944u #define AR0521_WIDTH_BLANKING_MIN 572u #define AR0521_HEIGHT_BLANKING_MIN 38u /* must be even */ #define AR0521_TOTAL_HEIGHT_MAX 65535u /* max_frame_length_lines */ #define AR0521_TOTAL_WIDTH_MAX 65532u /* max_line_length_pck */ #define AR0521_ANA_GAIN_MIN 0x00 #define AR0521_ANA_GAIN_MAX 0x3f #define AR0521_ANA_GAIN_STEP 0x01 #define AR0521_ANA_GAIN_DEFAULT 0x00 /* AR0521 registers */ #define AR0521_REG_VT_PIX_CLK_DIV 0x0300 #define AR0521_REG_FRAME_LENGTH_LINES 0x0340 #define AR0521_REG_CHIP_ID 0x3000 #define AR0521_REG_COARSE_INTEGRATION_TIME 0x3012 #define AR0521_REG_ROW_SPEED 0x3016 #define AR0521_REG_EXTRA_DELAY 0x3018 #define AR0521_REG_RESET 0x301A #define AR0521_REG_RESET_DEFAULTS 0x0238 #define AR0521_REG_RESET_GROUP_PARAM_HOLD 0x8000 #define AR0521_REG_RESET_STREAM BIT(2) #define AR0521_REG_RESET_RESTART BIT(1) #define AR0521_REG_RESET_INIT BIT(0) #define 
AR0521_REG_ANA_GAIN_CODE_GLOBAL 0x3028 #define AR0521_REG_GREEN1_GAIN 0x3056 #define AR0521_REG_BLUE_GAIN 0x3058 #define AR0521_REG_RED_GAIN 0x305A #define AR0521_REG_GREEN2_GAIN 0x305C #define AR0521_REG_GLOBAL_GAIN 0x305E #define AR0521_REG_HISPI_TEST_MODE 0x3066 #define AR0521_REG_HISPI_TEST_MODE_LP11 0x0004 #define AR0521_REG_TEST_PATTERN_MODE 0x3070 #define AR0521_REG_SERIAL_FORMAT 0x31AE #define AR0521_REG_SERIAL_FORMAT_MIPI 0x0200 #define AR0521_REG_HISPI_CONTROL_STATUS 0x31C6 #define AR0521_REG_HISPI_CONTROL_STATUS_FRAMER_TEST_MODE_ENABLE 0x80 #define be cpu_to_be16 static const char * const ar0521_supply_names[] = { "vdd_io", /* I/O (1.8V) supply */ "vdd", /* Core, PLL and MIPI (1.2V) supply */ "vaa", /* Analog (2.7V) supply */ }; static const s64 ar0521_link_frequencies[] = { 184000000, }; struct ar0521_ctrls { struct v4l2_ctrl_handler handler; struct { struct v4l2_ctrl *gain; struct v4l2_ctrl *red_balance; struct v4l2_ctrl *blue_balance; }; struct { struct v4l2_ctrl *hblank; struct v4l2_ctrl *vblank; }; struct v4l2_ctrl *pixrate; struct v4l2_ctrl *exposure; struct v4l2_ctrl *test_pattern; }; struct ar0521_dev { struct i2c_client *i2c_client; struct v4l2_subdev sd; struct media_pad pad; struct clk *extclk; u32 extclk_freq; struct regulator *supplies[ARRAY_SIZE(ar0521_supply_names)]; struct gpio_desc *reset_gpio; /* lock to protect all members below */ struct mutex lock; struct v4l2_mbus_framefmt fmt; struct ar0521_ctrls ctrls; unsigned int lane_count; struct { u16 pre; u16 mult; u16 pre2; u16 mult2; u16 vt_pix; } pll; bool streaming; }; static inline struct ar0521_dev *to_ar0521_dev(struct v4l2_subdev *sd) { return container_of(sd, struct ar0521_dev, sd); } static inline struct v4l2_subdev *ctrl_to_sd(struct v4l2_ctrl *ctrl) { return &container_of(ctrl->handler, struct ar0521_dev, ctrls.handler)->sd; } static u32 div64_round(u64 v, u32 d) { return div_u64(v + (d >> 1), d); } static u32 div64_round_up(u64 v, u32 d) { return div_u64(v + d - 1, d); } static 
int ar0521_code_to_bpp(struct ar0521_dev *sensor) { switch (sensor->fmt.code) { case MEDIA_BUS_FMT_SGRBG8_1X8: return 8; } return -EINVAL; } /* Data must be BE16, the first value is the register address */ static int ar0521_write_regs(struct ar0521_dev *sensor, const __be16 *data, unsigned int count) { struct i2c_client *client = sensor->i2c_client; struct i2c_msg msg; int ret; msg.addr = client->addr; msg.flags = client->flags; msg.buf = (u8 *)data; msg.len = count * sizeof(*data); ret = i2c_transfer(client->adapter, &msg, 1); if (ret < 0) { v4l2_err(&sensor->sd, "%s: I2C write error\n", __func__); return ret; } return 0; } static int ar0521_write_reg(struct ar0521_dev *sensor, u16 reg, u16 val) { __be16 buf[2] = {be(reg), be(val)}; return ar0521_write_regs(sensor, buf, 2); } static int ar0521_set_geometry(struct ar0521_dev *sensor) { /* Center the image in the visible output window. */ u16 x = clamp((AR0521_WIDTH_MAX - sensor->fmt.width) / 2, AR0521_MIN_X_ADDR_START, AR0521_MAX_X_ADDR_END); u16 y = clamp(((AR0521_HEIGHT_MAX - sensor->fmt.height) / 2) & ~1, AR0521_MIN_Y_ADDR_START, AR0521_MAX_Y_ADDR_END); /* All dimensions are unsigned 12-bit integers */ __be16 regs[] = { be(AR0521_REG_FRAME_LENGTH_LINES), be(sensor->fmt.height + sensor->ctrls.vblank->val), be(sensor->fmt.width + sensor->ctrls.hblank->val), be(x), be(y), be(x + sensor->fmt.width - 1), be(y + sensor->fmt.height - 1), be(sensor->fmt.width), be(sensor->fmt.height) }; return ar0521_write_regs(sensor, regs, ARRAY_SIZE(regs)); } static int ar0521_set_gains(struct ar0521_dev *sensor) { int green = sensor->ctrls.gain->val; int red = max(green + sensor->ctrls.red_balance->val, 0); int blue = max(green + sensor->ctrls.blue_balance->val, 0); unsigned int gain = min(red, min(green, blue)); unsigned int analog = min(gain, 64u); /* range is 0 - 127 */ __be16 regs[5]; red = min(red - analog + 64, 511u); green = min(green - analog + 64, 511u); blue = min(blue - analog + 64, 511u); regs[0] = 
be(AR0521_REG_GREEN1_GAIN); regs[1] = be(green << 7 | analog); regs[2] = be(blue << 7 | analog); regs[3] = be(red << 7 | analog); regs[4] = be(green << 7 | analog); return ar0521_write_regs(sensor, regs, ARRAY_SIZE(regs)); } static u32 calc_pll(struct ar0521_dev *sensor, u32 freq, u16 *pre_ptr, u16 *mult_ptr) { u16 pre = 1, mult = 1, new_pre; u32 pll = AR0521_PLL_MAX + 1; for (new_pre = 1; new_pre < 64; new_pre++) { u32 new_pll; u32 new_mult = div64_round_up((u64)freq * new_pre, sensor->extclk_freq); if (new_mult < 32) continue; /* Minimum value */ if (new_mult > 254) break; /* Maximum, larger pre won't work either */ if (sensor->extclk_freq * (u64)new_mult < AR0521_PLL_MIN * new_pre) continue; if (sensor->extclk_freq * (u64)new_mult > AR0521_PLL_MAX * new_pre) break; /* Larger pre won't work either */ new_pll = div64_round_up(sensor->extclk_freq * (u64)new_mult, new_pre); if (new_pll < pll) { pll = new_pll; pre = new_pre; mult = new_mult; } } pll = div64_round(sensor->extclk_freq * (u64)mult, pre); *pre_ptr = pre; *mult_ptr = mult; return pll; } static void ar0521_calc_pll(struct ar0521_dev *sensor) { unsigned int pixel_clock; u16 pre, mult; u32 vco; int bpp; /* * PLL1 and PLL2 are computed equally even if the application note * suggests a slower PLL1 clock. Maintain pll1 and pll2 divider and * multiplier separated to later specialize the calculation procedure. * * PLL1: * - mclk -> / pre_div1 * pre_mul1 = VCO1 = COUNTER_CLOCK * * PLL2: * - mclk -> / pre_div * pre_mul = VCO * * VCO -> / vt_pix = PIXEL_CLOCK * VCO -> / vt_pix / 2 = WORD_CLOCK * VCO -> / op_sys = SERIAL_CLOCK * * With: * - vt_pix = bpp / 2 * - WORD_CLOCK = PIXEL_CLOCK / 2 * - SERIAL_CLOCK = MIPI data rate (Mbps / lane) = WORD_CLOCK * bpp * NOTE: this implies the MIPI clock is divided internally by 2 * to account for DDR. 
* * As op_sys_div is fixed to 1: * * SERIAL_CLOCK = VCO * VCO = 2 * MIPI_CLK * VCO = PIXEL_CLOCK * bpp / 2 * * In the clock tree: * MIPI_CLK = PIXEL_CLOCK * bpp / 2 / 2 * * Generic pixel_rate to bus clock frequencey equation: * MIPI_CLK = V4L2_CID_PIXEL_RATE * bpp / lanes / 2 * * From which we derive the PIXEL_CLOCK to use in the clock tree: * PIXEL_CLOCK = V4L2_CID_PIXEL_RATE * 2 / lanes * * Documented clock ranges: * WORD_CLOCK = (35MHz - 120 MHz) * PIXEL_CLOCK = (84MHz - 207MHz) * VCO = (320MHz - 1280MHz) * * TODO: in case we have less data lanes we have to reduce the desired * VCO not to exceed the limits specified by the datasheet and * consequentially reduce the obtained pixel clock. */ pixel_clock = AR0521_PIXEL_CLOCK_RATE * 2 / sensor->lane_count; bpp = ar0521_code_to_bpp(sensor); sensor->pll.vt_pix = bpp / 2; vco = pixel_clock * sensor->pll.vt_pix; calc_pll(sensor, vco, &pre, &mult); sensor->pll.pre = sensor->pll.pre2 = pre; sensor->pll.mult = sensor->pll.mult2 = mult; } static int ar0521_pll_config(struct ar0521_dev *sensor) { __be16 pll_regs[] = { be(AR0521_REG_VT_PIX_CLK_DIV), /* 0x300 */ be(sensor->pll.vt_pix), /* vt_pix_clk_div = bpp / 2 */ /* 0x302 */ be(1), /* vt_sys_clk_div */ /* 0x304 */ be((sensor->pll.pre2 << 8) | sensor->pll.pre), /* 0x306 */ be((sensor->pll.mult2 << 8) | sensor->pll.mult), /* 0x308 */ be(sensor->pll.vt_pix * 2), /* op_pix_clk_div = 2 * vt_pix_clk_div */ /* 0x30A */ be(1) /* op_sys_clk_div */ }; ar0521_calc_pll(sensor); return ar0521_write_regs(sensor, pll_regs, ARRAY_SIZE(pll_regs)); } static int ar0521_set_stream(struct ar0521_dev *sensor, bool on) { int ret; if (on) { ret = pm_runtime_resume_and_get(&sensor->i2c_client->dev); if (ret < 0) return ret; /* Stop streaming for just a moment */ ret = ar0521_write_reg(sensor, AR0521_REG_RESET, AR0521_REG_RESET_DEFAULTS); if (ret) return ret; ret = ar0521_set_geometry(sensor); if (ret) return ret; ret = ar0521_pll_config(sensor); if (ret) goto err; ret = 
__v4l2_ctrl_handler_setup(&sensor->ctrls.handler); if (ret) goto err; /* Exit LP-11 mode on clock and data lanes */ ret = ar0521_write_reg(sensor, AR0521_REG_HISPI_CONTROL_STATUS, 0); if (ret) goto err; /* Start streaming */ ret = ar0521_write_reg(sensor, AR0521_REG_RESET, AR0521_REG_RESET_DEFAULTS | AR0521_REG_RESET_STREAM); if (ret) goto err; return 0; err: pm_runtime_put(&sensor->i2c_client->dev); return ret; } else { /* * Reset gain, the sensor may produce all white pixels without * this */ ret = ar0521_write_reg(sensor, AR0521_REG_GLOBAL_GAIN, 0x2000); if (ret) return ret; /* Stop streaming */ ret = ar0521_write_reg(sensor, AR0521_REG_RESET, AR0521_REG_RESET_DEFAULTS); if (ret) return ret; pm_runtime_put(&sensor->i2c_client->dev); return 0; } } static void ar0521_adj_fmt(struct v4l2_mbus_framefmt *fmt) { fmt->width = clamp(ALIGN(fmt->width, 4), AR0521_WIDTH_MIN, AR0521_WIDTH_MAX); fmt->height = clamp(ALIGN(fmt->height, 4), AR0521_HEIGHT_MIN, AR0521_HEIGHT_MAX); fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8; fmt->field = V4L2_FIELD_NONE; fmt->colorspace = V4L2_COLORSPACE_SRGB; fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT; } static int ar0521_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct ar0521_dev *sensor = to_ar0521_dev(sd); struct v4l2_mbus_framefmt *fmt; mutex_lock(&sensor->lock); if (format->which == V4L2_SUBDEV_FORMAT_TRY) fmt = v4l2_subdev_get_try_format(&sensor->sd, sd_state, 0 /* pad */); else fmt = &sensor->fmt; format->format = *fmt; mutex_unlock(&sensor->lock); return 0; } static int ar0521_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct ar0521_dev *sensor = to_ar0521_dev(sd); int max_vblank, max_hblank, exposure_max; int ret; ar0521_adj_fmt(&format->format); mutex_lock(&sensor->lock); if (format->which == V4L2_SUBDEV_FORMAT_TRY) { struct 
v4l2_mbus_framefmt *fmt; fmt = v4l2_subdev_get_try_format(sd, sd_state, 0 /* pad */); *fmt = format->format; mutex_unlock(&sensor->lock); return 0; } sensor->fmt = format->format; ar0521_calc_pll(sensor); /* * Update the exposure and blankings limits. Blankings are also reset * to the minimum. */ max_hblank = AR0521_TOTAL_WIDTH_MAX - sensor->fmt.width; ret = __v4l2_ctrl_modify_range(sensor->ctrls.hblank, sensor->ctrls.hblank->minimum, max_hblank, sensor->ctrls.hblank->step, sensor->ctrls.hblank->minimum); if (ret) goto unlock; ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.hblank, sensor->ctrls.hblank->minimum); if (ret) goto unlock; max_vblank = AR0521_TOTAL_HEIGHT_MAX - sensor->fmt.height; ret = __v4l2_ctrl_modify_range(sensor->ctrls.vblank, sensor->ctrls.vblank->minimum, max_vblank, sensor->ctrls.vblank->step, sensor->ctrls.vblank->minimum); if (ret) goto unlock; ret = __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, sensor->ctrls.vblank->minimum); if (ret) goto unlock; exposure_max = sensor->fmt.height + AR0521_HEIGHT_BLANKING_MIN - 4; ret = __v4l2_ctrl_modify_range(sensor->ctrls.exposure, sensor->ctrls.exposure->minimum, exposure_max, sensor->ctrls.exposure->step, sensor->ctrls.exposure->default_value); unlock: mutex_unlock(&sensor->lock); return ret; } static int ar0521_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = ctrl_to_sd(ctrl); struct ar0521_dev *sensor = to_ar0521_dev(sd); int exp_max; int ret; /* v4l2_ctrl_lock() locks our own mutex */ switch (ctrl->id) { case V4L2_CID_VBLANK: exp_max = sensor->fmt.height + ctrl->val - 4; __v4l2_ctrl_modify_range(sensor->ctrls.exposure, sensor->ctrls.exposure->minimum, exp_max, sensor->ctrls.exposure->step, sensor->ctrls.exposure->default_value); break; } /* access the sensor only if it's powered up */ if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev)) return 0; switch (ctrl->id) { case V4L2_CID_HBLANK: case V4L2_CID_VBLANK: ret = ar0521_set_geometry(sensor); break; case V4L2_CID_ANALOGUE_GAIN: ret = 
ar0521_write_reg(sensor, AR0521_REG_ANA_GAIN_CODE_GLOBAL, ctrl->val); break; case V4L2_CID_GAIN: case V4L2_CID_RED_BALANCE: case V4L2_CID_BLUE_BALANCE: ret = ar0521_set_gains(sensor); break; case V4L2_CID_EXPOSURE: ret = ar0521_write_reg(sensor, AR0521_REG_COARSE_INTEGRATION_TIME, ctrl->val); break; case V4L2_CID_TEST_PATTERN: ret = ar0521_write_reg(sensor, AR0521_REG_TEST_PATTERN_MODE, ctrl->val); break; default: dev_err(&sensor->i2c_client->dev, "Unsupported control %x\n", ctrl->id); ret = -EINVAL; break; } pm_runtime_put(&sensor->i2c_client->dev); return ret; } static const struct v4l2_ctrl_ops ar0521_ctrl_ops = { .s_ctrl = ar0521_s_ctrl, }; static const char * const test_pattern_menu[] = { "Disabled", "Solid color", "Color bars", "Faded color bars" }; static int ar0521_init_controls(struct ar0521_dev *sensor) { const struct v4l2_ctrl_ops *ops = &ar0521_ctrl_ops; struct ar0521_ctrls *ctrls = &sensor->ctrls; struct v4l2_ctrl_handler *hdl = &ctrls->handler; int max_vblank, max_hblank, exposure_max; struct v4l2_ctrl *link_freq; int ret; v4l2_ctrl_handler_init(hdl, 32); /* We can use our own mutex for the ctrl lock */ hdl->lock = &sensor->lock; /* Analog gain */ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN, AR0521_ANA_GAIN_MIN, AR0521_ANA_GAIN_MAX, AR0521_ANA_GAIN_STEP, AR0521_ANA_GAIN_DEFAULT); /* Manual gain */ ctrls->gain = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 511, 1, 0); ctrls->red_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_RED_BALANCE, -512, 511, 1, 0); ctrls->blue_balance = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BLUE_BALANCE, -512, 511, 1, 0); v4l2_ctrl_cluster(3, &ctrls->gain); /* Initialize blanking limits using the default 2592x1944 format. 
*/ max_hblank = AR0521_TOTAL_WIDTH_MAX - AR0521_WIDTH_MAX; ctrls->hblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK, AR0521_WIDTH_BLANKING_MIN, max_hblank, 1, AR0521_WIDTH_BLANKING_MIN); max_vblank = AR0521_TOTAL_HEIGHT_MAX - AR0521_HEIGHT_MAX; ctrls->vblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK, AR0521_HEIGHT_BLANKING_MIN, max_vblank, 2, AR0521_HEIGHT_BLANKING_MIN); v4l2_ctrl_cluster(2, &ctrls->hblank); /* Read-only */ ctrls->pixrate = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_PIXEL_RATE, AR0521_PIXEL_CLOCK_MIN, AR0521_PIXEL_CLOCK_MAX, 1, AR0521_PIXEL_CLOCK_RATE); /* Manual exposure time: max exposure time = visible + blank - 4 */ exposure_max = AR0521_HEIGHT_MAX + AR0521_HEIGHT_BLANKING_MIN - 4; ctrls->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE, 0, exposure_max, 1, 0x70); link_freq = v4l2_ctrl_new_int_menu(hdl, ops, V4L2_CID_LINK_FREQ, ARRAY_SIZE(ar0521_link_frequencies) - 1, 0, ar0521_link_frequencies); if (link_freq) link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; ctrls->test_pattern = v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN, ARRAY_SIZE(test_pattern_menu) - 1, 0, 0, test_pattern_menu); if (hdl->error) { ret = hdl->error; goto free_ctrls; } sensor->sd.ctrl_handler = hdl; return 0; free_ctrls: v4l2_ctrl_handler_free(hdl); return ret; } #define REGS_ENTRY(a) {(a), ARRAY_SIZE(a)} #define REGS(...) 
REGS_ENTRY(((const __be16[]){__VA_ARGS__})) static const struct initial_reg { const __be16 *data; /* data[0] is register address */ unsigned int count; } initial_regs[] = { REGS(be(0x0112), be(0x0808)), /* 8-bit/8-bit mode */ /* PEDESTAL+2 :+2 is a workaround for 10bit mode +0.5 rounding */ REGS(be(0x301E), be(0x00AA)), /* corrections_recommended_bayer */ REGS(be(0x3042), be(0x0004), /* 3042: RNC: enable b/w rnc mode */ be(0x4580)), /* 3044: RNC: enable row noise correction */ REGS(be(0x30D2), be(0x0000), /* 30D2: CRM/CC: enable crm on Visible and CC rows */ be(0x0000), /* 30D4: CC: CC enabled with 16 samples per column */ /* 30D6: CC: bw mode enabled/12 bit data resolution/bw mode */ be(0x2FFF)), REGS(be(0x30DA), be(0x0FFF), /* 30DA: CC: column correction clip level 2 is 0 */ be(0x0FFF), /* 30DC: CC: column correction clip level 3 is 0 */ be(0x0000)), /* 30DE: CC: Group FPN correction */ /* RNC: rnc scaling factor = * 54 / 64 (32 / 38 * 64 = 53.9) */ REGS(be(0x30EE), be(0x1136)), REGS(be(0x30FA), be(0xFD00)), /* GPIO0 = flash, GPIO1 = shutter */ REGS(be(0x3120), be(0x0005)), /* p1 dither enabled for 10bit mode */ REGS(be(0x3172), be(0x0206)), /* txlo clk divider options */ /* FDOC:fdoc settings with fdoc every frame turned of */ REGS(be(0x3180), be(0x9434)), REGS(be(0x31B0), be(0x008B), /* 31B0: frame_preamble - FIXME check WRT lanes# */ be(0x0050)), /* 31B2: line_preamble - FIXME check WRT lanes# */ /* don't use continuous clock mode while shut down */ REGS(be(0x31BC), be(0x068C)), REGS(be(0x31E0), be(0x0781)), /* Fuse/2DDC: enable 2ddc */ /* analog_setup_recommended_10bit */ REGS(be(0x341A), be(0x4735)), /* Samp&Hold pulse in ADC */ REGS(be(0x3420), be(0x4735)), /* Samp&Hold pulse in ADC */ REGS(be(0x3426), be(0x8A1A)), /* ADC offset distribution pulse */ REGS(be(0x342A), be(0x0018)), /* pulse_config */ /* pixel_timing_recommended */ REGS(be(0x3D00), /* 3D00 */ be(0x043E), be(0x4760), be(0xFFFF), be(0xFFFF), /* 3D08 */ be(0x8000), be(0x0510), be(0xAF08), 
be(0x0252), /* 3D10 */ be(0x486F), be(0x5D5D), be(0x8056), be(0x8313), /* 3D18 */ be(0x0087), be(0x6A48), be(0x6982), be(0x0280), /* 3D20 */ be(0x8359), be(0x8D02), be(0x8020), be(0x4882), /* 3D28 */ be(0x4269), be(0x6A95), be(0x5988), be(0x5A83), /* 3D30 */ be(0x5885), be(0x6280), be(0x6289), be(0x6097), /* 3D38 */ be(0x5782), be(0x605C), be(0xBF18), be(0x0961), /* 3D40 */ be(0x5080), be(0x2090), be(0x4390), be(0x4382), /* 3D48 */ be(0x5F8A), be(0x5D5D), be(0x9C63), be(0x8063), /* 3D50 */ be(0xA960), be(0x9757), be(0x8260), be(0x5CFF), /* 3D58 */ be(0xBF10), be(0x1681), be(0x0802), be(0x8000), /* 3D60 */ be(0x141C), be(0x6000), be(0x6022), be(0x4D80), /* 3D68 */ be(0x5C97), be(0x6A69), be(0xAC6F), be(0x4645), /* 3D70 */ be(0x4400), be(0x0513), be(0x8069), be(0x6AC6), /* 3D78 */ be(0x5F95), be(0x5F70), be(0x8040), be(0x4A81), /* 3D80 */ be(0x0300), be(0xE703), be(0x0088), be(0x4A83), /* 3D88 */ be(0x40FF), be(0xFFFF), be(0xFD70), be(0x8040), /* 3D90 */ be(0x4A85), be(0x4FA8), be(0x4F8C), be(0x0070), /* 3D98 */ be(0xBE47), be(0x8847), be(0xBC78), be(0x6B89), /* 3DA0 */ be(0x6A80), be(0x6986), be(0x6B8E), be(0x6B80), /* 3DA8 */ be(0x6980), be(0x6A88), be(0x7C9F), be(0x866B), /* 3DB0 */ be(0x8765), be(0x46FF), be(0xE365), be(0xA679), /* 3DB8 */ be(0x4A40), be(0x4580), be(0x44BC), be(0x7000), /* 3DC0 */ be(0x8040), be(0x0802), be(0x10EF), be(0x0104), /* 3DC8 */ be(0x3860), be(0x5D5D), be(0x5682), be(0x1300), /* 3DD0 */ be(0x8648), be(0x8202), be(0x8082), be(0x598A), /* 3DD8 */ be(0x0280), be(0x2048), be(0x3060), be(0x8042), /* 3DE0 */ be(0x9259), be(0x865A), be(0x8258), be(0x8562), /* 3DE8 */ be(0x8062), be(0x8560), be(0x9257), be(0x8221), /* 3DF0 */ be(0x10FF), be(0xB757), be(0x9361), be(0x1019), /* 3DF8 */ be(0x8020), be(0x9043), be(0x8E43), be(0x845F), /* 3E00 */ be(0x835D), be(0x805D), be(0x8163), be(0x8063), /* 3E08 */ be(0xA060), be(0x9157), be(0x8260), be(0x5CFF), /* 3E10 */ be(0xFFFF), be(0xFFE5), be(0x1016), be(0x2048), /* 3E18 */ be(0x0802), be(0x1C60), 
be(0x0014), be(0x0060), /* 3E20 */ be(0x2205), be(0x8120), be(0x908F), be(0x6A80), /* 3E28 */ be(0x6982), be(0x5F9F), be(0x6F46), be(0x4544), /* 3E30 */ be(0x0005), be(0x8013), be(0x8069), be(0x6A80), /* 3E38 */ be(0x7000), be(0x0000), be(0x0000), be(0x0000), /* 3E40 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3E48 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3E50 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3E58 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3E60 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3E68 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3E70 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3E78 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3E80 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3E88 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3E90 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3E98 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3EA0 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3EA8 */ be(0x0000), be(0x0000), be(0x0000), be(0x0000), /* 3EB0 */ be(0x0000), be(0x0000), be(0x0000)), REGS(be(0x3EB6), be(0x004C)), /* ECL */ REGS(be(0x3EBA), be(0xAAAD), /* 3EBA */ be(0x0086)), /* 3EBC: Bias currents for FSC/ECL */ REGS(be(0x3EC0), be(0x1E00), /* 3EC0: SFbin/SH mode settings */ be(0x100A), /* 3EC2: CLK divider for ramp for 10 bit 400MH */ /* 3EC4: FSC clamps for HDR mode and adc comp power down co */ be(0x3300), be(0xEA44), /* 3EC6: VLN and clk gating controls */ be(0x6F6F), /* 3EC8: Txl0 and Txlo1 settings for normal mode */ be(0x2F4A), /* 3ECA: CDAC/Txlo2/RSTGHI/RSTGLO settings */ be(0x0506), /* 3ECC: RSTDHI/RSTDLO/CDAC/TXHI settings */ /* 3ECE: Ramp buffer settings and Booster enable (bits 0-5) */ be(0x203B), be(0x13F0), /* 3ED0: TXLO from atest/sf bin settings */ be(0xA53D), /* 3ED2: Ramp offset */ be(0x862F), /* 3ED4: TXLO open loop/row driver settings */ be(0x4081), /* 3ED6: Txlatch fr cfpn rows/vln bias */ be(0x8003), /* 3ED8: 
Ramp step setting for 10 bit 400 Mhz */
	     be(0xA580),	/* 3EDA: Ramp Offset */
	     be(0xC000),	/* 3EDC: over range for rst and under range for sig */
	     be(0xC103)),	/* 3EDE: over range for sig and col dec clk settings */

	/* corrections_recommended_bayer */
	REGS(be(0x3F00),
	     be(0x0017),	/* 3F00: BM_T0 */
	     be(0x02DD),	/* 3F02: BM_T1 */
	     /* 3F04: if Ana_gain less than 2, use noise_floor0, multipl */
	     be(0x0020),
	     /* 3F06: if Ana_gain between 4 and 7, use noise_floor2 and */
	     be(0x0040),
	     /* 3F08: if Ana_gain between 4 and 7, use noise_floor2 and */
	     be(0x0070),
	     /* 3F0A: Define noise_floor0(low address) and noise_floor1 */
	     be(0x0101),
	     be(0x0302)),	/* 3F0C: Define noise_floor2 and noise_floor3 */

	REGS(be(0x3F10),
	     be(0x0505),	/* 3F10: single k factor 0 */
	     be(0x0505),	/* 3F12: single k factor 1 */
	     be(0x0505),	/* 3F14: single k factor 2 */
	     be(0x01FF),	/* 3F16: cross factor 0 */
	     be(0x01FF),	/* 3F18: cross factor 1 */
	     be(0x01FF),	/* 3F1A: cross factor 2 */
	     be(0x0022)),	/* 3F1E */

	/* GTH_THRES_RTN: 4max,4min filtered out of every 46 samples and */
	REGS(be(0x3F2C), be(0x442E)),

	REGS(be(0x3F3E),
	     be(0x0000),	/* 3F3E: Switch ADC from 12 bit to 10 bit mode */
	     be(0x1511),	/* 3F40: couple k factor 0 */
	     be(0x1511),	/* 3F42: couple k factor 1 */
	     be(0x0707)),	/* 3F44: couple k factor 2 */
};

/*
 * Power-off sequence (also the runtime-PM suspend callback, see
 * ar0521_pm_ops): gate the external clock, assert RESET, then drop the
 * supplies in reverse of the enable order used by ar0521_power_on().
 *
 * NOTE(review): every present supply is disabled unconditionally; if
 * this is reached from ar0521_power_on()'s error path before all
 * supplies were enabled, regulators that were never enabled get a
 * regulator_disable() too -- confirm the regulator core tolerates this
 * for these rails.
 */
static int ar0521_power_off(struct device *dev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct ar0521_dev *sensor = to_ar0521_dev(sd);
	int i;

	clk_disable_unprepare(sensor->extclk);

	if (sensor->reset_gpio)
		gpiod_set_value(sensor->reset_gpio, 1); /* assert RESET signal */

	/* Disable supplies in reverse of the enable order */
	for (i = ARRAY_SIZE(ar0521_supply_names) - 1; i >= 0; i--) {
		if (sensor->supplies[i])
			regulator_disable(sensor->supplies[i]);
	}
	return 0;
}

/*
 * Power-on sequence (also the runtime-PM resume callback): enable the
 * supplies with per-rail settling delays, start the external clock,
 * release RESET, then download the initial_regs register tables and
 * configure the MIPI serial interface for the probed lane count.
 * On any failure the device is powered back off via ar0521_power_off().
 */
static int ar0521_power_on(struct device *dev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct ar0521_dev *sensor = to_ar0521_dev(sd);
	unsigned int cnt;
	int ret;

	for (cnt = 0; cnt < ARRAY_SIZE(ar0521_supply_names); cnt++)
		if (sensor->supplies[cnt]) {
			ret = regulator_enable(sensor->supplies[cnt]);
			if (ret < 0)
				goto off;

			usleep_range(1000, 1500); /* min 1 ms */
		}

	ret = clk_prepare_enable(sensor->extclk);
	if (ret < 0) {
		v4l2_err(&sensor->sd, "error enabling sensor clock\n");
		goto off;
	}
	usleep_range(1000, 1500); /* min 1 ms */

	if (sensor->reset_gpio)
		/* deassert RESET signal */
		gpiod_set_value(sensor->reset_gpio, 0);
	usleep_range(4500, 5000); /* min 45000 clocks */

	/* Download the full initialization register sequence */
	for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++) {
		ret = ar0521_write_regs(sensor, initial_regs[cnt].data,
					initial_regs[cnt].count);
		if (ret)
			goto off;
	}

	ret = ar0521_write_reg(sensor, AR0521_REG_SERIAL_FORMAT,
			       AR0521_REG_SERIAL_FORMAT_MIPI |
			       sensor->lane_count);
	if (ret)
		goto off;

	/* set MIPI test mode - disabled for now */
	ret = ar0521_write_reg(sensor, AR0521_REG_HISPI_TEST_MODE,
			       ((0x40 << sensor->lane_count) - 0x40) |
			       AR0521_REG_HISPI_TEST_MODE_LP11);
	if (ret)
		goto off;

	/* 0x110 | (4 / lanes): division binds before OR -- intended */
	ret = ar0521_write_reg(sensor, AR0521_REG_ROW_SPEED,
			       0x110 | 4 / sensor->lane_count);
	if (ret)
		goto off;

	return 0;
off:
	ar0521_power_off(dev);
	return ret;
}

/* Only the sensor's single native media bus code is exposed */
static int ar0521_enum_mbus_code(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_mbus_code_enum *code)
{
	struct ar0521_dev *sensor = to_ar0521_dev(sd);

	if (code->index)
		return -EINVAL;

	code->code = sensor->fmt.code;
	return 0;
}

/* Report the full continuous size range for the single supported code */
static int ar0521_enum_frame_size(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_frame_size_enum *fse)
{
	if (fse->index)
		return -EINVAL;
	if (fse->code != MEDIA_BUS_FMT_SGRBG8_1X8)
		return -EINVAL;

	fse->min_width = AR0521_WIDTH_MIN;
	fse->max_width = AR0521_WIDTH_MAX;
	fse->min_height = AR0521_HEIGHT_MIN;
	fse->max_height = AR0521_HEIGHT_MAX;
	return 0;
}

/*
 * Pre-streamon: power the sensor up and put the CSI-2 lanes into LP-11.
 * Only the manual LP mode flag is supported; the runtime-PM reference
 * taken here is released in ar0521_post_streamoff().
 */
static int ar0521_pre_streamon(struct v4l2_subdev *sd, u32 flags)
{
	struct ar0521_dev *sensor = to_ar0521_dev(sd);
	int ret;

	if (!(flags & V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP))
		return -EACCES;

	ret = pm_runtime_resume_and_get(&sensor->i2c_client->dev);
	if (ret < 0)
		return ret;

	/* Set LP-11 on clock
and data lanes */ ret = ar0521_write_reg(sensor, AR0521_REG_HISPI_CONTROL_STATUS, AR0521_REG_HISPI_CONTROL_STATUS_FRAMER_TEST_MODE_ENABLE); if (ret) goto err; /* Start streaming LP-11 */ ret = ar0521_write_reg(sensor, AR0521_REG_RESET, AR0521_REG_RESET_DEFAULTS | AR0521_REG_RESET_STREAM); if (ret) goto err; return 0; err: pm_runtime_put(&sensor->i2c_client->dev); return ret; } static int ar0521_post_streamoff(struct v4l2_subdev *sd) { struct ar0521_dev *sensor = to_ar0521_dev(sd); pm_runtime_put(&sensor->i2c_client->dev); return 0; } static int ar0521_s_stream(struct v4l2_subdev *sd, int enable) { struct ar0521_dev *sensor = to_ar0521_dev(sd); int ret; mutex_lock(&sensor->lock); ret = ar0521_set_stream(sensor, enable); if (!ret) sensor->streaming = enable; mutex_unlock(&sensor->lock); return ret; } static const struct v4l2_subdev_core_ops ar0521_core_ops = { .log_status = v4l2_ctrl_subdev_log_status, }; static const struct v4l2_subdev_video_ops ar0521_video_ops = { .s_stream = ar0521_s_stream, .pre_streamon = ar0521_pre_streamon, .post_streamoff = ar0521_post_streamoff, }; static const struct v4l2_subdev_pad_ops ar0521_pad_ops = { .enum_mbus_code = ar0521_enum_mbus_code, .enum_frame_size = ar0521_enum_frame_size, .get_fmt = ar0521_get_fmt, .set_fmt = ar0521_set_fmt, }; static const struct v4l2_subdev_ops ar0521_subdev_ops = { .core = &ar0521_core_ops, .video = &ar0521_video_ops, .pad = &ar0521_pad_ops, }; static int __maybe_unused ar0521_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ar0521_dev *sensor = to_ar0521_dev(sd); if (sensor->streaming) ar0521_set_stream(sensor, 0); return 0; } static int __maybe_unused ar0521_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct ar0521_dev *sensor = to_ar0521_dev(sd); if (sensor->streaming) return ar0521_set_stream(sensor, 1); return 0; } static int ar0521_probe(struct i2c_client *client) { struct v4l2_fwnode_endpoint ep = { .bus_type = V4L2_MBUS_CSI2_DPHY 
}; struct device *dev = &client->dev; struct fwnode_handle *endpoint; struct ar0521_dev *sensor; unsigned int cnt; int ret; sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL); if (!sensor) return -ENOMEM; sensor->i2c_client = client; sensor->fmt.width = AR0521_WIDTH_MAX; sensor->fmt.height = AR0521_HEIGHT_MAX; endpoint = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0, FWNODE_GRAPH_ENDPOINT_NEXT); if (!endpoint) { dev_err(dev, "endpoint node not found\n"); return -EINVAL; } ret = v4l2_fwnode_endpoint_parse(endpoint, &ep); fwnode_handle_put(endpoint); if (ret) { dev_err(dev, "could not parse endpoint\n"); return ret; } if (ep.bus_type != V4L2_MBUS_CSI2_DPHY) { dev_err(dev, "invalid bus type, must be MIPI CSI2\n"); return -EINVAL; } sensor->lane_count = ep.bus.mipi_csi2.num_data_lanes; switch (sensor->lane_count) { case 1: case 2: case 4: break; default: dev_err(dev, "invalid number of MIPI data lanes\n"); return -EINVAL; } /* Get master clock (extclk) */ sensor->extclk = devm_clk_get(dev, "extclk"); if (IS_ERR(sensor->extclk)) { dev_err(dev, "failed to get extclk\n"); return PTR_ERR(sensor->extclk); } sensor->extclk_freq = clk_get_rate(sensor->extclk); if (sensor->extclk_freq < AR0521_EXTCLK_MIN || sensor->extclk_freq > AR0521_EXTCLK_MAX) { dev_err(dev, "extclk frequency out of range: %u Hz\n", sensor->extclk_freq); return -EINVAL; } /* Request optional reset pin (usually active low) and assert it */ sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); v4l2_i2c_subdev_init(&sensor->sd, client, &ar0521_subdev_ops); sensor->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE; sensor->pad.flags = MEDIA_PAD_FL_SOURCE; sensor->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ret = media_entity_pads_init(&sensor->sd.entity, 1, &sensor->pad); if (ret) return ret; for (cnt = 0; cnt < ARRAY_SIZE(ar0521_supply_names); cnt++) { struct regulator *supply = devm_regulator_get(dev, ar0521_supply_names[cnt]); if (IS_ERR(supply)) { dev_info(dev, "no %s regulator 
found: %li\n", ar0521_supply_names[cnt], PTR_ERR(supply)); return PTR_ERR(supply); } sensor->supplies[cnt] = supply; } mutex_init(&sensor->lock); ret = ar0521_init_controls(sensor); if (ret) goto entity_cleanup; ar0521_adj_fmt(&sensor->fmt); ret = v4l2_async_register_subdev(&sensor->sd); if (ret) goto free_ctrls; /* Turn on the device and enable runtime PM */ ret = ar0521_power_on(&client->dev); if (ret) goto disable; pm_runtime_set_active(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_idle(&client->dev); return 0; disable: v4l2_async_unregister_subdev(&sensor->sd); media_entity_cleanup(&sensor->sd.entity); free_ctrls: v4l2_ctrl_handler_free(&sensor->ctrls.handler); entity_cleanup: media_entity_cleanup(&sensor->sd.entity); mutex_destroy(&sensor->lock); return ret; } static void ar0521_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ar0521_dev *sensor = to_ar0521_dev(sd); v4l2_async_unregister_subdev(&sensor->sd); media_entity_cleanup(&sensor->sd.entity); v4l2_ctrl_handler_free(&sensor->ctrls.handler); pm_runtime_disable(&client->dev); if (!pm_runtime_status_suspended(&client->dev)) ar0521_power_off(&client->dev); pm_runtime_set_suspended(&client->dev); mutex_destroy(&sensor->lock); } static const struct dev_pm_ops ar0521_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(ar0521_suspend, ar0521_resume) SET_RUNTIME_PM_OPS(ar0521_power_off, ar0521_power_on, NULL) }; static const struct of_device_id ar0521_dt_ids[] = { {.compatible = "onnn,ar0521"}, {} }; MODULE_DEVICE_TABLE(of, ar0521_dt_ids); static struct i2c_driver ar0521_i2c_driver = { .driver = { .name = "ar0521", .pm = &ar0521_pm_ops, .of_match_table = ar0521_dt_ids, }, .probe = ar0521_probe, .remove = ar0521_remove, }; module_i2c_driver(ar0521_i2c_driver); MODULE_DESCRIPTION("AR0521 MIPI Camera subdev driver"); MODULE_AUTHOR("Krzysztof Hałasa <[email protected]>"); MODULE_LICENSE("GPL");
/* linux-master */
/* drivers/media/i2c/ar0521.c */
// SPDX-License-Identifier: GPL-2.0-only /* * Sony imx335 Camera Sensor Driver * * Copyright (C) 2021 Intel Corporation */ #include <asm/unaligned.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-subdev.h> /* Streaming Mode */ #define IMX335_REG_MODE_SELECT 0x3000 #define IMX335_MODE_STANDBY 0x01 #define IMX335_MODE_STREAMING 0x00 /* Lines per frame */ #define IMX335_REG_LPFR 0x3030 /* Chip ID */ #define IMX335_REG_ID 0x3912 #define IMX335_ID 0x00 /* Exposure control */ #define IMX335_REG_SHUTTER 0x3058 #define IMX335_EXPOSURE_MIN 1 #define IMX335_EXPOSURE_OFFSET 9 #define IMX335_EXPOSURE_STEP 1 #define IMX335_EXPOSURE_DEFAULT 0x0648 /* Analog gain control */ #define IMX335_REG_AGAIN 0x30e8 #define IMX335_AGAIN_MIN 0 #define IMX335_AGAIN_MAX 240 #define IMX335_AGAIN_STEP 1 #define IMX335_AGAIN_DEFAULT 0 /* Group hold register */ #define IMX335_REG_HOLD 0x3001 /* Input clock rate */ #define IMX335_INCLK_RATE 24000000 /* CSI2 HW configuration */ #define IMX335_LINK_FREQ 594000000 #define IMX335_NUM_DATA_LANES 4 #define IMX335_REG_MIN 0x00 #define IMX335_REG_MAX 0xfffff /** * struct imx335_reg - imx335 sensor register * @address: Register address * @val: Register value */ struct imx335_reg { u16 address; u8 val; }; /** * struct imx335_reg_list - imx335 sensor register list * @num_of_regs: Number of registers in the list * @regs: Pointer to register list */ struct imx335_reg_list { u32 num_of_regs; const struct imx335_reg *regs; }; /** * struct imx335_mode - imx335 sensor mode structure * @width: Frame width * @height: Frame height * @code: Format code * @hblank: Horizontal blanking in lines * @vblank: Vertical blanking in lines * @vblank_min: Minimum vertical blanking in lines * @vblank_max: Maximum vertical blanking in lines * @pclk: Sensor pixel clock * @link_freq_idx: Link frequency index * @reg_list: 
Register list for sensor mode */ struct imx335_mode { u32 width; u32 height; u32 code; u32 hblank; u32 vblank; u32 vblank_min; u32 vblank_max; u64 pclk; u32 link_freq_idx; struct imx335_reg_list reg_list; }; /** * struct imx335 - imx335 sensor device structure * @dev: Pointer to generic device * @client: Pointer to i2c client * @sd: V4L2 sub-device * @pad: Media pad. Only one pad supported * @reset_gpio: Sensor reset gpio * @inclk: Sensor input clock * @ctrl_handler: V4L2 control handler * @link_freq_ctrl: Pointer to link frequency control * @pclk_ctrl: Pointer to pixel clock control * @hblank_ctrl: Pointer to horizontal blanking control * @vblank_ctrl: Pointer to vertical blanking control * @exp_ctrl: Pointer to exposure control * @again_ctrl: Pointer to analog gain control * @vblank: Vertical blanking in lines * @cur_mode: Pointer to current selected sensor mode * @mutex: Mutex for serializing sensor controls * @streaming: Flag indicating streaming state */ struct imx335 { struct device *dev; struct i2c_client *client; struct v4l2_subdev sd; struct media_pad pad; struct gpio_desc *reset_gpio; struct clk *inclk; struct v4l2_ctrl_handler ctrl_handler; struct v4l2_ctrl *link_freq_ctrl; struct v4l2_ctrl *pclk_ctrl; struct v4l2_ctrl *hblank_ctrl; struct v4l2_ctrl *vblank_ctrl; struct { struct v4l2_ctrl *exp_ctrl; struct v4l2_ctrl *again_ctrl; }; u32 vblank; const struct imx335_mode *cur_mode; struct mutex mutex; bool streaming; }; static const s64 link_freq[] = { IMX335_LINK_FREQ, }; /* Sensor mode registers */ static const struct imx335_reg mode_2592x1940_regs[] = { {0x3000, 0x01}, {0x3002, 0x00}, {0x300c, 0x3b}, {0x300d, 0x2a}, {0x3018, 0x04}, {0x302c, 0x3c}, {0x302e, 0x20}, {0x3056, 0x94}, {0x3074, 0xc8}, {0x3076, 0x28}, {0x304c, 0x00}, {0x314c, 0xc6}, {0x315a, 0x02}, {0x3168, 0xa0}, {0x316a, 0x7e}, {0x31a1, 0x00}, {0x3288, 0x21}, {0x328a, 0x02}, {0x3414, 0x05}, {0x3416, 0x18}, {0x3648, 0x01}, {0x364a, 0x04}, {0x364c, 0x04}, {0x3678, 0x01}, {0x367c, 0x31}, {0x367e, 
0x31}, {0x3706, 0x10}, {0x3708, 0x03}, {0x3714, 0x02}, {0x3715, 0x02}, {0x3716, 0x01}, {0x3717, 0x03}, {0x371c, 0x3d}, {0x371d, 0x3f}, {0x372c, 0x00}, {0x372d, 0x00}, {0x372e, 0x46}, {0x372f, 0x00}, {0x3730, 0x89}, {0x3731, 0x00}, {0x3732, 0x08}, {0x3733, 0x01}, {0x3734, 0xfe}, {0x3735, 0x05}, {0x3740, 0x02}, {0x375d, 0x00}, {0x375e, 0x00}, {0x375f, 0x11}, {0x3760, 0x01}, {0x3768, 0x1b}, {0x3769, 0x1b}, {0x376a, 0x1b}, {0x376b, 0x1b}, {0x376c, 0x1a}, {0x376d, 0x17}, {0x376e, 0x0f}, {0x3776, 0x00}, {0x3777, 0x00}, {0x3778, 0x46}, {0x3779, 0x00}, {0x377a, 0x89}, {0x377b, 0x00}, {0x377c, 0x08}, {0x377d, 0x01}, {0x377e, 0x23}, {0x377f, 0x02}, {0x3780, 0xd9}, {0x3781, 0x03}, {0x3782, 0xf5}, {0x3783, 0x06}, {0x3784, 0xa5}, {0x3788, 0x0f}, {0x378a, 0xd9}, {0x378b, 0x03}, {0x378c, 0xeb}, {0x378d, 0x05}, {0x378e, 0x87}, {0x378f, 0x06}, {0x3790, 0xf5}, {0x3792, 0x43}, {0x3794, 0x7a}, {0x3796, 0xa1}, {0x37b0, 0x36}, {0x3a00, 0x01}, }; /* Supported sensor mode configurations */ static const struct imx335_mode supported_mode = { .width = 2592, .height = 1940, .hblank = 342, .vblank = 2560, .vblank_min = 2560, .vblank_max = 133060, .pclk = 396000000, .link_freq_idx = 0, .code = MEDIA_BUS_FMT_SRGGB12_1X12, .reg_list = { .num_of_regs = ARRAY_SIZE(mode_2592x1940_regs), .regs = mode_2592x1940_regs, }, }; /** * to_imx335() - imx335 V4L2 sub-device to imx335 device. * @subdev: pointer to imx335 V4L2 sub-device * * Return: pointer to imx335 device */ static inline struct imx335 *to_imx335(struct v4l2_subdev *subdev) { return container_of(subdev, struct imx335, sd); } /** * imx335_read_reg() - Read registers. * @imx335: pointer to imx335 device * @reg: register address * @len: length of bytes to read. Max supported bytes is 4 * @val: pointer to register value to be filled. * * Big endian register addresses with little endian values. * * Return: 0 if successful, error code otherwise. 
*/ static int imx335_read_reg(struct imx335 *imx335, u16 reg, u32 len, u32 *val) { struct i2c_client *client = v4l2_get_subdevdata(&imx335->sd); struct i2c_msg msgs[2] = {0}; u8 addr_buf[2] = {0}; u8 data_buf[4] = {0}; int ret; if (WARN_ON(len > 4)) return -EINVAL; put_unaligned_be16(reg, addr_buf); /* Write register address */ msgs[0].addr = client->addr; msgs[0].flags = 0; msgs[0].len = ARRAY_SIZE(addr_buf); msgs[0].buf = addr_buf; /* Read data from register */ msgs[1].addr = client->addr; msgs[1].flags = I2C_M_RD; msgs[1].len = len; msgs[1].buf = data_buf; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) return -EIO; *val = get_unaligned_le32(data_buf); return 0; } /** * imx335_write_reg() - Write register * @imx335: pointer to imx335 device * @reg: register address * @len: length of bytes. Max supported bytes is 4 * @val: register value * * Big endian register addresses with little endian values. * * Return: 0 if successful, error code otherwise. */ static int imx335_write_reg(struct imx335 *imx335, u16 reg, u32 len, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(&imx335->sd); u8 buf[6] = {0}; if (WARN_ON(len > 4)) return -EINVAL; put_unaligned_be16(reg, buf); put_unaligned_le32(val, buf + 2); if (i2c_master_send(client, buf, len + 2) != len + 2) return -EIO; return 0; } /** * imx335_write_regs() - Write a list of registers * @imx335: pointer to imx335 device * @regs: list of registers to be written * @len: length of registers array * * Return: 0 if successful. error code otherwise. 
*/ static int imx335_write_regs(struct imx335 *imx335, const struct imx335_reg *regs, u32 len) { unsigned int i; int ret; for (i = 0; i < len; i++) { ret = imx335_write_reg(imx335, regs[i].address, 1, regs[i].val); if (ret) return ret; } return 0; } /** * imx335_update_controls() - Update control ranges based on streaming mode * @imx335: pointer to imx335 device * @mode: pointer to imx335_mode sensor mode * * Return: 0 if successful, error code otherwise. */ static int imx335_update_controls(struct imx335 *imx335, const struct imx335_mode *mode) { int ret; ret = __v4l2_ctrl_s_ctrl(imx335->link_freq_ctrl, mode->link_freq_idx); if (ret) return ret; ret = __v4l2_ctrl_s_ctrl(imx335->hblank_ctrl, mode->hblank); if (ret) return ret; return __v4l2_ctrl_modify_range(imx335->vblank_ctrl, mode->vblank_min, mode->vblank_max, 1, mode->vblank); } /** * imx335_update_exp_gain() - Set updated exposure and gain * @imx335: pointer to imx335 device * @exposure: updated exposure value * @gain: updated analog gain value * * Return: 0 if successful, error code otherwise. 
*/ static int imx335_update_exp_gain(struct imx335 *imx335, u32 exposure, u32 gain) { u32 lpfr, shutter; int ret; lpfr = imx335->vblank + imx335->cur_mode->height; shutter = lpfr - exposure; dev_dbg(imx335->dev, "Set exp %u, analog gain %u, shutter %u, lpfr %u", exposure, gain, shutter, lpfr); ret = imx335_write_reg(imx335, IMX335_REG_HOLD, 1, 1); if (ret) return ret; ret = imx335_write_reg(imx335, IMX335_REG_LPFR, 3, lpfr); if (ret) goto error_release_group_hold; ret = imx335_write_reg(imx335, IMX335_REG_SHUTTER, 3, shutter); if (ret) goto error_release_group_hold; ret = imx335_write_reg(imx335, IMX335_REG_AGAIN, 2, gain); error_release_group_hold: imx335_write_reg(imx335, IMX335_REG_HOLD, 1, 0); return ret; } /** * imx335_set_ctrl() - Set subdevice control * @ctrl: pointer to v4l2_ctrl structure * * Supported controls: * - V4L2_CID_VBLANK * - cluster controls: * - V4L2_CID_ANALOGUE_GAIN * - V4L2_CID_EXPOSURE * * Return: 0 if successful, error code otherwise. */ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl) { struct imx335 *imx335 = container_of(ctrl->handler, struct imx335, ctrl_handler); u32 analog_gain; u32 exposure; int ret; switch (ctrl->id) { case V4L2_CID_VBLANK: imx335->vblank = imx335->vblank_ctrl->val; dev_dbg(imx335->dev, "Received vblank %u, new lpfr %u", imx335->vblank, imx335->vblank + imx335->cur_mode->height); ret = __v4l2_ctrl_modify_range(imx335->exp_ctrl, IMX335_EXPOSURE_MIN, imx335->vblank + imx335->cur_mode->height - IMX335_EXPOSURE_OFFSET, 1, IMX335_EXPOSURE_DEFAULT); break; case V4L2_CID_EXPOSURE: /* Set controls only if sensor is in power on state */ if (!pm_runtime_get_if_in_use(imx335->dev)) return 0; exposure = ctrl->val; analog_gain = imx335->again_ctrl->val; dev_dbg(imx335->dev, "Received exp %u, analog gain %u", exposure, analog_gain); ret = imx335_update_exp_gain(imx335, exposure, analog_gain); pm_runtime_put(imx335->dev); break; default: dev_err(imx335->dev, "Invalid control %d", ctrl->id); ret = -EINVAL; } return ret; } /* 
 V4l2 subdevice control ops */
static const struct v4l2_ctrl_ops imx335_ctrl_ops = {
	.s_ctrl = imx335_set_ctrl,
};

/**
 * imx335_enum_mbus_code() - Enumerate V4L2 sub-device mbus codes
 * @sd: pointer to imx335 V4L2 sub-device structure
 * @sd_state: V4L2 sub-device configuration
 * @code: V4L2 sub-device code enumeration need to be filled
 *
 * Only the single code of the one supported mode is exposed.
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_enum_mbus_code(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->index > 0)
		return -EINVAL;

	code->code = supported_mode.code;

	return 0;
}

/**
 * imx335_enum_frame_size() - Enumerate V4L2 sub-device frame sizes
 * @sd: pointer to imx335 V4L2 sub-device structure
 * @sd_state: V4L2 sub-device configuration
 * @fsize: V4L2 sub-device size enumeration need to be filled
 *
 * Exactly one discrete size (min == max) is reported.
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_enum_frame_size(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_frame_size_enum *fsize)
{
	if (fsize->index > 0)
		return -EINVAL;

	if (fsize->code != supported_mode.code)
		return -EINVAL;

	fsize->min_width = supported_mode.width;
	fsize->max_width = fsize->min_width;
	fsize->min_height = supported_mode.height;
	fsize->max_height = fsize->min_height;

	return 0;
}

/**
 * imx335_fill_pad_format() - Fill subdevice pad format
 *                            from selected sensor mode
 * @imx335: pointer to imx335 device
 * @mode: pointer to imx335_mode sensor mode
 * @fmt: V4L2 sub-device format need to be filled
 */
static void imx335_fill_pad_format(struct imx335 *imx335,
				   const struct imx335_mode *mode,
				   struct v4l2_subdev_format *fmt)
{
	fmt->format.width = mode->width;
	fmt->format.height = mode->height;
	fmt->format.code = mode->code;
	fmt->format.field = V4L2_FIELD_NONE;
	fmt->format.colorspace = V4L2_COLORSPACE_RAW;
	fmt->format.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	fmt->format.quantization = V4L2_QUANTIZATION_DEFAULT;
	fmt->format.xfer_func = V4L2_XFER_FUNC_NONE;
}

/**
 * imx335_get_pad_format() - Get subdevice pad format
 * @sd: pointer to imx335 V4L2 sub-device structure
 * @sd_state: V4L2 sub-device configuration
 * @fmt: V4L2 sub-device format need to be set
 *
 * TRY formats come from the subdev state; ACTIVE reflects cur_mode.
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_get_pad_format(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_format *fmt)
{
	struct imx335 *imx335 = to_imx335(sd);

	mutex_lock(&imx335->mutex);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		struct v4l2_mbus_framefmt *framefmt;

		framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
		fmt->format = *framefmt;
	} else {
		imx335_fill_pad_format(imx335, imx335->cur_mode, fmt);
	}

	mutex_unlock(&imx335->mutex);

	return 0;
}

/**
 * imx335_set_pad_format() - Set subdevice pad format
 * @sd: pointer to imx335 V4L2 sub-device structure
 * @sd_state: V4L2 sub-device configuration
 * @fmt: V4L2 sub-device format need to be set
 *
 * The requested format is always coerced to the single supported mode;
 * an ACTIVE set also refreshes the control ranges.
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_set_pad_format(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_format *fmt)
{
	struct imx335 *imx335 = to_imx335(sd);
	const struct imx335_mode *mode;
	int ret = 0;

	mutex_lock(&imx335->mutex);

	mode = &supported_mode;
	imx335_fill_pad_format(imx335, mode, fmt);

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
		struct v4l2_mbus_framefmt *framefmt;

		framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
		*framefmt = fmt->format;
	} else {
		ret = imx335_update_controls(imx335, mode);
		if (!ret)
			imx335->cur_mode = mode;
	}

	mutex_unlock(&imx335->mutex);

	return ret;
}

/**
 * imx335_init_pad_cfg() - Initialize sub-device pad configuration
 * @sd: pointer to imx335 V4L2 sub-device structure
 * @sd_state: V4L2 sub-device configuration
 *
 * Seeds TRY (or ACTIVE when @sd_state is NULL) with the supported mode.
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_init_pad_cfg(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state)
{
	struct imx335 *imx335 = to_imx335(sd);
	struct v4l2_subdev_format fmt = { 0 };

	fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
	imx335_fill_pad_format(imx335, &supported_mode, &fmt);

	return imx335_set_pad_format(sd, sd_state, &fmt);
}

/**
 * imx335_start_streaming() - Start sensor stream
 * @imx335: pointer to imx335 device
 *
 * Programs the mode register list, applies the current control values
 * via the handler, then leaves standby.
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_start_streaming(struct imx335 *imx335)
{
	const struct imx335_reg_list *reg_list;
	int ret;

	/* Write sensor mode registers */
	reg_list = &imx335->cur_mode->reg_list;
	ret = imx335_write_regs(imx335, reg_list->regs,
				reg_list->num_of_regs);
	if (ret) {
		dev_err(imx335->dev, "fail to write initial registers");
		return ret;
	}

	/* Setup handler will write actual exposure and gain */
	ret = __v4l2_ctrl_handler_setup(imx335->sd.ctrl_handler);
	if (ret) {
		dev_err(imx335->dev, "fail to setup handler");
		return ret;
	}

	/* Start streaming */
	ret = imx335_write_reg(imx335, IMX335_REG_MODE_SELECT,
			       1, IMX335_MODE_STREAMING);
	if (ret) {
		dev_err(imx335->dev, "fail to start streaming");
		return ret;
	}

	/* Initial regulator stabilization period */
	usleep_range(18000, 20000);

	return 0;
}

/**
 * imx335_stop_streaming() - Stop sensor stream
 * @imx335: pointer to imx335 device
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_stop_streaming(struct imx335 *imx335)
{
	return imx335_write_reg(imx335, IMX335_REG_MODE_SELECT,
				1, IMX335_MODE_STANDBY);
}

/**
 * imx335_set_stream() - Enable sensor streaming
 * @sd: pointer to imx335 subdevice
 * @enable: set to enable sensor streaming
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct imx335 *imx335 = to_imx335(sd);
	int ret;

	mutex_lock(&imx335->mutex);

	/* No-op if the requested state is already active */
	if (imx335->streaming == enable) {
		mutex_unlock(&imx335->mutex);
		return 0;
	}

	if (enable) {
		ret = pm_runtime_resume_and_get(imx335->dev);
		if (ret)
			goto error_unlock;

		ret = imx335_start_streaming(imx335);
		if (ret)
			goto error_power_off;
	} else {
		imx335_stop_streaming(imx335);
		pm_runtime_put(imx335->dev);
	}

	imx335->streaming = enable;

	mutex_unlock(&imx335->mutex);

	return 0;

error_power_off:
	pm_runtime_put(imx335->dev);
error_unlock:
	mutex_unlock(&imx335->mutex);

	return ret;
}

/**
 * imx335_detect() - Detect imx335 sensor
 * @imx335: pointer to imx335 device
 *
 * Return: 0 if successful, -EIO if sensor id does not match
 */
static int imx335_detect(struct imx335 *imx335)
{
	int ret;
	u32 val;

	ret = imx335_read_reg(imx335, IMX335_REG_ID, 2, &val);
	if (ret)
		return ret;

	if (val != IMX335_ID) {
		/* Message prints expected then read value */
		dev_err(imx335->dev, "chip id mismatch: %x!=%x",
			IMX335_ID, val);
		return -ENXIO;
	}

	return 0;
}

/**
 * imx335_parse_hw_config() - Parse HW configuration and check if supported
 * @imx335: pointer to imx335 device
 *
 * Validates reset GPIO, input clock rate, CSI-2 lane count and link
 * frequency against the driver's fixed requirements.
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_parse_hw_config(struct imx335 *imx335)
{
	struct fwnode_handle *fwnode = dev_fwnode(imx335->dev);
	struct v4l2_fwnode_endpoint bus_cfg = {
		.bus_type = V4L2_MBUS_CSI2_DPHY
	};
	struct fwnode_handle *ep;
	unsigned long rate;
	unsigned int i;
	int ret;

	if (!fwnode)
		return -ENXIO;

	/* Request optional reset pin */
	imx335->reset_gpio = devm_gpiod_get_optional(imx335->dev, "reset",
						     GPIOD_OUT_LOW);
	if (IS_ERR(imx335->reset_gpio)) {
		dev_err(imx335->dev, "failed to get reset gpio %ld",
			PTR_ERR(imx335->reset_gpio));
		return PTR_ERR(imx335->reset_gpio);
	}

	/* Get sensor input clock */
	imx335->inclk = devm_clk_get(imx335->dev, NULL);
	if (IS_ERR(imx335->inclk)) {
		dev_err(imx335->dev, "could not get inclk");
		return PTR_ERR(imx335->inclk);
	}

	rate = clk_get_rate(imx335->inclk);
	if (rate != IMX335_INCLK_RATE) {
		dev_err(imx335->dev, "inclk frequency mismatch");
		return -EINVAL;
	}

	ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
	if (!ep)
		return -ENXIO;

	ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
	fwnode_handle_put(ep);
	if (ret)
		return ret;

	if (bus_cfg.bus.mipi_csi2.num_data_lanes != IMX335_NUM_DATA_LANES) {
		dev_err(imx335->dev,
			"number of CSI2 data lanes %d is not supported",
			bus_cfg.bus.mipi_csi2.num_data_lanes);
		ret = -EINVAL;
		goto done_endpoint_free;
	}

	if (!bus_cfg.nr_of_link_frequencies) {
		dev_err(imx335->dev, "no link frequencies defined");
		ret = -EINVAL;
		goto done_endpoint_free;
	}

	/* Accept the endpoint only if it lists the one supported frequency */
	for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++)
		if (bus_cfg.link_frequencies[i] == IMX335_LINK_FREQ)
			goto done_endpoint_free;

	ret = -EINVAL;

done_endpoint_free:
	v4l2_fwnode_endpoint_free(&bus_cfg);

	return ret;
}

/* V4l2 subdevice ops */
static const struct v4l2_subdev_video_ops imx335_video_ops = {
	.s_stream = imx335_set_stream,
};

static const struct v4l2_subdev_pad_ops imx335_pad_ops = {
	.init_cfg = imx335_init_pad_cfg,
	.enum_mbus_code = imx335_enum_mbus_code,
	.enum_frame_size = imx335_enum_frame_size,
	.get_fmt = imx335_get_pad_format,
	.set_fmt = imx335_set_pad_format,
};
static const struct v4l2_subdev_ops imx335_subdev_ops = {
	.video = &imx335_video_ops,
	.pad = &imx335_pad_ops,
};

/**
 * imx335_power_on() - Sensor power on sequence
 * @dev: pointer to i2c device
 *
 * Drives the reset GPIO to 1 (logical level; actual polarity comes from the
 * device tree), enables the input clock and waits for the sensor to settle.
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_power_on(struct device *dev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct imx335 *imx335 = to_imx335(sd);
	int ret;

	gpiod_set_value_cansleep(imx335->reset_gpio, 1);

	ret = clk_prepare_enable(imx335->inclk);
	if (ret) {
		dev_err(imx335->dev, "fail to enable inclk");
		goto error_reset;
	}

	/* NOTE(review): ~20us settle delay; datasheet value not verifiable here. */
	usleep_range(20, 22);

	return 0;

error_reset:
	gpiod_set_value_cansleep(imx335->reset_gpio, 0);

	return ret;
}

/**
 * imx335_power_off() - Sensor power off sequence
 * @dev: pointer to i2c device
 *
 * Asserts reset and gates the input clock.
 *
 * Return: 0 (this sequence cannot fail).
 */
static int imx335_power_off(struct device *dev)
{
	struct v4l2_subdev *sd = dev_get_drvdata(dev);
	struct imx335 *imx335 = to_imx335(sd);

	gpiod_set_value_cansleep(imx335->reset_gpio, 0);
	clk_disable_unprepare(imx335->inclk);

	return 0;
}

/**
 * imx335_init_controls() - Initialize sensor subdevice controls
 * @imx335: pointer to imx335 device
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_init_controls(struct imx335 *imx335)
{
	struct v4l2_ctrl_handler *ctrl_hdlr = &imx335->ctrl_handler;
	const struct imx335_mode *mode = imx335->cur_mode;
	u32 lpfr;
	int ret;

	ret = v4l2_ctrl_handler_init(ctrl_hdlr, 6);
	if (ret)
		return ret;

	/* Serialize controls with sensor device */
	ctrl_hdlr->lock = &imx335->mutex;

	/* Initialize exposure and gain */
	lpfr = mode->vblank + mode->height;
	imx335->exp_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
					     &imx335_ctrl_ops,
					     V4L2_CID_EXPOSURE,
					     IMX335_EXPOSURE_MIN,
					     lpfr - IMX335_EXPOSURE_OFFSET,
					     IMX335_EXPOSURE_STEP,
					     IMX335_EXPOSURE_DEFAULT);

	imx335->again_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
					       &imx335_ctrl_ops,
					       V4L2_CID_ANALOGUE_GAIN,
					       IMX335_AGAIN_MIN,
					       IMX335_AGAIN_MAX,
					       IMX335_AGAIN_STEP,
					       IMX335_AGAIN_DEFAULT);

	/* Exposure and gain are applied together by the ctrl framework. */
	v4l2_ctrl_cluster(2, &imx335->exp_ctrl);

	imx335->vblank_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
						&imx335_ctrl_ops,
						V4L2_CID_VBLANK,
						mode->vblank_min,
						mode->vblank_max,
						1, mode->vblank);

	/* Read only controls */
	imx335->pclk_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
					      &imx335_ctrl_ops,
					      V4L2_CID_PIXEL_RATE,
					      mode->pclk, mode->pclk,
					      1, mode->pclk);

	imx335->link_freq_ctrl = v4l2_ctrl_new_int_menu(ctrl_hdlr,
							&imx335_ctrl_ops,
							V4L2_CID_LINK_FREQ,
							ARRAY_SIZE(link_freq) - 1,
							mode->link_freq_idx,
							link_freq);
	if (imx335->link_freq_ctrl)
		imx335->link_freq_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;

	imx335->hblank_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
						&imx335_ctrl_ops,
						V4L2_CID_HBLANK,
						IMX335_REG_MIN,
						IMX335_REG_MAX,
						1, mode->hblank);
	if (imx335->hblank_ctrl)
		imx335->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;

	/* The handler accumulates the first failure; check once at the end. */
	if (ctrl_hdlr->error) {
		dev_err(imx335->dev, "control init failed: %d",
			ctrl_hdlr->error);
		v4l2_ctrl_handler_free(ctrl_hdlr);
		return ctrl_hdlr->error;
	}

	imx335->sd.ctrl_handler = ctrl_hdlr;

	return 0;
}

/**
 * imx335_probe() - I2C client device binding
 * @client: pointer to i2c client device
 *
 * Return: 0 if successful, error code otherwise.
 */
static int imx335_probe(struct i2c_client *client)
{
	struct imx335 *imx335;
	int ret;

	imx335 = devm_kzalloc(&client->dev, sizeof(*imx335), GFP_KERNEL);
	if (!imx335)
		return -ENOMEM;

	imx335->dev = &client->dev;

	/* Initialize subdev */
	v4l2_i2c_subdev_init(&imx335->sd, client, &imx335_subdev_ops);

	ret = imx335_parse_hw_config(imx335);
	if (ret) {
		dev_err(imx335->dev, "HW configuration is not supported");
		return ret;
	}

	mutex_init(&imx335->mutex);

	/* Power on manually here; runtime PM takes over after registration. */
	ret = imx335_power_on(imx335->dev);
	if (ret) {
		dev_err(imx335->dev, "failed to power-on the sensor");
		goto error_mutex_destroy;
	}

	/* Check module identity */
	ret = imx335_detect(imx335);
	if (ret) {
		dev_err(imx335->dev, "failed to find sensor: %d", ret);
		goto error_power_off;
	}

	/* Set default mode to max resolution */
	imx335->cur_mode = &supported_mode;
	imx335->vblank = imx335->cur_mode->vblank;

	ret = imx335_init_controls(imx335);
	if (ret) {
		dev_err(imx335->dev, "failed to init controls: %d", ret);
		goto error_power_off;
	}

	/* Initialize subdev */
	imx335->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	imx335->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;

	/* Initialize source pad */
	imx335->pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = media_entity_pads_init(&imx335->sd.entity, 1, &imx335->pad);
	if (ret) {
		dev_err(imx335->dev, "failed to init entity pads: %d", ret);
		goto error_handler_free;
	}

	ret = v4l2_async_register_subdev_sensor(&imx335->sd);
	if (ret < 0) {
		dev_err(imx335->dev,
			"failed to register async subdev: %d", ret);
		goto error_media_entity;
	}

	/* Hand power management over to runtime PM (device is active now). */
	pm_runtime_set_active(imx335->dev);
	pm_runtime_enable(imx335->dev);
	pm_runtime_idle(imx335->dev);

	return 0;

error_media_entity:
	media_entity_cleanup(&imx335->sd.entity);
error_handler_free:
	v4l2_ctrl_handler_free(imx335->sd.ctrl_handler);
error_power_off:
	imx335_power_off(imx335->dev);
error_mutex_destroy:
	mutex_destroy(&imx335->mutex);

	return ret;
}

/**
 * imx335_remove() - I2C client device unbinding
 * @client: pointer to I2C client device
 */
static void imx335_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct imx335 *imx335 = to_imx335(sd);

	v4l2_async_unregister_subdev(sd);
	media_entity_cleanup(&sd->entity);
	v4l2_ctrl_handler_free(sd->ctrl_handler);

	pm_runtime_disable(&client->dev);
	/* If runtime PM never suspended the device, power it off manually. */
	if (!pm_runtime_status_suspended(&client->dev))
		imx335_power_off(&client->dev);
	pm_runtime_set_suspended(&client->dev);

	mutex_destroy(&imx335->mutex);
}

/* Runtime PM only; power_off maps to suspend, power_on to resume. */
static const struct dev_pm_ops imx335_pm_ops = {
	SET_RUNTIME_PM_OPS(imx335_power_off, imx335_power_on, NULL)
};

static const struct of_device_id imx335_of_match[] = {
	{ .compatible = "sony,imx335" },
	{ }
};

MODULE_DEVICE_TABLE(of, imx335_of_match);

static struct i2c_driver imx335_driver = {
	.probe = imx335_probe,
	.remove = imx335_remove,
	.driver = {
		.name = "imx335",
		.pm = &imx335_pm_ops,
		.of_match_table = imx335_of_match,
	},
};

module_i2c_driver(imx335_driver);

MODULE_DESCRIPTION("Sony imx335 sensor driver");
MODULE_LICENSE("GPL");
/* linux-master: drivers/media/i2c/imx335.c — source-path marker from file concatenation */
// SPDX-License-Identifier: GPL-2.0-or-later /* * upd6408x - NEC Electronics 3-Dimensional Y/C separation driver * * 2003 by T.Adachi ([email protected]) * 2003 by Takeru KOMORIYA <[email protected]> * 2006 by Hans Verkuil <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/i2c/upd64083.h> MODULE_DESCRIPTION("uPD64083 driver"); MODULE_AUTHOR("T. Adachi, Takeru KOMORIYA, Hans Verkuil"); MODULE_LICENSE("GPL"); static bool debug; module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); enum { R00 = 0, R01, R02, R03, R04, R05, R06, R07, R08, R09, R0A, R0B, R0C, R0D, R0E, R0F, R10, R11, R12, R13, R14, R15, R16, TOT_REGS }; struct upd64083_state { struct v4l2_subdev sd; u8 mode; u8 ext_y_adc; u8 regs[TOT_REGS]; }; static inline struct upd64083_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct upd64083_state, sd); } /* Initial values when used in combination with the NEC upd64031a ghost reduction chip. 
 */
static u8 upd64083_init[] = {
	0x1f, 0x01, 0xa0, 0x2d, 0x29,  /* we use EXCSS=0 */
	0x36, 0xdd, 0x05, 0x56, 0x48,
	0x00, 0x3a, 0xa0, 0x05, 0x08,
	0x44, 0x60, 0x08, 0x52, 0xf8,
	0x53, 0x60, 0x10
};

/* ------------------------------------------------------------------------ */

/* Write one register over i2c; errors are logged, not propagated. */
static void upd64083_write(struct v4l2_subdev *sd, u8 reg, u8 val)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u8 buf[2];

	buf[0] = reg;
	buf[1] = val;
	v4l2_dbg(1, debug, sd, "write reg: %02x val: %02x\n", reg, val);
	if (i2c_master_send(client, buf, 2) != 2)
		v4l2_err(sd, "I/O error write 0x%02x/0x%02x\n", reg, val);
}

/* ------------------------------------------------------------------------ */

#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
 * Read back register @reg. The chip streams registers sequentially, so a
 * 7-byte burst read is done and the requested offset is picked out.
 * The i2c_master_recv() result is deliberately not checked; on failure the
 * zero-initialized-by-stack buffer contents are returned as-is.
 */
static u8 upd64083_read(struct v4l2_subdev *sd, u8 reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u8 buf[7];

	if (reg >= sizeof(buf))
		return 0xff;
	i2c_master_recv(client, buf, sizeof(buf));
	return buf[reg];
}
#endif

/* ------------------------------------------------------------------------ */

/*
 * Select the video routing. Bits 0-1 of @input choose the mode (stored in
 * R00 bits 7-6), bit UPD64083_EXT_Y_ADC selects the external Y ADC (R02
 * bit 5). Inputs > 7 and the two combinations with both mode bits 1-2 set
 * (6 and 7) are rejected.
 */
static int upd64083_s_routing(struct v4l2_subdev *sd,
			      u32 input, u32 output, u32 config)
{
	struct upd64083_state *state = to_state(sd);
	u8 r00, r02;

	if (input > 7 || (input & 6) == 6)
		return -EINVAL;
	state->mode = (input & 3) << 6;
	state->ext_y_adc = (input & UPD64083_EXT_Y_ADC) << 3;
	r00 = (state->regs[R00] & ~(3 << 6)) | state->mode;
	r02 = (state->regs[R02] & ~(1 << 5)) | state->ext_y_adc;
	upd64083_write(sd, R00, r00);
	upd64083_write(sd, R02, r02);
	return 0;
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
static int upd64083_g_register(struct v4l2_subdev *sd,
			       struct v4l2_dbg_register *reg)
{
	reg->val = upd64083_read(sd, reg->reg & 0xff);
	reg->size = 1;
	return 0;
}

static int upd64083_s_register(struct v4l2_subdev *sd,
			       const struct v4l2_dbg_register *reg)
{
	upd64083_write(sd, reg->reg & 0xff, reg->val & 0xff);
	return 0;
}
#endif

/* Dump the first seven status registers to the kernel log. */
static int upd64083_log_status(struct v4l2_subdev *sd)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	u8 buf[7];

	i2c_master_recv(client, buf, 7);
	v4l2_info(sd, "Status: SA00=%02x SA01=%02x SA02=%02x SA03=%02x "
		  "SA04=%02x SA05=%02x SA06=%02x\n",
		  buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
	return 0;
}

/* ----------------------------------------------------------------------- */

static const struct v4l2_subdev_core_ops upd64083_core_ops = {
	.log_status = upd64083_log_status,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = upd64083_g_register,
	.s_register = upd64083_s_register,
#endif
};

static const struct v4l2_subdev_video_ops upd64083_video_ops = {
	.s_routing = upd64083_s_routing,
};

static const struct v4l2_subdev_ops upd64083_ops = {
	.core = &upd64083_core_ops,
	.video = &upd64083_video_ops,
};

/* ------------------------------------------------------------------------ */

/* i2c implementation */

static int upd64083_probe(struct i2c_client *client)
{
	struct upd64083_state *state;
	struct v4l2_subdev *sd;
	int i;

	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	v4l_info(client, "chip found @ 0x%x (%s)\n",
		 client->addr << 1, client->adapter->name);

	state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	sd = &state->sd;
	v4l2_i2c_subdev_init(sd, client, &upd64083_ops);
	/* Initially assume that a ghost reduction chip is present */
	state->mode = 0;	/* YCS mode */
	state->ext_y_adc = (1 << 5);
	memcpy(state->regs, upd64083_init, TOT_REGS);
	/* Program the full register set with the defaults. */
	for (i = 0; i < TOT_REGS; i++)
		upd64083_write(sd, i, state->regs[i]);
	return 0;
}

static void upd64083_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);

	v4l2_device_unregister_subdev(sd);
}

/* ----------------------------------------------------------------------- */

static const struct i2c_device_id upd64083_id[] = {
	{ "upd64083", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, upd64083_id);

static struct i2c_driver upd64083_driver = {
	.driver = {
		.name = "upd64083",
	},
	.probe = upd64083_probe,
	.remove = upd64083_remove,
	.id_table = upd64083_id,
};
/* Register the driver with the i2c core (generates module init/exit). */
module_i2c_driver(upd64083_driver);
/* linux-master: drivers/media/i2c/upd64083.c — source-path marker from file concatenation */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * saa717x - Philips SAA717xHL video decoder driver
 *
 * Based on the saa7115 driver
 *
 * Changes by Ohta Kyuma <[email protected]>
 *	- Apply to SAA717x,NEC uPD64031,uPD64083. (1/31/2004)
 *
 * Changes by T.Adachi ([email protected])
 *	- support audio, video scaler etc, and checked the initialize sequence.
 *
 * Cleaned up by Hans Verkuil <[email protected]>
 *
 * Note: this is a reversed engineered driver based on captures from
 * the I2C bus under Windows. This chip is very similar to the saa7134,
 * though. Unfortunately, this driver is currently only working for NTSC.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/videodev2.h>
#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

MODULE_DESCRIPTION("Philips SAA717x audio/video decoder driver");
MODULE_AUTHOR("K. Ohta, T. Adachi, Hans Verkuil");
MODULE_LICENSE("GPL");

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");

/*
 * Generic i2c probe
 * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
 */

struct saa717x_state {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
	v4l2_std_id std;
	int input;
	int enable;
	int radio;
	int playback;
	int audio;
	int tuner_audio_mode;
	int audio_main_mute;
	int audio_main_vol_r;
	int audio_main_vol_l;
	u16 audio_main_bass;
	u16 audio_main_treble;
	u16 audio_main_volume;
	u16 audio_main_balance;
	int audio_input;
};

static inline struct saa717x_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct saa717x_state, sd);
}

static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
	return &container_of(ctrl->handler, struct saa717x_state, hdl)->sd;
}

/* ----------------------------------------------------------------------- */

/* for audio mode */
#define TUNER_AUDIO_MONO	0  /* LL */
#define TUNER_AUDIO_STEREO	1  /* LR */
#define TUNER_AUDIO_LANG1	2  /* LL */
#define TUNER_AUDIO_LANG2	3  /* RR */

#define SAA717X_NTSC_WIDTH	(704)
#define SAA717X_NTSC_HEIGHT	(480)

/* ----------------------------------------------------------------------- */

/*
 * Write register @reg. A handful of "firmware" registers (0x454,
 * 0x464-0x478, 0x480, 0x488) are 24-bit and take a 3-byte little-endian
 * payload; everything else is a single byte.
 */
static int saa717x_write(struct v4l2_subdev *sd, u32 reg, u32 value)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct i2c_adapter *adap = client->adapter;
	int fw_addr = reg == 0x454 || (reg >= 0x464 && reg <= 0x478) ||
		      reg == 0x480 || reg == 0x488;
	unsigned char mm1[6];
	struct i2c_msg msg;

	msg.flags = 0;
	msg.addr = client->addr;
	mm1[0] = (reg >> 8) & 0xff;
	mm1[1] = reg & 0xff;

	if (fw_addr) {
		mm1[4] = (value >> 16) & 0xff;
		mm1[3] = (value >> 8) & 0xff;
		mm1[2] = value & 0xff;
	} else {
		mm1[2] = value & 0xff;
	}
	msg.len = fw_addr ? 5 : 3; /* Long Registers have *only* three bytes! */
	msg.buf = mm1;
	v4l2_dbg(2, debug, sd, "wrote: reg 0x%03x=%08x\n", reg, value);
	return i2c_transfer(adap, &msg, 1) == 1;
}

/* Write a 0,0-terminated list of (reg, value) pairs. */
static void saa717x_write_regs(struct v4l2_subdev *sd, u32 *data)
{
	while (data[0] || data[1]) {
		saa717x_write(sd, data[0], data[1]);
		data += 2;
	}
}

/*
 * Read register @reg; registers 0x404-0x4b8 and 0x528 are multibyte
 * (24-bit), everything else returns a single byte.
 */
static u32 saa717x_read(struct v4l2_subdev *sd, u32 reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct i2c_adapter *adap = client->adapter;
	int fw_addr = (reg >= 0x404 && reg <= 0x4b8) || reg == 0x528;
	unsigned char mm1[2];
	unsigned char mm2[4] = { 0, 0, 0, 0 };
	struct i2c_msg msgs[2];
	u32 value;

	msgs[0].flags = 0;
	msgs[1].flags = I2C_M_RD;
	msgs[0].addr = msgs[1].addr = client->addr;
	mm1[0] = (reg >> 8) & 0xff;
	mm1[1] = reg & 0xff;
	msgs[0].len = 2;
	msgs[0].buf = mm1;
	msgs[1].len = fw_addr ?
3 : 1; /* Multibyte Registers contains *only* 3 bytes */ msgs[1].buf = mm2; i2c_transfer(adap, msgs, 2); if (fw_addr) value = (mm2[2] << 16) | (mm2[1] << 8) | mm2[0]; else value = mm2[0]; v4l2_dbg(2, debug, sd, "read: reg 0x%03x=0x%08x\n", reg, value); return value; } /* ----------------------------------------------------------------------- */ static u32 reg_init_initialize[] = { /* from linux driver */ 0x101, 0x008, /* Increment delay */ 0x103, 0x000, /* Analog input control 2 */ 0x104, 0x090, /* Analog input control 3 */ 0x105, 0x090, /* Analog input control 4 */ 0x106, 0x0eb, /* Horizontal sync start */ 0x107, 0x0e0, /* Horizontal sync stop */ 0x109, 0x055, /* Luminance control */ 0x10f, 0x02a, /* Chroma gain control */ 0x110, 0x000, /* Chroma control 2 */ 0x114, 0x045, /* analog/ADC */ 0x118, 0x040, /* RAW data gain */ 0x119, 0x080, /* RAW data offset */ 0x044, 0x000, /* VBI horizontal input window start (L) TASK A */ 0x045, 0x000, /* VBI horizontal input window start (H) TASK A */ 0x046, 0x0cf, /* VBI horizontal input window stop (L) TASK A */ 0x047, 0x002, /* VBI horizontal input window stop (H) TASK A */ 0x049, 0x000, /* VBI vertical input window start (H) TASK A */ 0x04c, 0x0d0, /* VBI horizontal output length (L) TASK A */ 0x04d, 0x002, /* VBI horizontal output length (H) TASK A */ 0x064, 0x080, /* Lumina brightness TASK A */ 0x065, 0x040, /* Luminance contrast TASK A */ 0x066, 0x040, /* Chroma saturation TASK A */ /* 067H: Reserved */ 0x068, 0x000, /* VBI horizontal scaling increment (L) TASK A */ 0x069, 0x004, /* VBI horizontal scaling increment (H) TASK A */ 0x06a, 0x000, /* VBI phase offset TASK A */ 0x06e, 0x000, /* Horizontal phase offset Luma TASK A */ 0x06f, 0x000, /* Horizontal phase offset Chroma TASK A */ 0x072, 0x000, /* Vertical filter mode TASK A */ 0x084, 0x000, /* VBI horizontal input window start (L) TAKS B */ 0x085, 0x000, /* VBI horizontal input window start (H) TAKS B */ 0x086, 0x0cf, /* VBI horizontal input window stop (L) TAKS B */ 
0x087, 0x002, /* VBI horizontal input window stop (H) TAKS B */ 0x089, 0x000, /* VBI vertical input window start (H) TAKS B */ 0x08c, 0x0d0, /* VBI horizontal output length (L) TASK B */ 0x08d, 0x002, /* VBI horizontal output length (H) TASK B */ 0x0a4, 0x080, /* Lumina brightness TASK B */ 0x0a5, 0x040, /* Luminance contrast TASK B */ 0x0a6, 0x040, /* Chroma saturation TASK B */ /* 0A7H reserved */ 0x0a8, 0x000, /* VBI horizontal scaling increment (L) TASK B */ 0x0a9, 0x004, /* VBI horizontal scaling increment (H) TASK B */ 0x0aa, 0x000, /* VBI phase offset TASK B */ 0x0ae, 0x000, /* Horizontal phase offset Luma TASK B */ 0x0af, 0x000, /*Horizontal phase offset Chroma TASK B */ 0x0b2, 0x000, /* Vertical filter mode TASK B */ 0x00c, 0x000, /* Start point GREEN path */ 0x00d, 0x000, /* Start point BLUE path */ 0x00e, 0x000, /* Start point RED path */ 0x010, 0x010, /* GREEN path gamma curve --- */ 0x011, 0x020, 0x012, 0x030, 0x013, 0x040, 0x014, 0x050, 0x015, 0x060, 0x016, 0x070, 0x017, 0x080, 0x018, 0x090, 0x019, 0x0a0, 0x01a, 0x0b0, 0x01b, 0x0c0, 0x01c, 0x0d0, 0x01d, 0x0e0, 0x01e, 0x0f0, 0x01f, 0x0ff, /* --- GREEN path gamma curve */ 0x020, 0x010, /* BLUE path gamma curve --- */ 0x021, 0x020, 0x022, 0x030, 0x023, 0x040, 0x024, 0x050, 0x025, 0x060, 0x026, 0x070, 0x027, 0x080, 0x028, 0x090, 0x029, 0x0a0, 0x02a, 0x0b0, 0x02b, 0x0c0, 0x02c, 0x0d0, 0x02d, 0x0e0, 0x02e, 0x0f0, 0x02f, 0x0ff, /* --- BLUE path gamma curve */ 0x030, 0x010, /* RED path gamma curve --- */ 0x031, 0x020, 0x032, 0x030, 0x033, 0x040, 0x034, 0x050, 0x035, 0x060, 0x036, 0x070, 0x037, 0x080, 0x038, 0x090, 0x039, 0x0a0, 0x03a, 0x0b0, 0x03b, 0x0c0, 0x03c, 0x0d0, 0x03d, 0x0e0, 0x03e, 0x0f0, 0x03f, 0x0ff, /* --- RED path gamma curve */ 0x109, 0x085, /* Luminance control */ /**** from app start ****/ 0x584, 0x000, /* AGC gain control */ 0x585, 0x000, /* Program count */ 0x586, 0x003, /* Status reset */ 0x588, 0x0ff, /* Number of audio samples (L) */ 0x589, 0x00f, /* Number of audio samples (M) */ 0x58a, 
0x000, /* Number of audio samples (H) */ 0x58b, 0x000, /* Audio select */ 0x58c, 0x010, /* Audio channel assign1 */ 0x58d, 0x032, /* Audio channel assign2 */ 0x58e, 0x054, /* Audio channel assign3 */ 0x58f, 0x023, /* Audio format */ 0x590, 0x000, /* SIF control */ 0x595, 0x000, /* ?? */ 0x596, 0x000, /* ?? */ 0x597, 0x000, /* ?? */ 0x464, 0x00, /* Digital input crossbar1 */ 0x46c, 0xbbbb10, /* Digital output selection1-3 */ 0x470, 0x101010, /* Digital output selection4-6 */ 0x478, 0x00, /* Sound feature control */ 0x474, 0x18, /* Softmute control */ 0x454, 0x0425b9, /* Sound Easy programming(reset) */ 0x454, 0x042539, /* Sound Easy programming(reset) */ /**** common setting( of DVD play, including scaler commands) ****/ 0x042, 0x003, /* Data path configuration for VBI (TASK A) */ 0x082, 0x003, /* Data path configuration for VBI (TASK B) */ 0x108, 0x0f8, /* Sync control */ 0x2a9, 0x0fd, /* ??? */ 0x102, 0x089, /* select video input "mode 9" */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x00a, /* Chroma control 1 */ 0x594, 0x002, /* SIF, analog I/O select */ 0x454, 0x0425b9, /* Sound */ 0x454, 0x042539, 0x111, 0x000, 0x10e, 0x00a, 0x464, 0x000, 0x300, 0x000, 0x301, 0x006, 0x302, 0x000, 0x303, 0x006, 0x308, 0x040, 0x309, 0x000, 0x30a, 0x000, 0x30b, 0x000, 0x000, 0x002, 0x001, 0x000, 0x002, 0x000, 0x003, 0x000, 0x004, 0x033, 0x040, 0x01d, 0x041, 0x001, 0x042, 0x004, 0x043, 0x000, 0x080, 0x01e, 0x081, 0x001, 0x082, 0x004, 0x083, 0x000, 0x190, 0x018, 0x115, 0x000, 0x116, 0x012, 0x117, 0x018, 0x04a, 0x011, 0x08a, 0x011, 0x04b, 0x000, 0x08b, 0x000, 0x048, 0x000, 0x088, 0x000, 0x04e, 0x012, 0x08e, 0x012, 0x058, 0x012, 0x098, 0x012, 0x059, 0x000, 0x099, 0x000, 0x05a, 0x003, 0x09a, 0x003, 0x05b, 0x001, 0x09b, 0x001, 0x054, 0x008, 0x094, 0x008, 0x055, 0x000, 0x095, 0x000, 0x056, 0x0c7, 0x096, 0x0c7, 0x057, 0x002, 0x097, 0x002, 0x0ff, 0x0ff, 0x060, 0x001, 0x0a0, 0x001, 0x061, 0x000, 0x0a1, 0x000, 0x062, 0x000, 0x0a2, 0x000, 0x063, 0x000, 0x0a3, 0x000, 0x070, 0x000, 0x0b0, 
0x000, 0x071, 0x004, 0x0b1, 0x004, 0x06c, 0x0e9, 0x0ac, 0x0e9, 0x06d, 0x003, 0x0ad, 0x003, 0x05c, 0x0d0, 0x09c, 0x0d0, 0x05d, 0x002, 0x09d, 0x002, 0x05e, 0x0f2, 0x09e, 0x0f2, 0x05f, 0x000, 0x09f, 0x000, 0x074, 0x000, 0x0b4, 0x000, 0x075, 0x000, 0x0b5, 0x000, 0x076, 0x000, 0x0b6, 0x000, 0x077, 0x000, 0x0b7, 0x000, 0x195, 0x008, 0x0ff, 0x0ff, 0x108, 0x0f8, 0x111, 0x000, 0x10e, 0x00a, 0x2a9, 0x0fd, 0x464, 0x001, 0x454, 0x042135, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x193, 0x000, 0x300, 0x000, 0x301, 0x006, 0x302, 0x000, 0x303, 0x006, 0x308, 0x040, 0x309, 0x000, 0x30a, 0x000, 0x30b, 0x000, 0x000, 0x002, 0x001, 0x000, 0x002, 0x000, 0x003, 0x000, 0x004, 0x033, 0x040, 0x01d, 0x041, 0x001, 0x042, 0x004, 0x043, 0x000, 0x080, 0x01e, 0x081, 0x001, 0x082, 0x004, 0x083, 0x000, 0x190, 0x018, 0x115, 0x000, 0x116, 0x012, 0x117, 0x018, 0x04a, 0x011, 0x08a, 0x011, 0x04b, 0x000, 
0x08b, 0x000, 0x048, 0x000, 0x088, 0x000, 0x04e, 0x012, 0x08e, 0x012, 0x058, 0x012, 0x098, 0x012, 0x059, 0x000, 0x099, 0x000, 0x05a, 0x003, 0x09a, 0x003, 0x05b, 0x001, 0x09b, 0x001, 0x054, 0x008, 0x094, 0x008, 0x055, 0x000, 0x095, 0x000, 0x056, 0x0c7, 0x096, 0x0c7, 0x057, 0x002, 0x097, 0x002, 0x060, 0x001, 0x0a0, 0x001, 0x061, 0x000, 0x0a1, 0x000, 0x062, 0x000, 0x0a2, 0x000, 0x063, 0x000, 0x0a3, 0x000, 0x070, 0x000, 0x0b0, 0x000, 0x071, 0x004, 0x0b1, 0x004, 0x06c, 0x0e9, 0x0ac, 0x0e9, 0x06d, 0x003, 0x0ad, 0x003, 0x05c, 0x0d0, 0x09c, 0x0d0, 0x05d, 0x002, 0x09d, 0x002, 0x05e, 0x0f2, 0x09e, 0x0f2, 0x05f, 0x000, 0x09f, 0x000, 0x074, 0x000, 0x0b4, 0x000, 0x075, 0x000, 0x0b5, 0x000, 0x076, 0x000, 0x0b6, 0x000, 0x077, 0x000, 0x0b7, 0x000, 0x195, 0x008, 0x598, 0x0e7, 0x599, 0x07d, 0x59a, 0x018, 0x59c, 0x066, 0x59d, 0x090, 0x59e, 0x001, 0x584, 0x000, 0x585, 0x000, 0x586, 0x003, 0x588, 0x0ff, 0x589, 0x00f, 0x58a, 0x000, 0x58b, 0x000, 0x58c, 0x010, 0x58d, 0x032, 0x58e, 0x054, 0x58f, 0x023, 0x590, 0x000, 0x595, 0x000, 0x596, 0x000, 0x597, 0x000, 0x464, 0x000, 0x46c, 0xbbbb10, 0x470, 0x101010, 0x478, 0x000, 0x474, 0x018, 0x454, 0x042135, 0x193, 0x0a6, 0x108, 0x0f8, 0x042, 0x003, 0x082, 0x003, 0x454, 0x0425b9, 0x454, 0x042539, 0x193, 0x000, 0x193, 0x0a6, 0x464, 0x000, 0, 0 }; /* Tuner */ static u32 reg_init_tuner_input[] = { 0x108, 0x0f8, /* Sync control */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x00a, /* Chroma control 1 */ 0, 0 }; /* Composite */ static u32 reg_init_composite_input[] = { 0x108, 0x0e8, /* Sync control */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x04a, /* Chroma control 1 */ 0, 0 }; /* S-Video */ static u32 reg_init_svideo_input[] = { 0x108, 0x0e8, /* Sync control */ 0x111, 0x000, /* Mode/delay control */ 0x10e, 0x04a, /* Chroma control 1 */ 0, 0 }; static u32 reg_set_audio_template[4][2] = { { /* for MONO tadachi 6/29 DMA audio output select? Register 0x46c 7-4: DMA2, 3-0: DMA1 ch. 
DMA4, DMA3 DMA2, DMA1 0: MAIN left, 1: MAIN right 2: AUX1 left, 3: AUX1 right 4: AUX2 left, 5: AUX2 right 6: DPL left, 7: DPL right 8: DPL center, 9: DPL surround A: monitor output, B: digital sense */ 0xbbbb00, /* tadachi 6/29 DAC and I2S output select? Register 0x470 7-4:DAC right ch. 3-0:DAC left ch. I2S1 right,left I2S2 right,left */ 0x00, }, { /* for STEREO */ 0xbbbb10, 0x101010, }, { /* for LANG1 */ 0xbbbb00, 0x00, }, { /* for LANG2/SAP */ 0xbbbb11, 0x111111, } }; /* Get detected audio flags (from saa7134 driver) */ static void get_inf_dev_status(struct v4l2_subdev *sd, int *dual_flag, int *stereo_flag) { u32 reg_data3; static char *stdres[0x20] = { [0x00] = "no standard detected", [0x01] = "B/G (in progress)", [0x02] = "D/K (in progress)", [0x03] = "M (in progress)", [0x04] = "B/G A2", [0x05] = "B/G NICAM", [0x06] = "D/K A2 (1)", [0x07] = "D/K A2 (2)", [0x08] = "D/K A2 (3)", [0x09] = "D/K NICAM", [0x0a] = "L NICAM", [0x0b] = "I NICAM", [0x0c] = "M Korea", [0x0d] = "M BTSC ", [0x0e] = "M EIAJ", [0x0f] = "FM radio / IF 10.7 / 50 deemp", [0x10] = "FM radio / IF 10.7 / 75 deemp", [0x11] = "FM radio / IF sel / 50 deemp", [0x12] = "FM radio / IF sel / 75 deemp", [0x13 ... 0x1e] = "unknown", [0x1f] = "??? [in progress]", }; *dual_flag = *stereo_flag = 0; /* (demdec status: 0x528) */ /* read current status */ reg_data3 = saa717x_read(sd, 0x0528); v4l2_dbg(1, debug, sd, "tvaudio thread status: 0x%x [%s%s%s]\n", reg_data3, stdres[reg_data3 & 0x1f], (reg_data3 & 0x000020) ? ",stereo" : "", (reg_data3 & 0x000040) ? ",dual" : ""); v4l2_dbg(1, debug, sd, "detailed status: " "%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s#%s\n", (reg_data3 & 0x000080) ? " A2/EIAJ pilot tone " : "", (reg_data3 & 0x000100) ? " A2/EIAJ dual " : "", (reg_data3 & 0x000200) ? " A2/EIAJ stereo " : "", (reg_data3 & 0x000400) ? " A2/EIAJ noise mute " : "", (reg_data3 & 0x000800) ? " BTSC/FM radio pilot " : "", (reg_data3 & 0x001000) ? " SAP carrier " : "", (reg_data3 & 0x002000) ? 
" BTSC stereo noise mute " : "", (reg_data3 & 0x004000) ? " SAP noise mute " : "", (reg_data3 & 0x008000) ? " VDSP " : "", (reg_data3 & 0x010000) ? " NICST " : "", (reg_data3 & 0x020000) ? " NICDU " : "", (reg_data3 & 0x040000) ? " NICAM muted " : "", (reg_data3 & 0x080000) ? " NICAM reserve sound " : "", (reg_data3 & 0x100000) ? " init done " : ""); if (reg_data3 & 0x000220) { v4l2_dbg(1, debug, sd, "ST!!!\n"); *stereo_flag = 1; } if (reg_data3 & 0x000140) { v4l2_dbg(1, debug, sd, "DUAL!!!\n"); *dual_flag = 1; } } /* regs write to set audio mode */ static void set_audio_mode(struct v4l2_subdev *sd, int audio_mode) { v4l2_dbg(1, debug, sd, "writing registers to set audio mode by set %d\n", audio_mode); saa717x_write(sd, 0x46c, reg_set_audio_template[audio_mode][0]); saa717x_write(sd, 0x470, reg_set_audio_template[audio_mode][1]); } /* write regs to set audio volume, bass and treble */ static int set_audio_regs(struct v4l2_subdev *sd, struct saa717x_state *decoder) { u8 mute = 0xac; /* -84 dB */ u32 val; unsigned int work_l, work_r; /* set SIF analog I/O select */ saa717x_write(sd, 0x0594, decoder->audio_input); v4l2_dbg(1, debug, sd, "set audio input %d\n", decoder->audio_input); /* normalize ( 65535 to 0 -> 24 to -40 (not -84)) */ work_l = (min(65536 - decoder->audio_main_balance, 32768) * decoder->audio_main_volume) / 32768; work_r = (min(decoder->audio_main_balance, (u16)32768) * decoder->audio_main_volume) / 32768; decoder->audio_main_vol_l = (long)work_l * (24 - (-40)) / 65535 - 40; decoder->audio_main_vol_r = (long)work_r * (24 - (-40)) / 65535 - 40; /* set main volume */ /* main volume L[7-0],R[7-0],0x00 24=24dB,-83dB, -84(mute) */ /* def:0dB->6dB(MPG600GR) */ /* if mute is on, set mute */ if (decoder->audio_main_mute) { val = mute | (mute << 8); } else { val = (u8)decoder->audio_main_vol_l | ((u8)decoder->audio_main_vol_r << 8); } saa717x_write(sd, 0x480, val); /* set bass and treble */ val = decoder->audio_main_bass & 0x1f; val |= 
(decoder->audio_main_treble & 0x1f) << 5; saa717x_write(sd, 0x488, val); return 0; } /********** scaling staff ***********/ static void set_h_prescale(struct v4l2_subdev *sd, int task, int prescale) { static const struct { int xpsc; int xacl; int xc2_1; int xdcg; int vpfy; } vals[] = { /* XPSC XACL XC2_1 XDCG VPFY */ { 1, 0, 0, 0, 0 }, { 2, 2, 1, 2, 2 }, { 3, 4, 1, 3, 2 }, { 4, 8, 1, 4, 2 }, { 5, 8, 1, 4, 2 }, { 6, 8, 1, 4, 3 }, { 7, 8, 1, 4, 3 }, { 8, 15, 0, 4, 3 }, { 9, 15, 0, 4, 3 }, { 10, 16, 1, 5, 3 }, }; static const int count = ARRAY_SIZE(vals); int i, task_shift; task_shift = task * 0x40; for (i = 0; i < count; i++) if (vals[i].xpsc == prescale) break; if (i == count) return; /* horizontal prescaling */ saa717x_write(sd, 0x60 + task_shift, vals[i].xpsc); /* accumulation length */ saa717x_write(sd, 0x61 + task_shift, vals[i].xacl); /* level control */ saa717x_write(sd, 0x62 + task_shift, (vals[i].xc2_1 << 3) | vals[i].xdcg); /*FIR prefilter control */ saa717x_write(sd, 0x63 + task_shift, (vals[i].vpfy << 2) | vals[i].vpfy); } /********** scaling staff ***********/ static void set_v_scale(struct v4l2_subdev *sd, int task, int yscale) { int task_shift; task_shift = task * 0x40; /* Vertical scaling ratio (LOW) */ saa717x_write(sd, 0x70 + task_shift, yscale & 0xff); /* Vertical scaling ratio (HI) */ saa717x_write(sd, 0x71 + task_shift, yscale >> 8); } static int saa717x_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct saa717x_state *state = to_state(sd); switch (ctrl->id) { case V4L2_CID_BRIGHTNESS: saa717x_write(sd, 0x10a, ctrl->val); return 0; case V4L2_CID_CONTRAST: saa717x_write(sd, 0x10b, ctrl->val); return 0; case V4L2_CID_SATURATION: saa717x_write(sd, 0x10c, ctrl->val); return 0; case V4L2_CID_HUE: saa717x_write(sd, 0x10d, ctrl->val); return 0; case V4L2_CID_AUDIO_MUTE: state->audio_main_mute = ctrl->val; break; case V4L2_CID_AUDIO_VOLUME: state->audio_main_volume = ctrl->val; break; case V4L2_CID_AUDIO_BALANCE: 
state->audio_main_balance = ctrl->val; break; case V4L2_CID_AUDIO_TREBLE: state->audio_main_treble = ctrl->val; break; case V4L2_CID_AUDIO_BASS: state->audio_main_bass = ctrl->val; break; default: return 0; } set_audio_regs(sd, state); return 0; } static int saa717x_s_video_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct saa717x_state *decoder = to_state(sd); int is_tuner = input & 0x80; /* tuner input flag */ input &= 0x7f; v4l2_dbg(1, debug, sd, "decoder set input (%d)\n", input); /* inputs from 0-9 are available*/ /* saa717x have mode0-mode9 but mode5 is reserved. */ if (input > 9 || input == 5) return -EINVAL; if (decoder->input != input) { int input_line = input; decoder->input = input_line; v4l2_dbg(1, debug, sd, "now setting %s input %d\n", input_line >= 6 ? "S-Video" : "Composite", input_line); /* select mode */ saa717x_write(sd, 0x102, (saa717x_read(sd, 0x102) & 0xf0) | input_line); /* bypass chrominance trap for modes 6..9 */ saa717x_write(sd, 0x109, (saa717x_read(sd, 0x109) & 0x7f) | (input_line < 6 ? 
0x0 : 0x80)); /* change audio_mode */ if (is_tuner) { /* tuner */ set_audio_mode(sd, decoder->tuner_audio_mode); } else { /* Force to STEREO mode if Composite or * S-Video were chosen */ set_audio_mode(sd, TUNER_AUDIO_STEREO); } /* change initialize procedure (Composite/S-Video) */ if (is_tuner) saa717x_write_regs(sd, reg_init_tuner_input); else if (input_line >= 6) saa717x_write_regs(sd, reg_init_svideo_input); else saa717x_write_regs(sd, reg_init_composite_input); } return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int saa717x_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { reg->val = saa717x_read(sd, reg->reg); reg->size = 1; return 0; } static int saa717x_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg) { u16 addr = reg->reg & 0xffff; u8 val = reg->val & 0xff; saa717x_write(sd, addr, val); return 0; } #endif static int saa717x_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; int prescale, h_scale, v_scale; v4l2_dbg(1, debug, sd, "decoder set size\n"); if (format->pad || fmt->code != MEDIA_BUS_FMT_FIXED) return -EINVAL; /* FIXME need better bounds checking here */ if (fmt->width < 1 || fmt->width > 1440) return -EINVAL; if (fmt->height < 1 || fmt->height > 960) return -EINVAL; fmt->field = V4L2_FIELD_INTERLACED; fmt->colorspace = V4L2_COLORSPACE_SMPTE170M; if (format->which == V4L2_SUBDEV_FORMAT_TRY) return 0; /* scaling setting */ /* NTSC and interlace only */ prescale = SAA717X_NTSC_WIDTH / fmt->width; if (prescale == 0) prescale = 1; h_scale = 1024 * SAA717X_NTSC_WIDTH / prescale / fmt->width; /* interlace */ v_scale = 512 * 2 * SAA717X_NTSC_HEIGHT / fmt->height; /* Horizontal prescaling etc */ set_h_prescale(sd, 0, prescale); set_h_prescale(sd, 1, prescale); /* Horizontal scaling increment */ /* TASK A */ saa717x_write(sd, 0x6C, (u8)(h_scale & 0xFF)); saa717x_write(sd, 0x6D, (u8)((h_scale >> 8) & 0xFF)); /* TASK 
B */ saa717x_write(sd, 0xAC, (u8)(h_scale & 0xFF)); saa717x_write(sd, 0xAD, (u8)((h_scale >> 8) & 0xFF)); /* Vertical prescaling etc */ set_v_scale(sd, 0, v_scale); set_v_scale(sd, 1, v_scale); /* set video output size */ /* video number of pixels at output */ /* TASK A */ saa717x_write(sd, 0x5C, (u8)(fmt->width & 0xFF)); saa717x_write(sd, 0x5D, (u8)((fmt->width >> 8) & 0xFF)); /* TASK B */ saa717x_write(sd, 0x9C, (u8)(fmt->width & 0xFF)); saa717x_write(sd, 0x9D, (u8)((fmt->width >> 8) & 0xFF)); /* video number of lines at output */ /* TASK A */ saa717x_write(sd, 0x5E, (u8)(fmt->height & 0xFF)); saa717x_write(sd, 0x5F, (u8)((fmt->height >> 8) & 0xFF)); /* TASK B */ saa717x_write(sd, 0x9E, (u8)(fmt->height & 0xFF)); saa717x_write(sd, 0x9F, (u8)((fmt->height >> 8) & 0xFF)); return 0; } static int saa717x_s_radio(struct v4l2_subdev *sd) { struct saa717x_state *decoder = to_state(sd); decoder->radio = 1; return 0; } static int saa717x_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct saa717x_state *decoder = to_state(sd); v4l2_dbg(1, debug, sd, "decoder set norm "); v4l2_dbg(1, debug, sd, "(not yet implemented)\n"); decoder->radio = 0; decoder->std = std; return 0; } static int saa717x_s_audio_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct saa717x_state *decoder = to_state(sd); if (input < 3) { /* FIXME! --tadachi */ decoder->audio_input = input; v4l2_dbg(1, debug, sd, "set decoder audio input to %d\n", decoder->audio_input); set_audio_regs(sd, decoder); return 0; } return -ERANGE; } static int saa717x_s_stream(struct v4l2_subdev *sd, int enable) { struct saa717x_state *decoder = to_state(sd); v4l2_dbg(1, debug, sd, "decoder %s output\n", enable ? "enable" : "disable"); decoder->enable = enable; saa717x_write(sd, 0x193, enable ? 
0xa6 : 0x26); return 0; } /* change audio mode */ static int saa717x_s_tuner(struct v4l2_subdev *sd, const struct v4l2_tuner *vt) { struct saa717x_state *decoder = to_state(sd); int audio_mode; char *mes[4] = { "MONO", "STEREO", "LANG1", "LANG2/SAP" }; audio_mode = TUNER_AUDIO_STEREO; switch (vt->audmode) { case V4L2_TUNER_MODE_MONO: audio_mode = TUNER_AUDIO_MONO; break; case V4L2_TUNER_MODE_STEREO: audio_mode = TUNER_AUDIO_STEREO; break; case V4L2_TUNER_MODE_LANG2: audio_mode = TUNER_AUDIO_LANG2; break; case V4L2_TUNER_MODE_LANG1: audio_mode = TUNER_AUDIO_LANG1; break; } v4l2_dbg(1, debug, sd, "change audio mode to %s\n", mes[audio_mode]); decoder->tuner_audio_mode = audio_mode; /* The registers are not changed here. */ /* See DECODER_ENABLE_OUTPUT section. */ set_audio_mode(sd, decoder->tuner_audio_mode); return 0; } static int saa717x_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct saa717x_state *decoder = to_state(sd); int dual_f, stereo_f; if (decoder->radio) return 0; get_inf_dev_status(sd, &dual_f, &stereo_f); v4l2_dbg(1, debug, sd, "DETECT==st:%d dual:%d\n", stereo_f, dual_f); /* mono */ if ((dual_f == 0) && (stereo_f == 0)) { vt->rxsubchans = V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==MONO\n"); } /* stereo */ if (stereo_f == 1) { if (vt->audmode == V4L2_TUNER_MODE_STEREO || vt->audmode == V4L2_TUNER_MODE_LANG1) { vt->rxsubchans = V4L2_TUNER_SUB_STEREO; v4l2_dbg(1, debug, sd, "DETECT==ST(ST)\n"); } else { vt->rxsubchans = V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==ST(MONO)\n"); } } /* dual */ if (dual_f == 1) { if (vt->audmode == V4L2_TUNER_MODE_LANG2) { vt->rxsubchans = V4L2_TUNER_SUB_LANG2 | V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==DUAL1\n"); } else { vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_MONO; v4l2_dbg(1, debug, sd, "DETECT==DUAL2\n"); } } return 0; } static int saa717x_log_status(struct v4l2_subdev *sd) { struct saa717x_state *state = to_state(sd); v4l2_ctrl_handler_log_status(&state->hdl, 
sd->name); return 0; } /* ----------------------------------------------------------------------- */ static const struct v4l2_ctrl_ops saa717x_ctrl_ops = { .s_ctrl = saa717x_s_ctrl, }; static const struct v4l2_subdev_core_ops saa717x_core_ops = { #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = saa717x_g_register, .s_register = saa717x_s_register, #endif .log_status = saa717x_log_status, }; static const struct v4l2_subdev_tuner_ops saa717x_tuner_ops = { .g_tuner = saa717x_g_tuner, .s_tuner = saa717x_s_tuner, .s_radio = saa717x_s_radio, }; static const struct v4l2_subdev_video_ops saa717x_video_ops = { .s_std = saa717x_s_std, .s_routing = saa717x_s_video_routing, .s_stream = saa717x_s_stream, }; static const struct v4l2_subdev_audio_ops saa717x_audio_ops = { .s_routing = saa717x_s_audio_routing, }; static const struct v4l2_subdev_pad_ops saa717x_pad_ops = { .set_fmt = saa717x_set_fmt, }; static const struct v4l2_subdev_ops saa717x_ops = { .core = &saa717x_core_ops, .tuner = &saa717x_tuner_ops, .audio = &saa717x_audio_ops, .video = &saa717x_video_ops, .pad = &saa717x_pad_ops, }; /* ----------------------------------------------------------------------- */ /* i2c implementation */ /* ----------------------------------------------------------------------- */ static int saa717x_probe(struct i2c_client *client) { struct saa717x_state *decoder; struct v4l2_ctrl_handler *hdl; struct v4l2_subdev *sd; u8 id = 0; char *p = ""; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; decoder = devm_kzalloc(&client->dev, sizeof(*decoder), GFP_KERNEL); if (decoder == NULL) return -ENOMEM; sd = &decoder->sd; v4l2_i2c_subdev_init(sd, client, &saa717x_ops); if (saa717x_write(sd, 0x5a4, 0xfe) && saa717x_write(sd, 0x5a5, 0x0f) && saa717x_write(sd, 0x5a6, 0x00) && saa717x_write(sd, 0x5a7, 0x01)) id = saa717x_read(sd, 0x5a0); if (id != 0xc2 && id != 0x32 && id != 0xf2 && id != 0x6c) { v4l2_dbg(1, debug, sd, 
"saa717x not found (id=%02x)\n", id); return -ENODEV; } if (id == 0xc2) p = "saa7173"; else if (id == 0x32) p = "saa7174A"; else if (id == 0x6c) p = "saa7174HL"; else p = "saa7171"; v4l2_info(sd, "%s found @ 0x%x (%s)\n", p, client->addr << 1, client->adapter->name); hdl = &decoder->hdl; v4l2_ctrl_handler_init(hdl, 9); /* add in ascending ID order */ v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_CONTRAST, 0, 255, 1, 68); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_SATURATION, 0, 255, 1, 64); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_HUE, -128, 127, 1, 0); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100, 42000); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100, 32768); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_BASS, -16, 15, 1, 0); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_TREBLE, -16, 15, 1, 0); v4l2_ctrl_new_std(hdl, &saa717x_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); sd->ctrl_handler = hdl; if (hdl->error) { int err = hdl->error; v4l2_ctrl_handler_free(hdl); return err; } decoder->std = V4L2_STD_NTSC; decoder->input = -1; decoder->enable = 1; /* FIXME!! */ decoder->playback = 0; /* initially capture mode used */ decoder->audio = 1; /* DECODER_AUDIO_48_KHZ */ decoder->audio_input = 2; /* FIXME!! */ decoder->tuner_audio_mode = TUNER_AUDIO_STEREO; /* set volume, bass and treble */ decoder->audio_main_vol_l = 6; decoder->audio_main_vol_r = 6; v4l2_dbg(1, debug, sd, "writing init values\n"); /* FIXME!! 
*/ saa717x_write_regs(sd, reg_init_initialize); v4l2_ctrl_handler_setup(hdl); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(2*HZ); return 0; } static void saa717x_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); v4l2_ctrl_handler_free(sd->ctrl_handler); } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id saa717x_id[] = { { "saa717x", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, saa717x_id); static struct i2c_driver saa717x_driver = { .driver = { .name = "saa717x", }, .probe = saa717x_probe, .remove = saa717x_remove, .id_table = saa717x_id, }; module_i2c_driver(saa717x_driver);
linux-master
drivers/media/i2c/saa717x.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cs53l32a (Adaptec AVC-2010 and AVC-2410) i2c ivtv driver.
 * Copyright (C) 2005 Martin Vaughan
 *
 * Audio source switching for Adaptec AVC-2410 added by Trev Jackson
 *
 * The device is a stereo audio ADC reached over SMBus byte transfers;
 * all configuration below is done through eight 8-bit registers
 * (0x01..0x07).  Register bit meanings are taken from the init values
 * used here; see the CS53L32A datasheet for the authoritative map.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>

MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC");
MODULE_AUTHOR("Martin Vaughan");
MODULE_LICENSE("GPL");

/* Module parameter: enable v4l2_dbg() register-dump messages. */
static bool debug;

module_param(debug, bool, 0644);

MODULE_PARM_DESC(debug, "Debugging messages, 0=Off (default), 1=On");

/*
 * Per-device state: the embedded v4l2 subdevice plus the control
 * handler that owns the volume and mute controls created in probe().
 */
struct cs53l32a_state {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
};

/* Map a subdev pointer back to the enclosing state structure. */
static inline struct cs53l32a_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct cs53l32a_state, sd);
}

/* Map a control back to the subdev via its owning handler. */
static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
	return &container_of(ctrl->handler, struct cs53l32a_state, hdl)->sd;
}

/* ----------------------------------------------------------------------- */

/* Write one 8-bit register over SMBus; returns i2c layer status. */
static int cs53l32a_write(struct v4l2_subdev *sd, u8 reg, u8 value)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return i2c_smbus_write_byte_data(client, reg, value);
}

/* Read one 8-bit register over SMBus; negative value on i2c error. */
static int cs53l32a_read(struct v4l2_subdev *sd, u8 reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return i2c_smbus_read_byte_data(client, reg);
}

/*
 * Select the audio input (0..2).  The selection is encoded into bits
 * 5:4 of register 0x01; the low bit 0x01 kept in the write presumably
 * preserves a power/enable bit set at probe time (reg 0x01 is
 * initialized to 0x21 there) -- confirm against the datasheet.
 */
static int cs53l32a_s_routing(struct v4l2_subdev *sd,
			      u32 input, u32 output, u32 config)
{
	/* There are 2 physical inputs, but the second input can be
	   placed in two modes, the first mode bypasses the PGA (gain),
	   the second goes through the PGA. Hence there are three
	   possible inputs to choose from. */
	if (input > 2) {
		v4l2_err(sd, "Invalid input %d.\n", input);
		return -EINVAL;
	}
	cs53l32a_write(sd, 0x01, 0x01 + (input << 4));
	return 0;
}

/*
 * Apply a V4L2 control value to the hardware.  Only the two controls
 * registered in probe() are handled; anything else is -EINVAL.
 */
static int cs53l32a_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_sd(ctrl);

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		/* reg 0x03: 0xf0 when muted, 0x30 (probe default) when not */
		cs53l32a_write(sd, 0x03, ctrl->val ? 0xf0 : 0x30);
		return 0;
	case V4L2_CID_AUDIO_VOLUME:
		/* Same value into regs 0x04 and 0x05 -- presumably the
		   left/right channel volumes; verify with datasheet. */
		cs53l32a_write(sd, 0x04, (u8)ctrl->val);
		cs53l32a_write(sd, 0x05, (u8)ctrl->val);
		return 0;
	}
	return -EINVAL;
}

/* Log the currently selected input and all control values. */
static int cs53l32a_log_status(struct v4l2_subdev *sd)
{
	struct cs53l32a_state *state = to_state(sd);
	u8 v = cs53l32a_read(sd, 0x01);

	/* Input number lives in bits 5:4 of reg 0x01 (see s_routing). */
	v4l2_info(sd, "Input: %d\n", (v >> 4) & 3);
	v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
	return 0;
}

/* ----------------------------------------------------------------------- */

static const struct v4l2_ctrl_ops cs53l32a_ctrl_ops = {
	.s_ctrl = cs53l32a_s_ctrl,
};

static const struct v4l2_subdev_core_ops cs53l32a_core_ops = {
	.log_status = cs53l32a_log_status,
};

static const struct v4l2_subdev_audio_ops cs53l32a_audio_ops = {
	.s_routing = cs53l32a_s_routing,
};

static const struct v4l2_subdev_ops cs53l32a_ops = {
	.core = &cs53l32a_core_ops,
	.audio = &cs53l32a_audio_ops,
};

/* ----------------------------------------------------------------------- */

/* i2c implementation */

/*
 * Generic i2c probe
 * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
 */

/*
 * Probe: verify SMBus byte-data support, register the subdev, create
 * the volume (-96..12, default 0) and mute controls, then program the
 * Adaptec 2010/2410 register defaults.  Register contents are dumped
 * via v4l2_dbg before and after initialization when debug is set.
 */
static int cs53l32a_probe(struct i2c_client *client)
{
	const struct i2c_device_id *id = i2c_client_get_device_id(client);
	struct cs53l32a_state *state;
	struct v4l2_subdev *sd;
	int i;

	/* Check if the adapter supports the needed features */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	if (!id)
		strscpy(client->name, "cs53l32a", sizeof(client->name));

	v4l_info(client, "chip found @ 0x%x (%s)\n",
		 client->addr << 1, client->adapter->name);

	/* devm allocation: freed automatically on probe failure/unbind. */
	state = devm_kzalloc(&client->dev, sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	sd = &state->sd;
	v4l2_i2c_subdev_init(sd, client, &cs53l32a_ops);

	/* Dump the pre-init register contents (debug aid only). */
	for (i = 1; i <= 7; i++) {
		u8 v = cs53l32a_read(sd, i);

		v4l2_dbg(1, debug, sd, "Read Reg %d %02x\n", i, v);
	}

	v4l2_ctrl_handler_init(&state->hdl, 2);
	v4l2_ctrl_new_std(&state->hdl, &cs53l32a_ctrl_ops,
			  V4L2_CID_AUDIO_VOLUME, -96, 12, 1, 0);
	v4l2_ctrl_new_std(&state->hdl, &cs53l32a_ctrl_ops,
			  V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
	sd->ctrl_handler = &state->hdl;
	if (state->hdl.error) {
		int err = state->hdl.error;

		v4l2_ctrl_handler_free(&state->hdl);
		return err;
	}

	/* Set cs53l32a internal register for Adaptec 2010/2410 setup */

	cs53l32a_write(sd, 0x01, 0x21);
	cs53l32a_write(sd, 0x02, 0x29);
	cs53l32a_write(sd, 0x03, 0x30);
	cs53l32a_write(sd, 0x04, 0x00);
	cs53l32a_write(sd, 0x05, 0x00);
	cs53l32a_write(sd, 0x06, 0x00);
	cs53l32a_write(sd, 0x07, 0x00);

	/* Display results, should be 0x21,0x29,0x30,0x00,0x00,0x00,0x00 */

	for (i = 1; i <= 7; i++) {
		u8 v = cs53l32a_read(sd, i);

		v4l2_dbg(1, debug, sd, "Read Reg %d %02x\n", i, v);
	}
	return 0;
}

/* Unbind: unregister the subdev and release the control handler. */
static void cs53l32a_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct cs53l32a_state *state = to_state(sd);

	v4l2_device_unregister_subdev(sd);
	v4l2_ctrl_handler_free(&state->hdl);
}

static const struct i2c_device_id cs53l32a_id[] = {
	{ "cs53l32a", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, cs53l32a_id);

static struct i2c_driver cs53l32a_driver = {
	.driver = {
		.name = "cs53l32a",
	},
	.probe = cs53l32a_probe,
	.remove = cs53l32a_remove,
	.id_table = cs53l32a_id,
};

module_i2c_driver(cs53l32a_driver);
linux-master
drivers/media/i2c/cs53l32a.c
// SPDX-License-Identifier: GPL-2.0 /* * imx214.c - imx214 sensor driver * * Copyright 2018 Qtechnology A/S * * Ricardo Ribalda <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <media/media-entity.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-subdev.h> #define IMX214_DEFAULT_CLK_FREQ 24000000 #define IMX214_DEFAULT_LINK_FREQ 480000000 #define IMX214_DEFAULT_PIXEL_RATE ((IMX214_DEFAULT_LINK_FREQ * 8LL) / 10) #define IMX214_FPS 30 #define IMX214_MBUS_CODE MEDIA_BUS_FMT_SRGGB10_1X10 static const char * const imx214_supply_name[] = { "vdda", "vddd", "vdddo", }; #define IMX214_NUM_SUPPLIES ARRAY_SIZE(imx214_supply_name) struct imx214 { struct device *dev; struct clk *xclk; struct regmap *regmap; struct v4l2_subdev sd; struct media_pad pad; struct v4l2_mbus_framefmt fmt; struct v4l2_rect crop; struct v4l2_ctrl_handler ctrls; struct v4l2_ctrl *pixel_rate; struct v4l2_ctrl *link_freq; struct v4l2_ctrl *exposure; struct v4l2_ctrl *unit_size; struct regulator_bulk_data supplies[IMX214_NUM_SUPPLIES]; struct gpio_desc *enable_gpio; /* * Serialize control access, get/set format, get selection * and start streaming. 
*/ struct mutex mutex; bool streaming; }; struct reg_8 { u16 addr; u8 val; }; enum { IMX214_TABLE_WAIT_MS = 0, IMX214_TABLE_END, IMX214_MAX_RETRIES, IMX214_WAIT_MS }; /*From imx214_mode_tbls.h*/ static const struct reg_8 mode_4096x2304[] = { {0x0114, 0x03}, {0x0220, 0x00}, {0x0221, 0x11}, {0x0222, 0x01}, {0x0340, 0x0C}, {0x0341, 0x7A}, {0x0342, 0x13}, {0x0343, 0x90}, {0x0344, 0x00}, {0x0345, 0x38}, {0x0346, 0x01}, {0x0347, 0x98}, {0x0348, 0x10}, {0x0349, 0x37}, {0x034A, 0x0A}, {0x034B, 0x97}, {0x0381, 0x01}, {0x0383, 0x01}, {0x0385, 0x01}, {0x0387, 0x01}, {0x0900, 0x00}, {0x0901, 0x00}, {0x0902, 0x00}, {0x3000, 0x35}, {0x3054, 0x01}, {0x305C, 0x11}, {0x0112, 0x0A}, {0x0113, 0x0A}, {0x034C, 0x10}, {0x034D, 0x00}, {0x034E, 0x09}, {0x034F, 0x00}, {0x0401, 0x00}, {0x0404, 0x00}, {0x0405, 0x10}, {0x0408, 0x00}, {0x0409, 0x00}, {0x040A, 0x00}, {0x040B, 0x00}, {0x040C, 0x10}, {0x040D, 0x00}, {0x040E, 0x09}, {0x040F, 0x00}, {0x0301, 0x05}, {0x0303, 0x02}, {0x0305, 0x03}, {0x0306, 0x00}, {0x0307, 0x96}, {0x0309, 0x0A}, {0x030B, 0x01}, {0x0310, 0x00}, {0x0820, 0x12}, {0x0821, 0xC0}, {0x0822, 0x00}, {0x0823, 0x00}, {0x3A03, 0x09}, {0x3A04, 0x50}, {0x3A05, 0x01}, {0x0B06, 0x01}, {0x30A2, 0x00}, {0x30B4, 0x00}, {0x3A02, 0xFF}, {0x3011, 0x00}, {0x3013, 0x01}, {0x0202, 0x0C}, {0x0203, 0x70}, {0x0224, 0x01}, {0x0225, 0xF4}, {0x0204, 0x00}, {0x0205, 0x00}, {0x020E, 0x01}, {0x020F, 0x00}, {0x0210, 0x01}, {0x0211, 0x00}, {0x0212, 0x01}, {0x0213, 0x00}, {0x0214, 0x01}, {0x0215, 0x00}, {0x0216, 0x00}, {0x0217, 0x00}, {0x4170, 0x00}, {0x4171, 0x10}, {0x4176, 0x00}, {0x4177, 0x3C}, {0xAE20, 0x04}, {0xAE21, 0x5C}, {IMX214_TABLE_WAIT_MS, 10}, {0x0138, 0x01}, {IMX214_TABLE_END, 0x00} }; static const struct reg_8 mode_1920x1080[] = { {0x0114, 0x03}, {0x0220, 0x00}, {0x0221, 0x11}, {0x0222, 0x01}, {0x0340, 0x0C}, {0x0341, 0x7A}, {0x0342, 0x13}, {0x0343, 0x90}, {0x0344, 0x04}, {0x0345, 0x78}, {0x0346, 0x03}, {0x0347, 0xFC}, {0x0348, 0x0B}, {0x0349, 0xF7}, {0x034A, 0x08}, {0x034B, 0x33}, 
{0x0381, 0x01}, {0x0383, 0x01}, {0x0385, 0x01}, {0x0387, 0x01}, {0x0900, 0x00}, {0x0901, 0x00}, {0x0902, 0x00}, {0x3000, 0x35}, {0x3054, 0x01}, {0x305C, 0x11}, {0x0112, 0x0A}, {0x0113, 0x0A}, {0x034C, 0x07}, {0x034D, 0x80}, {0x034E, 0x04}, {0x034F, 0x38}, {0x0401, 0x00}, {0x0404, 0x00}, {0x0405, 0x10}, {0x0408, 0x00}, {0x0409, 0x00}, {0x040A, 0x00}, {0x040B, 0x00}, {0x040C, 0x07}, {0x040D, 0x80}, {0x040E, 0x04}, {0x040F, 0x38}, {0x0301, 0x05}, {0x0303, 0x02}, {0x0305, 0x03}, {0x0306, 0x00}, {0x0307, 0x96}, {0x0309, 0x0A}, {0x030B, 0x01}, {0x0310, 0x00}, {0x0820, 0x12}, {0x0821, 0xC0}, {0x0822, 0x00}, {0x0823, 0x00}, {0x3A03, 0x04}, {0x3A04, 0xF8}, {0x3A05, 0x02}, {0x0B06, 0x01}, {0x30A2, 0x00}, {0x30B4, 0x00}, {0x3A02, 0xFF}, {0x3011, 0x00}, {0x3013, 0x01}, {0x0202, 0x0C}, {0x0203, 0x70}, {0x0224, 0x01}, {0x0225, 0xF4}, {0x0204, 0x00}, {0x0205, 0x00}, {0x020E, 0x01}, {0x020F, 0x00}, {0x0210, 0x01}, {0x0211, 0x00}, {0x0212, 0x01}, {0x0213, 0x00}, {0x0214, 0x01}, {0x0215, 0x00}, {0x0216, 0x00}, {0x0217, 0x00}, {0x4170, 0x00}, {0x4171, 0x10}, {0x4176, 0x00}, {0x4177, 0x3C}, {0xAE20, 0x04}, {0xAE21, 0x5C}, {IMX214_TABLE_WAIT_MS, 10}, {0x0138, 0x01}, {IMX214_TABLE_END, 0x00} }; static const struct reg_8 mode_table_common[] = { /* software reset */ /* software standby settings */ {0x0100, 0x00}, /* ATR setting */ {0x9300, 0x02}, /* external clock setting */ {0x0136, 0x18}, {0x0137, 0x00}, /* global setting */ /* basic config */ {0x0101, 0x00}, {0x0105, 0x01}, {0x0106, 0x01}, {0x4550, 0x02}, {0x4601, 0x00}, {0x4642, 0x05}, {0x6227, 0x11}, {0x6276, 0x00}, {0x900E, 0x06}, {0xA802, 0x90}, {0xA803, 0x11}, {0xA804, 0x62}, {0xA805, 0x77}, {0xA806, 0xAE}, {0xA807, 0x34}, {0xA808, 0xAE}, {0xA809, 0x35}, {0xA80A, 0x62}, {0xA80B, 0x83}, {0xAE33, 0x00}, /* analog setting */ {0x4174, 0x00}, {0x4175, 0x11}, {0x4612, 0x29}, {0x461B, 0x12}, {0x461F, 0x06}, {0x4635, 0x07}, {0x4637, 0x30}, {0x463F, 0x18}, {0x4641, 0x0D}, {0x465B, 0x12}, {0x465F, 0x11}, {0x4663, 0x11}, {0x4667, 0x0F}, 
{0x466F, 0x0F}, {0x470E, 0x09}, {0x4909, 0xAB}, {0x490B, 0x95}, {0x4915, 0x5D}, {0x4A5F, 0xFF}, {0x4A61, 0xFF}, {0x4A73, 0x62}, {0x4A85, 0x00}, {0x4A87, 0xFF}, /* embedded data */ {0x5041, 0x04}, {0x583C, 0x04}, {0x620E, 0x04}, {0x6EB2, 0x01}, {0x6EB3, 0x00}, {0x9300, 0x02}, /* imagequality */ /* HDR setting */ {0x3001, 0x07}, {0x6D12, 0x3F}, {0x6D13, 0xFF}, {0x9344, 0x03}, {0x9706, 0x10}, {0x9707, 0x03}, {0x9708, 0x03}, {0x9E04, 0x01}, {0x9E05, 0x00}, {0x9E0C, 0x01}, {0x9E0D, 0x02}, {0x9E24, 0x00}, {0x9E25, 0x8C}, {0x9E26, 0x00}, {0x9E27, 0x94}, {0x9E28, 0x00}, {0x9E29, 0x96}, /* CNR parameter setting */ {0x69DB, 0x01}, /* Moire reduction */ {0x6957, 0x01}, /* image enhancement */ {0x6987, 0x17}, {0x698A, 0x03}, {0x698B, 0x03}, /* white balanace */ {0x0B8E, 0x01}, {0x0B8F, 0x00}, {0x0B90, 0x01}, {0x0B91, 0x00}, {0x0B92, 0x01}, {0x0B93, 0x00}, {0x0B94, 0x01}, {0x0B95, 0x00}, /* ATR setting */ {0x6E50, 0x00}, {0x6E51, 0x32}, {0x9340, 0x00}, {0x9341, 0x3C}, {0x9342, 0x03}, {0x9343, 0xFF}, {IMX214_TABLE_END, 0x00} }; /* * Declare modes in order, from biggest * to smallest height. 
*/ static const struct imx214_mode { u32 width; u32 height; const struct reg_8 *reg_table; } imx214_modes[] = { { .width = 4096, .height = 2304, .reg_table = mode_4096x2304, }, { .width = 1920, .height = 1080, .reg_table = mode_1920x1080, }, }; static inline struct imx214 *to_imx214(struct v4l2_subdev *sd) { return container_of(sd, struct imx214, sd); } static int __maybe_unused imx214_power_on(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct imx214 *imx214 = to_imx214(sd); int ret; ret = regulator_bulk_enable(IMX214_NUM_SUPPLIES, imx214->supplies); if (ret < 0) { dev_err(imx214->dev, "failed to enable regulators: %d\n", ret); return ret; } usleep_range(2000, 3000); ret = clk_prepare_enable(imx214->xclk); if (ret < 0) { regulator_bulk_disable(IMX214_NUM_SUPPLIES, imx214->supplies); dev_err(imx214->dev, "clk prepare enable failed\n"); return ret; } gpiod_set_value_cansleep(imx214->enable_gpio, 1); usleep_range(12000, 15000); return 0; } static int __maybe_unused imx214_power_off(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct imx214 *imx214 = to_imx214(sd); gpiod_set_value_cansleep(imx214->enable_gpio, 0); clk_disable_unprepare(imx214->xclk); regulator_bulk_disable(IMX214_NUM_SUPPLIES, imx214->supplies); usleep_range(10, 20); return 0; } static int imx214_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index > 0) return -EINVAL; code->code = IMX214_MBUS_CODE; return 0; } static int imx214_enum_frame_size(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { if (fse->code != IMX214_MBUS_CODE) return -EINVAL; if (fse->index >= ARRAY_SIZE(imx214_modes)) return -EINVAL; fse->min_width = fse->max_width = imx214_modes[fse->index].width; fse->min_height = fse->max_height = 
imx214_modes[fse->index].height; return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int imx214_s_register(struct v4l2_subdev *subdev, const struct v4l2_dbg_register *reg) { struct imx214 *imx214 = container_of(subdev, struct imx214, sd); return regmap_write(imx214->regmap, reg->reg, reg->val); } static int imx214_g_register(struct v4l2_subdev *subdev, struct v4l2_dbg_register *reg) { struct imx214 *imx214 = container_of(subdev, struct imx214, sd); unsigned int aux; int ret; reg->size = 1; ret = regmap_read(imx214->regmap, reg->reg, &aux); reg->val = aux; return ret; } #endif static const struct v4l2_subdev_core_ops imx214_core_ops = { #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = imx214_g_register, .s_register = imx214_s_register, #endif }; static struct v4l2_mbus_framefmt * __imx214_get_pad_format(struct imx214 *imx214, struct v4l2_subdev_state *sd_state, unsigned int pad, enum v4l2_subdev_format_whence which) { switch (which) { case V4L2_SUBDEV_FORMAT_TRY: return v4l2_subdev_get_try_format(&imx214->sd, sd_state, pad); case V4L2_SUBDEV_FORMAT_ACTIVE: return &imx214->fmt; default: return NULL; } } static int imx214_get_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct imx214 *imx214 = to_imx214(sd); mutex_lock(&imx214->mutex); format->format = *__imx214_get_pad_format(imx214, sd_state, format->pad, format->which); mutex_unlock(&imx214->mutex); return 0; } static struct v4l2_rect * __imx214_get_pad_crop(struct imx214 *imx214, struct v4l2_subdev_state *sd_state, unsigned int pad, enum v4l2_subdev_format_whence which) { switch (which) { case V4L2_SUBDEV_FORMAT_TRY: return v4l2_subdev_get_try_crop(&imx214->sd, sd_state, pad); case V4L2_SUBDEV_FORMAT_ACTIVE: return &imx214->crop; default: return NULL; } } static int imx214_set_format(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct imx214 *imx214 = to_imx214(sd); struct v4l2_mbus_framefmt *__format; struct 
v4l2_rect *__crop; const struct imx214_mode *mode; mutex_lock(&imx214->mutex); __crop = __imx214_get_pad_crop(imx214, sd_state, format->pad, format->which); mode = v4l2_find_nearest_size(imx214_modes, ARRAY_SIZE(imx214_modes), width, height, format->format.width, format->format.height); __crop->width = mode->width; __crop->height = mode->height; __format = __imx214_get_pad_format(imx214, sd_state, format->pad, format->which); __format->width = __crop->width; __format->height = __crop->height; __format->code = IMX214_MBUS_CODE; __format->field = V4L2_FIELD_NONE; __format->colorspace = V4L2_COLORSPACE_SRGB; __format->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(__format->colorspace); __format->quantization = V4L2_MAP_QUANTIZATION_DEFAULT(true, __format->colorspace, __format->ycbcr_enc); __format->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(__format->colorspace); format->format = *__format; mutex_unlock(&imx214->mutex); return 0; } static int imx214_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) { struct imx214 *imx214 = to_imx214(sd); if (sel->target != V4L2_SEL_TGT_CROP) return -EINVAL; mutex_lock(&imx214->mutex); sel->r = *__imx214_get_pad_crop(imx214, sd_state, sel->pad, sel->which); mutex_unlock(&imx214->mutex); return 0; } static int imx214_entity_init_cfg(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state) { struct v4l2_subdev_format fmt = { }; fmt.which = sd_state ? 
V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE; fmt.format.width = imx214_modes[0].width; fmt.format.height = imx214_modes[0].height; imx214_set_format(subdev, sd_state, &fmt); return 0; } static int imx214_set_ctrl(struct v4l2_ctrl *ctrl) { struct imx214 *imx214 = container_of(ctrl->handler, struct imx214, ctrls); u8 vals[2]; int ret; /* * Applying V4L2 control value only happens * when power is up for streaming */ if (!pm_runtime_get_if_in_use(imx214->dev)) return 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: vals[1] = ctrl->val; vals[0] = ctrl->val >> 8; ret = regmap_bulk_write(imx214->regmap, 0x202, vals, 2); if (ret < 0) dev_err(imx214->dev, "Error %d\n", ret); ret = 0; break; default: ret = -EINVAL; } pm_runtime_put(imx214->dev); return ret; } static const struct v4l2_ctrl_ops imx214_ctrl_ops = { .s_ctrl = imx214_set_ctrl, }; #define MAX_CMD 4 static int imx214_write_table(struct imx214 *imx214, const struct reg_8 table[]) { u8 vals[MAX_CMD]; int i; int ret; for (; table->addr != IMX214_TABLE_END ; table++) { if (table->addr == IMX214_TABLE_WAIT_MS) { usleep_range(table->val * 1000, table->val * 1000 + 500); continue; } for (i = 0; i < MAX_CMD; i++) { if (table[i].addr != (table[0].addr + i)) break; vals[i] = table[i].val; } ret = regmap_bulk_write(imx214->regmap, table->addr, vals, i); if (ret) { dev_err(imx214->dev, "write_table error: %d\n", ret); return ret; } table += i - 1; } return 0; } static int imx214_start_streaming(struct imx214 *imx214) { const struct imx214_mode *mode; int ret; mutex_lock(&imx214->mutex); ret = imx214_write_table(imx214, mode_table_common); if (ret < 0) { dev_err(imx214->dev, "could not sent common table %d\n", ret); goto error; } mode = v4l2_find_nearest_size(imx214_modes, ARRAY_SIZE(imx214_modes), width, height, imx214->fmt.width, imx214->fmt.height); ret = imx214_write_table(imx214, mode->reg_table); if (ret < 0) { dev_err(imx214->dev, "could not sent mode table %d\n", ret); goto error; } ret = 
__v4l2_ctrl_handler_setup(&imx214->ctrls); if (ret < 0) { dev_err(imx214->dev, "could not sync v4l2 controls\n"); goto error; } ret = regmap_write(imx214->regmap, 0x100, 1); if (ret < 0) { dev_err(imx214->dev, "could not sent start table %d\n", ret); goto error; } mutex_unlock(&imx214->mutex); return 0; error: mutex_unlock(&imx214->mutex); return ret; } static int imx214_stop_streaming(struct imx214 *imx214) { int ret; ret = regmap_write(imx214->regmap, 0x100, 0); if (ret < 0) dev_err(imx214->dev, "could not sent stop table %d\n", ret); return ret; } static int imx214_s_stream(struct v4l2_subdev *subdev, int enable) { struct imx214 *imx214 = to_imx214(subdev); int ret; if (imx214->streaming == enable) return 0; if (enable) { ret = pm_runtime_resume_and_get(imx214->dev); if (ret < 0) return ret; ret = imx214_start_streaming(imx214); if (ret < 0) goto err_rpm_put; } else { ret = imx214_stop_streaming(imx214); if (ret < 0) goto err_rpm_put; pm_runtime_put(imx214->dev); } imx214->streaming = enable; return 0; err_rpm_put: pm_runtime_put(imx214->dev); return ret; } static int imx214_g_frame_interval(struct v4l2_subdev *subdev, struct v4l2_subdev_frame_interval *fival) { fival->interval.numerator = 1; fival->interval.denominator = IMX214_FPS; return 0; } static int imx214_enum_frame_interval(struct v4l2_subdev *subdev, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_interval_enum *fie) { const struct imx214_mode *mode; if (fie->index != 0) return -EINVAL; mode = v4l2_find_nearest_size(imx214_modes, ARRAY_SIZE(imx214_modes), width, height, fie->width, fie->height); fie->code = IMX214_MBUS_CODE; fie->width = mode->width; fie->height = mode->height; fie->interval.numerator = 1; fie->interval.denominator = IMX214_FPS; return 0; } static const struct v4l2_subdev_video_ops imx214_video_ops = { .s_stream = imx214_s_stream, .g_frame_interval = imx214_g_frame_interval, .s_frame_interval = imx214_g_frame_interval, }; static const struct v4l2_subdev_pad_ops 
imx214_subdev_pad_ops = { .enum_mbus_code = imx214_enum_mbus_code, .enum_frame_size = imx214_enum_frame_size, .enum_frame_interval = imx214_enum_frame_interval, .get_fmt = imx214_get_format, .set_fmt = imx214_set_format, .get_selection = imx214_get_selection, .init_cfg = imx214_entity_init_cfg, }; static const struct v4l2_subdev_ops imx214_subdev_ops = { .core = &imx214_core_ops, .video = &imx214_video_ops, .pad = &imx214_subdev_pad_ops, }; static const struct regmap_config sensor_regmap_config = { .reg_bits = 16, .val_bits = 8, .cache_type = REGCACHE_RBTREE, }; static int imx214_get_regulators(struct device *dev, struct imx214 *imx214) { unsigned int i; for (i = 0; i < IMX214_NUM_SUPPLIES; i++) imx214->supplies[i].supply = imx214_supply_name[i]; return devm_regulator_bulk_get(dev, IMX214_NUM_SUPPLIES, imx214->supplies); } static int imx214_parse_fwnode(struct device *dev) { struct fwnode_handle *endpoint; struct v4l2_fwnode_endpoint bus_cfg = { .bus_type = V4L2_MBUS_CSI2_DPHY, }; unsigned int i; int ret; endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(dev), NULL); if (!endpoint) { dev_err(dev, "endpoint node not found\n"); return -EINVAL; } ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &bus_cfg); if (ret) { dev_err(dev, "parsing endpoint node failed\n"); goto done; } for (i = 0; i < bus_cfg.nr_of_link_frequencies; i++) if (bus_cfg.link_frequencies[i] == IMX214_DEFAULT_LINK_FREQ) break; if (i == bus_cfg.nr_of_link_frequencies) { dev_err(dev, "link-frequencies %d not supported, Please review your DT\n", IMX214_DEFAULT_LINK_FREQ); ret = -EINVAL; goto done; } done: v4l2_fwnode_endpoint_free(&bus_cfg); fwnode_handle_put(endpoint); return ret; } static int __maybe_unused imx214_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct imx214 *imx214 = to_imx214(sd); if (imx214->streaming) imx214_stop_streaming(imx214); return 0; } static int __maybe_unused imx214_resume(struct device 
*dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *sd = i2c_get_clientdata(client); struct imx214 *imx214 = to_imx214(sd); int ret; if (imx214->streaming) { ret = imx214_start_streaming(imx214); if (ret) goto error; } return 0; error: imx214_stop_streaming(imx214); imx214->streaming = 0; return ret; } static int imx214_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct imx214 *imx214; static const s64 link_freq[] = { IMX214_DEFAULT_LINK_FREQ, }; static const struct v4l2_area unit_size = { .width = 1120, .height = 1120, }; int ret; ret = imx214_parse_fwnode(dev); if (ret) return ret; imx214 = devm_kzalloc(dev, sizeof(*imx214), GFP_KERNEL); if (!imx214) return -ENOMEM; imx214->dev = dev; imx214->xclk = devm_clk_get(dev, NULL); if (IS_ERR(imx214->xclk)) { dev_err(dev, "could not get xclk"); return PTR_ERR(imx214->xclk); } ret = clk_set_rate(imx214->xclk, IMX214_DEFAULT_CLK_FREQ); if (ret) { dev_err(dev, "could not set xclk frequency\n"); return ret; } ret = imx214_get_regulators(dev, imx214); if (ret < 0) { dev_err(dev, "cannot get regulators\n"); return ret; } imx214->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(imx214->enable_gpio)) { dev_err(dev, "cannot get enable gpio\n"); return PTR_ERR(imx214->enable_gpio); } imx214->regmap = devm_regmap_init_i2c(client, &sensor_regmap_config); if (IS_ERR(imx214->regmap)) { dev_err(dev, "regmap init failed\n"); return PTR_ERR(imx214->regmap); } v4l2_i2c_subdev_init(&imx214->sd, client, &imx214_subdev_ops); /* * Enable power initially, to avoid warnings * from clk_disable on power_off */ imx214_power_on(imx214->dev); pm_runtime_set_active(imx214->dev); pm_runtime_enable(imx214->dev); pm_runtime_idle(imx214->dev); v4l2_ctrl_handler_init(&imx214->ctrls, 3); imx214->pixel_rate = v4l2_ctrl_new_std(&imx214->ctrls, NULL, V4L2_CID_PIXEL_RATE, 0, IMX214_DEFAULT_PIXEL_RATE, 1, IMX214_DEFAULT_PIXEL_RATE); imx214->link_freq = v4l2_ctrl_new_int_menu(&imx214->ctrls, 
NULL, V4L2_CID_LINK_FREQ, ARRAY_SIZE(link_freq) - 1, 0, link_freq); if (imx214->link_freq) imx214->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY; /* * WARNING! * Values obtained reverse engineering blobs and/or devices. * Ranges and functionality might be wrong. * * Sony, please release some register set documentation for the * device. * * Yours sincerely, Ricardo. */ imx214->exposure = v4l2_ctrl_new_std(&imx214->ctrls, &imx214_ctrl_ops, V4L2_CID_EXPOSURE, 0, 3184, 1, 0x0c70); imx214->unit_size = v4l2_ctrl_new_std_compound(&imx214->ctrls, NULL, V4L2_CID_UNIT_CELL_SIZE, v4l2_ctrl_ptr_create((void *)&unit_size)); ret = imx214->ctrls.error; if (ret) { dev_err(&client->dev, "%s control init failed (%d)\n", __func__, ret); goto free_ctrl; } imx214->sd.ctrl_handler = &imx214->ctrls; mutex_init(&imx214->mutex); imx214->ctrls.lock = &imx214->mutex; imx214->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; imx214->pad.flags = MEDIA_PAD_FL_SOURCE; imx214->sd.dev = &client->dev; imx214->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ret = media_entity_pads_init(&imx214->sd.entity, 1, &imx214->pad); if (ret < 0) { dev_err(dev, "could not register media entity\n"); goto free_ctrl; } imx214_entity_init_cfg(&imx214->sd, NULL); ret = v4l2_async_register_subdev_sensor(&imx214->sd); if (ret < 0) { dev_err(dev, "could not register v4l2 device\n"); goto free_entity; } return 0; free_entity: media_entity_cleanup(&imx214->sd.entity); free_ctrl: mutex_destroy(&imx214->mutex); v4l2_ctrl_handler_free(&imx214->ctrls); pm_runtime_disable(imx214->dev); return ret; } static void imx214_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct imx214 *imx214 = to_imx214(sd); v4l2_async_unregister_subdev(&imx214->sd); media_entity_cleanup(&imx214->sd.entity); v4l2_ctrl_handler_free(&imx214->ctrls); pm_runtime_disable(&client->dev); pm_runtime_set_suspended(&client->dev); mutex_destroy(&imx214->mutex); } static const struct of_device_id imx214_of_match[] = { { .compatible 
= "sony,imx214" }, { } }; MODULE_DEVICE_TABLE(of, imx214_of_match); static const struct dev_pm_ops imx214_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(imx214_suspend, imx214_resume) SET_RUNTIME_PM_OPS(imx214_power_off, imx214_power_on, NULL) }; static struct i2c_driver imx214_i2c_driver = { .driver = { .of_match_table = imx214_of_match, .pm = &imx214_pm_ops, .name = "imx214", }, .probe = imx214_probe, .remove = imx214_remove, }; module_i2c_driver(imx214_i2c_driver); MODULE_DESCRIPTION("Sony IMX214 Camera driver"); MODULE_AUTHOR("Ricardo Ribalda <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/media/i2c/imx214.c
// SPDX-License-Identifier: GPL-2.0 /* * Driver for RJ54N1CB0C CMOS Image Sensor from Sharp * * Copyright (C) 2018, Jacopo Mondi <[email protected]> * * Copyright (C) 2009, Guennadi Liakhovetski <[email protected]> */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/v4l2-mediabus.h> #include <linux/videodev2.h> #include <media/i2c/rj54n1cb0c.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-subdev.h> #define RJ54N1_DEV_CODE 0x0400 #define RJ54N1_DEV_CODE2 0x0401 #define RJ54N1_OUT_SEL 0x0403 #define RJ54N1_XY_OUTPUT_SIZE_S_H 0x0404 #define RJ54N1_X_OUTPUT_SIZE_S_L 0x0405 #define RJ54N1_Y_OUTPUT_SIZE_S_L 0x0406 #define RJ54N1_XY_OUTPUT_SIZE_P_H 0x0407 #define RJ54N1_X_OUTPUT_SIZE_P_L 0x0408 #define RJ54N1_Y_OUTPUT_SIZE_P_L 0x0409 #define RJ54N1_LINE_LENGTH_PCK_S_H 0x040a #define RJ54N1_LINE_LENGTH_PCK_S_L 0x040b #define RJ54N1_LINE_LENGTH_PCK_P_H 0x040c #define RJ54N1_LINE_LENGTH_PCK_P_L 0x040d #define RJ54N1_RESIZE_N 0x040e #define RJ54N1_RESIZE_N_STEP 0x040f #define RJ54N1_RESIZE_STEP 0x0410 #define RJ54N1_RESIZE_HOLD_H 0x0411 #define RJ54N1_RESIZE_HOLD_L 0x0412 #define RJ54N1_H_OBEN_OFS 0x0413 #define RJ54N1_V_OBEN_OFS 0x0414 #define RJ54N1_RESIZE_CONTROL 0x0415 #define RJ54N1_STILL_CONTROL 0x0417 #define RJ54N1_INC_USE_SEL_H 0x0425 #define RJ54N1_INC_USE_SEL_L 0x0426 #define RJ54N1_MIRROR_STILL_MODE 0x0427 #define RJ54N1_INIT_START 0x0428 #define RJ54N1_SCALE_1_2_LEV 0x0429 #define RJ54N1_SCALE_4_LEV 0x042a #define RJ54N1_Y_GAIN 0x04d8 #define RJ54N1_APT_GAIN_UP 0x04fa #define RJ54N1_RA_SEL_UL 0x0530 #define RJ54N1_BYTE_SWAP 0x0531 #define RJ54N1_OUT_SIGPO 0x053b #define RJ54N1_WB_SEL_WEIGHT_I 0x054e #define RJ54N1_BIT8_WB 0x0569 #define RJ54N1_HCAPS_WB 0x056a #define RJ54N1_VCAPS_WB 0x056b #define RJ54N1_HCAPE_WB 0x056c #define RJ54N1_VCAPE_WB 0x056d #define RJ54N1_EXPOSURE_CONTROL 0x058c #define 
RJ54N1_FRAME_LENGTH_S_H 0x0595 #define RJ54N1_FRAME_LENGTH_S_L 0x0596 #define RJ54N1_FRAME_LENGTH_P_H 0x0597 #define RJ54N1_FRAME_LENGTH_P_L 0x0598 #define RJ54N1_PEAK_H 0x05b7 #define RJ54N1_PEAK_50 0x05b8 #define RJ54N1_PEAK_60 0x05b9 #define RJ54N1_PEAK_DIFF 0x05ba #define RJ54N1_IOC 0x05ef #define RJ54N1_TG_BYPASS 0x0700 #define RJ54N1_PLL_L 0x0701 #define RJ54N1_PLL_N 0x0702 #define RJ54N1_PLL_EN 0x0704 #define RJ54N1_RATIO_TG 0x0706 #define RJ54N1_RATIO_T 0x0707 #define RJ54N1_RATIO_R 0x0708 #define RJ54N1_RAMP_TGCLK_EN 0x0709 #define RJ54N1_OCLK_DSP 0x0710 #define RJ54N1_RATIO_OP 0x0711 #define RJ54N1_RATIO_O 0x0712 #define RJ54N1_OCLK_SEL_EN 0x0713 #define RJ54N1_CLK_RST 0x0717 #define RJ54N1_RESET_STANDBY 0x0718 #define RJ54N1_FWFLG 0x07fe #define E_EXCLK (1 << 7) #define SOFT_STDBY (1 << 4) #define SEN_RSTX (1 << 2) #define TG_RSTX (1 << 1) #define DSP_RSTX (1 << 0) #define RESIZE_HOLD_SEL (1 << 2) #define RESIZE_GO (1 << 1) /* * When cropping, the camera automatically centers the cropped region, there * doesn't seem to be a way to specify an explicit location of the rectangle. 
*/ #define RJ54N1_COLUMN_SKIP 0 #define RJ54N1_ROW_SKIP 0 #define RJ54N1_MAX_WIDTH 1600 #define RJ54N1_MAX_HEIGHT 1200 #define PLL_L 2 #define PLL_N 0x31 /* I2C addresses: 0x50, 0x51, 0x60, 0x61 */ /* RJ54N1CB0C has only one fixed colorspace per pixelcode */ struct rj54n1_datafmt { u32 code; enum v4l2_colorspace colorspace; }; /* Find a data format by a pixel code in an array */ static const struct rj54n1_datafmt *rj54n1_find_datafmt( u32 code, const struct rj54n1_datafmt *fmt, int n) { int i; for (i = 0; i < n; i++) if (fmt[i].code == code) return fmt + i; return NULL; } static const struct rj54n1_datafmt rj54n1_colour_fmts[] = { {MEDIA_BUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG}, {MEDIA_BUS_FMT_YVYU8_2X8, V4L2_COLORSPACE_JPEG}, {MEDIA_BUS_FMT_RGB565_2X8_LE, V4L2_COLORSPACE_SRGB}, {MEDIA_BUS_FMT_RGB565_2X8_BE, V4L2_COLORSPACE_SRGB}, {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE, V4L2_COLORSPACE_SRGB}, {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE, V4L2_COLORSPACE_SRGB}, {MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE, V4L2_COLORSPACE_SRGB}, {MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE, V4L2_COLORSPACE_SRGB}, {MEDIA_BUS_FMT_SBGGR10_1X10, V4L2_COLORSPACE_SRGB}, }; struct rj54n1_clock_div { u8 ratio_tg; /* can be 0 or an odd number */ u8 ratio_t; u8 ratio_r; u8 ratio_op; u8 ratio_o; }; struct rj54n1 { struct v4l2_subdev subdev; struct v4l2_ctrl_handler hdl; struct clk *clk; struct gpio_desc *pwup_gpio; struct gpio_desc *enable_gpio; struct rj54n1_clock_div clk_div; const struct rj54n1_datafmt *fmt; struct v4l2_rect rect; /* Sensor window */ unsigned int tgclk_mhz; bool auto_wb; unsigned short width; /* Output window */ unsigned short height; unsigned short resize; /* Sensor * 1024 / resize = Output */ unsigned short scale; u8 bank; }; struct rj54n1_reg_val { u16 reg; u8 val; }; static const struct rj54n1_reg_val bank_4[] = { {0x417, 0}, {0x42c, 0}, {0x42d, 0xf0}, {0x42e, 0}, {0x42f, 0x50}, {0x430, 0xf5}, {0x431, 0x16}, {0x432, 0x20}, {0x433, 0}, {0x434, 0xc8}, {0x43c, 8}, {0x43e, 0x90}, {0x445, 0x83}, {0x4ba, 
0x58}, {0x4bb, 4}, {0x4bc, 0x20}, {0x4db, 4}, {0x4fe, 2}, }; static const struct rj54n1_reg_val bank_5[] = { {0x514, 0}, {0x516, 0}, {0x518, 0}, {0x51a, 0}, {0x51d, 0xff}, {0x56f, 0x28}, {0x575, 0x40}, {0x5bc, 0x48}, {0x5c1, 6}, {0x5e5, 0x11}, {0x5e6, 0x43}, {0x5e7, 0x33}, {0x5e8, 0x21}, {0x5e9, 0x30}, {0x5ea, 0x0}, {0x5eb, 0xa5}, {0x5ec, 0xff}, {0x5fe, 2}, }; static const struct rj54n1_reg_val bank_7[] = { {0x70a, 0}, {0x714, 0xff}, {0x715, 0xff}, {0x716, 0x1f}, {0x7FE, 2}, }; static const struct rj54n1_reg_val bank_8[] = { {0x800, 0x00}, {0x801, 0x01}, {0x802, 0x61}, {0x805, 0x00}, {0x806, 0x00}, {0x807, 0x00}, {0x808, 0x00}, {0x809, 0x01}, {0x80A, 0x61}, {0x80B, 0x00}, {0x80C, 0x01}, {0x80D, 0x00}, {0x80E, 0x00}, {0x80F, 0x00}, {0x810, 0x00}, {0x811, 0x01}, {0x812, 0x61}, {0x813, 0x00}, {0x814, 0x11}, {0x815, 0x00}, {0x816, 0x41}, {0x817, 0x00}, {0x818, 0x51}, {0x819, 0x01}, {0x81A, 0x1F}, {0x81B, 0x00}, {0x81C, 0x01}, {0x81D, 0x00}, {0x81E, 0x11}, {0x81F, 0x00}, {0x820, 0x41}, {0x821, 0x00}, {0x822, 0x51}, {0x823, 0x00}, {0x824, 0x00}, {0x825, 0x00}, {0x826, 0x47}, {0x827, 0x01}, {0x828, 0x4F}, {0x829, 0x00}, {0x82A, 0x00}, {0x82B, 0x00}, {0x82C, 0x30}, {0x82D, 0x00}, {0x82E, 0x40}, {0x82F, 0x00}, {0x830, 0xB3}, {0x831, 0x00}, {0x832, 0xE3}, {0x833, 0x00}, {0x834, 0x00}, {0x835, 0x00}, {0x836, 0x00}, {0x837, 0x00}, {0x838, 0x00}, {0x839, 0x01}, {0x83A, 0x61}, {0x83B, 0x00}, {0x83C, 0x01}, {0x83D, 0x00}, {0x83E, 0x00}, {0x83F, 0x00}, {0x840, 0x00}, {0x841, 0x01}, {0x842, 0x61}, {0x843, 0x00}, {0x844, 0x1D}, {0x845, 0x00}, {0x846, 0x00}, {0x847, 0x00}, {0x848, 0x00}, {0x849, 0x01}, {0x84A, 0x1F}, {0x84B, 0x00}, {0x84C, 0x05}, {0x84D, 0x00}, {0x84E, 0x19}, {0x84F, 0x01}, {0x850, 0x21}, {0x851, 0x01}, {0x852, 0x5D}, {0x853, 0x00}, {0x854, 0x00}, {0x855, 0x00}, {0x856, 0x19}, {0x857, 0x01}, {0x858, 0x21}, {0x859, 0x00}, {0x85A, 0x00}, {0x85B, 0x00}, {0x85C, 0x00}, {0x85D, 0x00}, {0x85E, 0x00}, {0x85F, 0x00}, {0x860, 0xB3}, {0x861, 0x00}, {0x862, 0xE3}, {0x863, 
0x00}, {0x864, 0x00}, {0x865, 0x00}, {0x866, 0x00}, {0x867, 0x00}, {0x868, 0x00}, {0x869, 0xE2}, {0x86A, 0x00}, {0x86B, 0x01}, {0x86C, 0x06}, {0x86D, 0x00}, {0x86E, 0x00}, {0x86F, 0x00}, {0x870, 0x60}, {0x871, 0x8C}, {0x872, 0x10}, {0x873, 0x00}, {0x874, 0xE0}, {0x875, 0x00}, {0x876, 0x27}, {0x877, 0x01}, {0x878, 0x00}, {0x879, 0x00}, {0x87A, 0x00}, {0x87B, 0x03}, {0x87C, 0x00}, {0x87D, 0x00}, {0x87E, 0x00}, {0x87F, 0x00}, {0x880, 0x00}, {0x881, 0x00}, {0x882, 0x00}, {0x883, 0x00}, {0x884, 0x00}, {0x885, 0x00}, {0x886, 0xF8}, {0x887, 0x00}, {0x888, 0x03}, {0x889, 0x00}, {0x88A, 0x64}, {0x88B, 0x00}, {0x88C, 0x03}, {0x88D, 0x00}, {0x88E, 0xB1}, {0x88F, 0x00}, {0x890, 0x03}, {0x891, 0x01}, {0x892, 0x1D}, {0x893, 0x00}, {0x894, 0x03}, {0x895, 0x01}, {0x896, 0x4B}, {0x897, 0x00}, {0x898, 0xE5}, {0x899, 0x00}, {0x89A, 0x01}, {0x89B, 0x00}, {0x89C, 0x01}, {0x89D, 0x04}, {0x89E, 0xC8}, {0x89F, 0x00}, {0x8A0, 0x01}, {0x8A1, 0x01}, {0x8A2, 0x61}, {0x8A3, 0x00}, {0x8A4, 0x01}, {0x8A5, 0x00}, {0x8A6, 0x00}, {0x8A7, 0x00}, {0x8A8, 0x00}, {0x8A9, 0x00}, {0x8AA, 0x7F}, {0x8AB, 0x03}, {0x8AC, 0x00}, {0x8AD, 0x00}, {0x8AE, 0x00}, {0x8AF, 0x00}, {0x8B0, 0x00}, {0x8B1, 0x00}, {0x8B6, 0x00}, {0x8B7, 0x01}, {0x8B8, 0x00}, {0x8B9, 0x00}, {0x8BA, 0x02}, {0x8BB, 0x00}, {0x8BC, 0xFF}, {0x8BD, 0x00}, {0x8FE, 2}, }; static const struct rj54n1_reg_val bank_10[] = { {0x10bf, 0x69} }; /* Clock dividers - these are default register values, divider = register + 1 */ static const struct rj54n1_clock_div clk_div = { .ratio_tg = 3 /* default: 5 */, .ratio_t = 4 /* default: 1 */, .ratio_r = 4 /* default: 0 */, .ratio_op = 1 /* default: 5 */, .ratio_o = 9 /* default: 0 */, }; static struct rj54n1 *to_rj54n1(const struct i2c_client *client) { return container_of(i2c_get_clientdata(client), struct rj54n1, subdev); } static int reg_read(struct i2c_client *client, const u16 reg) { struct rj54n1 *rj54n1 = to_rj54n1(client); int ret; /* set bank */ if (rj54n1->bank != reg >> 8) { dev_dbg(&client->dev, 
"[0x%x] = 0x%x\n", 0xff, reg >> 8); ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8); if (ret < 0) return ret; rj54n1->bank = reg >> 8; } return i2c_smbus_read_byte_data(client, reg & 0xff); } static int reg_write(struct i2c_client *client, const u16 reg, const u8 data) { struct rj54n1 *rj54n1 = to_rj54n1(client); int ret; /* set bank */ if (rj54n1->bank != reg >> 8) { dev_dbg(&client->dev, "[0x%x] = 0x%x\n", 0xff, reg >> 8); ret = i2c_smbus_write_byte_data(client, 0xff, reg >> 8); if (ret < 0) return ret; rj54n1->bank = reg >> 8; } dev_dbg(&client->dev, "[0x%x] = 0x%x\n", reg & 0xff, data); return i2c_smbus_write_byte_data(client, reg & 0xff, data); } static int reg_set(struct i2c_client *client, const u16 reg, const u8 data, const u8 mask) { int ret; ret = reg_read(client, reg); if (ret < 0) return ret; return reg_write(client, reg, (ret & ~mask) | (data & mask)); } static int reg_write_multiple(struct i2c_client *client, const struct rj54n1_reg_val *rv, const int n) { int i, ret; for (i = 0; i < n; i++) { ret = reg_write(client, rv->reg, rv->val); if (ret < 0) return ret; rv++; } return 0; } static int rj54n1_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->pad || code->index >= ARRAY_SIZE(rj54n1_colour_fmts)) return -EINVAL; code->code = rj54n1_colour_fmts[code->index].code; return 0; } static int rj54n1_s_stream(struct v4l2_subdev *sd, int enable) { struct i2c_client *client = v4l2_get_subdevdata(sd); /* Switch between preview and still shot modes */ return reg_set(client, RJ54N1_STILL_CONTROL, (!enable) << 7, 0x80); } static int rj54n1_set_rect(struct i2c_client *client, u16 reg_x, u16 reg_y, u16 reg_xy, u32 width, u32 height) { int ret; ret = reg_write(client, reg_xy, ((width >> 4) & 0x70) | ((height >> 8) & 7)); if (!ret) ret = reg_write(client, reg_x, width & 0xff); if (!ret) ret = reg_write(client, reg_y, height & 0xff); return ret; } /* * Some commands, specifically 
certain initialisation sequences, require * a commit operation. */ static int rj54n1_commit(struct i2c_client *client) { int ret = reg_write(client, RJ54N1_INIT_START, 1); msleep(10); if (!ret) ret = reg_write(client, RJ54N1_INIT_START, 0); return ret; } static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h, s32 *out_w, s32 *out_h); static int rj54n1_set_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct rj54n1 *rj54n1 = to_rj54n1(client); const struct v4l2_rect *rect = &sel->r; int output_w, output_h, input_w = rect->width, input_h = rect->height; int ret; if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE || sel->target != V4L2_SEL_TGT_CROP) return -EINVAL; /* arbitrary minimum width and height, edges unimportant */ v4l_bound_align_image(&input_w, 8, RJ54N1_MAX_WIDTH, 0, &input_h, 8, RJ54N1_MAX_HEIGHT, 0, 0); output_w = (input_w * 1024 + rj54n1->resize / 2) / rj54n1->resize; output_h = (input_h * 1024 + rj54n1->resize / 2) / rj54n1->resize; dev_dbg(&client->dev, "Scaling for %dx%d : %u = %dx%d\n", input_w, input_h, rj54n1->resize, output_w, output_h); ret = rj54n1_sensor_scale(sd, &input_w, &input_h, &output_w, &output_h); if (ret < 0) return ret; rj54n1->width = output_w; rj54n1->height = output_h; rj54n1->resize = ret; rj54n1->rect.width = input_w; rj54n1->rect.height = input_h; return 0; } static int rj54n1_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct rj54n1 *rj54n1 = to_rj54n1(client); if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) return -EINVAL; switch (sel->target) { case V4L2_SEL_TGT_CROP_BOUNDS: sel->r.left = RJ54N1_COLUMN_SKIP; sel->r.top = RJ54N1_ROW_SKIP; sel->r.width = RJ54N1_MAX_WIDTH; sel->r.height = RJ54N1_MAX_HEIGHT; return 0; case V4L2_SEL_TGT_CROP: sel->r = rj54n1->rect; return 0; default: 
return -EINVAL;
	}
}

/* Report the currently configured active format (pad 0 only). */
static int rj54n1_get_fmt(struct v4l2_subdev *sd,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *mf = &format->format;
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct rj54n1 *rj54n1 = to_rj54n1(client);

	if (format->pad)
		return -EINVAL;

	mf->code = rj54n1->fmt->code;
	mf->colorspace = rj54n1->fmt->colorspace;
	mf->ycbcr_enc = V4L2_YCBCR_ENC_601;
	mf->xfer_func = V4L2_XFER_FUNC_SRGB;
	mf->quantization = V4L2_QUANTIZATION_DEFAULT;
	mf->field = V4L2_FIELD_NONE;
	mf->width = rj54n1->width;
	mf->height = rj54n1->height;

	return 0;
}

/*
 * The actual geometry configuration routine. It scales the input window into
 * the output one, updates the window sizes and returns an error or the resize
 * coefficient on success. Note: we only use the "Fixed Scaling" on this camera.
 */
static int rj54n1_sensor_scale(struct v4l2_subdev *sd, s32 *in_w, s32 *in_h,
			       s32 *out_w, s32 *out_h)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	unsigned int skip, resize, input_w = *in_w, input_h = *in_h,
		output_w = *out_w, output_h = *out_h;
	u16 inc_sel, wb_bit8, wb_left, wb_right, wb_top, wb_bottom;
	unsigned int peak, peak_50, peak_60;
	int ret;

	/*
	 * We have a problem with crops, where the window is larger than 512x384
	 * and output window is larger than a half of the input one. In this
	 * case we have to either reduce the input window to equal or below
	 * 512x384 or the output window to equal or below 1/2 of the input.
	 */
	if (output_w > max(512U, input_w / 2)) {
		if (2 * output_w > RJ54N1_MAX_WIDTH) {
			input_w = RJ54N1_MAX_WIDTH;
			output_w = RJ54N1_MAX_WIDTH / 2;
		} else {
			input_w = output_w * 2;
		}

		dev_dbg(&client->dev, "Adjusted output width: in %u, out %u\n",
			input_w, output_w);
	}

	if (output_h > max(384U, input_h / 2)) {
		if (2 * output_h > RJ54N1_MAX_HEIGHT) {
			input_h = RJ54N1_MAX_HEIGHT;
			output_h = RJ54N1_MAX_HEIGHT / 2;
		} else {
			input_h = output_h * 2;
		}

		dev_dbg(&client->dev, "Adjusted output height: in %u, out %u\n",
			input_h, output_h);
	}

	/* Idea: use the read mode for snapshots, handle separate geometries */
	ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_S_L,
			      RJ54N1_Y_OUTPUT_SIZE_S_L,
			      RJ54N1_XY_OUTPUT_SIZE_S_H, output_w, output_h);
	if (!ret)
		ret = rj54n1_set_rect(client, RJ54N1_X_OUTPUT_SIZE_P_L,
			      RJ54N1_Y_OUTPUT_SIZE_P_L,
			      RJ54N1_XY_OUTPUT_SIZE_P_H, output_w, output_h);

	if (ret < 0)
		return ret;

	/* resize is a 1024-based fixed-point input/output ratio */
	if (output_w > input_w && output_h > input_h) {
		input_w = output_w;
		input_h = output_h;

		resize = 1024;
	} else {
		unsigned int resize_x, resize_y;

		resize_x = (input_w * 1024 + output_w / 2) / output_w;
		resize_y = (input_h * 1024 + output_h / 2) / output_h;

		/* We want max(resize_x, resize_y), check if it still fits */
		if (resize_x > resize_y &&
		    (output_h * resize_x + 512) / 1024 > RJ54N1_MAX_HEIGHT)
			resize = (RJ54N1_MAX_HEIGHT * 1024 + output_h / 2) /
				output_h;
		else if (resize_y > resize_x &&
			 (output_w * resize_y + 512) / 1024 > RJ54N1_MAX_WIDTH)
			resize = (RJ54N1_MAX_WIDTH * 1024 + output_w / 2) /
				output_w;
		else
			resize = max(resize_x, resize_y);

		/* Prohibited value ranges */
		switch (resize) {
		case 2040 ... 2047:
			resize = 2039;
			break;
		case 4080 ... 4095:
			resize = 4079;
			break;
		case 8160 ... 8191:
			resize = 8159;
			break;
		case 16320 ... 16384:
			resize = 16319;
		}
	}

	/* Set scaling */
	ret = reg_write(client, RJ54N1_RESIZE_HOLD_L, resize & 0xff);
	if (!ret)
		ret = reg_write(client, RJ54N1_RESIZE_HOLD_H, resize >> 8);

	if (ret < 0)
		return ret;

	/*
	 * Configure a skipping bitmask. The sensor will select a skipping value
	 * among set bits automatically. This is very unclear in the datasheet
	 * too. I was told, in this register one enables all skipping values,
	 * that are required for a specific resize, and the camera selects
	 * automatically, which ones to use. But it is unclear how to identify,
	 * which cropping values are needed. Secondly, why don't we just set all
	 * bits and let the camera choose? Would it increase processing time and
	 * reduce the framerate? Using 0xfffc for INC_USE_SEL doesn't seem to
	 * improve the image quality or stability for larger frames (see comment
	 * above), but I didn't check the framerate.
	 */
	skip = min(resize / 1024, 15U);

	inc_sel = 1 << skip;

	if (inc_sel <= 2)
		inc_sel = 0xc;
	else if (resize & 1023 && skip < 15)
		inc_sel |= 1 << (skip + 1);

	ret = reg_write(client, RJ54N1_INC_USE_SEL_L, inc_sel & 0xfc);
	if (!ret)
		ret = reg_write(client, RJ54N1_INC_USE_SEL_H, inc_sel >> 8);

	if (!rj54n1->auto_wb) {
		/* Auto white balance window */
		wb_left	  = output_w / 16;
		wb_right  = (3 * output_w / 4 - 3) / 4;
		wb_top	  = output_h / 16;
		wb_bottom = (3 * output_h / 4 - 3) / 4;
		/* High bits of all four window coordinates, packed */
		wb_bit8	  = ((wb_left >> 2) & 0x40) | ((wb_top >> 4) & 0x10) |
			((wb_right >> 6) & 4) | ((wb_bottom >> 8) & 1);

		if (!ret)
			ret = reg_write(client, RJ54N1_BIT8_WB, wb_bit8);
		if (!ret)
			ret = reg_write(client, RJ54N1_HCAPS_WB, wb_left);
		if (!ret)
			ret = reg_write(client, RJ54N1_VCAPS_WB, wb_top);
		if (!ret)
			ret = reg_write(client, RJ54N1_HCAPE_WB, wb_right);
		if (!ret)
			ret = reg_write(client, RJ54N1_VCAPE_WB, wb_bottom);
	}

	/* Antiflicker */
	peak = 12 * RJ54N1_MAX_WIDTH * (1 << 14) * resize / rj54n1->tgclk_mhz /
		10000;
	peak_50 = peak / 6;
	peak_60 = peak / 5;

	if (!ret)
		ret = reg_write(client, RJ54N1_PEAK_H,
				((peak_50 >> 4) & 0xf0) | (peak_60 >> 8));
	if (!ret)
		ret = reg_write(client, RJ54N1_PEAK_50, peak_50);
	if (!ret)
		ret = reg_write(client, RJ54N1_PEAK_60, peak_60);
	if (!ret)
		ret = reg_write(client, RJ54N1_PEAK_DIFF, peak / 150);

	/* Start resizing */
	if (!ret)
		ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
				RESIZE_HOLD_SEL | RESIZE_GO | 1);

	if (ret < 0)
		return ret;

	/* Constant taken from manufacturer's example */
	msleep(230);

	ret = reg_write(client, RJ54N1_RESIZE_CONTROL, RESIZE_HOLD_SEL | 1);
	if (ret < 0)
		return ret;

	/* Report back the effective input window for the chosen resize */
	*in_w = (output_w * resize + 512) / 1024;
	*in_h = (output_h * resize + 512) / 1024;
	*out_w = output_w;
	*out_h = output_h;

	dev_dbg(&client->dev, "Scaled for %dx%d : %u = %ux%u, skip %u\n",
		*in_w, *in_h, resize, output_w, output_h, skip);

	return resize;
}

/*
 * Bring up the sensor clock tree: external clock, PLL, timing-generator
 * and output-clock dividers, in the order required by the hardware.
 */
static int rj54n1_set_clock(struct i2c_client *client)
{
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	int ret;

	/* Enable external clock */
	ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK | SOFT_STDBY);
	/* Leave stand-by. Note: use this when implementing suspend / resume */
	if (!ret)
		ret = reg_write(client, RJ54N1_RESET_STANDBY, E_EXCLK);

	if (!ret)
		ret = reg_write(client, RJ54N1_PLL_L, PLL_L);
	if (!ret)
		ret = reg_write(client, RJ54N1_PLL_N, PLL_N);

	/* TGCLK dividers */
	if (!ret)
		ret = reg_write(client, RJ54N1_RATIO_TG,
				rj54n1->clk_div.ratio_tg);
	if (!ret)
		ret = reg_write(client, RJ54N1_RATIO_T,
				rj54n1->clk_div.ratio_t);
	if (!ret)
		ret = reg_write(client, RJ54N1_RATIO_R,
				rj54n1->clk_div.ratio_r);

	/* Enable TGCLK & RAMP */
	if (!ret)
		ret = reg_write(client, RJ54N1_RAMP_TGCLK_EN, 3);

	/* Disable clock output */
	if (!ret)
		ret = reg_write(client, RJ54N1_OCLK_DSP, 0);

	/* Set divisors */
	if (!ret)
		ret = reg_write(client, RJ54N1_RATIO_OP,
				rj54n1->clk_div.ratio_op);
	if (!ret)
		ret = reg_write(client, RJ54N1_RATIO_O,
				rj54n1->clk_div.ratio_o);

	/* Enable OCLK */
	if (!ret)
		ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1);

	/* Use PLL for Timing Generator, write 2 to reserved bits */
	if (!ret)
		ret = reg_write(client, RJ54N1_TG_BYPASS, 2);

	/* Take sensor out of reset */
	if (!ret)
		ret = reg_write(client, RJ54N1_RESET_STANDBY,
				E_EXCLK | SEN_RSTX);

	/* Enable PLL */
	if (!ret)
		ret = reg_write(client, RJ54N1_PLL_EN, 1);

	/* Wait for PLL to stabilise */
	msleep(10);

	/* Enable clock to
frequency divider */
	if (!ret)
		ret = reg_write(client, RJ54N1_CLK_RST, 1);

	/* Read back CLK_RST to confirm the clock reset took effect */
	if (!ret)
		ret = reg_read(client, RJ54N1_CLK_RST);
	if (ret != 1) {
		dev_err(&client->dev,
			"Resetting RJ54N1CB0C clock failed: %d!\n", ret);
		return -EIO;
	}

	/* Start the PLL */
	ret = reg_set(client, RJ54N1_OCLK_DSP, 1, 1);

	/* Enable OCLK */
	if (!ret)
		ret = reg_write(client, RJ54N1_OCLK_SEL_EN, 1);

	return ret;
}

/*
 * Full one-time sensor initialisation: clock bring-up, register-table
 * download (banks 4/5/7/8/10), scaling mode, mirroring and reset release.
 * Sequencing and delays follow the manufacturer's example.
 */
static int rj54n1_reg_init(struct i2c_client *client)
{
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	int ret = rj54n1_set_clock(client);

	if (!ret)
		ret = reg_write_multiple(client, bank_7, ARRAY_SIZE(bank_7));
	if (!ret)
		ret = reg_write_multiple(client, bank_10, ARRAY_SIZE(bank_10));

	/* Set binning divisors */
	if (!ret)
		ret = reg_write(client, RJ54N1_SCALE_1_2_LEV, 3 | (7 << 4));
	if (!ret)
		ret = reg_write(client, RJ54N1_SCALE_4_LEV, 0xf);

	/* Switch to fixed resize mode */
	if (!ret)
		ret = reg_write(client, RJ54N1_RESIZE_CONTROL,
				RESIZE_HOLD_SEL | 1);

	/* Set gain */
	if (!ret)
		ret = reg_write(client, RJ54N1_Y_GAIN, 0x84);

	/*
	 * Mirror the image back: default is upside down and left-to-right...
	 * Set manual preview / still shot switching
	 */
	if (!ret)
		ret = reg_write(client, RJ54N1_MIRROR_STILL_MODE, 0x27);

	if (!ret)
		ret = reg_write_multiple(client, bank_4, ARRAY_SIZE(bank_4));

	/* Auto exposure area */
	if (!ret)
		ret = reg_write(client, RJ54N1_EXPOSURE_CONTROL, 0x80);
	/* Check current auto WB config */
	if (!ret)
		ret = reg_read(client, RJ54N1_WB_SEL_WEIGHT_I);
	if (ret >= 0) {
		/* Bit 7 of WB_SEL_WEIGHT_I reflects the auto-WB state */
		rj54n1->auto_wb = ret & 0x80;
		ret = reg_write_multiple(client, bank_5, ARRAY_SIZE(bank_5));
	}
	if (!ret)
		ret = reg_write_multiple(client, bank_8, ARRAY_SIZE(bank_8));

	if (!ret)
		ret = reg_write(client, RJ54N1_RESET_STANDBY,
				E_EXCLK | DSP_RSTX | SEN_RSTX);

	/* Commit init */
	if (!ret)
		ret = rj54n1_commit(client);

	/* Take DSP, TG, sensor out of reset */
	if (!ret)
		ret = reg_write(client, RJ54N1_RESET_STANDBY,
				E_EXCLK | DSP_RSTX | TG_RSTX | SEN_RSTX);

	/* Start register update? Same register as 0x?FE in many bank_* sets */
	if (!ret)
		ret = reg_write(client, RJ54N1_FWFLG, 2);

	/* Constant taken from manufacturer's example */
	msleep(700);

	return ret;
}

/*
 * Set the active (or try) format: pick the closest supported media-bus
 * code, program output routing/byte order for it, then run the scaler.
 * Raw Bayer modes need stricter alignment (align = 1).
 */
static int rj54n1_set_fmt(struct v4l2_subdev *sd,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *mf = &format->format;
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	const struct rj54n1_datafmt *fmt;
	int output_w, output_h, max_w, max_h,
		input_w = rj54n1->rect.width, input_h = rj54n1->rect.height;
	int align = mf->code == MEDIA_BUS_FMT_SBGGR10_1X10 ||
		mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE ||
		mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE ||
		mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE ||
		mf->code == MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE;
	int ret;

	if (format->pad)
		return -EINVAL;

	dev_dbg(&client->dev, "%s: code = %d, width = %u, height = %u\n",
		__func__, mf->code, mf->width, mf->height);

	fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
				  ARRAY_SIZE(rj54n1_colour_fmts));
	if (!fmt) {
		/* Unsupported code: fall back to the current format */
		fmt = rj54n1->fmt;
		mf->code = fmt->code;
	}

	mf->field	= V4L2_FIELD_NONE;
	mf->colorspace	= fmt->colorspace;

	v4l_bound_align_image(&mf->width, 112, RJ54N1_MAX_WIDTH, align,
			      &mf->height, 84, RJ54N1_MAX_HEIGHT, align, 0);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
		sd_state->pads->try_fmt = *mf;
		return 0;
	}

	/*
	 * Verify if the sensor has just been powered on. TODO: replace this
	 * with proper PM, when a suitable API is available.
	 */
	ret = reg_read(client, RJ54N1_RESET_STANDBY);
	if (ret < 0)
		return ret;

	if (!(ret & E_EXCLK)) {
		ret = rj54n1_reg_init(client);
		if (ret < 0)
			return ret;
	}

	/* RA_SEL_UL is only relevant for raw modes, ignored otherwise. */
	switch (mf->code) {
	case MEDIA_BUS_FMT_YUYV8_2X8:
		ret = reg_write(client, RJ54N1_OUT_SEL, 0);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
		break;
	case MEDIA_BUS_FMT_YVYU8_2X8:
		ret = reg_write(client, RJ54N1_OUT_SEL, 0);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
		break;
	case MEDIA_BUS_FMT_RGB565_2X8_LE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
		break;
	case MEDIA_BUS_FMT_RGB565_2X8_BE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 0x11);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
		break;
	case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_LE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 4);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
		if (!ret)
			ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
		break;
	case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_LE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 4);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 8, 8);
		if (!ret)
			ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
		break;
	case MEDIA_BUS_FMT_SBGGR10_2X8_PADLO_BE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 4);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
		if (!ret)
			ret = reg_write(client, RJ54N1_RA_SEL_UL, 0);
		break;
	case MEDIA_BUS_FMT_SBGGR10_2X8_PADHI_BE:
		ret = reg_write(client, RJ54N1_OUT_SEL, 4);
		if (!ret)
			ret = reg_set(client, RJ54N1_BYTE_SWAP, 0, 8);
		if (!ret)
			ret = reg_write(client, RJ54N1_RA_SEL_UL, 8);
		break;
	case MEDIA_BUS_FMT_SBGGR10_1X10:
		ret = reg_write(client, RJ54N1_OUT_SEL, 5);
		break;
	default:
		ret = -EINVAL;
	}

	/* Special case: a raw mode with 10 bits of data per clock tick */
	if (!ret)
		ret = reg_set(client, RJ54N1_OCLK_SEL_EN,
			      (mf->code == MEDIA_BUS_FMT_SBGGR10_1X10) << 1, 2);

	if (ret < 0)
		return ret;

	/* Supported scales 1:1 >= scale > 1:16 */
	max_w = mf->width * (16 * 1024 - 1) / 1024;
	if (input_w > max_w)
		input_w = max_w;
	max_h = mf->height * (16 * 1024 - 1) / 1024;
	if (input_h > max_h)
		input_h = max_h;

	output_w = mf->width;
	output_h = mf->height;

	ret = rj54n1_sensor_scale(sd, &input_w, &input_h,
				  &output_w, &output_h);
	if (ret < 0)
		return ret;

	fmt = rj54n1_find_datafmt(mf->code, rj54n1_colour_fmts,
				  ARRAY_SIZE(rj54n1_colour_fmts));

	/* Cache the negotiated geometry; ret is the resize coefficient */
	rj54n1->fmt		= fmt;
	rj54n1->resize		= ret;
	rj54n1->rect.width	= input_w;
	rj54n1->rect.height	= input_h;
	rj54n1->width		= output_w;
	rj54n1->height		= output_h;

	mf->width	= output_w;
	mf->height	= output_h;
	mf->field	= V4L2_FIELD_NONE;
	mf->colorspace	= fmt->colorspace;

	return 0;
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug hook: read a single register (banked range 0x400..0x1fff only). */
static int rj54n1_g_register(struct v4l2_subdev *sd,
			     struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (reg->reg < 0x400 || reg->reg > 0x1fff)
		/* Registers > 0x0800 are only available from Sharp support */
		return -EINVAL;

	reg->size = 1;
	reg->val = reg_read(client, reg->reg);

	if (reg->val > 0xff)
		return -EIO;

	return 0;
}

/* Debug hook: write a single register (banked range 0x400..0x1fff only). */
static int rj54n1_s_register(struct v4l2_subdev *sd,
			     const struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (reg->reg < 0x400 || reg->reg > 0x1fff)
		/* Registers >= 0x0800 are only available from Sharp support */
		return -EINVAL;

	if (reg_write(client, reg->reg, reg->val) < 0)
		return -EIO;

	return 0;
}
#endif

/* Power the sensor up (GPIOs then clock) or down (reverse order). */
static int rj54n1_s_power(struct v4l2_subdev *sd, int on)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct rj54n1 *rj54n1 = to_rj54n1(client);

	if (on) {
		if (rj54n1->pwup_gpio)
			gpiod_set_value(rj54n1->pwup_gpio, 1);
		if (rj54n1->enable_gpio)
			gpiod_set_value(rj54n1->enable_gpio, 1);

		msleep(1);

		return clk_prepare_enable(rj54n1->clk);
	}

	clk_disable_unprepare(rj54n1->clk);

	if (rj54n1->enable_gpio)
		gpiod_set_value(rj54n1->enable_gpio, 0);
	if (rj54n1->pwup_gpio)
		gpiod_set_value(rj54n1->pwup_gpio, 0);

	return 0;
}

/* V4L2 control handler: flips, gain and auto white balance. */
static int rj54n1_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct rj54n1 *rj54n1 = container_of(ctrl->handler, struct rj54n1, hdl);
	struct v4l2_subdev *sd = &rj54n1->subdev;
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int data;

	switch (ctrl->id) {
	case V4L2_CID_VFLIP:
		if (ctrl->val)
			data =
reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 1);
		else
			data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 1, 1);
		if (data < 0)
			return -EIO;
		return 0;
	case V4L2_CID_HFLIP:
		if (ctrl->val)
			data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 0, 2);
		else
			data = reg_set(client, RJ54N1_MIRROR_STILL_MODE, 2, 2);
		if (data < 0)
			return -EIO;
		return 0;
	case V4L2_CID_GAIN:
		if (reg_write(client, RJ54N1_Y_GAIN, ctrl->val * 2) < 0)
			return -EIO;
		return 0;
	case V4L2_CID_AUTO_WHITE_BALANCE:
		/* Auto WB area - whole image */
		if (reg_set(client, RJ54N1_WB_SEL_WEIGHT_I, ctrl->val << 7,
			    0x80) < 0)
			return -EIO;
		rj54n1->auto_wb = ctrl->val;
		return 0;
	}

	return -EINVAL;
}

static const struct v4l2_ctrl_ops rj54n1_ctrl_ops = {
	.s_ctrl = rj54n1_s_ctrl,
};

static const struct v4l2_subdev_core_ops rj54n1_subdev_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = rj54n1_g_register,
	.s_register = rj54n1_s_register,
#endif
	.s_power = rj54n1_s_power,
};

static const struct v4l2_subdev_video_ops rj54n1_subdev_video_ops = {
	.s_stream = rj54n1_s_stream,
};

static const struct v4l2_subdev_pad_ops rj54n1_subdev_pad_ops = {
	.enum_mbus_code = rj54n1_enum_mbus_code,
	.get_selection = rj54n1_get_selection,
	.set_selection = rj54n1_set_selection,
	.get_fmt = rj54n1_get_fmt,
	.set_fmt = rj54n1_set_fmt,
};

static const struct v4l2_subdev_ops rj54n1_subdev_ops = {
	.core = &rj54n1_subdev_core_ops,
	.video = &rj54n1_subdev_video_ops,
	.pad = &rj54n1_subdev_pad_ops,
};

/*
 * Interface active, can use i2c. If it fails, it can indeed mean, that
 * this wasn't our capture interface, so, we wait for the right one
 */
static int rj54n1_video_probe(struct i2c_client *client,
			      struct rj54n1_pdata *priv)
{
	struct rj54n1 *rj54n1 = to_rj54n1(client);
	int data1, data2;
	int ret;

	/* Power up just long enough to identify the chip */
	ret = rj54n1_s_power(&rj54n1->subdev, 1);
	if (ret < 0)
		return ret;

	/* Read out the chip version register */
	data1 = reg_read(client, RJ54N1_DEV_CODE);
	data2 = reg_read(client, RJ54N1_DEV_CODE2);

	if (data1 != 0x51 || data2 != 0x10) {
		ret = -ENODEV;
		dev_info(&client->dev, "No RJ54N1CB0C found, read 0x%x:0x%x\n",
			 data1, data2);
		goto done;
	}

	/* Configure IOCTL polarity from the platform data: 0 or 1 << 7. */
	ret = reg_write(client, RJ54N1_IOC, priv->ioctl_high << 7);
	if (ret < 0)
		goto done;

	dev_info(&client->dev, "Detected a RJ54N1CB0C chip ID 0x%x:0x%x\n",
		 data1, data2);

	ret = v4l2_ctrl_handler_setup(&rj54n1->hdl);

done:
	rj54n1_s_power(&rj54n1->subdev, 0);
	return ret;
}

/*
 * Bind the driver: validate platform data and SMBus capability, set up
 * controls and default geometry, acquire clock/GPIOs, identify the chip
 * and register the async sub-device.
 */
static int rj54n1_probe(struct i2c_client *client)
{
	struct rj54n1 *rj54n1;
	struct i2c_adapter *adapter = client->adapter;
	struct rj54n1_pdata *rj54n1_priv;
	int ret;

	if (!client->dev.platform_data) {
		dev_err(&client->dev, "RJ54N1CB0C: missing platform data!\n");
		return -EINVAL;
	}

	rj54n1_priv = client->dev.platform_data;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_warn(&adapter->dev,
			 "I2C-Adapter doesn't support I2C_FUNC_SMBUS_BYTE\n");
		return -EIO;
	}

	rj54n1 = devm_kzalloc(&client->dev, sizeof(struct rj54n1), GFP_KERNEL);
	if (!rj54n1)
		return -ENOMEM;

	v4l2_i2c_subdev_init(&rj54n1->subdev, client, &rj54n1_subdev_ops);
	v4l2_ctrl_handler_init(&rj54n1->hdl, 4);
	v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
			V4L2_CID_VFLIP, 0, 1, 1, 0);
	v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
			V4L2_CID_HFLIP, 0, 1, 1, 0);
	v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
			V4L2_CID_GAIN, 0, 127, 1, 66);
	v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
			V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
	rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
	if (rj54n1->hdl.error)
		return rj54n1->hdl.error;

	/* Defaults: full sensor window, 1:1 scaling, first listed format */
	rj54n1->clk_div		= clk_div;
	rj54n1->rect.left	= RJ54N1_COLUMN_SKIP;
	rj54n1->rect.top	= RJ54N1_ROW_SKIP;
	rj54n1->rect.width	= RJ54N1_MAX_WIDTH;
	rj54n1->rect.height	= RJ54N1_MAX_HEIGHT;
	rj54n1->width		= RJ54N1_MAX_WIDTH;
	rj54n1->height		= RJ54N1_MAX_HEIGHT;
	rj54n1->fmt		= &rj54n1_colour_fmts[0];
	rj54n1->resize		= 1024;
	rj54n1->tgclk_mhz	= (rj54n1_priv->mclk_freq / PLL_L * PLL_N) /
		(clk_div.ratio_tg + 1) / (clk_div.ratio_t + 1);

	rj54n1->clk = clk_get(&client->dev, NULL);
	if (IS_ERR(rj54n1->clk)) {
		ret = PTR_ERR(rj54n1->clk);
		goto err_free_ctrl;
	}

	rj54n1->pwup_gpio = gpiod_get_optional(&client->dev, "powerup",
					       GPIOD_OUT_LOW);
	if (IS_ERR(rj54n1->pwup_gpio)) {
		dev_info(&client->dev, "Unable to get GPIO \"powerup\": %ld\n",
			 PTR_ERR(rj54n1->pwup_gpio));
		ret = PTR_ERR(rj54n1->pwup_gpio);
		goto err_clk_put;
	}

	rj54n1->enable_gpio = gpiod_get_optional(&client->dev, "enable",
						 GPIOD_OUT_LOW);
	if (IS_ERR(rj54n1->enable_gpio)) {
		dev_info(&client->dev, "Unable to get GPIO \"enable\": %ld\n",
			 PTR_ERR(rj54n1->enable_gpio));
		ret = PTR_ERR(rj54n1->enable_gpio);
		goto err_gpio_put;
	}

	ret = rj54n1_video_probe(client, rj54n1_priv);
	if (ret < 0)
		goto err_gpio_put;

	ret = v4l2_async_register_subdev(&rj54n1->subdev);
	if (ret)
		goto err_gpio_put;

	return 0;

err_gpio_put:
	if (rj54n1->enable_gpio)
		gpiod_put(rj54n1->enable_gpio);

	if (rj54n1->pwup_gpio)
		gpiod_put(rj54n1->pwup_gpio);

err_clk_put:
	clk_put(rj54n1->clk);

err_free_ctrl:
	v4l2_ctrl_handler_free(&rj54n1->hdl);

	return ret;
}

/* Release GPIOs, clock and controls, then unregister the sub-device. */
static void rj54n1_remove(struct i2c_client *client)
{
	struct rj54n1 *rj54n1 = to_rj54n1(client);

	if (rj54n1->enable_gpio)
		gpiod_put(rj54n1->enable_gpio);
	if (rj54n1->pwup_gpio)
		gpiod_put(rj54n1->pwup_gpio);

	clk_put(rj54n1->clk);
	v4l2_ctrl_handler_free(&rj54n1->hdl);
	v4l2_async_unregister_subdev(&rj54n1->subdev);
}

static const struct i2c_device_id rj54n1_id[] = {
	{ "rj54n1cb0c", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, rj54n1_id);

static struct i2c_driver
rj54n1_i2c_driver = { .driver = { .name = "rj54n1cb0c", }, .probe = rj54n1_probe, .remove = rj54n1_remove, .id_table = rj54n1_id, }; module_i2c_driver(rj54n1_i2c_driver); MODULE_DESCRIPTION("Sharp RJ54N1CB0C Camera driver"); MODULE_AUTHOR("Guennadi Liakhovetski <[email protected]>"); MODULE_LICENSE("GPL v2");
/* Source: linux-master, drivers/media/i2c/rj54n1cb0c.c */