python_code
stringlengths 0
1.8M
| repo_name
stringclasses 7
values | file_path
stringlengths 5
99
|
---|---|---|
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_ssd1351"
#define WIDTH 128
#define HEIGHT 128
#define GAMMA_NUM 1
#define GAMMA_LEN 63
/*
 * Default Grayscale Lookup Table: one leading absolute setting followed
 * by 62 increments of 2 (accumulated in set_gamma()).
 * Note: the last fragment must NOT carry a line-continuation backslash,
 * otherwise the following source line is swallowed into the macro.
 */
#define DEFAULT_GAMMA "0 2 2 2 2 2 2 2 " \
		      "2 2 2 2 2 2 2 2 " \
		      "2 2 2 2 2 2 2 2 " \
		      "2 2 2 2 2 2 2 2 " \
		      "2 2 2 2 2 2 2 2 " \
		      "2 2 2 2 2 2 2 2 " \
		      "2 2 2 2 2 2 2 2 " \
		      "2 2 2 2 2 2 2"
static void register_onboard_backlight(struct fbtft_par *par);
/*
 * Power up and configure the SSD1351: unlock the extended command set,
 * program timing/voltage registers, then turn the display on.  The
 * register order follows the vendor init sequence — do not reorder.
 */
static int init_display(struct fbtft_par *par)
{
	if (par->pdata &&
	    par->pdata->display.backlight == FBTFT_ONBOARD_BACKLIGHT) {
		/* module uses onboard GPIO for panel power */
		par->fbtftops.register_backlight = register_onboard_backlight;
	}

	par->fbtftops.reset(par);

	write_reg(par, 0xfd, 0x12); /* Command Lock */
	write_reg(par, 0xfd, 0xb1); /* Command Lock */
	write_reg(par, 0xae); /* Display Off */
	write_reg(par, 0xb3, 0xf1); /* Front Clock Div */
	write_reg(par, 0xca, 0x7f); /* Set Mux Ratio */
	write_reg(par, 0x15, 0x00, 0x7f); /* Set Column Address */
	write_reg(par, 0x75, 0x00, 0x7f); /* Set Row Address */
	write_reg(par, 0xa1, 0x00); /* Set Display Start Line */
	write_reg(par, 0xa2, 0x00); /* Set Display Offset */
	write_reg(par, 0xb5, 0x00); /* Set GPIO */
	write_reg(par, 0xab, 0x01); /* Set Function Selection */
	write_reg(par, 0xb1, 0x32); /* Set Phase Length */
	write_reg(par, 0xb4, 0xa0, 0xb5, 0x55); /* Set Segment Low Voltage */
	write_reg(par, 0xbb, 0x17); /* Set Precharge Voltage */
	write_reg(par, 0xbe, 0x05); /* Set VComH Voltage */
	write_reg(par, 0xc1, 0xc8, 0x80, 0xc8); /* Set Contrast */
	write_reg(par, 0xc7, 0x0f); /* Set Master Contrast */
	write_reg(par, 0xb6, 0x01); /* Set Second Precharge Period */
	write_reg(par, 0xa6); /* Set Display Mode Reset */
	write_reg(par, 0xaf); /* Set Sleep Mode Display On */
	return 0;
}
/* Set the RAM update window, then start a RAM write (command 0x5c). */
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	write_reg(par, 0x15, xs, xe); /* Set Column Address */
	write_reg(par, 0x75, ys, ye); /* Set Row Address */
	write_reg(par, 0x5c); /* Write RAM */
}
/*
 * Program register A0h (remap / colour depth) to match the requested
 * rotation and RGB/BGR ordering.  Skipped when a custom init sequence
 * is in use, since that may have configured A0h differently.
 */
static int set_var(struct fbtft_par *par)
{
	unsigned int remap;

	if (par->fbtftops.init_display != init_display) {
		/* don't risk messing up register A0h */
		fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
			      "%s: skipping since custom init_display() is used\n",
			      __func__);
		return 0;
	}

	/* 65k colour depth plus the RGB/BGR order bit */
	remap = 0x60 | (par->bgr << 2);

	switch (par->info->var.rotate) {
	case 0:
		write_reg(par, 0xA0, remap | 0x00 | BIT(4));
		break;
	case 90:
		write_reg(par, 0xA0, remap | 0x01);
		break;
	case 180:
		write_reg(par, 0xA0, remap | 0x02);
		break;
	case 270:
		write_reg(par, 0xA0, remap | 0x03 | BIT(4));
		break;
	}

	return 0;
}
/*
* Grayscale Lookup Table
* GS1 - GS63
* The driver Gamma curve contains the relative values between the entries
* in the Lookup table.
*
* From datasheet:
* 8.8 Gray Scale Decoder
*
* there are total 180 Gamma Settings (Setting 0 to Setting 180)
* available for the Gray Scale table.
*
* The gray scale is defined in incremental way, with reference
* to the length of previous table entry:
* Setting of GS1 has to be >= 0
* Setting of GS2 has to be > Setting of GS1 +1
* Setting of GS3 has to be > Setting of GS2 +1
* :
* Setting of GS63 has to be > Setting of GS62 +1
*
*/
/*
 * Program the Grayscale Lookup Table (command B8h, GS1-GS63).
 *
 * @curves holds per-entry increments which are accumulated into the
 * absolute settings sent to the controller.  Per the datasheet every
 * increment after the first must be at least 2 and the accumulated
 * value may not exceed 180.
 *
 * Returns 0 on success, -EINVAL if the curve violates those limits.
 */
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
	unsigned long tmp[GAMMA_NUM * GAMMA_LEN];
	int i, acc = 0;

	for (i = 0; i < GAMMA_LEN; i++) {
		if (i > 0 && curves[i] < 2) {
			/* %u: curves[] entries are u32 */
			dev_err(par->info->device,
				"Illegal value in Grayscale Lookup Table at index %d : %u. Must be greater than 1\n",
				i, curves[i]);
			return -EINVAL;
		}
		acc += curves[i];
		tmp[i] = acc;
		if (acc > 180) {
			dev_err(par->info->device,
				"Illegal value(s) in Grayscale Lookup Table. At index=%d : %d, the accumulated value has exceeded 180\n",
				i, acc);
			return -EINVAL;
		}
	}

	write_reg(par, 0xB8,
		  tmp[0], tmp[1], tmp[2], tmp[3],
		  tmp[4], tmp[5], tmp[6], tmp[7],
		  tmp[8], tmp[9], tmp[10], tmp[11],
		  tmp[12], tmp[13], tmp[14], tmp[15],
		  tmp[16], tmp[17], tmp[18], tmp[19],
		  tmp[20], tmp[21], tmp[22], tmp[23],
		  tmp[24], tmp[25], tmp[26], tmp[27],
		  tmp[28], tmp[29], tmp[30], tmp[31],
		  tmp[32], tmp[33], tmp[34], tmp[35],
		  tmp[36], tmp[37], tmp[38], tmp[39],
		  tmp[40], tmp[41], tmp[42], tmp[43],
		  tmp[44], tmp[45], tmp[46], tmp[47],
		  tmp[48], tmp[49], tmp[50], tmp[51],
		  tmp[52], tmp[53], tmp[54], tmp[55],
		  tmp[56], tmp[57], tmp[58], tmp[59],
		  tmp[60], tmp[61], tmp[62]);

	return 0;
}
/* Blank (0xAE, display off) or unblank (0xAF, display on) the panel. */
static int blank(struct fbtft_par *par, bool on)
{
	fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
		      __func__, on ? "true" : "false");

	write_reg(par, on ? 0xAE : 0xAF);
	return 0;
}
/*
 * 128x128 SSD1351 panel description.  The "gamma" string is the
 * 63-entry grayscale lookup table delta list consumed by set_gamma().
 */
static struct fbtft_display display = {
	.regwidth = 8,
	.width = WIDTH,
	.height = HEIGHT,
	.gamma_num = GAMMA_NUM,
	.gamma_len = GAMMA_LEN,
	.gamma = DEFAULT_GAMMA,
	.fbtftops = {
		.init_display = init_display,
		.set_addr_win = set_addr_win,
		.set_var = set_var,
		.set_gamma = set_gamma,
		.blank = blank,
	},
};
/*
 * Backlight update hook: switch the panel power through the SSD1351's
 * own GPIO0 pin according to the backlight blank state.
 */
static int update_onboard_backlight(struct backlight_device *bd)
{
	struct fbtft_par *par = bl_get_data(bd);

	fbtft_par_dbg(DEBUG_BACKLIGHT, par,
		      "%s: power=%d, fb_blank=%d\n",
		      __func__, bd->props.power, bd->props.fb_blank);

	/* Onboard backlight connected to GPIO0 on SSD1351, GPIO1 unused */
	write_reg(par, 0xB5, backlight_is_blank(bd) ? 0x02 : 0x03);
	return 0;
}
/* Backlight operations: only the status update hook is needed. */
static const struct backlight_ops bl_ops = {
	.update_status = update_onboard_backlight,
};
/*
 * Register a backlight device driving panel power via the SSD1351's
 * GPIO pin (see update_onboard_backlight()).  On failure the error is
 * logged and the driver continues without backlight control.
 */
static void register_onboard_backlight(struct fbtft_par *par)
{
	struct backlight_device *bd;
	struct backlight_properties bl_props = { 0, };

	bl_props.type = BACKLIGHT_RAW;
	/* start blanked; update_status switches the panel power */
	bl_props.power = FB_BLANK_POWERDOWN;

	bd = backlight_device_register(dev_driver_string(par->info->device),
				       par->info->device, par, &bl_ops,
				       &bl_props);
	if (IS_ERR(bd)) {
		dev_err(par->info->device,
			"cannot register backlight device (%ld)\n",
			PTR_ERR(bd));
		return;
	}
	par->info->bl_dev = bd;

	/* keep an already-installed custom unregister hook */
	if (!par->fbtftops.unregister_backlight)
		par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
}
/* Driver registration (SPI and platform) and module metadata. */
FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);

MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
MODULE_ALIAS("spi:ssd1351");
MODULE_ALIAS("platform:ssd1351");

MODULE_DESCRIPTION("SSD1351 OLED Driver");
MODULE_AUTHOR("James Davies");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/fbtft/fb_ssd1351.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the SSD1306 OLED Controller
*
* Copyright (C) 2013 Noralf Tronnes
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_ssd1306"
#define WIDTH 128
#define HEIGHT 64
/*
* write_reg() caveat:
*
* This doesn't work because D/C has to be LOW for both values:
* write_reg(par, val1, val2);
*
* Do it like this:
* write_reg(par, val1);
* write_reg(par, val2);
*/
/* Init sequence taken from the Adafruit SSD1306 Arduino library */
/*
 * Configure the SSD1306 after reset.  Also seeds the default contrast
 * ("gamma") value if none was supplied, picking a panel-height-specific
 * default.  Register order follows the Adafruit init sequence — do not
 * reorder.
 */
static int init_display(struct fbtft_par *par)
{
	par->fbtftops.reset(par);

	if (par->gamma.curves[0] == 0) {
		mutex_lock(&par->gamma.lock);
		if (par->info->var.yres == 64)
			par->gamma.curves[0] = 0xCF;
		else
			par->gamma.curves[0] = 0x8F;
		mutex_unlock(&par->gamma.lock);
	}

	/* Set Display OFF */
	write_reg(par, 0xAE);
	/* Set Display Clock Divide Ratio/ Oscillator Frequency */
	write_reg(par, 0xD5);
	write_reg(par, 0x80);
	/* Set Multiplex Ratio */
	write_reg(par, 0xA8);
	if (par->info->var.yres == 64)
		write_reg(par, 0x3F);
	else if (par->info->var.yres == 48)
		write_reg(par, 0x2F);
	else
		write_reg(par, 0x1F);
	/* Set Display Offset */
	write_reg(par, 0xD3);
	write_reg(par, 0x0);
	/* Set Display Start Line */
	write_reg(par, 0x40 | 0x0);
	/* Charge Pump Setting */
	write_reg(par, 0x8D);
	/* A[2] = 1b, Enable charge pump during display on */
	write_reg(par, 0x14);
	/* Set Memory Addressing Mode */
	write_reg(par, 0x20);
	/* Vertical addressing mode */
	write_reg(par, 0x01);
	/* Set Segment Re-map */
	/* column address 127 is mapped to SEG0 */
	write_reg(par, 0xA0 | 0x1);
	/* Set COM Output Scan Direction */
	/* remapped mode. Scan from COM[N-1] to COM0 */
	write_reg(par, 0xC8);
	/* Set COM Pins Hardware Configuration */
	write_reg(par, 0xDA);
	if (par->info->var.yres == 64)
		/* A[4]=1b, Alternative COM pin configuration */
		write_reg(par, 0x12);
	else if (par->info->var.yres == 48)
		/* A[4]=1b, Alternative COM pin configuration */
		write_reg(par, 0x12);
	else
		/* A[4]=0b, Sequential COM pin configuration */
		write_reg(par, 0x02);
	/* Set Pre-charge Period */
	write_reg(par, 0xD9);
	write_reg(par, 0xF1);
	/* Set VCOMH Deselect Level */
	write_reg(par, 0xDB);
	/* according to the datasheet, this value is out of bounds */
	write_reg(par, 0x40);
	/* Entire Display ON */
	/* Resume to RAM content display. Output follows RAM content */
	write_reg(par, 0xA4);
	/* Set Normal Display
	 * 0 in RAM: OFF in display panel
	 * 1 in RAM: ON in display panel
	 */
	write_reg(par, 0xA6);
	/* Set Display ON */
	write_reg(par, 0xAF);
	return 0;
}
/*
 * Explicit column/page window for 64x48 panels, which use only a
 * sub-range of the controller's 128-column RAM (columns 0x20-0x5F,
 * pages 0-5).
 */
static void set_addr_win_64x48(struct fbtft_par *par)
{
	/* Set Column Address */
	write_reg(par, 0x21);
	write_reg(par, 0x20);
	write_reg(par, 0x5F);
	/* Set Page Address */
	write_reg(par, 0x22);
	write_reg(par, 0x0);
	write_reg(par, 0x5);
}
/*
 * Reset the RAM pointers before a full-frame update.  The xs/ys/xe/ye
 * arguments are unused here — the driver always pushes the whole
 * frame; only the 64x48 panel needs an explicit window.
 */
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	/* Set Lower Column Start Address for Page Addressing Mode */
	write_reg(par, 0x00 | 0x0);
	/* Set Higher Column Start Address for Page Addressing Mode */
	write_reg(par, 0x10 | 0x0);
	/* Set Display Start Line */
	write_reg(par, 0x40 | 0x0);

	if (par->info->var.xres == 64 && par->info->var.yres == 48)
		set_addr_win_64x48(par);
}
/* Blank (0xAE, display off) or unblank (0xAF, display on) the panel. */
static int blank(struct fbtft_par *par, bool on)
{
	fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
		      __func__, on ? "true" : "false");

	write_reg(par, on ? 0xAE : 0xAF);
	return 0;
}
/* Gamma is used to control Contrast */
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
	/* apply mask — note: clamps the caller's curve entry in place */
	curves[0] &= 0xFF;

	/* Set Contrast Control for BANK0 */
	write_reg(par, 0x81);
	write_reg(par, curves[0]);
	return 0;
}
/*
 * Convert the 16-bit framebuffer into the SSD1306 vertical page format
 * (one output byte = 8 vertically stacked monochrome pixels) and push
 * the whole frame.  Any non-zero RGB565 pixel counts as "on".
 */
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
	u16 *vmem16 = (u16 *)par->info->screen_buffer;
	u32 xres = par->info->var.xres;
	u32 yres = par->info->var.yres;
	u8 *buf = par->txbuf.buf;
	int x, page, bit;
	int ret;

	for (x = 0; x < xres; x++) {
		for (page = 0; page < yres / 8; page++) {
			u8 byte = 0x00;

			for (bit = 0; bit < 8; bit++)
				if (vmem16[(page * 8 + bit) * xres + x])
					byte |= BIT(bit);
			*buf++ = byte;
		}
	}

	/* Write data */
	gpiod_set_value(par->gpio.dc, 1);
	ret = par->fbtftops.write(par, par->txbuf.buf, xres * yres / 8);
	if (ret < 0)
		dev_err(par->info->device, "write failed and returned: %d\n",
			ret);

	return ret;
}
/*
 * 128x64 SSD1306 panel description.  A single one-entry "gamma" curve
 * carries the contrast byte (see set_gamma()).
 */
static struct fbtft_display display = {
	.regwidth = 8,
	.width = WIDTH,
	.height = HEIGHT,
	.gamma_num = 1,
	.gamma_len = 1,
	.gamma = "00",
	.fbtftops = {
		.write_vmem = write_vmem,
		.init_display = init_display,
		.set_addr_win = set_addr_win,
		.blank = blank,
		.set_gamma = set_gamma,
	},
};
/* Driver registration (SPI and platform) and module metadata. */
FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1306", &display);

MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
MODULE_ALIAS("spi:ssd1306");
MODULE_ALIAS("platform:ssd1306");

MODULE_DESCRIPTION("SSD1306 OLED Driver");
MODULE_AUTHOR("Noralf Tronnes");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/fbtft/fb_ssd1306.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the UltraChip UC1611 LCD controller
*
* The display is 4-bit grayscale (16 shades) 240x160.
*
* Copyright (C) 2015 Henri Chain
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_uc1611"
#define WIDTH 240
#define HEIGHT 160
#define BPP 8
#define FPS 40
/*
* LCD voltage is a combination of ratio, gain, pot and temp
*
* V_LCD = V_BIAS * ratio
* V_LCD = (C_V0 + C_PM × pot) * (1 + (T - 25) * temp)
* C_V0 and C_PM depend on ratio and gain
* T is ambient temperature
*/
/* Module parameters: LCD bias voltage and panel loading configuration. */

/* BR -> actual ratio: 0-3 -> 5, 10, 11, 13 */
static unsigned int ratio = 2;
module_param(ratio, uint, 0000);
MODULE_PARM_DESC(ratio, "BR[1:0] Bias voltage ratio: 0-3 (default: 2)");

static unsigned int gain = 3;
module_param(gain, uint, 0000);
MODULE_PARM_DESC(gain, "GN[1:0] Bias voltage gain: 0-3 (default: 3)");

static unsigned int pot = 16;
module_param(pot, uint, 0000);
MODULE_PARM_DESC(pot, "PM[6:0] Bias voltage pot.: 0-63 (default: 16)");

/* TC -> % compensation per deg C: 0-3 -> -.05, -.10, -.015, -.20 */
static unsigned int temp;
module_param(temp, uint, 0000);
MODULE_PARM_DESC(temp, "TC[1:0] Temperature compensation: 0-3 (default: 0)");

/* PC[1:0] -> LCD capacitance: 0-3 -> <20nF, 20-28 nF, 29-40 nF, 40-56 nF */
static unsigned int load = 1;
module_param(load, uint, 0000);
MODULE_PARM_DESC(load, "PC[1:0] Panel Loading: 0-3 (default: 1)");

/* PC[3:2] -> V_LCD: 0, 1, 3 -> ext., int. with ratio = 5, int. standard */
static unsigned int pump = 3;
module_param(pump, uint, 0000);
MODULE_PARM_DESC(pump, "PC[3:2] Pump control: 0,1,3 (default: 3)");
/*
 * Configure the UC1611 after reset: fix up SPI chip-select polarity,
 * then program bias/temperature/pump settings from the module
 * parameters and enable the display in 4-bit grayscale mode.
 *
 * Returns 0 on success or the spi_setup() error code.
 */
static int init_display(struct fbtft_par *par)
{
	int ret;

	/*
	 * Set CS active inverse polarity: just setting SPI_CS_HIGH does not
	 * work with GPIO based chip selects that are logically active high
	 * but inverted inside the GPIO library, so enforce inverted
	 * semantics.
	 */
	par->spi->mode ^= SPI_CS_HIGH;
	ret = spi_setup(par->spi);
	if (ret) {
		dev_err(par->info->device,
			"Could not set inverse CS polarity\n");
		return ret;
	}

	/* Reset controller */
	write_reg(par, 0xE2);
	/* Set bias ratio */
	write_reg(par, 0xE8 | (ratio & 0x03));
	/* Set bias gain and potentiometer */
	write_reg(par, 0x81);
	write_reg(par, (gain & 0x03) << 6 | (pot & 0x3F));
	/* Set temperature compensation */
	write_reg(par, 0x24 | (temp & 0x03));
	/* Set panel loading */
	write_reg(par, 0x28 | (load & 0x03));
	/* Set pump control */
	write_reg(par, 0x2C | (pump & 0x03));
	/* Set inverse display */
	write_reg(par, 0xA6 | 0x01);
	/* Set 4-bit grayscale mode */
	write_reg(par, 0xD0 | (0x02 & 0x03));
	/* Set Display enable */
	write_reg(par, 0xA8 | 0x07);
	return 0;
}
/*
 * Set the RAM column and page pointers.  Only the scan-direction-major
 * coordinate matters for this driver (pages hold two framebuffer lines,
 * hence the divide by 2); the other coordinate is written but unused.
 */
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	switch (par->info->var.rotate) {
	case 90:
	case 270:
		/* Set column address */
		write_reg(par, ys & 0x0F);
		write_reg(par, 0x10 | (ys >> 4));
		/* Set page address (divide xs by 2) (not used by driver) */
		write_reg(par, 0x60 | ((xs >> 1) & 0x0F));
		write_reg(par, 0x70 | (xs >> 5));
		break;
	default:
		/* Set column address (not used by driver) */
		write_reg(par, xs & 0x0F);
		write_reg(par, 0x10 | (xs >> 4));
		/* Set page address (divide ys by 2) */
		write_reg(par, 0x60 | ((ys >> 1) & 0x0F));
		write_reg(par, 0x70 | (ys >> 5));
		break;
	}
}
/* Blank (display disable) or unblank (display enable, 0xA8|0x07). */
static int blank(struct fbtft_par *par, bool on)
{
	fbtft_par_dbg(DEBUG_BLANK, par, "(%s=%s)\n",
		      __func__, on ? "true" : "false");

	write_reg(par, 0xA8 | (on ? 0x00 : 0x07));
	return 0;
}
/*
 * Declare an 8-bit grayscale pixel format to the fbdev core, then
 * program the RAM address control (0x88) and LCD mapping (0xC0)
 * registers to match the rotation.  The two writes per case form a
 * pair and must stay together.
 */
static int set_var(struct fbtft_par *par)
{
	/* par->info->fix.visual = FB_VISUAL_PSEUDOCOLOR; */
	par->info->var.grayscale = 1;
	par->info->var.red.offset = 0;
	par->info->var.red.length = 8;
	par->info->var.green.offset = 0;
	par->info->var.green.length = 8;
	par->info->var.blue.offset = 0;
	par->info->var.blue.length = 8;
	par->info->var.transp.offset = 0;
	par->info->var.transp.length = 0;

	switch (par->info->var.rotate) {
	case 90:
		/* Set RAM address control */
		write_reg(par, 0x88
			| (0x0 & 0x1) << 2 /* Increment positively */
			| (0x1 << 1) /* Increment page first */
			| 0x1); /* Wrap around (default) */
		/* Set LCD mapping */
		write_reg(par, 0xC0
			| (0x0 & 0x1) << 2 /* Mirror Y OFF */
			| (0x0 & 0x1) << 1 /* Mirror X OFF */
			| (0x0 & 0x1)); /* MS nibble last (default) */
		break;
	case 180:
		/* Set RAM address control */
		write_reg(par, 0x88
			| (0x0 & 0x1) << 2 /* Increment positively */
			| (0x0 & 0x1) << 1 /* Increment column first */
			| 0x1); /* Wrap around (default) */
		/* Set LCD mapping */
		write_reg(par, 0xC0
			| (0x1 << 2) /* Mirror Y ON */
			| (0x0 & 0x1) << 1 /* Mirror X OFF */
			| (0x0 & 0x1)); /* MS nibble last (default) */
		break;
	case 270:
		/* Set RAM address control */
		write_reg(par, 0x88
			| (0x0 & 0x1) << 2 /* Increment positively */
			| (0x1 << 1) /* Increment page first */
			| 0x1); /* Wrap around (default) */
		/* Set LCD mapping */
		write_reg(par, 0xC0
			| (0x1 << 2) /* Mirror Y ON */
			| (0x1 << 1) /* Mirror X ON */
			| (0x0 & 0x1)); /* MS nibble last (default) */
		break;
	default:
		/* Set RAM address control */
		write_reg(par, 0x88
			| (0x0 & 0x1) << 2 /* Increment positively */
			| (0x0 & 0x1) << 1 /* Increment column first */
			| 0x1); /* Wrap around (default) */
		/* Set LCD mapping */
		write_reg(par, 0xC0
			| (0x0 & 0x1) << 2 /* Mirror Y OFF */
			| (0x1 << 1) /* Mirror X ON */
			| (0x0 & 0x1)); /* MS nibble last (default) */
		break;
	}
	return 0;
}
/*
 * Pack two 8-bit grayscale framebuffer pixels into one 4-bit-per-pixel
 * output byte and send the dirty lines.  In portrait orientations two
 * adjacent pixels on a line are paired; otherwise a pixel is paired
 * with the one directly below it (pages span two lines).  With a 9-bit
 * bus each byte is sent as a 9-bit word with the D/C bit (0x100) set.
 *
 * Returns the bus write result, 0 for an unsupported buswidth.
 * NOTE(review): the unsupported-buswidth path logs an error but still
 * returns 0 — confirm whether an error code would be preferable.
 */
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
	u8 *vmem8 = (u8 *)(par->info->screen_buffer);
	u8 *buf8 = par->txbuf.buf;
	u16 *buf16 = par->txbuf.buf;
	int line_length = par->info->fix.line_length;
	int y_start = offset / line_length;
	int y_end = (offset + len - 1) / line_length;
	int x, y, i;
	int ret = 0;

	switch (par->pdata->display.buswidth) {
	case 8:
		switch (par->info->var.rotate) {
		case 90:
		case 270:
			i = y_start * line_length;
			for (y = y_start; y <= y_end; y++) {
				for (x = 0; x < line_length; x += 2) {
					*buf8 = vmem8[i] >> 4;
					*buf8 |= vmem8[i + 1] & 0xF0;
					buf8++;
					i += 2;
				}
			}
			break;
		default:
			/* Must be even because pages are two lines */
			y_start &= 0xFE;
			i = y_start * line_length;
			for (y = y_start; y <= y_end; y += 2) {
				for (x = 0; x < line_length; x++) {
					*buf8 = vmem8[i] >> 4;
					*buf8 |= vmem8[i + line_length] & 0xF0;
					buf8++;
					i++;
				}
				i += line_length;
			}
			break;
		}
		gpiod_set_value(par->gpio.dc, 1);

		/* Write data */
		ret = par->fbtftops.write(par, par->txbuf.buf, len / 2);
		break;
	case 9:
		switch (par->info->var.rotate) {
		case 90:
		case 270:
			i = y_start * line_length;
			for (y = y_start; y <= y_end; y++) {
				for (x = 0; x < line_length; x += 2) {
					*buf16 = 0x100; /* D/C bit high = data */
					*buf16 |= vmem8[i] >> 4;
					*buf16 |= vmem8[i + 1] & 0xF0;
					buf16++;
					i += 2;
				}
			}
			break;
		default:
			/* Must be even because pages are two lines */
			y_start &= 0xFE;
			i = y_start * line_length;
			for (y = y_start; y <= y_end; y += 2) {
				for (x = 0; x < line_length; x++) {
					*buf16 = 0x100; /* D/C bit high = data */
					*buf16 |= vmem8[i] >> 4;
					*buf16 |= vmem8[i + line_length] & 0xF0;
					buf16++;
					i++;
				}
				i += line_length;
			}
			break;
		}

		/* Write data */
		ret = par->fbtftops.write(par, par->txbuf.buf, len);
		break;
	default:
		dev_err(par->info->device, "unsupported buswidth %d\n",
			par->pdata->display.buswidth);
	}

	if (ret < 0)
		dev_err(par->info->device, "write failed and returned: %d\n",
			ret);

	return ret;
}
/* 240x160 UC1611 panel description (8 bpp grayscale, 40 fps). */
static struct fbtft_display display = {
	.txbuflen = -1,
	.regwidth = 8,
	.width = WIDTH,
	.height = HEIGHT,
	.bpp = BPP,
	.fps = FPS,
	.fbtftops = {
		.write_vmem = write_vmem,
		.init_display = init_display,
		.set_addr_win = set_addr_win,
		.set_var = set_var,
		.blank = blank,
	},
};
/* Driver registration (SPI and platform) and module metadata. */
FBTFT_REGISTER_DRIVER(DRVNAME, "ultrachip,uc1611", &display);

MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
MODULE_ALIAS("spi:uc1611");
MODULE_ALIAS("platform:uc1611");

MODULE_DESCRIPTION("FB driver for the UC1611 LCD controller");
MODULE_AUTHOR("Henri Chain");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/fbtft/fb_uc1611.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9341 LCD display controller
*
* This display uses 9-bit SPI: Data/Command bit + 8 data bits
* For platforms that doesn't support 9-bit, the driver is capable
* of emulating this using 8-bit transfer.
* This is done by transferring eight 9-bit words in 9 bytes.
*
* Copyright (C) 2013 Christian Vogelgsang
* Based on adafruit22fb.c by Noralf Tronnes
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <video/mipi_display.h>
#include "fbtft.h"
#define DRVNAME "fb_ili9341"
#define WIDTH 240
#define HEIGHT 320
#define TXBUFLEN (4 * PAGE_SIZE)
#define DEFAULT_GAMMA "1F 1A 18 0A 0F 06 45 87 32 0A 07 02 07 05 00\n" \
"00 25 27 05 10 09 3A 78 4D 05 18 0D 38 3A 1F"
/*
 * Power-up sequence for the MI0283QT-9A panel (ILI9341 controller).
 * Values come from the vendor startup sequence; the delays after
 * soft reset and sleep-out are required — do not reorder.
 */
static int init_display(struct fbtft_par *par)
{
	par->fbtftops.reset(par);

	/* startup sequence for MI0283QT-9A */
	write_reg(par, MIPI_DCS_SOFT_RESET);
	mdelay(5);
	write_reg(par, MIPI_DCS_SET_DISPLAY_OFF);
	/* --------------------------------------------------------- */
	write_reg(par, 0xCF, 0x00, 0x83, 0x30);
	write_reg(par, 0xED, 0x64, 0x03, 0x12, 0x81);
	write_reg(par, 0xE8, 0x85, 0x01, 0x79);
	write_reg(par, 0xCB, 0x39, 0X2C, 0x00, 0x34, 0x02);
	write_reg(par, 0xF7, 0x20);
	write_reg(par, 0xEA, 0x00, 0x00);
	/* ------------power control-------------------------------- */
	write_reg(par, 0xC0, 0x26);
	write_reg(par, 0xC1, 0x11);
	/* ------------VCOM --------- */
	write_reg(par, 0xC5, 0x35, 0x3E);
	write_reg(par, 0xC7, 0xBE);
	/* ------------memory access control------------------------ */
	write_reg(par, MIPI_DCS_SET_PIXEL_FORMAT, 0x55); /* 16bit pixel */
	/* ------------frame rate----------------------------------- */
	write_reg(par, 0xB1, 0x00, 0x1B);
	/* ------------Gamma---------------------------------------- */
	/* write_reg(par, 0xF2, 0x08); */ /* Gamma Function Disable */
	write_reg(par, MIPI_DCS_SET_GAMMA_CURVE, 0x01);
	/* ------------display-------------------------------------- */
	write_reg(par, 0xB7, 0x07); /* entry mode set */
	write_reg(par, 0xB6, 0x0A, 0x82, 0x27, 0x00);
	write_reg(par, MIPI_DCS_EXIT_SLEEP_MODE);
	mdelay(100);
	write_reg(par, MIPI_DCS_SET_DISPLAY_ON);
	mdelay(20);
	return 0;
}
/*
 * Set the column/page update window (16-bit big-endian coordinates)
 * and start a memory write.
 */
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
		  (xs >> 8) & 0xFF, xs & 0xFF, (xe >> 8) & 0xFF, xe & 0xFF);

	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
		  (ys >> 8) & 0xFF, ys & 0xFF, (ye >> 8) & 0xFF, ye & 0xFF);

	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
#define MEM_Y BIT(7) /* MY row address order */
#define MEM_X BIT(6) /* MX column address order */
#define MEM_V BIT(5) /* MV row / column exchange */
#define MEM_L BIT(4) /* ML vertical refresh order */
#define MEM_H BIT(2) /* MH horizontal refresh order */
#define MEM_BGR (3) /* RGB-BGR Order */
/*
 * Program the MADCTL register (mirror/exchange bits plus RGB/BGR
 * order) to match the requested rotation.
 */
static int set_var(struct fbtft_par *par)
{
	switch (par->info->var.rotate) {
	case 0:
		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
			  MEM_X | (par->bgr << MEM_BGR));
		break;
	case 90:
		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
			  MEM_Y | MEM_X | MEM_V | (par->bgr << MEM_BGR));
		break;
	case 180:
		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
			  MEM_Y | (par->bgr << MEM_BGR));
		break;
	case 270:
		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
			  MEM_V | MEM_L | (par->bgr << MEM_BGR));
		break;
	}

	return 0;
}
/*
* Gamma string format:
* Positive: Par1 Par2 [...] Par15
* Negative: Par1 Par2 [...] Par15
*/
/*
 * Send one 15-value gamma curve per register: 0xE0 (positive) and
 * 0xE1 (negative).  Each curve occupies num_values consecutive
 * entries of @curves.
 */
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
	int i;

	for (i = 0; i < par->gamma.num_curves; i++) {
		u32 *c = &curves[i * par->gamma.num_values];

		write_reg(par, 0xE0 + i,
			  c[0], c[1], c[2], c[3], c[4],
			  c[5], c[6], c[7], c[8], c[9],
			  c[10], c[11], c[12], c[13], c[14]);
	}

	return 0;
}
/*
 * 240x320 ILI9341 panel description with two 15-entry gamma curves
 * (positive and negative polarity).
 */
static struct fbtft_display display = {
	.regwidth = 8,
	.width = WIDTH,
	.height = HEIGHT,
	.txbuflen = TXBUFLEN,
	.gamma_num = 2,
	.gamma_len = 15,
	.gamma = DEFAULT_GAMMA,
	.fbtftops = {
		.init_display = init_display,
		.set_addr_win = set_addr_win,
		.set_var = set_var,
		.set_gamma = set_gamma,
	},
};
/* SPI driver registration and module metadata. */
FBTFT_REGISTER_SPI_DRIVER(DRVNAME, "ilitek", "ili9341", &display);

MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
MODULE_ALIAS("spi:ili9341");
MODULE_ALIAS("platform:ili9341");

MODULE_DESCRIPTION("FB driver for the ILI9341 LCD display controller");
MODULE_AUTHOR("Christian Vogelgsang");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/fbtft/fb_ili9341.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the S6D02A1 LCD Controller
*
* Based on fb_st7735r.c by Noralf Tronnes
* Init code from UTFT library by Henning Karlsen
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <video/mipi_display.h>
#include "fbtft.h"
#define DRVNAME "fb_s6d02a1"
/*
 * Controller init sequence (from the UTFT library), in fbtft
 * init_sequence encoding: -1 starts a command + data bytes,
 * -2 is a delay in ms, -3 ends the sequence.
 */
static const s16 default_init_sequence[] = {
	-1, 0xf0, 0x5a, 0x5a,
	-1, 0xfc, 0x5a, 0x5a,
	-1, 0xfa, 0x02, 0x1f, 0x00, 0x10, 0x22, 0x30, 0x38,
	0x3A, 0x3A, 0x3A, 0x3A, 0x3A, 0x3d, 0x02, 0x01,
	-1, 0xfb, 0x21, 0x00, 0x02, 0x04, 0x07, 0x0a, 0x0b,
	0x0c, 0x0c, 0x16, 0x1e, 0x30, 0x3f, 0x01, 0x02,

	/* power setting sequence */
	-1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x01,
	0x01, 0x00, 0x1f, 0x1f,
	-1, 0xf4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x3f,
	0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
	-1, 0xf5, 0x00, 0x70, 0x66, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x6d, 0x66, 0x06,
	-1, 0xf6, 0x02, 0x00, 0x3f, 0x00, 0x00, 0x00, 0x02,
	0x00, 0x06, 0x01, 0x00,
	-1, 0xf2, 0x00, 0x01, 0x03, 0x08, 0x08, 0x04, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x04, 0x08, 0x08,
	-1, 0xf8, 0x11,
	-1, 0xf7, 0xc8, 0x20, 0x00, 0x00,
	-1, 0xf3, 0x00, 0x00,
	-1, MIPI_DCS_EXIT_SLEEP_MODE,
	-2, 50,
	/* stepwise power-rail ramp (register F3h), 50 ms per step */
	-1, 0xf3, 0x00, 0x01,
	-2, 50,
	-1, 0xf3, 0x00, 0x03,
	-2, 50,
	-1, 0xf3, 0x00, 0x07,
	-2, 50,
	-1, 0xf3, 0x00, 0x0f,
	-2, 50,
	-1, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x3f, 0x3f,
	0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,
	-2, 50,
	-1, 0xf3, 0x00, 0x1f,
	-2, 50,
	-1, 0xf3, 0x00, 0x7f,
	-2, 50,
	-1, 0xf3, 0x00, 0xff,
	-2, 50,
	-1, 0xfd, 0x00, 0x00, 0x00, 0x17, 0x10, 0x00, 0x00,
	0x01, 0x00, 0x16, 0x16,
	-1, 0xf4, 0x00, 0x09, 0x00, 0x00, 0x00, 0x3f, 0x3f,
	0x07, 0x00, 0x3C, 0x36, 0x00, 0x3C, 0x36, 0x00,

	/* initializing sequence */
	-1, MIPI_DCS_SET_ADDRESS_MODE, 0x08,
	-1, MIPI_DCS_SET_TEAR_ON, 0x00,
	-1, MIPI_DCS_SET_PIXEL_FORMAT, 0x05,
	/* gamma setting - possible values 0x01, 0x02, 0x04, 0x08 */
	-1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
	-2, 150,
	-1, MIPI_DCS_SET_DISPLAY_ON,
	-1, MIPI_DCS_WRITE_MEMORY_START,
	/* end marker */
	-3
};
/*
 * Set the column/page update window (16-bit big-endian coordinates)
 * and start a memory write.
 */
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	write_reg(par, MIPI_DCS_SET_COLUMN_ADDRESS,
		  xs >> 8, xs & 0xFF, xe >> 8, xe & 0xFF);

	write_reg(par, MIPI_DCS_SET_PAGE_ADDRESS,
		  ys >> 8, ys & 0xFF, ye >> 8, ye & 0xFF);

	write_reg(par, MIPI_DCS_WRITE_MEMORY_START);
}
#define MY BIT(7)
#define MX BIT(6)
#define MV BIT(5)
/*
 * Program MADCTL (0x36) mirror/exchange bits and the RGB/BGR order to
 * match the requested rotation.
 *
 * RGB/BGR selection:
 * 1. Mode selection pin SRGB
 *    RGB H/W pin for color filter setting: 0=RGB, 1=BGR
 * 2. MADCTL RGB bit
 *    RGB-BGR ORDER color filter panel: 0=RGB, 1=BGR
 */
static int set_var(struct fbtft_par *par)
{
	switch (par->info->var.rotate) {
	case 0:
		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
			  MX | MY | (par->bgr << 3));
		break;
	case 90:
		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
			  MX | MV | (par->bgr << 3));
		break;
	case 180:
		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
			  par->bgr << 3);
		break;
	case 270:
		write_reg(par, MIPI_DCS_SET_ADDRESS_MODE,
			  MY | MV | (par->bgr << 3));
		break;
	}

	return 0;
}
/* 128x160 S6D02A1 panel description; init is table-driven. */
static struct fbtft_display display = {
	.regwidth = 8,
	.width = 128,
	.height = 160,
	.init_sequence = default_init_sequence,
	.fbtftops = {
		.set_addr_win = set_addr_win,
		.set_var = set_var,
	},
};
/* Driver registration (SPI and platform) and module metadata. */
FBTFT_REGISTER_DRIVER(DRVNAME, "samsung,s6d02a1", &display);

MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
MODULE_ALIAS("spi:s6d02a1");
MODULE_ALIAS("platform:s6d02a1");

MODULE_DESCRIPTION("FB driver for the S6D02A1 LCD Controller");
MODULE_AUTHOR("WOLFGANG BUENING");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/fbtft/fb_s6d02a1.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the ILI9320 LCD Controller
*
* Copyright (C) 2013 Noralf Tronnes
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_ili9320"
#define WIDTH 240
#define HEIGHT 320
#define DEFAULT_GAMMA "07 07 6 0 0 0 5 5 4 0\n" \
"07 08 4 7 5 1 2 0 7 7"
/*
 * Read the 16-bit device code from index register 0x0000.
 * The code is carried in bytes 2 and 3 of the 4-byte read.
 */
static unsigned int read_devicecode(struct fbtft_par *par)
{
	u8 rxbuf[8] = {0, };
	unsigned int devcode;

	write_reg(par, 0x0000);
	par->fbtftops.read(par, rxbuf, 4);
	devcode = (rxbuf[2] << 8) | rxbuf[3];

	return devcode;
}
/*
 * Configure the ILI9320 after reset, warning (but continuing) if the
 * device code read back is unexpected.  The sequence and delays follow
 * the ILI9320 Application Notes — do not reorder.
 */
static int init_display(struct fbtft_par *par)
{
	unsigned int devcode;

	par->fbtftops.reset(par);

	devcode = read_devicecode(par);
	fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Device code: 0x%04X\n",
		      devcode);
	if ((devcode != 0x0000) && (devcode != 0x9320))
		dev_warn(par->info->device,
			 "Unrecognized Device code: 0x%04X (expected 0x9320)\n",
			 devcode);

	/* Initialization sequence from ILI9320 Application Notes */

	/* *********** Start Initial Sequence ********* */
	/* Set the Vcore voltage and this setting is must. */
	write_reg(par, 0x00E5, 0x8000);

	/* Start internal OSC. */
	write_reg(par, 0x0000, 0x0001);

	/* set SS and SM bit */
	write_reg(par, 0x0001, 0x0100);

	/* set 1 line inversion */
	write_reg(par, 0x0002, 0x0700);

	/* Resize register */
	write_reg(par, 0x0004, 0x0000);

	/* set the back and front porch */
	write_reg(par, 0x0008, 0x0202);

	/* set non-display area refresh cycle */
	write_reg(par, 0x0009, 0x0000);

	/* FMARK function */
	write_reg(par, 0x000A, 0x0000);

	/* RGB interface setting */
	write_reg(par, 0x000C, 0x0000);

	/* Frame marker Position */
	write_reg(par, 0x000D, 0x0000);

	/* RGB interface polarity */
	write_reg(par, 0x000F, 0x0000);

	/* ***********Power On sequence *************** */
	/* SAP, BT[3:0], AP, DSTB, SLP, STB */
	write_reg(par, 0x0010, 0x0000);

	/* DC1[2:0], DC0[2:0], VC[2:0] */
	write_reg(par, 0x0011, 0x0007);

	/* VREG1OUT voltage */
	write_reg(par, 0x0012, 0x0000);

	/* VDV[4:0] for VCOM amplitude */
	write_reg(par, 0x0013, 0x0000);

	/* Dis-charge capacitor power voltage */
	mdelay(200);

	/* SAP, BT[3:0], AP, DSTB, SLP, STB */
	write_reg(par, 0x0010, 0x17B0);

	/* R11h=0x0031 at VCI=3.3V DC1[2:0], DC0[2:0], VC[2:0] */
	write_reg(par, 0x0011, 0x0031);
	mdelay(50);

	/* R12h=0x0138 at VCI=3.3V VREG1OUT voltage */
	write_reg(par, 0x0012, 0x0138);
	mdelay(50);

	/* R13h=0x1800 at VCI=3.3V VDV[4:0] for VCOM amplitude */
	write_reg(par, 0x0013, 0x1800);

	/* R29h=0x0008 at VCI=3.3V VCM[4:0] for VCOMH */
	write_reg(par, 0x0029, 0x0008);
	mdelay(50);

	/* GRAM horizontal Address */
	write_reg(par, 0x0020, 0x0000);

	/* GRAM Vertical Address */
	write_reg(par, 0x0021, 0x0000);

	/* ------------------ Set GRAM area --------------- */
	/* Horizontal GRAM Start Address */
	write_reg(par, 0x0050, 0x0000);

	/* Horizontal GRAM End Address */
	write_reg(par, 0x0051, 0x00EF);

	/* Vertical GRAM Start Address */
	write_reg(par, 0x0052, 0x0000);

	/* Vertical GRAM End Address */
	write_reg(par, 0x0053, 0x013F);

	/* Gate Scan Line */
	write_reg(par, 0x0060, 0x2700);

	/* NDL,VLE, REV */
	write_reg(par, 0x0061, 0x0001);

	/* set scrolling line */
	write_reg(par, 0x006A, 0x0000);

	/* -------------- Partial Display Control --------- */
	write_reg(par, 0x0080, 0x0000);
	write_reg(par, 0x0081, 0x0000);
	write_reg(par, 0x0082, 0x0000);
	write_reg(par, 0x0083, 0x0000);
	write_reg(par, 0x0084, 0x0000);
	write_reg(par, 0x0085, 0x0000);

	/* -------------- Panel Control ------------------- */
	write_reg(par, 0x0090, 0x0010);
	write_reg(par, 0x0092, 0x0000);
	write_reg(par, 0x0093, 0x0003);
	write_reg(par, 0x0095, 0x0110);
	write_reg(par, 0x0097, 0x0000);
	write_reg(par, 0x0098, 0x0000);
	write_reg(par, 0x0007, 0x0173); /* 262K color and display ON */

	return 0;
}
/*
 * Load the GRAM address counter (R20h/R21h) with the window origin,
 * translated for the current rotation, then issue the GRAM write
 * command (R22h).
 */
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	switch (par->info->var.rotate) {
	/* R20h = Horizontal GRAM Start Address */
	/* R21h = Vertical GRAM Start Address */
	case 0:
		write_reg(par, 0x0020, xs);
		write_reg(par, 0x0021, ys);
		break;
	case 180:
		write_reg(par, 0x0020, WIDTH - 1 - xs);
		write_reg(par, 0x0021, HEIGHT - 1 - ys);
		break;
	case 270:
		write_reg(par, 0x0020, WIDTH - 1 - ys);
		write_reg(par, 0x0021, xs);
		break;
	case 90:
		write_reg(par, 0x0020, ys);
		write_reg(par, 0x0021, HEIGHT - 1 - xs);
		break;
	}
	write_reg(par, 0x0022); /* Write Data to GRAM */
}
/*
 * Program the Entry Mode register (R03h): address-increment direction
 * bits for the rotation, plus the BGR bit.
 */
static int set_var(struct fbtft_par *par)
{
	switch (par->info->var.rotate) {
	case 0:
		write_reg(par, 0x3, (par->bgr << 12) | 0x30);
		break;
	case 90:
		write_reg(par, 0x3, (par->bgr << 12) | 0x18);
		break;
	case 180:
		write_reg(par, 0x3, (par->bgr << 12) | 0x00);
		break;
	case 270:
		write_reg(par, 0x3, (par->bgr << 12) | 0x28);
		break;
	}

	return 0;
}
/*
* Gamma string format:
* VRP0 VRP1 RP0 RP1 KP0 KP1 KP2 KP3 KP4 KP5
* VRN0 VRN1 RN0 RN1 KN0 KN1 KN2 KN3 KN4 KN5
*/
/*
 * Program the gamma registers R30h-R3Dh from the two 10-entry curves
 * (positive first, then negative).  Entries are clamped in place to
 * the width of their register fields before being packed two per
 * register.
 */
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
	static const unsigned long mask[] = {
		0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
		0x1f, 0x1f, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
	};
	u32 *pos = curves;				/* positive curve */
	u32 *neg = &curves[par->gamma.num_values];	/* negative curve */
	int i, j;

	/* apply mask */
	for (i = 0; i < 2; i++)
		for (j = 0; j < 10; j++)
			curves[i * par->gamma.num_values + j] &=
				mask[i * par->gamma.num_values + j];

	write_reg(par, 0x0030, pos[5] << 8 | pos[4]);
	write_reg(par, 0x0031, pos[7] << 8 | pos[6]);
	write_reg(par, 0x0032, pos[9] << 8 | pos[8]);
	write_reg(par, 0x0035, pos[3] << 8 | pos[2]);
	write_reg(par, 0x0036, pos[1] << 8 | pos[0]);

	write_reg(par, 0x0037, neg[5] << 8 | neg[4]);
	write_reg(par, 0x0038, neg[7] << 8 | neg[6]);
	write_reg(par, 0x0039, neg[9] << 8 | neg[8]);
	write_reg(par, 0x003C, neg[3] << 8 | neg[2]);
	write_reg(par, 0x003D, neg[1] << 8 | neg[0]);

	return 0;
}
/* Driver description handed to the fbtft core at registration time */
static struct fbtft_display display = {
	.regwidth = 16,			/* 16-bit register index and data */
	.width = WIDTH,
	.height = HEIGHT,
	.gamma_num = 2,			/* two curves (see format comment above) */
	.gamma_len = 10,		/* 10 values per curve */
	.gamma = DEFAULT_GAMMA,
	.fbtftops = {
		.init_display = init_display,
		.set_addr_win = set_addr_win,
		.set_var = set_var,
		.set_gamma = set_gamma,
	},
};

FBTFT_REGISTER_DRIVER(DRVNAME, "ilitek,ili9320", &display);

MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
MODULE_ALIAS("spi:ili9320");
MODULE_ALIAS("platform:ili9320");

MODULE_DESCRIPTION("FB driver for the ILI9320 LCD Controller");
MODULE_AUTHOR("Noralf Tronnes");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/fbtft/fb_ili9320.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the uPD161704 LCD Controller
*
* Copyright (C) 2014 Seong-Woo Kim
*
* Based on fb_ili9325.c by Noralf Tronnes
* Based on ili9325.c by Jeroen Domburg
* Init code from UTFT library by Henning Karlsen
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_bd663474"
#define WIDTH 240
#define HEIGHT 320
#define BPP 16
/*
 * Power up and configure the panel.
 *
 * The register values come verbatim from the Lib_UTFT init sequence
 * (see file header); most of them are opaque controller tuning
 * constants, so only the section structure is annotated.
 */
static int init_display(struct fbtft_par *par)
{
	par->fbtftops.reset(par);

	/* Initialization sequence from Lib_UTFT */

	/* oscillator start */
	write_reg(par, 0x000, 0x0001);	/* oscillator 0: stop, 1: operation */
	mdelay(10);

	/* Power settings */
	write_reg(par, 0x100, 0x0000);	/* power supply setup */
	write_reg(par, 0x101, 0x0000);
	write_reg(par, 0x102, 0x3110);
	write_reg(par, 0x103, 0xe200);
	write_reg(par, 0x110, 0x009d);
	write_reg(par, 0x111, 0x0022);
	write_reg(par, 0x100, 0x0120);
	mdelay(20);

	write_reg(par, 0x100, 0x3120);
	mdelay(80);

	/* Display control */
	write_reg(par, 0x001, 0x0100);
	write_reg(par, 0x002, 0x0000);
	write_reg(par, 0x003, 0x1230);	/* entry mode, same as set_var(rotate=0) */
	write_reg(par, 0x006, 0x0000);
	write_reg(par, 0x007, 0x0101);
	write_reg(par, 0x008, 0x0808);
	write_reg(par, 0x009, 0x0000);
	write_reg(par, 0x00b, 0x0000);
	write_reg(par, 0x00c, 0x0000);
	write_reg(par, 0x00d, 0x0018);

	/* LTPS control settings */
	write_reg(par, 0x012, 0x0000);
	write_reg(par, 0x013, 0x0000);
	write_reg(par, 0x018, 0x0000);
	write_reg(par, 0x019, 0x0000);

	write_reg(par, 0x203, 0x0000);
	write_reg(par, 0x204, 0x0000);

	write_reg(par, 0x210, 0x0000);
	write_reg(par, 0x211, 0x00ef);	/* 0x00ef = WIDTH - 1 */
	write_reg(par, 0x212, 0x0000);
	write_reg(par, 0x213, 0x013f);	/* 0x013f = HEIGHT - 1 */
	write_reg(par, 0x214, 0x0000);
	write_reg(par, 0x215, 0x0000);
	write_reg(par, 0x216, 0x0000);
	write_reg(par, 0x217, 0x0000);

	/* Gray scale settings */
	write_reg(par, 0x300, 0x5343);
	write_reg(par, 0x301, 0x1021);
	write_reg(par, 0x302, 0x0003);
	write_reg(par, 0x303, 0x0011);
	write_reg(par, 0x304, 0x050a);
	write_reg(par, 0x305, 0x4342);
	write_reg(par, 0x306, 0x1100);
	write_reg(par, 0x307, 0x0003);
	write_reg(par, 0x308, 0x1201);
	write_reg(par, 0x309, 0x050a);

	/* RAM access settings */
	write_reg(par, 0x400, 0x4027);
	write_reg(par, 0x401, 0x0000);
	write_reg(par, 0x402, 0x0000);	/* First screen drive position (1) */
	write_reg(par, 0x403, 0x013f);	/* First screen drive position (2) */
	write_reg(par, 0x404, 0x0000);

	write_reg(par, 0x200, 0x0000);
	write_reg(par, 0x201, 0x0000);

	write_reg(par, 0x100, 0x7120);
	write_reg(par, 0x007, 0x0103);
	mdelay(10);
	write_reg(par, 0x007, 0x0113);

	return 0;
}
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	unsigned int rotate = par->info->var.rotate;

	/*
	 * Load the GRAM address counters (R200h = horizontal,
	 * R201h = vertical) with the window start coordinate,
	 * remapped for the current rotation.
	 */
	if (rotate == 0) {
		write_reg(par, 0x0200, xs);
		write_reg(par, 0x0201, ys);
	} else if (rotate == 180) {
		write_reg(par, 0x0200, WIDTH - 1 - xs);
		write_reg(par, 0x0201, HEIGHT - 1 - ys);
	} else if (rotate == 270) {
		write_reg(par, 0x0200, WIDTH - 1 - ys);
		write_reg(par, 0x0201, xs);
	} else if (rotate == 90) {
		write_reg(par, 0x0200, ys);
		write_reg(par, 0x0201, HEIGHT - 1 - xs);
	}

	write_reg(par, 0x202); /* Write Data to GRAM */
}
static int set_var(struct fbtft_par *par)
{
	unsigned int rotate = par->info->var.rotate;

	/* Entry mode (R003h): AM bits select the GRAM update direction */
	if (rotate == 0)
		write_reg(par, 0x003, 0x1230);
	else if (rotate == 180)
		write_reg(par, 0x003, 0x1200);
	else if (rotate == 270)
		write_reg(par, 0x003, 0x1228);
	else if (rotate == 90)
		write_reg(par, 0x003, 0x1218);

	return 0;
}
/* Driver description handed to the fbtft core at registration time */
static struct fbtft_display display = {
	.regwidth = 16,		/* 16-bit register index and data */
	.width = WIDTH,
	.height = HEIGHT,
	.bpp = BPP,		/* RGB565 */
	.fbtftops = {
		.init_display = init_display,
		.set_addr_win = set_addr_win,
		.set_var = set_var,
	},
};

FBTFT_REGISTER_DRIVER(DRVNAME, "hitachi,bd663474", &display);

MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
MODULE_ALIAS("spi:bd663474");
MODULE_ALIAS("platform:bd663474");

MODULE_DESCRIPTION("FB driver for the uPD161704 LCD Controller");
MODULE_AUTHOR("Seong-Woo Kim");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/fbtft/fb_bd663474.c |
// SPDX-License-Identifier: GPL-2.0
/*
* FB driver for the NHD-1.69-160128UGC3 (Newhaven Display International, Inc.)
* using the SEPS525 (Syncoam) LCD Controller
*
* Copyright (C) 2016 Analog Devices Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_seps525"
#define WIDTH 160
#define HEIGHT 128
#define SEPS525_INDEX 0x00
#define SEPS525_STATUS_RD 0x01
#define SEPS525_OSC_CTL 0x02
#define SEPS525_IREF 0x80
#define SEPS525_CLOCK_DIV 0x03
#define SEPS525_REDUCE_CURRENT 0x04
#define SEPS525_SOFT_RST 0x05
#define SEPS525_DISP_ONOFF 0x06
#define SEPS525_PRECHARGE_TIME_R 0x08
#define SEPS525_PRECHARGE_TIME_G 0x09
#define SEPS525_PRECHARGE_TIME_B 0x0A
#define SEPS525_PRECHARGE_CURRENT_R 0x0B
#define SEPS525_PRECHARGE_CURRENT_G 0x0C
#define SEPS525_PRECHARGE_CURRENT_B 0x0D
#define SEPS525_DRIVING_CURRENT_R 0x10
#define SEPS525_DRIVING_CURRENT_G 0x11
#define SEPS525_DRIVING_CURRENT_B 0x12
#define SEPS525_DISPLAYMODE_SET 0x13
#define SEPS525_RGBIF 0x14
#define SEPS525_RGB_POL 0x15
#define SEPS525_MEMORY_WRITEMODE 0x16
#define SEPS525_MX1_ADDR 0x17
#define SEPS525_MX2_ADDR 0x18
#define SEPS525_MY1_ADDR 0x19
#define SEPS525_MY2_ADDR 0x1A
#define SEPS525_MEMORY_ACCESS_POINTER_X 0x20
#define SEPS525_MEMORY_ACCESS_POINTER_Y 0x21
#define SEPS525_DDRAM_DATA_ACCESS_PORT 0x22
#define SEPS525_GRAY_SCALE_TABLE_INDEX 0x50
#define SEPS525_GRAY_SCALE_TABLE_DATA 0x51
#define SEPS525_DUTY 0x28
#define SEPS525_DSL 0x29
#define SEPS525_D1_DDRAM_FAC 0x2E
#define SEPS525_D1_DDRAM_FAR 0x2F
#define SEPS525_D2_DDRAM_SAC 0x31
#define SEPS525_D2_DDRAM_SAR 0x32
#define SEPS525_SCR1_FX1 0x33
#define SEPS525_SCR1_FX2 0x34
#define SEPS525_SCR1_FY1 0x35
#define SEPS525_SCR1_FY2 0x36
#define SEPS525_SCR2_SX1 0x37
#define SEPS525_SCR2_SX2 0x38
#define SEPS525_SCR2_SY1 0x39
#define SEPS525_SCR2_SY2 0x3A
#define SEPS525_SCREEN_SAVER_CONTEROL 0x3B
#define SEPS525_SS_SLEEP_TIMER 0x3C
#define SEPS525_SCREEN_SAVER_MODE 0x3D
#define SEPS525_SS_SCR1_FU 0x3E
#define SEPS525_SS_SCR1_MXY 0x3F
#define SEPS525_SS_SCR2_FU 0x40
#define SEPS525_SS_SCR2_MXY 0x41
#define SEPS525_MOVING_DIRECTION 0x42
#define SEPS525_SS_SCR2_SX1 0x47
#define SEPS525_SS_SCR2_SX2 0x48
#define SEPS525_SS_SCR2_SY1 0x49
#define SEPS525_SS_SCR2_SY2 0x4A
/* SEPS525_DISPLAYMODE_SET */
#define MODE_SWAP_BGR BIT(7)
#define MODE_SM BIT(6)
#define MODE_RD BIT(5)
#define MODE_CD BIT(4)
#define seps525_use_window 0 /* FBTFT doesn't really use it today */
/* Init sequence taken from: Arduino Library for the Adafruit 2.2" display */
/*
 * Power up and configure the SEPS525: oscillator/clock setup,
 * per-channel precharge and driving currents, MCU interface mode,
 * then display on.  Register names are defined above; the values are
 * panel tuning constants from the original init sequence.
 */
static int init_display(struct fbtft_par *par)
{
	par->fbtftops.reset(par);
	usleep_range(1000, 5000);

	/* Disable Oscillator Power Down */
	write_reg(par, SEPS525_REDUCE_CURRENT, 0x03);
	usleep_range(1000, 5000);
	/* Set Normal Driving Current */
	write_reg(par, SEPS525_REDUCE_CURRENT, 0x00);
	usleep_range(1000, 5000);

	write_reg(par, SEPS525_SCREEN_SAVER_CONTEROL, 0x00);
	/* Set EXPORT1 Pin at Internal Clock */
	write_reg(par, SEPS525_OSC_CTL, 0x01);
	/* Set Clock as 120 Frames/Sec */
	write_reg(par, SEPS525_CLOCK_DIV, 0x90);
	/* Set Reference Voltage Controlled by External Resister */
	write_reg(par, SEPS525_IREF, 0x01);

	/* precharge time R G B */
	write_reg(par, SEPS525_PRECHARGE_TIME_R, 0x04);
	write_reg(par, SEPS525_PRECHARGE_TIME_G, 0x05);
	write_reg(par, SEPS525_PRECHARGE_TIME_B, 0x05);

	/* precharge current R G B (uA) */
	write_reg(par, SEPS525_PRECHARGE_CURRENT_R, 0x9D);
	write_reg(par, SEPS525_PRECHARGE_CURRENT_G, 0x8C);
	write_reg(par, SEPS525_PRECHARGE_CURRENT_B, 0x57);

	/* driving current R G B (uA) */
	write_reg(par, SEPS525_DRIVING_CURRENT_R, 0x56);
	write_reg(par, SEPS525_DRIVING_CURRENT_G, 0x4D);
	write_reg(par, SEPS525_DRIVING_CURRENT_B, 0x46);
	/* Set Color Sequence */
	write_reg(par, SEPS525_DISPLAYMODE_SET, 0xA0);
	write_reg(par, SEPS525_RGBIF, 0x01); /* Set MCU Interface Mode */
	/* Set Memory Write Mode */
	write_reg(par, SEPS525_MEMORY_WRITEMODE, 0x66);
	write_reg(par, SEPS525_DUTY, 0x7F); /* 1/128 Duty (0x0F~0x7F) */
	/* Set Mapping RAM Display Start Line (0x00~0x7F) */
	write_reg(par, SEPS525_DSL, 0x00);
	write_reg(par, SEPS525_DISP_ONOFF, 0x01); /* Display On (0x00/0x01) */
	/* Set All Internal Register Value as Normal Mode */
	write_reg(par, SEPS525_SOFT_RST, 0x00);
	/* Set RGB Interface Polarity as Active Low */
	write_reg(par, SEPS525_RGB_POL, 0x00);

	/* leave the DDRAM data port selected so pixel data can follow */
	write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);

	return 0;
}
/*
 * Set the drawing position.  The windowed variant (MX/MY bounds) is
 * compiled out via seps525_use_window (see its definition above:
 * "FBTFT doesn't really use it today"); only the memory access
 * pointer start position is programmed.
 */
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	if (seps525_use_window) {
		/* Set Window Xs,Ys Xe,Ye */
		write_reg(par, SEPS525_MX1_ADDR, xs);
		write_reg(par, SEPS525_MX2_ADDR, xe);
		write_reg(par, SEPS525_MY1_ADDR, ys);
		write_reg(par, SEPS525_MY2_ADDR, ye);
	}
	/* start position X,Y */
	write_reg(par, SEPS525_MEMORY_ACCESS_POINTER_X, xs);
	write_reg(par, SEPS525_MEMORY_ACCESS_POINTER_Y, ys);

	/* re-select the DDRAM data port so pixel data can follow */
	write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);
}
static int set_var(struct fbtft_par *par)
{
	unsigned int rotate = par->info->var.rotate;
	u8 mode;

	/* only 0 and 180 degrees are representable via the RD/CD bits */
	if (rotate == 0)
		mode = 0;
	else if (rotate == 180)
		mode = MODE_RD | MODE_CD;
	else
		return -EINVAL;

	if (par->bgr)
		mode |= MODE_SWAP_BGR;

	/* Memory Access Control */
	write_reg(par, SEPS525_DISPLAYMODE_SET, mode);
	write_reg(par, SEPS525_DDRAM_DATA_ACCESS_PORT);

	return 0;
}
/* Driver description handed to the fbtft core at registration time */
static struct fbtft_display display = {
	.regwidth = 8,		/* 8-bit register index */
	.width = WIDTH,
	.height = HEIGHT,
	.fbtftops = {
		.init_display = init_display,
		.set_addr_win = set_addr_win,
		.set_var = set_var,
	},
};

FBTFT_REGISTER_DRIVER(DRVNAME, "syncoam,seps525", &display);

MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
MODULE_ALIAS("spi:seps525");
MODULE_ALIAS("platform:seps525");

MODULE_DESCRIPTION("FB driver for the SEPS525 LCD Controller");
MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/fbtft/fb_seps525.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the UC1701 LCD Controller
*
* The display is monochrome and the video memory is RGB565.
* Any pixel value except 0 turns the pixel on.
*
* Copyright (C) 2014 Juergen Holzmann
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_uc1701"
#define WIDTH 102
#define HEIGHT 64
#define PAGES (HEIGHT / 8)
/* 1: Display on/off */
#define LCD_DISPLAY_ENABLE 0xAE
/* 2: display start line set */
#define LCD_START_LINE 0x40
/* 3: Page address set (lower 4 bits select one of the pages) */
#define LCD_PAGE_ADDRESS 0xB0
/* 4: column address */
#define LCD_COL_ADDRESS 0x10
/* 8: select orientation */
#define LCD_BOTTOMVIEW 0xA0
/* 9: inverted display */
#define LCD_DISPLAY_INVERT 0xA6
/* 10: show memory content or switch all pixels on */
#define LCD_ALL_PIXEL 0xA4
/* 11: lcd bias set */
#define LCD_BIAS 0xA2
/* 14: Reset Controller */
#define LCD_RESET_CMD 0xE2
/* 15: output mode select (turns display upside-down) */
#define LCD_SCAN_DIR 0xC0
/* 16: power control set */
#define LCD_POWER_CONTROL 0x28
/* 17: voltage regulator resistor ratio set */
#define LCD_VOLTAGE 0x20
/* 18: Volume mode set */
#define LCD_VOLUME_MODE 0x81
/* 22: NOP command */
#define LCD_NO_OP 0xE3
/* 25: advanced program control */
#define LCD_ADV_PROG_CTRL 0xFA
/* 25: advanced program control2 */
#define LCD_ADV_PROG_CTRL2 0x10
#define LCD_TEMPCOMP_HIGH 0x80
/* column offset for normal orientation */
#define SHIFT_ADDR_NORMAL 0
/* column offset for bottom view orientation */
#define SHIFT_ADDR_TOPVIEW 30
/*
 * Reset and configure the UC1701: orientation, bias, power control,
 * contrast (volume) and temperature compensation, then enable the
 * display.  Command mnemonics are defined above; low bits are ORed
 * into the command byte where the command carries a parameter.
 */
static int init_display(struct fbtft_par *par)
{
	par->fbtftops.reset(par);

	/* softreset of LCD */
	write_reg(par, LCD_RESET_CMD);
	mdelay(10);

	/* set startpoint */
	write_reg(par, LCD_START_LINE);

	/* select orientation BOTTOMVIEW */
	write_reg(par, LCD_BOTTOMVIEW | 1);

	/* output mode select (turns display upside-down) */
	write_reg(par, LCD_SCAN_DIR | 0x00);

	/* Normal Pixel mode */
	write_reg(par, LCD_ALL_PIXEL | 0);

	/* positive display */
	write_reg(par, LCD_DISPLAY_INVERT | 0);

	/* bias 1/9 */
	write_reg(par, LCD_BIAS | 0);

	/* power control mode: all features on */
	write_reg(par, LCD_POWER_CONTROL | 0x07);

	/* set voltage regulator R/R */
	write_reg(par, LCD_VOLTAGE | 0x07);

	/* volume mode set (two-byte command: opcode then value) */
	write_reg(par, LCD_VOLUME_MODE);
	write_reg(par, 0x09);
	write_reg(par, LCD_NO_OP);

	/* advanced program control (two-byte command) */
	write_reg(par, LCD_ADV_PROG_CTRL);
	write_reg(par, LCD_ADV_PROG_CTRL2 | LCD_TEMPCOMP_HIGH);

	/* enable display */
	write_reg(par, LCD_DISPLAY_ENABLE | 1);

	return 0;
}
/*
 * Reset the address counters to page 0, column 0.  The window
 * arguments are ignored: write_vmem() re-issues the page/column
 * address before transferring each page anyway.
 */
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	/* goto address */
	write_reg(par, LCD_PAGE_ADDRESS);
	write_reg(par, 0x00);
	write_reg(par, LCD_COL_ADDRESS);
}
/*
 * Convert the RGB565 shadow buffer to the UC1701's monochrome page
 * format and send it, one 8-pixel-high page at a time.  Any non-zero
 * pixel value turns the pixel on.
 *
 * Fix: previously a failed page write was only reported after the
 * loop, so an error on any page but the last was overwritten by the
 * next iteration's return value.  Now we report and bail out on the
 * first failure.
 */
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
	u16 *vmem16 = (u16 *)par->info->screen_buffer;
	u8 *buf;
	int x, y, i;
	int ret = 0;

	for (y = 0; y < PAGES; y++) {
		/* pack one page: bit i of each byte = pixel row (y * 8 + i) */
		buf = par->txbuf.buf;
		for (x = 0; x < WIDTH; x++) {
			*buf = 0x00;
			for (i = 0; i < 8; i++)
				*buf |= (vmem16[((y * 8 * WIDTH) +
						 (i * WIDTH)) + x] ?
					 1 : 0) << i;
			buf++;
		}

		/* position to start of page, then stream it as data */
		write_reg(par, LCD_PAGE_ADDRESS | (u8)y);
		write_reg(par, 0x00);
		write_reg(par, LCD_COL_ADDRESS);
		gpiod_set_value(par->gpio.dc, 1);
		ret = par->fbtftops.write(par, par->txbuf.buf, WIDTH);
		gpiod_set_value(par->gpio.dc, 0);
		if (ret < 0) {
			dev_err(par->info->device,
				"write failed and returned: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
/* Driver description handed to the fbtft core at registration time */
static struct fbtft_display display = {
	.regwidth = 8,		/* 8-bit commands */
	.width = WIDTH,
	.height = HEIGHT,
	.fbtftops = {
		.init_display = init_display,
		.set_addr_win = set_addr_win,
		.write_vmem = write_vmem,	/* custom mono page packing */
	},
	.backlight = 1,
};

FBTFT_REGISTER_DRIVER(DRVNAME, "UltraChip,uc1701", &display);

MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("spi:uc1701");

MODULE_DESCRIPTION("FB driver for the UC1701 LCD Controller");
MODULE_AUTHOR("Juergen Holzmann");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/fbtft/fb_uc1701.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the S6D1121 LCD Controller
*
* Copyright (C) 2013 Roman Rolinsky
*
* Based on fb_ili9325.c by Noralf Tronnes
* Based on ili9325.c by Jeroen Domburg
* Init code from UTFT library by Henning Karlsen
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_s6d1121"
#define WIDTH 240
#define HEIGHT 320
#define BPP 16
#define FPS 20
#define DEFAULT_GAMMA "26 09 24 2C 1F 23 24 25 22 26 25 23 0D 00\n" \
"1C 1A 13 1D 0B 11 12 10 13 15 36 19 00 0D"
/*
 * Power up and configure the S6D1121.  The register values come
 * verbatim from the Lib_UTFT init sequence (see file header) and are
 * opaque controller tuning constants.
 */
static int init_display(struct fbtft_par *par)
{
	par->fbtftops.reset(par);

	/* Initialization sequence from Lib_UTFT */
	write_reg(par, 0x0011, 0x2004);
	write_reg(par, 0x0013, 0xCC00);
	write_reg(par, 0x0015, 0x2600);
	write_reg(par, 0x0014, 0x252A);
	write_reg(par, 0x0012, 0x0033);
	write_reg(par, 0x0013, 0xCC04);
	write_reg(par, 0x0013, 0xCC06);
	write_reg(par, 0x0013, 0xCC4F);
	write_reg(par, 0x0013, 0x674F);
	write_reg(par, 0x0011, 0x2003);
	write_reg(par, 0x0016, 0x0007);
	write_reg(par, 0x0002, 0x0013);
	write_reg(par, 0x0003, 0x0003);	/* entry mode, same as set_var(rotate=0) */
	write_reg(par, 0x0001, 0x0127);
	write_reg(par, 0x0008, 0x0303);
	write_reg(par, 0x000A, 0x000B);
	write_reg(par, 0x000B, 0x0003);
	write_reg(par, 0x000C, 0x0000);
	write_reg(par, 0x0041, 0x0000);
	write_reg(par, 0x0050, 0x0000);
	write_reg(par, 0x0060, 0x0005);
	write_reg(par, 0x0070, 0x000B);
	write_reg(par, 0x0071, 0x0000);
	write_reg(par, 0x0078, 0x0000);
	write_reg(par, 0x007A, 0x0000);
	write_reg(par, 0x0079, 0x0007);
	write_reg(par, 0x0007, 0x0051);
	write_reg(par, 0x0007, 0x0053);
	write_reg(par, 0x0079, 0x0000);

	write_reg(par, 0x0022); /* Write Data to GRAM */

	return 0;
}
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	unsigned int rotate = par->info->var.rotate;

	/*
	 * Load the GRAM address counters (R20h = horizontal,
	 * R21h = vertical) with the window start coordinate,
	 * remapped for the current rotation.
	 */
	if (rotate == 0) {
		write_reg(par, 0x0020, xs);
		write_reg(par, 0x0021, ys);
	} else if (rotate == 180) {
		write_reg(par, 0x0020, WIDTH - 1 - xs);
		write_reg(par, 0x0021, HEIGHT - 1 - ys);
	} else if (rotate == 270) {
		write_reg(par, 0x0020, WIDTH - 1 - ys);
		write_reg(par, 0x0021, xs);
	} else if (rotate == 90) {
		write_reg(par, 0x0020, ys);
		write_reg(par, 0x0021, HEIGHT - 1 - xs);
	}

	write_reg(par, 0x0022); /* Write Data to GRAM */
}
static int set_var(struct fbtft_par *par)
{
	unsigned int rotate = par->info->var.rotate;

	/* Entry mode (R03h): AM bits set GRAM update direction, BGR in bit 12 */
	if (rotate == 0)
		write_reg(par, 0x03, 0x0003 | (par->bgr << 12));
	else if (rotate == 180)
		write_reg(par, 0x03, 0x0000 | (par->bgr << 12));
	else if (rotate == 270)
		write_reg(par, 0x03, 0x000A | (par->bgr << 12));
	else if (rotate == 90)
		write_reg(par, 0x03, 0x0009 | (par->bgr << 12));

	return 0;
}
/*
* Gamma string format:
* PKP0 PKP1 PKP2 PKP3 PKP4 PKP5 PKP6 PKP7 PKP8 PKP9 PKP10 PKP11 VRP0 VRP1
* PKN0 PKN1 PKN2 PKN3 PKN4 PKN5 PKN6 PKN7 PRN8 PRN9 PRN10 PRN11 VRN0 VRN1
*/
/* CURVE(num, idx): entry idx of gamma curve num in the flat curves[] array */
#define CURVE(num, idx) curves[(num) * par->gamma.num_values + (idx)]

/*
 * Program the S6D1121 gamma registers from two 14-entry curves
 * (12 PK values plus 2 VR values each, see the format comment
 * above).  Entries are masked to their register width, then packed
 * two per 16-bit register as (entry 2k+1 << 8) | entry 2k.
 *
 * Fix: register 0x0032 previously packed CURVE(0, 3) into its low
 * byte instead of CURVE(0, 4), breaking the strictly pairwise
 * packing followed by every other register (and re-using entry 3,
 * already written via 0x0031).
 */
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
	/* 6-bit masks for PKP/PKN entries, 5-bit for VRP/VRN (last two) */
	static const unsigned long mask[] = {
		0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f,
		0x3f, 0x3f, 0x1f, 0x1f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f,
		0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x3f, 0x1f, 0x1f,
	};
	int i, j;

	/* apply mask (modifies the caller's curves in place) */
	for (i = 0; i < 2; i++)
		for (j = 0; j < 14; j++)
			CURVE(i, j) &= mask[i * par->gamma.num_values + j];

	/* positive curve PKP0..PKP11 */
	write_reg(par, 0x0030, CURVE(0, 1) << 8 | CURVE(0, 0));
	write_reg(par, 0x0031, CURVE(0, 3) << 8 | CURVE(0, 2));
	write_reg(par, 0x0032, CURVE(0, 5) << 8 | CURVE(0, 4));
	write_reg(par, 0x0033, CURVE(0, 7) << 8 | CURVE(0, 6));
	write_reg(par, 0x0034, CURVE(0, 9) << 8 | CURVE(0, 8));
	write_reg(par, 0x0035, CURVE(0, 11) << 8 | CURVE(0, 10));

	/* negative curve PKN0..PKN11 */
	write_reg(par, 0x0036, CURVE(1, 1) << 8 | CURVE(1, 0));
	write_reg(par, 0x0037, CURVE(1, 3) << 8 | CURVE(1, 2));
	write_reg(par, 0x0038, CURVE(1, 5) << 8 | CURVE(1, 4));
	write_reg(par, 0x0039, CURVE(1, 7) << 8 | CURVE(1, 6));
	write_reg(par, 0x003A, CURVE(1, 9) << 8 | CURVE(1, 8));
	write_reg(par, 0x003B, CURVE(1, 11) << 8 | CURVE(1, 10));

	/* VRP1:VRP0 and VRN1:VRN0 */
	write_reg(par, 0x003C, CURVE(0, 13) << 8 | CURVE(0, 12));
	write_reg(par, 0x003D, CURVE(1, 13) << 8 | CURVE(1, 12));

	return 0;
}
#undef CURVE
/* Driver description handed to the fbtft core at registration time */
static struct fbtft_display display = {
	.regwidth = 16,		/* 16-bit register index and data */
	.width = WIDTH,
	.height = HEIGHT,
	.bpp = BPP,		/* RGB565 */
	.fps = FPS,
	.gamma_num = 2,		/* two curves (see format comment above) */
	.gamma_len = 14,	/* 14 values per curve */
	.gamma = DEFAULT_GAMMA,
	.fbtftops = {
		.init_display = init_display,
		.set_addr_win = set_addr_win,
		.set_var = set_var,
		.set_gamma = set_gamma,
	},
};

FBTFT_REGISTER_DRIVER(DRVNAME, "samsung,s6d1121", &display);

MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("platform:" DRVNAME);
MODULE_ALIAS("spi:s6d1121");
MODULE_ALIAS("platform:s6d1121");

MODULE_DESCRIPTION("FB driver for the S6D1121 LCD Controller");
MODULE_AUTHOR("Roman Rolinsky");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/fbtft/fb_s6d1121.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* FB driver for the PCD8544 LCD Controller
*
* The display is monochrome and the video memory is RGB565.
* Any pixel value except 0 turns the pixel on.
*
* Copyright (C) 2013 Noralf Tronnes
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/delay.h>
#include "fbtft.h"
#define DRVNAME "fb_pcd8544"
#define WIDTH 84
#define HEIGHT 48
#define TXBUFLEN (84 * 6)
#define DEFAULT_GAMMA "40" /* gamma controls the contrast in this driver */
static unsigned int tc;
module_param(tc, uint, 0000);
MODULE_PARM_DESC(tc, "TC[1:0] Temperature coefficient: 0-3 (default: 0)");
static unsigned int bs = 4;
module_param(bs, uint, 0000);
MODULE_PARM_DESC(bs, "BS[2:0] Bias voltage level: 0-7 (default: 4)");
/*
 * Reset and configure the PCD8544: temperature coefficient and bias
 * from the module parameters, vertical addressing, normal display
 * mode.  Contrast (Vop) is set separately via set_gamma().
 */
static int init_display(struct fbtft_par *par)
{
	par->fbtftops.reset(par);

	/* Function set
	 *
	 * 5:1  1
	 * 2:0  PD - Powerdown control: chip is active
	 * 1:0  V  - Entry mode: horizontal addressing
	 * 0:1  H  - Extended instruction set control: extended
	 */
	write_reg(par, 0x21);

	/* H=1 Temperature control
	 *
	 * 2:1  1
	 * 1:x  TC1 - Temperature Coefficient: 0x10
	 * 0:x  TC0
	 */
	write_reg(par, 0x04 | (tc & 0x3));

	/* H=1 Bias system
	 *
	 * 4:1  1
	 * 3:0  0
	 * 2:x  BS2 - Bias System
	 * 1:x  BS1
	 * 0:x  BS0
	 */
	write_reg(par, 0x10 | (bs & 0x7));

	/* Function set
	 *
	 * 5:1  1
	 * 2:0  PD - Powerdown control: chip is active
	 * 1:1  V  - Entry mode: vertical addressing
	 * 0:0  H  - Extended instruction set control: basic
	 */
	write_reg(par, 0x22);

	/* H=0 Display control
	 *
	 * 3:1  1
	 * 2:1  D  - DE: 10=normal mode
	 * 1:0  0
	 * 0:0  E
	 */
	write_reg(par, 0x08 | 4);

	return 0;
}
/*
 * Reset the RAM address to X=0, Y=0.  The window arguments are
 * ignored: write_vmem() always transfers the full frame.
 */
static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
{
	/* H=0 Set X address of RAM
	 *
	 * 7:1  1
	 * 6-0: X[6:0] - 0x00
	 */
	write_reg(par, 0x80);

	/* H=0 Set Y address of RAM
	 *
	 * 7:0  0
	 * 6:1  1
	 * 2-0: Y[2:0] - 0x0
	 */
	write_reg(par, 0x40);
}
static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
{
	u16 *vmem16 = (u16 *)par->info->screen_buffer;
	u8 *out = par->txbuf.buf;
	int col, page, bit;
	int ret = 0;

	/*
	 * Pack the RGB565 shadow buffer into the controller's vertical
	 * addressing format: for each of the 84 columns, six bytes, one
	 * per 8-pixel-high page; bit i = pixel row (page * 8 + i).
	 * Any non-zero pixel value turns the pixel on.
	 */
	for (col = 0; col < 84; col++) {
		for (page = 0; page < 6; page++) {
			u8 packed = 0;

			for (bit = 0; bit < 8; bit++)
				if (vmem16[(page * 8 + bit) * 84 + col])
					packed |= 1 << bit;
			*out++ = packed;
		}
	}

	/* Write data */
	gpiod_set_value(par->gpio.dc, 1);
	ret = par->fbtftops.write(par, par->txbuf.buf, 6 * 84);
	if (ret < 0)
		dev_err(par->info->device, "write failed and returned: %d\n",
			ret);

	return ret;
}
static int set_gamma(struct fbtft_par *par, u32 *curves)
{
	/* Vop (contrast) is 7 bits wide; mask in place as the core expects */
	curves[0] &= 0x7F;

	/* Vop can only be programmed from the extended instruction set */
	write_reg(par, 0x23);
	write_reg(par, 0x80 | curves[0]);
	write_reg(par, 0x22);

	return 0;
}
/* Driver description handed to the fbtft core at registration time */
static struct fbtft_display display = {
	.regwidth = 8,			/* 8-bit commands */
	.width = WIDTH,
	.height = HEIGHT,
	.txbuflen = TXBUFLEN,		/* one full mono frame: 84 * 6 bytes */
	.gamma_num = 1,
	.gamma_len = 1,			/* single value: Vop (contrast) */
	.gamma = DEFAULT_GAMMA,
	.fbtftops = {
		.init_display = init_display,
		.set_addr_win = set_addr_win,
		.write_vmem = write_vmem,	/* custom mono page packing */
		.set_gamma = set_gamma,
	},
	.backlight = 1,
};

FBTFT_REGISTER_DRIVER(DRVNAME, "philips,pcd8544", &display);

MODULE_ALIAS("spi:" DRVNAME);
MODULE_ALIAS("spi:pcd8544");

MODULE_DESCRIPTION("FB driver for the PCD8544 LCD Controller");
MODULE_AUTHOR("Noralf Tronnes");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/fbtft/fb_pcd8544.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Rockchip Video Decoder H264 backend
*
* Copyright (C) 2019 Collabora, Ltd.
* Boris Brezillon <[email protected]>
*
* Copyright (C) 2016 Rockchip Electronics Co., Ltd.
* Jeffy Chen <[email protected]>
*/
#include <media/v4l2-h264.h>
#include <media/v4l2-mem2mem.h>
#include "rkvdec.h"
#include "rkvdec-regs.h"
/* Size with u32 units. */
#define RKV_CABAC_INIT_BUFFER_SIZE (3680 + 128)
#define RKV_RPS_SIZE ((128 + 128) / 4)
#define RKV_ERROR_INFO_SIZE (256 * 144 * 4)
#define RKVDEC_NUM_REFLIST 3
/*
 * Scaling lists as stored in the auxiliary table buffer: six 4x4
 * matrices and six 8x8 matrices, padded to a fixed size.
 * NOTE(review): layout presumably matches what the decoder hardware
 * expects (this struct is embedded in rkvdec_h264_priv_tbl below) —
 * confirm against the hardware documentation.
 */
struct rkvdec_h264_scaling_list {
	u8 scaling_list_4x4[6][16];
	u8 scaling_list_8x8[6][64];
	u8 padding[128];
};

/* One packed SPS/PPS entry: 8 x 32 bits, filled via the PS_FIELD offsets */
struct rkvdec_sps_pps_packet {
	u32 info[8];
};
/*
 * Descriptor of one bit-field inside a packed SPS/PPS packet:
 * bit offset from the start of the packet and width in bits
 * (instantiated via the PS_FIELD() macro below).
 */
struct rkvdec_ps_field {
	u16 offset;
	u8 len;
};
#define PS_FIELD(_offset, _len) \
((struct rkvdec_ps_field){ _offset, _len })
#define SEQ_PARAMETER_SET_ID PS_FIELD(0, 4)
#define PROFILE_IDC PS_FIELD(4, 8)
#define CONSTRAINT_SET3_FLAG PS_FIELD(12, 1)
#define CHROMA_FORMAT_IDC PS_FIELD(13, 2)
#define BIT_DEPTH_LUMA PS_FIELD(15, 3)
#define BIT_DEPTH_CHROMA PS_FIELD(18, 3)
#define QPPRIME_Y_ZERO_TRANSFORM_BYPASS_FLAG PS_FIELD(21, 1)
#define LOG2_MAX_FRAME_NUM_MINUS4 PS_FIELD(22, 4)
#define MAX_NUM_REF_FRAMES PS_FIELD(26, 5)
#define PIC_ORDER_CNT_TYPE PS_FIELD(31, 2)
#define LOG2_MAX_PIC_ORDER_CNT_LSB_MINUS4 PS_FIELD(33, 4)
#define DELTA_PIC_ORDER_ALWAYS_ZERO_FLAG PS_FIELD(37, 1)
#define PIC_WIDTH_IN_MBS PS_FIELD(38, 9)
#define PIC_HEIGHT_IN_MBS PS_FIELD(47, 9)
#define FRAME_MBS_ONLY_FLAG PS_FIELD(56, 1)
#define MB_ADAPTIVE_FRAME_FIELD_FLAG PS_FIELD(57, 1)
#define DIRECT_8X8_INFERENCE_FLAG PS_FIELD(58, 1)
#define MVC_EXTENSION_ENABLE PS_FIELD(59, 1)
#define NUM_VIEWS PS_FIELD(60, 2)
#define VIEW_ID(i) PS_FIELD(62 + ((i) * 10), 10)
#define NUM_ANCHOR_REFS_L(i) PS_FIELD(82 + ((i) * 11), 1)
#define ANCHOR_REF_L(i) PS_FIELD(83 + ((i) * 11), 10)
#define NUM_NON_ANCHOR_REFS_L(i) PS_FIELD(104 + ((i) * 11), 1)
#define NON_ANCHOR_REFS_L(i) PS_FIELD(105 + ((i) * 11), 10)
#define PIC_PARAMETER_SET_ID PS_FIELD(128, 8)
#define PPS_SEQ_PARAMETER_SET_ID PS_FIELD(136, 5)
#define ENTROPY_CODING_MODE_FLAG PS_FIELD(141, 1)
#define BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT_FLAG PS_FIELD(142, 1)
#define NUM_REF_IDX_L_DEFAULT_ACTIVE_MINUS1(i) PS_FIELD(143 + ((i) * 5), 5)
#define WEIGHTED_PRED_FLAG PS_FIELD(153, 1)
#define WEIGHTED_BIPRED_IDC PS_FIELD(154, 2)
#define PIC_INIT_QP_MINUS26 PS_FIELD(156, 7)
#define PIC_INIT_QS_MINUS26 PS_FIELD(163, 6)
#define CHROMA_QP_INDEX_OFFSET PS_FIELD(169, 5)
#define DEBLOCKING_FILTER_CONTROL_PRESENT_FLAG PS_FIELD(174, 1)
#define CONSTRAINED_INTRA_PRED_FLAG PS_FIELD(175, 1)
#define REDUNDANT_PIC_CNT_PRESENT PS_FIELD(176, 1)
#define TRANSFORM_8X8_MODE_FLAG PS_FIELD(177, 1)
#define SECOND_CHROMA_QP_INDEX_OFFSET PS_FIELD(178, 5)
#define SCALING_LIST_ENABLE_FLAG PS_FIELD(183, 1)
#define SCALING_LIST_ADDRESS PS_FIELD(184, 32)
#define IS_LONG_TERM(i) PS_FIELD(216 + (i), 1)
#define DPB_OFFS(i, j) (288 + ((j) * 32 * 7) + ((i) * 7))
#define DPB_INFO(i, j) PS_FIELD(DPB_OFFS(i, j), 5)
#define BOTTOM_FLAG(i, j) PS_FIELD(DPB_OFFS(i, j) + 5, 1)
#define VIEW_INDEX_OFF(i, j) PS_FIELD(DPB_OFFS(i, j) + 6, 1)
/* Data structure describing auxiliary buffer format. */
struct rkvdec_h264_priv_tbl {
	/* constant CABAC init values (built from rkvdec_h264_cabac_table) */
	s8 cabac_table[4][464][2];
	struct rkvdec_h264_scaling_list scaling_list;
	u32 rps[RKV_RPS_SIZE];
	struct rkvdec_sps_pps_packet param_set[256];
	/* NOTE(review): presumably written back by the hardware — confirm */
	u8 err_info[RKV_ERROR_INFO_SIZE];
};

/* P, B0 and B1 reference lists (v4l2-h264 helper representation) */
struct rkvdec_h264_reflists {
	struct v4l2_h264_reference p[V4L2_H264_REF_LIST_LEN];
	struct v4l2_h264_reference b0[V4L2_H264_REF_LIST_LEN];
	struct v4l2_h264_reference b1[V4L2_H264_REF_LIST_LEN];
};

/* Per-decode-run state: control payloads plus the DPB buffers in use */
struct rkvdec_h264_run {
	struct rkvdec_run base;
	const struct v4l2_ctrl_h264_decode_params *decode_params;
	const struct v4l2_ctrl_h264_sps *sps;
	const struct v4l2_ctrl_h264_pps *pps;
	const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
	struct vb2_buffer *ref_buf[V4L2_H264_NUM_DPB_ENTRIES];
};

/* Per-context H264 state: auxiliary table buffer and cached reflists */
struct rkvdec_h264_ctx {
	struct rkvdec_aux_buf priv_tbl;
	struct rkvdec_h264_reflists reflists;
};
#define CABAC_ENTRY(ctxidx, idc0_m, idc0_n, idc1_m, idc1_n, \
idc2_m, idc2_n, intra_m, intra_n) \
[0][(ctxidx)] = {idc0_m, idc0_n}, \
[1][(ctxidx)] = {idc1_m, idc1_n}, \
[2][(ctxidx)] = {idc2_m, idc2_n}, \
[3][(ctxidx)] = {intra_m, intra_n}
/*
 * Constant CABAC table.
 * Built from the tables described in section '9.3.1.1 Initialisation process
 * for context variables' of the H264 spec.
 */
static const s8 rkvdec_h264_cabac_table[4][464][2] = {
	/* Table 9-12 – Values of variables m and n for ctxIdx from 0 to 10 */
	CABAC_ENTRY(0, 20, -15, 20, -15, 20, -15, 20, -15),
	CABAC_ENTRY(1, 2, 54, 2, 54, 2, 54, 2, 54),
	CABAC_ENTRY(2, 3, 74, 3, 74, 3, 74, 3, 74),
	CABAC_ENTRY(3, 20, -15, 20, -15, 20, -15, 20, -15),
	CABAC_ENTRY(4, 2, 54, 2, 54, 2, 54, 2, 54),
	CABAC_ENTRY(5, 3, 74, 3, 74, 3, 74, 3, 74),
	CABAC_ENTRY(6, -28, 127, -28, 127, -28, 127, -28, 127),
	CABAC_ENTRY(7, -23, 104, -23, 104, -23, 104, -23, 104),
	CABAC_ENTRY(8, -6, 53, -6, 53, -6, 53, -6, 53),
	CABAC_ENTRY(9, -1, 54, -1, 54, -1, 54, -1, 54),
	CABAC_ENTRY(10, 7, 51, 7, 51, 7, 51, 7, 51),
	/* Table 9-13 – Values of variables m and n for ctxIdx from 11 to 23 */
	CABAC_ENTRY(11, 23, 33, 22, 25, 29, 16, 0, 0),
	CABAC_ENTRY(12, 23, 2, 34, 0, 25, 0, 0, 0),
	CABAC_ENTRY(13, 21, 0, 16, 0, 14, 0, 0, 0),
	CABAC_ENTRY(14, 1, 9, -2, 9, -10, 51, 0, 0),
	CABAC_ENTRY(15, 0, 49, 4, 41, -3, 62, 0, 0),
	CABAC_ENTRY(16, -37, 118, -29, 118, -27, 99, 0, 0),
	CABAC_ENTRY(17, 5, 57, 2, 65, 26, 16, 0, 0),
	CABAC_ENTRY(18, -13, 78, -6, 71, -4, 85, 0, 0),
	CABAC_ENTRY(19, -11, 65, -13, 79, -24, 102, 0, 0),
	CABAC_ENTRY(20, 1, 62, 5, 52, 5, 57, 0, 0),
	CABAC_ENTRY(21, 12, 49, 9, 50, 6, 57, 0, 0),
	CABAC_ENTRY(22, -4, 73, -3, 70, -17, 73, 0, 0),
	CABAC_ENTRY(23, 17, 50, 10, 54, 14, 57, 0, 0),
	/* Table 9-14 – Values of variables m and n for ctxIdx from 24 to 39 */
	CABAC_ENTRY(24, 18, 64, 26, 34, 20, 40, 0, 0),
	CABAC_ENTRY(25, 9, 43, 19, 22, 20, 10, 0, 0),
	CABAC_ENTRY(26, 29, 0, 40, 0, 29, 0, 0, 0),
	CABAC_ENTRY(27, 26, 67, 57, 2, 54, 0, 0, 0),
	CABAC_ENTRY(28, 16, 90, 41, 36, 37, 42, 0, 0),
	CABAC_ENTRY(29, 9, 104, 26, 69, 12, 97, 0, 0),
	CABAC_ENTRY(30, -46, 127, -45, 127, -32, 127, 0, 0),
	CABAC_ENTRY(31, -20, 104, -15, 101, -22, 117, 0, 0),
	CABAC_ENTRY(32, 1, 67, -4, 76, -2, 74, 0, 0),
	CABAC_ENTRY(33, -13, 78, -6, 71, -4, 85, 0, 0),
	CABAC_ENTRY(34, -11, 65, -13, 79, -24, 102, 0, 0),
	CABAC_ENTRY(35, 1, 62, 5, 52, 5, 57, 0, 0),
	CABAC_ENTRY(36, -6, 86, 6, 69, -6, 93, 0, 0),
	CABAC_ENTRY(37, -17, 95, -13, 90, -14, 88, 0, 0),
	CABAC_ENTRY(38, -6, 61, 0, 52, -6, 44, 0, 0),
	CABAC_ENTRY(39, 9, 45, 8, 43, 4, 55, 0, 0),
	/* Table 9-15 – Values of variables m and n for ctxIdx from 40 to 53 */
	CABAC_ENTRY(40, -3, 69, -2, 69, -11, 89, 0, 0),
	CABAC_ENTRY(41, -6, 81, -5, 82, -15, 103, 0, 0),
	CABAC_ENTRY(42, -11, 96, -10, 96, -21, 116, 0, 0),
	CABAC_ENTRY(43, 6, 55, 2, 59, 19, 57, 0, 0),
	CABAC_ENTRY(44, 7, 67, 2, 75, 20, 58, 0, 0),
	CABAC_ENTRY(45, -5, 86, -3, 87, 4, 84, 0, 0),
	CABAC_ENTRY(46, 2, 88, -3, 100, 6, 96, 0, 0),
	CABAC_ENTRY(47, 0, 58, 1, 56, 1, 63, 0, 0),
	CABAC_ENTRY(48, -3, 76, -3, 74, -5, 85, 0, 0),
	CABAC_ENTRY(49, -10, 94, -6, 85, -13, 106, 0, 0),
	CABAC_ENTRY(50, 5, 54, 0, 59, 5, 63, 0, 0),
	CABAC_ENTRY(51, 4, 69, -3, 81, 6, 75, 0, 0),
	CABAC_ENTRY(52, -3, 81, -7, 86, -3, 90, 0, 0),
	CABAC_ENTRY(53, 0, 88, -5, 95, -1, 101, 0, 0),
	/* Table 9-16 – Values of variables m and n for ctxIdx from 54 to 59 */
	CABAC_ENTRY(54, -7, 67, -1, 66, 3, 55, 0, 0),
	CABAC_ENTRY(55, -5, 74, -1, 77, -4, 79, 0, 0),
	CABAC_ENTRY(56, -4, 74, 1, 70, -2, 75, 0, 0),
	CABAC_ENTRY(57, -5, 80, -2, 86, -12, 97, 0, 0),
	CABAC_ENTRY(58, -7, 72, -5, 72, -7, 50, 0, 0),
	CABAC_ENTRY(59, 1, 58, 0, 61, 1, 60, 0, 0),
	/* Table 9-17 – Values of variables m and n for ctxIdx from 60 to 69 */
	CABAC_ENTRY(60, 0, 41, 0, 41, 0, 41, 0, 41),
	CABAC_ENTRY(61, 0, 63, 0, 63, 0, 63, 0, 63),
	CABAC_ENTRY(62, 0, 63, 0, 63, 0, 63, 0, 63),
	CABAC_ENTRY(63, 0, 63, 0, 63, 0, 63, 0, 63),
	CABAC_ENTRY(64, -9, 83, -9, 83, -9, 83, -9, 83),
	CABAC_ENTRY(65, 4, 86, 4, 86, 4, 86, 4, 86),
	CABAC_ENTRY(66, 0, 97, 0, 97, 0, 97, 0, 97),
	CABAC_ENTRY(67, -7, 72, -7, 72, -7, 72, -7, 72),
	CABAC_ENTRY(68, 13, 41, 13, 41, 13, 41, 13, 41),
	CABAC_ENTRY(69, 3, 62, 3, 62, 3, 62, 3, 62),
	/* Table 9-18 – Values of variables m and n for ctxIdx from 70 to 104 */
	CABAC_ENTRY(70, 0, 45, 13, 15, 7, 34, 0, 11),
	CABAC_ENTRY(71, -4, 78, 7, 51, -9, 88, 1, 55),
	CABAC_ENTRY(72, -3, 96, 2, 80, -20, 127, 0, 69),
	CABAC_ENTRY(73, -27, 126, -39, 127, -36, 127, -17, 127),
	CABAC_ENTRY(74, -28, 98, -18, 91, -17, 91, -13, 102),
	CABAC_ENTRY(75, -25, 101, -17, 96, -14, 95, 0, 82),
	CABAC_ENTRY(76, -23, 67, -26, 81, -25, 84, -7, 74),
	CABAC_ENTRY(77, -28, 82, -35, 98, -25, 86, -21, 107),
	CABAC_ENTRY(78, -20, 94, -24, 102, -12, 89, -27, 127),
	CABAC_ENTRY(79, -16, 83, -23, 97, -17, 91, -31, 127),
	CABAC_ENTRY(80, -22, 110, -27, 119, -31, 127, -24, 127),
	CABAC_ENTRY(81, -21, 91, -24, 99, -14, 76, -18, 95),
	CABAC_ENTRY(82, -18, 102, -21, 110, -18, 103, -27, 127),
	CABAC_ENTRY(83, -13, 93, -18, 102, -13, 90, -21, 114),
	CABAC_ENTRY(84, -29, 127, -36, 127, -37, 127, -30, 127),
	CABAC_ENTRY(85, -7, 92, 0, 80, 11, 80, -17, 123),
	CABAC_ENTRY(86, -5, 89, -5, 89, 5, 76, -12, 115),
	CABAC_ENTRY(87, -7, 96, -7, 94, 2, 84, -16, 122),
	CABAC_ENTRY(88, -13, 108, -4, 92, 5, 78, -11, 115),
	CABAC_ENTRY(89, -3, 46, 0, 39, -6, 55, -12, 63),
	CABAC_ENTRY(90, -1, 65, 0, 65, 4, 61, -2, 68),
	CABAC_ENTRY(91, -1, 57, -15, 84, -14, 83, -15, 84),
	CABAC_ENTRY(92, -9, 93, -35, 127, -37, 127, -13, 104),
	CABAC_ENTRY(93, -3, 74, -2, 73, -5, 79, -3, 70),
	CABAC_ENTRY(94, -9, 92, -12, 104, -11, 104, -8, 93),
	CABAC_ENTRY(95, -8, 87, -9, 91, -11, 91, -10, 90),
	CABAC_ENTRY(96, -23, 126, -31, 127, -30, 127, -30, 127),
	CABAC_ENTRY(97, 5, 54, 3, 55, 0, 65, -1, 74),
	CABAC_ENTRY(98, 6, 60, 7, 56, -2, 79, -6, 97),
	CABAC_ENTRY(99, 6, 59, 7, 55, 0, 72, -7, 91),
	CABAC_ENTRY(100, 6, 69, 8, 61, -4, 92, -20, 127),
	CABAC_ENTRY(101, -1, 48, -3, 53, -6, 56, -4, 56),
	CABAC_ENTRY(102, 0, 68, 0, 68, 3, 68, -5, 82),
	CABAC_ENTRY(103, -4, 69, -7, 74, -8, 71, -7, 76),
	CABAC_ENTRY(104, -8, 88, -9, 88, -13, 98, -22, 125),
	/* Table 9-19 – Values of variables m and n for ctxIdx from 105 to 165 */
	CABAC_ENTRY(105, -2, 85, -13, 103, -4, 86, -7, 93),
	CABAC_ENTRY(106, -6, 78, -13, 91, -12, 88, -11, 87),
	CABAC_ENTRY(107, -1, 75, -9, 89, -5, 82, -3, 77),
	CABAC_ENTRY(108, -7, 77, -14, 92, -3, 72, -5, 71),
	CABAC_ENTRY(109, 2, 54, -8, 76, -4, 67, -4, 63),
	CABAC_ENTRY(110, 5, 50, -12, 87, -8, 72, -4, 68),
	CABAC_ENTRY(111, -3, 68, -23, 110, -16, 89, -12, 84),
	CABAC_ENTRY(112, 1, 50, -24, 105, -9, 69, -7, 62),
	CABAC_ENTRY(113, 6, 42, -10, 78, -1, 59, -7, 65),
	CABAC_ENTRY(114, -4, 81, -20, 112, 5, 66, 8, 61),
	CABAC_ENTRY(115, 1, 63, -17, 99, 4, 57, 5, 56),
	CABAC_ENTRY(116, -4, 70, -78, 127, -4, 71, -2, 66),
	CABAC_ENTRY(117, 0, 67, -70, 127, -2, 71, 1, 64),
	CABAC_ENTRY(118, 2, 57, -50, 127, 2, 58, 0, 61),
	CABAC_ENTRY(119, -2, 76, -46, 127, -1, 74, -2, 78),
	CABAC_ENTRY(120, 11, 35, -4, 66, -4, 44, 1, 50),
	CABAC_ENTRY(121, 4, 64, -5, 78, -1, 69, 7, 52),
	CABAC_ENTRY(122, 1, 61, -4, 71, 0, 62, 10, 35),
	CABAC_ENTRY(123, 11, 35, -8, 72, -7, 51, 0, 44),
	CABAC_ENTRY(124, 18, 25, 2, 59, -4, 47, 11, 38),
	CABAC_ENTRY(125, 12, 24, -1, 55, -6, 42, 1, 45),
	CABAC_ENTRY(126, 13, 29, -7, 70, -3, 41, 0, 46),
	CABAC_ENTRY(127, 13, 36, -6, 75, -6, 53, 5, 44),
	CABAC_ENTRY(128, -10, 93, -8, 89, 8, 76, 31, 17),
	CABAC_ENTRY(129, -7, 73, -34, 119, -9, 78, 1, 51),
	CABAC_ENTRY(130, -2, 73, -3, 75, -11, 83, 7, 50),
	CABAC_ENTRY(131, 13, 46, 32, 20, 9, 52, 28, 19),
	CABAC_ENTRY(132, 9, 49, 30, 22, 0, 67, 16, 33),
	CABAC_ENTRY(133, -7, 100, -44, 127, -5, 90, 14, 62),
	CABAC_ENTRY(134, 9, 53, 0, 54, 1, 67, -13, 108),
	CABAC_ENTRY(135, 2, 53, -5, 61, -15, 72, -15, 100),
	CABAC_ENTRY(136, 5, 53, 0, 58, -5, 75, -13, 101),
	CABAC_ENTRY(137, -2, 61, -1, 60, -8, 80, -13, 91),
	CABAC_ENTRY(138, 0, 56, -3, 61, -21, 83, -12, 94),
	CABAC_ENTRY(139, 0, 56, -8, 67, -21, 64, -10, 88),
	CABAC_ENTRY(140, -13, 63, -25, 84, -13, 31, -16, 84),
	CABAC_ENTRY(141, -5, 60, -14, 74, -25, 64, -10, 86),
	CABAC_ENTRY(142, -1, 62, -5, 65, -29, 94, -7, 83),
	CABAC_ENTRY(143, 4, 57, 5, 52, 9, 75, -13, 87),
	CABAC_ENTRY(144, -6, 69, 2, 57, 17, 63, -19, 94),
	CABAC_ENTRY(145, 4, 57, 0, 61, -8, 74, 1, 70),
	CABAC_ENTRY(146, 14, 39, -9, 69, -5, 35, 0, 72),
	CABAC_ENTRY(147, 4, 51, -11, 70, -2, 27, -5, 74),
	CABAC_ENTRY(148, 13, 68, 18, 55, 13, 91, 18, 59),
	CABAC_ENTRY(149, 3, 64, -4, 71, 3, 65, -8, 102),
	CABAC_ENTRY(150, 1, 61, 0, 58, -7, 69, -15, 100),
	CABAC_ENTRY(151, 9, 63, 7, 61, 8, 77, 0, 95),
	CABAC_ENTRY(152, 7, 50, 9, 41, -10, 66, -4, 75),
	CABAC_ENTRY(153, 16, 39, 18, 25, 3, 62, 2, 72),
	CABAC_ENTRY(154, 5, 44, 9, 32, -3, 68, -11, 75),
	CABAC_ENTRY(155, 4, 52, 5, 43, -20, 81, -3, 71),
	CABAC_ENTRY(156, 11, 48, 9, 47, 0, 30, 15, 46),
	CABAC_ENTRY(157, -5, 60, 0, 44, 1, 7, -13, 69),
	CABAC_ENTRY(158, -1, 59, 0, 51, -3, 23, 0, 62),
	CABAC_ENTRY(159, 0, 59, 2, 46, -21, 74, 0, 65),
	CABAC_ENTRY(160, 22, 33, 19, 38, 16, 66, 21, 37),
	CABAC_ENTRY(161, 5, 44, -4, 66, -23, 124, -15, 72),
	CABAC_ENTRY(162, 14, 43, 15, 38, 17, 37, 9, 57),
	CABAC_ENTRY(163, -1, 78, 12, 42, 44, -18, 16, 54),
	CABAC_ENTRY(164, 0, 60, 9, 34, 50, -34, 0, 62),
	CABAC_ENTRY(165, 9, 69, 0, 89, -22, 127, 12, 72),
	/* Table 9-20 – Values of variables m and n for ctxIdx from 166 to 226 */
	CABAC_ENTRY(166, 11, 28, 4, 45, 4, 39, 24, 0),
	CABAC_ENTRY(167, 2, 40, 10, 28, 0, 42, 15, 9),
	CABAC_ENTRY(168, 3, 44, 10, 31, 7, 34, 8, 25),
	CABAC_ENTRY(169, 0, 49, 33, -11, 11, 29, 13, 18),
	CABAC_ENTRY(170, 0, 46, 52, -43, 8, 31, 15, 9),
	CABAC_ENTRY(171, 2, 44, 18, 15, 6, 37, 13, 19),
	CABAC_ENTRY(172, 2, 51, 28, 0, 7, 42, 10, 37),
	CABAC_ENTRY(173, 0, 47, 35, -22, 3, 40, 12, 18),
	CABAC_ENTRY(174, 4, 39, 38, -25, 8, 33, 6, 29),
	CABAC_ENTRY(175, 2, 62, 34, 0, 13, 43, 20, 33),
	CABAC_ENTRY(176, 6, 46, 39, -18, 13, 36, 15, 30),
	CABAC_ENTRY(177, 0, 54, 32, -12, 4, 47, 4, 45),
	CABAC_ENTRY(178, 3, 54, 102, -94, 3, 55, 1, 58),
	CABAC_ENTRY(179, 2, 58, 0, 0, 2, 58, 0, 62),
	CABAC_ENTRY(180, 4, 63, 56, -15, 6, 60, 7, 61),
	CABAC_ENTRY(181, 6, 51, 33, -4, 8, 44, 12, 38),
	CABAC_ENTRY(182, 6, 57, 29, 10, 11, 44, 11, 45),
	CABAC_ENTRY(183, 7, 53, 37, -5, 14, 42, 15, 39),
	CABAC_ENTRY(184, 6, 52, 51, -29, 7, 48, 11, 42),
	CABAC_ENTRY(185, 6, 55, 39, -9, 4, 56, 13, 44),
	CABAC_ENTRY(186, 11, 45, 52, -34, 4, 52, 16, 45),
	CABAC_ENTRY(187, 14, 36, 69, -58, 13, 37, 12, 41),
	CABAC_ENTRY(188, 8, 53, 67, -63, 9, 49, 10, 49),
	CABAC_ENTRY(189, -1, 82, 44, -5, 19, 58, 30, 34),
	CABAC_ENTRY(190, 7, 55, 32, 7, 10, 48, 18, 42),
	CABAC_ENTRY(191, -3, 78, 55, -29, 12, 45, 10, 55),
	CABAC_ENTRY(192, 15, 46, 32, 1, 0, 69, 17, 51),
	CABAC_ENTRY(193, 22, 31, 0, 0, 20, 33, 17, 46),
	CABAC_ENTRY(194, -1, 84, 27, 36, 8, 63, 0, 89),
	CABAC_ENTRY(195, 25, 7, 33, -25, 35, -18, 26, -19),
	CABAC_ENTRY(196, 30, -7, 34, -30, 33, -25, 22, -17),
	CABAC_ENTRY(197, 28, 3, 36, -28, 28, -3, 26, -17),
	CABAC_ENTRY(198, 28, 4, 38, -28, 24, 10, 30, -25),
	CABAC_ENTRY(199, 32, 0, 38, -27, 27, 0, 28, -20),
	CABAC_ENTRY(200, 34, -1, 34, -18, 34, -14, 33, -23),
	CABAC_ENTRY(201, 30, 6, 35, -16, 52, -44, 37, -27),
	CABAC_ENTRY(202, 30, 6, 34, -14, 39, -24, 33, -23),
	CABAC_ENTRY(203, 32, 9, 32, -8, 19, 17, 40, -28),
	CABAC_ENTRY(204, 31, 19, 37, -6, 31, 25, 38, -17),
	CABAC_ENTRY(205, 26, 27, 35, 0, 36, 29, 33, -11),
	CABAC_ENTRY(206, 26, 30, 30, 10, 24, 33, 40, -15),
	CABAC_ENTRY(207, 37, 20, 28, 18, 34, 15, 41, -6),
	CABAC_ENTRY(208, 28, 34, 26, 25, 30, 20, 38, 1),
	CABAC_ENTRY(209, 17, 70, 29, 41, 22, 73, 41, 17),
	CABAC_ENTRY(210, 1, 67, 0, 75, 20, 34, 30, -6),
	CABAC_ENTRY(211, 5, 59, 2, 72, 19, 31, 27, 3),
	CABAC_ENTRY(212, 9, 67, 8, 77, 27, 44, 26, 22),
	CABAC_ENTRY(213, 16, 30, 14, 35, 19, 16, 37, -16),
	CABAC_ENTRY(214, 18, 32, 18, 31, 15, 36, 35, -4),
	CABAC_ENTRY(215, 18, 35, 17, 35, 15, 36, 38, -8),
	CABAC_ENTRY(216, 22, 29, 21, 30, 21, 28, 38, -3),
	CABAC_ENTRY(217, 24, 31, 17, 45, 25, 21, 37, 3),
	CABAC_ENTRY(218, 23, 38, 20, 42, 30, 20, 38, 5),
	CABAC_ENTRY(219, 18, 43, 18, 45, 31, 12, 42, 0),
	CABAC_ENTRY(220, 20, 41, 27, 26, 27, 16, 35, 16),
	CABAC_ENTRY(221, 11, 63, 16, 54, 24, 42, 39, 22),
	CABAC_ENTRY(222, 9, 59, 7, 66, 0, 93, 14, 48),
	CABAC_ENTRY(223, 9, 64, 16, 56, 14, 56, 27, 37),
	CABAC_ENTRY(224, -1, 94, 11, 73, 15, 57, 21, 60),
	CABAC_ENTRY(225, -2, 89, 10, 67, 26, 38, 12, 68),
	CABAC_ENTRY(226, -9, 108, -10, 116, -24, 127, 2, 97),
	/* Table 9-21 – Values of variables m and n for ctxIdx from 227 to 275 */
	CABAC_ENTRY(227, -6, 76, -23, 112, -24, 115, -3, 71),
	CABAC_ENTRY(228, -2, 44, -15, 71, -22, 82, -6, 42),
	CABAC_ENTRY(229, 0, 45, -7, 61, -9, 62, -5, 50),
	CABAC_ENTRY(230, 0, 52, 0, 53, 0, 53, -3, 54),
	CABAC_ENTRY(231, -3, 64, -5, 66, 0, 59, -2, 62),
	CABAC_ENTRY(232, -2, 59, -11, 77, -14, 85, 0, 58),
	CABAC_ENTRY(233, -4, 70, -9, 80, -13, 89, 1, 63),
	CABAC_ENTRY(234, -4, 75, -9, 84, -13, 94, -2, 72),
	CABAC_ENTRY(235, -8, 82, -10, 87, -11, 92, -1, 74),
	CABAC_ENTRY(236, -17, 102, -34, 127, -29, 127, -9, 91),
	CABAC_ENTRY(237, -9, 77, -21, 101, -21, 100, -5, 67),
	CABAC_ENTRY(238, 3, 24, -3, 39, -14, 57, -5, 27),
	CABAC_ENTRY(239, 0, 42, -5, 53, -12, 67, -3, 39),
	CABAC_ENTRY(240, 0, 48, -7, 61, -11, 71, -2, 44),
	CABAC_ENTRY(241, 0, 55, -11, 75, -10, 77, 0, 46),
	CABAC_ENTRY(242, -6, 59, -15, 77, -21, 85, -16, 64),
	CABAC_ENTRY(243, -7, 71, -17, 91, -16, 88, -8, 68),
	CABAC_ENTRY(244, -12, 83, -25, 107, -23, 104, -10, 78),
	CABAC_ENTRY(245, -11, 87, -25, 111, -15, 98, -6, 77),
	CABAC_ENTRY(246, -30, 119, -28, 122, -37, 127, -10, 86),
	CABAC_ENTRY(247, 1, 58, -11, 76, -10, 82, -12, 92),
	CABAC_ENTRY(248, -3, 29, -10, 44, -8, 48, -15, 55),
	CABAC_ENTRY(249, -1, 36, -10, 52, -8, 61, -10, 60),
	CABAC_ENTRY(250, 1, 38, -10, 57, -8, 66, -6, 62),
	CABAC_ENTRY(251, 2, 43, -9, 58, -7, 70, -4, 65),
	CABAC_ENTRY(252, -6, 55, -16, 72, -14, 75, -12, 73),
	CABAC_ENTRY(253, 0, 58, -7, 69, -10, 79, -8, 76),
	CABAC_ENTRY(254, 0, 64, -4, 69, -9, 83, -7, 80),
	CABAC_ENTRY(255, -3, 74, -5, 74, -12, 92, -9, 88),
	CABAC_ENTRY(256, -10, 90, -9, 86, -18, 108, -17, 110),
	CABAC_ENTRY(257, 0, 70, 2, 66, -4, 79, -11, 97),
	CABAC_ENTRY(258, -4, 29, -9, 34, -22, 69, -20, 84),
	CABAC_ENTRY(259, 5, 31, 1, 32, -16, 75, -11, 79),
	CABAC_ENTRY(260, 7, 42, 11, 31, -2, 58, -6, 73),
	CABAC_ENTRY(261, 1, 59, 5, 52, 1, 58, -4, 74),
	CABAC_ENTRY(262, -2, 58, -2, 55, -13, 78, -13, 86),
	CABAC_ENTRY(263, -3, 72, -2, 67, -9, 83, -13, 96),
	CABAC_ENTRY(264, -3, 81, 0, 73, -4, 81, -11, 97),
	CABAC_ENTRY(265, -11, 97, -8, 89, -13, 99, -19, 117),
	CABAC_ENTRY(266, 0, 58, 3, 52, -13, 81, -8, 78),
	CABAC_ENTRY(267, 8, 5, 7, 4, -6, 38, -5, 33),
	CABAC_ENTRY(268, 10, 14, 10, 8, -13, 62, -4, 48),
	CABAC_ENTRY(269, 14, 18, 17, 8, -6, 58, -2, 53),
	CABAC_ENTRY(270, 13, 27, 16, 19, -2, 59, -3, 62),
	CABAC_ENTRY(271, 2, 40, 3, 37, -16, 73, -13, 71),
	CABAC_ENTRY(272, 0, 58, -1, 61, -10, 76, -10, 79),
	CABAC_ENTRY(273, -3, 70, -5, 73, -13, 86, -12, 86),
	CABAC_ENTRY(274, -6, 79, -1, 70, -9, 83, -13, 90),
	CABAC_ENTRY(275, -8, 85, -4, 78, -10, 87, -14, 97),
	/*
	 * NOTE: ctxIdx 276 is intentionally absent (the spec tables skip it;
	 * its designated-initializer slot stays zero).
	 */
	/* Table 9-22 – Values of variables m and n for ctxIdx from 277 to 337 */
	CABAC_ENTRY(277, -13, 106, -21, 126, -22, 127, -6, 93),
	CABAC_ENTRY(278, -16, 106, -23, 124, -25, 127, -6, 84),
	CABAC_ENTRY(279, -10, 87, -20, 110, -25, 120, -8, 79),
	CABAC_ENTRY(280, -21, 114, -26, 126, -27, 127, 0, 66),
	CABAC_ENTRY(281, -18, 110, -25, 124, -19, 114, -1, 71),
	CABAC_ENTRY(282, -14, 98, -17, 105, -23, 117, 0, 62),
	CABAC_ENTRY(283, -22, 110, -27, 121, -25, 118, -2, 60),
	CABAC_ENTRY(284, -21, 106, -27, 117, -26, 117, -2, 59),
	CABAC_ENTRY(285, -18, 103, -17, 102, -24, 113, -5, 75),
	CABAC_ENTRY(286, -21, 107, -26, 117, -28, 118, -3, 62),
	CABAC_ENTRY(287, -23, 108, -27, 116, -31, 120, -4, 58),
	CABAC_ENTRY(288, -26, 112, -33, 122, -37, 124, -9, 66),
	CABAC_ENTRY(289, -10, 96, -10, 95, -10, 94, -1, 79),
	CABAC_ENTRY(290, -12, 95, -14, 100, -15, 102, 0, 71),
	CABAC_ENTRY(291, -5, 91, -8, 95, -10, 99, 3, 68),
	CABAC_ENTRY(292, -9, 93, -17, 111, -13, 106, 10, 44),
	CABAC_ENTRY(293, -22, 94, -28, 114, -50, 127, -7, 62),
	CABAC_ENTRY(294, -5, 86, -6, 89, -5, 92, 15, 36),
	CABAC_ENTRY(295, 9, 67, -2, 80, 17, 57, 14, 40),
	CABAC_ENTRY(296, -4, 80, -4, 82, -5, 86, 16, 27),
	CABAC_ENTRY(297, -10, 85, -9, 85, -13, 94, 12, 29),
	CABAC_ENTRY(298, -1, 70, -8, 81, -12, 91, 1, 44),
	CABAC_ENTRY(299, 7, 60, -1, 72, -2, 77, 20, 36),
	CABAC_ENTRY(300, 9, 58, 5, 64, 0, 71, 18, 32),
	CABAC_ENTRY(301, 5, 61, 1, 67, -1, 73, 5, 42),
	CABAC_ENTRY(302, 12, 50, 9, 56, 4, 64, 1, 48),
	CABAC_ENTRY(303, 15, 50, 0, 69, -7, 81, 10, 62),
	CABAC_ENTRY(304, 18, 49, 1, 69, 5, 64, 17, 46),
	CABAC_ENTRY(305, 17, 54, 7, 69, 15, 57, 9, 64),
	CABAC_ENTRY(306, 10, 41, -7, 69, 1, 67, -12, 104),
	CABAC_ENTRY(307, 7, 46, -6, 67, 0, 68, -11, 97),
	CABAC_ENTRY(308, -1, 51, -16, 77, -10, 67, -16, 96),
	CABAC_ENTRY(309, 7, 49, -2, 64, 1, 68, -7, 88),
	CABAC_ENTRY(310, 8, 52, 2, 61, 0, 77, -8, 85),
	CABAC_ENTRY(311, 9, 41, -6, 67, 2, 64, -7, 85),
	CABAC_ENTRY(312, 6, 47, -3, 64, 0, 68, -9, 85),
	CABAC_ENTRY(313, 2, 55, 2, 57, -5, 78, -13, 88),
	CABAC_ENTRY(314, 13, 41, -3, 65, 7, 55, 4, 66),
	CABAC_ENTRY(315, 10, 44, -3, 66, 5, 59, -3, 77),
	CABAC_ENTRY(316, 6, 50, 0, 62, 2, 65, -3, 76),
	CABAC_ENTRY(317, 5, 53, 9, 51, 14, 54, -6, 76),
	CABAC_ENTRY(318, 13, 49, -1, 66, 15, 44, 10, 58),
	CABAC_ENTRY(319, 4, 63, -2, 71, 5, 60, -1, 76),
	CABAC_ENTRY(320, 6, 64, -2, 75, 2, 70, -1, 83),
	CABAC_ENTRY(321, -2, 69, -1, 70, -2, 76, -7, 99),
	CABAC_ENTRY(322, -2, 59, -9, 72, -18, 86, -14, 95),
	CABAC_ENTRY(323, 6, 70, 14, 60, 12, 70, 2, 95),
	CABAC_ENTRY(324, 10, 44, 16, 37, 5, 64, 0, 76),
	CABAC_ENTRY(325, 9, 31, 0, 47, -12, 70, -5, 74),
	CABAC_ENTRY(326, 12, 43, 18, 35, 11, 55, 0, 70),
	CABAC_ENTRY(327, 3, 53, 11, 37, 5, 56, -11, 75),
	CABAC_ENTRY(328, 14, 34, 12, 41, 0, 69, 1, 68),
	CABAC_ENTRY(329, 10, 38, 10, 41, 2, 65, 0, 65),
	CABAC_ENTRY(330, -3, 52, 2, 48, -6, 74, -14, 73),
	CABAC_ENTRY(331, 13, 40, 12, 41, 5, 54, 3, 62),
	CABAC_ENTRY(332, 17, 32, 13, 41, 7, 54, 4, 62),
	CABAC_ENTRY(333, 7, 44, 0, 59, -6, 76, -1, 68),
	CABAC_ENTRY(334, 7, 38, 3, 50, -11, 82, -13, 75),
	CABAC_ENTRY(335, 13, 50, 19, 40, -2, 77, 11, 55),
	CABAC_ENTRY(336, 10, 57, 3, 66, -2, 77, 5, 64),
	CABAC_ENTRY(337, 26, 43, 18, 50, 25, 42, 12, 70),
	/* Table 9-23 – Values of variables m and n for ctxIdx from 338 to 398 */
	CABAC_ENTRY(338, 14, 11, 19, -6, 17, -13, 15, 6),
	CABAC_ENTRY(339, 11, 14, 18, -6, 16, -9, 6, 19),
	CABAC_ENTRY(340, 9, 11, 14, 0, 17, -12, 7, 16),
	CABAC_ENTRY(341, 18, 11, 26, -12, 27, -21, 12, 14),
	CABAC_ENTRY(342, 21, 9, 31, -16, 37, -30, 18, 13),
	CABAC_ENTRY(343, 23, -2, 33, -25, 41, -40, 13, 11),
	CABAC_ENTRY(344, 32, -15, 33, -22, 42, -41, 13, 15),
	CABAC_ENTRY(345, 32, -15, 37, -28, 48, -47, 15, 16),
	CABAC_ENTRY(346, 34, -21, 39, -30, 39, -32, 12, 23),
	CABAC_ENTRY(347, 39, -23, 42, -30, 46, -40, 13, 23),
	CABAC_ENTRY(348, 42, -33, 47, -42, 52, -51, 15, 20),
	CABAC_ENTRY(349, 41, -31, 45, -36, 46, -41, 14, 26),
	CABAC_ENTRY(350, 46, -28, 49, -34, 52, -39, 14, 44),
	CABAC_ENTRY(351, 38, -12, 41, -17, 43, -19, 17, 40),
	CABAC_ENTRY(352, 21, 29, 32, 9, 32, 11, 17, 47),
	CABAC_ENTRY(353, 45, -24, 69, -71, 61, -55, 24, 17),
	CABAC_ENTRY(354, 53, -45, 63, -63, 56, -46, 21, 21),
	CABAC_ENTRY(355, 48, -26, 66, -64, 62, -50, 25, 22),
	CABAC_ENTRY(356, 65, -43, 77, -74, 81, -67, 31, 27),
	CABAC_ENTRY(357, 43, -19, 54, -39, 45, -20, 22, 29),
	CABAC_ENTRY(358, 39, -10, 52, -35, 35, -2, 19, 35),
	CABAC_ENTRY(359, 30, 9, 41, -10, 28, 15, 14, 50),
	CABAC_ENTRY(360, 18, 26, 36, 0, 34, 1, 10, 57),
	CABAC_ENTRY(361, 20, 27, 40, -1, 39, 1, 7, 63),
	CABAC_ENTRY(362, 0, 57, 30, 14, 30, 17, -2, 77),
	CABAC_ENTRY(363, -14, 82, 28, 26, 20, 38, -4, 82),
	CABAC_ENTRY(364, -5, 75, 23, 37, 18, 45, -3, 94),
	CABAC_ENTRY(365, -19, 97, 12, 55, 15, 54, 9, 69),
	CABAC_ENTRY(366, -35, 125, 11, 65, 0, 79, -12, 109),
	CABAC_ENTRY(367, 27, 0, 37, -33, 36, -16, 36, -35),
	CABAC_ENTRY(368, 28, 0, 39, -36, 37, -14, 36, -34),
	CABAC_ENTRY(369, 31, -4, 40, -37, 37, -17, 32, -26),
	CABAC_ENTRY(370, 27, 6, 38, -30, 32, 1, 37, -30),
	CABAC_ENTRY(371, 34, 8, 46, -33, 34, 15, 44, -32),
	CABAC_ENTRY(372, 30, 10, 42, -30, 29, 15, 34, -18),
	CABAC_ENTRY(373, 24, 22, 40, -24, 24, 25, 34, -15),
	CABAC_ENTRY(374, 33, 19, 49, -29, 34, 22, 40, -15),
	CABAC_ENTRY(375, 22, 32, 38, -12, 31, 16, 33, -7),
	CABAC_ENTRY(376, 26, 31, 40, -10, 35, 18, 35, -5),
	CABAC_ENTRY(377, 21, 41, 38, -3, 31, 28, 33, 0),
	CABAC_ENTRY(378, 26, 44, 46, -5, 33, 41, 38, 2),
	CABAC_ENTRY(379, 23, 47, 31, 20, 36, 28, 33, 13),
	CABAC_ENTRY(380, 16, 65, 29, 30, 27, 47, 23, 35),
	CABAC_ENTRY(381, 14, 71, 25, 44, 21, 62, 13, 58),
	CABAC_ENTRY(382, 8, 60, 12, 48, 18, 31, 29, -3),
	CABAC_ENTRY(383, 6, 63, 11, 49, 19, 26, 26, 0),
	CABAC_ENTRY(384, 17, 65, 26, 45, 36, 24, 22, 30),
	CABAC_ENTRY(385, 21, 24, 22, 22, 24, 23, 31, -7),
	CABAC_ENTRY(386, 23, 20, 23, 22, 27, 16, 35, -15),
	CABAC_ENTRY(387, 26, 23, 27, 21, 24, 30, 34, -3),
	CABAC_ENTRY(388, 27, 32, 33, 20, 31, 29, 34, 3),
	CABAC_ENTRY(389, 28, 23, 26, 28, 22, 41, 36, -1),
	CABAC_ENTRY(390, 28, 24, 30, 24, 22, 42, 34, 5),
	CABAC_ENTRY(391, 23, 40, 27, 34, 16, 60, 32, 11),
	CABAC_ENTRY(392, 24, 32, 18, 42, 15, 52, 35, 5),
	CABAC_ENTRY(393, 28, 29, 25, 39, 14, 60, 34, 12),
	CABAC_ENTRY(394, 23, 42, 18, 50, 3, 78, 39, 11),
	CABAC_ENTRY(395, 19, 57, 12, 70, -16, 123, 30, 29),
	CABAC_ENTRY(396, 22, 53, 21, 54, 21, 53, 34, 26),
	CABAC_ENTRY(397, 22, 61, 14, 71, 22, 56, 29, 39),
	CABAC_ENTRY(398, 11, 86, 11, 83, 25, 61, 19, 66),
	/* Values of variables m and n for ctxIdx from 399 to 463 (not documented) */
	CABAC_ENTRY(399, 12, 40, 25, 32, 21, 33, 31, 21),
	CABAC_ENTRY(400, 11, 51, 21, 49, 19, 50, 31, 31),
	CABAC_ENTRY(401, 14, 59, 21, 54, 17, 61, 25, 50),
	CABAC_ENTRY(402, -4, 79, -5, 85, -3, 78, -17, 120),
	CABAC_ENTRY(403, -7, 71, -6, 81, -8, 74, -20, 112),
	CABAC_ENTRY(404, -5, 69, -10, 77, -9, 72, -18, 114),
	CABAC_ENTRY(405, -9, 70, -7, 81, -10, 72, -11, 85),
	CABAC_ENTRY(406, -8, 66, -17, 80, -18, 75, -15, 92),
	CABAC_ENTRY(407, -10, 68, -18, 73, -12, 71, -14, 89),
	CABAC_ENTRY(408, -19, 73, -4, 74, -11, 63, -26, 71),
	CABAC_ENTRY(409, -12, 69, -10, 83, -5, 70, -15, 81),
	CABAC_ENTRY(410, -16, 70, -9, 71, -17, 75, -14, 80),
	CABAC_ENTRY(411, -15, 67, -9, 67, -14, 72, 0, 68),
	CABAC_ENTRY(412, -20, 62, -1, 61, -16, 67, -14, 70),
	CABAC_ENTRY(413, -19, 70, -8, 66, -8, 53, -24, 56),
	CABAC_ENTRY(414, -16, 66, -14, 66, -14, 59, -23, 68),
	CABAC_ENTRY(415, -22, 65, 0, 59, -9, 52, -24, 50),
	CABAC_ENTRY(416, -20, 63, 2, 59, -11, 68, -11, 74),
	CABAC_ENTRY(417, 9, -2, 17, -10, 9, -2, 23, -13),
	CABAC_ENTRY(418, 26, -9, 32, -13, 30, -10, 26, -13),
	CABAC_ENTRY(419, 33, -9, 42, -9, 31, -4, 40, -15),
	CABAC_ENTRY(420, 39, -7, 49, -5, 33, -1, 49, -14),
	CABAC_ENTRY(421, 41, -2, 53, 0, 33, 7, 44, 3),
	CABAC_ENTRY(422, 45, 3, 64, 3, 31, 12, 45, 6),
	CABAC_ENTRY(423, 49, 9, 68, 10, 37, 23, 44, 34),
	CABAC_ENTRY(424, 45, 27, 66, 27, 31, 38, 33, 54),
	CABAC_ENTRY(425, 36, 59, 47, 57, 20, 64, 19, 82),
	CABAC_ENTRY(426, -6, 66, -5, 71, -9, 71, -3, 75),
	CABAC_ENTRY(427, -7, 35, 0, 24, -7, 37, -1, 23),
	CABAC_ENTRY(428, -7, 42, -1, 36, -8, 44, 1, 34),
	CABAC_ENTRY(429, -8, 45, -2, 42, -11, 49, 1, 43),
	CABAC_ENTRY(430, -5, 48, -2, 52, -10, 56, 0, 54),
	CABAC_ENTRY(431, -12, 56, -9, 57, -12, 59, -2, 55),
	CABAC_ENTRY(432, -6, 60, -6, 63, -8, 63, 0, 61),
	CABAC_ENTRY(433, -5, 62, -4, 65, -9, 67, 1, 64),
	CABAC_ENTRY(434, -8, 66, -4, 67, -6, 68, 0, 68),
	CABAC_ENTRY(435, -8, 76, -7, 82, -10, 79, -9, 92),
	CABAC_ENTRY(436, -5, 85, -3, 81, -3, 78, -14, 106),
	CABAC_ENTRY(437, -6, 81, -3, 76, -8, 74, -13, 97),
	CABAC_ENTRY(438, -10, 77, -7, 72, -9, 72, -15, 90),
	CABAC_ENTRY(439, -7, 81, -6, 78, -10, 72, -12, 90),
	CABAC_ENTRY(440, -17, 80, -12, 72, -18, 75, -18, 88),
	CABAC_ENTRY(441, -18, 73, -14, 68, -12, 71, -10, 73),
	CABAC_ENTRY(442, -4, 74, -3, 70, -11, 63, -9, 79),
	CABAC_ENTRY(443, -10, 83, -6, 76, -5, 70, -14, 86),
	CABAC_ENTRY(444, -9, 71, -5, 66, -17, 75, -10, 73),
	CABAC_ENTRY(445, -9, 67, -5, 62, -14, 72, -10, 70),
	CABAC_ENTRY(446, -1, 61, 0, 57, -16, 67, -10, 69),
	CABAC_ENTRY(447, -8, 66, -4, 61, -8, 53, -5, 66),
	CABAC_ENTRY(448, -14, 66, -9, 60, -14, 59, -9, 64),
	CABAC_ENTRY(449, 0, 59, 1, 54, -9, 52, -5, 58),
	CABAC_ENTRY(450, 2, 59, 2, 58, -11, 68, 2, 59),
	CABAC_ENTRY(451, 21, -13, 17, -10, 9, -2, 21, -10),
	CABAC_ENTRY(452, 33, -14, 32, -13, 30, -10, 24, -11),
	CABAC_ENTRY(453, 39, -7, 42, -9, 31, -4, 28, -8),
	CABAC_ENTRY(454, 46, -2, 49, -5, 33, -1, 28, -1),
	CABAC_ENTRY(455, 51, 2, 53, 0, 33, 7, 29, 3),
	CABAC_ENTRY(456, 60, 6, 64, 3, 31, 12, 29, 9),
	CABAC_ENTRY(457, 61, 17, 68, 10, 37, 23, 35, 20),
	CABAC_ENTRY(458, 55, 34, 66, 27, 31, 38, 29, 36),
	CABAC_ENTRY(459, 42, 62, 47, 57, 20, 64, 14, 67),
	/* Entries 460-463 have no init values and stay zero-initialized. */
};
/*
 * Write @value into the bitfield described by @field inside the packed
 * parameter-set buffer @buf. Fields may straddle a 32-bit word boundary,
 * in which case the high part spills into the following word.
 */
static void set_ps_field(u32 *buf, struct rkvdec_ps_field field, u32 value)
{
	unsigned int lsb = field.offset % 32;
	unsigned int idx = field.offset / 32;
	u64 fmask = GENMASK_ULL(lsb + field.len - 1, lsb);
	u64 fval = ((u64)value << lsb) & fmask;

	/* Read-modify-write the low word; assignment truncates to 32 bits. */
	buf[idx] = (u32)((buf[idx] & ~fmask) | fval);

	/* Spill the upper bits into the next word when the field crosses it. */
	if (lsb + field.len > 32)
		buf[idx + 1] = (u32)((buf[idx + 1] & ~(fmask >> 32)) |
				     (fval >> 32));
}
/*
 * Build the hardware SPS/PPS packet for the current run.
 *
 * The packet is written into the auxiliary table slot indexed by the PPS id;
 * the order and widths of the fields are fixed by the PS_FIELD() layout
 * above, so the WRITE_PPS() sequence below must not be reordered.
 */
static void assemble_hw_pps(struct rkvdec_ctx *ctx,
			    struct rkvdec_h264_run *run)
{
	struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
	const struct v4l2_ctrl_h264_sps *sps = run->sps;
	const struct v4l2_ctrl_h264_pps *pps = run->pps;
	const struct v4l2_ctrl_h264_decode_params *dec_params = run->decode_params;
	const struct v4l2_h264_dpb_entry *dpb = dec_params->dpb;
	struct rkvdec_h264_priv_tbl *priv_tbl = h264_ctx->priv_tbl.cpu;
	struct rkvdec_sps_pps_packet *hw_ps;
	dma_addr_t scaling_list_address;
	u32 scaling_distance;
	u32 i;
	/*
	 * HW read the SPS/PPS information from PPS packet index by PPS id.
	 * offset from the base can be calculated by PPS_id * 32 (size per PPS
	 * packet unit). so the driver copy SPS/PPS information to the exact PPS
	 * packet unit for HW accessing.
	 */
	hw_ps = &priv_tbl->param_set[pps->pic_parameter_set_id];
	memset(hw_ps, 0, sizeof(*hw_ps));
#define WRITE_PPS(value, field) set_ps_field(hw_ps->info, field, value)
	/* write sps */
	/* Ids are written as all-ones sentinels; hardware does not use them. */
	WRITE_PPS(0xf, SEQ_PARAMETER_SET_ID);
	WRITE_PPS(0xff, PROFILE_IDC);
	WRITE_PPS(1, CONSTRAINT_SET3_FLAG);
	WRITE_PPS(sps->chroma_format_idc, CHROMA_FORMAT_IDC);
	WRITE_PPS(sps->bit_depth_luma_minus8, BIT_DEPTH_LUMA);
	WRITE_PPS(sps->bit_depth_chroma_minus8, BIT_DEPTH_CHROMA);
	WRITE_PPS(0, QPPRIME_Y_ZERO_TRANSFORM_BYPASS_FLAG);
	WRITE_PPS(sps->log2_max_frame_num_minus4, LOG2_MAX_FRAME_NUM_MINUS4);
	WRITE_PPS(sps->max_num_ref_frames, MAX_NUM_REF_FRAMES);
	WRITE_PPS(sps->pic_order_cnt_type, PIC_ORDER_CNT_TYPE);
	WRITE_PPS(sps->log2_max_pic_order_cnt_lsb_minus4,
		  LOG2_MAX_PIC_ORDER_CNT_LSB_MINUS4);
	WRITE_PPS(!!(sps->flags & V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO),
		  DELTA_PIC_ORDER_ALWAYS_ZERO_FLAG);
	/*
	 * Use the SPS values since they are already in macroblocks
	 * dimensions, height can be field height (halved) if
	 * V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY is not set and also it allows
	 * decoding smaller images into larger allocation which can be used
	 * to implementing SVC spatial layer support.
	 */
	WRITE_PPS(sps->pic_width_in_mbs_minus1 + 1, PIC_WIDTH_IN_MBS);
	WRITE_PPS(sps->pic_height_in_map_units_minus1 + 1, PIC_HEIGHT_IN_MBS);
	WRITE_PPS(!!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY),
		  FRAME_MBS_ONLY_FLAG);
	WRITE_PPS(!!(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD),
		  MB_ADAPTIVE_FRAME_FIELD_FLAG);
	WRITE_PPS(!!(sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE),
		  DIRECT_8X8_INFERENCE_FLAG);
	/* write pps */
	WRITE_PPS(0xff, PIC_PARAMETER_SET_ID);
	WRITE_PPS(0x1f, PPS_SEQ_PARAMETER_SET_ID);
	WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE),
		  ENTROPY_CODING_MODE_FLAG);
	WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT),
		  BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT_FLAG);
	WRITE_PPS(pps->num_ref_idx_l0_default_active_minus1,
		  NUM_REF_IDX_L_DEFAULT_ACTIVE_MINUS1(0));
	WRITE_PPS(pps->num_ref_idx_l1_default_active_minus1,
		  NUM_REF_IDX_L_DEFAULT_ACTIVE_MINUS1(1));
	WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED),
		  WEIGHTED_PRED_FLAG);
	WRITE_PPS(pps->weighted_bipred_idc, WEIGHTED_BIPRED_IDC);
	WRITE_PPS(pps->pic_init_qp_minus26, PIC_INIT_QP_MINUS26);
	WRITE_PPS(pps->pic_init_qs_minus26, PIC_INIT_QS_MINUS26);
	WRITE_PPS(pps->chroma_qp_index_offset, CHROMA_QP_INDEX_OFFSET);
	WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT),
		  DEBLOCKING_FILTER_CONTROL_PRESENT_FLAG);
	WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED),
		  CONSTRAINED_INTRA_PRED_FLAG);
	WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT),
		  REDUNDANT_PIC_CNT_PRESENT);
	WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE),
		  TRANSFORM_8X8_MODE_FLAG);
	WRITE_PPS(pps->second_chroma_qp_index_offset,
		  SECOND_CHROMA_QP_INDEX_OFFSET);
	WRITE_PPS(!!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT),
		  SCALING_LIST_ENABLE_FLAG);
	/* To be on the safe side, program the scaling matrix address */
	scaling_distance = offsetof(struct rkvdec_h264_priv_tbl, scaling_list);
	scaling_list_address = h264_ctx->priv_tbl.dma + scaling_distance;
	WRITE_PPS(scaling_list_address, SCALING_LIST_ADDRESS);
	/* Flag every long-term reference in the DPB for the hardware. */
	for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) {
		u32 is_longterm = 0;
		if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
			is_longterm = 1;
		WRITE_PPS(is_longterm, IS_LONG_TERM(i));
	}
}
/*
 * Resolve each active DPB entry to its CAPTURE vb2 buffer (matched by
 * timestamp) and record the result in run->ref_buf[]. Inactive or
 * unresolved entries are recorded as NULL.
 */
static void lookup_ref_buf_idx(struct rkvdec_ctx *ctx,
			       struct rkvdec_h264_run *run)
{
	const struct v4l2_ctrl_h264_decode_params *dec_params = run->decode_params;
	const struct v4l2_h264_dpb_entry *dpb = dec_params->dpb;
	/* Loop-invariant: the CAPTURE queue of this m2m context. */
	struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
	u32 i;

	for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) {
		struct vb2_buffer *buf = NULL;

		if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE) {
			buf = vb2_find_buffer(cap_q, dpb[i].reference_ts);
			if (!buf)
				pr_debug("No buffer for reference_ts %llu\n",
					 dpb[i].reference_ts);
		}
		run->ref_buf[i] = buf;
	}
}
/*
 * Pack the reference picture set for the hardware: per-DPB frame numbers
 * followed by the P/B0/B1 reference lists encoded via DPB_INFO/BOTTOM_FLAG
 * bitfields.
 */
static void assemble_hw_rps(struct rkvdec_ctx *ctx,
			    struct v4l2_h264_reflist_builder *builder,
			    struct rkvdec_h264_run *run)
{
	const struct v4l2_ctrl_h264_decode_params *dec_params = run->decode_params;
	const struct v4l2_h264_dpb_entry *dpb = dec_params->dpb;
	struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
	struct rkvdec_h264_priv_tbl *priv_tbl = h264_ctx->priv_tbl.cpu;
	u32 *hw_rps = priv_tbl->rps;
	u32 i, j;
	/* The leading part of the RPS is an array of 16-bit frame numbers. */
	u16 *p = (u16 *)hw_rps;
	memset(hw_rps, 0, sizeof(priv_tbl->rps));
	/*
	 * Assign an invalid pic_num if DPB entry at that position is inactive.
	 * If we assign 0 in that position hardware will treat that as a real
	 * reference picture with pic_num 0, triggering output picture
	 * corruption.
	 */
	for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) {
		if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
			continue;
		p[i] = builder->refs[i].frame_num;
	}
	/* j selects the reflist: 0 = P, 1 = B0, 2 = B1 (RKVDEC_NUM_REFLIST). */
	for (j = 0; j < RKVDEC_NUM_REFLIST; j++) {
		for (i = 0; i < builder->num_valid; i++) {
			struct v4l2_h264_reference *ref;
			bool dpb_valid;
			bool bottom;
			/* No default: j is bounded by RKVDEC_NUM_REFLIST above. */
			switch (j) {
			case 0:
				ref = &h264_ctx->reflists.p[i];
				break;
			case 1:
				ref = &h264_ctx->reflists.b0[i];
				break;
			case 2:
				ref = &h264_ctx->reflists.b1[i];
				break;
			}
			if (WARN_ON(ref->index >= ARRAY_SIZE(dec_params->dpb)))
				continue;
			/* Valid only if lookup_ref_buf_idx() resolved a buffer. */
			dpb_valid = run->ref_buf[ref->index] != NULL;
			bottom = ref->fields == V4L2_H264_BOTTOM_FIELD_REF;
			/* 5-bit DPB info: index in bits 0-3, valid flag in bit 4. */
			set_ps_field(hw_rps, DPB_INFO(i, j),
				     ref->index | dpb_valid << 4);
			set_ps_field(hw_rps, BOTTOM_FLAG(i, j), bottom);
		}
	}
}
/*
 * Copy the userspace-provided scaling matrices into the hardware auxiliary
 * table. Nothing to do when the PPS does not carry a scaling matrix.
 */
static void assemble_hw_scaling_list(struct rkvdec_ctx *ctx,
				     struct rkvdec_h264_run *run)
{
	const struct v4l2_ctrl_h264_scaling_matrix *matrix = run->scaling_matrix;
	const struct v4l2_ctrl_h264_pps *pps = run->pps;
	struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
	struct rkvdec_h264_priv_tbl *dst_tbl = h264_ctx->priv_tbl.cpu;

	if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
		return;

	/* The control layout must mirror the hardware table layout exactly. */
	BUILD_BUG_ON(sizeof(dst_tbl->scaling_list.scaling_list_4x4) !=
		     sizeof(matrix->scaling_list_4x4));
	BUILD_BUG_ON(sizeof(dst_tbl->scaling_list.scaling_list_8x8) !=
		     sizeof(matrix->scaling_list_8x8));

	memcpy(dst_tbl->scaling_list.scaling_list_4x4,
	       matrix->scaling_list_4x4,
	       sizeof(matrix->scaling_list_4x4));
	memcpy(dst_tbl->scaling_list.scaling_list_8x8,
	       matrix->scaling_list_8x8,
	       sizeof(matrix->scaling_list_8x8));
}
/*
 * dpb poc related registers table
 *
 * Register to program with the picture order count of DPB entry i's
 * top field (two POC values per entry share the REFER0/1/2 registers).
 */
static const u32 poc_reg_tbl_top_field[16] = {
	RKVDEC_REG_H264_POC_REFER0(0),
	RKVDEC_REG_H264_POC_REFER0(2),
	RKVDEC_REG_H264_POC_REFER0(4),
	RKVDEC_REG_H264_POC_REFER0(6),
	RKVDEC_REG_H264_POC_REFER0(8),
	RKVDEC_REG_H264_POC_REFER0(10),
	RKVDEC_REG_H264_POC_REFER0(12),
	RKVDEC_REG_H264_POC_REFER0(14),
	RKVDEC_REG_H264_POC_REFER1(1),
	RKVDEC_REG_H264_POC_REFER1(3),
	RKVDEC_REG_H264_POC_REFER1(5),
	RKVDEC_REG_H264_POC_REFER1(7),
	RKVDEC_REG_H264_POC_REFER1(9),
	RKVDEC_REG_H264_POC_REFER1(11),
	RKVDEC_REG_H264_POC_REFER1(13),
	RKVDEC_REG_H264_POC_REFER2(0)
};
/*
 * POC register for the bottom field of each of the 16 DPB entries;
 * fills the REFER0/REFER1/REFER2 slots left free by the top-field table.
 */
static const u32 poc_reg_tbl_bottom_field[16] = {
	RKVDEC_REG_H264_POC_REFER0(1),
	RKVDEC_REG_H264_POC_REFER0(3),
	RKVDEC_REG_H264_POC_REFER0(5),
	RKVDEC_REG_H264_POC_REFER0(7),
	RKVDEC_REG_H264_POC_REFER0(9),
	RKVDEC_REG_H264_POC_REFER0(11),
	RKVDEC_REG_H264_POC_REFER0(13),
	RKVDEC_REG_H264_POC_REFER1(0),
	RKVDEC_REG_H264_POC_REFER1(2),
	RKVDEC_REG_H264_POC_REFER1(4),
	RKVDEC_REG_H264_POC_REFER1(6),
	RKVDEC_REG_H264_POC_REFER1(8),
	RKVDEC_REG_H264_POC_REFER1(10),
	RKVDEC_REG_H264_POC_REFER1(12),
	RKVDEC_REG_H264_POC_REFER1(14),
	RKVDEC_REG_H264_POC_REFER2(1)
};
/*
 * Program the decoder for one H.264 frame: operating mode, stride
 * geometry, bitstream (RLC) location and length, CABAC table, output
 * buffer, per-DPB-entry reference addresses and POCs, and the addresses
 * of the hardware PPS/RPS/error-info tables.
 *
 * All accesses use writel_relaxed(); the non-relaxed writel() issued by
 * the caller when starting the decode orders them (see rkvdec_h264_run).
 */
static void config_registers(struct rkvdec_ctx *ctx,
			     struct rkvdec_h264_run *run)
{
	struct rkvdec_dev *rkvdec = ctx->dev;
	const struct v4l2_ctrl_h264_decode_params *dec_params = run->decode_params;
	const struct v4l2_ctrl_h264_sps *sps = run->sps;
	const struct v4l2_h264_dpb_entry *dpb = dec_params->dpb;
	struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
	dma_addr_t priv_start_addr = h264_ctx->priv_tbl.dma;
	const struct v4l2_pix_format_mplane *dst_fmt;
	struct vb2_v4l2_buffer *src_buf = run->base.bufs.src;
	struct vb2_v4l2_buffer *dst_buf = run->base.bufs.dst;
	const struct v4l2_format *f;
	dma_addr_t rlc_addr;
	dma_addr_t refer_addr;
	u32 rlc_len;
	u32 hor_virstride = 0;
	u32 ver_virstride = 0;
	u32 y_virstride = 0;
	u32 yuv_virstride = 0;
	u32 offset;
	dma_addr_t dst_addr;
	u32 reg, i;

	reg = RKVDEC_MODE(RKVDEC_MODE_H264);
	writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_SYSCTRL);

	f = &ctx->decoded_fmt;
	dst_fmt = &f->fmt.pix_mp;
	/* Luma line size in bytes; height rounded up to macroblock rows. */
	hor_virstride = (sps->bit_depth_luma_minus8 + 8) * dst_fmt->width / 8;
	ver_virstride = round_up(dst_fmt->height, 16);
	y_virstride = hor_virstride * ver_virstride;

	/* chroma_format_idc: 0 = 4:0:0, 1 = 4:2:0, 2 = 4:2:2 (H.264 spec). */
	if (sps->chroma_format_idc == 0)
		yuv_virstride = y_virstride;
	else if (sps->chroma_format_idc == 1)
		yuv_virstride += y_virstride + y_virstride / 2;
	else if (sps->chroma_format_idc == 2)
		yuv_virstride += 2 * y_virstride;

	reg = RKVDEC_Y_HOR_VIRSTRIDE(hor_virstride / 16) |
	      RKVDEC_UV_HOR_VIRSTRIDE(hor_virstride / 16) |
	      RKVDEC_SLICE_NUM_HIGHBIT |
	      RKVDEC_SLICE_NUM_LOWBITS(0x7ff);
	writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_PICPAR);

	/* config rlc base address */
	rlc_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
	writel_relaxed(rlc_addr, rkvdec->regs + RKVDEC_REG_STRM_RLC_BASE);
	writel_relaxed(rlc_addr, rkvdec->regs + RKVDEC_REG_RLCWRITE_BASE);

	rlc_len = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
	reg = RKVDEC_STRM_LEN(rlc_len);
	writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_STRM_LEN);

	/* config cabac table */
	offset = offsetof(struct rkvdec_h264_priv_tbl, cabac_table);
	writel_relaxed(priv_start_addr + offset,
		       rkvdec->regs + RKVDEC_REG_CABACTBL_PROB_BASE);

	/* config output base address */
	dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
	writel_relaxed(dst_addr, rkvdec->regs + RKVDEC_REG_DECOUT_BASE);

	reg = RKVDEC_Y_VIRSTRIDE(y_virstride / 16);
	writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_Y_VIRSTRIDE);

	reg = RKVDEC_YUV_VIRSTRIDE(yuv_virstride / 16);
	writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_YUV_VIRSTRIDE);

	/* config ref pic address & poc */
	for (i = 0; i < ARRAY_SIZE(dec_params->dpb); i++) {
		struct vb2_buffer *vb_buf = run->ref_buf[i];

		/*
		 * If a DPB entry is unused or invalid, address of current destination
		 * buffer is returned.
		 */
		if (!vb_buf)
			vb_buf = &dst_buf->vb2_buf;
		refer_addr = vb2_dma_contig_plane_dma_addr(vb_buf, 0);

		/* Low address bits carry per-reference usage flags. */
		if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
			refer_addr |= RKVDEC_COLMV_USED_FLAG_REF;
		if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_FIELD)
			refer_addr |= RKVDEC_FIELD_REF;

		if (dpb[i].fields & V4L2_H264_TOP_FIELD_REF)
			refer_addr |= RKVDEC_TOPFIELD_USED_REF;
		if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
			refer_addr |= RKVDEC_BOTFIELD_USED_REF;

		writel_relaxed(dpb[i].top_field_order_cnt,
			       rkvdec->regs + poc_reg_tbl_top_field[i]);
		writel_relaxed(dpb[i].bottom_field_order_cnt,
			       rkvdec->regs + poc_reg_tbl_bottom_field[i]);

		/* Entry 15 lives in a dedicated register. */
		if (i < V4L2_H264_NUM_DPB_ENTRIES - 1)
			writel_relaxed(refer_addr,
				       rkvdec->regs + RKVDEC_REG_H264_BASE_REFER(i));
		else
			writel_relaxed(refer_addr,
				       rkvdec->regs + RKVDEC_REG_H264_BASE_REFER15);
	}

	reg = RKVDEC_CUR_POC(dec_params->top_field_order_cnt);
	writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_CUR_POC0);

	reg = RKVDEC_CUR_POC(dec_params->bottom_field_order_cnt);
	writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_CUR_POC1);

	/* config hw pps address */
	offset = offsetof(struct rkvdec_h264_priv_tbl, param_set);
	writel_relaxed(priv_start_addr + offset,
		       rkvdec->regs + RKVDEC_REG_PPS_BASE);

	/* config hw rps address */
	offset = offsetof(struct rkvdec_h264_priv_tbl, rps);
	writel_relaxed(priv_start_addr + offset,
		       rkvdec->regs + RKVDEC_REG_RPS_BASE);

	reg = RKVDEC_AXI_DDR_RDATA(0);
	writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_AXI_DDR_RDATA);

	reg = RKVDEC_AXI_DDR_WDATA(0);
	writel_relaxed(reg, rkvdec->regs + RKVDEC_REG_AXI_DDR_WDATA);

	offset = offsetof(struct rkvdec_h264_priv_tbl, err_info);
	writel_relaxed(priv_start_addr + offset,
		       rkvdec->regs + RKVDEC_REG_H264_ERRINFO_BASE);
}
#define RKVDEC_H264_MAX_DEPTH_IN_BYTES 2
/*
 * Constrain an H.264 OUTPUT (bitstream) format: single plane, with a
 * worst-case default buffer size when userspace did not provide one.
 */
static int rkvdec_h264_adjust_fmt(struct rkvdec_ctx *ctx,
				  struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;

	/* H.264 bitstreams are always carried in a single plane. */
	pix_mp->num_planes = 1;

	if (!pix_mp->plane_fmt[0].sizeimage)
		pix_mp->plane_fmt[0].sizeimage = pix_mp->width *
						 pix_mp->height *
						 RKVDEC_H264_MAX_DEPTH_IN_BYTES;

	return 0;
}
/*
 * Reject SPS settings the driver cannot currently decode, and make sure
 * the coded dimensions fit within the format negotiated with userspace.
 */
static int rkvdec_h264_validate_sps(struct rkvdec_ctx *ctx,
				    const struct v4l2_ctrl_h264_sps *sps)
{
	unsigned int pic_width, pic_height;

	/*
	 * TODO: the hardware can do 10-bit and 4:2:2 profiles, but support
	 * for them is currently broken in the driver, so only 8-bit
	 * 4:0:0/4:2:0 streams with matching luma/chroma depth are accepted.
	 */
	if (sps->chroma_format_idc > 1 ||
	    sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8 ||
	    sps->bit_depth_luma_minus8 != 0)
		return -EINVAL;

	pic_width = (sps->pic_width_in_mbs_minus1 + 1) * 16;
	pic_height = (sps->pic_height_in_map_units_minus1 + 1) * 16;

	/*
	 * Without frame_mbs_only_flag, pic_height_in_map_units counts field
	 * macroblock rows, i.e. half the frame height (spec eq. (7-18)).
	 */
	if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
		pic_height *= 2;

	if (pic_width > ctx->coded_fmt.fmt.pix_mp.width ||
	    pic_height > ctx->coded_fmt.fmt.pix_mp.height)
		return -EINVAL;

	return 0;
}
/*
 * Allocate the per-context H.264 state and the DMA-coherent private
 * table (CABAC probabilities, PPS/RPS/scaling/err-info areas), after
 * validating the SPS currently set on the control handler.
 */
static int rkvdec_h264_start(struct rkvdec_ctx *ctx)
{
	struct rkvdec_dev *rkvdec = ctx->dev;
	struct rkvdec_h264_priv_tbl *tbl;
	struct rkvdec_h264_ctx *h264_ctx;
	struct v4l2_ctrl *sps_ctrl;
	int ret;

	sps_ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, V4L2_CID_STATELESS_H264_SPS);
	if (!sps_ctrl)
		return -EINVAL;

	ret = rkvdec_h264_validate_sps(ctx, sps_ctrl->p_new.p_h264_sps);
	if (ret)
		return ret;

	h264_ctx = kzalloc(sizeof(*h264_ctx), GFP_KERNEL);
	if (!h264_ctx)
		return -ENOMEM;

	tbl = dma_alloc_coherent(rkvdec->dev, sizeof(*tbl),
				 &h264_ctx->priv_tbl.dma, GFP_KERNEL);
	if (!tbl) {
		kfree(h264_ctx);
		return -ENOMEM;
	}

	h264_ctx->priv_tbl.size = sizeof(*tbl);
	h264_ctx->priv_tbl.cpu = tbl;

	/* Seed the hardware CABAC probability table once per context. */
	memcpy(tbl->cabac_table, rkvdec_h264_cabac_table,
	       sizeof(rkvdec_h264_cabac_table));

	ctx->priv = h264_ctx;
	return 0;
}
/* Release the DMA-coherent private table and the H.264 context state. */
static void rkvdec_h264_stop(struct rkvdec_ctx *ctx)
{
	struct rkvdec_h264_ctx *h264_ctx = ctx->priv;

	dma_free_coherent(ctx->dev->dev, h264_ctx->priv_tbl.size,
			  h264_ctx->priv_tbl.cpu, h264_ctx->priv_tbl.dma);
	kfree(h264_ctx);
}
/*
 * Snapshot the current values of all H.264 stateless controls into the
 * run descriptor, then perform the generic run preamble.
 */
static void rkvdec_h264_run_preamble(struct rkvdec_ctx *ctx,
				     struct rkvdec_h264_run *run)
{
	struct v4l2_ctrl_handler *hdl = &ctx->ctrl_hdl;
	struct v4l2_ctrl *ctrl;

	ctrl = v4l2_ctrl_find(hdl, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
	run->decode_params = ctrl ? ctrl->p_cur.p : NULL;

	ctrl = v4l2_ctrl_find(hdl, V4L2_CID_STATELESS_H264_SPS);
	run->sps = ctrl ? ctrl->p_cur.p : NULL;

	ctrl = v4l2_ctrl_find(hdl, V4L2_CID_STATELESS_H264_PPS);
	run->pps = ctrl ? ctrl->p_cur.p : NULL;

	ctrl = v4l2_ctrl_find(hdl, V4L2_CID_STATELESS_H264_SCALING_MATRIX);
	run->scaling_matrix = ctrl ? ctrl->p_cur.p : NULL;

	rkvdec_run_preamble(ctx, &run->base);
}
/*
 * Decode one H.264 frame: gather the controls, build the reference
 * lists, fill all hardware tables, program the registers and kick the
 * hardware. The final writel() to RKVDEC_REG_INTERRUPT also orders the
 * preceding relaxed register writes.
 */
static int rkvdec_h264_run(struct rkvdec_ctx *ctx)
{
	struct v4l2_h264_reflist_builder reflist_builder;
	struct rkvdec_dev *rkvdec = ctx->dev;
	struct rkvdec_h264_ctx *h264_ctx = ctx->priv;
	struct rkvdec_h264_run run;

	rkvdec_h264_run_preamble(ctx, &run);

	/* Build the P/B{0,1} ref lists. */
	v4l2_h264_init_reflist_builder(&reflist_builder, run.decode_params,
				       run.sps, run.decode_params->dpb);
	v4l2_h264_build_p_ref_list(&reflist_builder, h264_ctx->reflists.p);
	v4l2_h264_build_b_ref_lists(&reflist_builder, h264_ctx->reflists.b0,
				    h264_ctx->reflists.b1);

	/* Fill the per-frame hardware tables, then program the registers. */
	assemble_hw_scaling_list(ctx, &run);
	assemble_hw_pps(ctx, &run);
	lookup_ref_buf_idx(ctx, &run);
	assemble_hw_rps(ctx, &reflist_builder, &run);
	config_registers(ctx, &run);

	rkvdec_run_postamble(ctx, &run.base);

	/* Arm the watchdog in case the hardware never completes. */
	schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000));

	writel(0, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
	writel(0, rkvdec->regs + RKVDEC_REG_H264_ERR_E);
	writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
	writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND);

	/* Start decoding! */
	writel(RKVDEC_INTERRUPT_DEC_E | RKVDEC_CONFIG_DEC_CLK_GATE_E |
	       RKVDEC_TIMEOUT_E | RKVDEC_BUF_EMPTY_E,
	       rkvdec->regs + RKVDEC_REG_INTERRUPT);

	return 0;
}
/* Per-control validation hook: only the SPS needs extra checking. */
static int rkvdec_h264_try_ctrl(struct rkvdec_ctx *ctx, struct v4l2_ctrl *ctrl)
{
	if (ctrl->id != V4L2_CID_STATELESS_H264_SPS)
		return 0;

	return rkvdec_h264_validate_sps(ctx, ctrl->p_new.p_h264_sps);
}
/* H.264 backend hooks plugged into the generic rkvdec core. */
const struct rkvdec_coded_fmt_ops rkvdec_h264_fmt_ops = {
	.adjust_fmt = rkvdec_h264_adjust_fmt,
	.start = rkvdec_h264_start,
	.stop = rkvdec_h264_stop,
	.run = rkvdec_h264_run,
	.try_ctrl = rkvdec_h264_try_ctrl,
};
/* ==== end of drivers/staging/media/rkvdec/rkvdec-h264.c (linux-master); the following content is drivers/staging/media/rkvdec/rkvdec.c ==== */
// SPDX-License-Identifier: GPL-2.0
/*
* Rockchip Video Decoder driver
*
* Copyright (C) 2019 Collabora, Ltd.
*
* Based on rkvdec driver by Google LLC. (Tomasz Figa <[email protected]>)
* Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/workqueue.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
#include "rkvdec.h"
#include "rkvdec-regs.h"
/* Dispatch control validation to the active codec's try_ctrl hook. */
static int rkvdec_try_ctrl(struct v4l2_ctrl *ctrl)
{
	struct rkvdec_ctx *ctx = container_of(ctrl->handler, struct rkvdec_ctx,
					      ctrl_hdl);
	const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;

	return desc->ops->try_ctrl ? desc->ops->try_ctrl(ctx, ctrl) : 0;
}
/* Control ops attached to controls that need codec-specific validation. */
static const struct v4l2_ctrl_ops rkvdec_ctrl_ops = {
	.try_ctrl = rkvdec_try_ctrl,
};
/*
 * Controls exposed for H.264 decoding: the stateless payload controls,
 * the decode-mode/start-code constraints (frame-based, Annex B only),
 * and the advertised profile/level range. Only the SPS control carries
 * ops, so it is validated through rkvdec_try_ctrl.
 */
static const struct rkvdec_ctrl_desc rkvdec_h264_ctrl_descs[] = {
	{
		.cfg.id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_SPS,
		.cfg.ops = &rkvdec_ctrl_ops,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_PPS,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_DECODE_MODE,
		.cfg.min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
		.cfg.max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
		.cfg.def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_H264_START_CODE,
		.cfg.min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
		.cfg.def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
		.cfg.max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
	},
	{
		.cfg.id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
		.cfg.min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
		.cfg.max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
		/* Extended profile is not supported by the hardware. */
		.cfg.menu_skip_mask =
			BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
		.cfg.def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
	},
	{
		.cfg.id = V4L2_CID_MPEG_VIDEO_H264_LEVEL,
		.cfg.min = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
		.cfg.max = V4L2_MPEG_VIDEO_H264_LEVEL_5_1,
	},
};
/* Control set registered for the H.264 coded format. */
static const struct rkvdec_ctrls rkvdec_h264_ctrls = {
	.ctrls = rkvdec_h264_ctrl_descs,
	.num_ctrls = ARRAY_SIZE(rkvdec_h264_ctrl_descs),
};
/* Both codecs decode to NV12 only. */
static const u32 rkvdec_h264_vp9_decoded_fmts[] = {
	V4L2_PIX_FMT_NV12,
};
/* Controls exposed for VP9 decoding; only profile 0 is advertised. */
static const struct rkvdec_ctrl_desc rkvdec_vp9_ctrl_descs[] = {
	{
		.cfg.id = V4L2_CID_STATELESS_VP9_FRAME,
	},
	{
		.cfg.id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
	},
	{
		.cfg.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
		.cfg.min = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
		.cfg.max = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
		.cfg.def = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
	},
};
/* Control set registered for the VP9 coded format. */
static const struct rkvdec_ctrls rkvdec_vp9_ctrls = {
	.ctrls = rkvdec_vp9_ctrl_descs,
	.num_ctrls = ARRAY_SIZE(rkvdec_vp9_ctrl_descs),
};
/*
 * Supported bitstream (OUTPUT queue) formats. H.264 up to 4096x2560 in
 * 16-pixel steps, VP9 up to 4096x2304 in 64-pixel steps; both decode to
 * the formats in rkvdec_h264_vp9_decoded_fmts. Entry 0 (H.264) is the
 * default selected by rkvdec_reset_coded_fmt().
 */
static const struct rkvdec_coded_fmt_desc rkvdec_coded_fmts[] = {
	{
		.fourcc = V4L2_PIX_FMT_H264_SLICE,
		.frmsize = {
			.min_width = 48,
			.max_width = 4096,
			.step_width = 16,
			.min_height = 48,
			.max_height = 2560,
			.step_height = 16,
		},
		.ctrls = &rkvdec_h264_ctrls,
		.ops = &rkvdec_h264_fmt_ops,
		.num_decoded_fmts = ARRAY_SIZE(rkvdec_h264_vp9_decoded_fmts),
		.decoded_fmts = rkvdec_h264_vp9_decoded_fmts,
		.subsystem_flags = VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF,
	},
	{
		.fourcc = V4L2_PIX_FMT_VP9_FRAME,
		.frmsize = {
			.min_width = 64,
			.max_width = 4096,
			.step_width = 64,
			.min_height = 64,
			.max_height = 2304,
			.step_height = 64,
		},
		.ctrls = &rkvdec_vp9_ctrls,
		.ops = &rkvdec_vp9_fmt_ops,
		.num_decoded_fmts = ARRAY_SIZE(rkvdec_h264_vp9_decoded_fmts),
		.decoded_fmts = rkvdec_h264_vp9_decoded_fmts,
	}
};
/* Look up the coded-format descriptor matching @fourcc, or NULL. */
static const struct rkvdec_coded_fmt_desc *
rkvdec_find_coded_fmt_desc(u32 fourcc)
{
	const struct rkvdec_coded_fmt_desc *desc = rkvdec_coded_fmts;
	const struct rkvdec_coded_fmt_desc *end =
		rkvdec_coded_fmts + ARRAY_SIZE(rkvdec_coded_fmts);

	for (; desc < end; desc++) {
		if (desc->fourcc == fourcc)
			return desc;
	}

	return NULL;
}
/* Reset @f to defaults for @fourcc (progressive, REC709 defaults). */
static void rkvdec_reset_fmt(struct rkvdec_ctx *ctx, struct v4l2_format *f,
			     u32 fourcc)
{
	struct v4l2_pix_format_mplane *pix_mp;

	memset(f, 0, sizeof(*f));

	pix_mp = &f->fmt.pix_mp;
	pix_mp->pixelformat = fourcc;
	pix_mp->field = V4L2_FIELD_NONE;
	pix_mp->colorspace = V4L2_COLORSPACE_REC709;
	pix_mp->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
	pix_mp->quantization = V4L2_QUANTIZATION_DEFAULT;
	pix_mp->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
static void rkvdec_reset_coded_fmt(struct rkvdec_ctx *ctx)
{
struct v4l2_format *f = &ctx->coded_fmt;
ctx->coded_fmt_desc = &rkvdec_coded_fmts[0];
rkvdec_reset_fmt(ctx, f, ctx->coded_fmt_desc->fourcc);
f->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
f->fmt.pix_mp.width = ctx->coded_fmt_desc->frmsize.min_width;
f->fmt.pix_mp.height = ctx->coded_fmt_desc->frmsize.min_height;
if (ctx->coded_fmt_desc->ops->adjust_fmt)
ctx->coded_fmt_desc->ops->adjust_fmt(ctx, f);
}
static void rkvdec_reset_decoded_fmt(struct rkvdec_ctx *ctx)
{
struct v4l2_format *f = &ctx->decoded_fmt;
rkvdec_reset_fmt(ctx, f, ctx->coded_fmt_desc->decoded_fmts[0]);
f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
v4l2_fill_pixfmt_mp(&f->fmt.pix_mp,
ctx->coded_fmt_desc->decoded_fmts[0],
ctx->coded_fmt.fmt.pix_mp.width,
ctx->coded_fmt.fmt.pix_mp.height);
f->fmt.pix_mp.plane_fmt[0].sizeimage += 128 *
DIV_ROUND_UP(f->fmt.pix_mp.width, 16) *
DIV_ROUND_UP(f->fmt.pix_mp.height, 16);
}
/* VIDIOC_ENUM_FRAMESIZES: report the stepwise range for a coded format. */
static int rkvdec_enum_framesizes(struct file *file, void *priv,
				  struct v4l2_frmsizeenum *fsize)
{
	const struct rkvdec_coded_fmt_desc *desc;

	/* Only a single stepwise range exists per format. */
	if (fsize->index)
		return -EINVAL;

	desc = rkvdec_find_coded_fmt_desc(fsize->pixel_format);
	if (!desc)
		return -EINVAL;

	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise = desc->frmsize;
	return 0;
}
/* VIDIOC_QUERYCAP: identify the driver, device node and bus. */
static int rkvdec_querycap(struct file *file, void *priv,
			   struct v4l2_capability *cap)
{
	struct rkvdec_dev *rkvdec = video_drvdata(file);
	struct video_device *vdev = video_devdata(file);
	const char *drv_name = rkvdec->dev->driver->name;

	strscpy(cap->driver, drv_name, sizeof(cap->driver));
	strscpy(cap->card, vdev->name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
		 drv_name);
	return 0;
}
static int rkvdec_try_capture_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
const struct rkvdec_coded_fmt_desc *coded_desc;
unsigned int i;
/*
* The codec context should point to a coded format desc, if the format
* on the coded end has not been set yet, it should point to the
* default value.
*/
coded_desc = ctx->coded_fmt_desc;
if (WARN_ON(!coded_desc))
return -EINVAL;
for (i = 0; i < coded_desc->num_decoded_fmts; i++) {
if (coded_desc->decoded_fmts[i] == pix_mp->pixelformat)
break;
}
if (i == coded_desc->num_decoded_fmts)
pix_mp->pixelformat = coded_desc->decoded_fmts[0];
/* Always apply the frmsize constraint of the coded end. */
pix_mp->width = max(pix_mp->width, ctx->coded_fmt.fmt.pix_mp.width);
pix_mp->height = max(pix_mp->height, ctx->coded_fmt.fmt.pix_mp.height);
v4l2_apply_frmsize_constraints(&pix_mp->width,
&pix_mp->height,
&coded_desc->frmsize);
v4l2_fill_pixfmt_mp(pix_mp, pix_mp->pixelformat,
pix_mp->width, pix_mp->height);
pix_mp->plane_fmt[0].sizeimage +=
128 *
DIV_ROUND_UP(pix_mp->width, 16) *
DIV_ROUND_UP(pix_mp->height, 16);
pix_mp->field = V4L2_FIELD_NONE;
return 0;
}
/*
 * VIDIOC_TRY_FMT on the OUTPUT queue: fall back to the default coded
 * format when the requested fourcc is unknown, apply the size
 * constraints, then let the codec backend adjust the result.
 */
static int rkvdec_try_output_fmt(struct file *file, void *priv,
				 struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
	const struct rkvdec_coded_fmt_desc *desc;

	desc = rkvdec_find_coded_fmt_desc(pix_mp->pixelformat);
	if (!desc) {
		desc = &rkvdec_coded_fmts[0];
		pix_mp->pixelformat = desc->fourcc;
	}

	v4l2_apply_frmsize_constraints(&pix_mp->width,
				       &pix_mp->height,
				       &desc->frmsize);

	pix_mp->field = V4L2_FIELD_NONE;
	/* All coded formats are considered single planar for now. */
	pix_mp->num_planes = 1;

	if (!desc->ops->adjust_fmt)
		return 0;

	return desc->ops->adjust_fmt(ctx, f);
}
/* VIDIOC_S_FMT on the CAPTURE queue: try, then commit the format. */
static int rkvdec_s_capture_fmt(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
	struct vb2_queue *cap_q;
	int ret;

	/* Change not allowed if queue is busy */
	cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
				V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
	if (vb2_is_busy(cap_q))
		return -EBUSY;

	ret = rkvdec_try_capture_fmt(file, priv, f);
	if (ret)
		return ret;

	ctx->decoded_fmt = *f;
	return 0;
}
/*
 * VIDIOC_S_FMT on the OUTPUT queue. Commits a new coded format,
 * resets the decoded format to match, and propagates colorimetry to
 * the CAPTURE side. Busy-queue checks come first so nothing is
 * modified on failure.
 */
static int rkvdec_s_output_fmt(struct file *file, void *priv,
			       struct v4l2_format *f)
{
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
	const struct rkvdec_coded_fmt_desc *desc;
	struct v4l2_format *cap_fmt;
	struct vb2_queue *peer_vq, *vq;
	int ret;

	/*
	 * In order to support dynamic resolution change, the decoder admits
	 * a resolution change, as long as the pixelformat remains. Can't be
	 * done if streaming.
	 */
	vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
	if (vb2_is_streaming(vq) ||
	    (vb2_is_busy(vq) &&
	     f->fmt.pix_mp.pixelformat != ctx->coded_fmt.fmt.pix_mp.pixelformat))
		return -EBUSY;

	/*
	 * Since format change on the OUTPUT queue will reset the CAPTURE
	 * queue, we can't allow doing so when the CAPTURE queue has buffers
	 * allocated.
	 */
	peer_vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
	if (vb2_is_busy(peer_vq))
		return -EBUSY;

	ret = rkvdec_try_output_fmt(file, priv, f);
	if (ret)
		return ret;

	desc = rkvdec_find_coded_fmt_desc(f->fmt.pix_mp.pixelformat);
	if (!desc)
		return -EINVAL;
	ctx->coded_fmt_desc = desc;
	ctx->coded_fmt = *f;

	/*
	 * Current decoded format might have become invalid with newly
	 * selected codec, so reset it to default just to be safe and
	 * keep internal driver state sane. User is mandated to set
	 * the decoded format again after we return, so we don't need
	 * anything smarter.
	 *
	 * Note that this will propagates any size changes to the decoded format.
	 */
	rkvdec_reset_decoded_fmt(ctx);

	/* Propagate colorspace information to capture. */
	cap_fmt = &ctx->decoded_fmt;
	cap_fmt->fmt.pix_mp.colorspace = f->fmt.pix_mp.colorspace;
	cap_fmt->fmt.pix_mp.xfer_func = f->fmt.pix_mp.xfer_func;
	cap_fmt->fmt.pix_mp.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
	cap_fmt->fmt.pix_mp.quantization = f->fmt.pix_mp.quantization;

	/* Enable format specific queue features */
	vq->subsystem_flags |= desc->subsystem_flags;

	return 0;
}
static int rkvdec_g_output_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
*f = ctx->coded_fmt;
return 0;
}
static int rkvdec_g_capture_fmt(struct file *file, void *priv,
struct v4l2_format *f)
{
struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
*f = ctx->decoded_fmt;
return 0;
}
/* VIDIOC_ENUM_FMT on the OUTPUT queue: walk the coded-format table. */
static int rkvdec_enum_output_fmt(struct file *file, void *priv,
				  struct v4l2_fmtdesc *f)
{
	if (f->index < ARRAY_SIZE(rkvdec_coded_fmts)) {
		f->pixelformat = rkvdec_coded_fmts[f->index].fourcc;
		return 0;
	}

	return -EINVAL;
}
static int rkvdec_enum_capture_fmt(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
if (WARN_ON(!ctx->coded_fmt_desc))
return -EINVAL;
if (f->index >= ctx->coded_fmt_desc->num_decoded_fmts)
return -EINVAL;
f->pixelformat = ctx->coded_fmt_desc->decoded_fmts[f->index];
return 0;
}
/*
 * ioctl table: format negotiation is driver-specific, buffer handling
 * is delegated to the v4l2-mem2mem helpers.
 */
static const struct v4l2_ioctl_ops rkvdec_ioctl_ops = {
	.vidioc_querycap = rkvdec_querycap,
	.vidioc_enum_framesizes = rkvdec_enum_framesizes,

	.vidioc_try_fmt_vid_cap_mplane = rkvdec_try_capture_fmt,
	.vidioc_try_fmt_vid_out_mplane = rkvdec_try_output_fmt,
	.vidioc_s_fmt_vid_out_mplane = rkvdec_s_output_fmt,
	.vidioc_s_fmt_vid_cap_mplane = rkvdec_s_capture_fmt,
	.vidioc_g_fmt_vid_out_mplane = rkvdec_g_output_fmt,
	.vidioc_g_fmt_vid_cap_mplane = rkvdec_g_capture_fmt,
	.vidioc_enum_fmt_vid_out = rkvdec_enum_output_fmt,
	.vidioc_enum_fmt_vid_cap = rkvdec_enum_capture_fmt,

	.vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
	.vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
	.vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
	.vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
	.vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
	.vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
	.vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
	.vidioc_streamon = v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
};
static int rkvdec_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
unsigned int *num_planes, unsigned int sizes[],
struct device *alloc_devs[])
{
struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq);
struct v4l2_format *f;
unsigned int i;
if (V4L2_TYPE_IS_OUTPUT(vq->type))
f = &ctx->coded_fmt;
else
f = &ctx->decoded_fmt;
if (*num_planes) {
if (*num_planes != f->fmt.pix_mp.num_planes)
return -EINVAL;
for (i = 0; i < f->fmt.pix_mp.num_planes; i++) {
if (sizes[i] < f->fmt.pix_mp.plane_fmt[i].sizeimage)
return -EINVAL;
}
} else {
*num_planes = f->fmt.pix_mp.num_planes;
for (i = 0; i < f->fmt.pix_mp.num_planes; i++)
sizes[i] = f->fmt.pix_mp.plane_fmt[i].sizeimage;
}
return 0;
}
static int rkvdec_buf_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq);
struct v4l2_format *f;
unsigned int i;
if (V4L2_TYPE_IS_OUTPUT(vq->type))
f = &ctx->coded_fmt;
else
f = &ctx->decoded_fmt;
for (i = 0; i < f->fmt.pix_mp.num_planes; ++i) {
u32 sizeimage = f->fmt.pix_mp.plane_fmt[i].sizeimage;
if (vb2_plane_size(vb, i) < sizeimage)
return -EINVAL;
}
/*
* Buffer's bytesused must be written by driver for CAPTURE buffers.
* (for OUTPUT buffers, if userspace passes 0 bytesused, v4l2-core sets
* it to buffer length).
*/
if (V4L2_TYPE_IS_CAPTURE(vq->type))
vb2_set_plane_payload(vb, 0, f->fmt.pix_mp.plane_fmt[0].sizeimage);
return 0;
}
/* vb2 .buf_queue: hand the buffer to the mem2mem framework. */
static void rkvdec_buf_queue(struct vb2_buffer *vb)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
}
static int rkvdec_buf_out_validate(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
vbuf->field = V4L2_FIELD_NONE;
return 0;
}
/* vb2 .buf_request_complete: mark the request's controls as done. */
static void rkvdec_buf_request_complete(struct vb2_buffer *vb)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_hdl);
}
/*
 * vb2 .start_streaming: let the codec backend allocate its per-context
 * state. Only acts on the OUTPUT queue.
 */
static int rkvdec_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(q);
	const struct rkvdec_coded_fmt_desc *desc;

	if (V4L2_TYPE_IS_CAPTURE(q->type))
		return 0;

	desc = ctx->coded_fmt_desc;
	if (WARN_ON(!desc))
		return -EINVAL;

	return desc->ops->start ? desc->ops->start(ctx) : 0;
}
/*
 * Drain every pending buffer from @vq's side of the m2m context,
 * completing any attached request and returning the buffer in @state.
 */
static void rkvdec_queue_cleanup(struct vb2_queue *vq, u32 state)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(vq);
	bool is_output = V4L2_TYPE_IS_OUTPUT(vq->type);

	for (;;) {
		struct vb2_v4l2_buffer *vbuf;

		vbuf = is_output ? v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)
				 : v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		if (!vbuf)
			return;

		v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
					   &ctx->ctrl_hdl);
		v4l2_m2m_buf_done(vbuf, state);
	}
}
/*
 * vb2 .stop_streaming: tear down the codec backend state (OUTPUT side
 * only) and return all pending buffers with an error state.
 */
static void rkvdec_stop_streaming(struct vb2_queue *q)
{
	struct rkvdec_ctx *ctx = vb2_get_drv_priv(q);

	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;

		if (WARN_ON(!desc))
			return;

		if (desc->ops->stop)
			desc->ops->stop(ctx);
	}

	rkvdec_queue_cleanup(q, VB2_BUF_STATE_ERROR);
}
/* vb2 queue ops shared by the OUTPUT and CAPTURE queues. */
static const struct vb2_ops rkvdec_queue_ops = {
	.queue_setup = rkvdec_queue_setup,
	.buf_prepare = rkvdec_buf_prepare,
	.buf_queue = rkvdec_buf_queue,
	.buf_out_validate = rkvdec_buf_out_validate,
	.buf_request_complete = rkvdec_buf_request_complete,
	.start_streaming = rkvdec_start_streaming,
	.stop_streaming = rkvdec_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};
/*
 * Media request validation: each request must carry exactly one buffer
 * before the generic vb2 validation runs.
 *
 * Returns 0 on success, -ENOENT when the request has no buffer,
 * -EINVAL when it has more than one, or the vb2 validation result.
 */
static int rkvdec_request_validate(struct media_request *req)
{
	unsigned int count = vb2_request_buffer_cnt(req);

	if (!count)
		return -ENOENT;
	if (count > 1)
		return -EINVAL;

	return vb2_request_validate(req);
}
/* Media-controller request hooks (validation + m2m queueing). */
static const struct media_device_ops rkvdec_media_ops = {
	.req_validate = rkvdec_request_validate,
	.req_queue = v4l2_m2m_request_queue,
};
/*
 * Complete the current m2m job without touching runtime PM: give the
 * codec backend its .done callback, then finish src/dst buffers.
 */
static void rkvdec_job_finish_no_pm(struct rkvdec_ctx *ctx,
				    enum vb2_buffer_state result)
{
	struct vb2_v4l2_buffer *src_buf, *dst_buf;

	if (ctx->coded_fmt_desc->ops->done) {
		src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
		dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
		ctx->coded_fmt_desc->ops->done(ctx, src_buf, dst_buf, result);
	}

	v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
					 result);
}
/* Complete the job and drop the runtime-PM reference taken for it. */
static void rkvdec_job_finish(struct rkvdec_ctx *ctx,
			      enum vb2_buffer_state result)
{
	pm_runtime_mark_last_busy(ctx->dev->dev);
	pm_runtime_put_autosuspend(ctx->dev->dev);

	rkvdec_job_finish_no_pm(ctx, result);
}
/*
 * Generic per-job preamble: pick the next src/dst buffers, apply the
 * controls carried by the source buffer's request, and copy timestamps
 * and flags from source to destination.
 */
void rkvdec_run_preamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run)
{
	struct media_request *req;

	memset(run, 0, sizeof(*run));

	run->bufs.src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	run->bufs.dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* Apply request(s) controls if needed. */
	req = run->bufs.src->vb2_buf.req_obj.req;
	if (req)
		v4l2_ctrl_request_setup(req, &ctx->ctrl_hdl);

	v4l2_m2m_buf_copy_metadata(run->bufs.src, run->bufs.dst, true);
}
/* Generic per-job postamble: mark the request's controls as applied. */
void rkvdec_run_postamble(struct rkvdec_ctx *ctx, struct rkvdec_run *run)
{
	struct media_request *req = run->bufs.src->vb2_buf.req_obj.req;

	if (req)
		v4l2_ctrl_request_complete(req, &ctx->ctrl_hdl);
}
/*
 * m2m .device_run: power up the decoder and hand the job to the codec
 * backend, failing the job on any error.
 */
static void rkvdec_device_run(void *priv)
{
	struct rkvdec_ctx *ctx = priv;
	const struct rkvdec_coded_fmt_desc *desc = ctx->coded_fmt_desc;
	struct rkvdec_dev *rkvdec = ctx->dev;

	if (WARN_ON(!desc))
		return;

	if (pm_runtime_resume_and_get(rkvdec->dev) < 0) {
		/* No PM reference was taken, so finish without dropping one. */
		rkvdec_job_finish_no_pm(ctx, VB2_BUF_STATE_ERROR);
		return;
	}

	if (desc->ops->run(ctx))
		rkvdec_job_finish(ctx, VB2_BUF_STATE_ERROR);
}
/* mem2mem framework hooks. */
static const struct v4l2_m2m_ops rkvdec_m2m_ops = {
	.device_run = rkvdec_device_run,
};
/*
 * m2m queue-init callback: configure and initialize the OUTPUT (coded
 * bitstream, request-driven) and CAPTURE (decoded frames) vb2 queues.
 */
static int rkvdec_queue_init(void *priv,
			     struct vb2_queue *src_vq,
			     struct vb2_queue *dst_vq)
{
	struct rkvdec_ctx *ctx = priv;
	struct rkvdec_dev *rkvdec = ctx->dev;
	int ret;

	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &rkvdec_queue_ops;
	src_vq->mem_ops = &vb2_dma_contig_memops;

	/*
	 * Driver does mostly sequential access, so sacrifice TLB efficiency
	 * for faster allocation. Also, no CPU access on the source queue,
	 * so no kernel mapping needed.
	 */
	src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
			    DMA_ATTR_NO_KERNEL_MAPPING;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &rkvdec->vdev_lock;
	src_vq->dev = rkvdec->v4l2_dev.dev;
	/* Stateless decoding requires every OUTPUT buffer in a request. */
	src_vq->supports_requests = true;
	src_vq->requires_requests = true;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	/* CAPTURE buffers are also read back as reference frames. */
	dst_vq->bidirectional = true;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
			    DMA_ATTR_NO_KERNEL_MAPPING;
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &rkvdec_queue_ops;
	dst_vq->buf_struct_size = sizeof(struct rkvdec_decoded_buffer);
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &rkvdec->vdev_lock;
	dst_vq->dev = rkvdec->v4l2_dev.dev;

	return vb2_queue_init(dst_vq);
}
/* Register one codec's control set on the context's control handler. */
static int rkvdec_add_ctrls(struct rkvdec_ctx *ctx,
			    const struct rkvdec_ctrls *ctrls)
{
	unsigned int i;

	for (i = 0; i < ctrls->num_ctrls; i++) {
		v4l2_ctrl_new_custom(&ctx->ctrl_hdl, &ctrls->ctrls[i].cfg,
				     ctx);

		/* The handler latches the first error; stop right away. */
		if (ctx->ctrl_hdl.error)
			return ctx->ctrl_hdl.error;
	}

	return 0;
}
/*
 * Create the per-context control handler holding the controls of every
 * supported codec, and apply their default values.
 */
static int rkvdec_init_ctrls(struct rkvdec_ctx *ctx)
{
	unsigned int i, total = 0;
	int ret;

	for (i = 0; i < ARRAY_SIZE(rkvdec_coded_fmts); i++)
		total += rkvdec_coded_fmts[i].ctrls->num_ctrls;

	v4l2_ctrl_handler_init(&ctx->ctrl_hdl, total);

	for (i = 0; i < ARRAY_SIZE(rkvdec_coded_fmts); i++) {
		ret = rkvdec_add_ctrls(ctx, rkvdec_coded_fmts[i].ctrls);
		if (ret)
			goto free_handler;
	}

	ret = v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
	if (ret)
		goto free_handler;

	ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
	return 0;

free_handler:
	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
	return ret;
}
/*
 * Device-node open: allocate a decoding context with default formats,
 * controls and an m2m context, and attach it to the file handle.
 */
static int rkvdec_open(struct file *filp)
{
	struct rkvdec_dev *rkvdec = video_drvdata(filp);
	struct rkvdec_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = rkvdec;
	/* Coded format must be reset first: the decoded one derives from it. */
	rkvdec_reset_coded_fmt(ctx);
	rkvdec_reset_decoded_fmt(ctx);
	v4l2_fh_init(&ctx->fh, video_devdata(filp));

	ret = rkvdec_init_ctrls(ctx);
	if (ret)
		goto err_free_ctx;

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(rkvdec->m2m_dev, ctx,
					    rkvdec_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto err_cleanup_ctrls;
	}

	filp->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	return 0;

err_cleanup_ctrls:
	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);

err_free_ctx:
	kfree(ctx);
	return ret;
}
/*
 * Device-node release: tear down the context in the reverse order of
 * rkvdec_open() (fh removal before m2m/ctrl teardown).
 */
static int rkvdec_release(struct file *filp)
{
	struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(filp->private_data);

	v4l2_fh_del(&ctx->fh);
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);

	return 0;
}
/* File operations; poll/mmap are delegated to the m2m helpers. */
static const struct v4l2_file_operations rkvdec_fops = {
	.owner = THIS_MODULE,
	.open = rkvdec_open,
	.release = rkvdec_release,
	.poll = v4l2_m2m_fop_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = v4l2_m2m_fop_mmap,
};
/*
 * Register the V4L2 device, mem2mem device, video node and media
 * controller. Each error label unwinds exactly the steps completed
 * before it, in reverse order.
 */
static int rkvdec_v4l2_init(struct rkvdec_dev *rkvdec)
{
	int ret;

	ret = v4l2_device_register(rkvdec->dev, &rkvdec->v4l2_dev);
	if (ret) {
		dev_err(rkvdec->dev, "Failed to register V4L2 device\n");
		return ret;
	}

	rkvdec->m2m_dev = v4l2_m2m_init(&rkvdec_m2m_ops);
	if (IS_ERR(rkvdec->m2m_dev)) {
		v4l2_err(&rkvdec->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(rkvdec->m2m_dev);
		goto err_unregister_v4l2;
	}

	rkvdec->mdev.dev = rkvdec->dev;
	strscpy(rkvdec->mdev.model, "rkvdec", sizeof(rkvdec->mdev.model));
	strscpy(rkvdec->mdev.bus_info, "platform:rkvdec",
		sizeof(rkvdec->mdev.bus_info));
	media_device_init(&rkvdec->mdev);
	rkvdec->mdev.ops = &rkvdec_media_ops;
	rkvdec->v4l2_dev.mdev = &rkvdec->mdev;

	rkvdec->vdev.lock = &rkvdec->vdev_lock;
	rkvdec->vdev.v4l2_dev = &rkvdec->v4l2_dev;
	rkvdec->vdev.fops = &rkvdec_fops;
	/* vdev is embedded in rkvdec_dev, so no release action is needed. */
	rkvdec->vdev.release = video_device_release_empty;
	rkvdec->vdev.vfl_dir = VFL_DIR_M2M;
	rkvdec->vdev.device_caps = V4L2_CAP_STREAMING |
				   V4L2_CAP_VIDEO_M2M_MPLANE;
	rkvdec->vdev.ioctl_ops = &rkvdec_ioctl_ops;
	video_set_drvdata(&rkvdec->vdev, rkvdec);
	strscpy(rkvdec->vdev.name, "rkvdec", sizeof(rkvdec->vdev.name));

	ret = video_register_device(&rkvdec->vdev, VFL_TYPE_VIDEO, -1);
	if (ret) {
		v4l2_err(&rkvdec->v4l2_dev, "Failed to register video device\n");
		goto err_cleanup_mc;
	}

	ret = v4l2_m2m_register_media_controller(rkvdec->m2m_dev, &rkvdec->vdev,
						 MEDIA_ENT_F_PROC_VIDEO_DECODER);
	if (ret) {
		v4l2_err(&rkvdec->v4l2_dev,
			 "Failed to initialize V4L2 M2M media controller\n");
		goto err_unregister_vdev;
	}

	ret = media_device_register(&rkvdec->mdev);
	if (ret) {
		v4l2_err(&rkvdec->v4l2_dev, "Failed to register media device\n");
		goto err_unregister_mc;
	}

	return 0;

err_unregister_mc:
	v4l2_m2m_unregister_media_controller(rkvdec->m2m_dev);

err_unregister_vdev:
	video_unregister_device(&rkvdec->vdev);

err_cleanup_mc:
	media_device_cleanup(&rkvdec->mdev);
	v4l2_m2m_release(rkvdec->m2m_dev);

err_unregister_v4l2:
	v4l2_device_unregister(&rkvdec->v4l2_dev);
	return ret;
}
/* Full teardown of what rkvdec_v4l2_init() registered, in reverse order. */
static void rkvdec_v4l2_cleanup(struct rkvdec_dev *rkvdec)
{
	media_device_unregister(&rkvdec->mdev);
	v4l2_m2m_unregister_media_controller(rkvdec->m2m_dev);
	video_unregister_device(&rkvdec->vdev);
	media_device_cleanup(&rkvdec->mdev);
	v4l2_m2m_release(rkvdec->m2m_dev);
	v4l2_device_unregister(&rkvdec->v4l2_dev);
}
/*
 * Threaded decode-done interrupt handler. Reads and acknowledges the
 * interrupt status, then completes the current job unless the watchdog
 * already claimed it.
 */
static irqreturn_t rkvdec_irq_handler(int irq, void *priv)
{
	struct rkvdec_dev *rkvdec = priv;
	enum vb2_buffer_state state;
	u32 status;

	status = readl(rkvdec->regs + RKVDEC_REG_INTERRUPT);
	state = (status & RKVDEC_RDY_STA) ?
		VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;

	/* Ack/clear the interrupt before finishing the job. */
	writel(0, rkvdec->regs + RKVDEC_REG_INTERRUPT);
	if (cancel_delayed_work(&rkvdec->watchdog_work)) {
		struct rkvdec_ctx *ctx;

		/*
		 * cancel_delayed_work() returning true means the watchdog
		 * had not fired yet, so this handler owns job completion.
		 */
		ctx = v4l2_m2m_get_curr_priv(rkvdec->m2m_dev);
		rkvdec_job_finish(ctx, state);
	}

	return IRQ_HANDLED;
}
/*
 * Watchdog worker: fires if the hardware did not raise a decode-done
 * interrupt in time. Disables the IRQ, resets the decode mode register
 * and fails the current job.
 */
static void rkvdec_watchdog_func(struct work_struct *work)
{
	struct rkvdec_dev *rkvdec;
	struct rkvdec_ctx *ctx;

	rkvdec = container_of(to_delayed_work(work), struct rkvdec_dev,
			      watchdog_work);
	ctx = v4l2_m2m_get_curr_priv(rkvdec->m2m_dev);
	if (ctx) {
		dev_err(rkvdec->dev, "Frame processing timed out!\n");
		/* Mask further decoder interrupts and stop the core. */
		writel(RKVDEC_IRQ_DIS, rkvdec->regs + RKVDEC_REG_INTERRUPT);
		writel(0, rkvdec->regs + RKVDEC_REG_SYSCTRL);
		rkvdec_job_finish(ctx, VB2_BUF_STATE_ERROR);
	}
}
/* Device-tree match table: currently only the RK3399 video decoder. */
static const struct of_device_id of_rkvdec_match[] = {
	{ .compatible = "rockchip,rk3399-vdec" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_rkvdec_match);

/* Clocks requested as a bulk set; order matches the DT binding. */
static const char * const rkvdec_clk_names[] = {
	"axi", "ahb", "cabac", "core"
};
/*
 * Platform probe: map resources, grab clocks and the decode-done IRQ,
 * enable autosuspending runtime PM, then bring up the V4L2 stack.
 *
 * Fix vs. previous revision: platform_get_irq() failures used to be
 * collapsed into -ENXIO, which broke -EPROBE_DEFER propagation. The
 * function never returns 0, so a negative value is simply forwarded.
 */
static int rkvdec_probe(struct platform_device *pdev)
{
	struct rkvdec_dev *rkvdec;
	unsigned int i;
	int ret, irq;

	rkvdec = devm_kzalloc(&pdev->dev, sizeof(*rkvdec), GFP_KERNEL);
	if (!rkvdec)
		return -ENOMEM;

	platform_set_drvdata(pdev, rkvdec);
	rkvdec->dev = &pdev->dev;
	mutex_init(&rkvdec->vdev_lock);
	INIT_DELAYED_WORK(&rkvdec->watchdog_work, rkvdec_watchdog_func);

	rkvdec->clocks = devm_kcalloc(&pdev->dev, ARRAY_SIZE(rkvdec_clk_names),
				      sizeof(*rkvdec->clocks), GFP_KERNEL);
	if (!rkvdec->clocks)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(rkvdec_clk_names); i++)
		rkvdec->clocks[i].id = rkvdec_clk_names[i];

	ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(rkvdec_clk_names),
				rkvdec->clocks);
	if (ret)
		return ret;

	rkvdec->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(rkvdec->regs))
		return PTR_ERR(rkvdec->regs);

	/* The decoder can only address 32-bit DMA. */
	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Could not set DMA coherent mask.\n");
		return ret;
	}

	vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* preserves -EPROBE_DEFER */

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					rkvdec_irq_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), rkvdec);
	if (ret) {
		dev_err(&pdev->dev, "Could not request vdec IRQ\n");
		return ret;
	}

	/* Let the core power down 100ms after the last job. */
	pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = rkvdec_v4l2_init(rkvdec);
	if (ret)
		goto err_disable_runtime_pm;

	return 0;

err_disable_runtime_pm:
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
/*
 * Platform remove: make sure the watchdog cannot fire during teardown,
 * then unwind the V4L2 stack and runtime PM.
 */
static void rkvdec_remove(struct platform_device *pdev)
{
	struct rkvdec_dev *rkvdec = platform_get_drvdata(pdev);

	cancel_delayed_work_sync(&rkvdec->watchdog_work);

	rkvdec_v4l2_cleanup(rkvdec);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
}
#ifdef CONFIG_PM
/* Runtime resume: (re)enable the whole clock bulk before any decode. */
static int rkvdec_runtime_resume(struct device *dev)
{
	struct rkvdec_dev *rkvdec = dev_get_drvdata(dev);

	return clk_bulk_prepare_enable(ARRAY_SIZE(rkvdec_clk_names),
				       rkvdec->clocks);
}

/* Runtime suspend: gate all decoder clocks when idle. */
static int rkvdec_runtime_suspend(struct device *dev)
{
	struct rkvdec_dev *rkvdec = dev_get_drvdata(dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(rkvdec_clk_names),
				   rkvdec->clocks);
	return 0;
}
#endif
/* System sleep is forced through the runtime-PM callbacks. */
static const struct dev_pm_ops rkvdec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(rkvdec_runtime_suspend, rkvdec_runtime_resume, NULL)
};

static struct platform_driver rkvdec_driver = {
	.probe = rkvdec_probe,
	.remove_new = rkvdec_remove,
	.driver = {
		   .name = "rkvdec",
		   .of_match_table = of_rkvdec_match,
		   .pm = &rkvdec_pm_ops,
	},
};
module_platform_driver(rkvdec_driver);

MODULE_AUTHOR("Boris Brezillon <[email protected]>");
MODULE_DESCRIPTION("Rockchip Video Decoder driver");
MODULE_LICENSE("GPL v2");
/* ---- end of drivers/staging/media/rkvdec/rkvdec.c ---- */
// SPDX-License-Identifier: GPL-2.0
/*
* Rockchip Video Decoder VP9 backend
*
* Copyright (C) 2019 Collabora, Ltd.
* Boris Brezillon <[email protected]>
* Copyright (C) 2021 Collabora, Ltd.
* Andrzej Pietrasiewicz <[email protected]>
*
* Copyright (C) 2016 Rockchip Electronics Co., Ltd.
* Alpha Lin <[email protected]>
*/
/*
* For following the vp9 spec please start reading this driver
* code from rkvdec_vp9_run() followed by rkvdec_vp9_done().
*/
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-vp9.h>
#include "rkvdec.h"
#include "rkvdec-regs.h"
#define RKVDEC_VP9_PROBE_SIZE 4864
#define RKVDEC_VP9_COUNT_SIZE 13232
#define RKVDEC_VP9_MAX_SEGMAP_SIZE 73728
/*
 * The structures below mirror hardware memory layouts (probability
 * tables written to the CABAC table buffer and symbol counters read
 * back from the count buffer). Field order, padding and sizes are
 * dictated by the decoder; do not reorder or repack.
 */

/* Per-block intra prediction mode probabilities (key/intra-only frames). */
struct rkvdec_vp9_intra_mode_probs {
	u8 y_mode[105];
	u8 uv_mode[23];
};

/* Probability table layout used for key frames and intra-only frames. */
struct rkvdec_vp9_intra_only_frame_probs {
	u8 coef_intra[4][2][128];
	struct rkvdec_vp9_intra_mode_probs intra_mode[10];
};

/* Probability table layout used for inter frames. */
struct rkvdec_vp9_inter_frame_probs {
	u8 y_mode[4][9];
	u8 comp_mode[5];
	u8 comp_ref[5];
	u8 single_ref[5][2];
	u8 inter_mode[7][3];
	u8 interp_filter[4][2];
	u8 padding0[11];
	u8 coef[2][4][2][128];
	u8 uv_mode_0_2[3][9];
	u8 padding1[5];
	u8 uv_mode_3_5[3][9];
	u8 padding2[5];
	u8 uv_mode_6_8[3][9];
	u8 padding3[5];
	u8 uv_mode_9[9];
	u8 padding4[7];
	u8 padding5[16];
	/* Motion-vector probabilities. */
	struct {
		u8 joint[3];
		u8 sign[2];
		u8 classes[2][10];
		u8 class0_bit[2];
		u8 bits[2][10];
		u8 class0_fr[2][2][3];
		u8 fr[2][3];
		u8 class0_hp[2];
		u8 hp[2];
	} mv;
};

/* Top-level probability block; common part plus a frame-type union. */
struct rkvdec_vp9_probs {
	u8 partition[16][3];
	u8 pred[3];
	u8 tree[7];
	u8 skip[3];
	u8 tx32[2][3];
	u8 tx16[2][2];
	u8 tx8[2][1];
	u8 is_inter[4];
	/* 128 bit alignment */
	u8 padding0[3];
	union {
		struct rkvdec_vp9_inter_frame_probs inter;
		struct rkvdec_vp9_intra_only_frame_probs intra_only;
	};
	/* 128 bit alignment */
	u8 padding1[11];
};

/* Data structure describing auxiliary buffer format. */
struct rkvdec_vp9_priv_tbl {
	struct rkvdec_vp9_probs probs;
	/* Two segmentation maps, flipped between frames (see segmapid). */
	u8 segmap[2][RKVDEC_VP9_MAX_SEGMAP_SIZE];
};

/* Per-band coefficient/EOB counters reported by the hardware. */
struct rkvdec_vp9_refs_counts {
	u32 eob[2];
	u32 coeff[3];
};

/* Symbol counters written by the hardware after an inter frame. */
struct rkvdec_vp9_inter_frame_symbol_counts {
	u32 partition[16][4];
	u32 skip[3][2];
	u32 inter[4][2];
	u32 tx32p[2][4];
	u32 tx16p[2][4];
	u32 tx8p[2][2];
	u32 y_mode[4][10];
	u32 uv_mode[10][10];
	u32 comp[5][2];
	u32 comp_ref[5][2];
	u32 single_ref[5][2][2];
	u32 mv_mode[7][4];
	u32 filter[4][3];
	u32 mv_joint[4];
	u32 sign[2][2];
	/* add 1 element for align */
	u32 classes[2][11 + 1];
	u32 class0[2][2];
	u32 bits[2][10][2];
	u32 class0_fp[2][2][4];
	u32 fp[2][4];
	u32 class0_hp[2][2];
	u32 hp[2][2];
	struct rkvdec_vp9_refs_counts ref_cnt[2][4][2][6][6];
};

/* Symbol counters written by the hardware after an intra-only frame. */
struct rkvdec_vp9_intra_frame_symbol_counts {
	u32 partition[4][4][4];
	u32 skip[3][2];
	u32 intra[4][2];
	u32 tx32p[2][4];
	u32 tx16p[2][4];
	u32 tx8p[2][2];
	struct rkvdec_vp9_refs_counts ref_cnt[2][4][2][6][6];
};

/* Per-job state: generic run data plus the VP9 frame control. */
struct rkvdec_vp9_run {
	struct rkvdec_run base;
	const struct v4l2_ctrl_vp9_frame *decode_params;
};

/* Snapshot of a frame's header state, kept for "last frame" handling. */
struct rkvdec_vp9_frame_info {
	u32 valid : 1;
	u32 segmapid : 1;
	u32 frame_context_idx : 2;
	u32 reference_mode : 2;
	u32 tx_mode : 3;
	u32 interpolation_filter : 3;
	u32 flags;
	u64 timestamp;
	struct v4l2_vp9_segmentation seg;
	struct v4l2_vp9_loop_filter lf;
};

/* Per-context VP9 decoder state (aux buffers, probs, frame history). */
struct rkvdec_vp9_ctx {
	struct rkvdec_aux_buf priv_tbl;
	struct rkvdec_aux_buf count_tbl;
	struct v4l2_vp9_frame_symbol_counts inter_cnts;
	struct v4l2_vp9_frame_symbol_counts intra_cnts;
	struct v4l2_vp9_frame_context probability_tables;
	struct v4l2_vp9_frame_context frame_context[4];
	struct rkvdec_vp9_frame_info cur;
	struct rkvdec_vp9_frame_info last;
};
/*
 * Flatten a 6x6x3 coefficient-probability array into the hardware's
 * packed layout: 5 bytes of padding are inserted after every 27 payload
 * bytes so each group starts on a 128-bit boundary.
 */
static void write_coeff_plane(const u8 coef[6][6][3], u8 *coeff_plane)
{
	unsigned int out = 0, in_group = 0;
	int i, j, l;

	for (i = 0; i < 6; i++) {
		for (j = 0; j < 6; j++) {
			for (l = 0; l < 3; l++) {
				coeff_plane[out++] = coef[i][j][l];
				/* Skip the pad bytes after each 27-byte group. */
				if (++in_group == 27) {
					out += 5;
					in_group = 0;
				}
			}
		}
	}
}
/*
 * Fill the intra-only variant of the probability table for the hardware.
 * Coefficient probs come from the current frame context; intra-mode
 * probs are the fixed key-frame tables from the V4L2 VP9 library.
 */
static void init_intra_only_probs(struct rkvdec_ctx *ctx,
				  const struct rkvdec_vp9_run *run)
{
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
	struct rkvdec_vp9_intra_only_frame_probs *rkprobs;
	const struct v4l2_vp9_frame_context *probs;
	unsigned int i, j, k;

	rkprobs = &tbl->probs.intra_only;
	probs = &vp9_ctx->probability_tables;

	/*
	 * intra only 149 x 128 bits ,aligned to 152 x 128 bits coeff related
	 * prob 64 x 128 bits
	 */
	for (i = 0; i < ARRAY_SIZE(probs->coef); i++) {
		for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++)
			write_coeff_plane(probs->coef[i][j][0],
					  rkprobs->coef_intra[i][j]);
	}

	/* intra mode prob 80 x 128 bits */
	for (i = 0; i < ARRAY_SIZE(v4l2_vp9_kf_y_mode_prob); i++) {
		unsigned int byte_count = 0;
		int idx = 0;

		/* vp9_kf_y_mode_prob */
		for (j = 0; j < ARRAY_SIZE(v4l2_vp9_kf_y_mode_prob[0]); j++) {
			for (k = 0; k < ARRAY_SIZE(v4l2_vp9_kf_y_mode_prob[0][0]);
			     k++) {
				u8 val = v4l2_vp9_kf_y_mode_prob[i][j][k];

				rkprobs->intra_mode[i].y_mode[idx++] = val;
				byte_count++;
				/* 5 pad bytes after every 27, for 128-bit alignment. */
				if (byte_count == 27) {
					byte_count = 0;
					idx += 5;
				}
			}
		}
	}

	/* Copy the flat kf uv-mode table, 23 bytes per intra mode. */
	for (i = 0; i < sizeof(v4l2_vp9_kf_uv_mode_prob); ++i) {
		const u8 *ptr = (const u8 *)v4l2_vp9_kf_uv_mode_prob;

		rkprobs->intra_mode[i / 23].uv_mode[i % 23] = ptr[i];
	}
}
/*
 * Fill the inter-frame variant of the probability table from the
 * current frame context. Mostly straight memcpy()s; only coefficient
 * probs need the padded write_coeff_plane() layout.
 */
static void init_inter_probs(struct rkvdec_ctx *ctx,
			     const struct rkvdec_vp9_run *run)
{
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
	struct rkvdec_vp9_inter_frame_probs *rkprobs;
	const struct v4l2_vp9_frame_context *probs;
	unsigned int i, j, k;

	rkprobs = &tbl->probs.inter;
	probs = &vp9_ctx->probability_tables;

	/*
	 * inter probs
	 * 151 x 128 bits, aligned to 152 x 128 bits
	 * inter only
	 * intra_y_mode & inter_block info 6 x 128 bits
	 */
	memcpy(rkprobs->y_mode, probs->y_mode, sizeof(rkprobs->y_mode));
	memcpy(rkprobs->comp_mode, probs->comp_mode,
	       sizeof(rkprobs->comp_mode));
	memcpy(rkprobs->comp_ref, probs->comp_ref,
	       sizeof(rkprobs->comp_ref));
	memcpy(rkprobs->single_ref, probs->single_ref,
	       sizeof(rkprobs->single_ref));
	memcpy(rkprobs->inter_mode, probs->inter_mode,
	       sizeof(rkprobs->inter_mode));
	memcpy(rkprobs->interp_filter, probs->interp_filter,
	       sizeof(rkprobs->interp_filter));

	/* 128 x 128 bits coeff related */
	for (i = 0; i < ARRAY_SIZE(probs->coef); i++) {
		for (j = 0; j < ARRAY_SIZE(probs->coef[0]); j++) {
			for (k = 0; k < ARRAY_SIZE(probs->coef[0][0]); k++)
				write_coeff_plane(probs->coef[i][j][k],
						  rkprobs->coef[k][i][j]);
		}
	}

	/* intra uv mode 6 x 128 */
	memcpy(rkprobs->uv_mode_0_2, &probs->uv_mode[0],
	       sizeof(rkprobs->uv_mode_0_2));
	memcpy(rkprobs->uv_mode_3_5, &probs->uv_mode[3],
	       sizeof(rkprobs->uv_mode_3_5));
	memcpy(rkprobs->uv_mode_6_8, &probs->uv_mode[6],
	       sizeof(rkprobs->uv_mode_6_8));
	memcpy(rkprobs->uv_mode_9, &probs->uv_mode[9],
	       sizeof(rkprobs->uv_mode_9));

	/* mv related 6 x 128 */
	memcpy(rkprobs->mv.joint, probs->mv.joint,
	       sizeof(rkprobs->mv.joint));
	memcpy(rkprobs->mv.sign, probs->mv.sign,
	       sizeof(rkprobs->mv.sign));
	memcpy(rkprobs->mv.classes, probs->mv.classes,
	       sizeof(rkprobs->mv.classes));
	memcpy(rkprobs->mv.class0_bit, probs->mv.class0_bit,
	       sizeof(rkprobs->mv.class0_bit));
	memcpy(rkprobs->mv.bits, probs->mv.bits,
	       sizeof(rkprobs->mv.bits));
	memcpy(rkprobs->mv.class0_fr, probs->mv.class0_fr,
	       sizeof(rkprobs->mv.class0_fr));
	memcpy(rkprobs->mv.fr, probs->mv.fr,
	       sizeof(rkprobs->mv.fr));
	memcpy(rkprobs->mv.class0_hp, probs->mv.class0_hp,
	       sizeof(rkprobs->mv.class0_hp));
	memcpy(rkprobs->mv.hp, probs->mv.hp,
	       sizeof(rkprobs->mv.hp));
}
/*
 * Build the complete probability table for the upcoming frame: the
 * common header part here, then the frame-type-specific part via
 * init_intra_only_probs() or init_inter_probs().
 */
static void init_probs(struct rkvdec_ctx *ctx,
		       const struct rkvdec_vp9_run *run)
{
	const struct v4l2_ctrl_vp9_frame *dec_params;
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct rkvdec_vp9_priv_tbl *tbl = vp9_ctx->priv_tbl.cpu;
	struct rkvdec_vp9_probs *rkprobs = &tbl->probs;
	const struct v4l2_vp9_segmentation *seg;
	const struct v4l2_vp9_frame_context *probs;
	bool intra_only;

	dec_params = run->decode_params;
	probs = &vp9_ctx->probability_tables;
	seg = &dec_params->seg;

	memset(rkprobs, 0, sizeof(*rkprobs));

	intra_only = !!(dec_params->flags &
			(V4L2_VP9_FRAME_FLAG_KEY_FRAME |
			 V4L2_VP9_FRAME_FLAG_INTRA_ONLY));

	/* sb info 5 x 128 bit */
	/* Intra-only frames use the fixed key-frame partition probs. */
	memcpy(rkprobs->partition,
	       intra_only ? v4l2_vp9_kf_partition_probs : probs->partition,
	       sizeof(rkprobs->partition));

	memcpy(rkprobs->pred, seg->pred_probs, sizeof(rkprobs->pred));
	memcpy(rkprobs->tree, seg->tree_probs, sizeof(rkprobs->tree));
	memcpy(rkprobs->skip, probs->skip, sizeof(rkprobs->skip));
	memcpy(rkprobs->tx32, probs->tx32, sizeof(rkprobs->tx32));
	memcpy(rkprobs->tx16, probs->tx16, sizeof(rkprobs->tx16));
	memcpy(rkprobs->tx8, probs->tx8, sizeof(rkprobs->tx8));
	memcpy(rkprobs->is_inter, probs->is_inter, sizeof(rkprobs->is_inter));

	if (intra_only)
		init_intra_only_probs(ctx, run);
	else
		init_inter_probs(ctx, run);
}
/* Register offsets describing one reference frame slot in the decoder. */
struct rkvdec_vp9_ref_reg {
	u32 reg_frm_size;
	u32 reg_hor_stride;
	u32 reg_y_stride;
	u32 reg_yuv_stride;
	u32 reg_ref_base;
};

/*
 * Register map for the three VP9 references (LAST, GOLDEN, ALTREF).
 * Only the LAST slot has a dedicated YUV-stride register; the others
 * use 0 as a "not present" marker checked in config_ref_registers().
 */
static struct rkvdec_vp9_ref_reg ref_regs[] = {
	{
		.reg_frm_size = RKVDEC_REG_VP9_FRAME_SIZE(0),
		.reg_hor_stride = RKVDEC_VP9_HOR_VIRSTRIDE(0),
		.reg_y_stride = RKVDEC_VP9_LAST_FRAME_YSTRIDE,
		.reg_yuv_stride = RKVDEC_VP9_LAST_FRAME_YUVSTRIDE,
		.reg_ref_base = RKVDEC_REG_VP9_LAST_FRAME_BASE,
	},
	{
		.reg_frm_size = RKVDEC_REG_VP9_FRAME_SIZE(1),
		.reg_hor_stride = RKVDEC_VP9_HOR_VIRSTRIDE(1),
		.reg_y_stride = RKVDEC_VP9_GOLDEN_FRAME_YSTRIDE,
		.reg_yuv_stride = 0,
		.reg_ref_base = RKVDEC_REG_VP9_GOLDEN_FRAME_BASE,
	},
	{
		.reg_frm_size = RKVDEC_REG_VP9_FRAME_SIZE(2),
		.reg_hor_stride = RKVDEC_VP9_HOR_VIRSTRIDE(2),
		.reg_y_stride = RKVDEC_VP9_ALTREF_FRAME_YSTRIDE,
		.reg_yuv_stride = 0,
		.reg_ref_base = RKVDEC_REG_VP9_ALTREF_FRAME_BASE,
	}
};
/*
 * Look up a reference buffer on the capture queue by timestamp.
 * Returns the matching decoded buffer, or the destination buffer when
 * the reference is unused/invalid (self-reference is harmless here).
 */
static struct rkvdec_decoded_buffer *
get_ref_buf(struct rkvdec_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp)
{
	struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
	struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
	struct vb2_buffer *buf;

	/*
	 * If a ref is unused or invalid, address of current destination
	 * buffer is returned.
	 */
	buf = vb2_find_buffer(cap_q, timestamp);
	if (!buf)
		buf = &dst->vb2_buf;

	return vb2_to_rkvdec_decoded_buf(buf);
}
/*
 * Return the DMA address of the motion-vector area of a decoded buffer.
 * MVs live immediately after the 4:2:0 pixel data, whose size follows
 * the hardware alignment: height rounded to 64, pitch to 512 bits.
 */
static dma_addr_t get_mv_base_addr(struct rkvdec_decoded_buffer *buf)
{
	unsigned int pitch, height, pix_bytes;

	height = round_up(buf->vp9.height, 64);
	pitch = round_up(buf->vp9.width * buf->vp9.bit_depth, 512) / 8;
	/* Luma plus half-size chroma (NV12-style 4:2:0 layout). */
	pix_bytes = (height * pitch * 3) / 2;

	return vb2_dma_contig_plane_dma_addr(&buf->base.vb.vb2_buf, 0) +
	       pix_bytes;
}
/*
 * Program one reference-frame register slot: geometry, base address and
 * strides. When the reference resolves to the destination buffer
 * (unused ref), the stride registers are left untouched.
 */
static void config_ref_registers(struct rkvdec_ctx *ctx,
				 const struct rkvdec_vp9_run *run,
				 struct rkvdec_decoded_buffer *ref_buf,
				 struct rkvdec_vp9_ref_reg *ref_reg)
{
	unsigned int aligned_pitch, aligned_height, y_len, yuv_len;
	struct rkvdec_dev *rkvdec = ctx->dev;

	aligned_height = round_up(ref_buf->vp9.height, 64);
	writel_relaxed(RKVDEC_VP9_FRAMEWIDTH(ref_buf->vp9.width) |
		       RKVDEC_VP9_FRAMEHEIGHT(ref_buf->vp9.height),
		       rkvdec->regs + ref_reg->reg_frm_size);

	writel_relaxed(vb2_dma_contig_plane_dma_addr(&ref_buf->base.vb.vb2_buf, 0),
		       rkvdec->regs + ref_reg->reg_ref_base);

	/* Self-referencing dst means "no real reference": skip strides. */
	if (&ref_buf->base.vb == run->base.bufs.dst)
		return;

	aligned_pitch = round_up(ref_buf->vp9.width * ref_buf->vp9.bit_depth, 512) / 8;
	y_len = aligned_height * aligned_pitch;
	yuv_len = (y_len * 3) / 2;

	writel_relaxed(RKVDEC_HOR_Y_VIRSTRIDE(aligned_pitch / 16) |
		       RKVDEC_HOR_UV_VIRSTRIDE(aligned_pitch / 16),
		       rkvdec->regs + ref_reg->reg_hor_stride);
	writel_relaxed(RKVDEC_VP9_REF_YSTRIDE(y_len / 16),
		       rkvdec->regs + ref_reg->reg_y_stride);

	/* Only the LAST-frame slot has a YUV-stride register. */
	if (!ref_reg->reg_yuv_stride)
		return;
	writel_relaxed(RKVDEC_VP9_REF_YUVSTRIDE(yuv_len / 16),
		       rkvdec->regs + ref_reg->reg_yuv_stride);
}
/*
 * Program the per-segment feature register for one segment id, taking
 * the segmentation data from the last frame when it is valid (the
 * hardware applies last-frame segmentation to the current decode).
 */
static void config_seg_registers(struct rkvdec_ctx *ctx, unsigned int segid)
{
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	const struct v4l2_vp9_segmentation *seg;
	struct rkvdec_dev *rkvdec = ctx->dev;
	s16 feature_val;
	int feature_id;
	u32 val = 0;

	seg = vp9_ctx->last.valid ? &vp9_ctx->last.seg : &vp9_ctx->cur.seg;
	feature_id = V4L2_VP9_SEG_LVL_ALT_Q;
	if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid)) {
		feature_val = seg->feature_data[segid][feature_id];
		val |= RKVDEC_SEGID_FRAME_QP_DELTA_EN(1) |
		       RKVDEC_SEGID_FRAME_QP_DELTA(feature_val);
	}

	feature_id = V4L2_VP9_SEG_LVL_ALT_L;
	if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid)) {
		feature_val = seg->feature_data[segid][feature_id];
		val |= RKVDEC_SEGID_FRAME_LOOPFILTER_VALUE_EN(1) |
		       RKVDEC_SEGID_FRAME_LOOPFILTER_VALUE(feature_val);
	}

	feature_id = V4L2_VP9_SEG_LVL_REF_FRAME;
	if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid)) {
		feature_val = seg->feature_data[segid][feature_id];
		val |= RKVDEC_SEGID_REFERINFO_EN(1) |
		       RKVDEC_SEGID_REFERINFO(feature_val);
	}

	feature_id = V4L2_VP9_SEG_LVL_SKIP;
	if (v4l2_vp9_seg_feat_enabled(seg->feature_enabled, feature_id, segid))
		val |= RKVDEC_SEGID_FRAME_SKIP_EN(1);

	/* The abs/delta flag is carried only in the segment-0 register. */
	if (!segid &&
	    (seg->flags & V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE))
		val |= RKVDEC_SEGID_ABS_DELTA(1);

	writel_relaxed(val, rkvdec->regs + RKVDEC_VP9_SEGID_GRP(segid));
}
/*
 * Stash the bitstream geometry on the capture buffer so later frames
 * can use it as a reference with the correct dimensions.
 */
static void update_dec_buf_info(struct rkvdec_decoded_buffer *buf,
				const struct v4l2_ctrl_vp9_frame *dec_params)
{
	buf->vp9.bit_depth = dec_params->bit_depth;
	buf->vp9.height = dec_params->frame_height_minus_1 + 1;
	buf->vp9.width = dec_params->frame_width_minus_1 + 1;
}
/*
 * Snapshot the current frame's header state; rotated into "last" by
 * update_ctx_last_info() once the frame is done.
 */
static void update_ctx_cur_info(struct rkvdec_vp9_ctx *vp9_ctx,
				struct rkvdec_decoded_buffer *buf,
				const struct v4l2_ctrl_vp9_frame *dec_params)
{
	vp9_ctx->cur.valid = true;
	vp9_ctx->cur.reference_mode = dec_params->reference_mode;
	vp9_ctx->cur.interpolation_filter = dec_params->interpolation_filter;
	vp9_ctx->cur.flags = dec_params->flags;
	vp9_ctx->cur.timestamp = buf->base.vb.vb2_buf.timestamp;
	vp9_ctx->cur.seg = dec_params->seg;
	vp9_ctx->cur.lf = dec_params->lf;
}

/* Promote the just-decoded frame's info to "last frame" state. */
static void update_ctx_last_info(struct rkvdec_vp9_ctx *vp9_ctx)
{
	vp9_ctx->last = vp9_ctx->cur;
}
/*
 * Program all hardware registers for one VP9 decode job: output
 * geometry/strides, stream length, reference frames, segmentation,
 * loop-filter deltas, last-frame info and all DMA base addresses.
 * Uses writel_relaxed() throughout; the final ordered write happens in
 * rkvdec_vp9_run() when the decoder is kicked.
 */
static void config_registers(struct rkvdec_ctx *ctx,
			     const struct rkvdec_vp9_run *run)
{
	unsigned int y_len, uv_len, yuv_len, bit_depth, aligned_height, aligned_pitch, stream_len;
	const struct v4l2_ctrl_vp9_frame *dec_params;
	struct rkvdec_decoded_buffer *ref_bufs[3];
	struct rkvdec_decoded_buffer *dst, *last, *mv_ref;
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	u32 val, last_frame_info = 0;
	const struct v4l2_vp9_segmentation *seg;
	struct rkvdec_dev *rkvdec = ctx->dev;
	dma_addr_t addr;
	bool intra_only;
	unsigned int i;

	dec_params = run->decode_params;
	dst = vb2_to_rkvdec_decoded_buf(&run->base.bufs.dst->vb2_buf);
	/* LAST/GOLDEN/ALTREF references, looked up by timestamp. */
	ref_bufs[0] = get_ref_buf(ctx, &dst->base.vb, dec_params->last_frame_ts);
	ref_bufs[1] = get_ref_buf(ctx, &dst->base.vb, dec_params->golden_frame_ts);
	ref_bufs[2] = get_ref_buf(ctx, &dst->base.vb, dec_params->alt_frame_ts);

	if (vp9_ctx->last.valid)
		last = get_ref_buf(ctx, &dst->base.vb, vp9_ctx->last.timestamp);
	else
		last = dst;

	update_dec_buf_info(dst, dec_params);
	update_ctx_cur_info(vp9_ctx, dst, dec_params);
	seg = &dec_params->seg;

	intra_only = !!(dec_params->flags &
			(V4L2_VP9_FRAME_FLAG_KEY_FRAME |
			 V4L2_VP9_FRAME_FLAG_INTRA_ONLY));

	writel_relaxed(RKVDEC_MODE(RKVDEC_MODE_VP9),
		       rkvdec->regs + RKVDEC_REG_SYSCTRL);

	/* Output strides: height 64-aligned, pitch 512-bit aligned. */
	bit_depth = dec_params->bit_depth;
	aligned_height = round_up(ctx->decoded_fmt.fmt.pix_mp.height, 64);

	aligned_pitch = round_up(ctx->decoded_fmt.fmt.pix_mp.width *
				 bit_depth,
				 512) / 8;
	y_len = aligned_height * aligned_pitch;
	uv_len = y_len / 2;
	yuv_len = y_len + uv_len;

	writel_relaxed(RKVDEC_Y_HOR_VIRSTRIDE(aligned_pitch / 16) |
		       RKVDEC_UV_HOR_VIRSTRIDE(aligned_pitch / 16),
		       rkvdec->regs + RKVDEC_REG_PICPAR);
	writel_relaxed(RKVDEC_Y_VIRSTRIDE(y_len / 16),
		       rkvdec->regs + RKVDEC_REG_Y_VIRSTRIDE);
	writel_relaxed(RKVDEC_YUV_VIRSTRIDE(yuv_len / 16),
		       rkvdec->regs + RKVDEC_REG_YUV_VIRSTRIDE);

	stream_len = vb2_get_plane_payload(&run->base.bufs.src->vb2_buf, 0);
	writel_relaxed(RKVDEC_STRM_LEN(stream_len),
		       rkvdec->regs + RKVDEC_REG_STRM_LEN);

	/*
	 * Reset count buffer, because decoder only output intra related syntax
	 * counts when decoding intra frame, but update entropy need to update
	 * all the probabilities.
	 */
	if (intra_only)
		memset(vp9_ctx->count_tbl.cpu, 0, vp9_ctx->count_tbl.size);

	/* Flip the segmentation-map bank when a new map will be written. */
	vp9_ctx->cur.segmapid = vp9_ctx->last.segmapid;
	if (!intra_only &&
	    !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
	    (!(seg->flags & V4L2_VP9_SEGMENTATION_FLAG_ENABLED) ||
	     (seg->flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP)))
		vp9_ctx->cur.segmapid++;

	for (i = 0; i < ARRAY_SIZE(ref_bufs); i++)
		config_ref_registers(ctx, run, ref_bufs[i], &ref_regs[i]);

	for (i = 0; i < 8; i++)
		config_seg_registers(ctx, i);

	writel_relaxed(RKVDEC_VP9_TX_MODE(vp9_ctx->cur.tx_mode) |
		       RKVDEC_VP9_FRAME_REF_MODE(dec_params->reference_mode),
		       rkvdec->regs + RKVDEC_VP9_CPRHEADER_CONFIG);

	if (!intra_only) {
		const struct v4l2_vp9_loop_filter *lf;
		s8 delta;

		/* Loop-filter deltas come from the previous frame if any. */
		if (vp9_ctx->last.valid)
			lf = &vp9_ctx->last.lf;
		else
			lf = &vp9_ctx->cur.lf;

		val = 0;
		for (i = 0; i < ARRAY_SIZE(lf->ref_deltas); i++) {
			delta = lf->ref_deltas[i];
			val |= RKVDEC_REF_DELTAS_LASTFRAME(i, delta);
		}

		writel_relaxed(val,
			       rkvdec->regs + RKVDEC_VP9_REF_DELTAS_LASTFRAME);

		for (i = 0; i < ARRAY_SIZE(lf->mode_deltas); i++) {
			delta = lf->mode_deltas[i];
			last_frame_info |= RKVDEC_MODE_DELTAS_LASTFRAME(i,
									delta);
		}
	}

	if (vp9_ctx->last.valid && !intra_only &&
	    vp9_ctx->last.seg.flags & V4L2_VP9_SEGMENTATION_FLAG_ENABLED)
		last_frame_info |= RKVDEC_SEG_EN_LASTFRAME;

	if (vp9_ctx->last.valid &&
	    vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_SHOW_FRAME)
		last_frame_info |= RKVDEC_LAST_SHOW_FRAME;

	if (vp9_ctx->last.valid &&
	    vp9_ctx->last.flags &
	    (V4L2_VP9_FRAME_FLAG_KEY_FRAME | V4L2_VP9_FRAME_FLAG_INTRA_ONLY))
		last_frame_info |= RKVDEC_LAST_INTRA_ONLY;

	if (vp9_ctx->last.valid &&
	    last->vp9.width == dst->vp9.width &&
	    last->vp9.height == dst->vp9.height)
		last_frame_info |= RKVDEC_LAST_WIDHHEIGHT_EQCUR;

	writel_relaxed(last_frame_info,
		       rkvdec->regs + RKVDEC_VP9_INFO_LASTFRAME);

	writel_relaxed(stream_len - dec_params->compressed_header_size -
		       dec_params->uncompressed_header_size,
		       rkvdec->regs + RKVDEC_VP9_LASTTILE_SIZE);

	/* Per-reference scaling factors in 14-bit fixed point. */
	for (i = 0; !intra_only && i < ARRAY_SIZE(ref_bufs); i++) {
		unsigned int refw = ref_bufs[i]->vp9.width;
		unsigned int refh = ref_bufs[i]->vp9.height;
		u32 hscale, vscale;

		hscale = (refw << 14) / dst->vp9.width;
		vscale = (refh << 14) / dst->vp9.height;
		writel_relaxed(RKVDEC_VP9_REF_HOR_SCALE(hscale) |
			       RKVDEC_VP9_REF_VER_SCALE(vscale),
			       rkvdec->regs + RKVDEC_VP9_REF_SCALE(i));
	}

	/* DMA base addresses: output, bitstream, prob table, counts. */
	addr = vb2_dma_contig_plane_dma_addr(&dst->base.vb.vb2_buf, 0);
	writel_relaxed(addr, rkvdec->regs + RKVDEC_REG_DECOUT_BASE);
	addr = vb2_dma_contig_plane_dma_addr(&run->base.bufs.src->vb2_buf, 0);
	writel_relaxed(addr, rkvdec->regs + RKVDEC_REG_STRM_RLC_BASE);
	writel_relaxed(vp9_ctx->priv_tbl.dma +
		       offsetof(struct rkvdec_vp9_priv_tbl, probs),
		       rkvdec->regs + RKVDEC_REG_CABACTBL_PROB_BASE);
	writel_relaxed(vp9_ctx->count_tbl.dma,
		       rkvdec->regs + RKVDEC_REG_VP9COUNT_BASE);

	/* Current/last segmentation-map banks (double buffered). */
	writel_relaxed(vp9_ctx->priv_tbl.dma +
		       offsetof(struct rkvdec_vp9_priv_tbl, segmap) +
		       (RKVDEC_VP9_MAX_SEGMAP_SIZE * vp9_ctx->cur.segmapid),
		       rkvdec->regs + RKVDEC_REG_VP9_SEGIDCUR_BASE);
	writel_relaxed(vp9_ctx->priv_tbl.dma +
		       offsetof(struct rkvdec_vp9_priv_tbl, segmap) +
		       (RKVDEC_VP9_MAX_SEGMAP_SIZE * (!vp9_ctx->cur.segmapid)),
		       rkvdec->regs + RKVDEC_REG_VP9_SEGIDLAST_BASE);

	/* Co-located MVs come from the last frame when it can be used. */
	if (!intra_only &&
	    !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
	    vp9_ctx->last.valid)
		mv_ref = last;
	else
		mv_ref = dst;

	writel_relaxed(get_mv_base_addr(mv_ref),
		       rkvdec->regs + RKVDEC_VP9_REF_COLMV_BASE);

	writel_relaxed(ctx->decoded_fmt.fmt.pix_mp.width |
		       (ctx->decoded_fmt.fmt.pix_mp.height << 16),
		       rkvdec->regs + RKVDEC_REG_PERFORMANCE_CYCLE);
}
/*
 * Sanity-check the frame parameters handed in by userspace.
 * Only profile 0 is handled, and the 64-aligned coded resolution must
 * match the negotiated decoded format (userspace renegotiates the
 * capture format on a resolution change).
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static int validate_dec_params(struct rkvdec_ctx *ctx,
			       const struct v4l2_ctrl_vp9_frame *dec_params)
{
	unsigned int width = dec_params->frame_width_minus_1 + 1;
	unsigned int height = dec_params->frame_height_minus_1 + 1;

	/* We only support profile 0. */
	if (dec_params->profile != 0) {
		dev_err(ctx->dev->dev, "unsupported profile %d\n",
			dec_params->profile);
		return -EINVAL;
	}

	if (round_up(width, 64) != ctx->decoded_fmt.fmt.pix_mp.width ||
	    round_up(height, 64) != ctx->decoded_fmt.fmt.pix_mp.height) {
		dev_err(ctx->dev->dev,
			"unexpected bitstream resolution %dx%d\n",
			width, height);
		return -EINVAL;
	}

	return 0;
}
/*
 * Per-job preparation: fetch and validate the VP9 frame/compressed
 * header controls, then perform the frame-context selection and
 * forward probability updates that the VP9 spec mandates before
 * decoding (see the inline spec excerpts below).
 * Returns 0 on success or a negative errno.
 */
static int rkvdec_vp9_run_preamble(struct rkvdec_ctx *ctx,
				   struct rkvdec_vp9_run *run)
{
	const struct v4l2_ctrl_vp9_frame *dec_params;
	const struct v4l2_ctrl_vp9_compressed_hdr *prob_updates;
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct v4l2_ctrl *ctrl;
	unsigned int fctx_idx;
	int ret;

	/* v4l2-specific stuff */
	rkvdec_run_preamble(ctx, &run->base);

	ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl,
			      V4L2_CID_STATELESS_VP9_FRAME);
	if (WARN_ON(!ctrl))
		return -EINVAL;
	dec_params = ctrl->p_cur.p;

	ret = validate_dec_params(ctx, dec_params);
	if (ret)
		return ret;

	run->decode_params = dec_params;

	ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, V4L2_CID_STATELESS_VP9_COMPRESSED_HDR);
	if (WARN_ON(!ctrl))
		return -EINVAL;
	prob_updates = ctrl->p_cur.p;
	vp9_ctx->cur.tx_mode = prob_updates->tx_mode;

	/*
	 * vp9 stuff
	 *
	 * by this point the userspace has done all parts of 6.2 uncompressed_header()
	 * except this fragment:
	 * if ( FrameIsIntra || error_resilient_mode ) {
	 *	setup_past_independence ( )
	 *	if ( frame_type == KEY_FRAME || error_resilient_mode == 1 ||
	 *	     reset_frame_context == 3 ) {
	 *		for ( i = 0; i < 4; i ++ ) {
	 *			save_probs( i )
	 *		}
	 *	} else if ( reset_frame_context == 2 ) {
	 *		save_probs( frame_context_idx )
	 *	}
	 *	frame_context_idx = 0
	 * }
	 */
	fctx_idx = v4l2_vp9_reset_frame_ctx(dec_params, vp9_ctx->frame_context);
	vp9_ctx->cur.frame_context_idx = fctx_idx;

	/* 6.1 frame(sz): load_probs() and load_probs2() */
	vp9_ctx->probability_tables = vp9_ctx->frame_context[fctx_idx];

	/*
	 * The userspace has also performed 6.3 compressed_header(), but handling the
	 * probs in a special way. All probs which need updating, except MV-related,
	 * have been read from the bitstream and translated through inv_map_table[],
	 * but no 6.3.6 inv_recenter_nonneg(v, m) has been performed. The values passed
	 * by userspace are either translated values (there are no 0 values in
	 * inv_map_table[]), or zero to indicate no update. All MV-related probs which need
	 * updating have been read from the bitstream and (mv_prob << 1) | 1 has been
	 * performed. The values passed by userspace are either new values
	 * to replace old ones (the above mentioned shift and bitwise or never result in
	 * a zero) or zero to indicate no update.
	 * fw_update_probs() performs actual probs updates or leaves probs as-is
	 * for values for which a zero was passed from userspace.
	 */
	v4l2_vp9_fw_update_probs(&vp9_ctx->probability_tables, prob_updates, dec_params);

	return 0;
}
/*
 * Decode one VP9 frame: prepare probabilities, program the registers,
 * arm the watchdog and kick the hardware. The final writel() to the
 * interrupt register orders all preceding writel_relaxed() accesses.
 */
static int rkvdec_vp9_run(struct rkvdec_ctx *ctx)
{
	struct rkvdec_dev *rkvdec = ctx->dev;
	struct rkvdec_vp9_run run = { };
	int ret;

	ret = rkvdec_vp9_run_preamble(ctx, &run);
	if (ret) {
		rkvdec_run_postamble(ctx, &run.base);
		return ret;
	}

	/* Prepare probs. */
	init_probs(ctx, &run);

	/* Configure hardware registers. */
	config_registers(ctx, &run);

	rkvdec_run_postamble(ctx, &run.base);

	/* Watchdog fails the job if no interrupt arrives within 2s. */
	schedule_delayed_work(&rkvdec->watchdog_work, msecs_to_jiffies(2000));

	writel(1, rkvdec->regs + RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
	writel(1, rkvdec->regs + RKVDEC_REG_PREF_CHR_CACHE_COMMAND);

	writel(0xe, rkvdec->regs + RKVDEC_REG_STRMD_ERR_EN);
	/* Start decoding! */
	writel(RKVDEC_INTERRUPT_DEC_E | RKVDEC_CONFIG_DEC_CLK_GATE_E |
	       RKVDEC_TIMEOUT_E | RKVDEC_BUF_EMPTY_E,
	       rkvdec->regs + RKVDEC_REG_INTERRUPT);

	return 0;
}
/*
 * Copy the TX-size and skip probability arrays from frame context p2
 * into p1; used to preserve forward-updated values across load_probs2()
 * in rkvdec_vp9_done().
 */
#define copy_tx_and_skip(p1, p2)				\
do {								\
	memcpy((p1)->tx8, (p2)->tx8, sizeof((p1)->tx8));	\
	memcpy((p1)->tx16, (p2)->tx16, sizeof((p1)->tx16));	\
	memcpy((p1)->tx32, (p2)->tx32, sizeof((p1)->tx32));	\
	memcpy((p1)->skip, (p2)->skip, sizeof((p1)->skip));	\
} while (0)
/*
 * Post-decode hook: implement 6.1.2 refresh_probs() — backward
 * probability adaptation from the hardware symbol counts and saving the
 * result into the selected frame context — then rotate the current
 * frame info into "last". Skipped entirely on decode errors.
 */
static void rkvdec_vp9_done(struct rkvdec_ctx *ctx,
			    struct vb2_v4l2_buffer *src_buf,
			    struct vb2_v4l2_buffer *dst_buf,
			    enum vb2_buffer_state result)
{
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	unsigned int fctx_idx;

	/* v4l2-specific stuff */
	if (result == VB2_BUF_STATE_ERROR)
		goto out_update_last;

	/*
	 * vp9 stuff
	 *
	 * 6.1.2 refresh_probs()
	 *
	 * In the spec a complementary condition goes last in 6.1.2 refresh_probs(),
	 * but it makes no sense to perform all the activities from the first "if"
	 * there if we actually are not refreshing the frame context. On top of that,
	 * because of 6.2 uncompressed_header() whenever error_resilient_mode == 1,
	 * refresh_frame_context == 0. Consequently, if we don't jump to out_update_last
	 * it means error_resilient_mode must be 0.
	 */
	if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX))
		goto out_update_last;

	fctx_idx = vp9_ctx->cur.frame_context_idx;

	if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE)) {
		/* error_resilient_mode == 0 && frame_parallel_decoding_mode == 0 */
		struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;
		bool frame_is_intra = vp9_ctx->cur.flags &
		    (V4L2_VP9_FRAME_FLAG_KEY_FRAME | V4L2_VP9_FRAME_FLAG_INTRA_ONLY);
		struct tx_and_skip {
			u8 tx8[2][1];
			u8 tx16[2][2];
			u8 tx32[2][3];
			u8 skip[3];
		} _tx_skip, *tx_skip = &_tx_skip;
		struct v4l2_vp9_frame_symbol_counts *counts;

		/* buffer the forward-updated TX and skip probs */
		if (frame_is_intra)
			copy_tx_and_skip(tx_skip, probs);

		/* 6.1.2 refresh_probs(): load_probs() and load_probs2() */
		*probs = vp9_ctx->frame_context[fctx_idx];

		/* if FrameIsIntra then undo the effect of load_probs2() */
		if (frame_is_intra)
			copy_tx_and_skip(probs, tx_skip);

		counts = frame_is_intra ? &vp9_ctx->intra_cnts : &vp9_ctx->inter_cnts;
		v4l2_vp9_adapt_coef_probs(probs, counts,
					  !vp9_ctx->last.valid ||
					  vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME,
					  frame_is_intra);

		if (!frame_is_intra) {
			const struct rkvdec_vp9_inter_frame_symbol_counts *inter_cnts;
			u32 classes[2][11];
			int i;

			/* Strip the hardware's alignment element from classes. */
			inter_cnts = vp9_ctx->count_tbl.cpu;
			for (i = 0; i < ARRAY_SIZE(classes); ++i)
				memcpy(classes[i], inter_cnts->classes[i], sizeof(classes[0]));
			counts->classes = &classes;

			/* load_probs2() already done */
			v4l2_vp9_adapt_noncoef_probs(&vp9_ctx->probability_tables, counts,
						     vp9_ctx->cur.reference_mode,
						     vp9_ctx->cur.interpolation_filter,
						     vp9_ctx->cur.tx_mode, vp9_ctx->cur.flags);
		}
	}

	/* 6.1.2 refresh_probs(): save_probs(fctx_idx) */
	vp9_ctx->frame_context[fctx_idx] = vp9_ctx->probability_tables;

out_update_last:
	update_ctx_last_info(vp9_ctx);
}
/*
 * Wire up the v4l2-vp9 library symbol-count views (vp9_ctx->inter_cnts /
 * vp9_ctx->intra_cnts) so that each member points directly into the
 * DMA count table written by the hardware.  The intra and inter layouts
 * alias the same buffer; which view is valid depends on the frame type.
 * Note the coeff/eob index order differs between the library ([i][j][k])
 * and the hardware ref_cnt layout ([k][i][j]).
 */
static void rkvdec_init_v4l2_vp9_count_tbl(struct rkvdec_ctx *ctx)
{
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct rkvdec_vp9_intra_frame_symbol_counts *intra_cnts = vp9_ctx->count_tbl.cpu;
	struct rkvdec_vp9_inter_frame_symbol_counts *inter_cnts = vp9_ctx->count_tbl.cpu;
	int i, j, k, l, m;

	vp9_ctx->inter_cnts.partition = &inter_cnts->partition;
	vp9_ctx->inter_cnts.skip = &inter_cnts->skip;
	vp9_ctx->inter_cnts.intra_inter = &inter_cnts->inter;
	vp9_ctx->inter_cnts.tx32p = &inter_cnts->tx32p;
	vp9_ctx->inter_cnts.tx16p = &inter_cnts->tx16p;
	vp9_ctx->inter_cnts.tx8p = &inter_cnts->tx8p;

	/* the intra partition table has a different shape; cast to the
	 * library's expected layout */
	vp9_ctx->intra_cnts.partition = (u32 (*)[16][4])(&intra_cnts->partition);
	vp9_ctx->intra_cnts.skip = &intra_cnts->skip;
	vp9_ctx->intra_cnts.intra_inter = &intra_cnts->intra;
	vp9_ctx->intra_cnts.tx32p = &intra_cnts->tx32p;
	vp9_ctx->intra_cnts.tx16p = &intra_cnts->tx16p;
	vp9_ctx->intra_cnts.tx8p = &intra_cnts->tx8p;

	vp9_ctx->inter_cnts.y_mode = &inter_cnts->y_mode;
	vp9_ctx->inter_cnts.uv_mode = &inter_cnts->uv_mode;
	vp9_ctx->inter_cnts.comp = &inter_cnts->comp;
	vp9_ctx->inter_cnts.comp_ref = &inter_cnts->comp_ref;
	vp9_ctx->inter_cnts.single_ref = &inter_cnts->single_ref;
	vp9_ctx->inter_cnts.mv_mode = &inter_cnts->mv_mode;
	vp9_ctx->inter_cnts.filter = &inter_cnts->filter;
	vp9_ctx->inter_cnts.mv_joint = &inter_cnts->mv_joint;
	vp9_ctx->inter_cnts.sign = &inter_cnts->sign;
	/*
	 * rk hardware actually uses "u32 classes[2][11 + 1];"
	 * instead of "u32 classes[2][11];", so this must be explicitly
	 * copied into vp9_ctx->classes when passing the data to the
	 * vp9 library function
	 */
	vp9_ctx->inter_cnts.class0 = &inter_cnts->class0;
	vp9_ctx->inter_cnts.bits = &inter_cnts->bits;
	vp9_ctx->inter_cnts.class0_fp = &inter_cnts->class0_fp;
	vp9_ctx->inter_cnts.fp = &inter_cnts->fp;
	vp9_ctx->inter_cnts.class0_hp = &inter_cnts->class0_hp;
	vp9_ctx->inter_cnts.hp = &inter_cnts->hp;

/* point every coeff/eob cell of both views at the matching hardware cell */
#define INNERMOST_LOOP							\
	do {								\
		for (m = 0; m < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0][0][0][0]); ++m) {\
			vp9_ctx->inter_cnts.coeff[i][j][k][l][m] =	\
				&inter_cnts->ref_cnt[k][i][j][l][m].coeff;	\
			vp9_ctx->inter_cnts.eob[i][j][k][l][m][0] =	\
				&inter_cnts->ref_cnt[k][i][j][l][m].eob[0];	\
			vp9_ctx->inter_cnts.eob[i][j][k][l][m][1] =	\
				&inter_cnts->ref_cnt[k][i][j][l][m].eob[1];	\
									\
			vp9_ctx->intra_cnts.coeff[i][j][k][l][m] =	\
				&intra_cnts->ref_cnt[k][i][j][l][m].coeff;	\
			vp9_ctx->intra_cnts.eob[i][j][k][l][m][0] =	\
				&intra_cnts->ref_cnt[k][i][j][l][m].eob[0];	\
			vp9_ctx->intra_cnts.eob[i][j][k][l][m][1] =	\
				&intra_cnts->ref_cnt[k][i][j][l][m].eob[1];	\
		}							\
	} while (0)

	for (i = 0; i < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff); ++i)
		for (j = 0; j < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0]); ++j)
			for (k = 0; k < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0][0]); ++k)
				for (l = 0; l < ARRAY_SIZE(vp9_ctx->inter_cnts.coeff[0][0][0]); ++l)
					INNERMOST_LOOP;
#undef INNERMOST_LOOP
}
/*
 * Per-context VP9 setup: allocate the driver context plus the two DMA
 * buffers the hardware needs (the probability table and the symbol-count
 * table), then pre-compute the count-table pointer views.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (with all earlier
 * allocations released via the goto cleanup chain).
 */
static int rkvdec_vp9_start(struct rkvdec_ctx *ctx)
{
	struct rkvdec_dev *rkvdec = ctx->dev;
	struct rkvdec_vp9_priv_tbl *priv_tbl;
	struct rkvdec_vp9_ctx *vp9_ctx;
	unsigned char *count_tbl;
	int ret;

	vp9_ctx = kzalloc(sizeof(*vp9_ctx), GFP_KERNEL);
	if (!vp9_ctx)
		return -ENOMEM;
	ctx->priv = vp9_ctx;

	BUILD_BUG_ON(sizeof(priv_tbl->probs) % 16); /* ensure probs size is 128-bit aligned */
	priv_tbl = dma_alloc_coherent(rkvdec->dev, sizeof(*priv_tbl),
				      &vp9_ctx->priv_tbl.dma, GFP_KERNEL);
	if (!priv_tbl) {
		ret = -ENOMEM;
		goto err_free_ctx;
	}
	vp9_ctx->priv_tbl.size = sizeof(*priv_tbl);
	vp9_ctx->priv_tbl.cpu = priv_tbl;

	count_tbl = dma_alloc_coherent(rkvdec->dev, RKVDEC_VP9_COUNT_SIZE,
				       &vp9_ctx->count_tbl.dma, GFP_KERNEL);
	if (!count_tbl) {
		ret = -ENOMEM;
		goto err_free_priv_tbl;
	}
	vp9_ctx->count_tbl.size = RKVDEC_VP9_COUNT_SIZE;
	vp9_ctx->count_tbl.cpu = count_tbl;
	rkvdec_init_v4l2_vp9_count_tbl(ctx);

	return 0;

err_free_priv_tbl:
	dma_free_coherent(rkvdec->dev, vp9_ctx->priv_tbl.size,
			  vp9_ctx->priv_tbl.cpu, vp9_ctx->priv_tbl.dma);

err_free_ctx:
	kfree(vp9_ctx);
	return ret;
}
/* Per-context VP9 teardown: release everything rkvdec_vp9_start() acquired. */
static void rkvdec_vp9_stop(struct rkvdec_ctx *ctx)
{
	struct rkvdec_vp9_ctx *vp9_ctx = ctx->priv;
	struct rkvdec_dev *rkvdec = ctx->dev;

	dma_free_coherent(rkvdec->dev, vp9_ctx->count_tbl.size,
			  vp9_ctx->count_tbl.cpu, vp9_ctx->count_tbl.dma);
	dma_free_coherent(rkvdec->dev, vp9_ctx->priv_tbl.size,
			  vp9_ctx->priv_tbl.cpu, vp9_ctx->priv_tbl.dma);
	kfree(vp9_ctx);
}
/*
 * Constrain a userspace-proposed coded format for VP9: force a single
 * plane and supply a default bitstream buffer size when none was given.
 * Always succeeds.
 */
static int rkvdec_vp9_adjust_fmt(struct rkvdec_ctx *ctx,
				 struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;

	/* VP9 coded streams always use exactly one plane. */
	pix_mp->num_planes = 1;

	/* Default payload size: two bytes per pixel of the coded area. */
	if (pix_mp->plane_fmt[0].sizeimage == 0)
		pix_mp->plane_fmt[0].sizeimage =
			2 * pix_mp->width * pix_mp->height;

	return 0;
}
/* VP9 coded-format ops, hooked up by the rkvdec core for VP9 contexts. */
const struct rkvdec_coded_fmt_ops rkvdec_vp9_fmt_ops = {
	.adjust_fmt = rkvdec_vp9_adjust_fmt,
	.start = rkvdec_vp9_start,
	.stop = rkvdec_vp9_stop,
	.run = rkvdec_vp9_run,
	.done = rkvdec_vp9_done,
};
| linux-master | drivers/staging/media/rkvdec/rkvdec-vp9.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* MIPI CSI-2 Receiver Subdev for Freescale i.MX6 SOC.
*
* Copyright (c) 2012-2017 Mentor Graphics Inc.
*/
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
#include "imx-media.h"
/*
* there must be 5 pads: 1 input pad from sensor, and
* the 4 virtual channel output pads
*/
#define CSI2_SINK_PAD 0
#define CSI2_NUM_SINK_PADS 1
#define CSI2_NUM_SRC_PADS 4
#define CSI2_NUM_PADS 5
/*
* The default maximum bit-rate per lane in Mbps, if the
* source subdev does not provide V4L2_CID_LINK_FREQ.
*/
#define CSI2_DEFAULT_MAX_MBPS 849
/* Driver state for one i.MX6 MIPI CSI-2 receiver instance. */
struct csi2_dev {
	struct device          *dev;
	struct v4l2_subdev      sd;
	struct v4l2_async_notifier notifier;
	struct media_pad       pad[CSI2_NUM_PADS];
	struct clk             *dphy_clk;
	struct clk             *pllref_clk;
	/*
	 * Enabled for the duration of streaming (see csi2_start/csi2_stop);
	 * presumably the core/pixel interface clock — TODO confirm against
	 * the i.MX6 clock tree.
	 */
	struct clk             *pix_clk;
	void __iomem           *base;

	/* remote source subdev bound via the async notifier */
	struct v4l2_subdev	*remote;
	unsigned int		remote_pad;
	/* number of data lanes described by our endpoint in the DT */
	unsigned short		data_lanes;

	/* lock to protect all members below */
	struct mutex lock;

	struct v4l2_mbus_framefmt format_mbus;

	int                     stream_count;
	struct v4l2_subdev      *src_sd;
	bool                    sink_linked[CSI2_NUM_SRC_PADS];
};
#define DEVICE_NAME "imx6-mipi-csi2"
/* Register offsets */
#define CSI2_VERSION 0x000
#define CSI2_N_LANES 0x004
#define CSI2_PHY_SHUTDOWNZ 0x008
#define CSI2_DPHY_RSTZ 0x00c
#define CSI2_RESETN 0x010
#define CSI2_PHY_STATE 0x014
#define PHY_STOPSTATEDATA_BIT 4
#define PHY_STOPSTATEDATA(n) BIT(PHY_STOPSTATEDATA_BIT + (n))
#define PHY_RXCLKACTIVEHS BIT(8)
#define PHY_RXULPSCLKNOT BIT(9)
#define PHY_STOPSTATECLK BIT(10)
#define CSI2_DATA_IDS_1 0x018
#define CSI2_DATA_IDS_2 0x01c
#define CSI2_ERR1 0x020
#define CSI2_ERR2 0x024
#define CSI2_MSK1 0x028
#define CSI2_MSK2 0x02c
#define CSI2_PHY_TST_CTRL0 0x030
#define PHY_TESTCLR BIT(0)
#define PHY_TESTCLK BIT(1)
#define CSI2_PHY_TST_CTRL1 0x034
#define PHY_TESTEN BIT(16)
/*
* i.MX CSI2IPU Gasket registers follow. The CSI2IPU gasket is
* not part of the MIPI CSI-2 core, but its registers fall in the
* same register map range.
*/
#define CSI2IPU_GASKET 0xf00
#define CSI2IPU_YUV422_YUYV BIT(2)
/* Recover the driver state from an embedded subdev pointer. */
static inline struct csi2_dev *sd_to_dev(struct v4l2_subdev *sdev)
{
	return container_of(sdev, struct csi2_dev, sd);
}
/* Recover the driver state from an embedded async-notifier pointer. */
static inline struct csi2_dev *notifier_to_dev(struct v4l2_async_notifier *n)
{
	return container_of(n, struct csi2_dev, notifier);
}
/*
* The required sequence of MIPI CSI-2 startup as specified in the i.MX6
* reference manual is as follows:
*
* 1. Deassert presetn signal (global reset).
* It's not clear what this "global reset" signal is (maybe APB
* global reset), but in any case this step would be probably
* be carried out during driver load in csi2_probe().
*
* 2. Configure MIPI Camera Sensor to put all Tx lanes in LP-11 state.
* This must be carried out by the MIPI sensor's s_power(ON) subdev
* op.
*
* 3. D-PHY initialization.
* 4. CSI2 Controller programming (Set N_LANES, deassert PHY_SHUTDOWNZ,
* deassert PHY_RSTZ, deassert CSI2_RESETN).
* 5. Read the PHY status register (PHY_STATE) to confirm that all data and
* clock lanes of the D-PHY are in LP-11 state.
* 6. Configure the MIPI Camera Sensor to start transmitting a clock on the
* D-PHY clock lane.
* 7. CSI2 Controller programming - Read the PHY status register (PHY_STATE)
* to confirm that the D-PHY is receiving a clock on the D-PHY clock lane.
*
* All steps 3 through 7 are carried out by csi2_s_stream(ON) here. Step
* 6 is accomplished by calling the source subdev's s_stream(ON) between
* steps 5 and 7.
*/
/*
 * Bring the PHY and controller out of (or back into) reset.  The three
 * registers are written in the same order for both enable and disable,
 * matching the bring-up sequence described above.
 */
static void csi2_enable(struct csi2_dev *csi2, bool enable)
{
	u32 val = enable ? 0x1 : 0x0;

	writel(val, csi2->base + CSI2_PHY_SHUTDOWNZ);
	writel(val, csi2->base + CSI2_DPHY_RSTZ);
	writel(val, csi2->base + CSI2_RESETN);
}
/* Program the active lane count; the register holds (lanes - 1). */
static void csi2_set_lanes(struct csi2_dev *csi2, unsigned int lanes)
{
	writel(lanes - 1, csi2->base + CSI2_N_LANES);
}
/*
 * Write one D-PHY test register through the Synopsys DesignWare test
 * interface.  The exact sequence of TESTCLR/TESTCLK/TESTEN toggles below
 * is the strobe protocol required by the PHY; do not reorder these writes.
 */
static void dw_mipi_csi2_phy_write(struct csi2_dev *csi2,
				   u32 test_code, u32 test_data)
{
	/* Clear PHY test interface */
	writel(PHY_TESTCLR, csi2->base + CSI2_PHY_TST_CTRL0);
	writel(0x0, csi2->base + CSI2_PHY_TST_CTRL1);
	writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);

	/* Raise test interface strobe signal */
	writel(PHY_TESTCLK, csi2->base + CSI2_PHY_TST_CTRL0);

	/* Configure address write on falling edge and lower strobe signal */
	writel(PHY_TESTEN | test_code, csi2->base + CSI2_PHY_TST_CTRL1);
	writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);

	/* Configure data write on rising edge and raise strobe signal */
	writel(test_data, csi2->base + CSI2_PHY_TST_CTRL1);
	writel(PHY_TESTCLK, csi2->base + CSI2_PHY_TST_CTRL0);

	/* Clear strobe signal */
	writel(0x0, csi2->base + CSI2_PHY_TST_CTRL0);
}
/*
* This table is based on the table documented at
* https://community.nxp.com/docs/DOC-94312. It assumes
* a 27MHz D-PHY pll reference clock.
*/
/*
 * This table is based on the table documented at
 * https://community.nxp.com/docs/DOC-94312. It assumes
 * a 27MHz D-PHY pll reference clock.
 *
 * Rows are sorted by ascending max_mbps; the lookup in
 * max_mbps_to_hsfreqrange_sel() relies on that ordering.
 */
static const struct {
	u32 max_mbps;
	u32 hsfreqrange_sel;
} hsfreq_map[] = {
	{ 90, 0x00}, {100, 0x20}, {110, 0x40}, {125, 0x02},
	{140, 0x22}, {150, 0x42}, {160, 0x04}, {180, 0x24},
	{200, 0x44}, {210, 0x06}, {240, 0x26}, {250, 0x46},
	{270, 0x08}, {300, 0x28}, {330, 0x48}, {360, 0x2a},
	{400, 0x4a}, {450, 0x0c}, {500, 0x2c}, {550, 0x0e},
	{600, 0x2e}, {650, 0x10}, {700, 0x30}, {750, 0x12},
	{800, 0x32}, {850, 0x14}, {900, 0x34}, {950, 0x54},
	{1000, 0x74},
};
/*
 * Map a maximum per-lane bit rate (Mbps) to the D-PHY hsfreqrange
 * selector.  Picks the first table row whose ceiling strictly exceeds
 * the requested rate; returns -EINVAL if the rate is out of range.
 */
static int max_mbps_to_hsfreqrange_sel(u32 max_mbps)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(hsfreq_map); idx++) {
		if (hsfreq_map[idx].max_mbps > max_mbps)
			return hsfreq_map[idx].hsfreqrange_sel;
	}

	return -EINVAL;
}
/*
 * Step 3 of the startup sequence: program the D-PHY hsfreqrange.
 *
 * The per-lane rate is derived from the source subdev's
 * V4L2_CID_LINK_FREQ control (link frequency * 2 for DDR, scaled to
 * Mbps), falling back to CSI2_DEFAULT_MAX_MBPS when the source does not
 * expose that control.  Returns -EINVAL if the rate exceeds the table.
 */
static int csi2_dphy_init(struct csi2_dev *csi2)
{
	struct v4l2_ctrl *ctrl;
	u32 mbps_per_lane;
	int sel;

	ctrl = v4l2_ctrl_find(csi2->src_sd->ctrl_handler,
			      V4L2_CID_LINK_FREQ);
	if (!ctrl)
		mbps_per_lane = CSI2_DEFAULT_MAX_MBPS;
	else
		mbps_per_lane = DIV_ROUND_UP_ULL(2 * ctrl->qmenu_int[ctrl->val],
						 USEC_PER_SEC);

	sel = max_mbps_to_hsfreqrange_sel(mbps_per_lane);
	if (sel < 0)
		return sel;

	/* test code 0x44 selects the hsfreqrange register in the PHY */
	dw_mipi_csi2_phy_write(csi2, 0x44, sel);

	return 0;
}
/*
* Waits for ultra-low-power state on D-PHY clock lane. This is currently
* unused and may not be needed at all, but keep around just in case.
*/
/*
 * Waits for ultra-low-power state on D-PHY clock lane. This is currently
 * unused and may not be needed at all, but keep around just in case.
 *
 * Returns 0 on success or the readl_poll_timeout() error (-ETIMEDOUT)
 * on timeout.
 */
static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
{
	u32 reg;
	int ret;

	/* wait for ULP on clock lane */
	ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
				 !(reg & PHY_RXULPSCLKNOT), 0, 500000);
	if (ret) {
		v4l2_err(&csi2->sd, "ULP timeout, phy_state = 0x%08x\n", reg);
		return ret;
	}

	/* wait until no errors on bus */
	ret = readl_poll_timeout(csi2->base + CSI2_ERR1, reg,
				 reg == 0x0, 0, 500000);
	if (ret) {
		v4l2_err(&csi2->sd, "stable bus timeout, err1 = 0x%08x\n", reg);
		return ret;
	}

	return 0;
}
/* Waits for low-power LP-11 state on data and clock lanes. */
/*
 * Waits for low-power LP-11 state on data and clock lanes (step 5 of the
 * startup sequence).  A timeout is only warned about, not treated as
 * fatal, since many sensor drivers cannot guarantee LP-11 at this point.
 */
static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2, unsigned int lanes)
{
	u32 mask, reg;
	int ret;

	/* one STOPSTATE bit per active data lane plus the clock lane */
	mask = PHY_STOPSTATECLK | (((1 << lanes) - 1) << PHY_STOPSTATEDATA_BIT);
	ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
				 (reg & mask) == mask, 0, 500000);
	if (ret) {
		v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n");
		v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg);
	}
}
/* Wait for active clock on the clock lane. */
/*
 * Wait for active (high-speed) clock on the clock lane — step 7 of the
 * startup sequence, performed after the sensor starts streaming.
 * Returns 0 on success, poll-timeout error otherwise.
 */
static int csi2_dphy_wait_clock_lane(struct csi2_dev *csi2)
{
	u32 reg;
	int ret;

	ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
				 (reg & PHY_RXCLKACTIVEHS), 0, 500000);
	if (ret) {
		v4l2_err(&csi2->sd, "clock lane timeout, phy_state = 0x%08x\n",
			 reg);
		return ret;
	}

	return 0;
}
/* Setup the i.MX CSI2IPU Gasket */
/*
 * Setup the i.MX CSI2IPU Gasket: select YUYV byte ordering for the two
 * YUYV bus codes, leave the register at its default (0) for everything
 * else.
 */
static void csi2ipu_gasket_init(struct csi2_dev *csi2)
{
	u32 gasket_cfg;

	if (csi2->format_mbus.code == MEDIA_BUS_FMT_YUYV8_2X8 ||
	    csi2->format_mbus.code == MEDIA_BUS_FMT_YUYV8_1X16)
		gasket_cfg = CSI2IPU_YUV422_YUYV;
	else
		gasket_cfg = 0;

	writel(gasket_cfg, csi2->base + CSI2IPU_GASKET);
}
/*
 * Determine how many data lanes to enable for this stream.
 *
 * Defaults to the lane count from our DT endpoint, but lets the remote
 * source subdev override it (downward only) via get_mbus_config.
 * Returns 0 on success; -EINVAL if the remote reports a non-D-PHY bus
 * or more lanes than we are wired for.
 */
static int csi2_get_active_lanes(struct csi2_dev *csi2, unsigned int *lanes)
{
	struct v4l2_mbus_config mbus_config = { 0 };
	int ret;

	*lanes = csi2->data_lanes;

	ret = v4l2_subdev_call(csi2->remote, pad, get_mbus_config,
			       csi2->remote_pad, &mbus_config);
	if (ret == -ENOIOCTLCMD) {
		/* optional op not implemented: keep the DT lane count */
		dev_dbg(csi2->dev, "No remote mbus configuration available\n");
		return 0;
	}

	if (ret) {
		dev_err(csi2->dev, "Failed to get remote mbus configuration\n");
		return ret;
	}

	if (mbus_config.type != V4L2_MBUS_CSI2_DPHY) {
		dev_err(csi2->dev, "Unsupported media bus type %u\n",
			mbus_config.type);
		return -EINVAL;
	}

	if (mbus_config.bus.mipi_csi2.num_data_lanes > csi2->data_lanes) {
		dev_err(csi2->dev,
			"Unsupported mbus config: too many data lanes %u\n",
			mbus_config.bus.mipi_csi2.num_data_lanes);
		return -EINVAL;
	}

	*lanes = mbus_config.bus.mipi_csi2.num_data_lanes;

	return 0;
}
/*
 * Start streaming: executes steps 3-7 of the i.MX6 MIPI CSI-2 bring-up
 * sequence documented above.  The statement order is mandated by the
 * reference manual; error paths unwind in reverse order.
 */
static int csi2_start(struct csi2_dev *csi2)
{
	unsigned int lanes;
	int ret;

	ret = clk_prepare_enable(csi2->pix_clk);
	if (ret)
		return ret;

	/* setup the gasket */
	csi2ipu_gasket_init(csi2);

	/* Step 3 */
	ret = csi2_dphy_init(csi2);
	if (ret)
		goto err_disable_clk;

	ret = csi2_get_active_lanes(csi2, &lanes);
	if (ret)
		goto err_disable_clk;

	/* Step 4 */
	csi2_set_lanes(csi2, lanes);
	csi2_enable(csi2, true);

	/* Step 5: ask the sensor to drive LP-11, then confirm */
	ret = v4l2_subdev_call(csi2->src_sd, video, pre_streamon,
			       V4L2_SUBDEV_PRE_STREAMON_FL_MANUAL_LP);
	if (ret && ret != -ENOIOCTLCMD)
		goto err_assert_reset;
	csi2_dphy_wait_stopstate(csi2, lanes);

	/* Step 6: start the upstream source (optional op tolerated) */
	ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
	ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
	if (ret)
		goto err_stop_lp11;

	/* Step 7 */
	ret = csi2_dphy_wait_clock_lane(csi2);
	if (ret)
		goto err_stop_upstream;

	return 0;

err_stop_upstream:
	v4l2_subdev_call(csi2->src_sd, video, s_stream, 0);
err_stop_lp11:
	v4l2_subdev_call(csi2->src_sd, video, post_streamoff);
err_assert_reset:
	csi2_enable(csi2, false);
err_disable_clk:
	clk_disable_unprepare(csi2->pix_clk);
	return ret;
}
/* Stop streaming: undo csi2_start() in reverse order. */
static void csi2_stop(struct csi2_dev *csi2)
{
	/* stop upstream */
	v4l2_subdev_call(csi2->src_sd, video, s_stream, 0);
	v4l2_subdev_call(csi2->src_sd, video, post_streamoff);

	csi2_enable(csi2, false);
	clk_disable_unprepare(csi2->pix_clk);
}
/*
* V4L2 subdev operations.
*/
/*
 * s_stream subdev op.  Reference-counts stream requests from the (up to
 * four) virtual-channel sinks: the hardware is only started on the first
 * enable and stopped on the last disable.  Requires a linked source and
 * at least one enabled sink link (-EPIPE otherwise).
 */
static int csi2_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct csi2_dev *csi2 = sd_to_dev(sd);
	int i, ret = 0;

	mutex_lock(&csi2->lock);

	if (!csi2->src_sd) {
		ret = -EPIPE;
		goto out;
	}

	/* require at least one enabled source-pad link */
	for (i = 0; i < CSI2_NUM_SRC_PADS; i++) {
		if (csi2->sink_linked[i])
			break;
	}
	if (i >= CSI2_NUM_SRC_PADS) {
		ret = -EPIPE;
		goto out;
	}

	/*
	 * enable/disable streaming only if stream_count is
	 * going from 0 to 1 / 1 to 0.
	 */
	if (csi2->stream_count != !enable)
		goto update_count;

	dev_dbg(csi2->dev, "stream %s\n", enable ? "ON" : "OFF");
	if (enable)
		ret = csi2_start(csi2);
	else
		csi2_stop(csi2);
	if (ret)
		goto out;

update_count:
	csi2->stream_count += enable ? 1 : -1;
	/* clamp: never let unbalanced disables drive the count negative */
	if (csi2->stream_count < 0)
		csi2->stream_count = 0;
out:
	mutex_unlock(&csi2->lock);
	return ret;
}
/*
 * Media link_setup op.  Tracks which of the four source (virtual
 * channel) pads have an enabled sink link, and records the single
 * upstream source subdev on the sink pad.  Returns -EBUSY if a pad
 * that is already linked is linked again.
 */
static int csi2_link_setup(struct media_entity *entity,
			   const struct media_pad *local,
			   const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct csi2_dev *csi2 = sd_to_dev(sd);
	struct v4l2_subdev *remote_sd;
	int ret = 0;

	dev_dbg(csi2->dev, "link setup %s -> %s", remote->entity->name,
		local->entity->name);

	remote_sd = media_entity_to_v4l2_subdev(remote->entity);

	mutex_lock(&csi2->lock);

	if (local->flags & MEDIA_PAD_FL_SOURCE) {
		/* source pads start at index 1, hence the - 1 */
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (csi2->sink_linked[local->index - 1]) {
				ret = -EBUSY;
				goto out;
			}
			csi2->sink_linked[local->index - 1] = true;
		} else {
			csi2->sink_linked[local->index - 1] = false;
		}
	} else {
		/* the single sink pad: remember/forget the upstream source */
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (csi2->src_sd) {
				ret = -EBUSY;
				goto out;
			}
			csi2->src_sd = remote_sd;
		} else {
			csi2->src_sd = NULL;
		}
	}

out:
	mutex_unlock(&csi2->lock);
	return ret;
}
/*
 * Return the format backing store for @pad: the subdev-state TRY format
 * for TRY requests, otherwise the single active format shared by all
 * pads.
 */
static struct v4l2_mbus_framefmt *
__csi2_get_fmt(struct csi2_dev *csi2, struct v4l2_subdev_state *sd_state,
	       unsigned int pad, enum v4l2_subdev_format_whence which)
{
	if (which != V4L2_SUBDEV_FORMAT_TRY)
		return &csi2->format_mbus;

	return v4l2_subdev_get_try_format(&csi2->sd, sd_state, pad);
}
/* get_fmt pad op: copy out the stored (active or try) format under lock. */
static int csi2_get_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *sd_state,
			struct v4l2_subdev_format *sdformat)
{
	struct csi2_dev *csi2 = sd_to_dev(sd);
	struct v4l2_mbus_framefmt *fmt;

	mutex_lock(&csi2->lock);

	fmt = __csi2_get_fmt(csi2, sd_state, sdformat->pad, sdformat->which);

	sdformat->format = *fmt;

	mutex_unlock(&csi2->lock);

	return 0;
}
/*
 * set_fmt pad op.  The sink pad accepts any format; source pads are
 * read-only mirrors of the active format.  Rejects format changes while
 * streaming (-EBUSY) and out-of-range pads (-EINVAL).
 */
static int csi2_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *sd_state,
			struct v4l2_subdev_format *sdformat)
{
	struct csi2_dev *csi2 = sd_to_dev(sd);
	struct v4l2_mbus_framefmt *fmt;
	int ret = 0;

	if (sdformat->pad >= CSI2_NUM_PADS)
		return -EINVAL;

	mutex_lock(&csi2->lock);

	if (csi2->stream_count > 0) {
		ret = -EBUSY;
		goto out;
	}

	/* Output pads mirror active input pad, no limits on input pads */
	if (sdformat->pad != CSI2_SINK_PAD)
		sdformat->format = csi2->format_mbus;

	fmt = __csi2_get_fmt(csi2, sd_state, sdformat->pad, sdformat->which);

	*fmt = sdformat->format;
out:
	mutex_unlock(&csi2->lock);
	return ret;
}
/* Internal op called on subdev registration: install a default format. */
static int csi2_registered(struct v4l2_subdev *sd)
{
	struct csi2_dev *csi2 = sd_to_dev(sd);

	/* set a default mbus format */
	return imx_media_init_mbus_fmt(&csi2->format_mbus,
				       IMX_MEDIA_DEF_PIX_WIDTH,
				       IMX_MEDIA_DEF_PIX_HEIGHT, 0,
				       V4L2_FIELD_NONE, NULL);
}
/* --------------- CORE OPS --------------- */
/*
 * log_status core op: dump the full controller/PHY register set for
 * debugging (VIDIOC_LOG_STATUS).  Purely informational; always succeeds.
 */
static int csi2_log_status(struct v4l2_subdev *sd)
{
	struct csi2_dev *csi2 = sd_to_dev(sd);

	v4l2_info(sd, "-----MIPI CSI status-----\n");
	v4l2_info(sd, "VERSION: 0x%x\n",
		  readl(csi2->base + CSI2_VERSION));
	v4l2_info(sd, "N_LANES: 0x%x\n",
		  readl(csi2->base + CSI2_N_LANES));
	v4l2_info(sd, "PHY_SHUTDOWNZ: 0x%x\n",
		  readl(csi2->base + CSI2_PHY_SHUTDOWNZ));
	v4l2_info(sd, "DPHY_RSTZ: 0x%x\n",
		  readl(csi2->base + CSI2_DPHY_RSTZ));
	v4l2_info(sd, "RESETN: 0x%x\n",
		  readl(csi2->base + CSI2_RESETN));
	v4l2_info(sd, "PHY_STATE: 0x%x\n",
		  readl(csi2->base + CSI2_PHY_STATE));
	v4l2_info(sd, "DATA_IDS_1: 0x%x\n",
		  readl(csi2->base + CSI2_DATA_IDS_1));
	v4l2_info(sd, "DATA_IDS_2: 0x%x\n",
		  readl(csi2->base + CSI2_DATA_IDS_2));
	v4l2_info(sd, "ERR1: 0x%x\n",
		  readl(csi2->base + CSI2_ERR1));
	v4l2_info(sd, "ERR2: 0x%x\n",
		  readl(csi2->base + CSI2_ERR2));
	v4l2_info(sd, "MSK1: 0x%x\n",
		  readl(csi2->base + CSI2_MSK1));
	v4l2_info(sd, "MSK2: 0x%x\n",
		  readl(csi2->base + CSI2_MSK2));
	v4l2_info(sd, "PHY_TST_CTRL0: 0x%x\n",
		  readl(csi2->base + CSI2_PHY_TST_CTRL0));
	v4l2_info(sd, "PHY_TST_CTRL1: 0x%x\n",
		  readl(csi2->base + CSI2_PHY_TST_CTRL1));

	return 0;
}
/* Subdev / media-entity operation tables. */
static const struct v4l2_subdev_core_ops csi2_core_ops = {
	.log_status = csi2_log_status,
};

static const struct media_entity_operations csi2_entity_ops = {
	.link_setup = csi2_link_setup,
	.link_validate = v4l2_subdev_link_validate,
	.get_fwnode_pad = v4l2_subdev_get_fwnode_pad_1_to_1,
};

static const struct v4l2_subdev_video_ops csi2_video_ops = {
	.s_stream = csi2_s_stream,
};

static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
	.init_cfg = imx_media_init_cfg,
	.get_fmt = csi2_get_fmt,
	.set_fmt = csi2_set_fmt,
};

static const struct v4l2_subdev_ops csi2_subdev_ops = {
	.core = &csi2_core_ops,
	.video = &csi2_video_ops,
	.pad = &csi2_pad_ops,
};

static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
	.registered = csi2_registered,
};
/*
 * Async-notifier bound callback: record the remote source subdev and its
 * source pad, then create media links from it to our sink pad.
 */
static int csi2_notify_bound(struct v4l2_async_notifier *notifier,
			     struct v4l2_subdev *sd,
			     struct v4l2_async_connection *asd)
{
	struct csi2_dev *csi2 = notifier_to_dev(notifier);
	struct media_pad *sink = &csi2->sd.entity.pads[CSI2_SINK_PAD];
	int pad;

	pad = media_entity_get_fwnode_pad(&sd->entity, asd->match.fwnode,
					  MEDIA_PAD_FL_SOURCE);
	if (pad < 0) {
		dev_err(csi2->dev, "Failed to find pad for %s\n", sd->name);
		return pad;
	}

	csi2->remote = sd;
	csi2->remote_pad = pad;

	dev_dbg(csi2->dev, "Bound %s pad: %d\n", sd->name, pad);

	return v4l2_create_fwnode_links_to_pad(sd, sink, 0);
}
/* Async-notifier unbind callback: forget the remote source subdev. */
static void csi2_notify_unbind(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *sd,
			       struct v4l2_async_connection *asd)
{
	struct csi2_dev *csi2 = notifier_to_dev(notifier);

	csi2->remote = NULL;
}

static const struct v4l2_async_notifier_operations csi2_notify_ops = {
	.bound = csi2_notify_bound,
	.unbind = csi2_notify_unbind,
};
/*
 * Parse our DT endpoint, register the async notifier for the remote
 * sensor, and register the subdev itself.
 *
 * On failure after notifier init, cleanup of the notifier is left to the
 * caller (csi2_probe() error path unregisters and cleans it up).
 */
static int csi2_async_register(struct csi2_dev *csi2)
{
	struct v4l2_fwnode_endpoint vep = {
		.bus_type = V4L2_MBUS_CSI2_DPHY,
	};
	struct v4l2_async_connection *asd;
	struct fwnode_handle *ep;
	int ret;

	v4l2_async_subdev_nf_init(&csi2->notifier, &csi2->sd);

	ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi2->dev), 0, 0,
					     FWNODE_GRAPH_ENDPOINT_NEXT);
	if (!ep)
		return -ENOTCONN;

	ret = v4l2_fwnode_endpoint_parse(ep, &vep);
	if (ret)
		goto err_parse;

	csi2->data_lanes = vep.bus.mipi_csi2.num_data_lanes;

	dev_dbg(csi2->dev, "data lanes: %d\n", vep.bus.mipi_csi2.num_data_lanes);
	dev_dbg(csi2->dev, "flags: 0x%08x\n", vep.bus.mipi_csi2.flags);

	asd = v4l2_async_nf_add_fwnode_remote(&csi2->notifier, ep,
					      struct v4l2_async_connection);
	fwnode_handle_put(ep);

	if (IS_ERR(asd))
		return PTR_ERR(asd);

	csi2->notifier.ops = &csi2_notify_ops;

	ret = v4l2_async_nf_register(&csi2->notifier);
	if (ret)
		return ret;

	return v4l2_async_register_subdev(&csi2->sd);

err_parse:
	fwnode_handle_put(ep);
	return ret;
}
/*
 * Platform probe: set up the subdev and media pads, acquire clocks and
 * registers, enable the always-on clocks (pllref, dphy), and kick off
 * async registration.  The pix clock is only enabled while streaming.
 */
static int csi2_probe(struct platform_device *pdev)
{
	struct csi2_dev *csi2;
	struct resource *res;
	int i, ret;

	csi2 = devm_kzalloc(&pdev->dev, sizeof(*csi2), GFP_KERNEL);
	if (!csi2)
		return -ENOMEM;

	csi2->dev = &pdev->dev;

	v4l2_subdev_init(&csi2->sd, &csi2_subdev_ops);
	v4l2_set_subdevdata(&csi2->sd, &pdev->dev);
	csi2->sd.internal_ops = &csi2_internal_ops;
	csi2->sd.entity.ops = &csi2_entity_ops;
	csi2->sd.dev = &pdev->dev;
	csi2->sd.owner = THIS_MODULE;
	csi2->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
	strscpy(csi2->sd.name, DEVICE_NAME, sizeof(csi2->sd.name));
	csi2->sd.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	csi2->sd.grp_id = IMX_MEDIA_GRP_ID_CSI2;

	/* pad 0 is the sink; pads 1..4 are the virtual-channel sources */
	for (i = 0; i < CSI2_NUM_PADS; i++) {
		csi2->pad[i].flags = (i == CSI2_SINK_PAD) ?
		MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
	}

	ret = media_entity_pads_init(&csi2->sd.entity, CSI2_NUM_PADS,
				     csi2->pad);
	if (ret)
		return ret;

	csi2->pllref_clk = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(csi2->pllref_clk)) {
		v4l2_err(&csi2->sd, "failed to get pll reference clock\n");
		return PTR_ERR(csi2->pllref_clk);
	}

	csi2->dphy_clk = devm_clk_get(&pdev->dev, "dphy");
	if (IS_ERR(csi2->dphy_clk)) {
		v4l2_err(&csi2->sd, "failed to get dphy clock\n");
		return PTR_ERR(csi2->dphy_clk);
	}

	csi2->pix_clk = devm_clk_get(&pdev->dev, "pix");
	if (IS_ERR(csi2->pix_clk)) {
		v4l2_err(&csi2->sd, "failed to get pixel clock\n");
		return PTR_ERR(csi2->pix_clk);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		v4l2_err(&csi2->sd, "failed to get platform resources\n");
		return -ENODEV;
	}

	/* NOTE(review): maps a fixed PAGE_SIZE, not resource_size(res) —
	 * presumably the register window fits in one page; confirm */
	csi2->base = devm_ioremap(&pdev->dev, res->start, PAGE_SIZE);
	if (!csi2->base)
		return -ENOMEM;

	mutex_init(&csi2->lock);

	ret = clk_prepare_enable(csi2->pllref_clk);
	if (ret) {
		v4l2_err(&csi2->sd, "failed to enable pllref_clk\n");
		goto rmmutex;
	}

	ret = clk_prepare_enable(csi2->dphy_clk);
	if (ret) {
		v4l2_err(&csi2->sd, "failed to enable dphy_clk\n");
		goto pllref_off;
	}

	platform_set_drvdata(pdev, &csi2->sd);

	ret = csi2_async_register(csi2);
	if (ret)
		goto clean_notifier;

	return 0;

clean_notifier:
	v4l2_async_nf_unregister(&csi2->notifier);
	v4l2_async_nf_cleanup(&csi2->notifier);
	clk_disable_unprepare(csi2->dphy_clk);
pllref_off:
	clk_disable_unprepare(csi2->pllref_clk);
rmmutex:
	mutex_destroy(&csi2->lock);
	return ret;
}
/* Platform remove: unwind everything csi2_probe() set up. */
static void csi2_remove(struct platform_device *pdev)
{
	struct v4l2_subdev *sd = platform_get_drvdata(pdev);
	struct csi2_dev *csi2 = sd_to_dev(sd);

	v4l2_async_nf_unregister(&csi2->notifier);
	v4l2_async_nf_cleanup(&csi2->notifier);
	v4l2_async_unregister_subdev(sd);
	clk_disable_unprepare(csi2->dphy_clk);
	clk_disable_unprepare(csi2->pllref_clk);
	mutex_destroy(&csi2->lock);
	media_entity_cleanup(&sd->entity);
}
/* Device-tree match table and platform driver registration. */
static const struct of_device_id csi2_dt_ids[] = {
	{ .compatible = "fsl,imx6-mipi-csi2", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, csi2_dt_ids);

static struct platform_driver csi2_driver = {
	.driver = {
		.name = DEVICE_NAME,
		.of_match_table = csi2_dt_ids,
	},
	.probe = csi2_probe,
	.remove_new = csi2_remove,
};

module_platform_driver(csi2_driver);

MODULE_DESCRIPTION("i.MX5/6 MIPI CSI-2 Receiver driver");
MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/imx/imx6-mipi-csi2.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Frame Interval Monitor.
*
* Copyright (c) 2016 Mentor Graphics Inc.
*/
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>
#include <media/imx.h>
#include "imx-media.h"
/* Indices into the main FIM control cluster (fim->ctrl[]). */
enum {
	FIM_CL_ENABLE = 0,
	FIM_CL_NUM,
	FIM_CL_TOLERANCE_MIN,
	FIM_CL_TOLERANCE_MAX,
	FIM_CL_NUM_SKIP,
	FIM_NUM_CONTROLS,
};

/* Indices into the input-capture control cluster (fim->icap_ctrl[]). */
enum {
	FIM_CL_ICAP_EDGE = 0,
	FIM_CL_ICAP_CHANNEL,
	FIM_NUM_ICAP_CONTROLS,
};

/* Control default values. */
#define FIM_CL_ENABLE_DEF          0 /* FIM disabled by default */
#define FIM_CL_NUM_DEF             8 /* average 8 frames */
#define FIM_CL_NUM_SKIP_DEF        2 /* skip 2 frames after restart */
#define FIM_CL_TOLERANCE_MIN_DEF  50 /* usec */
#define FIM_CL_TOLERANCE_MAX_DEF   0 /* no max tolerance (unbounded) */
/* State of one frame-interval-monitor instance. */
struct imx_media_fim {
	/* the owning subdev of this fim instance */
	struct v4l2_subdev *sd;

	/* FIM's control handler */
	struct v4l2_ctrl_handler ctrl_handler;
	/* control clusters */
	struct v4l2_ctrl  *ctrl[FIM_NUM_CONTROLS];
	struct v4l2_ctrl  *icap_ctrl[FIM_NUM_ICAP_CONTROLS];
	spinlock_t        lock; /* protect control values */

	/* current control values (snapshot taken in reset_fim()) */
	bool              enabled;
	int               num_avg;
	int               num_skip;
	unsigned long     tolerance_min; /* usec */
	unsigned long     tolerance_max; /* usec */
	/* input capture method of measuring FI */
	int               icap_channel;
	int               icap_flags;

	/* negative while skipping, counts measured intervals afterwards */
	int               counter;
	/* timestamp of the previous frame event */
	ktime_t           last_ts;
	unsigned long     sum;       /* usec */
	unsigned long     nominal;   /* usec */

	struct completion icap_first_event;
	bool              stream_on;
};
/* True when the input-capture measurement method is in use. */
static bool icap_enabled(struct imx_media_fim *fim)
{
	return fim->icap_flags != IRQ_TYPE_NONE;
}
/*
 * Recompute the nominal frame interval (in usec) from a v4l2_fract.
 * A zero denominator means no valid frame interval, which disables
 * monitoring entirely.
 */
static void update_fim_nominal(struct imx_media_fim *fim,
			       const struct v4l2_fract *fi)
{
	if (fi->denominator == 0) {
		dev_dbg(fim->sd->dev, "no frame interval, FIM disabled\n");
		fim->enabled = false;
		return;
	}

	fim->nominal = DIV_ROUND_CLOSEST_ULL(1000000ULL * (u64)fi->numerator,
					     fi->denominator);

	dev_dbg(fim->sd->dev, "FI=%lu usec\n", fim->nominal);
}
/*
 * Re-snapshot the control values into the working state and restart the
 * measurement (counter/sum).  @curval selects whether the controls'
 * committed (cur.val) or pending (val) values are read — the pending
 * values are used from the s_ctrl path, where ->val holds the new value
 * being applied.  Caller must hold fim->lock.
 */
static void reset_fim(struct imx_media_fim *fim, bool curval)
{
	struct v4l2_ctrl *icap_chan = fim->icap_ctrl[FIM_CL_ICAP_CHANNEL];
	struct v4l2_ctrl *icap_edge = fim->icap_ctrl[FIM_CL_ICAP_EDGE];
	struct v4l2_ctrl *en = fim->ctrl[FIM_CL_ENABLE];
	struct v4l2_ctrl *num = fim->ctrl[FIM_CL_NUM];
	struct v4l2_ctrl *skip = fim->ctrl[FIM_CL_NUM_SKIP];
	struct v4l2_ctrl *tol_min = fim->ctrl[FIM_CL_TOLERANCE_MIN];
	struct v4l2_ctrl *tol_max = fim->ctrl[FIM_CL_TOLERANCE_MAX];

	if (curval) {
		fim->enabled = en->cur.val;
		fim->icap_flags = icap_edge->cur.val;
		fim->icap_channel = icap_chan->cur.val;
		fim->num_avg = num->cur.val;
		fim->num_skip = skip->cur.val;
		fim->tolerance_min = tol_min->cur.val;
		fim->tolerance_max = tol_max->cur.val;
	} else {
		fim->enabled = en->val;
		fim->icap_flags = icap_edge->val;
		fim->icap_channel = icap_chan->val;
		fim->num_avg = num->val;
		fim->num_skip = skip->val;
		fim->tolerance_min = tol_min->val;
		fim->tolerance_max = tol_max->val;
	}

	/* disable tolerance range if max <= min */
	if (fim->tolerance_max <= fim->tolerance_min)
		fim->tolerance_max = 0;

	/* num_skip must be >= 1 if input capture not used */
	if (!icap_enabled(fim))
		fim->num_skip = max_t(int, fim->num_skip, 1);

	/* counter goes negative so the first num_skip intervals are skipped */
	fim->counter = -fim->num_skip;
	fim->sum = 0;
}
/*
 * Emit the frame-interval-error v4l2 event.  The @error argument is
 * currently unused — the event carries no payload.
 */
static void send_fim_event(struct imx_media_fim *fim, unsigned long error)
{
	static const struct v4l2_event ev = {
		.type = V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR,
	};

	v4l2_subdev_notify_event(fim->sd, &ev);
}
/*
 * Monitor an averaged frame interval. If the average deviates too much
 * from the nominal frame rate, send the frame interval error event. The
 * frame intervals are averaged in order to quiet noise from
 * (presumably random) interrupt latency.
 */
static void frame_interval_monitor(struct imx_media_fim *fim,
				   ktime_t timestamp)
{
	long long interval, error;
	unsigned long error_avg;
	bool send_event = false;

	/* counter is negative while skipping post-restart frames */
	if (!fim->enabled || ++fim->counter <= 0)
		goto out_update_ts;

	/* max error is less than 100µs, so use 32-bit division or fail */
	interval = ktime_to_ns(ktime_sub(timestamp, fim->last_ts));
	error = abs(interval - NSEC_PER_USEC * (u64)fim->nominal);
	if (error > U32_MAX)
		error = U32_MAX;
	else
		error = abs((u32)error / NSEC_PER_USEC);

	/* outliers beyond tolerance_max are dropped from the average */
	if (fim->tolerance_max && error >= fim->tolerance_max) {
		dev_dbg(fim->sd->dev,
			"FIM: %llu ignored, out of tolerance bounds\n",
			error);
		fim->counter--;
		goto out_update_ts;
	}

	fim->sum += error;

	if (fim->counter == fim->num_avg) {
		error_avg = DIV_ROUND_CLOSEST(fim->sum, fim->num_avg);

		if (error_avg > fim->tolerance_min)
			send_event = true;

		dev_dbg(fim->sd->dev, "FIM: error: %lu usec%s\n",
			error_avg, send_event ? " (!!!)" : "");

		fim->counter = 0;
		fim->sum = 0;
	}

out_update_ts:
	fim->last_ts = timestamp;
	/* error_avg is always assigned before send_event can be true */
	if (send_event)
		send_fim_event(fim, error_avg);
}
/*
 * In case we are monitoring the first frame interval after streamon
 * (when fim->num_skip = 0), we need a valid fim->last_ts before we
 * can begin. This only applies to the input capture method. It is not
 * possible to accurately measure the first FI after streamon using the
 * EOF method, so fim->num_skip minimum is set to 1 in that case, so this
 * function is a noop when the EOF method is used.
 */
static void fim_acquire_first_ts(struct imx_media_fim *fim)
{
	unsigned long ret;

	if (!fim->enabled || fim->num_skip > 0)
		return;

	/* block until the first input-capture event has set last_ts */
	ret = wait_for_completion_timeout(
		&fim->icap_first_event,
		msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
	if (ret == 0)
		v4l2_warn(fim->sd, "wait first icap event timeout\n");
}
/* FIM Controls */

/*
 * s_ctrl handler for both control clusters.  Any accepted change resets
 * the monitor with the new (pending) control values.  Changing the
 * input-capture edge while streaming is rejected with -EBUSY.
 */
static int fim_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct imx_media_fim *fim = container_of(ctrl->handler,
						 struct imx_media_fim,
						 ctrl_handler);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&fim->lock, flags);

	switch (ctrl->id) {
	case V4L2_CID_IMX_FIM_ENABLE:
		break;
	case V4L2_CID_IMX_FIM_ICAP_EDGE:
		if (fim->stream_on)
			ret = -EBUSY;
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		reset_fim(fim, false);

	spin_unlock_irqrestore(&fim->lock, flags);
	return ret;
}

static const struct v4l2_ctrl_ops fim_ctrl_ops = {
	.s_ctrl = fim_s_ctrl,
};
/* Custom control definitions for the frame interval monitor */
static const struct v4l2_ctrl_config fim_ctrl[] = {
	[FIM_CL_ENABLE] = {
		.ops = &fim_ctrl_ops,
		.id = V4L2_CID_IMX_FIM_ENABLE,
		.name = "FIM Enable",
		.type = V4L2_CTRL_TYPE_BOOLEAN,
		.def = FIM_CL_ENABLE_DEF,
		.min = 0,
		.max = 1,
		.step = 1,
	},
	[FIM_CL_NUM] = {
		.ops = &fim_ctrl_ops,
		.id = V4L2_CID_IMX_FIM_NUM,
		.name = "FIM Num Average",
		.type = V4L2_CTRL_TYPE_INTEGER,
		.def = FIM_CL_NUM_DEF,
		.min = 1, /* no averaging */
		.max = 64, /* average 64 frames */
		.step = 1,
	},
	[FIM_CL_TOLERANCE_MIN] = {
		.ops = &fim_ctrl_ops,
		.id = V4L2_CID_IMX_FIM_TOLERANCE_MIN,
		.name = "FIM Tolerance Min",
		.type = V4L2_CTRL_TYPE_INTEGER,
		.def = FIM_CL_TOLERANCE_MIN_DEF,
		.min = 2,
		.max = 200,
		.step = 1,
	},
	[FIM_CL_TOLERANCE_MAX] = {
		.ops = &fim_ctrl_ops,
		.id = V4L2_CID_IMX_FIM_TOLERANCE_MAX,
		.name = "FIM Tolerance Max",
		.type = V4L2_CTRL_TYPE_INTEGER,
		.def = FIM_CL_TOLERANCE_MAX_DEF,
		.min = 0,
		.max = 500,
		.step = 1,
	},
	[FIM_CL_NUM_SKIP] = {
		.ops = &fim_ctrl_ops,
		.id = V4L2_CID_IMX_FIM_NUM_SKIP,
		.name = "FIM Num Skip",
		.type = V4L2_CTRL_TYPE_INTEGER,
		.def = FIM_CL_NUM_SKIP_DEF,
		.min = 0, /* skip no frames */
		.max = 256, /* skip 256 frames */
		.step = 1,
	},
};

/* Controls specific to the input capture measurement method */
static const struct v4l2_ctrl_config fim_icap_ctrl[] = {
	[FIM_CL_ICAP_EDGE] = {
		.ops = &fim_ctrl_ops,
		.id = V4L2_CID_IMX_FIM_ICAP_EDGE,
		.name = "FIM Input Capture Edge",
		.type = V4L2_CTRL_TYPE_INTEGER,
		.def = IRQ_TYPE_NONE, /* input capture disabled by default */
		.min = IRQ_TYPE_NONE,
		.max = IRQ_TYPE_EDGE_BOTH,
		.step = 1,
	},
	[FIM_CL_ICAP_CHANNEL] = {
		.ops = &fim_ctrl_ops,
		.id = V4L2_CID_IMX_FIM_ICAP_CHANNEL,
		.name = "FIM Input Capture Channel",
		.type = V4L2_CTRL_TYPE_INTEGER,
		.def = 0,
		.min = 0,
		.max = 1,
		.step = 1,
	},
};
/*
 * Create the FIM custom controls and cluster them so they are applied
 * together.  On handler error, all controls created so far are freed.
 *
 * Returns 0 on success, negative error code from the control handler
 * otherwise.
 */
static int init_fim_controls(struct imx_media_fim *fim)
{
	struct v4l2_ctrl_handler *hdlr = &fim->ctrl_handler;
	int i, ret;

	v4l2_ctrl_handler_init(hdlr, FIM_NUM_CONTROLS + FIM_NUM_ICAP_CONTROLS);

	for (i = 0; i < FIM_NUM_CONTROLS; i++)
		fim->ctrl[i] = v4l2_ctrl_new_custom(hdlr,
						    &fim_ctrl[i],
						    NULL);
	for (i = 0; i < FIM_NUM_ICAP_CONTROLS; i++)
		fim->icap_ctrl[i] = v4l2_ctrl_new_custom(hdlr,
							 &fim_icap_ctrl[i],
							 NULL);
	/* hdlr->error is sticky: covers every v4l2_ctrl_new_custom() above */
	if (hdlr->error) {
		ret = hdlr->error;
		goto err_free;
	}

	v4l2_ctrl_cluster(FIM_NUM_CONTROLS, fim->ctrl);
	v4l2_ctrl_cluster(FIM_NUM_ICAP_CONTROLS, fim->icap_ctrl);

	return 0;
err_free:
	v4l2_ctrl_handler_free(hdlr);
	return ret;
}
/*
 * Monitor frame intervals via EOF interrupt. This method is
 * subject to uncertainty errors introduced by interrupt latency.
 *
 * This is a noop if the Input Capture method is being used, since
 * the frame_interval_monitor() is called by the input capture event
 * callback handler in that case.
 *
 * @fim: the frame interval monitor
 * @timestamp: EOF timestamp taken by the caller
 */
void imx_media_fim_eof_monitor(struct imx_media_fim *fim, ktime_t timestamp)
{
	unsigned long flags;

	spin_lock_irqsave(&fim->lock, flags);

	if (!icap_enabled(fim))
		frame_interval_monitor(fim, timestamp);

	spin_unlock_irqrestore(&fim->lock, flags);
}
/*
 * Called by the subdev in its s_stream callback.  Resets the monitor
 * and records the nominal frame interval @fi at stream-on, then (for
 * the input capture method) waits for the first icap event so the
 * initial timestamp is valid.
 *
 * The control lock serializes against fim_s_ctrl(); the spinlock
 * protects the measurement state against the interrupt path.
 */
void imx_media_fim_set_stream(struct imx_media_fim *fim,
			      const struct v4l2_fract *fi,
			      bool on)
{
	unsigned long flags;

	v4l2_ctrl_lock(fim->ctrl[FIM_CL_ENABLE]);

	/* nothing to do if the stream state is unchanged */
	if (fim->stream_on == on)
		goto out;

	if (on) {
		spin_lock_irqsave(&fim->lock, flags);
		reset_fim(fim, true);
		update_fim_nominal(fim, fi);
		spin_unlock_irqrestore(&fim->lock, flags);

		/* must not hold the spinlock: this sleeps */
		if (icap_enabled(fim))
			fim_acquire_first_ts(fim);
	}

	fim->stream_on = on;
out:
	v4l2_ctrl_unlock(fim->ctrl[FIM_CL_ENABLE]);
}
/*
 * Add the FIM controls to the owning subdev's control handler.
 * Returns 0 on success or a negative error from
 * v4l2_ctrl_add_handler().
 */
int imx_media_fim_add_controls(struct imx_media_fim *fim)
{
	/* add the FIM controls to the calling subdev ctrl handler */
	return v4l2_ctrl_add_handler(fim->sd->ctrl_handler,
				     &fim->ctrl_handler, NULL, false);
}
/*
 * Called by the subdev in its subdev registered callback.
 *
 * Allocates (devm-managed) and initializes a frame interval monitor
 * for subdev @sd, including its custom controls.  Returns the monitor
 * or an ERR_PTR on failure.
 */
struct imx_media_fim *imx_media_fim_init(struct v4l2_subdev *sd)
{
	struct imx_media_fim *fim;
	int ret;

	fim = devm_kzalloc(sd->dev, sizeof(*fim), GFP_KERNEL);
	if (!fim)
		return ERR_PTR(-ENOMEM);

	fim->sd = sd;
	spin_lock_init(&fim->lock);

	ret = init_fim_controls(fim);
	if (ret)
		return ERR_PTR(ret);

	return fim;
}
/*
 * Release the FIM control handler.  The fim struct itself is
 * devm-managed and freed with the subdev's device.
 */
void imx_media_fim_free(struct imx_media_fim *fim)
{
	v4l2_ctrl_handler_free(&fim->ctrl_handler);
}
| linux-master | drivers/staging/media/imx/imx-media-fim.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Media driver for Freescale i.MX5/6 SOC
*
* Adds the IPU internal subdevices and the media links between them.
*
* Copyright (c) 2016 Mentor Graphics Inc.
*/
#include <linux/platform_device.h>
#include "imx-media.h"
/* max pads per internal-sd */
#define MAX_INTERNAL_PADS   8
/* max links per internal-sd pad */
#define MAX_INTERNAL_LINKS  8

struct internal_subdev;

/* one fixed media link from a local source pad to a remote sink pad */
struct internal_link {
	int remote;		/* index into int_subdev[] of remote subdev */
	int local_pad;		/* source pad on this subdev */
	int remote_pad;		/* sink pad on the remote subdev */
};

/* set of links originating from one source pad */
struct internal_pad {
	int num_links;
	struct internal_link link[MAX_INTERNAL_LINKS];
};

/*
 * Description of one IPU internal subdev: its group id, its outgoing
 * links, and (for synchronous subdevs) register/unregister hooks.
 * The CSIs have no hooks since they are registered elsewhere.
 */
struct internal_subdev {
	u32 grp_id;
	struct internal_pad pad[MAX_INTERNAL_PADS];

	struct v4l2_subdev * (*sync_register)(struct v4l2_device *v4l2_dev,
					      struct device *ipu_dev,
					      struct ipu_soc *ipu,
					      u32 grp_id);
	int (*sync_unregister)(struct v4l2_subdev *sd);
};
/*
 * Static routing table of the IPU internal subdevs and the media links
 * between them: CSI0/CSI1 direct pads feed the IC pre-processor and the
 * VDIC, the VDIC feeds the IC pre-processor, and the pre-processor
 * fans out to the PRPENC and PRPVF tasks.
 */
static const struct internal_subdev int_subdev[NUM_IPU_SUBDEVS] = {
	[IPU_CSI0] = {
		.grp_id = IMX_MEDIA_GRP_ID_IPU_CSI0,
		.pad[CSI_SRC_PAD_DIRECT] = {
			.num_links = 2,
			.link = {
				{
					.local_pad = CSI_SRC_PAD_DIRECT,
					.remote = IPU_IC_PRP,
					.remote_pad = PRP_SINK_PAD,
				}, {
					.local_pad = CSI_SRC_PAD_DIRECT,
					.remote = IPU_VDIC,
					.remote_pad = VDIC_SINK_PAD_DIRECT,
				},
			},
		},
	},

	[IPU_CSI1] = {
		.grp_id = IMX_MEDIA_GRP_ID_IPU_CSI1,
		.pad[CSI_SRC_PAD_DIRECT] = {
			.num_links = 2,
			.link = {
				{
					.local_pad = CSI_SRC_PAD_DIRECT,
					.remote = IPU_IC_PRP,
					.remote_pad = PRP_SINK_PAD,
				}, {
					.local_pad = CSI_SRC_PAD_DIRECT,
					.remote = IPU_VDIC,
					.remote_pad = VDIC_SINK_PAD_DIRECT,
				},
			},
		},
	},

	[IPU_VDIC] = {
		.grp_id = IMX_MEDIA_GRP_ID_IPU_VDIC,
		.sync_register = imx_media_vdic_register,
		.sync_unregister = imx_media_vdic_unregister,
		.pad[VDIC_SRC_PAD_DIRECT] = {
			.num_links = 1,
			.link = {
				{
					.local_pad = VDIC_SRC_PAD_DIRECT,
					.remote = IPU_IC_PRP,
					.remote_pad = PRP_SINK_PAD,
				},
			},
		},
	},

	[IPU_IC_PRP] = {
		.grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRP,
		.sync_register = imx_media_ic_register,
		.sync_unregister = imx_media_ic_unregister,
		.pad[PRP_SRC_PAD_PRPENC] = {
			.num_links = 1,
			.link = {
				{
					.local_pad = PRP_SRC_PAD_PRPENC,
					.remote = IPU_IC_PRPENC,
					.remote_pad = PRPENCVF_SINK_PAD,
				},
			},
		},
		.pad[PRP_SRC_PAD_PRPVF] = {
			.num_links = 1,
			.link = {
				{
					.local_pad = PRP_SRC_PAD_PRPVF,
					.remote = IPU_IC_PRPVF,
					.remote_pad = PRPENCVF_SINK_PAD,
				},
			},
		},
	},

	/* PRPENC and PRPVF are sinks only: no outgoing links */
	[IPU_IC_PRPENC] = {
		.grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPENC,
		.sync_register = imx_media_ic_register,
		.sync_unregister = imx_media_ic_unregister,
	},

	[IPU_IC_PRPVF] = {
		.grp_id = IMX_MEDIA_GRP_ID_IPU_IC_PRPVF,
		.sync_register = imx_media_ic_register,
		.sync_unregister = imx_media_ic_unregister,
	},
};
/*
 * Create one media pad link described by @link from @src to @sink.
 * Idempotent: if the link already exists this is a successful noop.
 * Returns 0 on success or the error from media_create_pad_link().
 */
static int create_internal_link(struct imx_media_dev *imxmd,
				struct v4l2_subdev *src,
				struct v4l2_subdev *sink,
				const struct internal_link *link)
{
	struct media_pad *src_pad = &src->entity.pads[link->local_pad];
	struct media_pad *sink_pad = &sink->entity.pads[link->remote_pad];
	int ret;

	/* skip if this link already created */
	if (media_entity_find_link(src_pad, sink_pad))
		return 0;

	dev_dbg(imxmd->md.dev, "%s:%d -> %s:%d\n",
		src->name, link->local_pad,
		sink->name, link->remote_pad);

	ret = media_create_pad_link(&src->entity, link->local_pad,
				    &sink->entity, link->remote_pad, 0);
	if (ret)
		v4l2_err(&imxmd->v4l2_dev, "%s failed: %d\n", __func__, ret);

	return ret;
}
/*
 * For subdev @sd (described by table entry @intsd), create all of the
 * source->sink links listed in the routing table, resolving each
 * remote through imxmd->sync_sd[ipu_id][].  Only source pads are
 * walked; sink pads carry no outgoing links.
 *
 * Returns 0 on success or the first link-creation error.
 */
static int create_ipu_internal_links(struct imx_media_dev *imxmd,
				     const struct internal_subdev *intsd,
				     struct v4l2_subdev *sd,
				     int ipu_id)
{
	const struct internal_pad *intpad;
	const struct internal_link *link;
	struct media_pad *pad;
	int i, j, ret;

	/* create the source->sink links */
	for (i = 0; i < sd->entity.num_pads; i++) {
		intpad = &intsd->pad[i];
		pad = &sd->entity.pads[i];

		if (!(pad->flags & MEDIA_PAD_FL_SOURCE))
			continue;

		for (j = 0; j < intpad->num_links; j++) {
			struct v4l2_subdev *sink;

			link = &intpad->link[j];
			sink = imxmd->sync_sd[ipu_id][link->remote];

			ret = create_internal_link(imxmd, sd, sink, link);
			if (ret)
				return ret;
		}
	}

	return 0;
}
/*
 * Register the synchronous IPU internal subdevs for the IPU that owns
 * the given CSI, then create the internal media links between them.
 *
 * The imxmd mutex protects imxmd->ipu[] and imxmd->sync_sd[][], but is
 * deliberately dropped around the sync_register/sync_unregister calls,
 * which may themselves take locks or sleep.  On error the subdevs
 * registered so far are unwound.
 *
 * Returns 0 on success or a negative error code.
 */
int imx_media_register_ipu_internal_subdevs(struct imx_media_dev *imxmd,
					    struct v4l2_subdev *csi)
{
	struct device *ipu_dev = csi->dev->parent;
	const struct internal_subdev *intsd;
	struct v4l2_subdev *sd;
	struct ipu_soc *ipu;
	int i, ipu_id, ret;

	ipu = dev_get_drvdata(ipu_dev);
	if (!ipu) {
		v4l2_err(&imxmd->v4l2_dev, "invalid IPU device!\n");
		return -ENODEV;
	}

	ipu_id = ipu_get_num(ipu);
	if (ipu_id > 1) {
		v4l2_err(&imxmd->v4l2_dev, "invalid IPU id %d!\n", ipu_id);
		return -ENODEV;
	}

	mutex_lock(&imxmd->mutex);

	/* record this IPU */
	if (!imxmd->ipu[ipu_id])
		imxmd->ipu[ipu_id] = ipu;

	/* register the synchronous subdevs */
	for (i = 0; i < NUM_IPU_SUBDEVS; i++) {
		intsd = &int_subdev[i];

		sd = imxmd->sync_sd[ipu_id][i];

		/*
		 * skip if this sync subdev already registered or its
		 * not a sync subdev (one of the CSIs)
		 */
		if (sd || !intsd->sync_register)
			continue;

		/* drop the lock: sync_register may sleep */
		mutex_unlock(&imxmd->mutex);
		sd = intsd->sync_register(&imxmd->v4l2_dev, ipu_dev, ipu,
					  intsd->grp_id);
		mutex_lock(&imxmd->mutex);
		if (IS_ERR(sd)) {
			ret = PTR_ERR(sd);
			goto err_unwind;
		}

		imxmd->sync_sd[ipu_id][i] = sd;
	}

	/*
	 * all the sync subdevs are registered, create the media links
	 * between them.
	 */
	for (i = 0; i < NUM_IPU_SUBDEVS; i++) {
		intsd = &int_subdev[i];

		/* the CSI itself stands in for its table entry */
		if (intsd->grp_id == csi->grp_id) {
			sd = csi;
		} else {
			sd = imxmd->sync_sd[ipu_id][i];

			/*
			 * skip if not a sync subdev (CSI), or sync
			 * subdev not registered
			 */
			if (!sd)
				continue;
		}

		ret = create_ipu_internal_links(imxmd, intsd, sd, ipu_id);
		if (ret) {
			mutex_unlock(&imxmd->mutex);
			imx_media_unregister_ipu_internal_subdevs(imxmd);
			return ret;
		}
	}

	mutex_unlock(&imxmd->mutex);
	return 0;

err_unwind:
	/* unregister the subdevs registered in this call, newest first */
	while (--i >= 0) {
		intsd = &int_subdev[i];
		sd = imxmd->sync_sd[ipu_id][i];
		if (!sd || !intsd->sync_unregister)
			continue;
		mutex_unlock(&imxmd->mutex);
		intsd->sync_unregister(sd);
		mutex_lock(&imxmd->mutex);
	}
	mutex_unlock(&imxmd->mutex);
	return ret;
}
/*
 * Unregister every synchronous internal subdev of both IPUs.  As in
 * the register path, the imxmd mutex is dropped around each
 * sync_unregister call since it may sleep.
 */
void imx_media_unregister_ipu_internal_subdevs(struct imx_media_dev *imxmd)
{
	const struct internal_subdev *intsd;
	struct v4l2_subdev *sd;
	int i, j;

	mutex_lock(&imxmd->mutex);

	for (i = 0; i < 2; i++) {
		for (j = 0; j < NUM_IPU_SUBDEVS; j++) {
			intsd = &int_subdev[j];
			sd = imxmd->sync_sd[i][j];

			if (!sd || !intsd->sync_unregister)
				continue;

			mutex_unlock(&imxmd->mutex);
			intsd->sync_unregister(sd);
			mutex_lock(&imxmd->mutex);
		}
	}

	mutex_unlock(&imxmd->mutex);
}
| linux-master | drivers/staging/media/imx/imx-media-internal-sd.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Deinterlacer Subdev for Freescale i.MX5/6 SOC
*
* Copyright (c) 2017 Mentor Graphics Inc.
*/
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
#include <media/imx.h>
#include "imx-media.h"
/*
* This subdev implements two different video pipelines:
*
* CSI -> VDIC
*
* In this pipeline, the CSI sends a single interlaced field F(n-1)
* directly to the VDIC (and optionally the following field F(n)
* can be sent to memory via IDMAC channel 13). This pipeline only works
* in VDIC's high motion mode, which only requires a single field for
* processing. The other motion modes (low and medium) require three
* fields, so this pipeline does not work in those modes. Also, it is
* not clear how this pipeline can deal with the various field orders
* (sequential BT/TB, interlaced BT/TB).
*
* MEM -> CH8,9,10 -> VDIC
*
* In this pipeline, previous field F(n-1), current field F(n), and next
* field F(n+1) are transferred to the VDIC via IDMAC channels 8,9,10.
* These memory buffers can come from a video output or mem2mem device.
* All motion modes are supported by this pipeline.
*
* The "direct" CSI->VDIC pipeline requires no DMA, but it can only be
* used in high motion mode.
*/
struct vdic_priv;

/* per-pipeline (direct vs. indirect) setup/start/stop/teardown hooks */
struct vdic_pipeline_ops {
	int (*setup)(struct vdic_priv *priv);
	void (*start)(struct vdic_priv *priv);
	void (*stop)(struct vdic_priv *priv);
	void (*disable)(struct vdic_priv *priv);
};

/*
 * Min/Max supported width and heights.
 */
#define MIN_W        32
#define MIN_H        32
#define MAX_W_VDIC  968
#define MAX_H_VDIC 2048
#define W_ALIGN    4 /* multiple of 16 pixels */
#define H_ALIGN    1 /* multiple of 2 lines */
#define S_ALIGN    1 /* multiple of 2 */

/* private state of one VDIC subdev instance */
struct vdic_priv {
	struct device *ipu_dev;
	struct ipu_soc *ipu;

	struct v4l2_subdev sd;
	struct media_pad pad[VDIC_NUM_PADS];

	/* lock to protect all members below */
	struct mutex lock;

	/* IPU units we require */
	struct ipu_vdi *vdi;

	int active_input_pad;

	struct ipuv3_channel *vdi_in_ch_p; /* F(n-1) transfer channel */
	struct ipuv3_channel *vdi_in_ch;   /* F(n) transfer channel */
	struct ipuv3_channel *vdi_in_ch_n; /* F(n+1) transfer channel */

	/* pipeline operations */
	struct vdic_pipeline_ops *ops;

	/* current and previous input buffers indirect path */
	struct imx_media_buffer *curr_in_buf;
	struct imx_media_buffer *prev_in_buf;

	/*
	 * translated field type, input line stride, and field size
	 * for indirect path
	 */
	u32 fieldtype;
	u32 in_stride;
	u32 field_size;

	/* the source (a video device or subdev) */
	struct media_entity *src;
	/* the sink that will receive the progressive out buffers */
	struct v4l2_subdev *sink_sd;

	struct v4l2_mbus_framefmt format_mbus[VDIC_NUM_PADS];
	const struct imx_media_pixfmt *cc[VDIC_NUM_PADS];
	struct v4l2_fract frame_interval[VDIC_NUM_PADS];

	/* the video device at IDMAC input pad */
	struct imx_media_video_dev *vdev;

	bool csi_direct;  /* using direct CSI->VDIC->IC pipeline */

	/* motion select control */
	struct v4l2_ctrl_handler ctrl_hdlr;
	enum ipu_motion_sel motion;

	int stream_count;
};
/*
 * Release any IPU resources held: the three VDI input IDMAC channels
 * and the VDI unit.  Safe to call with partially-acquired resources
 * (pointers are NULLed as they are released).
 */
static void vdic_put_ipu_resources(struct vdic_priv *priv)
{
	if (priv->vdi_in_ch_p)
		ipu_idmac_put(priv->vdi_in_ch_p);
	priv->vdi_in_ch_p = NULL;

	if (priv->vdi_in_ch)
		ipu_idmac_put(priv->vdi_in_ch);
	priv->vdi_in_ch = NULL;

	if (priv->vdi_in_ch_n)
		ipu_idmac_put(priv->vdi_in_ch_n);
	priv->vdi_in_ch_n = NULL;

	if (!IS_ERR_OR_NULL(priv->vdi))
		ipu_vdi_put(priv->vdi);
	priv->vdi = NULL;
}
/*
 * Acquire the IPU units the VDIC needs: the VDI itself, and, for the
 * indirect (MEM->VDIC) path only, the three IDMAC channels that feed
 * fields F(n-1), F(n), F(n+1) from memory.
 *
 * Returns 0 on success, negative error code otherwise.  On failure
 * all partially-acquired resources are released.
 */
static int vdic_get_ipu_resources(struct vdic_priv *priv)
{
	int ret, err_chan;
	struct ipuv3_channel *ch;
	struct ipu_vdi *vdi;

	vdi = ipu_vdi_get(priv->ipu);
	if (IS_ERR(vdi)) {
		v4l2_err(&priv->sd, "failed to get VDIC\n");
		ret = PTR_ERR(vdi);
		goto out;
	}
	priv->vdi = vdi;

	/* IDMAC channels are only needed for the indirect path */
	if (!priv->csi_direct) {
		ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_PREV);
		if (IS_ERR(ch)) {
			err_chan = IPUV3_CHANNEL_MEM_VDI_PREV;
			ret = PTR_ERR(ch);
			goto out_err_chan;
		}
		priv->vdi_in_ch_p = ch;

		ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_CUR);
		if (IS_ERR(ch)) {
			err_chan = IPUV3_CHANNEL_MEM_VDI_CUR;
			ret = PTR_ERR(ch);
			goto out_err_chan;
		}
		priv->vdi_in_ch = ch;

		ch = ipu_idmac_get(priv->ipu, IPUV3_CHANNEL_MEM_VDI_NEXT);
		if (IS_ERR(ch)) {
			err_chan = IPUV3_CHANNEL_MEM_VDI_NEXT;
			ret = PTR_ERR(ch);
			goto out_err_chan;
		}
		priv->vdi_in_ch_n = ch;
	}

	return 0;

out_err_chan:
	/* %d, not %u: err_chan is a signed int */
	v4l2_err(&priv->sd, "could not get IDMAC channel %d\n", err_chan);
out:
	vdic_put_ipu_resources(priv);
	return ret;
}
/*
 * This function is currently unused, but will be called when the
 * output/mem2mem device at the IDMAC input pad sends us a new
 * buffer. It kicks off the IDMAC read channels to bring in the
 * buffer fields from memory and begin the conversions.
 *
 * The F(n-1)/F(n)/F(n+1) DMA addresses are derived from the previous
 * and current buffers: for sequential field formats the second field
 * starts one field_size past the buffer start, for interlaced formats
 * one line stride past it.
 */
static void __maybe_unused prepare_vdi_in_buffers(struct vdic_priv *priv,
						  struct imx_media_buffer *curr)
{
	dma_addr_t prev_phys, curr_phys, next_phys;
	struct imx_media_buffer *prev;
	struct vb2_buffer *curr_vb, *prev_vb;
	u32 fs = priv->field_size;
	u32 is = priv->in_stride;

	/* current input buffer is now previous */
	priv->prev_in_buf = priv->curr_in_buf;
	priv->curr_in_buf = curr;
	/* first frame: reuse curr as prev since there is no prev yet */
	prev = priv->prev_in_buf ? priv->prev_in_buf : curr;

	prev_vb = &prev->vbuf.vb2_buf;
	curr_vb = &curr->vbuf.vb2_buf;

	switch (priv->fieldtype) {
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
		prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + fs;
		curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
		next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + fs;
		break;
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
	case V4L2_FIELD_INTERLACED:
		prev_phys = vb2_dma_contig_plane_dma_addr(prev_vb, 0) + is;
		curr_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0);
		next_phys = vb2_dma_contig_plane_dma_addr(curr_vb, 0) + is;
		break;
	default:
		/*
		 * can't get here, priv->fieldtype can only be one of
		 * the above. This is to quiet smatch errors.
		 */
		return;
	}

	ipu_cpmem_set_buffer(priv->vdi_in_ch_p, 0, prev_phys);
	ipu_cpmem_set_buffer(priv->vdi_in_ch,   0, curr_phys);
	ipu_cpmem_set_buffer(priv->vdi_in_ch_n, 0, next_phys);

	ipu_idmac_select_buffer(priv->vdi_in_ch_p, 0);
	ipu_idmac_select_buffer(priv->vdi_in_ch, 0);
	ipu_idmac_select_buffer(priv->vdi_in_ch_n, 0);
}
/*
 * Program one VDI input IDMAC channel's CPMEM from the video device's
 * pixel format and compose rectangle.  Each channel transfers a single
 * field, so the image height/rect are halved.
 *
 * Returns 0 on success or the error from ipu_cpmem_set_image().
 */
static int setup_vdi_channel(struct vdic_priv *priv,
			     struct ipuv3_channel *channel,
			     dma_addr_t phys0, dma_addr_t phys1)
{
	struct imx_media_video_dev *vdev = priv->vdev;
	unsigned int burst_size;
	struct ipu_image image;
	int ret;

	ipu_cpmem_zero(channel);

	memset(&image, 0, sizeof(image));
	image.pix = vdev->fmt;
	image.rect = vdev->compose;
	/* one field to VDIC channels */
	image.pix.height /= 2;
	image.rect.height /= 2;
	image.phys0 = phys0;
	image.phys1 = phys1;

	ret = ipu_cpmem_set_image(channel, &image);
	if (ret)
		return ret;

	/* use the smaller burst when width is not a multiple of 16 */
	burst_size = (image.pix.width & 0xf) ? 8 : 16;
	ipu_cpmem_set_burstsize(channel, burst_size);

	ipu_cpmem_set_axi_id(channel, 1);

	ipu_idmac_set_double_buffer(channel, false);

	return 0;
}
/* Direct CSI->VDIC pipeline: only the FSU link needs managing */
static int vdic_setup_direct(struct vdic_priv *priv)
{
	/* set VDIC to receive from CSI for direct path */
	ipu_fsu_link(priv->ipu, IPUV3_CHANNEL_CSI_DIRECT,
		     IPUV3_CHANNEL_CSI_VDI_PREV);
	return 0;
}

static void vdic_start_direct(struct vdic_priv *priv)
{
}

static void vdic_stop_direct(struct vdic_priv *priv)
{
}

static void vdic_disable_direct(struct vdic_priv *priv)
{
	ipu_fsu_unlink(priv->ipu, IPUV3_CHANNEL_CSI_DIRECT,
		       IPUV3_CHANNEL_CSI_VDI_PREV);
}

/*
 * Indirect MEM->VDIC pipeline: compute field geometry from the IDMAC
 * sink pad format and program the three VDI input channels.
 */
static int vdic_setup_indirect(struct vdic_priv *priv)
{
	struct v4l2_mbus_framefmt *infmt;
	const struct imx_media_pixfmt *incc;
	int in_size, ret;

	infmt = &priv->format_mbus[VDIC_SINK_PAD_IDMAC];
	incc = priv->cc[VDIC_SINK_PAD_IDMAC];

	in_size = (infmt->width * incc->bpp * infmt->height) >> 3;

	/* 1/2 full image size */
	priv->field_size = in_size / 2;
	priv->in_stride = incc->planar ?
		infmt->width : (infmt->width * incc->bpp) >> 3;

	priv->prev_in_buf = NULL;
	priv->curr_in_buf = NULL;

	priv->fieldtype = infmt->field;

	/* init the vdi-in channels */
	ret = setup_vdi_channel(priv, priv->vdi_in_ch_p, 0, 0);
	if (ret)
		return ret;
	ret = setup_vdi_channel(priv, priv->vdi_in_ch, 0, 0);
	if (ret)
		return ret;
	return setup_vdi_channel(priv, priv->vdi_in_ch_n, 0, 0);
}

static void vdic_start_indirect(struct vdic_priv *priv)
{
	/* enable the channels */
	ipu_idmac_enable_channel(priv->vdi_in_ch_p);
	ipu_idmac_enable_channel(priv->vdi_in_ch);
	ipu_idmac_enable_channel(priv->vdi_in_ch_n);
}

static void vdic_stop_indirect(struct vdic_priv *priv)
{
	/* disable channels */
	ipu_idmac_disable_channel(priv->vdi_in_ch_p);
	ipu_idmac_disable_channel(priv->vdi_in_ch);
	ipu_idmac_disable_channel(priv->vdi_in_ch_n);
}

static void vdic_disable_indirect(struct vdic_priv *priv)
{
}

static struct vdic_pipeline_ops direct_ops = {
	.setup = vdic_setup_direct,
	.start = vdic_start_direct,
	.stop = vdic_stop_direct,
	.disable = vdic_disable_direct,
};

static struct vdic_pipeline_ops indirect_ops = {
	.setup = vdic_setup_indirect,
	.start = vdic_start_indirect,
	.stop = vdic_stop_indirect,
	.disable = vdic_disable_indirect,
};
/*
 * Bring up the VDIC: acquire IPU resources, configure the VDI from the
 * active input pad format, run the pipeline-specific setup, enable the
 * VDI and start the pipeline.  Returns 0 on success; on error the IPU
 * resources are released.
 */
static int vdic_start(struct vdic_priv *priv)
{
	struct v4l2_mbus_framefmt *infmt;
	int ret;

	infmt = &priv->format_mbus[priv->active_input_pad];

	/* pick ops for the currently-linked pipeline */
	priv->ops = priv->csi_direct ? &direct_ops : &indirect_ops;

	ret = vdic_get_ipu_resources(priv);
	if (ret)
		return ret;

	/*
	 * init the VDIC.
	 *
	 * note we don't give infmt->code to ipu_vdi_setup(). The VDIC
	 * only supports 4:2:2 or 4:2:0, and this subdev will only
	 * negotiate 4:2:2 at its sink pads.
	 */
	ipu_vdi_setup(priv->vdi, MEDIA_BUS_FMT_UYVY8_2X8,
		      infmt->width, infmt->height);
	ipu_vdi_set_field_order(priv->vdi, V4L2_STD_UNKNOWN, infmt->field);
	ipu_vdi_set_motion(priv->vdi, priv->motion);

	ret = priv->ops->setup(priv);
	if (ret)
		goto out_put_ipu;

	ipu_vdi_enable(priv->vdi);

	priv->ops->start(priv);

	return 0;

out_put_ipu:
	vdic_put_ipu_resources(priv);
	return ret;
}

/* Reverse of vdic_start(): stop, disable VDI, tear down, release IPU */
static void vdic_stop(struct vdic_priv *priv)
{
	priv->ops->stop(priv);
	ipu_vdi_disable(priv->vdi);
	priv->ops->disable(priv);

	vdic_put_ipu_resources(priv);
}
/*
 * V4L2 subdev operations.
 */

/*
 * s_ctrl handler for the deinterlacing motion-mode control.  The
 * motion mode cannot change while streaming.
 */
static int vdic_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct vdic_priv *priv = container_of(ctrl->handler,
					      struct vdic_priv, ctrl_hdlr);
	enum ipu_motion_sel motion;
	int ret = 0;

	mutex_lock(&priv->lock);

	switch (ctrl->id) {
	case V4L2_CID_DEINTERLACING_MODE:
		motion = ctrl->val;
		if (motion != priv->motion) {
			/* can't change motion control mid-streaming */
			if (priv->stream_count > 0) {
				ret = -EBUSY;
				goto out;
			}
			priv->motion = motion;
		}
		break;
	default:
		v4l2_err(&priv->sd, "Invalid control\n");
		ret = -EINVAL;
	}

out:
	mutex_unlock(&priv->lock);
	return ret;
}

static const struct v4l2_ctrl_ops vdic_ctrl_ops = {
	.s_ctrl = vdic_s_ctrl,
};

/* menu entries for V4L2_CID_DEINTERLACING_MODE */
static const char * const vdic_ctrl_motion_menu[] = {
	"No Motion Compensation",
	"Low Motion",
	"Medium Motion",
	"High Motion",
};
/*
 * Create the motion-mode menu control and attach the handler to the
 * subdev.  Returns 0 on success; on handler error the handler is
 * freed and the error returned.
 */
static int vdic_init_controls(struct vdic_priv *priv)
{
	struct v4l2_ctrl_handler *hdlr = &priv->ctrl_hdlr;
	int ret;

	v4l2_ctrl_handler_init(hdlr, 1);

	v4l2_ctrl_new_std_menu_items(hdlr, &vdic_ctrl_ops,
				     V4L2_CID_DEINTERLACING_MODE,
				     HIGH_MOTION, 0, HIGH_MOTION,
				     vdic_ctrl_motion_menu);

	priv->sd.ctrl_handler = hdlr;

	if (hdlr->error) {
		ret = hdlr->error;
		goto out_free;
	}

	/* apply the default (HIGH_MOTION) immediately */
	v4l2_ctrl_handler_setup(hdlr);
	return 0;

out_free:
	v4l2_ctrl_handler_free(hdlr);
	return ret;
}
/*
 * s_stream: start/stop the VDIC and, for the direct pipeline, the
 * upstream CSI subdev.  Uses a stream_count so that the hardware is
 * only touched on the first enable / last disable.  Requires both a
 * source and a sink link to be configured (-EPIPE otherwise).
 */
static int vdic_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct vdic_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_subdev *src_sd = NULL;
	int ret = 0;

	mutex_lock(&priv->lock);

	if (!priv->src || !priv->sink_sd) {
		ret = -EPIPE;
		goto out;
	}

	/* only the direct pipeline has an upstream subdev to control */
	if (priv->csi_direct)
		src_sd = media_entity_to_v4l2_subdev(priv->src);

	/*
	 * enable/disable streaming only if stream_count is
	 * going from 0 to 1 / 1 to 0.
	 */
	if (priv->stream_count != !enable)
		goto update_count;

	dev_dbg(priv->ipu_dev, "%s: stream %s\n", sd->name,
		enable ? "ON" : "OFF");

	if (enable)
		ret = vdic_start(priv);
	else
		vdic_stop(priv);
	if (ret)
		goto out;

	if (src_sd) {
		/* start/stop upstream */
		ret = v4l2_subdev_call(src_sd, video, s_stream, enable);
		ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
		if (ret) {
			/* undo our own start if upstream failed */
			if (enable)
				vdic_stop(priv);
			goto out;
		}
	}

update_count:
	priv->stream_count += enable ? 1 : -1;
	if (priv->stream_count < 0)
		priv->stream_count = 0;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * Return the mbus format for @pad: the TRY format lives in the subdev
 * state, the ACTIVE format in the private struct.
 */
static struct v4l2_mbus_framefmt *
__vdic_get_fmt(struct vdic_priv *priv, struct v4l2_subdev_state *sd_state,
	       unsigned int pad, enum v4l2_subdev_format_whence which)
{
	return (which == V4L2_SUBDEV_FORMAT_TRY) ?
		v4l2_subdev_get_try_format(&priv->sd, sd_state, pad) :
		&priv->format_mbus[pad];
}
/*
 * enum_mbus_code: every VDIC pad supports the same set of IPU YUV
 * mbus codes.
 */
static int vdic_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->pad < VDIC_NUM_PADS)
		return imx_media_enum_ipu_formats(&code->code, code->index,
						  PIXFMT_SEL_YUV);

	return -EINVAL;
}
/*
 * get_fmt: copy the TRY or ACTIVE mbus format of the requested pad
 * into @sdformat, under the priv mutex.
 */
static int vdic_get_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *sd_state,
			struct v4l2_subdev_format *sdformat)
{
	struct vdic_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *fmt;
	int ret = 0;

	if (sdformat->pad >= VDIC_NUM_PADS)
		return -EINVAL;

	mutex_lock(&priv->lock);

	fmt = __vdic_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
	if (!fmt) {
		ret = -EINVAL;
		goto out;
	}

	sdformat->format = *fmt;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * Adjust @sdformat to a format the VDIC can produce/accept on the
 * given pad, and return the matching pixel format description in @cc.
 * Sink pads are clamped to the VDIC size limits and forced to an
 * interlaced field type; the source pad mirrors the active input pad
 * with a progressive field type.
 */
static void vdic_try_fmt(struct vdic_priv *priv,
			 struct v4l2_subdev_state *sd_state,
			 struct v4l2_subdev_format *sdformat,
			 const struct imx_media_pixfmt **cc)
{
	struct v4l2_mbus_framefmt *infmt;

	*cc = imx_media_find_ipu_format(sdformat->format.code,
					PIXFMT_SEL_YUV);
	if (!*cc) {
		u32 code;

		/* unsupported code requested: fall back to the first one */
		imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
		*cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV);
		sdformat->format.code = (*cc)->codes[0];
	}

	infmt = __vdic_get_fmt(priv, sd_state, priv->active_input_pad,
			       sdformat->which);

	switch (sdformat->pad) {
	case VDIC_SRC_PAD_DIRECT:
		sdformat->format = *infmt;
		/* output is always progressive! */
		sdformat->format.field = V4L2_FIELD_NONE;
		break;
	case VDIC_SINK_PAD_DIRECT:
	case VDIC_SINK_PAD_IDMAC:
		v4l_bound_align_image(&sdformat->format.width,
				      MIN_W, MAX_W_VDIC, W_ALIGN,
				      &sdformat->format.height,
				      MIN_H, MAX_H_VDIC, H_ALIGN, S_ALIGN);

		/* input must be interlaced! Choose SEQ_TB if not */
		if (!V4L2_FIELD_HAS_BOTH(sdformat->format.field))
			sdformat->format.field = V4L2_FIELD_SEQ_TB;
		break;
	}

	imx_media_try_colorimetry(&sdformat->format, true);
}
/*
 * set_fmt: validate via vdic_try_fmt(), store the result on the
 * requested pad, and propagate sink-pad changes to the source pad.
 * Rejected with -EBUSY while streaming.
 */
static int vdic_set_fmt(struct v4l2_subdev *sd,
			struct v4l2_subdev_state *sd_state,
			struct v4l2_subdev_format *sdformat)
{
	struct vdic_priv *priv = v4l2_get_subdevdata(sd);
	const struct imx_media_pixfmt *cc;
	struct v4l2_mbus_framefmt *fmt;
	int ret = 0;

	if (sdformat->pad >= VDIC_NUM_PADS)
		return -EINVAL;

	mutex_lock(&priv->lock);

	if (priv->stream_count > 0) {
		ret = -EBUSY;
		goto out;
	}

	vdic_try_fmt(priv, sd_state, sdformat, &cc);

	fmt = __vdic_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
	*fmt = sdformat->format;

	/* propagate format to source pad */
	if (sdformat->pad == VDIC_SINK_PAD_DIRECT ||
	    sdformat->pad == VDIC_SINK_PAD_IDMAC) {
		const struct imx_media_pixfmt *outcc;
		struct v4l2_mbus_framefmt *outfmt;
		struct v4l2_subdev_format format;

		format.pad = VDIC_SRC_PAD_DIRECT;
		format.which = sdformat->which;
		format.format = sdformat->format;
		vdic_try_fmt(priv, sd_state, &format, &outcc);

		outfmt = __vdic_get_fmt(priv, sd_state, VDIC_SRC_PAD_DIRECT,
					sdformat->which);
		*outfmt = format.format;
		if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
			priv->cc[VDIC_SRC_PAD_DIRECT] = outcc;
	}

	if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
		priv->cc[sdformat->pad] = cc;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * link_setup: record the source/sink of a newly enabled link, or clear
 * it when disabled.  The IDMAC sink pad must link to a video device
 * (indirect path); the direct sink pad must link to a CSI's direct
 * source pad.  Only one source and one sink may be active (-EBUSY).
 */
static int vdic_link_setup(struct media_entity *entity,
			   const struct media_pad *local,
			   const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct vdic_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_subdev *remote_sd;
	int ret = 0;

	dev_dbg(priv->ipu_dev, "%s: link setup %s -> %s",
		sd->name, remote->entity->name, local->entity->name);

	mutex_lock(&priv->lock);

	if (local->flags & MEDIA_PAD_FL_SOURCE) {
		if (!is_media_entity_v4l2_subdev(remote->entity)) {
			ret = -EINVAL;
			goto out;
		}

		remote_sd = media_entity_to_v4l2_subdev(remote->entity);

		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (priv->sink_sd) {
				ret = -EBUSY;
				goto out;
			}
			priv->sink_sd = remote_sd;
		} else {
			priv->sink_sd = NULL;
		}

		goto out;
	}

	/* this is a sink pad */

	if (flags & MEDIA_LNK_FL_ENABLED) {
		if (priv->src) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		priv->src = NULL;
		goto out;
	}

	if (local->index == VDIC_SINK_PAD_IDMAC) {
		struct imx_media_video_dev *vdev = priv->vdev;

		if (!is_media_entity_v4l2_video_device(remote->entity)) {
			ret = -EINVAL;
			goto out;
		}
		if (!vdev) {
			ret = -ENODEV;
			goto out;
		}

		priv->csi_direct = false;
	} else {
		if (!is_media_entity_v4l2_subdev(remote->entity)) {
			ret = -EINVAL;
			goto out;
		}

		remote_sd = media_entity_to_v4l2_subdev(remote->entity);

		/* direct pad must connect to a CSI */
		if (!(remote_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) ||
		    remote->index != CSI_SRC_PAD_DIRECT) {
			ret = -EINVAL;
			goto out;
		}

		priv->csi_direct = true;
	}

	priv->src = remote->entity;
	/* record which input pad is now active */
	priv->active_input_pad = local->index;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * link_validate: run the default format checks, then additionally
 * require high-motion mode when the direct CSI pipeline is in use
 * (the direct path provides only a single field per frame).
 */
static int vdic_link_validate(struct v4l2_subdev *sd,
			      struct media_link *link,
			      struct v4l2_subdev_format *source_fmt,
			      struct v4l2_subdev_format *sink_fmt)
{
	struct vdic_priv *priv = v4l2_get_subdevdata(sd);
	int ret;

	ret = v4l2_subdev_link_validate_default(sd, link,
						source_fmt, sink_fmt);
	if (ret)
		return ret;

	mutex_lock(&priv->lock);

	if (priv->csi_direct && priv->motion != HIGH_MOTION) {
		v4l2_err(&priv->sd,
			 "direct CSI pipeline requires high motion\n");
		ret = -EINVAL;
	}

	mutex_unlock(&priv->lock);
	return ret;
}
/* g_frame_interval: report the stored interval for the requested pad */
static int vdic_g_frame_interval(struct v4l2_subdev *sd,
				 struct v4l2_subdev_frame_interval *fi)
{
	struct vdic_priv *priv = v4l2_get_subdevdata(sd);

	if (fi->pad >= VDIC_NUM_PADS)
		return -EINVAL;

	mutex_lock(&priv->lock);

	fi->interval = priv->frame_interval[fi->pad];

	mutex_unlock(&priv->lock);

	return 0;
}
/*
 * s_frame_interval: store the interval on the requested pad.  The
 * source pad's interval is derived from the active input pad and
 * doubled for the direct CSI pipeline (the VDIC outputs two frames
 * per input frame there).
 */
static int vdic_s_frame_interval(struct v4l2_subdev *sd,
				 struct v4l2_subdev_frame_interval *fi)
{
	struct vdic_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_fract *input_fi, *output_fi;
	int ret = 0;

	mutex_lock(&priv->lock);

	input_fi = &priv->frame_interval[priv->active_input_pad];
	output_fi = &priv->frame_interval[VDIC_SRC_PAD_DIRECT];

	switch (fi->pad) {
	case VDIC_SINK_PAD_DIRECT:
	case VDIC_SINK_PAD_IDMAC:
		/* No limits on valid input frame intervals */
		if (fi->interval.numerator == 0 ||
		    fi->interval.denominator == 0)
			fi->interval = priv->frame_interval[fi->pad];
		/* Reset output interval */
		*output_fi = fi->interval;
		if (priv->csi_direct)
			output_fi->denominator *= 2;
		break;
	case VDIC_SRC_PAD_DIRECT:
		/*
		 * frame rate at output pad is double input
		 * rate when using direct CSI->VDIC pipeline.
		 *
		 * TODO: implement VDIC frame skipping
		 */
		fi->interval = *input_fi;
		if (priv->csi_direct)
			fi->interval.denominator *= 2;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	priv->frame_interval[fi->pad] = fi->interval;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * registered callback: set default mbus formats and frame intervals
 * on all pads (source pad defaults to double rate), then create the
 * controls.  The IDMAC pad keeps code 0 — its format comes from the
 * attached video device.
 */
static int vdic_registered(struct v4l2_subdev *sd)
{
	struct vdic_priv *priv = v4l2_get_subdevdata(sd);
	int i, ret;
	u32 code;

	for (i = 0; i < VDIC_NUM_PADS; i++) {
		code = 0;
		if (i != VDIC_SINK_PAD_IDMAC)
			imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);

		/* set a default mbus format  */
		ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
					      IMX_MEDIA_DEF_PIX_WIDTH,
					      IMX_MEDIA_DEF_PIX_HEIGHT, code,
					      V4L2_FIELD_NONE, &priv->cc[i]);
		if (ret)
			return ret;

		/* init default frame interval */
		priv->frame_interval[i].numerator = 1;
		priv->frame_interval[i].denominator = 30;
		if (i == VDIC_SRC_PAD_DIRECT)
			priv->frame_interval[i].denominator *= 2;
	}

	priv->active_input_pad = VDIC_SINK_PAD_DIRECT;

	return vdic_init_controls(priv);
}

/* unregistered callback: release the control handler */
static void vdic_unregistered(struct v4l2_subdev *sd)
{
	struct vdic_priv *priv = v4l2_get_subdevdata(sd);

	v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
}
/* V4L2 subdev / media entity operation tables */
static const struct v4l2_subdev_pad_ops vdic_pad_ops = {
	.init_cfg = imx_media_init_cfg,
	.enum_mbus_code = vdic_enum_mbus_code,
	.get_fmt = vdic_get_fmt,
	.set_fmt = vdic_set_fmt,
	.link_validate = vdic_link_validate,
};

static const struct v4l2_subdev_video_ops vdic_video_ops = {
	.g_frame_interval = vdic_g_frame_interval,
	.s_frame_interval = vdic_s_frame_interval,
	.s_stream = vdic_s_stream,
};

static const struct media_entity_operations vdic_entity_ops = {
	.link_setup = vdic_link_setup,
	.link_validate = v4l2_subdev_link_validate,
};

static const struct v4l2_subdev_ops vdic_subdev_ops = {
	.video = &vdic_video_ops,
	.pad = &vdic_pad_ops,
};

static const struct v4l2_subdev_internal_ops vdic_internal_ops = {
	.registered = vdic_registered,
	.unregistered = vdic_unregistered,
};
/*
 * Allocate and register the VDIC subdev for one IPU.  Pad flags are
 * set so that only the direct pad is a source.  Returns the subdev or
 * an ERR_PTR; on failure the mutex is destroyed (priv is devm-freed).
 */
struct v4l2_subdev *imx_media_vdic_register(struct v4l2_device *v4l2_dev,
					    struct device *ipu_dev,
					    struct ipu_soc *ipu,
					    u32 grp_id)
{
	struct vdic_priv *priv;
	int i, ret;

	priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->ipu_dev = ipu_dev;
	priv->ipu = ipu;

	v4l2_subdev_init(&priv->sd, &vdic_subdev_ops);
	v4l2_set_subdevdata(&priv->sd, priv);
	priv->sd.internal_ops = &vdic_internal_ops;
	priv->sd.entity.ops = &vdic_entity_ops;
	priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
	priv->sd.owner = ipu_dev->driver->owner;
	priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
	priv->sd.grp_id = grp_id;
	imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
				    priv->sd.grp_id, ipu_get_num(ipu));

	mutex_init(&priv->lock);

	for (i = 0; i < VDIC_NUM_PADS; i++)
		priv->pad[i].flags = (i == VDIC_SRC_PAD_DIRECT) ?
			MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;

	ret = media_entity_pads_init(&priv->sd.entity, VDIC_NUM_PADS,
				     priv->pad);
	if (ret)
		goto free;

	ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
	if (ret)
		goto free;

	return &priv->sd;
free:
	mutex_destroy(&priv->lock);
	return ERR_PTR(ret);
}
/* Unregister the VDIC subdev and release its resources. Always returns 0. */
int imx_media_vdic_unregister(struct v4l2_subdev *sd)
{
	struct vdic_priv *vdic = v4l2_get_subdevdata(sd);

	v4l2_info(sd, "Removing\n");

	/* Tear down in reverse order of registration. */
	v4l2_device_unregister_subdev(sd);
	mutex_destroy(&vdic->lock);
	media_entity_cleanup(&sd->entity);

	return 0;
}
| linux-master | drivers/staging/media/imx/imx-media-vdic.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Image Converter Subdev for Freescale i.MX5/6 SOC
*
* Copyright (c) 2014-2016 Mentor Graphics Inc.
*/
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include "imx-media.h"
#include "imx-ic.h"
/* The PRP task is stored after the ENC and VF tasks in the ops table. */
#define IC_TASK_PRP IC_NUM_TASKS
#define IC_NUM_OPS (IC_NUM_TASKS + 1)

/* Per-task operations, indexed by IC task id (ENC and VF share one set). */
static struct imx_ic_ops *ic_ops[IC_NUM_OPS] = {
	[IC_TASK_PRP] = &imx_ic_prp_ops,
	[IC_TASK_ENCODER] = &imx_ic_prpencvf_ops,
	[IC_TASK_VIEWFINDER] = &imx_ic_prpencvf_ops,
};
/*
 * Allocate and register one IC task subdev (PRP, PRPENC or PRPVF) on
 * the given IPU. Returns the new subdev, or an ERR_PTR on failure.
 */
struct v4l2_subdev *imx_media_ic_register(struct v4l2_device *v4l2_dev,
					  struct device *ipu_dev,
					  struct ipu_soc *ipu,
					  u32 grp_id)
{
	struct imx_ic_priv *priv;
	struct imx_ic_ops *ops;
	int ret;

	priv = devm_kzalloc(ipu_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->ipu_dev = ipu_dev;
	priv->ipu = ipu;

	/* Derive our IC task id from the media group id. */
	if (grp_id == IMX_MEDIA_GRP_ID_IPU_IC_PRP)
		priv->task_id = IC_TASK_PRP;
	else if (grp_id == IMX_MEDIA_GRP_ID_IPU_IC_PRPENC)
		priv->task_id = IC_TASK_ENCODER;
	else if (grp_id == IMX_MEDIA_GRP_ID_IPU_IC_PRPVF)
		priv->task_id = IC_TASK_VIEWFINDER;
	else
		return ERR_PTR(-EINVAL);

	ops = ic_ops[priv->task_id];

	v4l2_subdev_init(&priv->sd, ops->subdev_ops);
	v4l2_set_subdevdata(&priv->sd, priv);
	priv->sd.internal_ops = ops->internal_ops;
	priv->sd.entity.ops = ops->entity_ops;
	priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
	priv->sd.owner = ipu_dev->driver->owner;
	priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	priv->sd.grp_id = grp_id;
	imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
				    priv->sd.grp_id, ipu_get_num(ipu));

	/* Task-specific initialization (pads, controls, ...). */
	ret = ops->init(priv);
	if (ret)
		return ERR_PTR(ret);

	ret = v4l2_device_register_subdev(v4l2_dev, &priv->sd);
	if (ret) {
		ops->remove(priv);
		return ERR_PTR(ret);
	}

	return &priv->sd;
}
/* Unregister an IC task subdev and release its resources. Always returns 0. */
int imx_media_ic_unregister(struct v4l2_subdev *sd)
{
	struct imx_ic_priv *ic = container_of(sd, struct imx_ic_priv, sd);

	v4l2_info(sd, "Removing\n");

	/* Task-specific teardown first, then generic subdev removal. */
	ic_ops[ic->task_id]->remove(ic);
	v4l2_device_unregister_subdev(sd);
	media_entity_cleanup(&sd->entity);

	return 0;
}
| linux-master | drivers/staging/media/imx/imx-ic-common.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Video Capture Subdev for Freescale i.MX5/6 SOC
*
* Copyright (c) 2012-2016 Mentor Graphics Inc.
*/
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
#include <video/imx-ipu-v3.h>
#include <media/imx.h>
#include "imx-media.h"
#define IMX_CAPTURE_NAME "imx-capture"
/* Per-instance state of one capture video device node. */
struct capture_priv {
	struct imx_media_dev *md;		/* Media device */
	struct device *dev;			/* Physical device */

	struct imx_media_video_dev vdev;	/* Video device */
	struct media_pad vdev_pad;		/* Video device pad */

	struct v4l2_subdev *src_sd;		/* Source subdev */
	int src_sd_pad;				/* Source subdev pad */

	struct mutex mutex;			/* Protect vdev operations */

	struct vb2_queue q;			/* The videobuf2 queue */
	struct list_head ready_q;		/* List of queued buffers */
	spinlock_t q_lock;			/* Protect ready_q */

	struct v4l2_ctrl_handler ctrl_hdlr;	/* Controls inherited from subdevs */

	bool legacy_api;			/* Use the legacy (pre-MC) API */
};

/* Map an embedded imx_media_video_dev back to its capture_priv. */
#define to_capture_priv(v) container_of(v, struct capture_priv, vdev)

/* In bytes, per queue */
#define VID_MEM_LIMIT	SZ_64M
/* -----------------------------------------------------------------------------
* MC-Centric Video IOCTLs
*/
/*
 * Find the pixel format description for a media bus code / fourcc pair.
 * For IPU-processed codes, look the fourcc up within the matching
 * colorspace family (falling back to the family's first format);
 * otherwise fall back to the passthrough format for the bus code.
 */
static const struct imx_media_pixfmt *capture_find_format(u32 code, u32 fourcc)
{
	const struct imx_media_pixfmt *cc;
	enum imx_pixfmt_sel sel;

	cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV_RGB);
	if (!cc)
		return imx_media_find_mbus_format(code, PIXFMT_SEL_ANY);

	sel = (cc->cs == IPUV3_COLORSPACE_YUV) ? PIXFMT_SEL_YUV
					       : PIXFMT_SEL_RGB;

	cc = imx_media_find_pixel_format(fourcc, sel);
	if (!cc) {
		/* Default to the first format of the family. */
		imx_media_enum_pixel_formats(&fourcc, 0, sel, 0);
		cc = imx_media_find_pixel_format(fourcc, sel);
	}

	return cc;
}
/* VIDIOC_QUERYCAP: report driver/card names and the platform bus info. */
static int capture_querycap(struct file *file, void *fh,
			    struct v4l2_capability *cap)
{
	struct capture_priv *priv = video_drvdata(file);

	strscpy(cap->driver, IMX_CAPTURE_NAME, sizeof(cap->driver));
	strscpy(cap->card, IMX_CAPTURE_NAME, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "platform:%s", dev_name(priv->dev));

	return 0;
}

/* VIDIOC_ENUM_FMT: enumerate all pixel formats, optionally filtered by code. */
static int capture_enum_fmt_vid_cap(struct file *file, void *fh,
				    struct v4l2_fmtdesc *f)
{
	return imx_media_enum_pixel_formats(&f->pixelformat, f->index,
					    PIXFMT_SEL_ANY, f->mbus_code);
}
/*
 * VIDIOC_ENUM_FRAMESIZES: a single continuous range is reported for any
 * supported pixel format.
 */
static int capture_enum_framesizes(struct file *file, void *fh,
				   struct v4l2_frmsizeenum *fsize)
{
	if (fsize->index > 0)
		return -EINVAL;

	if (!imx_media_find_pixel_format(fsize->pixel_format, PIXFMT_SEL_ANY))
		return -EINVAL;

	/*
	 * TODO: The constraints are hardware-specific and may depend on the
	 * pixel format. This should come from the driver using
	 * imx_media_capture.
	 */
	fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
	fsize->stepwise.min_width = 1;
	fsize->stepwise.min_height = 1;
	fsize->stepwise.max_width = 65535;
	fsize->stepwise.max_height = 65535;
	fsize->stepwise.step_width = 1;
	fsize->stepwise.step_height = 1;

	return 0;
}
/* VIDIOC_G_FMT: return the currently configured capture format. */
static int capture_g_fmt_vid_cap(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct capture_priv *priv = video_drvdata(file);

	f->fmt.pix = priv->vdev.fmt;

	return 0;
}
/*
 * Adjust the requested pixel format to a supported one and, if @compose
 * is non-NULL, derive the compose rectangle (the source frame size)
 * from it. Returns the matched pixel format description.
 */
static const struct imx_media_pixfmt *
__capture_try_fmt(struct v4l2_pix_format *pixfmt, struct v4l2_rect *compose)
{
	struct v4l2_mbus_framefmt fmt_src;
	const struct imx_media_pixfmt *cc;

	/*
	 * Find the pixel format, default to the first supported format if not
	 * found.
	 */
	cc = imx_media_find_pixel_format(pixfmt->pixelformat, PIXFMT_SEL_ANY);
	if (!cc) {
		imx_media_enum_pixel_formats(&pixfmt->pixelformat, 0,
					     PIXFMT_SEL_ANY, 0);
		cc = imx_media_find_pixel_format(pixfmt->pixelformat,
						 PIXFMT_SEL_ANY);
	}

	/* Allow IDMAC interweave but enforce field order from source. */
	if (V4L2_FIELD_IS_INTERLACED(pixfmt->field)) {
		switch (pixfmt->field) {
		case V4L2_FIELD_SEQ_TB:
			pixfmt->field = V4L2_FIELD_INTERLACED_TB;
			break;
		case V4L2_FIELD_SEQ_BT:
			pixfmt->field = V4L2_FIELD_INTERLACED_BT;
			break;
		default:
			break;
		}
	}

	/* Round sizes and fill bytesperline/sizeimage via the mbus helper. */
	v4l2_fill_mbus_format(&fmt_src, pixfmt, 0);
	imx_media_mbus_fmt_to_pix_fmt(pixfmt, &fmt_src, cc);

	if (compose) {
		/* The compose rectangle tracks the full source frame. */
		compose->width = fmt_src.width;
		compose->height = fmt_src.height;
	}

	return cc;
}
/* VIDIOC_TRY_FMT: adjust the requested format without applying it. */
static int capture_try_fmt_vid_cap(struct file *file, void *fh,
				   struct v4l2_format *f)
{
	__capture_try_fmt(&f->fmt.pix, NULL);

	return 0;
}
/* VIDIOC_S_FMT: apply the adjusted format and update the compose rect. */
static int capture_s_fmt_vid_cap(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct capture_priv *priv = video_drvdata(file);

	/* The format cannot change while buffers are allocated. */
	if (vb2_is_busy(&priv->q)) {
		dev_err(priv->dev, "%s queue busy\n", __func__);
		return -EBUSY;
	}

	priv->vdev.cc = __capture_try_fmt(&f->fmt.pix, &priv->vdev.compose);
	priv->vdev.fmt = f->fmt.pix;

	return 0;
}
/* VIDIOC_G_SELECTION: report the (read-only) compose rectangles. */
static int capture_g_selection(struct file *file, void *fh,
			       struct v4l2_selection *s)
{
	struct capture_priv *priv = video_drvdata(file);

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		/* The compose rectangle is fixed to the source format. */
		s->r = priv->vdev.compose;
		break;
	case V4L2_SEL_TGT_COMPOSE_PADDED:
		/*
		 * The hardware writes with a configurable but fixed DMA burst
		 * size. If the source format width is not burst size aligned,
		 * the written frame contains padding to the right.
		 */
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = priv->vdev.fmt.width;
		s->r.height = priv->vdev.fmt.height;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* MC API: only the frame interval monitoring event may be subscribed. */
static int capture_subscribe_event(struct v4l2_fh *fh,
				   const struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, 0, NULL);
}
/* ioctl ops for the MC-centric API: all format negotiation is local. */
static const struct v4l2_ioctl_ops capture_ioctl_ops = {
	.vidioc_querycap		= capture_querycap,

	.vidioc_enum_fmt_vid_cap	= capture_enum_fmt_vid_cap,
	.vidioc_enum_framesizes		= capture_enum_framesizes,

	.vidioc_g_fmt_vid_cap		= capture_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap		= capture_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= capture_s_fmt_vid_cap,

	.vidioc_g_selection		= capture_g_selection,

	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,

	.vidioc_subscribe_event		= capture_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};
/* -----------------------------------------------------------------------------
* Legacy Video IOCTLs
*/
/*
 * VIDIOC_ENUM_FRAMESIZES (legacy): translate the fourcc to a media bus
 * code and forward the enumeration to the source subdev. A degenerate
 * min==max range is reported as a discrete size.
 */
static int capture_legacy_enum_framesizes(struct file *file, void *fh,
					  struct v4l2_frmsizeenum *fsize)
{
	struct capture_priv *priv = video_drvdata(file);
	const struct imx_media_pixfmt *cc;
	struct v4l2_subdev_frame_size_enum fse = {
		.index = fsize->index,
		.pad = priv->src_sd_pad,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	cc = imx_media_find_pixel_format(fsize->pixel_format, PIXFMT_SEL_ANY);
	if (!cc)
		return -EINVAL;

	fse.code = cc->codes ? cc->codes[0] : 0;

	ret = v4l2_subdev_call(priv->src_sd, pad, enum_frame_size, NULL, &fse);
	if (ret)
		return ret;

	if (fse.min_width != fse.max_width ||
	    fse.min_height != fse.max_height) {
		/* A real range: report it as a continuous interval. */
		fsize->type = V4L2_FRMSIZE_TYPE_CONTINUOUS;
		fsize->stepwise.min_width = fse.min_width;
		fsize->stepwise.max_width = fse.max_width;
		fsize->stepwise.min_height = fse.min_height;
		fsize->stepwise.max_height = fse.max_height;
		fsize->stepwise.step_width = 1;
		fsize->stepwise.step_height = 1;
	} else {
		/* Single fixed size. */
		fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
		fsize->discrete.width = fse.min_width;
		fsize->discrete.height = fse.min_height;
	}

	return 0;
}
/*
 * VIDIOC_ENUM_FRAMEINTERVALS (legacy): translate the fourcc to a media
 * bus code and forward the enumeration to the source subdev.
 */
static int capture_legacy_enum_frameintervals(struct file *file, void *fh,
					      struct v4l2_frmivalenum *fival)
{
	struct capture_priv *priv = video_drvdata(file);
	const struct imx_media_pixfmt *cc;
	struct v4l2_subdev_frame_interval_enum fie = {
		.index = fival->index,
		.pad = priv->src_sd_pad,
		.width = fival->width,
		.height = fival->height,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	cc = imx_media_find_pixel_format(fival->pixel_format, PIXFMT_SEL_ANY);
	if (!cc)
		return -EINVAL;

	fie.code = cc->codes ? cc->codes[0] : 0;

	ret = v4l2_subdev_call(priv->src_sd, pad, enum_frame_interval,
			       NULL, &fie);
	if (ret)
		return ret;

	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = fie.interval;

	return 0;
}
/*
 * VIDIOC_ENUM_FMT (legacy): enumerate only the pixel formats reachable
 * from the currently active format on the source subdev pad.
 */
static int capture_legacy_enum_fmt_vid_cap(struct file *file, void *fh,
					   struct v4l2_fmtdesc *f)
{
	struct capture_priv *priv = video_drvdata(file);
	const struct imx_media_pixfmt *cc_src;
	struct v4l2_subdev_format fmt_src = {
		.pad = priv->src_sd_pad,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	u32 fourcc;
	int ret;

	ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
	if (ret) {
		dev_err(priv->dev, "failed to get src_sd format\n");
		return ret;
	}

	cc_src = imx_media_find_ipu_format(fmt_src.format.code,
					   PIXFMT_SEL_YUV_RGB);
	if (cc_src) {
		/*
		 * IPU-processed code: any pixel format in the same
		 * colorspace family (YUV or RGB) can be produced.
		 */
		enum imx_pixfmt_sel fmt_sel =
			(cc_src->cs == IPUV3_COLORSPACE_YUV) ?
			PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;

		ret = imx_media_enum_pixel_formats(&fourcc, f->index, fmt_sel,
						   0);
		if (ret)
			return ret;
	} else {
		/* Passthrough: only the exactly matching fourcc exists. */
		cc_src = imx_media_find_mbus_format(fmt_src.format.code,
						    PIXFMT_SEL_ANY);
		if (WARN_ON(!cc_src))
			return -EINVAL;

		if (f->index != 0)
			return -EINVAL;
		fourcc = cc_src->fourcc;
	}

	f->pixelformat = fourcc;

	return 0;
}
/*
 * Adjust the requested pixel format against the active source subdev
 * format (legacy API). May fix up the source field order to enable
 * IDMAC interweaving. Returns the matched format, or NULL if none.
 */
static const struct imx_media_pixfmt *
__capture_legacy_try_fmt(struct capture_priv *priv,
			 struct v4l2_subdev_format *fmt_src,
			 struct v4l2_pix_format *pixfmt)
{
	const struct imx_media_pixfmt *cc;

	cc = capture_find_format(fmt_src->format.code, pixfmt->pixelformat);
	if (WARN_ON(!cc))
		return NULL;

	/* allow IDMAC interweave but enforce field order from source */
	if (V4L2_FIELD_IS_INTERLACED(pixfmt->field)) {
		switch (fmt_src->format.field) {
		case V4L2_FIELD_SEQ_TB:
			fmt_src->format.field = V4L2_FIELD_INTERLACED_TB;
			break;
		case V4L2_FIELD_SEQ_BT:
			fmt_src->format.field = V4L2_FIELD_INTERLACED_BT;
			break;
		default:
			break;
		}
	}

	imx_media_mbus_fmt_to_pix_fmt(pixfmt, &fmt_src->format, cc);

	return cc;
}
/* VIDIOC_TRY_FMT (legacy): adjust against the active source format. */
static int capture_legacy_try_fmt_vid_cap(struct file *file, void *fh,
					  struct v4l2_format *f)
{
	struct capture_priv *priv = video_drvdata(file);
	struct v4l2_subdev_format fmt_src = {
		.pad = priv->src_sd_pad,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int ret;

	ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
	if (ret)
		return ret;

	return __capture_legacy_try_fmt(priv, &fmt_src, &f->fmt.pix) ?
		0 : -EINVAL;
}
/* VIDIOC_S_FMT (legacy): apply the format derived from the active source. */
static int capture_legacy_s_fmt_vid_cap(struct file *file, void *fh,
					struct v4l2_format *f)
{
	struct capture_priv *priv = video_drvdata(file);
	struct v4l2_subdev_format fmt_src = {
		.pad = priv->src_sd_pad,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	const struct imx_media_pixfmt *cc;
	int ret;

	/* The format cannot change while buffers are allocated. */
	if (vb2_is_busy(&priv->q)) {
		dev_err(priv->dev, "%s queue busy\n", __func__);
		return -EBUSY;
	}

	ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
	if (ret)
		return ret;

	cc = __capture_legacy_try_fmt(priv, &fmt_src, &f->fmt.pix);
	if (!cc)
		return -EINVAL;

	priv->vdev.cc = cc;
	priv->vdev.fmt = f->fmt.pix;
	/* The compose rectangle tracks the source frame size. */
	priv->vdev.compose.width = fmt_src.format.width;
	priv->vdev.compose.height = fmt_src.format.height;

	return 0;
}
/* VIDIOC_QUERYSTD (legacy): delegate standard detection to the source. */
static int capture_legacy_querystd(struct file *file, void *fh,
				   v4l2_std_id *std)
{
	struct capture_priv *priv = video_drvdata(file);

	return v4l2_subdev_call(priv->src_sd, video, querystd, std);
}

/* VIDIOC_G_STD (legacy): delegate to the source subdev. */
static int capture_legacy_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
	struct capture_priv *priv = video_drvdata(file);

	return v4l2_subdev_call(priv->src_sd, video, g_std, std);
}

/* VIDIOC_S_STD (legacy): delegate, but only while no buffers are in use. */
static int capture_legacy_s_std(struct file *file, void *fh, v4l2_std_id std)
{
	struct capture_priv *priv = video_drvdata(file);

	if (vb2_is_busy(&priv->q))
		return -EBUSY;

	return v4l2_subdev_call(priv->src_sd, video, s_std, std);
}
/* VIDIOC_G_PARM (legacy): report the source subdev frame interval. */
static int capture_legacy_g_parm(struct file *file, void *fh,
				 struct v4l2_streamparm *a)
{
	struct capture_priv *priv = video_drvdata(file);
	struct v4l2_subdev_frame_interval fi = {
		.pad = priv->src_sd_pad,
	};
	int ret;

	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	ret = v4l2_subdev_call(priv->src_sd, video, g_frame_interval, &fi);
	if (ret < 0)
		return ret;

	a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.capture.timeperframe = fi.interval;

	return 0;
}

/* VIDIOC_S_PARM (legacy): set the frame interval on the source subdev. */
static int capture_legacy_s_parm(struct file *file, void *fh,
				 struct v4l2_streamparm *a)
{
	struct capture_priv *priv = video_drvdata(file);
	struct v4l2_subdev_frame_interval fi = {
		.pad = priv->src_sd_pad,
	};
	int ret;

	if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	fi.interval = a->parm.capture.timeperframe;
	ret = v4l2_subdev_call(priv->src_sd, video, s_frame_interval, &fi);
	if (ret < 0)
		return ret;

	/* Report back the interval actually applied by the source. */
	a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.capture.timeperframe = fi.interval;

	return 0;
}
/*
 * Legacy API event subscription: frame interval errors, source change
 * and control events are supported.
 */
static int capture_legacy_subscribe_event(struct v4l2_fh *fh,
					  const struct v4l2_event_subscription *sub)
{
	if (sub->type == V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR)
		return v4l2_event_subscribe(fh, sub, 0, NULL);
	if (sub->type == V4L2_EVENT_SOURCE_CHANGE)
		return v4l2_src_change_event_subscribe(fh, sub);
	if (sub->type == V4L2_EVENT_CTRL)
		return v4l2_ctrl_subscribe_event(fh, sub);

	return -EINVAL;
}
/* ioctl ops for the legacy (pre-media-controller) API. */
static const struct v4l2_ioctl_ops capture_legacy_ioctl_ops = {
	.vidioc_querycap		= capture_querycap,

	.vidioc_enum_framesizes		= capture_legacy_enum_framesizes,
	.vidioc_enum_frameintervals	= capture_legacy_enum_frameintervals,

	.vidioc_enum_fmt_vid_cap	= capture_legacy_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap		= capture_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap		= capture_legacy_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= capture_legacy_s_fmt_vid_cap,

	.vidioc_querystd		= capture_legacy_querystd,
	.vidioc_g_std			= capture_legacy_g_std,
	.vidioc_s_std			= capture_legacy_s_std,

	.vidioc_g_selection		= capture_g_selection,

	.vidioc_g_parm			= capture_legacy_g_parm,
	.vidioc_s_parm			= capture_legacy_s_parm,

	.vidioc_reqbufs			= vb2_ioctl_reqbufs,
	.vidioc_create_bufs		= vb2_ioctl_create_bufs,
	.vidioc_prepare_buf		= vb2_ioctl_prepare_buf,
	.vidioc_querybuf		= vb2_ioctl_querybuf,
	.vidioc_qbuf			= vb2_ioctl_qbuf,
	.vidioc_dqbuf			= vb2_ioctl_dqbuf,
	.vidioc_expbuf			= vb2_ioctl_expbuf,
	.vidioc_streamon		= vb2_ioctl_streamon,
	.vidioc_streamoff		= vb2_ioctl_streamoff,

	.vidioc_subscribe_event		= capture_legacy_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};
/* -----------------------------------------------------------------------------
* Queue Operations
*/
/*
 * vb2 queue_setup: validate/compute the number of buffers and plane
 * sizes. Handles both VIDIOC_REQBUFS (*nplanes == 0) and
 * VIDIOC_CREATE_BUFS (*nplanes != 0, where counts are additive on top
 * of the already-allocated buffers).
 */
static int capture_queue_setup(struct vb2_queue *vq,
			       unsigned int *nbuffers,
			       unsigned int *nplanes,
			       unsigned int sizes[],
			       struct device *alloc_devs[])
{
	struct capture_priv *priv = vb2_get_drv_priv(vq);
	struct v4l2_pix_format *pix = &priv->vdev.fmt;
	unsigned int count = *nbuffers;

	if (vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	if (*nplanes) {
		/* create_bufs: plane size may not shrink below the format. */
		if (*nplanes != 1 || sizes[0] < pix->sizeimage)
			return -EINVAL;
		count += vq->num_buffers;
	}

	/* Cap total allocation at VID_MEM_LIMIT bytes per queue. */
	count = min_t(__u32, VID_MEM_LIMIT / pix->sizeimage, count);

	if (*nplanes)
		/* Report how many buffers may still be added, if any. */
		*nbuffers = (count < vq->num_buffers) ? 0 :
			    count - vq->num_buffers;
	else
		*nbuffers = count;

	*nplanes = 1;
	sizes[0] = pix->sizeimage;

	return 0;
}
/* vb2 buf_init: initialize the driver-private list linkage of the buffer. */
static int capture_buf_init(struct vb2_buffer *vb)
{
	struct imx_media_buffer *buf = to_imx_media_vb(vb);

	INIT_LIST_HEAD(&buf->list);

	return 0;
}
/*
 * vb2 buf_prepare: verify that the plane backing this buffer is large
 * enough for one full image in the configured pixel format, and set
 * the payload size accordingly.
 */
static int capture_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *vq = vb->vb2_queue;
	struct capture_priv *priv = vb2_get_drv_priv(vq);
	struct v4l2_pix_format *pix = &priv->vdev.fmt;

	if (vb2_plane_size(vb, 0) < pix->sizeimage) {
		/*
		 * pix->sizeimage is a u32: cast to unsigned long so the
		 * argument matches the unsigned %lu conversion (the old
		 * (long) cast mismatched the specifier's signedness).
		 */
		dev_err(priv->dev,
			"data will not fit into plane (%lu < %lu)\n",
			vb2_plane_size(vb, 0), (unsigned long)pix->sizeimage);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, pix->sizeimage);

	return 0;
}
/* vb2 buf_queue: append the buffer to the ready list for the DMA sink. */
static void capture_buf_queue(struct vb2_buffer *vb)
{
	struct capture_priv *priv = vb2_get_drv_priv(vb->vb2_queue);
	struct imx_media_buffer *buf = to_imx_media_vb(vb);
	unsigned long flags;

	/* q_lock: ready_q is also walked from the frame completion path. */
	spin_lock_irqsave(&priv->q_lock, flags);

	list_add_tail(&buf->list, &priv->ready_q);

	spin_unlock_irqrestore(&priv->q_lock, flags);
}
/*
 * Verify, just before streaming starts, that the video node format is
 * still consistent with the active format on the source subdev pad.
 * Returns -EPIPE on any mismatch.
 */
static int capture_validate_fmt(struct capture_priv *priv)
{
	struct v4l2_subdev_format fmt_src = {
		.pad = priv->src_sd_pad,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	const struct imx_media_pixfmt *cc;
	int ret;

	/* Retrieve the media bus format on the source subdev. */
	ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL, &fmt_src);
	if (ret)
		return ret;

	/*
	 * Verify that the media bus size matches the size set on the video
	 * node. It is sufficient to check the compose rectangle size without
	 * checking the rounded size from vdev.fmt, as the rounded size is
	 * derived directly from the compose rectangle size, and will thus
	 * always match if the compose rectangle matches.
	 */
	if (priv->vdev.compose.width != fmt_src.format.width ||
	    priv->vdev.compose.height != fmt_src.format.height)
		return -EPIPE;

	/*
	 * Verify that the media bus code is compatible with the pixel format
	 * set on the video node.
	 */
	cc = capture_find_format(fmt_src.format.code, 0);
	if (!cc || priv->vdev.cc->cs != cc->cs)
		return -EPIPE;

	return 0;
}
/*
 * vb2 start_streaming: validate the negotiated format and start the
 * hardware pipeline. On failure all queued buffers are returned to vb2
 * in the QUEUED state, as vb2 requires.
 */
static int capture_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct capture_priv *priv = vb2_get_drv_priv(vq);
	struct imx_media_buffer *buf, *tmp;
	unsigned long flags;
	int ret;

	ret = capture_validate_fmt(priv);
	if (ret) {
		dev_err(priv->dev, "capture format not valid\n");
		goto return_bufs;
	}

	ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
					    true);
	if (ret) {
		dev_err(priv->dev, "pipeline start failed with %d\n", ret);
		goto return_bufs;
	}

	return 0;

return_bufs:
	spin_lock_irqsave(&priv->q_lock, flags);
	list_for_each_entry_safe(buf, tmp, &priv->ready_q, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_QUEUED);
	}
	spin_unlock_irqrestore(&priv->q_lock, flags);
	return ret;
}
/*
 * vb2 stop_streaming: stop the hardware pipeline, then hand every
 * buffer still on the ready list back to vb2 in the ERROR state.
 */
static void capture_stop_streaming(struct vb2_queue *vq)
{
	struct capture_priv *priv = vb2_get_drv_priv(vq);
	struct imx_media_buffer *buf, *next;
	unsigned long flags;
	int ret;

	ret = imx_media_pipeline_set_stream(priv->md, &priv->src_sd->entity,
					    false);
	if (ret)
		dev_warn(priv->dev, "pipeline stop failed with %d\n", ret);

	/* release all active buffers */
	spin_lock_irqsave(&priv->q_lock, flags);
	list_for_each_entry_safe(buf, next, &priv->ready_q, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vbuf.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&priv->q_lock, flags);
}
/* videobuf2 queue operations. */
static const struct vb2_ops capture_qops = {
	.queue_setup	 = capture_queue_setup,
	.buf_init	 = capture_buf_init,
	.buf_prepare	 = capture_buf_prepare,
	.buf_queue	 = capture_buf_queue,
	.wait_prepare	 = vb2_ops_wait_prepare,
	.wait_finish	 = vb2_ops_wait_finish,
	.start_streaming = capture_start_streaming,
	.stop_streaming  = capture_stop_streaming,
};
/* -----------------------------------------------------------------------------
* File Operations
*/
static int capture_open(struct file *file)
{
struct capture_priv *priv = video_drvdata(file);
struct video_device *vfd = priv->vdev.vfd;
int ret;
if (mutex_lock_interruptible(&priv->mutex))
return -ERESTARTSYS;
ret = v4l2_fh_open(file);
if (ret) {
dev_err(priv->dev, "v4l2_fh_open failed\n");
goto out;
}
ret = v4l2_pipeline_pm_get(&vfd->entity);
if (ret)
v4l2_fh_release(file);
out:
mutex_unlock(&priv->mutex);
return ret;
}
/*
 * fops release: release the vb2 queue if this handle owns it, then
 * power down the pipeline and release the file handle.
 */
static int capture_release(struct file *file)
{
	struct capture_priv *priv = video_drvdata(file);
	struct video_device *vfd = priv->vdev.vfd;
	struct vb2_queue *vq = &priv->q;

	mutex_lock(&priv->mutex);

	if (file->private_data == vq->owner) {
		/* This handle owns the queue: stop streaming, free buffers. */
		vb2_queue_release(vq);
		vq->owner = NULL;
	}

	v4l2_pipeline_pm_put(&vfd->entity);

	v4l2_fh_release(file);
	mutex_unlock(&priv->mutex);
	return 0;
}
/* File operations for the capture video device node. */
static const struct v4l2_file_operations capture_fops = {
	.owner		= THIS_MODULE,
	.open		= capture_open,
	.release	= capture_release,
	.poll		= vb2_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= vb2_fop_mmap,
};
/* -----------------------------------------------------------------------------
* Public API
*/
/*
 * Pop and return the oldest queued buffer from the ready list, or NULL
 * if the list is empty. Called by the hosting driver to obtain the
 * next DMA target.
 */
struct imx_media_buffer *
imx_media_capture_device_next_buf(struct imx_media_video_dev *vdev)
{
	struct capture_priv *priv = to_capture_priv(vdev);
	struct imx_media_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&priv->q_lock, flags);

	/* get next queued buffer */
	buf = list_first_entry_or_null(&priv->ready_q,
				       struct imx_media_buffer, list);
	if (buf)
		list_del(&buf->list);

	spin_unlock_irqrestore(&priv->q_lock, flags);

	return buf;
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_next_buf);
/*
 * Signal a fatal error on the vb2 queue: userspace must stop and
 * restart streaming to recover. Called by the hosting driver on
 * unrecoverable errors; a no-op while not streaming.
 */
void imx_media_capture_device_error(struct imx_media_video_dev *vdev)
{
	struct capture_priv *priv = to_capture_priv(vdev);
	struct vb2_queue *vq = &priv->q;
	unsigned long flags;

	if (!vb2_is_streaming(vq))
		return;

	spin_lock_irqsave(&priv->q_lock, flags);
	vb2_queue_error(vq);
	spin_unlock_irqrestore(&priv->q_lock, flags);
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_error);
/*
 * Initialize the default capture format and compose rectangle. With
 * the legacy API the default is taken from the active source subdev
 * format, otherwise a fixed UYVY format of the default size is used.
 */
static int capture_init_format(struct capture_priv *priv)
{
	struct v4l2_subdev_format fmt_src = {
		.pad = priv->src_sd_pad,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct imx_media_video_dev *vdev = &priv->vdev;
	int ret;

	if (priv->legacy_api) {
		ret = v4l2_subdev_call(priv->src_sd, pad, get_fmt, NULL,
				       &fmt_src);
		if (ret) {
			dev_err(priv->dev, "failed to get source format\n");
			return ret;
		}
	} else {
		fmt_src.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
		fmt_src.format.width = IMX_MEDIA_DEF_PIX_WIDTH;
		fmt_src.format.height = IMX_MEDIA_DEF_PIX_HEIGHT;
	}

	imx_media_mbus_fmt_to_pix_fmt(&vdev->fmt, &fmt_src.format, NULL);
	vdev->compose.width = fmt_src.format.width;
	vdev->compose.height = fmt_src.format.height;

	vdev->cc = imx_media_find_pixel_format(vdev->fmt.pixelformat,
					       PIXFMT_SEL_ANY);

	return 0;
}
/*
 * Register the capture video device and create the media link from its
 * source subdev pad to the device node. Called by the hosting subdev
 * driver once the media device is available.
 */
int imx_media_capture_device_register(struct imx_media_video_dev *vdev,
				      u32 link_flags)
{
	struct capture_priv *priv = to_capture_priv(vdev);
	struct v4l2_subdev *sd = priv->src_sd;
	struct v4l2_device *v4l2_dev = sd->v4l2_dev;
	struct video_device *vfd = vdev->vfd;
	int ret;

	/* get media device */
	priv->md = container_of(v4l2_dev->mdev, struct imx_media_dev, md);

	vfd->v4l2_dev = v4l2_dev;

	/* Initialize the default format and compose rectangle. */
	ret = capture_init_format(priv);
	if (ret < 0)
		return ret;

	/* Register the video device. */
	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		dev_err(priv->dev, "Failed to register video device\n");
		return ret;
	}

	dev_info(priv->dev, "Registered %s as /dev/%s\n", vfd->name,
		 video_device_node_name(vfd));

	/* Create the link from the src_sd devnode pad to device node. */
	if (link_flags & MEDIA_LNK_FL_IMMUTABLE)
		link_flags |= MEDIA_LNK_FL_ENABLED;
	ret = media_create_pad_link(&sd->entity, priv->src_sd_pad,
				    &vfd->entity, 0, link_flags);
	if (ret) {
		dev_err(priv->dev, "failed to create link to device node\n");
		video_unregister_device(vfd);
		return ret;
	}

	/* Add vdev to the video devices list. */
	imx_media_add_video_device(priv->md, vdev);

	return 0;
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_register);
/* Unregister the capture video device and clean up its media entity. */
void imx_media_capture_device_unregister(struct imx_media_video_dev *vdev)
{
	struct capture_priv *priv = to_capture_priv(vdev);
	struct video_device *vfd = priv->vdev.vfd;

	media_entity_cleanup(&vfd->entity);
	video_unregister_device(vfd);
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_unregister);
/*
 * Allocate and initialize a capture video device attached to source
 * subdev pad @pad of @src_sd. The device is not registered here; see
 * imx_media_capture_device_register(). Returns the new video device
 * or an ERR_PTR on failure.
 */
struct imx_media_video_dev *
imx_media_capture_device_init(struct device *dev, struct v4l2_subdev *src_sd,
			      int pad, bool legacy_api)
{
	struct capture_priv *priv;
	struct video_device *vfd;
	struct vb2_queue *vq;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->src_sd = src_sd;
	priv->src_sd_pad = pad;
	priv->dev = dev;
	priv->legacy_api = legacy_api;

	mutex_init(&priv->mutex);
	INIT_LIST_HEAD(&priv->ready_q);
	spin_lock_init(&priv->q_lock);

	/* Allocate and initialize the video device. */
	vfd = video_device_alloc();
	if (!vfd)
		return ERR_PTR(-ENOMEM);

	vfd->fops = &capture_fops;
	/* The legacy API proxies source-subdev ioctls on the video node. */
	vfd->ioctl_ops = legacy_api ? &capture_legacy_ioctl_ops
		       : &capture_ioctl_ops;
	vfd->minor = -1;
	vfd->release = video_device_release;
	vfd->vfl_dir = VFL_DIR_RX;
	vfd->tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM;
	vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING
			 | (!legacy_api ? V4L2_CAP_IO_MC : 0);
	vfd->lock = &priv->mutex;
	vfd->queue = &priv->q;

	snprintf(vfd->name, sizeof(vfd->name), "%s capture", src_sd->name);

	video_set_drvdata(vfd, priv);
	priv->vdev.vfd = vfd;
	INIT_LIST_HEAD(&priv->vdev.list);

	/* Initialize the video device pad. */
	priv->vdev_pad.flags = MEDIA_PAD_FL_SINK;
	ret = media_entity_pads_init(&vfd->entity, 1, &priv->vdev_pad);
	if (ret) {
		video_device_release(vfd);
		return ERR_PTR(ret);
	}

	/* Initialize the vb2 queue. */
	vq = &priv->q;
	vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vq->io_modes = VB2_MMAP | VB2_DMABUF;
	vq->drv_priv = priv;
	vq->buf_struct_size = sizeof(struct imx_media_buffer);
	vq->ops = &capture_qops;
	vq->mem_ops = &vb2_dma_contig_memops;
	vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	vq->lock = &priv->mutex;
	vq->min_buffers_needed = 2;
	vq->dev = priv->dev;

	ret = vb2_queue_init(vq);
	if (ret) {
		dev_err(priv->dev, "vb2_queue_init failed\n");
		video_device_release(vfd);
		return ERR_PTR(ret);
	}

	if (legacy_api) {
		/* Initialize the control handler. */
		v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
		vfd->ctrl_handler = &priv->ctrl_hdlr;
	}

	return &priv->vdev;
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_init);
/* Free resources allocated by imx_media_capture_device_init(). */
void imx_media_capture_device_remove(struct imx_media_video_dev *vdev)
{
	struct capture_priv *priv = to_capture_priv(vdev);

	v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
}
EXPORT_SYMBOL_GPL(imx_media_capture_device_remove);
MODULE_DESCRIPTION("i.MX5/6 v4l2 video capture interface driver");
MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/imx/imx-media-capture.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Capture CSI Subdev for Freescale i.MX5/6 SOC
*
* Copyright (c) 2014-2017 Mentor Graphics Inc.
* Copyright (C) 2017 Pengutronix, Philipp Zabel <[email protected]>
*/
#include <linux/delay.h>
#include <linux/gcd.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
#include <video/imx-ipu-v3.h>
#include <media/imx.h>
#include "imx-media.h"
/*
* Min/Max supported width and heights.
*
* We allow planar output, so we have to align width by 16 pixels
* to meet IDMAC alignment requirements.
*
* TODO: move this into pad format negotiation, if capture device
* has not requested planar formats, we should allow 8 pixel
* alignment.
*/
#define MIN_W       32
#define MIN_H       32
#define MAX_W     4096
#define MAX_H     4096
/* Alignment values below are log2: 1 => align to a multiple of 2. */
#define W_ALIGN    1 /* multiple of 2 pixels */
#define H_ALIGN    1 /* multiple of 2 lines */
#define S_ALIGN    1 /* multiple of 2 */
/*
 * struct csi_skip_desc - CSI frame skipping descriptor
 * @keep: number of frames kept per max_ratio frames
 * @max_ratio: width of skip_smfc, written to MAX_RATIO bitfield
 * @skip_smfc: skip pattern written to the SKIP_SMFC bitfield
 */
struct csi_skip_desc {
	u8 keep;
	u8 max_ratio;
	u8 skip_smfc;
};
/* Per-instance state of one IPU CSI capture subdev. */
struct csi_priv {
	struct device *dev;
	struct ipu_soc *ipu;
	struct v4l2_subdev sd;
	struct media_pad pad[CSI_NUM_PADS];
	struct v4l2_async_notifier notifier;

	/* the video device at IDMAC output pad */
	struct imx_media_video_dev *vdev;
	struct imx_media_fim *fim;
	int csi_id;
	int smfc_id;

	/* lock to protect all members below */
	struct mutex lock;

	int active_output_pad;

	struct ipuv3_channel *idmac_ch;
	struct ipu_smfc *smfc;
	struct ipu_csi *csi;

	struct v4l2_mbus_framefmt format_mbus[CSI_NUM_PADS];
	const struct imx_media_pixfmt *cc[CSI_NUM_PADS];
	struct v4l2_fract frame_interval[CSI_NUM_PADS];
	struct v4l2_rect crop;
	struct v4l2_rect compose;
	const struct csi_skip_desc *skip;

	/* active vb2 buffers to send to video dev sink */
	struct imx_media_buffer *active_vb2_buf[2];
	struct imx_media_dma_buf underrun_buf;
	int ipu_buf_num;	/* ipu double buffer index: 0-1 */

	/* the sink for the captured frames */
	struct media_entity *sink;
	enum ipu_csi_dest dest;
	/* the source subdev */
	struct v4l2_subdev *src_sd;

	/* the mipi virtual channel number at link validate */
	int vc_num;

	/* media bus config of the upstream subdevice CSI is receiving from */
	struct v4l2_mbus_config mbus_cfg;

	spinlock_t irqlock;	/* protect eof_irq handler */
	struct timer_list eof_timeout_timer;
	int eof_irq;
	int nfb4eof_irq;

	struct v4l2_ctrl_handler ctrl_hdlr;

	int stream_count;	/* streaming counter */
	u32 frame_sequence;	/* frame sequence counter */
	bool last_eof;		/* waiting for last EOF at stream off */
	bool nfb4eof;		/* NFB4EOF encountered during streaming */
	bool interweave_swap;	/* swap top/bottom lines when interweaving */
	struct completion last_eof_comp;
};
/* Retrieve our csi_priv from the embedded subdev. */
static inline struct csi_priv *sd_to_dev(struct v4l2_subdev *sdev)
{
	return container_of(sdev, struct csi_priv, sd);
}

/* Retrieve our csi_priv from the embedded async notifier. */
static inline struct csi_priv *notifier_to_dev(struct v4l2_async_notifier *n)
{
	return container_of(n, struct csi_priv, notifier);
}

/* True if the incoming bus is anything other than MIPI CSI-2 D-PHY. */
static inline bool is_parallel_bus(struct v4l2_mbus_config *mbus_cfg)
{
	return mbus_cfg->type != V4L2_MBUS_CSI2_DPHY;
}

/* True if the incoming bus is a parallel bus at least 16 bits wide. */
static inline bool is_parallel_16bit_bus(struct v4l2_mbus_config *mbus_cfg)
{
	return is_parallel_bus(mbus_cfg) && mbus_cfg->bus.parallel.bus_width >= 16;
}
/*
* Check for conditions that require the IPU to handle the
* data internally as generic data, aka passthrough mode:
* - raw bayer media bus formats, or
* - BT.656 and BT.1120 (8/10-bit YUV422) data can always be processed
* on-the-fly
* - the CSI is receiving from a 16-bit parallel bus, or
* - the CSI is receiving from an 8-bit parallel bus and the incoming
* media bus format is other than UYVY8_2X8/YUYV8_2X8.
*/
static inline bool requires_passthrough(struct v4l2_mbus_config *mbus_cfg,
struct v4l2_mbus_framefmt *infmt,
const struct imx_media_pixfmt *incc)
{
if (mbus_cfg->type == V4L2_MBUS_BT656) // including BT.1120
return false;
return incc->bayer || is_parallel_16bit_bus(mbus_cfg) ||
(is_parallel_bus(mbus_cfg) &&
infmt->code != MEDIA_BUS_FMT_UYVY8_2X8 &&
infmt->code != MEDIA_BUS_FMT_YUYV8_2X8);
}
/*
* Queries the media bus config of the upstream entity that provides data to
* the CSI. This will either be the entity directly upstream from the CSI-2
* receiver, directly upstream from a video mux, or directly upstream from
* the CSI itself.
*/
static int csi_get_upstream_mbus_config(struct csi_priv *priv,
					struct v4l2_mbus_config *mbus_cfg)
{
	struct v4l2_subdev *sd, *remote_sd;
	struct media_pad *remote_pad;
	int ret;

	/* no source subdev means the pipeline is not linked up yet */
	if (!priv->src_sd)
		return -EPIPE;

	sd = priv->src_sd;

	switch (sd->grp_id) {
	case IMX_MEDIA_GRP_ID_CSI_MUX:
		/*
		 * CSI is connected directly to CSI mux, skip up to
		 * CSI-2 receiver if it is in the path, otherwise stay
		 * with the CSI mux.
		 */
		sd = imx_media_pipeline_subdev(&sd->entity,
					       IMX_MEDIA_GRP_ID_CSI2,
					       true);
		if (IS_ERR(sd))
			sd = priv->src_sd;	/* no CSI-2 receiver upstream */
		break;
	case IMX_MEDIA_GRP_ID_CSI2:
		break;
	default:
		/*
		 * the source is neither the CSI mux nor the CSI-2 receiver,
		 * get the source pad directly upstream from CSI itself.
		 */
		sd = &priv->sd;
		break;
	}

	/* get source pad of entity directly upstream from sd */
	remote_pad = media_entity_remote_pad_unique(&sd->entity,
						    MEDIA_PAD_FL_SOURCE);
	if (IS_ERR(remote_pad))
		return PTR_ERR(remote_pad);

	remote_sd = media_entity_to_v4l2_subdev(remote_pad->entity);

	/* ask the upstream subdev for its active media bus configuration */
	ret = v4l2_subdev_call(remote_sd, pad, get_mbus_config,
			       remote_pad->index, mbus_cfg);
	if (ret == -ENOIOCTLCMD)
		v4l2_err(&priv->sd,
			 "entity %s does not implement get_mbus_config()\n",
			 remote_pad->entity->name);

	return ret;
}
/* Release the IDMAC channel and SMFC, if held; safe to call repeatedly. */
static void csi_idmac_put_ipu_resources(struct csi_priv *priv)
{
	if (priv->idmac_ch)
		ipu_idmac_put(priv->idmac_ch);
	if (priv->smfc)
		ipu_smfc_put(priv->smfc);

	priv->idmac_ch = NULL;
	priv->smfc = NULL;
}
/*
 * Acquire the SMFC and IDMAC channel for this CSI instance.
 * On failure, any partially-acquired resources are released.
 */
static int csi_idmac_get_ipu_resources(struct csi_priv *priv)
{
	struct ipuv3_channel *channel;
	struct ipu_smfc *fifo;
	int chno, ret;

	/* channel number is fixed by which SMFC slot this CSI maps to */
	chno = IPUV3_CHANNEL_CSI0 + priv->smfc_id;

	fifo = ipu_smfc_get(priv->ipu, chno);
	if (IS_ERR(fifo)) {
		v4l2_err(&priv->sd, "failed to get SMFC\n");
		ret = PTR_ERR(fifo);
		goto err;
	}
	priv->smfc = fifo;

	channel = ipu_idmac_get(priv->ipu, chno);
	if (IS_ERR(channel)) {
		v4l2_err(&priv->sd, "could not get IDMAC channel %u\n",
			 chno);
		ret = PTR_ERR(channel);
		goto err;
	}
	priv->idmac_ch = channel;

	return 0;
err:
	csi_idmac_put_ipu_resources(priv);
	return ret;
}
/*
 * Return the currently active vb2 buffer (if any) to userspace and
 * install the next queued buffer — or the underrun buffer when the
 * queue is empty — into the just-completed half of the IPU double
 * buffer. Called from the EOF interrupt handler with irqlock held.
 */
static void csi_vb2_buf_done(struct csi_priv *priv)
{
	struct imx_media_video_dev *vdev = priv->vdev;
	struct imx_media_buffer *done, *next;
	struct vb2_buffer *vb;
	dma_addr_t phys;

	done = priv->active_vb2_buf[priv->ipu_buf_num];
	if (done) {
		done->vbuf.field = vdev->fmt.field;
		done->vbuf.sequence = priv->frame_sequence;
		vb = &done->vbuf.vb2_buf;
		vb->timestamp = ktime_get_ns();
		/* mark the frame in error if an NFB4EOF occurred */
		vb2_buffer_done(vb, priv->nfb4eof ?
				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
	}

	priv->frame_sequence++;
	priv->nfb4eof = false;

	/* get next queued buffer */
	next = imx_media_capture_device_next_buf(vdev);
	if (next) {
		phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
		priv->active_vb2_buf[priv->ipu_buf_num] = next;
	} else {
		/* no buffer queued: capture into the underrun buffer */
		phys = priv->underrun_buf.phys;
		priv->active_vb2_buf[priv->ipu_buf_num] = NULL;
	}

	if (ipu_idmac_buffer_is_ready(priv->idmac_ch, priv->ipu_buf_num))
		ipu_idmac_clear_buffer(priv->idmac_ch, priv->ipu_buf_num);

	/* start the interweave scan at the second line (bottom field first) */
	if (priv->interweave_swap)
		phys += vdev->fmt.bytesperline;

	ipu_cpmem_set_buffer(priv->idmac_ch, priv->ipu_buf_num, phys);
}
/* EOF (end of frame) interrupt handler for the IDMAC channel. */
static irqreturn_t csi_idmac_eof_interrupt(int irq, void *dev_id)
{
	struct csi_priv *priv = dev_id;

	spin_lock(&priv->irqlock);

	if (priv->last_eof) {
		/* stream is stopping: wake up csi_idmac_wait_last_eof() */
		complete(&priv->last_eof_comp);
		priv->last_eof = false;
		goto unlock;
	}

	if (priv->fim)
		/* call frame interval monitor */
		imx_media_fim_eof_monitor(priv->fim, ktime_get());

	csi_vb2_buf_done(priv);

	/* select new IPU buf */
	ipu_idmac_select_buffer(priv->idmac_ch, priv->ipu_buf_num);
	/* toggle IPU double-buffer index */
	priv->ipu_buf_num ^= 1;

	/* bump the EOF timeout timer */
	mod_timer(&priv->eof_timeout_timer,
		  jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));

unlock:
	spin_unlock(&priv->irqlock);
	return IRQ_HANDLED;
}
/* NFB4EOF (new frame before EOF) interrupt handler. */
static irqreturn_t csi_idmac_nfb4eof_interrupt(int irq, void *dev_id)
{
	struct csi_priv *priv = dev_id;

	spin_lock(&priv->irqlock);

	/*
	 * this is not an unrecoverable error, just mark
	 * the next captured frame with vb2 error flag.
	 */
	priv->nfb4eof = true;

	v4l2_err(&priv->sd, "NFB4EOF\n");

	spin_unlock(&priv->irqlock);

	return IRQ_HANDLED;
}
/*
* EOF timeout timer function. This is an unrecoverable condition
* without a stream restart.
*/
/*
 * EOF timeout timer function. This is an unrecoverable condition
 * without a stream restart.
 */
static void csi_idmac_eof_timeout(struct timer_list *t)
{
	struct csi_priv *priv = from_timer(priv, t, eof_timeout_timer);

	v4l2_err(&priv->sd, "EOF timeout\n");

	/* signal a fatal error to capture device */
	imx_media_capture_device_error(priv->vdev);
}
/*
 * Prime both halves of the IPU double buffer with queued vb2 buffers,
 * falling back to the underrun buffer when the queue runs empty.
 * phys[] receives the dma address programmed into each half.
 */
static void csi_idmac_setup_vb2_buf(struct csi_priv *priv, dma_addr_t *phys)
{
	struct imx_media_video_dev *vdev = priv->vdev;
	int i;

	for (i = 0; i < 2; i++) {
		struct imx_media_buffer *buf =
			imx_media_capture_device_next_buf(vdev);

		if (!buf) {
			/* queue empty: use the underrun buffer */
			priv->active_vb2_buf[i] = NULL;
			phys[i] = priv->underrun_buf.phys;
			continue;
		}

		priv->active_vb2_buf[i] = buf;
		phys[i] = vb2_dma_contig_plane_dma_addr(&buf->vbuf.vb2_buf, 0);
	}
}
static void csi_idmac_unsetup_vb2_buf(struct csi_priv *priv,
				      enum vb2_buffer_state return_status)
{
	int i;

	/* return any remaining active frames with return_status */
	for (i = 0; i < 2; i++) {
		struct imx_media_buffer *buf = priv->active_vb2_buf[i];
		struct vb2_buffer *vb;

		if (!buf)
			continue;

		vb = &buf->vbuf.vb2_buf;
		vb->timestamp = ktime_get_ns();
		vb2_buffer_done(vb, return_status);
	}
}
/*
 * init the SMFC IDMAC channel
 *
 * Programs the CPMEM for the capture: resolution, stride, buffer
 * addresses, pixel format (or generic passthrough), burst sizes,
 * watermarks and optional interlaced-scan interweaving.
 */
static int csi_idmac_setup_channel(struct csi_priv *priv)
{
	struct imx_media_video_dev *vdev = priv->vdev;
	const struct imx_media_pixfmt *incc;
	struct v4l2_mbus_framefmt *infmt;
	struct v4l2_mbus_framefmt *outfmt;
	bool passthrough, interweave;
	struct ipu_image image;
	u32 passthrough_bits;
	u32 passthrough_cycles;
	dma_addr_t phys[2];
	u32 burst_size;
	int ret;

	infmt = &priv->format_mbus[CSI_SINK_PAD];
	incc = priv->cc[CSI_SINK_PAD];
	outfmt = &priv->format_mbus[CSI_SRC_PAD_IDMAC];

	ipu_cpmem_zero(priv->idmac_ch);

	memset(&image, 0, sizeof(image));
	image.pix = vdev->fmt;
	image.rect = vdev->compose;

	/* prime both halves of the double buffer with dma addresses */
	csi_idmac_setup_vb2_buf(priv, phys);

	image.phys0 = phys[0];
	image.phys1 = phys[1];

	passthrough = requires_passthrough(&priv->mbus_cfg, infmt, incc);
	passthrough_cycles = 1;

	/*
	 * If the field type at capture interface is interlaced, and
	 * the output IDMAC pad is sequential, enable interweave at
	 * the IDMAC output channel.
	 */
	interweave = V4L2_FIELD_IS_INTERLACED(image.pix.field) &&
		V4L2_FIELD_IS_SEQUENTIAL(outfmt->field);
	priv->interweave_swap = interweave &&
		image.pix.field == V4L2_FIELD_INTERLACED_BT;

	/* burst size and passthrough bit depth depend on the pixel format */
	switch (image.pix.pixelformat) {
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
	case V4L2_PIX_FMT_GREY:
		burst_size = 16;
		passthrough_bits = 8;
		break;
	case V4L2_PIX_FMT_SBGGR16:
	case V4L2_PIX_FMT_SGBRG16:
	case V4L2_PIX_FMT_SGRBG16:
	case V4L2_PIX_FMT_SRGGB16:
	case V4L2_PIX_FMT_Y10:
	case V4L2_PIX_FMT_Y12:
		burst_size = 8;
		passthrough_bits = 16;
		break;
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_YVU420:
	case V4L2_PIX_FMT_NV12:
		/* pick the largest burst the line width is a multiple of */
		burst_size = (image.pix.width & 0x3f) ?
			     ((image.pix.width & 0x1f) ?
			      ((image.pix.width & 0xf) ? 8 : 16) : 32) : 64;
		passthrough_bits = 16;
		/*
		 * Skip writing U and V components to odd rows (but not
		 * when enabling IDMAC interweaving, they are incompatible).
		 */
		if (!interweave)
			ipu_cpmem_skip_odd_chroma_rows(priv->idmac_ch);
		break;
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_UYVY:
		burst_size = (image.pix.width & 0x1f) ?
			     ((image.pix.width & 0xf) ? 8 : 16) : 32;
		passthrough_bits = 16;
		break;
	case V4L2_PIX_FMT_RGB565:
		if (passthrough) {
			burst_size = 16;
			passthrough_bits = 8;
			passthrough_cycles = incc->cycles;
			break;
		}
		fallthrough;	/* non-passthrough RGB565 (CSI-2 bus) */
	default:
		burst_size = (image.pix.width & 0xf) ? 8 : 16;
		passthrough_bits = 16;
		break;
	}

	if (passthrough) {
		if (priv->interweave_swap) {
			/* start interweave scan at 1st top line (2nd line) */
			image.phys0 += image.pix.bytesperline;
			image.phys1 += image.pix.bytesperline;
		}

		ipu_cpmem_set_resolution(priv->idmac_ch,
					 image.rect.width * passthrough_cycles,
					 image.rect.height);
		ipu_cpmem_set_stride(priv->idmac_ch, image.pix.bytesperline);
		ipu_cpmem_set_buffer(priv->idmac_ch, 0, image.phys0);
		ipu_cpmem_set_buffer(priv->idmac_ch, 1, image.phys1);
		ipu_cpmem_set_format_passthrough(priv->idmac_ch,
						 passthrough_bits);
	} else {
		if (priv->interweave_swap) {
			/* start interweave scan at 1st top line (2nd line) */
			image.rect.top = 1;
		}

		ret = ipu_cpmem_set_image(priv->idmac_ch, &image);
		if (ret)
			goto unsetup_vb2;
	}

	ipu_cpmem_set_burstsize(priv->idmac_ch, burst_size);

	/*
	 * Set the channel for the direct CSI-->memory via SMFC
	 * use-case to very high priority, by enabling the watermark
	 * signal in the SMFC, enabling WM in the channel, and setting
	 * the channel priority to high.
	 *
	 * Refer to the i.mx6 rev. D TRM Table 36-8: Calculated priority
	 * value.
	 *
	 * The WM's are set very low by intention here to ensure that
	 * the SMFC FIFOs do not overflow.
	 */
	ipu_smfc_set_watermark(priv->smfc, 0x02, 0x01);
	ipu_cpmem_set_high_priority(priv->idmac_ch);
	ipu_idmac_enable_watermark(priv->idmac_ch, true);
	ipu_cpmem_set_axi_id(priv->idmac_ch, 0);

	/* the SMFC burst size units differ between passthrough and not */
	burst_size = passthrough ?
		(burst_size >> 3) - 1 : (burst_size >> 2) - 1;
	ipu_smfc_set_burstsize(priv->smfc, burst_size);

	if (interweave)
		ipu_cpmem_interlaced_scan(priv->idmac_ch,
					  priv->interweave_swap ?
					  -image.pix.bytesperline :
					  image.pix.bytesperline,
					  image.pix.pixelformat);

	ipu_idmac_set_double_buffer(priv->idmac_ch, true);

	return 0;

unsetup_vb2:
	csi_idmac_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
	return ret;
}
/*
 * Disable the IDMAC channel and SMFC, then return any still-active
 * vb2 buffers to userspace with the given state.
 */
static void csi_idmac_unsetup(struct csi_priv *priv,
			      enum vb2_buffer_state state)
{
	ipu_idmac_disable_channel(priv->idmac_ch);
	ipu_smfc_disable(priv->smfc);

	csi_idmac_unsetup_vb2_buf(priv, state);
}
/* Program the IDMAC channel and enable the SMFC/IDMAC for capture. */
static int csi_idmac_setup(struct csi_priv *priv)
{
	int ret;

	ret = csi_idmac_setup_channel(priv);
	if (ret)
		return ret;

	/* debug dumps of the programmed CPMEM and IPU state */
	ipu_cpmem_dump(priv->idmac_ch);
	ipu_dump(priv->ipu);

	ipu_smfc_enable(priv->smfc);

	/* set buffers ready */
	ipu_idmac_select_buffer(priv->idmac_ch, 0);
	ipu_idmac_select_buffer(priv->idmac_ch, 1);

	/* enable the channels */
	ipu_idmac_enable_channel(priv->idmac_ch);

	return 0;
}
/*
 * Start the IDMAC capture path: acquire IPU resources, allocate the
 * underrun buffer, program the channel and register the EOF/NFB4EOF
 * interrupt handlers. Undone in reverse order on failure.
 */
static int csi_idmac_start(struct csi_priv *priv)
{
	struct imx_media_video_dev *vdev = priv->vdev;
	int ret;

	ret = csi_idmac_get_ipu_resources(priv);
	if (ret)
		return ret;

	ipu_smfc_map_channel(priv->smfc, priv->csi_id, priv->vc_num);

	/* fallback buffer used when no vb2 buffer is queued */
	ret = imx_media_alloc_dma_buf(priv->dev, &priv->underrun_buf,
				      vdev->fmt.sizeimage);
	if (ret)
		goto out_put_ipu;

	priv->ipu_buf_num = 0;

	/* init EOF completion waitq */
	init_completion(&priv->last_eof_comp);
	priv->frame_sequence = 0;
	priv->last_eof = false;
	priv->nfb4eof = false;

	ret = csi_idmac_setup(priv);
	if (ret) {
		v4l2_err(&priv->sd, "csi_idmac_setup failed: %d\n", ret);
		goto out_free_dma_buf;
	}

	priv->nfb4eof_irq = ipu_idmac_channel_irq(priv->ipu,
						  priv->idmac_ch,
						  IPU_IRQ_NFB4EOF);
	ret = devm_request_irq(priv->dev, priv->nfb4eof_irq,
			       csi_idmac_nfb4eof_interrupt, 0,
			       "imx-smfc-nfb4eof", priv);
	if (ret) {
		v4l2_err(&priv->sd,
			 "Error registering NFB4EOF irq: %d\n", ret);
		goto out_unsetup;
	}

	priv->eof_irq = ipu_idmac_channel_irq(priv->ipu, priv->idmac_ch,
					      IPU_IRQ_EOF);

	ret = devm_request_irq(priv->dev, priv->eof_irq,
			       csi_idmac_eof_interrupt, 0,
			       "imx-smfc-eof", priv);
	if (ret) {
		v4l2_err(&priv->sd,
			 "Error registering eof irq: %d\n", ret);
		goto out_free_nfb4eof_irq;
	}

	/* start the EOF timeout timer */
	mod_timer(&priv->eof_timeout_timer,
		  jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));

	return 0;

out_free_nfb4eof_irq:
	devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);
out_unsetup:
	csi_idmac_unsetup(priv, VB2_BUF_STATE_QUEUED);
out_free_dma_buf:
	imx_media_free_dma_buf(priv->dev, &priv->underrun_buf);
out_put_ipu:
	csi_idmac_put_ipu_resources(priv);
	return ret;
}
/*
 * Synchronize with the EOF interrupt before stream off: flag the next
 * EOF as the last one, then wait (with timeout) for the handler to
 * signal completion.
 */
static void csi_idmac_wait_last_eof(struct csi_priv *priv)
{
	unsigned long flags;
	int ret;

	/* mark next EOF interrupt as the last before stream off */
	spin_lock_irqsave(&priv->irqlock, flags);
	priv->last_eof = true;
	spin_unlock_irqrestore(&priv->irqlock, flags);

	/*
	 * and then wait for interrupt handler to mark completion.
	 */
	ret = wait_for_completion_timeout(
		&priv->last_eof_comp, msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
	if (ret == 0)
		v4l2_warn(&priv->sd, "wait last EOF timeout\n");
}
/* Tear down the IDMAC capture path started by csi_idmac_start(). */
static void csi_idmac_stop(struct csi_priv *priv)
{
	/* free irqs first so handlers cannot run during teardown */
	devm_free_irq(priv->dev, priv->eof_irq, priv);
	devm_free_irq(priv->dev, priv->nfb4eof_irq, priv);

	/* return remaining buffers in error state */
	csi_idmac_unsetup(priv, VB2_BUF_STATE_ERROR);

	imx_media_free_dma_buf(priv->dev, &priv->underrun_buf);

	/* cancel the EOF timeout timer */
	del_timer_sync(&priv->eof_timeout_timer);

	csi_idmac_put_ipu_resources(priv);
}
/* Update the CSI whole sensor and active windows */
static int csi_setup(struct csi_priv *priv)
{
	struct v4l2_mbus_framefmt *infmt, *outfmt;
	const struct imx_media_pixfmt *incc;
	struct v4l2_mbus_framefmt if_fmt;
	struct v4l2_rect crop;

	infmt = &priv->format_mbus[CSI_SINK_PAD];
	incc = priv->cc[CSI_SINK_PAD];
	outfmt = &priv->format_mbus[priv->active_output_pad];

	/* work on copies: width may be scaled below without touching state */
	if_fmt = *infmt;
	crop = priv->crop;

	/*
	 * if cycles is set, we need to handle this over multiple cycles as
	 * generic/bayer data
	 */
	if (is_parallel_bus(&priv->mbus_cfg) && incc->cycles) {
		if_fmt.width *= incc->cycles;
		crop.width *= incc->cycles;
	}

	ipu_csi_set_window(priv->csi, &crop);

	/* enable 2:1 downsize when compose is half the crop dimension */
	ipu_csi_set_downsize(priv->csi,
			     priv->crop.width == 2 * priv->compose.width,
			     priv->crop.height == 2 * priv->compose.height);

	ipu_csi_init_interface(priv->csi, &priv->mbus_cfg, &if_fmt, outfmt);

	ipu_csi_set_dest(priv->csi, priv->dest);

	/* frame skipping applies only on the IDMAC path */
	if (priv->dest == IPU_CSI_DEST_IDMAC)
		ipu_csi_set_skip_smfc(priv->csi, priv->skip->skip_smfc,
				      priv->skip->max_ratio - 1, 0);

	ipu_csi_dump(priv->csi);

	return 0;
}
/*
 * Start streaming: enable the upstream source, optionally drop the
 * first frames from a BT.656 source, start the IDMAC path (if routed
 * to memory), program the CSI and enable it. Unwinds on failure.
 */
static int csi_start(struct csi_priv *priv)
{
	struct v4l2_fract *input_fi, *output_fi;
	int ret;

	input_fi = &priv->frame_interval[CSI_SINK_PAD];
	output_fi = &priv->frame_interval[priv->active_output_pad];

	/* start upstream */
	ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
	/* upstream may legitimately not implement s_stream */
	ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
	if (ret)
		return ret;

	/* Skip first few frames from a BT.656 source */
	if (priv->mbus_cfg.type == V4L2_MBUS_BT656) {
		u32 delay_usec, bad_frames = 20;

		delay_usec = DIV_ROUND_UP_ULL((u64)USEC_PER_SEC *
			input_fi->numerator * bad_frames,
			input_fi->denominator);

		usleep_range(delay_usec, delay_usec + 1000);
	}

	if (priv->dest == IPU_CSI_DEST_IDMAC) {
		ret = csi_idmac_start(priv);
		if (ret)
			goto stop_upstream;
	}

	ret = csi_setup(priv);
	if (ret)
		goto idmac_stop;

	/* start the frame interval monitor */
	if (priv->fim && priv->dest == IPU_CSI_DEST_IDMAC)
		imx_media_fim_set_stream(priv->fim, output_fi, true);

	ret = ipu_csi_enable(priv->csi);
	if (ret) {
		v4l2_err(&priv->sd, "CSI enable error: %d\n", ret);
		goto fim_off;
	}

	return 0;

fim_off:
	if (priv->fim && priv->dest == IPU_CSI_DEST_IDMAC)
		imx_media_fim_set_stream(priv->fim, NULL, false);
idmac_stop:
	if (priv->dest == IPU_CSI_DEST_IDMAC)
		csi_idmac_stop(priv);
stop_upstream:
	v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
	return ret;
}
/* Stop streaming, mirroring csi_start() in the safe teardown order. */
static void csi_stop(struct csi_priv *priv)
{
	/* sync with the final EOF before touching anything else */
	if (priv->dest == IPU_CSI_DEST_IDMAC)
		csi_idmac_wait_last_eof(priv);

	/*
	 * Disable the CSI asap, after syncing with the last EOF.
	 * Doing so after the IDMA channel is disabled has shown to
	 * create hard system-wide hangs.
	 */
	ipu_csi_disable(priv->csi);

	/* stop upstream */
	v4l2_subdev_call(priv->src_sd, video, s_stream, 0);

	if (priv->dest == IPU_CSI_DEST_IDMAC) {
		csi_idmac_stop(priv);

		/* stop the frame interval monitor */
		if (priv->fim)
			imx_media_fim_set_stream(priv->fim, NULL, false);
	}
}
/*
 * Supported frame-skip patterns: keep <keep> out of every <max_ratio>
 * input frames. The third value is the 5-bit skip mask passed to
 * ipu_csi_set_skip_smfc() (NOTE(review): field order inferred from
 * csi_apply_skip_interval() and csi_setup(); confirm against the
 * struct csi_skip_desc declaration).
 */
static const struct csi_skip_desc csi_skip[12] = {
	{ 1, 1, 0x00 }, /* Keep all frames */
	{ 5, 6, 0x10 }, /* Skip every sixth frame */
	{ 4, 5, 0x08 }, /* Skip every fifth frame */
	{ 3, 4, 0x04 }, /* Skip every fourth frame */
	{ 2, 3, 0x02 }, /* Skip every third frame */
	{ 3, 5, 0x0a }, /* Skip frames 1 and 3 of every 5 */
	{ 1, 2, 0x01 }, /* Skip every second frame */
	{ 2, 5, 0x0b }, /* Keep frames 1 and 4 of every 5 */
	{ 1, 3, 0x03 }, /* Keep one in three frames */
	{ 1, 4, 0x07 }, /* Keep one in four frames */
	{ 1, 5, 0x0f }, /* Keep one in five frames */
	{ 1, 6, 0x1f }, /* Keep one in six frames */
};
/*
 * Scale a frame interval by the skip ratio (max_ratio/keep) and
 * reduce the resulting fraction to lowest terms.
 */
static void csi_apply_skip_interval(const struct csi_skip_desc *skip,
				    struct v4l2_fract *interval)
{
	unsigned int common;

	interval->numerator *= skip->max_ratio;
	interval->denominator *= skip->keep;

	/* Reduce fraction to lowest terms */
	common = gcd(interval->numerator, interval->denominator);
	if (common > 1) {
		interval->numerator /= common;
		interval->denominator /= common;
	}
}
/*
* Find the skip pattern to produce the output frame interval closest to the
* requested one, for the given input frame interval. Updates the output frame
* interval to the exact value.
*/
static const struct csi_skip_desc *csi_find_best_skip(struct v4l2_fract *in,
struct v4l2_fract *out)
{
const struct csi_skip_desc *skip = &csi_skip[0], *best_skip = skip;
u32 min_err = UINT_MAX;
u64 want_us;
int i;
/* Default to 1:1 ratio */
if (out->numerator == 0 || out->denominator == 0 ||
in->numerator == 0 || in->denominator == 0) {
*out = *in;
return best_skip;
}
want_us = div_u64((u64)USEC_PER_SEC * out->numerator, out->denominator);
/* Find the reduction closest to the requested time per frame */
for (i = 0; i < ARRAY_SIZE(csi_skip); i++, skip++) {
u64 tmp, err;
tmp = div_u64((u64)USEC_PER_SEC * in->numerator *
skip->max_ratio, in->denominator * skip->keep);
err = abs((s64)tmp - want_us);
if (err < min_err) {
min_err = err;
best_skip = skip;
}
}
*out = *in;
csi_apply_skip_interval(best_skip, out);
return best_skip;
}
/*
* V4L2 subdev operations.
*/
/* V4L2 subdev op: report the frame interval stored for the given pad. */
static int csi_g_frame_interval(struct v4l2_subdev *sd,
				struct v4l2_subdev_frame_interval *fi)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);

	if (fi->pad >= CSI_NUM_PADS)
		return -EINVAL;

	mutex_lock(&priv->lock);

	fi->interval = priv->frame_interval[fi->pad];

	mutex_unlock(&priv->lock);

	return 0;
}
/*
 * V4L2 subdev op: set the frame interval on a pad. On the sink pad
 * this resets the output intervals and skip ratio; on the IDMAC
 * source pad it selects the closest frame-skip pattern.
 */
static int csi_s_frame_interval(struct v4l2_subdev *sd,
				struct v4l2_subdev_frame_interval *fi)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_fract *input_fi;
	int ret = 0;

	mutex_lock(&priv->lock);

	input_fi = &priv->frame_interval[CSI_SINK_PAD];

	switch (fi->pad) {
	case CSI_SINK_PAD:
		/* No limits on valid input frame intervals */
		if (fi->interval.numerator == 0 ||
		    fi->interval.denominator == 0)
			fi->interval = *input_fi;
		/* Reset output intervals and frame skipping ratio to 1:1 */
		priv->frame_interval[CSI_SRC_PAD_IDMAC] = fi->interval;
		priv->frame_interval[CSI_SRC_PAD_DIRECT] = fi->interval;
		priv->skip = &csi_skip[0];
		break;
	case CSI_SRC_PAD_IDMAC:
		/*
		 * frame interval at IDMAC output pad depends on input
		 * interval, modified by frame skipping.
		 */
		priv->skip = csi_find_best_skip(input_fi, &fi->interval);
		break;
	case CSI_SRC_PAD_DIRECT:
		/*
		 * frame interval at DIRECT output pad is same as input
		 * interval.
		 */
		fi->interval = *input_fi;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	priv->frame_interval[fi->pad] = fi->interval;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/* V4L2 subdev op: reference-counted stream on/off. */
static int csi_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	int ret = 0;

	mutex_lock(&priv->lock);

	/* both an upstream source and a downstream sink must be linked */
	if (!priv->src_sd || !priv->sink) {
		ret = -EPIPE;
		goto out;
	}

	/*
	 * enable/disable streaming only if stream_count is
	 * going from 0 to 1 / 1 to 0.
	 */
	if (priv->stream_count != !enable)
		goto update_count;

	if (enable) {
		dev_dbg(priv->dev, "stream ON\n");
		ret = csi_start(priv);
		if (ret)
			goto out;
	} else {
		dev_dbg(priv->dev, "stream OFF\n");
		csi_stop(priv);
	}

update_count:
	priv->stream_count += enable ? 1 : -1;
	if (priv->stream_count < 0)
		priv->stream_count = 0;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * Media entity link_setup op: track the upstream source subdev on the
 * sink pad, and on a source pad record the active output pad and set
 * the CSI destination (IDMAC, IC or VDIC) from the remote entity.
 */
static int csi_link_setup(struct media_entity *entity,
			  const struct media_pad *local,
			  const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_subdev *remote_sd;
	int ret = 0;

	dev_dbg(priv->dev, "link setup %s -> %s\n", remote->entity->name,
		local->entity->name);

	mutex_lock(&priv->lock);

	if (local->flags & MEDIA_PAD_FL_SINK) {
		/* sink pad: only another subdev may feed the CSI */
		if (!is_media_entity_v4l2_subdev(remote->entity)) {
			ret = -EINVAL;
			goto out;
		}

		remote_sd = media_entity_to_v4l2_subdev(remote->entity);

		if (flags & MEDIA_LNK_FL_ENABLED) {
			/* only one enabled source link at a time */
			if (priv->src_sd) {
				ret = -EBUSY;
				goto out;
			}
			priv->src_sd = remote_sd;
		} else {
			priv->src_sd = NULL;
		}

		goto out;
	}

	/* this is a source pad */

	if (flags & MEDIA_LNK_FL_ENABLED) {
		/* only one enabled sink link at a time */
		if (priv->sink) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		/* link disabled: drop controls and forget the sink */
		v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
		v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
		priv->sink = NULL;
		/* do not apply IC burst alignment in csi_try_crop */
		priv->active_output_pad = CSI_SRC_PAD_IDMAC;
		goto out;
	}

	/* record which output pad is now active */
	priv->active_output_pad = local->index;

	/* set CSI destination */
	if (local->index == CSI_SRC_PAD_IDMAC) {
		/* IDMAC pad must connect to a video capture device */
		if (!is_media_entity_v4l2_video_device(remote->entity)) {
			ret = -EINVAL;
			goto out;
		}

		if (priv->fim) {
			ret = imx_media_fim_add_controls(priv->fim);
			if (ret)
				goto out;
		}

		priv->dest = IPU_CSI_DEST_IDMAC;
	} else {
		/* direct pad must connect to the VDIC or IC subdev */
		if (!is_media_entity_v4l2_subdev(remote->entity)) {
			ret = -EINVAL;
			goto out;
		}

		remote_sd = media_entity_to_v4l2_subdev(remote->entity);
		switch (remote_sd->grp_id) {
		case IMX_MEDIA_GRP_ID_IPU_VDIC:
			priv->dest = IPU_CSI_DEST_VDIC;
			break;
		case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
			priv->dest = IPU_CSI_DEST_IC;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
	}

	priv->sink = remote->entity;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * Media link_validate op: run the default format validation, then
 * cache the upstream media bus configuration and select the CSI
 * input mux (parallel or MIPI CSI-2).
 */
static int csi_link_validate(struct v4l2_subdev *sd,
			     struct media_link *link,
			     struct v4l2_subdev_format *source_fmt,
			     struct v4l2_subdev_format *sink_fmt)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_config mbus_cfg = { .type = 0 };
	bool is_csi2;
	int ret;

	ret = v4l2_subdev_link_validate_default(sd, link,
						source_fmt, sink_fmt);
	if (ret)
		return ret;

	ret = csi_get_upstream_mbus_config(priv, &mbus_cfg);
	if (ret) {
		v4l2_err(&priv->sd,
			 "failed to get upstream media bus configuration\n");
		return ret;
	}

	mutex_lock(&priv->lock);

	priv->mbus_cfg = mbus_cfg;
	is_csi2 = !is_parallel_bus(&mbus_cfg);
	if (is_csi2) {
		/*
		 * NOTE! It seems the virtual channels from the mipi csi-2
		 * receiver are used only for routing by the video mux's,
		 * or for hard-wired routing to the CSI's. Once the stream
		 * enters the CSI's however, they are treated internally
		 * in the IPU as virtual channel 0.
		 */
		ipu_csi_set_mipi_datatype(priv->csi, 0,
					  &priv->format_mbus[CSI_SINK_PAD]);
	}

	/* select either parallel or MIPI-CSI2 as input to CSI */
	ipu_set_csi_src_mux(priv->ipu, priv->csi_id, is_csi2);

	mutex_unlock(&priv->lock);
	return ret;
}
static struct v4l2_mbus_framefmt *
__csi_get_fmt(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
unsigned int pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&priv->sd, sd_state, pad);
else
return &priv->format_mbus[pad];
}
static struct v4l2_rect *
__csi_get_crop(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_crop(&priv->sd, sd_state,
CSI_SINK_PAD);
else
return &priv->crop;
}
static struct v4l2_rect *
__csi_get_compose(struct csi_priv *priv, struct v4l2_subdev_state *sd_state,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_compose(&priv->sd, sd_state,
CSI_SINK_PAD);
else
return &priv->compose;
}
/*
 * Clamp and align a requested crop rectangle against the input format
 * and the CSI/IC hardware alignment restrictions.
 */
static void csi_try_crop(struct csi_priv *priv,
			 struct v4l2_rect *crop,
			 struct v4l2_subdev_state *sd_state,
			 struct v4l2_mbus_framefmt *infmt,
			 struct v4l2_mbus_config *mbus_cfg)
{
	u32 in_height;

	crop->width = min_t(__u32, infmt->width, crop->width);
	if (crop->left + crop->width > infmt->width)
		crop->left = infmt->width - crop->width;
	/* adjust crop left/width to h/w alignment restrictions */
	crop->left &= ~0x3;
	if (priv->active_output_pad == CSI_SRC_PAD_DIRECT)
		crop->width &= ~0x7; /* multiple of 8 pixels (IC burst) */
	else
		crop->width &= ~0x1; /* multiple of 2 pixels */

	/* ALTERNATE carries one field per buffer, so full height is 2x */
	in_height = infmt->height;
	if (infmt->field == V4L2_FIELD_ALTERNATE)
		in_height *= 2;

	/*
	 * FIXME: not sure why yet, but on interlaced bt.656,
	 * changing the vertical cropping causes loss of vertical
	 * sync, so fix it to NTSC/PAL active lines. NTSC contains
	 * 2 extra lines of active video that need to be cropped.
	 */
	if (mbus_cfg->type == V4L2_MBUS_BT656 &&
	    (V4L2_FIELD_HAS_BOTH(infmt->field) ||
	     infmt->field == V4L2_FIELD_ALTERNATE)) {
		crop->height = in_height;
		crop->top = (in_height == 480) ? 2 : 0;
	} else {
		crop->height = min_t(__u32, in_height, crop->height);
		if (crop->top + crop->height > in_height)
			crop->top = in_height - crop->height;
	}
}
/*
 * V4L2 subdev op: enumerate media bus codes. The sink pad accepts any
 * known format; the source pads report either the sink code verbatim
 * (passthrough) or the IPU-supported codes in the sink's colorspace.
 */
static int csi_enum_mbus_code(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_mbus_code_enum *code)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_config mbus_cfg = { .type = 0 };
	const struct imx_media_pixfmt *incc;
	struct v4l2_mbus_framefmt *infmt;
	int ret = 0;

	mutex_lock(&priv->lock);

	infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, code->which);
	incc = imx_media_find_mbus_format(infmt->code, PIXFMT_SEL_ANY);

	switch (code->pad) {
	case CSI_SINK_PAD:
		ret = imx_media_enum_mbus_formats(&code->code, code->index,
						  PIXFMT_SEL_ANY);
		break;
	case CSI_SRC_PAD_DIRECT:
	case CSI_SRC_PAD_IDMAC:
		ret = csi_get_upstream_mbus_config(priv, &mbus_cfg);
		if (ret) {
			v4l2_err(&priv->sd,
				 "failed to get upstream media bus configuration\n");
			goto out;
		}

		if (requires_passthrough(&mbus_cfg, infmt, incc)) {
			/* passthrough: only the sink code is available */
			if (code->index != 0) {
				ret = -EINVAL;
				goto out;
			}
			code->code = infmt->code;
		} else {
			enum imx_pixfmt_sel fmt_sel =
				(incc->cs == IPUV3_COLORSPACE_YUV) ?
				PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;

			ret = imx_media_enum_ipu_formats(&code->code,
							 code->index,
							 fmt_sel);
		}
		break;
	default:
		ret = -EINVAL;
	}

out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * V4L2 subdev op: enumerate frame sizes. Source pads report the crop
 * dimensions and their 2:1 downsized variants; index bit 0 selects
 * half width, bit 1 half height (4 combinations).
 */
static int csi_enum_frame_size(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_frame_size_enum *fse)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_rect *crop;
	int ret = 0;

	if (fse->pad >= CSI_NUM_PADS ||
	    fse->index > (fse->pad == CSI_SINK_PAD ? 0 : 3))
		return -EINVAL;

	mutex_lock(&priv->lock);

	if (fse->pad == CSI_SINK_PAD) {
		/* sink: one continuous range of supported sizes */
		fse->min_width = MIN_W;
		fse->max_width = MAX_W;
		fse->min_height = MIN_H;
		fse->max_height = MAX_H;
	} else {
		crop = __csi_get_crop(priv, sd_state, fse->which);

		fse->min_width = fse->index & 1 ?
			crop->width / 2 : crop->width;
		fse->max_width = fse->min_width;
		fse->min_height = fse->index & 2 ?
			crop->height / 2 : crop->height;
		fse->max_height = fse->min_height;
	}

	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * V4L2 subdev op: enumerate frame intervals. Only the IDMAC source
 * pad offers multiple intervals (one per skip pattern); the size must
 * match the crop dimensions or their 2:1 downsized variants.
 */
static int csi_enum_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *sd_state,
				   struct v4l2_subdev_frame_interval_enum *fie)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_fract *input_fi;
	struct v4l2_rect *crop;
	int ret = 0;

	if (fie->pad >= CSI_NUM_PADS ||
	    fie->index >= (fie->pad != CSI_SRC_PAD_IDMAC ?
			   1 : ARRAY_SIZE(csi_skip)))
		return -EINVAL;

	mutex_lock(&priv->lock);

	input_fi = &priv->frame_interval[CSI_SINK_PAD];
	crop = __csi_get_crop(priv, sd_state, fie->which);

	if ((fie->width != crop->width && fie->width != crop->width / 2) ||
	    (fie->height != crop->height && fie->height != crop->height / 2)) {
		ret = -EINVAL;
		goto out;
	}

	fie->interval = *input_fi;

	/* apply the indexed skip pattern on the IDMAC pad */
	if (fie->pad == CSI_SRC_PAD_IDMAC)
		csi_apply_skip_interval(&csi_skip[fie->index],
					&fie->interval);

out:
	mutex_unlock(&priv->lock);
	return ret;
}
/* V4L2 subdev op: return the stored format for the given pad. */
static int csi_get_fmt(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *sd_state,
		       struct v4l2_subdev_format *sdformat)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *fmt;
	int ret = 0;

	if (sdformat->pad >= CSI_NUM_PADS)
		return -EINVAL;

	mutex_lock(&priv->lock);

	fmt = __csi_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
	if (!fmt) {
		ret = -EINVAL;
		goto out;
	}

	sdformat->format = *fmt;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * Adjust the requested field type of sdformat based on the pad and
 * the sink pad's field type (see the per-case comments below).
 */
static void csi_try_field(struct csi_priv *priv,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_format *sdformat)
{
	struct v4l2_mbus_framefmt *infmt =
		__csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sdformat->which);

	/*
	 * no restrictions on sink pad field type except must
	 * be initialized.
	 */
	if (sdformat->pad == CSI_SINK_PAD) {
		if (sdformat->format.field == V4L2_FIELD_ANY)
			sdformat->format.field = V4L2_FIELD_NONE;
		return;
	}

	switch (infmt->field) {
	case V4L2_FIELD_SEQ_TB:
	case V4L2_FIELD_SEQ_BT:
		/*
		 * If the user requests sequential at the source pad,
		 * allow it (along with possibly inverting field order).
		 * Otherwise passthrough the field type.
		 */
		if (!V4L2_FIELD_IS_SEQUENTIAL(sdformat->format.field))
			sdformat->format.field = infmt->field;
		break;
	case V4L2_FIELD_ALTERNATE:
		/*
		 * This driver does not support alternate field mode, and
		 * the CSI captures a whole frame, so the CSI never presents
		 * alternate mode at its source pads. If user has not
		 * already requested sequential, translate ALTERNATE at
		 * sink pad to SEQ_TB or SEQ_BT at the source pad depending
		 * on input height (assume NTSC BT order if 480 total active
		 * frame lines, otherwise PAL TB order).
		 */
		if (!V4L2_FIELD_IS_SEQUENTIAL(sdformat->format.field))
			sdformat->format.field = (infmt->height == 480 / 2) ?
				V4L2_FIELD_SEQ_BT : V4L2_FIELD_SEQ_TB;
		break;
	default:
		/* Passthrough for all other input field types */
		sdformat->format.field = infmt->field;
		break;
	}
}
/*
 * Adjust a requested format for the given pad: source pads inherit
 * size from the compose rectangle and colorimetry from the sink;
 * the sink pad is bounds/alignment checked and resets crop/compose.
 * *cc receives the matched pixel format description.
 */
static void csi_try_fmt(struct csi_priv *priv,
			struct v4l2_mbus_config *mbus_cfg,
			struct v4l2_subdev_state *sd_state,
			struct v4l2_subdev_format *sdformat,
			struct v4l2_rect *crop,
			struct v4l2_rect *compose,
			const struct imx_media_pixfmt **cc)
{
	const struct imx_media_pixfmt *incc;
	struct v4l2_mbus_framefmt *infmt;
	u32 code;

	infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sdformat->which);

	switch (sdformat->pad) {
	case CSI_SRC_PAD_DIRECT:
	case CSI_SRC_PAD_IDMAC:
		incc = imx_media_find_mbus_format(infmt->code, PIXFMT_SEL_ANY);

		/* source size is fixed to the compose rectangle */
		sdformat->format.width = compose->width;
		sdformat->format.height = compose->height;

		if (requires_passthrough(mbus_cfg, infmt, incc)) {
			/* passthrough: source code must match the sink */
			sdformat->format.code = infmt->code;
			*cc = incc;
		} else {
			enum imx_pixfmt_sel fmt_sel =
				(incc->cs == IPUV3_COLORSPACE_YUV) ?
				PIXFMT_SEL_YUV : PIXFMT_SEL_RGB;

			*cc = imx_media_find_ipu_format(sdformat->format.code,
							fmt_sel);
			if (!*cc) {
				/* fall back to the first IPU format */
				imx_media_enum_ipu_formats(&code, 0, fmt_sel);
				*cc = imx_media_find_ipu_format(code, fmt_sel);
				sdformat->format.code = (*cc)->codes[0];
			}
		}

		csi_try_field(priv, sd_state, sdformat);

		/* propagate colorimetry from sink */
		sdformat->format.colorspace = infmt->colorspace;
		sdformat->format.xfer_func = infmt->xfer_func;
		sdformat->format.quantization = infmt->quantization;
		sdformat->format.ycbcr_enc = infmt->ycbcr_enc;

		break;
	case CSI_SINK_PAD:
		v4l_bound_align_image(&sdformat->format.width, MIN_W, MAX_W,
				      W_ALIGN, &sdformat->format.height,
				      MIN_H, MAX_H, H_ALIGN, S_ALIGN);

		*cc = imx_media_find_mbus_format(sdformat->format.code,
						 PIXFMT_SEL_ANY);
		if (!*cc) {
			/* fall back to the first YUV/RGB mbus format */
			imx_media_enum_mbus_formats(&code, 0,
						    PIXFMT_SEL_YUV_RGB);
			*cc = imx_media_find_mbus_format(code,
							 PIXFMT_SEL_YUV_RGB);
			sdformat->format.code = (*cc)->codes[0];
		}

		csi_try_field(priv, sd_state, sdformat);

		/* Reset crop and compose rectangles */
		crop->left = 0;
		crop->top = 0;
		crop->width = sdformat->format.width;
		crop->height = sdformat->format.height;
		/* ALTERNATE carries one field per buffer: crop full height */
		if (sdformat->format.field == V4L2_FIELD_ALTERNATE)
			crop->height *= 2;
		csi_try_crop(priv, crop, sd_state, &sdformat->format, mbus_cfg);
		compose->left = 0;
		compose->top = 0;
		compose->width = crop->width;
		compose->height = crop->height;

		break;
	}

	imx_media_try_colorimetry(&sdformat->format,
			priv->active_output_pad == CSI_SRC_PAD_DIRECT);
}
/*
 * V4L2 subdev pad op: set the media bus format on the given pad.
 *
 * The requested format is first adjusted by csi_try_fmt(). When the
 * sink pad format is set, the result is also propagated to both
 * source pads. Returns -EBUSY while streaming is active.
 */
static int csi_set_fmt(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *sd_state,
		       struct v4l2_subdev_format *sdformat)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_config mbus_cfg = { .type = 0 };
	const struct imx_media_pixfmt *cc;
	struct v4l2_mbus_framefmt *fmt;
	struct v4l2_rect *crop, *compose;
	int ret;
	if (sdformat->pad >= CSI_NUM_PADS)
		return -EINVAL;
	/* the allowed formats depend on the upstream bus configuration */
	ret = csi_get_upstream_mbus_config(priv, &mbus_cfg);
	if (ret) {
		v4l2_err(&priv->sd,
			 "failed to get upstream media bus configuration\n");
		return ret;
	}
	mutex_lock(&priv->lock);
	if (priv->stream_count > 0) {
		/* formats are frozen while streaming */
		ret = -EBUSY;
		goto out;
	}
	crop = __csi_get_crop(priv, sd_state, sdformat->which);
	compose = __csi_get_compose(priv, sd_state, sdformat->which);
	csi_try_fmt(priv, &mbus_cfg, sd_state, sdformat, crop, compose, &cc);
	fmt = __csi_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
	*fmt = sdformat->format;
	if (sdformat->pad == CSI_SINK_PAD) {
		int pad;
		/* propagate format to source pads */
		for (pad = CSI_SINK_PAD + 1; pad < CSI_NUM_PADS; pad++) {
			const struct imx_media_pixfmt *outcc;
			struct v4l2_mbus_framefmt *outfmt;
			struct v4l2_subdev_format format;
			format.pad = pad;
			format.which = sdformat->which;
			format.format = sdformat->format;
			/* crop is NULL: source pads must not alter the crop */
			csi_try_fmt(priv, &mbus_cfg, sd_state, &format, NULL,
				    compose, &outcc);
			outfmt = __csi_get_fmt(priv, sd_state, pad,
					       sdformat->which);
			*outfmt = format.format;
			if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
				priv->cc[pad] = outcc;
		}
	}
	if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
		priv->cc[sdformat->pad] = cc;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * V4L2 subdev pad op: return the requested selection rectangle.
 * Only the sink pad carries selections.
 */
static int csi_get_selection(struct v4l2_subdev *sd,
			     struct v4l2_subdev_state *sd_state,
			     struct v4l2_subdev_selection *sel)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *sinkfmt;
	struct v4l2_rect *crop_r, *comp_r;
	int err = 0;

	if (sel->pad != CSI_SINK_PAD)
		return -EINVAL;

	mutex_lock(&priv->lock);

	sinkfmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sel->which);
	crop_r = __csi_get_crop(priv, sd_state, sel->which);
	comp_r = __csi_get_compose(priv, sd_state, sel->which);

	if (sel->target == V4L2_SEL_TGT_CROP_BOUNDS) {
		/* crop bounds are the full sink frame */
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = sinkfmt->width;
		sel->r.height = sinkfmt->height;
		/* with alternate fields, bounds span the whole frame */
		if (sinkfmt->field == V4L2_FIELD_ALTERNATE)
			sel->r.height *= 2;
	} else if (sel->target == V4L2_SEL_TGT_CROP) {
		sel->r = *crop_r;
	} else if (sel->target == V4L2_SEL_TGT_COMPOSE_BOUNDS) {
		/* composing is bounded by the crop rectangle */
		sel->r.left = 0;
		sel->r.top = 0;
		sel->r.width = crop_r->width;
		sel->r.height = crop_r->height;
	} else if (sel->target == V4L2_SEL_TGT_COMPOSE) {
		sel->r = *comp_r;
	} else {
		err = -EINVAL;
	}

	mutex_unlock(&priv->lock);
	return err;
}
/*
 * Snap one compose dimension to a scaling this code supports:
 * 1:1 or 1:2 of the crop dimension.
 *
 * With both the LE and GE selection flags set the caller demands an
 * exact value, so anything other than crop or crop / 2 is -ERANGE.
 * Otherwise round to the nearer of the two allowed values, honoring
 * the LE/GE rounding direction hints.
 */
static int csi_set_scale(u32 *compose, u32 crop, u32 flags)
{
	const u32 exact = V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE;
	u32 want = *compose;
	bool downscale;

	if ((flags & exact) == exact && want != crop && want != crop / 2)
		return -ERANGE;

	downscale = want <= crop / 2 ||
		    (want < crop * 3 / 4 && !(flags & V4L2_SEL_FLAG_GE)) ||
		    (want < crop && (flags & V4L2_SEL_FLAG_LE));
	*compose = downscale ? crop / 2 : crop;

	return 0;
}
/*
 * V4L2 subdev pad op: set the crop or compose rectangle on the sink
 * pad. The compose rectangle is snapped by csi_set_scale() to the
 * scalings supported here (1:1 or 1:2 of the crop size), and any
 * change is propagated to the source pad formats. Returns -EBUSY
 * while streaming is active.
 */
static int csi_set_selection(struct v4l2_subdev *sd,
			     struct v4l2_subdev_state *sd_state,
			     struct v4l2_subdev_selection *sel)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_config mbus_cfg = { .type = 0 };
	struct v4l2_mbus_framefmt *infmt;
	struct v4l2_rect *crop, *compose;
	int pad, ret;
	if (sel->pad != CSI_SINK_PAD)
		return -EINVAL;
	/* crop limits depend on the upstream bus configuration */
	ret = csi_get_upstream_mbus_config(priv, &mbus_cfg);
	if (ret) {
		v4l2_err(&priv->sd,
			 "failed to get upstream media bus configuration\n");
		return ret;
	}
	mutex_lock(&priv->lock);
	if (priv->stream_count > 0) {
		ret = -EBUSY;
		goto out;
	}
	infmt = __csi_get_fmt(priv, sd_state, CSI_SINK_PAD, sel->which);
	crop = __csi_get_crop(priv, sd_state, sel->which);
	compose = __csi_get_compose(priv, sd_state, sel->which);
	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		/*
		 * Modifying the crop rectangle always changes the format on
		 * the source pads. If the KEEP_CONFIG flag is set, just return
		 * the current crop rectangle.
		 */
		if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
			sel->r = priv->crop;
			if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
				*crop = sel->r;
			goto out;
		}
		csi_try_crop(priv, &sel->r, sd_state, infmt, &mbus_cfg);
		*crop = sel->r;
		/* Reset scaling to 1:1 */
		compose->width = crop->width;
		compose->height = crop->height;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		/*
		 * Modifying the compose rectangle always changes the format on
		 * the source pads. If the KEEP_CONFIG flag is set, just return
		 * the current compose rectangle.
		 */
		if (sel->flags & V4L2_SEL_FLAG_KEEP_CONFIG) {
			sel->r = priv->compose;
			if (sel->which == V4L2_SUBDEV_FORMAT_TRY)
				*compose = sel->r;
			goto out;
		}
		/* composing cannot move, only scale */
		sel->r.left = 0;
		sel->r.top = 0;
		ret = csi_set_scale(&sel->r.width, crop->width, sel->flags);
		if (ret)
			goto out;
		ret = csi_set_scale(&sel->r.height, crop->height, sel->flags);
		if (ret)
			goto out;
		*compose = sel->r;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}
	/* Reset source pads to sink compose rectangle */
	for (pad = CSI_SINK_PAD + 1; pad < CSI_NUM_PADS; pad++) {
		struct v4l2_mbus_framefmt *outfmt;
		outfmt = __csi_get_fmt(priv, sd_state, pad, sel->which);
		outfmt->width = compose->width;
		outfmt->height = compose->height;
	}
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * Core op: only the frame-interval-error event, with id 0, may be
 * subscribed on this subdev.
 */
static int csi_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
			       struct v4l2_event_subscription *sub)
{
	if (sub->type != V4L2_EVENT_IMX_FRAME_INTERVAL_ERROR || sub->id != 0)
		return -EINVAL;

	return v4l2_event_subscribe(fh, sub, 0, NULL);
}
/* Core op: drop a previously subscribed event; delegates to the core. */
static int csi_unsubscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
				 struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}
/*
 * Internal op, called when the subdev is registered with the v4l2
 * device: acquire the IPU CSI unit, set default pad formats, frame
 * intervals, crop/compose rectangles, and create and register the
 * capture video device attached to the IDMAC source pad.
 *
 * On failure, resources acquired so far are released in reverse
 * order via the goto chain at the bottom.
 */
static int csi_registered(struct v4l2_subdev *sd)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct ipu_csi *csi;
	int i, ret;
	u32 code;
	/* get handle to IPU CSI */
	csi = ipu_csi_get(priv->ipu, priv->csi_id);
	if (IS_ERR(csi)) {
		v4l2_err(&priv->sd, "failed to get CSI%d\n", priv->csi_id);
		return PTR_ERR(csi);
	}
	priv->csi = csi;
	for (i = 0; i < CSI_NUM_PADS; i++) {
		/* code 0 on the sink pad: let init pick a default */
		code = 0;
		if (i != CSI_SINK_PAD)
			imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
		/* set a default mbus format */
		ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
					      IMX_MEDIA_DEF_PIX_WIDTH,
					      IMX_MEDIA_DEF_PIX_HEIGHT, code,
					      V4L2_FIELD_NONE, &priv->cc[i]);
		if (ret)
			goto put_csi;
		/* init default frame interval */
		priv->frame_interval[i].numerator = 1;
		priv->frame_interval[i].denominator = 30;
	}
	/* disable frame skipping */
	priv->skip = &csi_skip[0];
	/* init default crop and compose rectangle sizes */
	priv->crop.width = IMX_MEDIA_DEF_PIX_WIDTH;
	priv->crop.height = IMX_MEDIA_DEF_PIX_HEIGHT;
	priv->compose.width = IMX_MEDIA_DEF_PIX_WIDTH;
	priv->compose.height = IMX_MEDIA_DEF_PIX_HEIGHT;
	/* frame interval monitor */
	priv->fim = imx_media_fim_init(&priv->sd);
	if (IS_ERR(priv->fim)) {
		ret = PTR_ERR(priv->fim);
		goto put_csi;
	}
	priv->vdev = imx_media_capture_device_init(priv->sd.dev, &priv->sd,
						   CSI_SRC_PAD_IDMAC, true);
	if (IS_ERR(priv->vdev)) {
		ret = PTR_ERR(priv->vdev);
		goto free_fim;
	}
	ret = imx_media_capture_device_register(priv->vdev, 0);
	if (ret)
		goto remove_vdev;
	return 0;
remove_vdev:
	imx_media_capture_device_remove(priv->vdev);
free_fim:
	if (priv->fim)
		imx_media_fim_free(priv->fim);
put_csi:
	ipu_csi_put(priv->csi);
	return ret;
}
/*
 * Internal op: undo csi_registered() - tear down the capture video
 * device, free the frame interval monitor and release the IPU CSI.
 */
static void csi_unregistered(struct v4l2_subdev *sd)
{
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	imx_media_capture_device_unregister(priv->vdev);
	imx_media_capture_device_remove(priv->vdev);
	if (priv->fim)
		imx_media_fim_free(priv->fim);
	if (priv->csi)
		ipu_csi_put(priv->csi);
}
/*
 * The CSI has only one fwnode endpoint, at the sink pad. Verify the
 * endpoint belongs to us, and return CSI_SINK_PAD.
 */
static int csi_get_fwnode_pad(struct media_entity *entity,
			      struct fwnode_endpoint *endpoint)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct csi_priv *priv = v4l2_get_subdevdata(sd);
	struct fwnode_handle *port = dev_fwnode(priv->dev);
	struct fwnode_handle *ep;
	int pad = -ENXIO;

	/* our port holds a single child endpoint */
	ep = fwnode_get_next_child_node(port, NULL);
	if (endpoint->local_fwnode == ep)
		pad = CSI_SINK_PAD;
	fwnode_handle_put(ep);

	return pad;
}
/* media entity operations */
static const struct media_entity_operations csi_entity_ops = {
	.link_setup = csi_link_setup,
	.link_validate = v4l2_subdev_link_validate,
	.get_fwnode_pad = csi_get_fwnode_pad,
};
/* core ops: event (un)subscription */
static const struct v4l2_subdev_core_ops csi_core_ops = {
	.subscribe_event = csi_subscribe_event,
	.unsubscribe_event = csi_unsubscribe_event,
};
/* video ops: streaming and frame interval control */
static const struct v4l2_subdev_video_ops csi_video_ops = {
	.g_frame_interval = csi_g_frame_interval,
	.s_frame_interval = csi_s_frame_interval,
	.s_stream = csi_s_stream,
};
/* pad ops: format/selection negotiation and enumeration */
static const struct v4l2_subdev_pad_ops csi_pad_ops = {
	.init_cfg = imx_media_init_cfg,
	.enum_mbus_code = csi_enum_mbus_code,
	.enum_frame_size = csi_enum_frame_size,
	.enum_frame_interval = csi_enum_frame_interval,
	.get_fmt = csi_get_fmt,
	.set_fmt = csi_set_fmt,
	.get_selection = csi_get_selection,
	.set_selection = csi_set_selection,
	.link_validate = csi_link_validate,
};
static const struct v4l2_subdev_ops csi_subdev_ops = {
	.core = &csi_core_ops,
	.video = &csi_video_ops,
	.pad = &csi_pad_ops,
};
/* internal ops: registration lifecycle */
static const struct v4l2_subdev_internal_ops csi_internal_ops = {
	.registered = csi_registered,
	.unregistered = csi_unregistered,
};
/*
 * Async notifier bound callback: when the upstream subdev binds,
 * create fwnode-described media links to our sink pad.
 */
static int imx_csi_notify_bound(struct v4l2_async_notifier *notifier,
				struct v4l2_subdev *sd,
				struct v4l2_async_connection *asd)
{
	struct csi_priv *priv = notifier_to_dev(notifier);
	struct media_pad *sink = &priv->sd.entity.pads[CSI_SINK_PAD];
	/*
	 * If the subdev is a video mux, it must be one of the CSI
	 * muxes. Mark it as such via its group id.
	 */
	if (sd->entity.function == MEDIA_ENT_F_VID_MUX)
		sd->grp_id = IMX_MEDIA_GRP_ID_CSI_MUX;
	return v4l2_create_fwnode_links_to_pad(sd, sink, 0);
}
static const struct v4l2_async_notifier_operations csi_notify_ops = {
	.bound = imx_csi_notify_bound,
};
/*
 * Register the async notifier for the upstream source connected to
 * this CSI's port endpoint, then register the subdev itself.
 * Notifier cleanup on failure is handled by the caller (probe).
 */
static int imx_csi_async_register(struct csi_priv *priv)
{
	struct v4l2_async_connection *asd = NULL;
	struct fwnode_handle *ep;
	unsigned int port;
	int ret;
	v4l2_async_subdev_nf_init(&priv->notifier, &priv->sd);
	/* get this CSI's port id */
	ret = fwnode_property_read_u32(dev_fwnode(priv->dev), "reg", &port);
	if (ret < 0)
		return ret;
	/* the endpoint lives on the parent (IPU) fwnode */
	ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(priv->dev->parent),
					     port, 0,
					     FWNODE_GRAPH_ENDPOINT_NEXT);
	if (ep) {
		asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep,
						      struct v4l2_async_connection);
		fwnode_handle_put(ep);
		if (IS_ERR(asd)) {
			ret = PTR_ERR(asd);
			/* OK if asd already exists */
			if (ret != -EEXIST)
				return ret;
		}
	}
	priv->notifier.ops = &csi_notify_ops;
	ret = v4l2_async_nf_register(&priv->notifier);
	if (ret)
		return ret;
	return v4l2_async_register_subdev(&priv->sd);
}
/*
 * Platform probe: set up the CSI subdev (identity, pads, controls,
 * pinctrl) and kick off async registration of the upstream source.
 */
static int imx_csi_probe(struct platform_device *pdev)
{
	struct ipu_client_platformdata *pdata;
	struct pinctrl *pinctrl;
	struct csi_priv *priv;
	int i, ret;
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	platform_set_drvdata(pdev, &priv->sd);
	priv->dev = &pdev->dev;
	ret = dma_set_coherent_mask(priv->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;
	/* get parent IPU */
	priv->ipu = dev_get_drvdata(priv->dev->parent);
	/* get our CSI id */
	pdata = priv->dev->platform_data;
	priv->csi_id = pdata->csi;
	/* SMFC channel pair: CSI0 uses 0/1, CSI1 uses 2/3 */
	priv->smfc_id = (priv->csi_id == 0) ? 0 : 2;
	priv->active_output_pad = CSI_SRC_PAD_IDMAC;
	timer_setup(&priv->eof_timeout_timer, csi_idmac_eof_timeout, 0);
	spin_lock_init(&priv->irqlock);
	v4l2_subdev_init(&priv->sd, &csi_subdev_ops);
	v4l2_set_subdevdata(&priv->sd, priv);
	priv->sd.internal_ops = &csi_internal_ops;
	priv->sd.entity.ops = &csi_entity_ops;
	priv->sd.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
	priv->sd.dev = &pdev->dev;
	priv->sd.fwnode = of_fwnode_handle(pdata->of_node);
	priv->sd.owner = THIS_MODULE;
	priv->sd.flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	priv->sd.grp_id = priv->csi_id ?
		IMX_MEDIA_GRP_ID_IPU_CSI1 : IMX_MEDIA_GRP_ID_IPU_CSI0;
	imx_media_grp_id_to_sd_name(priv->sd.name, sizeof(priv->sd.name),
				    priv->sd.grp_id, ipu_get_num(priv->ipu));
	/* one sink pad, the rest are sources */
	for (i = 0; i < CSI_NUM_PADS; i++)
		priv->pad[i].flags = (i == CSI_SINK_PAD) ?
			MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
	ret = media_entity_pads_init(&priv->sd.entity, CSI_NUM_PADS,
				     priv->pad);
	if (ret)
		return ret;
	mutex_init(&priv->lock);
	v4l2_ctrl_handler_init(&priv->ctrl_hdlr, 0);
	priv->sd.ctrl_handler = &priv->ctrl_hdlr;
	/*
	 * The IPUv3 driver did not assign an of_node to this
	 * device. As a result, pinctrl does not automatically
	 * configure our pin groups, so we need to do that manually
	 * here, after setting this device's of_node.
	 */
	priv->dev->of_node = pdata->of_node;
	pinctrl = devm_pinctrl_get_select_default(priv->dev);
	if (IS_ERR(pinctrl)) {
		ret = PTR_ERR(pinctrl);
		dev_dbg(priv->dev,
			"devm_pinctrl_get_select_default() failed: %d\n", ret);
		/* -ENODEV just means no pinctrl entry; not fatal */
		if (ret != -ENODEV)
			goto free;
	}
	ret = imx_csi_async_register(priv);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	v4l2_async_nf_unregister(&priv->notifier);
	v4l2_async_nf_cleanup(&priv->notifier);
free:
	v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
	mutex_destroy(&priv->lock);
	return ret;
}
/* Platform remove: free controls/lock, tear down notifier and subdev. */
static void imx_csi_remove(struct platform_device *pdev)
{
	struct v4l2_subdev *sd = platform_get_drvdata(pdev);
	struct csi_priv *priv = sd_to_dev(sd);
	v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
	mutex_destroy(&priv->lock);
	v4l2_async_nf_unregister(&priv->notifier);
	v4l2_async_nf_cleanup(&priv->notifier);
	v4l2_async_unregister_subdev(sd);
	media_entity_cleanup(&sd->entity);
}
/* platform device id table; matched by the IPUv3 parent driver */
static const struct platform_device_id imx_csi_ids[] = {
	{ .name = "imx-ipuv3-csi" },
	{ },
};
MODULE_DEVICE_TABLE(platform, imx_csi_ids);
static struct platform_driver imx_csi_driver = {
	.probe = imx_csi_probe,
	.remove_new = imx_csi_remove,
	.id_table = imx_csi_ids,
	.driver = {
		.name = "imx-ipuv3-csi",
	},
};
module_platform_driver(imx_csi_driver);
MODULE_DESCRIPTION("i.MX CSI subdev driver");
MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/imx/imx-media-csi.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
*
* Copyright (c) 2016-2019 Mentor Graphics Inc.
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <media/v4l2-async.h>
#include <media/v4l2-event.h>
#include <media/imx.h>
#include "imx-media.h"
/* map an async notifier back to its enclosing imx_media_dev */
static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
{
	return container_of(n, struct imx_media_dev, notifier);
}
/*
 * Async subdev bound notifier: when a CSI binds, register the IPU
 * internal subdevs behind it.
 */
static int imx_media_subdev_bound(struct v4l2_async_notifier *notifier,
				  struct v4l2_subdev *sd,
				  struct v4l2_async_connection *asd)
{
	struct imx_media_dev *imxmd = notifier2dev(notifier);

	if (sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI) {
		/* register the IPU internal subdevs */
		int ret = imx_media_register_ipu_internal_subdevs(imxmd, sd);

		if (ret)
			return ret;
	}

	dev_dbg(imxmd->md.dev, "subdev %s bound\n", sd->name);

	return 0;
}
/*
 * Async subdev complete notifier: run the common i.MX5/6/7 probe
 * completion, then create and register the mem2mem CSC/scaler
 * video device under the media device mutex.
 */
static int imx6_media_probe_complete(struct v4l2_async_notifier *notifier)
{
	struct imx_media_dev *imxmd = notifier2dev(notifier);
	int ret;
	/* call the imx5/6/7 common probe completion handler */
	ret = imx_media_probe_complete(notifier);
	if (ret)
		return ret;
	mutex_lock(&imxmd->mutex);
	imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
	if (IS_ERR(imxmd->m2m_vdev)) {
		ret = PTR_ERR(imxmd->m2m_vdev);
		/* clear so remove() doesn't touch a stale error pointer */
		imxmd->m2m_vdev = NULL;
		goto unlock;
	}
	ret = imx_media_csc_scaler_device_register(imxmd->m2m_vdev);
unlock:
	mutex_unlock(&imxmd->mutex);
	return ret;
}
/* async notifier callbacks for the i.MX5/6 media device */
static const struct v4l2_async_notifier_operations imx_media_notifier_ops = {
	.bound = imx_media_subdev_bound,
	.complete = imx6_media_probe_complete,
};
/*
 * Platform probe: create the media device, add all CSI ports from
 * the device tree, and register the async notifier that completes
 * the setup once all subdevs have bound.
 */
static int imx_media_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct imx_media_dev *imxmd;
	int ret;
	imxmd = imx_media_dev_init(dev, NULL);
	if (IS_ERR(imxmd))
		return PTR_ERR(imxmd);
	ret = imx_media_add_of_subdevs(imxmd, node);
	if (ret) {
		v4l2_err(&imxmd->v4l2_dev,
			 "add_of_subdevs failed with %d\n", ret);
		goto cleanup;
	}
	ret = imx_media_dev_notifier_register(imxmd, &imx_media_notifier_ops);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	/* unwind imx_media_dev_init() */
	v4l2_async_nf_cleanup(&imxmd->notifier);
	v4l2_device_unregister(&imxmd->v4l2_dev);
	media_device_cleanup(&imxmd->md);
	return ret;
}
/* Platform remove: tear down m2m device, notifier and media device. */
static void imx_media_remove(struct platform_device *pdev)
{
	struct imx_media_dev *imxmd =
		(struct imx_media_dev *)platform_get_drvdata(pdev);
	v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
	/* m2m_vdev is only set once probe completion succeeded */
	if (imxmd->m2m_vdev) {
		imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
		imxmd->m2m_vdev = NULL;
	}
	v4l2_async_nf_unregister(&imxmd->notifier);
	imx_media_unregister_ipu_internal_subdevs(imxmd);
	v4l2_async_nf_cleanup(&imxmd->notifier);
	media_device_unregister(&imxmd->md);
	v4l2_device_unregister(&imxmd->v4l2_dev);
	media_device_cleanup(&imxmd->md);
}
/* device tree match table */
static const struct of_device_id imx_media_dt_ids[] = {
	{ .compatible = "fsl,imx-capture-subsystem" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_media_dt_ids);
static struct platform_driver imx_media_pdrv = {
	.probe = imx_media_probe,
	.remove_new = imx_media_remove,
	.driver = {
		.name = "imx-media",
		.of_match_table = imx_media_dt_ids,
	},
};
module_platform_driver(imx_media_pdrv);
MODULE_DESCRIPTION("i.MX5/6 v4l2 media controller driver");
MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/imx/imx-media-dev.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Media driver for Freescale i.MX5/6 SOC
*
* Open Firmware parsing.
*
* Copyright (c) 2016 Mentor Graphics Inc.
*/
#include <linux/of_platform.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
#include <linux/of_graph.h>
#include <video/imx-ipu-v3.h>
#include "imx-media.h"
/*
 * Add one CSI port's fwnode to the media device's async notifier.
 * Returns -ENODEV when the node is disabled; -EEXIST from the
 * notifier (already added) is passed back to the caller.
 */
static int imx_media_of_add_csi(struct imx_media_dev *imxmd,
				struct device_node *csi_np)
{
	struct v4l2_async_connection *asd;

	if (!of_device_is_available(csi_np)) {
		dev_dbg(imxmd->md.dev, "%s: %pOFn not enabled\n", __func__,
			csi_np);
		return -ENODEV;
	}

	/* add CSI fwnode to async notifier */
	asd = v4l2_async_nf_add_fwnode(&imxmd->notifier,
				       of_fwnode_handle(csi_np),
				       struct v4l2_async_connection);
	if (!IS_ERR(asd))
		return 0;

	if (PTR_ERR(asd) == -EEXIST)
		dev_dbg(imxmd->md.dev, "%s: already added %pOFn\n",
			__func__, csi_np);

	return PTR_ERR(asd);
}
/*
 * Walk the "ports" phandle list of @np and add each available CSI
 * port node to the media device's async notifier.
 *
 * A port that is disabled (-ENODEV) or already added (-EEXIST) is
 * silently skipped; any other error aborts the scan.
 *
 * of_parse_phandle() returns the node with an elevated refcount, and
 * the notifier takes its own fwnode reference, so the local reference
 * must be dropped on every iteration. The previous code only called
 * of_node_put() on the skip/error paths, leaking one reference per
 * successfully added port.
 */
int imx_media_add_of_subdevs(struct imx_media_dev *imxmd,
			     struct device_node *np)
{
	struct device_node *csi_np;
	int i, ret;

	for (i = 0; ; i++) {
		csi_np = of_parse_phandle(np, "ports", i);
		if (!csi_np)
			break;

		ret = imx_media_of_add_csi(imxmd, csi_np);
		/* drop the reference taken by of_parse_phandle() */
		of_node_put(csi_np);
		if (ret) {
			/* unavailable or already added is not an error */
			if (ret == -ENODEV || ret == -EEXIST)
				continue;

			/* other error, can't continue */
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(imx_media_add_of_subdevs);
| linux-master | drivers/staging/media/imx/imx-media-of.c |
// SPDX-License-Identifier: GPL-2.0
/*
* V4L2 Media Controller Driver for Freescale common i.MX5/6/7 SOC
*
* Copyright (c) 2019 Linaro Ltd
* Copyright (c) 2016 Mentor Graphics Inc.
*/
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include "imx-media.h"
/* map an async notifier back to its enclosing imx_media_dev */
static inline struct imx_media_dev *notifier2dev(struct v4l2_async_notifier *n)
{
	return container_of(n, struct imx_media_dev, notifier);
}
/*
 * Create the missing media links from the CSI-2 receiver.
 * Called after all async subdevs have bound.
 */
static void imx_media_create_csi2_links(struct imx_media_dev *imxmd)
{
	struct v4l2_subdev *csi2 = NULL;
	struct v4l2_subdev *sd;

	/* locate the CSI-2 receiver subdev, if one was registered */
	list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
		if (sd->grp_id == IMX_MEDIA_GRP_ID_CSI2) {
			csi2 = sd;
			break;
		}
	}
	if (!csi2)
		return;

	/* link it to every CSI and CSI mux */
	list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
		bool is_csi = sd->grp_id & IMX_MEDIA_GRP_ID_IPU_CSI;
		bool is_mux = sd->grp_id & IMX_MEDIA_GRP_ID_CSI_MUX;

		if (is_csi || is_mux)
			v4l2_create_fwnode_links(csi2, sd);
	}
}
/*
 * adds given video device to given imx-media source pad vdev list.
 * Continues upstream from the pad entity's sink pads.
 *
 * Recursion terminates at non-subdev entities, at pads already
 * holding this vdev, and at entities with no further sink links.
 */
static int imx_media_add_vdev_to_pad(struct imx_media_dev *imxmd,
				     struct imx_media_video_dev *vdev,
				     struct media_pad *srcpad)
{
	struct media_entity *entity = srcpad->entity;
	struct imx_media_pad_vdev *pad_vdev;
	struct list_head *pad_vdev_list;
	struct media_link *link;
	struct v4l2_subdev *sd;
	int i, ret;
	/* skip this entity if not a v4l2_subdev */
	if (!is_media_entity_v4l2_subdev(entity))
		return 0;
	sd = media_entity_to_v4l2_subdev(entity);
	pad_vdev_list = to_pad_vdev_list(sd, srcpad->index);
	if (!pad_vdev_list) {
		v4l2_warn(&imxmd->v4l2_dev, "%s:%u has no vdev list!\n",
			  entity->name, srcpad->index);
		/*
		 * shouldn't happen, but no reason to fail driver load,
		 * just skip this entity.
		 */
		return 0;
	}
	/* just return if we've been here before */
	list_for_each_entry(pad_vdev, pad_vdev_list, list) {
		if (pad_vdev->vdev == vdev)
			return 0;
	}
	dev_dbg(imxmd->md.dev, "adding %s to pad %s:%u\n",
		vdev->vfd->entity.name, entity->name, srcpad->index);
	pad_vdev = devm_kzalloc(imxmd->md.dev, sizeof(*pad_vdev), GFP_KERNEL);
	if (!pad_vdev)
		return -ENOMEM;
	/* attach this vdev to this pad */
	pad_vdev->vdev = vdev;
	list_add_tail(&pad_vdev->list, pad_vdev_list);
	/* move upstream from this entity's sink pads */
	for (i = 0; i < entity->num_pads; i++) {
		struct media_pad *pad = &entity->pads[i];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			continue;
		/* recurse through every link feeding this sink pad */
		list_for_each_entry(link, &entity->links, list) {
			if (link->sink != pad)
				continue;
			ret = imx_media_add_vdev_to_pad(imxmd, vdev,
							link->source);
			if (ret)
				return ret;
		}
	}
	return 0;
}
/*
 * For every subdevice, allocate an array of list_head's, one list_head
 * for each pad, to hold the list of video devices reachable from that
 * pad.
 *
 * The arrays are devm-allocated against the media device, so they
 * are freed automatically on driver unbind.
 */
static int imx_media_alloc_pad_vdev_lists(struct imx_media_dev *imxmd)
{
	struct list_head *vdev_lists;
	struct media_entity *entity;
	struct v4l2_subdev *sd;
	int i;
	list_for_each_entry(sd, &imxmd->v4l2_dev.subdevs, list) {
		entity = &sd->entity;
		vdev_lists = devm_kcalloc(imxmd->md.dev,
					  entity->num_pads, sizeof(*vdev_lists),
					  GFP_KERNEL);
		if (!vdev_lists)
			return -ENOMEM;
		/* attach to the subdev's host private pointer */
		sd->host_priv = vdev_lists;
		for (i = 0; i < entity->num_pads; i++)
			INIT_LIST_HEAD(to_pad_vdev_list(sd, i));
	}
	return 0;
}
/*
 * form the vdev lists in all imx-media source pads: for each
 * registered video device, walk upstream from its first link and
 * record it on every source pad it can be reached from.
 */
static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
{
	struct imx_media_video_dev *vdev;
	struct media_link *link;
	int ret;
	ret = imx_media_alloc_pad_vdev_lists(imxmd);
	if (ret)
		return ret;
	list_for_each_entry(vdev, &imxmd->vdev_list, list) {
		/* a video device entity has exactly one (sink) link */
		link = list_first_entry(&vdev->vfd->entity.links,
					struct media_link, list);
		ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * async subdev complete notifier: create the CSI-2 links, build the
 * per-pad vdev lists, create subdev device nodes, then register the
 * media device.
 */
int imx_media_probe_complete(struct v4l2_async_notifier *notifier)
{
	struct imx_media_dev *imxmd = notifier2dev(notifier);
	int ret;
	mutex_lock(&imxmd->mutex);
	imx_media_create_csi2_links(imxmd);
	ret = imx_media_create_pad_vdev_lists(imxmd);
	if (ret)
		goto unlock;
	ret = v4l2_device_register_subdev_nodes(&imxmd->v4l2_dev);
unlock:
	mutex_unlock(&imxmd->mutex);
	if (ret)
		return ret;
	/* registered outside the lock, after all internal setup is done */
	return media_device_register(&imxmd->md);
}
EXPORT_SYMBOL_GPL(imx_media_probe_complete);
/*
 * adds controls to a video device from an entity subdevice.
 * Continues upstream from the entity's sink pads.
 *
 * Recursion stops at entities that are not v4l2 subdevs and at
 * sink pads with no connected remote pad.
 */
static int imx_media_inherit_controls(struct imx_media_dev *imxmd,
				      struct video_device *vfd,
				      struct media_entity *entity)
{
	int i, ret = 0;
	if (is_media_entity_v4l2_subdev(entity)) {
		struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
		dev_dbg(imxmd->md.dev,
			"adding controls to %s from %s\n",
			vfd->entity.name, sd->entity.name);
		/* filter=NULL, from_other_dev=true: take all controls */
		ret = v4l2_ctrl_add_handler(vfd->ctrl_handler,
					    sd->ctrl_handler,
					    NULL, true);
		if (ret)
			return ret;
	}
	/* move upstream */
	for (i = 0; i < entity->num_pads; i++) {
		struct media_pad *pad, *spad = &entity->pads[i];
		if (!(spad->flags & MEDIA_PAD_FL_SINK))
			continue;
		pad = media_pad_remote_pad_first(spad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			continue;
		ret = imx_media_inherit_controls(imxmd, vfd, pad->entity);
		if (ret)
			break;
	}
	return ret;
}
/*
 * media_device link_notify op: run the standard pipeline notify,
 * then keep video device control handlers in sync with link state -
 * reset them before a link is disabled, rebuild them after a link
 * is enabled.
 */
static int imx_media_link_notify(struct media_link *link, u32 flags,
				 unsigned int notification)
{
	struct imx_media_dev *imxmd = container_of(link->graph_obj.mdev,
						   struct imx_media_dev, md);
	struct media_entity *source = link->source->entity;
	struct imx_media_pad_vdev *pad_vdev;
	struct list_head *pad_vdev_list;
	struct video_device *vfd;
	struct v4l2_subdev *sd;
	int pad_idx, ret;
	ret = v4l2_pipeline_link_notify(link, flags, notification);
	if (ret)
		return ret;
	/* don't bother if source is not a subdev */
	if (!is_media_entity_v4l2_subdev(source))
		return 0;
	sd = media_entity_to_v4l2_subdev(source);
	pad_idx = link->source->index;
	pad_vdev_list = to_pad_vdev_list(sd, pad_idx);
	if (!pad_vdev_list) {
		/* nothing to do if source sd has no pad vdev list */
		return 0;
	}
	/*
	 * Before disabling a link, reset controls for all video
	 * devices reachable from this link.
	 *
	 * After enabling a link, refresh controls for all video
	 * devices reachable from this link.
	 */
	if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH &&
	    !(flags & MEDIA_LNK_FL_ENABLED)) {
		list_for_each_entry(pad_vdev, pad_vdev_list, list) {
			vfd = pad_vdev->vdev->vfd;
			if (!vfd->ctrl_handler)
				continue;
			dev_dbg(imxmd->md.dev,
				"reset controls for %s\n",
				vfd->entity.name);
			v4l2_ctrl_handler_free(vfd->ctrl_handler);
			v4l2_ctrl_handler_init(vfd->ctrl_handler, 0);
		}
	} else if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH &&
		   (link->flags & MEDIA_LNK_FL_ENABLED)) {
		list_for_each_entry(pad_vdev, pad_vdev_list, list) {
			vfd = pad_vdev->vdev->vfd;
			if (!vfd->ctrl_handler)
				continue;
			dev_dbg(imxmd->md.dev,
				"refresh controls for %s\n",
				vfd->entity.name);
			/* re-inherit controls from the new pipeline */
			ret = imx_media_inherit_controls(imxmd, vfd,
							 &vfd->entity);
			if (ret)
				break;
		}
	}
	return ret;
}
/*
 * v4l2_device notify handler: fan a subdev event out to every video
 * device reachable from any of the subdev's pads.
 */
static void imx_media_notify(struct v4l2_subdev *sd, unsigned int notification,
			     void *arg)
{
	struct media_entity *entity = &sd->entity;
	int i;

	if (notification != V4L2_DEVICE_NOTIFY_EVENT)
		return;

	for (i = 0; i < entity->num_pads; i++) {
		struct imx_media_pad_vdev *pad_vdev;
		struct list_head *vdevs;

		vdevs = to_pad_vdev_list(sd, entity->pads[i].index);
		if (!vdevs)
			continue;

		list_for_each_entry(pad_vdev, vdevs, list)
			v4l2_event_queue(pad_vdev->vdev->vfd, arg);
	}
}
/* default async notifier ops, used when the caller passes none */
static const struct v4l2_async_notifier_operations imx_media_notifier_ops = {
	.complete = imx_media_probe_complete,
};
/* default media device ops */
static const struct media_device_ops imx_media_md_ops = {
	.link_notify = imx_media_link_notify,
};
/*
 * Allocate and initialize an imx_media_dev: media device, v4l2
 * device and async notifier. @ops may be NULL to use the default
 * media device ops. Returns the device or an ERR_PTR.
 */
struct imx_media_dev *imx_media_dev_init(struct device *dev,
					 const struct media_device_ops *ops)
{
	struct imx_media_dev *imxmd;
	int ret;
	imxmd = devm_kzalloc(dev, sizeof(*imxmd), GFP_KERNEL);
	if (!imxmd)
		return ERR_PTR(-ENOMEM);
	dev_set_drvdata(dev, imxmd);
	strscpy(imxmd->md.model, "imx-media", sizeof(imxmd->md.model));
	imxmd->md.ops = ops ? ops : &imx_media_md_ops;
	imxmd->md.dev = dev;
	mutex_init(&imxmd->mutex);
	imxmd->v4l2_dev.mdev = &imxmd->md;
	imxmd->v4l2_dev.notify = imx_media_notify;
	strscpy(imxmd->v4l2_dev.name, "imx-media",
		sizeof(imxmd->v4l2_dev.name));
	snprintf(imxmd->md.bus_info, sizeof(imxmd->md.bus_info),
		 "platform:%s", dev_name(imxmd->md.dev));
	media_device_init(&imxmd->md);
	ret = v4l2_device_register(dev, &imxmd->v4l2_dev);
	if (ret < 0) {
		v4l2_err(&imxmd->v4l2_dev,
			 "Failed to register v4l2_device: %d\n", ret);
		goto cleanup;
	}
	INIT_LIST_HEAD(&imxmd->vdev_list);
	v4l2_async_nf_init(&imxmd->notifier, &imxmd->v4l2_dev);
	return imxmd;
cleanup:
	media_device_cleanup(&imxmd->md);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(imx_media_dev_init);
/*
 * Register the media device's async notifier. @ops may be NULL to
 * use the default completion-only ops. Fails with -ENODEV when no
 * subdevs were added to the notifier.
 */
int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
			    const struct v4l2_async_notifier_operations *ops)
{
	int ret;

	/* no subdevs? just bail */
	if (list_empty(&imxmd->notifier.waiting_list)) {
		v4l2_err(&imxmd->v4l2_dev, "no subdevs\n");
		return -ENODEV;
	}

	/* prepare the async subdev notifier and register it */
	imxmd->notifier.ops = ops ? ops : &imx_media_notifier_ops;
	ret = v4l2_async_nf_register(&imxmd->notifier);
	if (ret)
		v4l2_err(&imxmd->v4l2_dev,
			 "v4l2_async_nf_register failed with %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(imx_media_dev_notifier_register);
| linux-master | drivers/staging/media/imx/imx-media-dev-common.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Capture IC Preprocess Subdev for Freescale i.MX5/6 SOC
*
* This subdevice handles capture of video frames from the CSI or VDIC,
* which are routed directly to the Image Converter preprocess tasks,
* for resizing, colorspace conversion, and rotation.
*
* Copyright (c) 2012-2017 Mentor Graphics Inc.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include <media/v4l2-subdev.h>
#include <media/imx.h>
#include "imx-media.h"
#include "imx-ic.h"
/*
 * Min/Max supported width and heights.
 *
 * We allow planar output, so we have to align width at the source pad
 * by 16 pixels to meet IDMAC alignment requirements for possible planar
 * output.
 *
 * TODO: move this into pad format negotiation, if capture device
 * has not requested a planar format, we should allow 8 pixel
 * alignment at the source pad.
 */
/* sink (input) frame limits */
#define MIN_W_SINK       32
#define MIN_H_SINK       32
#define MAX_W_SINK     4096
#define MAX_H_SINK     4096
#define W_ALIGN_SINK      3 /* multiple of 8 pixels */
#define H_ALIGN_SINK      1 /* multiple of 2 lines */
/* source (output) frame limits */
#define MAX_W_SRC      1024
#define MAX_H_SRC      1024
#define W_ALIGN_SRC       1 /* multiple of 2 pixels */
#define H_ALIGN_SRC       1 /* multiple of 2 lines */
#define S_ALIGN           1 /* multiple of 2 */
/*
 * Per-task private state for one IC preprocess (ENC or VF) subdev:
 * IPU resources, pad formats, active buffers and streaming state.
 */
struct prp_priv {
	struct imx_ic_priv *ic_priv;
	struct media_pad pad[PRPENCVF_NUM_PADS];
	/* the video device at output pad */
	struct imx_media_video_dev *vdev;
	/* lock to protect all members below */
	struct mutex lock;
	/* IPU units we require */
	struct ipu_ic *ic;
	struct ipuv3_channel *out_ch;
	struct ipuv3_channel *rot_in_ch;
	struct ipuv3_channel *rot_out_ch;
	/* active vb2 buffers to send to video dev sink */
	struct imx_media_buffer *active_vb2_buf[2];
	struct imx_media_dma_buf underrun_buf;
	int ipu_buf_num;  /* ipu double buffer index: 0-1 */
	/* the sink for the captured frames */
	struct media_entity *sink;
	/* the source subdev */
	struct v4l2_subdev *src_sd;
	struct v4l2_mbus_framefmt format_mbus[PRPENCVF_NUM_PADS];
	const struct imx_media_pixfmt *cc[PRPENCVF_NUM_PADS];
	struct v4l2_fract frame_interval;
	struct imx_media_dma_buf rot_buf[2];
	/* controls */
	struct v4l2_ctrl_handler ctrl_hdlr;
	int  rotation; /* degrees */
	bool hflip;
	bool vflip;
	/* derived from rotation, hflip, vflip controls */
	enum ipu_rotate_mode rot_mode;
	spinlock_t irqlock; /* protect eof_irq handler */
	struct timer_list eof_timeout_timer;
	int eof_irq;
	int nfb4eof_irq;
	int stream_count;
	u32 frame_sequence; /* frame sequence counter */
	bool last_eof;  /* waiting for last EOF at stream off */
	bool nfb4eof;    /* NFB4EOF encountered during streaming */
	bool interweave_swap; /* swap top/bottom lines when interweaving */
	struct completion last_eof_comp;
};
/* IDMAC channel triplet (output, rotator in/out) for each IC task */
static const struct prp_channels {
	u32 out_ch;
	u32 rot_in_ch;
	u32 rot_out_ch;
} prp_channel[] = {
	[IC_TASK_ENCODER] = {
		.out_ch = IPUV3_CHANNEL_IC_PRP_ENC_MEM,
		.rot_in_ch = IPUV3_CHANNEL_MEM_ROT_ENC,
		.rot_out_ch = IPUV3_CHANNEL_ROT_ENC_MEM,
	},
	[IC_TASK_VIEWFINDER] = {
		.out_ch = IPUV3_CHANNEL_IC_PRP_VF_MEM,
		.rot_in_ch = IPUV3_CHANNEL_MEM_ROT_VF,
		.rot_out_ch = IPUV3_CHANNEL_ROT_VF_MEM,
	},
};
/* map a subdev to the task-private state stored in its IC private data */
static inline struct prp_priv *sd_to_priv(struct v4l2_subdev *sd)
{
	struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
	return ic_priv->task_priv;
}
/*
 * Release the IC task and the three IDMAC channels. Safe to call on a
 * partially-acquired set (handles that were never obtained are NULL),
 * and leaves every handle NULLed afterwards.
 */
static void prp_put_ipu_resources(struct prp_priv *priv)
{
	if (priv->ic) {
		ipu_ic_put(priv->ic);
		priv->ic = NULL;
	}

	if (priv->out_ch) {
		ipu_idmac_put(priv->out_ch);
		priv->out_ch = NULL;
	}

	if (priv->rot_in_ch) {
		ipu_idmac_put(priv->rot_in_ch);
		priv->rot_in_ch = NULL;
	}

	if (priv->rot_out_ch) {
		ipu_idmac_put(priv->rot_out_ch);
		priv->rot_out_ch = NULL;
	}
}
/*
 * Acquire the IPU units needed by this task: the IC task itself and the
 * three IDMAC channels (prp output, rotator input, rotator output) as
 * listed in prp_channel[] for this task id.
 *
 * On any failure all already-acquired resources are released via
 * prp_put_ipu_resources() and the error is returned.
 */
static int prp_get_ipu_resources(struct prp_priv *priv)
{
	struct imx_ic_priv *ic_priv = priv->ic_priv;
	struct ipu_ic *ic;
	struct ipuv3_channel *out_ch, *rot_in_ch, *rot_out_ch;
	int ret, task = ic_priv->task_id;
	ic = ipu_ic_get(ic_priv->ipu, task);
	if (IS_ERR(ic)) {
		v4l2_err(&ic_priv->sd, "failed to get IC\n");
		ret = PTR_ERR(ic);
		goto out;
	}
	priv->ic = ic;
	out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].out_ch);
	if (IS_ERR(out_ch)) {
		v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
			 prp_channel[task].out_ch);
		ret = PTR_ERR(out_ch);
		goto out;
	}
	priv->out_ch = out_ch;
	rot_in_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_in_ch);
	if (IS_ERR(rot_in_ch)) {
		v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
			 prp_channel[task].rot_in_ch);
		ret = PTR_ERR(rot_in_ch);
		goto out;
	}
	priv->rot_in_ch = rot_in_ch;
	rot_out_ch = ipu_idmac_get(ic_priv->ipu, prp_channel[task].rot_out_ch);
	if (IS_ERR(rot_out_ch)) {
		v4l2_err(&ic_priv->sd, "could not get IDMAC channel %u\n",
			 prp_channel[task].rot_out_ch);
		ret = PTR_ERR(rot_out_ch);
		goto out;
	}
	priv->rot_out_ch = rot_out_ch;
	return 0;
out:
	/* releases whatever was stored in priv before the failure */
	prp_put_ipu_resources(priv);
	return ret;
}
/*
 * Complete the currently active vb2 buffer for the given channel and
 * install the next one. Called from the EOF interrupt handler with
 * priv->irqlock held.
 *
 * The finished buffer is returned to vb2 with DONE status, or ERROR if
 * an NFB4EOF occurred during the frame. If no new buffer is queued, the
 * internal underrun buffer is programmed instead so the channel can
 * keep running (that frame is dropped).
 */
static void prp_vb2_buf_done(struct prp_priv *priv, struct ipuv3_channel *ch)
{
	struct imx_media_video_dev *vdev = priv->vdev;
	struct imx_media_buffer *done, *next;
	struct vb2_buffer *vb;
	dma_addr_t phys;
	done = priv->active_vb2_buf[priv->ipu_buf_num];
	if (done) {
		done->vbuf.field = vdev->fmt.field;
		done->vbuf.sequence = priv->frame_sequence;
		vb = &done->vbuf.vb2_buf;
		vb->timestamp = ktime_get_ns();
		vb2_buffer_done(vb, priv->nfb4eof ?
				VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
	}
	/* sequence advances even for dropped (underrun) frames */
	priv->frame_sequence++;
	priv->nfb4eof = false;
	/* get next queued buffer */
	next = imx_media_capture_device_next_buf(vdev);
	if (next) {
		phys = vb2_dma_contig_plane_dma_addr(&next->vbuf.vb2_buf, 0);
		priv->active_vb2_buf[priv->ipu_buf_num] = next;
	} else {
		phys = priv->underrun_buf.phys;
		priv->active_vb2_buf[priv->ipu_buf_num] = NULL;
	}
	if (ipu_idmac_buffer_is_ready(ch, priv->ipu_buf_num))
		ipu_idmac_clear_buffer(ch, priv->ipu_buf_num);
	/* when interweaving bottom-field-first, scan starts one line down */
	if (priv->interweave_swap && ch == priv->out_ch)
		phys += vdev->fmt.bytesperline;
	ipu_cpmem_set_buffer(ch, priv->ipu_buf_num, phys);
}
/*
 * EOF interrupt handler. Rotates the vb2 buffers on the final channel
 * of the pipeline (rotator output if rotation is active, otherwise the
 * IC output channel) and re-arms the EOF timeout timer.
 *
 * When prp_stop() has set last_eof, only signal completion so stream
 * off can proceed; no further buffers are queued.
 */
static irqreturn_t prp_eof_interrupt(int irq, void *dev_id)
{
	struct prp_priv *priv = dev_id;
	struct ipuv3_channel *channel;
	spin_lock(&priv->irqlock);
	if (priv->last_eof) {
		complete(&priv->last_eof_comp);
		priv->last_eof = false;
		goto unlock;
	}
	channel = (ipu_rot_mode_is_irt(priv->rot_mode)) ?
		priv->rot_out_ch : priv->out_ch;
	prp_vb2_buf_done(priv, channel);
	/* select new IPU buf */
	ipu_idmac_select_buffer(channel, priv->ipu_buf_num);
	/* toggle IPU double-buffer index */
	priv->ipu_buf_num ^= 1;
	/* bump the EOF timeout timer */
	mod_timer(&priv->eof_timeout_timer,
		  jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
unlock:
	spin_unlock(&priv->irqlock);
	return IRQ_HANDLED;
}
/*
 * NFB4EOF ("new frame before EOF") interrupt handler: the IDMAC started
 * a new frame before the previous one completed.
 */
static irqreturn_t prp_nfb4eof_interrupt(int irq, void *dev_id)
{
	struct prp_priv *priv = dev_id;
	struct imx_ic_priv *ic_priv = priv->ic_priv;
	spin_lock(&priv->irqlock);
	/*
	 * this is not an unrecoverable error, just mark
	 * the next captured frame with vb2 error flag.
	 */
	priv->nfb4eof = true;
	v4l2_err(&ic_priv->sd, "NFB4EOF\n");
	spin_unlock(&priv->irqlock);
	return IRQ_HANDLED;
}
/*
 * EOF timeout timer function. This is an unrecoverable condition
 * without a stream restart.
 */
static void prp_eof_timeout(struct timer_list *t)
{
	struct prp_priv *priv = from_timer(priv, t, eof_timeout_timer);
	struct imx_ic_priv *ic_priv = priv->ic_priv;

	v4l2_err(&ic_priv->sd, "EOF timeout\n");

	/* signal a fatal error to capture device */
	imx_media_capture_device_error(priv->vdev);
}
/*
 * Prime both halves of the IPU double buffer before streaming starts.
 * Each slot gets a queued vb2 buffer when one is available, otherwise
 * the internal underrun buffer. The DMA addresses are returned in
 * phys[0..1].
 */
static void prp_setup_vb2_buf(struct prp_priv *priv, dma_addr_t *phys)
{
	struct imx_media_video_dev *vdev = priv->vdev;
	int i;

	for (i = 0; i < 2; i++) {
		struct imx_media_buffer *buf;

		buf = imx_media_capture_device_next_buf(vdev);
		if (!buf) {
			priv->active_vb2_buf[i] = NULL;
			phys[i] = priv->underrun_buf.phys;
			continue;
		}

		priv->active_vb2_buf[i] = buf;
		phys[i] = vb2_dma_contig_plane_dma_addr(&buf->vbuf.vb2_buf, 0);
	}
}
/*
 * Hand any still-active frames back to vb2 with the given status
 * (QUEUED on a failed start, ERROR on stream off).
 */
static void prp_unsetup_vb2_buf(struct prp_priv *priv,
				enum vb2_buffer_state return_status)
{
	int i;

	for (i = 0; i < 2; i++) {
		struct imx_media_buffer *buf = priv->active_vb2_buf[i];
		struct vb2_buffer *vb;

		if (!buf)
			continue;

		vb = &buf->vbuf.vb2_buf;
		vb->timestamp = ktime_get_ns();
		vb2_buffer_done(vb, return_status);
	}
}
/*
 * Program one IDMAC channel's CPMEM for this pipeline.
 *
 * @channel: the channel to set up (out_ch, rot_in_ch or rot_out_ch).
 * @rot_mode: rotation to program into the channel (IPU_ROTATE_NONE for
 *            the non-rotating channels).
 * @addr0/@addr1: DMA addresses for the two halves of the double buffer.
 * @rot_swap_width_height: true for the channels on the rotator side of
 *            the pipeline, whose buffers have width/height swapped.
 *
 * Returns 0 on success or a negative error code.
 */
static int prp_setup_channel(struct prp_priv *priv,
			     struct ipuv3_channel *channel,
			     enum ipu_rotate_mode rot_mode,
			     dma_addr_t addr0, dma_addr_t addr1,
			     bool rot_swap_width_height)
{
	struct imx_media_video_dev *vdev = priv->vdev;
	const struct imx_media_pixfmt *outcc;
	struct v4l2_mbus_framefmt *outfmt;
	unsigned int burst_size;
	struct ipu_image image;
	bool interweave;
	int ret;
	outfmt = &priv->format_mbus[PRPENCVF_SRC_PAD];
	outcc = vdev->cc;
	ipu_cpmem_zero(channel);
	memset(&image, 0, sizeof(image));
	image.pix = vdev->fmt;
	image.rect = vdev->compose;
	/*
	 * If the field type at capture interface is interlaced, and
	 * the output IDMAC pad is sequential, enable interweave at
	 * the IDMAC output channel.
	 */
	interweave = V4L2_FIELD_IS_INTERLACED(image.pix.field) &&
		V4L2_FIELD_IS_SEQUENTIAL(outfmt->field);
	priv->interweave_swap = interweave &&
		image.pix.field == V4L2_FIELD_INTERLACED_BT;
	if (rot_swap_width_height) {
		swap(image.pix.width, image.pix.height);
		swap(image.rect.width, image.rect.height);
		/* recalc stride using swapped width */
		image.pix.bytesperline = outcc->planar ?
			image.pix.width :
			(image.pix.width * outcc->bpp) >> 3;
	}
	if (priv->interweave_swap && channel == priv->out_ch) {
		/* start interweave scan at 1st top line (2nd line) */
		image.rect.top = 1;
	}
	image.phys0 = addr0;
	image.phys1 = addr1;
	/*
	 * Skip writing U and V components to odd rows in the output
	 * channels for planar 4:2:0 (but not when enabling IDMAC
	 * interweaving, they are incompatible).
	 */
	if ((channel == priv->out_ch && !interweave) ||
	    channel == priv->rot_out_ch) {
		switch (image.pix.pixelformat) {
		case V4L2_PIX_FMT_YUV420:
		case V4L2_PIX_FMT_YVU420:
		case V4L2_PIX_FMT_NV12:
			ipu_cpmem_skip_odd_chroma_rows(channel);
			break;
		}
	}
	ret = ipu_cpmem_set_image(channel, &image);
	if (ret)
		return ret;
	/* rotator channels need block mode and a burst of 8 pixels */
	if (channel == priv->rot_in_ch ||
	    channel == priv->rot_out_ch) {
		burst_size = 8;
		ipu_cpmem_set_block_mode(channel);
	} else {
		/* burst of 16 only when width is a multiple of 16 */
		burst_size = (image.pix.width & 0xf) ? 8 : 16;
	}
	ipu_cpmem_set_burstsize(channel, burst_size);
	if (rot_mode)
		ipu_cpmem_set_rotation(channel, rot_mode);
	/* negative stride scans bottom-up for BT field order */
	if (interweave && channel == priv->out_ch)
		ipu_cpmem_interlaced_scan(channel,
					  priv->interweave_swap ?
					  -image.pix.bytesperline :
					  image.pix.bytesperline,
					  image.pix.pixelformat);
	ret = ipu_ic_task_idma_init(priv->ic, channel,
				    image.pix.width, image.pix.height,
				    burst_size, rot_mode);
	if (ret)
		return ret;
	ipu_cpmem_set_axi_id(channel, 1);
	ipu_idmac_set_double_buffer(channel, true);
	return 0;
}
/*
 * Set up the rotation pipeline:
 *
 *   IC prp --> rot_buf (intermediate) --> IRT rotator --> vb2 buffers
 *
 * Allocates the two intermediate rotation buffers, configures the CSC
 * and the three IDMAC channels, links the IC output to the rotator
 * input, then enables the IC, the channels and the task, in that order.
 *
 * Returns 0 on success; on failure everything set up so far is undone.
 */
static int prp_setup_rotation(struct prp_priv *priv)
{
	struct imx_media_video_dev *vdev = priv->vdev;
	struct imx_ic_priv *ic_priv = priv->ic_priv;
	const struct imx_media_pixfmt *outcc, *incc;
	struct v4l2_mbus_framefmt *infmt;
	struct v4l2_pix_format *outfmt;
	struct ipu_ic_csc csc;
	dma_addr_t phys[2];
	int ret;
	infmt = &priv->format_mbus[PRPENCVF_SINK_PAD];
	outfmt = &vdev->fmt;
	incc = priv->cc[PRPENCVF_SINK_PAD];
	outcc = vdev->cc;
	ret = ipu_ic_calc_csc(&csc,
			      infmt->ycbcr_enc, infmt->quantization,
			      incc->cs,
			      outfmt->ycbcr_enc, outfmt->quantization,
			      outcc->cs);
	if (ret) {
		v4l2_err(&ic_priv->sd, "ipu_ic_calc_csc failed, %d\n",
			 ret);
		return ret;
	}
	ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0],
				      outfmt->sizeimage);
	if (ret) {
		v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[0], %d\n", ret);
		return ret;
	}
	ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1],
				      outfmt->sizeimage);
	if (ret) {
		v4l2_err(&ic_priv->sd, "failed to alloc rot_buf[1], %d\n", ret);
		goto free_rot0;
	}
	/* output dimensions swapped: the rotator restores orientation */
	ret = ipu_ic_task_init(priv->ic, &csc,
			       infmt->width, infmt->height,
			       outfmt->height, outfmt->width);
	if (ret) {
		v4l2_err(&ic_priv->sd, "ipu_ic_task_init failed, %d\n", ret);
		goto free_rot1;
	}
	/* init the IC-PRP-->MEM IDMAC channel */
	ret = prp_setup_channel(priv, priv->out_ch, IPU_ROTATE_NONE,
				priv->rot_buf[0].phys, priv->rot_buf[1].phys,
				true);
	if (ret) {
		v4l2_err(&ic_priv->sd,
			 "prp_setup_channel(out_ch) failed, %d\n", ret);
		goto free_rot1;
	}
	/* init the MEM-->IC-PRP ROT IDMAC channel */
	ret = prp_setup_channel(priv, priv->rot_in_ch, priv->rot_mode,
				priv->rot_buf[0].phys, priv->rot_buf[1].phys,
				true);
	if (ret) {
		v4l2_err(&ic_priv->sd,
			 "prp_setup_channel(rot_in_ch) failed, %d\n", ret);
		goto free_rot1;
	}
	prp_setup_vb2_buf(priv, phys);
	/* init the destination IC-PRP ROT-->MEM IDMAC channel */
	ret = prp_setup_channel(priv, priv->rot_out_ch, IPU_ROTATE_NONE,
				phys[0], phys[1],
				false);
	if (ret) {
		v4l2_err(&ic_priv->sd,
			 "prp_setup_channel(rot_out_ch) failed, %d\n", ret);
		goto unsetup_vb2;
	}
	/* now link IC-PRP-->MEM to MEM-->IC-PRP ROT */
	ipu_idmac_link(priv->out_ch, priv->rot_in_ch);
	/* enable the IC */
	ipu_ic_enable(priv->ic);
	/* set buffers ready */
	ipu_idmac_select_buffer(priv->out_ch, 0);
	ipu_idmac_select_buffer(priv->out_ch, 1);
	ipu_idmac_select_buffer(priv->rot_out_ch, 0);
	ipu_idmac_select_buffer(priv->rot_out_ch, 1);
	/* enable the channels */
	ipu_idmac_enable_channel(priv->out_ch);
	ipu_idmac_enable_channel(priv->rot_in_ch);
	ipu_idmac_enable_channel(priv->rot_out_ch);
	/* and finally enable the IC PRP task */
	ipu_ic_task_enable(priv->ic);
	return 0;
unsetup_vb2:
	prp_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
free_rot1:
	imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1]);
free_rot0:
	imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0]);
	return ret;
}
/*
 * Tear down the rotation pipeline: disable task, channels and IC,
 * unlink the channels and free the intermediate rotation buffers.
 * Mirrors prp_setup_rotation() in reverse order.
 */
static void prp_unsetup_rotation(struct prp_priv *priv)
{
	struct imx_ic_priv *ic_priv = priv->ic_priv;
	ipu_ic_task_disable(priv->ic);
	ipu_idmac_disable_channel(priv->out_ch);
	ipu_idmac_disable_channel(priv->rot_in_ch);
	ipu_idmac_disable_channel(priv->rot_out_ch);
	ipu_idmac_unlink(priv->out_ch, priv->rot_in_ch);
	ipu_ic_disable(priv->ic);
	imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[0]);
	imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->rot_buf[1]);
}
/*
 * Set up the simple pipeline with no 90/270 rotation:
 *
 *   IC prp --> vb2 buffers
 *
 * Flips and 180 rotation (priv->rot_mode) are still handled by the
 * output channel itself. Returns 0 on success.
 */
static int prp_setup_norotation(struct prp_priv *priv)
{
	struct imx_media_video_dev *vdev = priv->vdev;
	struct imx_ic_priv *ic_priv = priv->ic_priv;
	const struct imx_media_pixfmt *outcc, *incc;
	struct v4l2_mbus_framefmt *infmt;
	struct v4l2_pix_format *outfmt;
	struct ipu_ic_csc csc;
	dma_addr_t phys[2];
	int ret;
	infmt = &priv->format_mbus[PRPENCVF_SINK_PAD];
	outfmt = &vdev->fmt;
	incc = priv->cc[PRPENCVF_SINK_PAD];
	outcc = vdev->cc;
	ret = ipu_ic_calc_csc(&csc,
			      infmt->ycbcr_enc, infmt->quantization,
			      incc->cs,
			      outfmt->ycbcr_enc, outfmt->quantization,
			      outcc->cs);
	if (ret) {
		v4l2_err(&ic_priv->sd, "ipu_ic_calc_csc failed, %d\n",
			 ret);
		return ret;
	}
	ret = ipu_ic_task_init(priv->ic, &csc,
			       infmt->width, infmt->height,
			       outfmt->width, outfmt->height);
	if (ret) {
		v4l2_err(&ic_priv->sd, "ipu_ic_task_init failed, %d\n", ret);
		return ret;
	}
	prp_setup_vb2_buf(priv, phys);
	/* init the IC PRP-->MEM IDMAC channel */
	ret = prp_setup_channel(priv, priv->out_ch, priv->rot_mode,
				phys[0], phys[1], false);
	if (ret) {
		v4l2_err(&ic_priv->sd,
			 "prp_setup_channel(out_ch) failed, %d\n", ret);
		goto unsetup_vb2;
	}
	/* debug-level dumps of the programmed state */
	ipu_cpmem_dump(priv->out_ch);
	ipu_ic_dump(priv->ic);
	ipu_dump(ic_priv->ipu);
	ipu_ic_enable(priv->ic);
	/* set buffers ready */
	ipu_idmac_select_buffer(priv->out_ch, 0);
	ipu_idmac_select_buffer(priv->out_ch, 1);
	/* enable the channels */
	ipu_idmac_enable_channel(priv->out_ch);
	/* enable the IC task */
	ipu_ic_task_enable(priv->ic);
	return 0;
unsetup_vb2:
	prp_unsetup_vb2_buf(priv, VB2_BUF_STATE_QUEUED);
	return ret;
}
/* Tear down the no-rotation pipeline set up by prp_setup_norotation(). */
static void prp_unsetup_norotation(struct prp_priv *priv)
{
	ipu_ic_task_disable(priv->ic);
	ipu_idmac_disable_channel(priv->out_ch);
	ipu_ic_disable(priv->ic);
}
/*
 * Tear down whichever pipeline variant was configured, then return any
 * remaining active vb2 buffers with the given state.
 */
static void prp_unsetup(struct prp_priv *priv,
			enum vb2_buffer_state state)
{
	if (!ipu_rot_mode_is_irt(priv->rot_mode))
		prp_unsetup_norotation(priv);
	else
		prp_unsetup_rotation(priv);

	prp_unsetup_vb2_buf(priv, state);
}
/*
 * Start streaming: acquire IPU resources, allocate the underrun buffer,
 * configure the pipeline (rotation or not), register the NFB4EOF and
 * EOF interrupt handlers, start the upstream subdev and arm the EOF
 * timeout timer.
 *
 * Called with priv->lock held. On any failure the goto chain unwinds
 * everything done so far and returns the error.
 */
static int prp_start(struct prp_priv *priv)
{
	struct imx_ic_priv *ic_priv = priv->ic_priv;
	struct imx_media_video_dev *vdev = priv->vdev;
	int ret;
	ret = prp_get_ipu_resources(priv);
	if (ret)
		return ret;
	ret = imx_media_alloc_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf,
				      vdev->fmt.sizeimage);
	if (ret)
		goto out_put_ipu;
	priv->ipu_buf_num = 0;
	/* init EOF completion waitq */
	init_completion(&priv->last_eof_comp);
	priv->frame_sequence = 0;
	priv->last_eof = false;
	priv->nfb4eof = false;
	if (ipu_rot_mode_is_irt(priv->rot_mode))
		ret = prp_setup_rotation(priv);
	else
		ret = prp_setup_norotation(priv);
	if (ret)
		goto out_free_underrun;
	priv->nfb4eof_irq = ipu_idmac_channel_irq(ic_priv->ipu,
						  priv->out_ch,
						  IPU_IRQ_NFB4EOF);
	ret = devm_request_irq(ic_priv->ipu_dev, priv->nfb4eof_irq,
			       prp_nfb4eof_interrupt, 0,
			       "imx-ic-prp-nfb4eof", priv);
	if (ret) {
		v4l2_err(&ic_priv->sd,
			 "Error registering NFB4EOF irq: %d\n", ret);
		goto out_unsetup;
	}
	/* EOF is watched on the last channel of the pipeline */
	if (ipu_rot_mode_is_irt(priv->rot_mode))
		priv->eof_irq = ipu_idmac_channel_irq(
			ic_priv->ipu, priv->rot_out_ch, IPU_IRQ_EOF);
	else
		priv->eof_irq = ipu_idmac_channel_irq(
			ic_priv->ipu, priv->out_ch, IPU_IRQ_EOF);
	ret = devm_request_irq(ic_priv->ipu_dev, priv->eof_irq,
			       prp_eof_interrupt, 0,
			       "imx-ic-prp-eof", priv);
	if (ret) {
		v4l2_err(&ic_priv->sd,
			 "Error registering eof irq: %d\n", ret);
		goto out_free_nfb4eof_irq;
	}
	/* start upstream */
	ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 1);
	/* -ENOIOCTLCMD means the upstream subdev has no s_stream; not fatal */
	ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
	if (ret) {
		v4l2_err(&ic_priv->sd,
			 "upstream stream on failed: %d\n", ret);
		goto out_free_eof_irq;
	}
	/* start the EOF timeout timer */
	mod_timer(&priv->eof_timeout_timer,
		  jiffies + msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
	return 0;
out_free_eof_irq:
	devm_free_irq(ic_priv->ipu_dev, priv->eof_irq, priv);
out_free_nfb4eof_irq:
	devm_free_irq(ic_priv->ipu_dev, priv->nfb4eof_irq, priv);
out_unsetup:
	prp_unsetup(priv, VB2_BUF_STATE_QUEUED);
out_free_underrun:
	imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf);
out_put_ipu:
	prp_put_ipu_resources(priv);
	return ret;
}
/*
 * Stop streaming: wait (with timeout) for the last EOF so the hardware
 * quiesces, stop the upstream subdev, free the irqs, tear down the
 * pipeline, free the underrun buffer, cancel the EOF timeout timer and
 * release the IPU resources. Called with priv->lock held.
 */
static void prp_stop(struct prp_priv *priv)
{
	struct imx_ic_priv *ic_priv = priv->ic_priv;
	unsigned long flags;
	int ret;
	/* mark next EOF interrupt as the last before stream off */
	spin_lock_irqsave(&priv->irqlock, flags);
	priv->last_eof = true;
	spin_unlock_irqrestore(&priv->irqlock, flags);
	/*
	 * and then wait for interrupt handler to mark completion.
	 */
	ret = wait_for_completion_timeout(
		&priv->last_eof_comp,
		msecs_to_jiffies(IMX_MEDIA_EOF_TIMEOUT));
	if (ret == 0)
		v4l2_warn(&ic_priv->sd, "wait last EOF timeout\n");
	/* stop upstream */
	ret = v4l2_subdev_call(priv->src_sd, video, s_stream, 0);
	if (ret && ret != -ENOIOCTLCMD)
		v4l2_warn(&ic_priv->sd,
			  "upstream stream off failed: %d\n", ret);
	devm_free_irq(ic_priv->ipu_dev, priv->eof_irq, priv);
	devm_free_irq(ic_priv->ipu_dev, priv->nfb4eof_irq, priv);
	/* remaining active buffers are returned with ERROR status */
	prp_unsetup(priv, VB2_BUF_STATE_ERROR);
	imx_media_free_dma_buf(ic_priv->ipu_dev, &priv->underrun_buf);
	/* cancel the EOF timeout timer */
	del_timer_sync(&priv->eof_timeout_timer);
	prp_put_ipu_resources(priv);
}
/*
 * Return the mbus format for the given pad: the TRY format lives in the
 * subdev state, the ACTIVE format in our private data. Caller must hold
 * priv->lock for ACTIVE formats.
 */
static struct v4l2_mbus_framefmt *
__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
	      unsigned int pad, enum v4l2_subdev_format_whence which)
{
	struct imx_ic_priv *ic_priv = priv->ic_priv;

	if (which != V4L2_SUBDEV_FORMAT_TRY)
		return &priv->format_mbus[pad];

	return v4l2_subdev_get_try_format(&ic_priv->sd, sd_state, pad);
}
/*
 * Applies IC resizer and IDMAC alignment restrictions to output
 * rectangle given the input rectangle, and depending on given
 * rotation mode.
 *
 * The IC resizer cannot downsize more than 4:1. Note also that
 * for 90 or 270 rotation, _both_ output width and height must
 * be aligned by W_ALIGN_SRC, because the intermediate rotation
 * buffer swaps output width/height, and the final output buffer
 * does not.
 *
 * Returns true if the output rectangle was modified.
 */
static bool prp_bound_align_output(struct v4l2_mbus_framefmt *outfmt,
				   struct v4l2_mbus_framefmt *infmt,
				   enum ipu_rotate_mode rot_mode)
{
	u32 orig_width = outfmt->width;
	u32 orig_height = outfmt->height;
	/*
	 * NOTE(review): the *_ALIGN constants appear to be passed as log2
	 * alignments to v4l_bound_align_image() — confirm against the
	 * v4l2-common documentation.
	 */
	if (ipu_rot_mode_is_irt(rot_mode))
		/* 90/270: input width/height bounds swap roles */
		v4l_bound_align_image(&outfmt->width,
				      infmt->height / 4, MAX_H_SRC,
				      W_ALIGN_SRC,
				      &outfmt->height,
				      infmt->width / 4, MAX_W_SRC,
				      W_ALIGN_SRC, S_ALIGN);
	else
		v4l_bound_align_image(&outfmt->width,
				      infmt->width / 4, MAX_W_SRC,
				      W_ALIGN_SRC,
				      &outfmt->height,
				      infmt->height / 4, MAX_H_SRC,
				      H_ALIGN_SRC, S_ALIGN);
	return outfmt->width != orig_width || outfmt->height != orig_height;
}
/*
* V4L2 subdev operations.
*/
/* Enumerate supported media-bus codes; both pads accept IPU YUV/RGB. */
static int prp_enum_mbus_code(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->pad >= PRPENCVF_NUM_PADS)
		return -EINVAL;
	return imx_media_enum_ipu_formats(&code->code, code->index,
					  PIXFMT_SEL_YUV_RGB);
}
/* Return the current (TRY or ACTIVE) format on the requested pad. */
static int prp_get_fmt(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *sd_state,
		       struct v4l2_subdev_format *sdformat)
{
	struct prp_priv *priv = sd_to_priv(sd);
	struct v4l2_mbus_framefmt *fmt;
	int ret = 0;

	if (sdformat->pad >= PRPENCVF_NUM_PADS)
		return -EINVAL;

	mutex_lock(&priv->lock);

	fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
	if (fmt)
		sdformat->format = *fmt;
	else
		ret = -EINVAL;

	mutex_unlock(&priv->lock);

	return ret;
}
/*
 * Adjust a requested format to something the hardware supports.
 *
 * Unsupported media-bus codes fall back to the first enumerated IPU
 * YUV/RGB format. On the source pad the size is bounded/aligned
 * against the sink format per the resizer limits and colorimetry is
 * propagated from the sink; on the sink pad the size is bounded to the
 * sink limits. The matched pixel format entry is returned in *cc.
 * Caller must hold priv->lock for ACTIVE formats.
 */
static void prp_try_fmt(struct prp_priv *priv,
			struct v4l2_subdev_state *sd_state,
			struct v4l2_subdev_format *sdformat,
			const struct imx_media_pixfmt **cc)
{
	struct v4l2_mbus_framefmt *infmt;
	*cc = imx_media_find_ipu_format(sdformat->format.code,
					PIXFMT_SEL_YUV_RGB);
	if (!*cc) {
		u32 code;
		/* fall back to the first supported IPU format */
		imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV_RGB);
		*cc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV_RGB);
		sdformat->format.code = (*cc)->codes[0];
	}
	infmt = __prp_get_fmt(priv, sd_state, PRPENCVF_SINK_PAD,
			      sdformat->which);
	if (sdformat->pad == PRPENCVF_SRC_PAD) {
		sdformat->format.field = infmt->field;
		prp_bound_align_output(&sdformat->format, infmt,
				       priv->rot_mode);
		/* propagate colorimetry from sink */
		sdformat->format.colorspace = infmt->colorspace;
		sdformat->format.xfer_func = infmt->xfer_func;
	} else {
		v4l_bound_align_image(&sdformat->format.width,
				      MIN_W_SINK, MAX_W_SINK, W_ALIGN_SINK,
				      &sdformat->format.height,
				      MIN_H_SINK, MAX_H_SINK, H_ALIGN_SINK,
				      S_ALIGN);
		if (sdformat->format.field == V4L2_FIELD_ANY)
			sdformat->format.field = V4L2_FIELD_NONE;
	}
	imx_media_try_colorimetry(&sdformat->format, true);
}
/*
 * Set the format on a pad. Rejected while streaming. Setting the sink
 * format also propagates a default format to the source pad. For
 * ACTIVE formats the matched pixel format entry is cached in priv->cc.
 */
static int prp_set_fmt(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *sd_state,
		       struct v4l2_subdev_format *sdformat)
{
	struct prp_priv *priv = sd_to_priv(sd);
	const struct imx_media_pixfmt *cc;
	struct v4l2_mbus_framefmt *fmt;
	int ret = 0;
	if (sdformat->pad >= PRPENCVF_NUM_PADS)
		return -EINVAL;
	mutex_lock(&priv->lock);
	if (priv->stream_count > 0) {
		ret = -EBUSY;
		goto out;
	}
	prp_try_fmt(priv, sd_state, sdformat, &cc);
	fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
	*fmt = sdformat->format;
	/* propagate a default format to source pad */
	if (sdformat->pad == PRPENCVF_SINK_PAD) {
		const struct imx_media_pixfmt *outcc;
		struct v4l2_mbus_framefmt *outfmt;
		struct v4l2_subdev_format format;
		format.pad = PRPENCVF_SRC_PAD;
		format.which = sdformat->which;
		format.format = sdformat->format;
		prp_try_fmt(priv, sd_state, &format, &outcc);
		outfmt = __prp_get_fmt(priv, sd_state, PRPENCVF_SRC_PAD,
				       sdformat->which);
		*outfmt = format.format;
		if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
			priv->cc[PRPENCVF_SRC_PAD] = outcc;
	}
	if (sdformat->which == V4L2_SUBDEV_FORMAT_ACTIVE)
		priv->cc[sdformat->pad] = cc;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * Enumerate the supported frame size range for a code by running
 * prp_try_fmt() at the minimum (1x1) and maximum (UINT_MAX via -1)
 * requested sizes and reporting what it clamps them to.
 */
static int prp_enum_frame_size(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_frame_size_enum *fse)
{
	struct prp_priv *priv = sd_to_priv(sd);
	struct v4l2_subdev_format format = {};
	const struct imx_media_pixfmt *cc;
	int ret = 0;
	if (fse->pad >= PRPENCVF_NUM_PADS || fse->index != 0)
		return -EINVAL;
	mutex_lock(&priv->lock);
	format.pad = fse->pad;
	format.which = fse->which;
	format.format.code = fse->code;
	format.format.width = 1;
	format.format.height = 1;
	prp_try_fmt(priv, sd_state, &format, &cc);
	fse->min_width = format.format.width;
	fse->min_height = format.format.height;
	/* if try_fmt replaced the code, the requested code is unsupported */
	if (format.format.code != fse->code) {
		ret = -EINVAL;
		goto out;
	}
	format.format.code = fse->code;
	format.format.width = -1;
	format.format.height = -1;
	prp_try_fmt(priv, sd_state, &format, &cc);
	fse->max_width = format.format.width;
	fse->max_height = format.format.height;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * Media link setup: record the upstream source subdev on the sink pad
 * and the capture video device entity on the source pad. Only one link
 * may be enabled per pad at a time (-EBUSY otherwise).
 */
static int prp_link_setup(struct media_entity *entity,
			  const struct media_pad *local,
			  const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
	struct prp_priv *priv = ic_priv->task_priv;
	struct v4l2_subdev *remote_sd;
	int ret = 0;
	dev_dbg(ic_priv->ipu_dev, "%s: link setup %s -> %s",
		ic_priv->sd.name, remote->entity->name, local->entity->name);
	mutex_lock(&priv->lock);
	if (local->flags & MEDIA_PAD_FL_SINK) {
		/* sink pad: remote must be another subdev */
		if (!is_media_entity_v4l2_subdev(remote->entity)) {
			ret = -EINVAL;
			goto out;
		}
		remote_sd = media_entity_to_v4l2_subdev(remote->entity);
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (priv->src_sd) {
				ret = -EBUSY;
				goto out;
			}
			priv->src_sd = remote_sd;
		} else {
			priv->src_sd = NULL;
		}
		goto out;
	}
	/* this is the source pad */
	/* the remote must be the device node */
	if (!is_media_entity_v4l2_video_device(remote->entity)) {
		ret = -EINVAL;
		goto out;
	}
	if (flags & MEDIA_LNK_FL_ENABLED) {
		if (priv->sink) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		priv->sink = NULL;
		goto out;
	}
	priv->sink = remote->entity;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * Control handler for HFLIP/VFLIP/ROTATE. The three controls together
 * determine a single IPU rotate mode; a combination that has no valid
 * rotate mode, would change the mode mid-stream, or would force an
 * output size change is rejected.
 */
static int prp_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct prp_priv *priv = container_of(ctrl->handler,
					     struct prp_priv, ctrl_hdlr);
	struct imx_ic_priv *ic_priv = priv->ic_priv;
	enum ipu_rotate_mode rot_mode;
	int rotation, ret = 0;
	bool hflip, vflip;
	mutex_lock(&priv->lock);
	/* start from the current settings, override the one being set */
	rotation = priv->rotation;
	hflip = priv->hflip;
	vflip = priv->vflip;
	switch (ctrl->id) {
	case V4L2_CID_HFLIP:
		hflip = (ctrl->val == 1);
		break;
	case V4L2_CID_VFLIP:
		vflip = (ctrl->val == 1);
		break;
	case V4L2_CID_ROTATE:
		rotation = ctrl->val;
		break;
	default:
		v4l2_err(&ic_priv->sd, "Invalid control\n");
		ret = -EINVAL;
		goto out;
	}
	ret = ipu_degrees_to_rot_mode(&rot_mode, rotation, hflip, vflip);
	if (ret)
		goto out;
	if (rot_mode != priv->rot_mode) {
		struct v4l2_mbus_framefmt outfmt, infmt;
		/* can't change rotation mid-streaming */
		if (priv->stream_count > 0) {
			ret = -EBUSY;
			goto out;
		}
		outfmt = priv->format_mbus[PRPENCVF_SRC_PAD];
		infmt = priv->format_mbus[PRPENCVF_SINK_PAD];
		/* reject if the new mode would alter the output rectangle */
		if (prp_bound_align_output(&outfmt, &infmt, rot_mode)) {
			ret = -EINVAL;
			goto out;
		}
		priv->rot_mode = rot_mode;
		priv->rotation = rotation;
		priv->hflip = hflip;
		priv->vflip = vflip;
	}
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/* v4l2 control operations */
static const struct v4l2_ctrl_ops prp_ctrl_ops = {
	.s_ctrl = prp_s_ctrl,
};
/*
 * Create the HFLIP, VFLIP and ROTATE controls and attach the handler
 * to the subdev. The handler accumulates errors, so a single check of
 * hdlr->error after all v4l2_ctrl_new_std() calls suffices.
 */
static int prp_init_controls(struct prp_priv *priv)
{
	struct imx_ic_priv *ic_priv = priv->ic_priv;
	struct v4l2_ctrl_handler *hdlr = &priv->ctrl_hdlr;
	int ret;
	v4l2_ctrl_handler_init(hdlr, 3);
	v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_HFLIP,
			  0, 1, 1, 0);
	v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_VFLIP,
			  0, 1, 1, 0);
	/* rotation in 90 degree steps, 0..270 */
	v4l2_ctrl_new_std(hdlr, &prp_ctrl_ops, V4L2_CID_ROTATE,
			  0, 270, 90, 0);
	ic_priv->sd.ctrl_handler = hdlr;
	if (hdlr->error) {
		ret = hdlr->error;
		goto out_free;
	}
	v4l2_ctrl_handler_setup(hdlr);
	return 0;
out_free:
	v4l2_ctrl_handler_free(hdlr);
	return ret;
}
/*
 * s_stream: start/stop the pipeline. Uses a nesting count so the
 * hardware is only touched on the 0->1 and 1->0 transitions; requires
 * both an enabled source link and an enabled capture link (-EPIPE
 * otherwise).
 */
static int prp_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
	struct prp_priv *priv = ic_priv->task_priv;
	int ret = 0;
	mutex_lock(&priv->lock);
	if (!priv->src_sd || !priv->sink) {
		ret = -EPIPE;
		goto out;
	}
	/*
	 * enable/disable streaming only if stream_count is
	 * going from 0 to 1 / 1 to 0.
	 */
	if (priv->stream_count != !enable)
		goto update_count;
	dev_dbg(ic_priv->ipu_dev, "%s: stream %s\n", sd->name,
		enable ? "ON" : "OFF");
	if (enable)
		ret = prp_start(priv);
	else
		prp_stop(priv);
	if (ret)
		goto out;
update_count:
	priv->stream_count += enable ? 1 : -1;
	if (priv->stream_count < 0)
		priv->stream_count = 0;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/* Report the current frame interval (shared state, read under lock). */
static int prp_g_frame_interval(struct v4l2_subdev *sd,
				struct v4l2_subdev_frame_interval *fi)
{
	struct prp_priv *priv = sd_to_priv(sd);
	if (fi->pad >= PRPENCVF_NUM_PADS)
		return -EINVAL;
	mutex_lock(&priv->lock);
	fi->interval = priv->frame_interval;
	mutex_unlock(&priv->lock);
	return 0;
}
/*
 * Set the frame interval. Any non-degenerate interval is accepted;
 * a zero numerator or denominator returns the current interval
 * instead of storing it.
 */
static int prp_s_frame_interval(struct v4l2_subdev *sd,
				struct v4l2_subdev_frame_interval *fi)
{
	struct prp_priv *priv = sd_to_priv(sd);
	if (fi->pad >= PRPENCVF_NUM_PADS)
		return -EINVAL;
	mutex_lock(&priv->lock);
	/* No limits on valid frame intervals */
	if (fi->interval.numerator == 0 || fi->interval.denominator == 0)
		fi->interval = priv->frame_interval;
	else
		priv->frame_interval = fi->interval;
	mutex_unlock(&priv->lock);
	return 0;
}
/*
 * Subdev registered notification: initialize default pad formats and
 * frame interval, create and register the capture video device, then
 * create the v4l2 controls. Error paths unwind in reverse order.
 */
static int prp_registered(struct v4l2_subdev *sd)
{
	struct prp_priv *priv = sd_to_priv(sd);
	struct imx_ic_priv *ic_priv = priv->ic_priv;
	int i, ret;
	u32 code;
	/* set a default mbus format */
	imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);
	for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
		ret = imx_media_init_mbus_fmt(&priv->format_mbus[i],
					      IMX_MEDIA_DEF_PIX_WIDTH,
					      IMX_MEDIA_DEF_PIX_HEIGHT, code,
					      V4L2_FIELD_NONE, &priv->cc[i]);
		if (ret)
			return ret;
	}
	/* init default frame interval */
	priv->frame_interval.numerator = 1;
	priv->frame_interval.denominator = 30;
	priv->vdev = imx_media_capture_device_init(ic_priv->ipu_dev,
						   &ic_priv->sd,
						   PRPENCVF_SRC_PAD, true);
	if (IS_ERR(priv->vdev))
		return PTR_ERR(priv->vdev);
	ret = imx_media_capture_device_register(priv->vdev, 0);
	if (ret)
		goto remove_vdev;
	ret = prp_init_controls(priv);
	if (ret)
		goto unreg_vdev;
	return 0;
unreg_vdev:
	imx_media_capture_device_unregister(priv->vdev);
remove_vdev:
	imx_media_capture_device_remove(priv->vdev);
	return ret;
}
/* Subdev unregistered notification: undo prp_registered(). */
static void prp_unregistered(struct v4l2_subdev *sd)
{
	struct prp_priv *priv = sd_to_priv(sd);
	imx_media_capture_device_unregister(priv->vdev);
	imx_media_capture_device_remove(priv->vdev);
	v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
}
/* subdev pad operations */
static const struct v4l2_subdev_pad_ops prp_pad_ops = {
	.init_cfg = imx_media_init_cfg,
	.enum_mbus_code = prp_enum_mbus_code,
	.enum_frame_size = prp_enum_frame_size,
	.get_fmt = prp_get_fmt,
	.set_fmt = prp_set_fmt,
};
/* subdev video operations */
static const struct v4l2_subdev_video_ops prp_video_ops = {
	.g_frame_interval = prp_g_frame_interval,
	.s_frame_interval = prp_s_frame_interval,
	.s_stream = prp_s_stream,
};
/* media entity operations */
static const struct media_entity_operations prp_entity_ops = {
	.link_setup = prp_link_setup,
	.link_validate = v4l2_subdev_link_validate,
};
static const struct v4l2_subdev_ops prp_subdev_ops = {
	.video = &prp_video_ops,
	.pad = &prp_pad_ops,
};
static const struct v4l2_subdev_internal_ops prp_internal_ops = {
	.registered = prp_registered,
	.unregistered = prp_unregistered,
};
/*
 * Allocate and initialize the per-task private data and the media pads.
 * The mutex is destroyed again if pad initialization fails; the priv
 * allocation itself is devm-managed.
 */
static int prp_init(struct imx_ic_priv *ic_priv)
{
	struct prp_priv *priv;
	int i, ret;
	priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	ic_priv->task_priv = priv;
	priv->ic_priv = ic_priv;
	spin_lock_init(&priv->irqlock);
	timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
	mutex_init(&priv->lock);
	for (i = 0; i < PRPENCVF_NUM_PADS; i++) {
		priv->pad[i].flags = (i == PRPENCVF_SINK_PAD) ?
			MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
	}
	ret = media_entity_pads_init(&ic_priv->sd.entity, PRPENCVF_NUM_PADS,
				     priv->pad);
	if (ret)
		mutex_destroy(&priv->lock);
	return ret;
}
/* Task removal: only the mutex needs explicit cleanup (priv is devm). */
static void prp_remove(struct imx_ic_priv *ic_priv)
{
	struct prp_priv *priv = ic_priv->task_priv;
	mutex_destroy(&priv->lock);
}
/* IC task ops exported to the common imx-ic driver */
struct imx_ic_ops imx_ic_prpencvf_ops = {
	.subdev_ops = &prp_subdev_ops,
	.internal_ops = &prp_internal_ops,
	.entity_ops = &prp_entity_ops,
	.init = prp_init,
	.remove = prp_remove,
};
| linux-master | drivers/staging/media/imx/imx-ic-prpencvf.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Media Controller Driver for Freescale i.MX5/6 SOC
*
* Copyright (c) 2016 Mentor Graphics Inc.
*/
#include <linux/module.h>
#include "imx-media.h"
/* build a zero-terminated array of media-bus codes for a pixfmt entry */
#define IMX_BUS_FMTS(fmt...) ((const u32[]) {fmt, 0})
/*
 * List of supported pixel formats for the subdevs.
 *
 * Each entry maps a V4L2 fourcc to its possible media-bus codes (NULL
 * for memory-only formats with no bus representation), IPU colorspace,
 * bits-per-pixel and flags. Entries with .ipufmt are the IPU-internal
 * formats. Note V4L2_PIX_FMT_XRGB32 appears twice: once as a regular
 * format and once as an IPU-internal format.
 */
static const struct imx_media_pixfmt pixel_formats[] = {
	/*** YUV formats start here ***/
	{
		.fourcc	= V4L2_PIX_FMT_UYVY,
		.codes  = IMX_BUS_FMTS(
			MEDIA_BUS_FMT_UYVY8_2X8,
			MEDIA_BUS_FMT_UYVY8_1X16
		),
		.cs     = IPUV3_COLORSPACE_YUV,
		.bpp    = 16,
	}, {
		.fourcc	= V4L2_PIX_FMT_YUYV,
		.codes  = IMX_BUS_FMTS(
			MEDIA_BUS_FMT_YUYV8_2X8,
			MEDIA_BUS_FMT_YUYV8_1X16
		),
		.cs     = IPUV3_COLORSPACE_YUV,
		.bpp    = 16,
	}, {
		.fourcc	= V4L2_PIX_FMT_YUV420,
		.cs     = IPUV3_COLORSPACE_YUV,
		.bpp    = 12,
		.planar = true,
	}, {
		.fourcc = V4L2_PIX_FMT_YVU420,
		.cs     = IPUV3_COLORSPACE_YUV,
		.bpp    = 12,
		.planar = true,
	}, {
		.fourcc = V4L2_PIX_FMT_YUV422P,
		.cs     = IPUV3_COLORSPACE_YUV,
		.bpp    = 16,
		.planar = true,
	}, {
		.fourcc = V4L2_PIX_FMT_NV12,
		.cs     = IPUV3_COLORSPACE_YUV,
		.bpp    = 12,
		.planar = true,
	}, {
		.fourcc = V4L2_PIX_FMT_NV16,
		.cs     = IPUV3_COLORSPACE_YUV,
		.bpp    = 16,
		.planar = true,
	}, {
		.fourcc = V4L2_PIX_FMT_YUV32,
		.codes  = IMX_BUS_FMTS(MEDIA_BUS_FMT_AYUV8_1X32),
		.cs     = IPUV3_COLORSPACE_YUV,
		.bpp    = 32,
		.ipufmt = true,
	},
	/*** RGB formats start here ***/
	{
		.fourcc	= V4L2_PIX_FMT_RGB565,
		.codes  = IMX_BUS_FMTS(MEDIA_BUS_FMT_RGB565_2X8_LE),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 16,
		.cycles = 2,
	}, {
		.fourcc	= V4L2_PIX_FMT_RGB24,
		.codes  = IMX_BUS_FMTS(
			MEDIA_BUS_FMT_RGB888_1X24,
			MEDIA_BUS_FMT_RGB888_2X12_LE
		),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 24,
	}, {
		.fourcc	= V4L2_PIX_FMT_BGR24,
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 24,
	}, {
		.fourcc	= V4L2_PIX_FMT_XRGB32,
		.codes  = IMX_BUS_FMTS(MEDIA_BUS_FMT_ARGB8888_1X32),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 32,
	}, {
		/* IPU-internal variant of XRGB32 */
		.fourcc	= V4L2_PIX_FMT_XRGB32,
		.codes  = IMX_BUS_FMTS(MEDIA_BUS_FMT_ARGB8888_1X32),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 32,
		.ipufmt = true,
	}, {
		.fourcc	= V4L2_PIX_FMT_XBGR32,
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_BGRX32,
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 32,
	}, {
		.fourcc	= V4L2_PIX_FMT_RGBX32,
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 32,
	},
	/*** raw bayer and grayscale formats start here ***/
	{
		.fourcc = V4L2_PIX_FMT_SBGGR8,
		.codes  = IMX_BUS_FMTS(MEDIA_BUS_FMT_SBGGR8_1X8),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 8,
		.bayer  = true,
	}, {
		.fourcc = V4L2_PIX_FMT_SGBRG8,
		.codes  = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGBRG8_1X8),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 8,
		.bayer  = true,
	}, {
		.fourcc = V4L2_PIX_FMT_SGRBG8,
		.codes  = IMX_BUS_FMTS(MEDIA_BUS_FMT_SGRBG8_1X8),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 8,
		.bayer  = true,
	}, {
		.fourcc = V4L2_PIX_FMT_SRGGB8,
		.codes  = IMX_BUS_FMTS(MEDIA_BUS_FMT_SRGGB8_1X8),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 8,
		.bayer  = true,
	}, {
		/* 10/12/14-bit variants are stored in 16-bit containers */
		.fourcc = V4L2_PIX_FMT_SBGGR16,
		.codes  = IMX_BUS_FMTS(
			MEDIA_BUS_FMT_SBGGR10_1X10,
			MEDIA_BUS_FMT_SBGGR12_1X12,
			MEDIA_BUS_FMT_SBGGR14_1X14,
			MEDIA_BUS_FMT_SBGGR16_1X16
		),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 16,
		.bayer  = true,
	}, {
		.fourcc = V4L2_PIX_FMT_SGBRG16,
		.codes  = IMX_BUS_FMTS(
			MEDIA_BUS_FMT_SGBRG10_1X10,
			MEDIA_BUS_FMT_SGBRG12_1X12,
			MEDIA_BUS_FMT_SGBRG14_1X14,
			MEDIA_BUS_FMT_SGBRG16_1X16
		),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 16,
		.bayer  = true,
	}, {
		.fourcc = V4L2_PIX_FMT_SGRBG16,
		.codes  = IMX_BUS_FMTS(
			MEDIA_BUS_FMT_SGRBG10_1X10,
			MEDIA_BUS_FMT_SGRBG12_1X12,
			MEDIA_BUS_FMT_SGRBG14_1X14,
			MEDIA_BUS_FMT_SGRBG16_1X16
		),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 16,
		.bayer  = true,
	}, {
		.fourcc = V4L2_PIX_FMT_SRGGB16,
		.codes  = IMX_BUS_FMTS(
			MEDIA_BUS_FMT_SRGGB10_1X10,
			MEDIA_BUS_FMT_SRGGB12_1X12,
			MEDIA_BUS_FMT_SRGGB14_1X14,
			MEDIA_BUS_FMT_SRGGB16_1X16
		),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 16,
		.bayer  = true,
	}, {
		.fourcc = V4L2_PIX_FMT_GREY,
		.codes = IMX_BUS_FMTS(
			MEDIA_BUS_FMT_Y8_1X8,
			MEDIA_BUS_FMT_Y10_1X10,
			MEDIA_BUS_FMT_Y12_1X12
		),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 8,
		.bayer  = true,
	}, {
		.fourcc = V4L2_PIX_FMT_Y10,
		.codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y10_1X10),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 16,
		.bayer  = true,
	}, {
		.fourcc = V4L2_PIX_FMT_Y12,
		.codes = IMX_BUS_FMTS(MEDIA_BUS_FMT_Y12_1X12),
		.cs     = IPUV3_COLORSPACE_RGB,
		.bpp    = 16,
		.bayer  = true,
	},
};
/*
* Search in the pixel_formats[] array for an entry with the given fourcc
* that matches the requested selection criteria and return it.
*
* @fourcc: Search for an entry with the given fourcc pixel format.
* @fmt_sel: Allow entries only with the given selection criteria.
*/
const struct imx_media_pixfmt *
imx_media_find_pixel_format(u32 fourcc, enum imx_pixfmt_sel fmt_sel)
{
	const bool want_ipu = fmt_sel & PIXFMT_SEL_IPU;
	unsigned int idx;

	fmt_sel &= ~PIXFMT_SEL_IPU;

	for (idx = 0; idx < ARRAY_SIZE(pixel_formats); idx++) {
		const struct imx_media_pixfmt *fmt = &pixel_formats[idx];
		enum imx_pixfmt_sel cat;

		/* IPU-internal entries only match when explicitly requested */
		if (want_ipu != fmt->ipufmt)
			continue;

		if (fmt->fourcc != fourcc)
			continue;

		/* Classify the entry: bayer, YUV or RGB */
		if (fmt->bayer)
			cat = PIXFMT_SEL_BAYER;
		else if (fmt->cs == IPUV3_COLORSPACE_YUV)
			cat = PIXFMT_SEL_YUV;
		else
			cat = PIXFMT_SEL_RGB;

		if (fmt_sel & cat)
			return fmt;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(imx_media_find_pixel_format);
/*
* Search in the pixel_formats[] array for an entry with the given media
* bus code that matches the requested selection criteria and return it.
*
* @code: Search for an entry with the given media-bus code.
* @fmt_sel: Allow entries only with the given selection criteria.
*/
const struct imx_media_pixfmt *
imx_media_find_mbus_format(u32 code, enum imx_pixfmt_sel fmt_sel)
{
	const bool want_ipu = fmt_sel & PIXFMT_SEL_IPU;
	unsigned int idx;

	fmt_sel &= ~PIXFMT_SEL_IPU;

	for (idx = 0; idx < ARRAY_SIZE(pixel_formats); idx++) {
		const struct imx_media_pixfmt *fmt = &pixel_formats[idx];
		enum imx_pixfmt_sel cat;
		const u32 *c;

		/* IPU-internal entries only match when explicitly requested */
		if (want_ipu != fmt->ipufmt)
			continue;

		cat = fmt->bayer ? PIXFMT_SEL_BAYER :
		      (fmt->cs == IPUV3_COLORSPACE_YUV ? PIXFMT_SEL_YUV :
							 PIXFMT_SEL_RGB);

		if (!(fmt_sel & cat) || !fmt->codes)
			continue;

		/* Scan the zero-terminated media-bus code list */
		for (c = fmt->codes; *c; c++) {
			if (*c == code)
				return fmt;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(imx_media_find_mbus_format);
/*
* Enumerate entries in the pixel_formats[] array that match the
* requested selection criteria. Return the fourcc that matches the
* selection criteria at the requested match index.
*
* @fourcc: The returned fourcc that matches the search criteria at
* the requested match index.
* @index: The requested match index.
* @fmt_sel: Include in the enumeration entries with the given selection
* criteria.
* @code: If non-zero, only include in the enumeration entries matching this
* media bus code.
*/
int imx_media_enum_pixel_formats(u32 *fourcc, u32 index,
				 enum imx_pixfmt_sel fmt_sel, u32 code)
{
	const bool want_ipu = fmt_sel & PIXFMT_SEL_IPU;
	unsigned int idx;

	fmt_sel &= ~PIXFMT_SEL_IPU;

	for (idx = 0; idx < ARRAY_SIZE(pixel_formats); idx++) {
		const struct imx_media_pixfmt *fmt = &pixel_formats[idx];
		enum imx_pixfmt_sel cat;

		/* IPU-internal entries only match when explicitly requested */
		if (want_ipu != fmt->ipufmt)
			continue;

		/* Classify the entry: bayer, YUV or RGB */
		if (fmt->bayer)
			cat = PIXFMT_SEL_BAYER;
		else if (fmt->cs == IPUV3_COLORSPACE_YUV)
			cat = PIXFMT_SEL_YUV;
		else
			cat = PIXFMT_SEL_RGB;

		if (!(fmt_sel & cat))
			continue;

		/* Optionally restrict the enumeration to one bus code */
		if (code) {
			const u32 *c;
			bool found = false;

			if (!fmt->codes)
				continue;
			for (c = fmt->codes; *c; c++) {
				if (*c == code) {
					found = true;
					break;
				}
			}
			if (!found)
				continue;
		}

		/* Count down to the requested match index */
		if (!index--) {
			*fourcc = fmt->fourcc;
			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(imx_media_enum_pixel_formats);
/*
* Enumerate entries in the pixel_formats[] array that match the
* requested search criteria. Return the media-bus code that matches
* the search criteria at the requested match index.
*
* @code: The returned media-bus code that matches the search criteria at
* the requested match index.
* @index: The requested match index.
* @fmt_sel: Include in the enumeration entries with the given selection
* criteria.
*/
int imx_media_enum_mbus_formats(u32 *code, u32 index,
				enum imx_pixfmt_sel fmt_sel)
{
	const bool want_ipu = fmt_sel & PIXFMT_SEL_IPU;
	unsigned int idx;

	fmt_sel &= ~PIXFMT_SEL_IPU;

	for (idx = 0; idx < ARRAY_SIZE(pixel_formats); idx++) {
		const struct imx_media_pixfmt *fmt = &pixel_formats[idx];
		enum imx_pixfmt_sel cat;
		const u32 *c;

		/* IPU-internal entries only match when explicitly requested */
		if (want_ipu != fmt->ipufmt)
			continue;

		cat = fmt->bayer ? PIXFMT_SEL_BAYER :
		      (fmt->cs == IPUV3_COLORSPACE_YUV ? PIXFMT_SEL_YUV :
							 PIXFMT_SEL_RGB);

		if (!(fmt_sel & cat) || !fmt->codes)
			continue;

		/* Each bus code of a matching entry counts as one index */
		for (c = fmt->codes; *c; c++) {
			if (!index--) {
				*code = *c;
				return 0;
			}
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(imx_media_enum_mbus_formats);
/*
 * Initialize a media-bus frame format with the given dimensions, bus
 * code and field, and default sRGB colorimetry. Optionally returns the
 * matching pixel-format description through @cc.
 */
int imx_media_init_mbus_fmt(struct v4l2_mbus_framefmt *mbus,
			    u32 width, u32 height, u32 code, u32 field,
			    const struct imx_media_pixfmt **cc)
{
	const struct imx_media_pixfmt *lcc;

	mbus->width = width;
	mbus->height = height;
	mbus->field = field;

	/* code == 0 means "pick a default": use the first YUV bus code */
	if (code == 0)
		imx_media_enum_mbus_formats(&code, 0, PIXFMT_SEL_YUV);

	/* Look the code up in the bus-format table, then the IPU table */
	lcc = imx_media_find_mbus_format(code, PIXFMT_SEL_ANY);
	if (!lcc) {
		lcc = imx_media_find_ipu_format(code, PIXFMT_SEL_YUV_RGB);
		if (!lcc)
			return -EINVAL;
	}

	mbus->code = code;

	/* Default colorimetry: sRGB with per-colorspace default mappings */
	mbus->colorspace = V4L2_COLORSPACE_SRGB;
	mbus->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(mbus->colorspace);
	mbus->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(mbus->colorspace);
	mbus->quantization =
		V4L2_MAP_QUANTIZATION_DEFAULT(lcc->cs == IPUV3_COLORSPACE_RGB,
					      mbus->colorspace,
					      mbus->ycbcr_enc);

	if (cc)
		*cc = lcc;

	return 0;
}
EXPORT_SYMBOL_GPL(imx_media_init_mbus_fmt);
/*
* Initializes the TRY format to the ACTIVE format on all pads
* of a subdev. Can be used as the .init_cfg pad operation.
*/
int imx_media_init_cfg(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *sd_state)
{
	struct v4l2_mbus_framefmt *mf_try;
	unsigned int pad;
	int ret;

	for (pad = 0; pad < sd->entity.num_pads; pad++) {
		struct v4l2_subdev_format format = {
			.pad = pad,
			.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		};

		/* Pads whose active format cannot be read are skipped */
		ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &format);
		if (ret)
			continue;

		/* Copy the ACTIVE format into the pad's TRY format */
		mf_try = v4l2_subdev_get_try_format(sd, sd_state, pad);
		*mf_try = format.format;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(imx_media_init_cfg);
/*
* Default the colorspace in tryfmt to SRGB if set to an unsupported
* colorspace or not initialized. Then set the remaining colorimetry
* parameters based on the colorspace if they are uninitialized.
*
* tryfmt->code must be set on entry.
*
* If this format is destined to be routed through the Image Converter,
* Y`CbCr encoding must be fixed. The IC supports only BT.601 Y`CbCr
* or Rec.709 Y`CbCr encoding.
*/
void imx_media_try_colorimetry(struct v4l2_mbus_framefmt *tryfmt,
			       bool ic_route)
{
	const struct imx_media_pixfmt *cc;
	bool is_rgb = false;

	/* Determine whether the mbus code maps to an RGB pixel format */
	cc = imx_media_find_mbus_format(tryfmt->code, PIXFMT_SEL_ANY);
	if (!cc)
		cc = imx_media_find_ipu_format(tryfmt->code,
					       PIXFMT_SEL_YUV_RGB);
	if (cc && cc->cs == IPUV3_COLORSPACE_RGB)
		is_rgb = true;

	/* Fall back to sRGB for unsupported/uninitialized colorspaces */
	switch (tryfmt->colorspace) {
	case V4L2_COLORSPACE_SMPTE170M:
	case V4L2_COLORSPACE_REC709:
	case V4L2_COLORSPACE_JPEG:
	case V4L2_COLORSPACE_SRGB:
	case V4L2_COLORSPACE_BT2020:
	case V4L2_COLORSPACE_OPRGB:
	case V4L2_COLORSPACE_DCI_P3:
	case V4L2_COLORSPACE_RAW:
		break;
	default:
		tryfmt->colorspace = V4L2_COLORSPACE_SRGB;
		break;
	}

	if (tryfmt->xfer_func == V4L2_XFER_FUNC_DEFAULT)
		tryfmt->xfer_func =
			V4L2_MAP_XFER_FUNC_DEFAULT(tryfmt->colorspace);

	/*
	 * The IC supports only BT.601 or Rec.709 Y'CbCr encoding (see the
	 * comment above this function), so when routed through the IC any
	 * other requested encoding is forced to BT.601.
	 */
	if (ic_route) {
		if (tryfmt->ycbcr_enc != V4L2_YCBCR_ENC_601 &&
		    tryfmt->ycbcr_enc != V4L2_YCBCR_ENC_709)
			tryfmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
	} else {
		if (tryfmt->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT) {
			tryfmt->ycbcr_enc =
				V4L2_MAP_YCBCR_ENC_DEFAULT(tryfmt->colorspace);
		}
	}

	/* Quantization must be resolved after colorspace and encoding */
	if (tryfmt->quantization == V4L2_QUANTIZATION_DEFAULT)
		tryfmt->quantization =
			V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb,
						      tryfmt->colorspace,
						      tryfmt->ycbcr_enc);
}
EXPORT_SYMBOL_GPL(imx_media_try_colorimetry);
/*
 * Convert a media-bus frame format to a v4l2 pixel format, applying the
 * IDMAC width/stride alignment constraints. @cc may be NULL, in which
 * case the pixel-format description is derived from the mbus code.
 */
int imx_media_mbus_fmt_to_pix_fmt(struct v4l2_pix_format *pix,
				  const struct v4l2_mbus_framefmt *mbus,
				  const struct imx_media_pixfmt *cc)
{
	u32 width;
	u32 stride;

	if (!cc) {
		cc = imx_media_find_ipu_format(mbus->code,
					       PIXFMT_SEL_YUV_RGB);
		if (!cc)
			cc = imx_media_find_mbus_format(mbus->code,
							PIXFMT_SEL_ANY);
		if (!cc)
			return -EINVAL;
	}

	/*
	 * TODO: the IPU currently does not support the AYUV32 format,
	 * so until it does convert to a supported YUV format.
	 */
	if (cc->ipufmt && cc->cs == IPUV3_COLORSPACE_YUV) {
		u32 code;

		imx_media_enum_mbus_formats(&code, 0, PIXFMT_SEL_YUV);
		cc = imx_media_find_mbus_format(code, PIXFMT_SEL_YUV);
	}

	/* Round up width for minimum burst size */
	width = round_up(mbus->width, 8);

	/* Round up stride for IDMAC line start address alignment */
	if (cc->planar)
		stride = round_up(width, 16);
	else
		stride = round_up((width * cc->bpp) >> 3, 8);

	pix->width = width;
	pix->height = mbus->height;
	pix->pixelformat = cc->fourcc;
	/* Colorimetry is propagated unchanged from the mbus format */
	pix->colorspace = mbus->colorspace;
	pix->xfer_func = mbus->xfer_func;
	pix->ycbcr_enc = mbus->ycbcr_enc;
	pix->quantization = mbus->quantization;
	pix->field = mbus->field;
	pix->bytesperline = stride;
	/* For planar formats, stride is the luma stride; scale by bpp */
	pix->sizeimage = cc->planar ? ((stride * pix->height * cc->bpp) >> 3) :
			 stride * pix->height;

	return 0;
}
EXPORT_SYMBOL_GPL(imx_media_mbus_fmt_to_pix_fmt);
/* Free a coherent DMA buffer previously set up by imx_media_alloc_dma_buf() */
void imx_media_free_dma_buf(struct device *dev,
			    struct imx_media_dma_buf *buf)
{
	if (buf->virt)
		dma_free_coherent(dev, buf->len, buf->virt, buf->phys);

	/* Clear the handles so a repeated free is harmless */
	buf->virt = NULL;
	buf->phys = 0;
}
EXPORT_SYMBOL_GPL(imx_media_free_dma_buf);
/*
 * Allocate a page-aligned coherent DMA buffer of at least @size bytes.
 * Any previous allocation held by @buf is released first.
 */
int imx_media_alloc_dma_buf(struct device *dev,
			    struct imx_media_dma_buf *buf,
			    int size)
{
	/* Drop whatever the buffer currently holds */
	imx_media_free_dma_buf(dev, buf);

	buf->len = PAGE_ALIGN(size);
	buf->virt = dma_alloc_coherent(dev, buf->len, &buf->phys,
				       GFP_DMA | GFP_KERNEL);
	if (buf->virt)
		return 0;

	dev_err(dev, "%s: failed\n", __func__);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(imx_media_alloc_dma_buf);
/* form a subdev name given a group id and ipu id */
void imx_media_grp_id_to_sd_name(char *sd_name, int sz, u32 grp_id, int ipu_id)
{
	int id;

	switch (grp_id) {
	/* GCC case-range: matches any CSI group id between CSI0 and CSI1 */
	case IMX_MEDIA_GRP_ID_IPU_CSI0...IMX_MEDIA_GRP_ID_IPU_CSI1:
		/* Derive the CSI index (0 or 1) from the group-id bit */
		id = (grp_id >> IMX_MEDIA_GRP_ID_IPU_CSI_BIT) - 1;
		snprintf(sd_name, sz, "ipu%d_csi%d", ipu_id + 1, id);
		break;
	case IMX_MEDIA_GRP_ID_IPU_VDIC:
		snprintf(sd_name, sz, "ipu%d_vdic", ipu_id + 1);
		break;
	case IMX_MEDIA_GRP_ID_IPU_IC_PRP:
		snprintf(sd_name, sz, "ipu%d_ic_prp", ipu_id + 1);
		break;
	case IMX_MEDIA_GRP_ID_IPU_IC_PRPENC:
		snprintf(sd_name, sz, "ipu%d_ic_prpenc", ipu_id + 1);
		break;
	case IMX_MEDIA_GRP_ID_IPU_IC_PRPVF:
		snprintf(sd_name, sz, "ipu%d_ic_prpvf", ipu_id + 1);
		break;
	default:
		/* Unknown group id: sd_name is left untouched */
		break;
	}
}
EXPORT_SYMBOL_GPL(imx_media_grp_id_to_sd_name);
/*
* Adds a video device to the master video device list. This is called
* when a video device is registered.
*/
void imx_media_add_video_device(struct imx_media_dev *imxmd,
				struct imx_media_video_dev *vdev)
{
	/* imxmd->mutex protects the vdev_list */
	mutex_lock(&imxmd->mutex);

	list_add_tail(&vdev->list, &imxmd->vdev_list);

	mutex_unlock(&imxmd->mutex);
}
EXPORT_SYMBOL_GPL(imx_media_add_video_device);
/*
* Search upstream/downstream for a subdevice or video device pad in the
* current pipeline, starting from start_entity. Returns the device's
* source/sink pad that it was reached from. Must be called with
* mdev->graph_mutex held.
*
* If grp_id != 0, finds a subdevice's pad of given grp_id.
* Else If buftype != 0, finds a video device's pad of given buffer type.
* Else, returns the nearest source/sink pad to start_entity.
*/
struct media_pad *
imx_media_pipeline_pad(struct media_entity *start_entity, u32 grp_id,
		       enum v4l2_buf_type buftype, bool upstream)
{
	struct media_entity *me = start_entity;
	struct media_pad *pad = NULL;
	struct video_device *vfd;
	struct v4l2_subdev *sd;
	int i;

	for (i = 0; i < me->num_pads; i++) {
		struct media_pad *spad = &me->pads[i];

		/* Only follow sink pads upstream, source pads downstream */
		if ((upstream && !(spad->flags & MEDIA_PAD_FL_SINK)) ||
		    (!upstream && !(spad->flags & MEDIA_PAD_FL_SOURCE)))
			continue;

		/* Hop over the first connected link from this pad */
		pad = media_pad_remote_pad_first(spad);
		if (!pad)
			continue;

		if (grp_id) {
			if (is_media_entity_v4l2_subdev(pad->entity)) {
				sd = media_entity_to_v4l2_subdev(pad->entity);
				if (sd->grp_id & grp_id)
					return pad;
			}

			/* Not a match: recurse past this entity */
			return imx_media_pipeline_pad(pad->entity, grp_id,
						      buftype, upstream);
		} else if (buftype) {
			if (is_media_entity_v4l2_video_device(pad->entity)) {
				vfd = media_entity_to_video_device(pad->entity);
				if (buftype == vfd->queue->type)
					return pad;
			}

			/* Not a match: recurse past this entity */
			return imx_media_pipeline_pad(pad->entity, grp_id,
						      buftype, upstream);
		} else {
			/* No criteria: nearest connected remote pad wins */
			return pad;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(imx_media_pipeline_pad);
/*
* Search upstream/downstream for a subdev or video device in the current
* pipeline. Must be called with mdev->graph_mutex held.
*/
static struct media_entity *
find_pipeline_entity(struct media_entity *start, u32 grp_id,
		     enum v4l2_buf_type buftype, bool upstream)
{
	struct media_pad *pad;

	/* The start entity itself may already satisfy the search */
	if (grp_id && is_media_entity_v4l2_subdev(start)) {
		struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(start);

		if (sd->grp_id & grp_id)
			return &sd->entity;
	} else if (buftype && is_media_entity_v4l2_video_device(start)) {
		struct video_device *vfd = media_entity_to_video_device(start);

		if (buftype == vfd->queue->type)
			return &vfd->entity;
	}

	/* Otherwise walk the pipeline from here */
	pad = imx_media_pipeline_pad(start, grp_id, buftype, upstream);

	return pad ? pad->entity : NULL;
}
/*
* Find a subdev reached upstream from the given start entity in
* the current pipeline.
* Must be called with mdev->graph_mutex held.
*/
struct v4l2_subdev *
imx_media_pipeline_subdev(struct media_entity *start_entity, u32 grp_id,
			  bool upstream)
{
	struct media_entity *me =
		find_pipeline_entity(start_entity, grp_id, 0, upstream);

	/* No matching entity in the pipeline: report -ENODEV */
	return me ? media_entity_to_v4l2_subdev(me) : ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(imx_media_pipeline_subdev);
/*
* Turn current pipeline streaming on/off starting from entity.
*/
int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd,
				  struct media_entity *entity,
				  bool on)
{
	struct v4l2_subdev *sd;
	int ret = 0;

	if (!is_media_entity_v4l2_subdev(entity))
		return -EINVAL;
	sd = media_entity_to_v4l2_subdev(entity);

	mutex_lock(&imxmd->md.graph_mutex);

	if (on) {
		/* Start the media pipeline before streaming the subdev */
		ret = __media_pipeline_start(entity->pads, &imxmd->pipe);
		if (ret)
			goto out;
		ret = v4l2_subdev_call(sd, video, s_stream, 1);
		if (ret)
			/* Roll back the pipeline start if s_stream failed */
			__media_pipeline_stop(entity->pads);
	} else {
		/* s_stream errors are deliberately ignored on the stop path */
		v4l2_subdev_call(sd, video, s_stream, 0);
		if (media_pad_pipeline(entity->pads))
			__media_pipeline_stop(entity->pads);
	}

out:
	mutex_unlock(&imxmd->md.graph_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(imx_media_pipeline_set_stream);
MODULE_DESCRIPTION("i.MX5/6 v4l2 media controller driver");
MODULE_AUTHOR("Steve Longerbeam <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/imx/imx-media-utils.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* i.MX IPUv3 IC PP mem2mem CSC/Scaler driver
*
* Copyright (C) 2011 Pengutronix, Sascha Hauer
* Copyright (C) 2018 Pengutronix, Philipp Zabel
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <video/imx-ipu-v3.h>
#include <video/imx-ipu-image-convert.h>
#include <media/media-device.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-dma-contig.h>
#include "imx-media.h"
/* Resolve the driver context from an embedded struct v4l2_fh */
#define fh_to_ctx(__fh)	container_of(__fh, struct ipu_csc_scaler_ctx, fh)

#define IMX_CSC_SCALER_NAME "imx-csc-scaler"

/* Indices into ipu_csc_scaler_ctx.q_data[]: OUTPUT feeds in, CAPTURE out */
enum {
	V4L2_M2M_SRC = 0,
	V4L2_M2M_DST = 1,
};

/* Per-device (driver instance) state */
struct ipu_csc_scaler_priv {
	struct imx_media_video_dev vdev;	/* the exported video device */
	struct v4l2_m2m_dev *m2m_dev;		/* mem2mem framework device */
	struct device *dev;
	struct imx_media_dev *md;		/* parent imx-media device */
	struct mutex mutex;	/* mem2mem device mutex */
};

#define vdev_to_priv(v) container_of(v, struct ipu_csc_scaler_priv, vdev)

/* Per-queue, driver-specific private data */
struct ipu_csc_scaler_q_data {
	struct v4l2_pix_format cur_fmt;	/* current pixel format of the queue */
	struct v4l2_rect rect;	/* crop (OUTPUT) / compose (CAPTURE) rect */
};

/* Per-open-file streaming context */
struct ipu_csc_scaler_ctx {
	struct ipu_csc_scaler_priv *priv;	/* back-pointer to device state */

	struct v4l2_fh fh;
	struct ipu_csc_scaler_q_data q_data[2];	/* indexed by V4L2_M2M_SRC/DST */
	struct ipu_image_convert_ctx *icc;	/* prepared conversion context */
	struct v4l2_ctrl_handler ctrl_hdlr;
	int rotate;				/* rotation in degrees, 0..270 step 90 */
	bool hflip;
	bool vflip;
	enum ipu_rotate_mode rot_mode;		/* combined rotate/flip hardware mode */
	unsigned int sequence;			/* frame sequence counter */
};
static struct ipu_csc_scaler_q_data *get_q_data(struct ipu_csc_scaler_ctx *ctx,
						enum v4l2_buf_type type)
{
	/* OUTPUT queue feeds the converter, CAPTURE receives the result */
	return &ctx->q_data[V4L2_TYPE_IS_OUTPUT(type) ? V4L2_M2M_SRC :
							V4L2_M2M_DST];
}
/*
* mem2mem callbacks
*/
static void job_abort(void *_ctx)
{
	struct ipu_csc_scaler_ctx *ctx = _ctx;

	/* Nothing to cancel unless a conversion context exists */
	if (!ctx->icc)
		return;

	ipu_image_convert_abort(ctx->icc);
}
/* Completion callback for a queued image conversion run */
static void ipu_ic_pp_complete(struct ipu_image_convert_run *run, void *_ctx)
{
	struct ipu_csc_scaler_ctx *ctx = _ctx;
	struct ipu_csc_scaler_priv *priv = ctx->priv;
	struct vb2_v4l2_buffer *src_buf, *dst_buf;

	src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	/* Carry timestamp/flags over and give both buffers one sequence */
	v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, true);

	src_buf->sequence = ctx->sequence++;
	dst_buf->sequence = src_buf->sequence;

	/* Mark both buffers done, errored if the run reported a failure */
	v4l2_m2m_buf_done(src_buf, run->status ? VB2_BUF_STATE_ERROR :
						 VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst_buf, run->status ? VB2_BUF_STATE_ERROR :
						 VB2_BUF_STATE_DONE);

	v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
	/* The run descriptor was allocated in device_run() */
	kfree(run);
}
/* mem2mem job: queue one conversion run for the next src/dst buffer pair */
static void device_run(void *_ctx)
{
	struct ipu_csc_scaler_ctx *ctx = _ctx;
	struct ipu_csc_scaler_priv *priv = ctx->priv;
	struct vb2_v4l2_buffer *src_buf, *dst_buf;
	struct ipu_image_convert_run *run;
	int ret;

	src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
	dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

	/* On success this is freed by ipu_ic_pp_complete() */
	run = kzalloc(sizeof(*run), GFP_KERNEL);
	if (!run)
		goto err;

	run->ctx = ctx->icc;
	run->in_phys = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
	run->out_phys = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);

	ret = ipu_image_convert_queue(run);
	if (ret < 0) {
		v4l2_err(ctx->priv->vdev.vfd->v4l2_dev,
			 "%s: failed to queue: %d\n", __func__, ret);
		goto err;
	}

	return;

err:
	/*
	 * Fix: free the run descriptor on the failure path; it was
	 * previously leaked when ipu_image_convert_queue() failed.
	 * kfree(NULL) is a no-op for the allocation-failure path.
	 * NOTE(review): assumes ipu_image_convert_queue() does not take
	 * ownership of the run on error — confirm against its source.
	 */
	kfree(run);
	v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
	v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
	v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
	v4l2_m2m_job_finish(priv->m2m_dev, ctx->fh.m2m_ctx);
}
/*
* Video ioctls
*/
/* VIDIOC_QUERYCAP: report static driver identification strings */
static int ipu_csc_scaler_querycap(struct file *file, void *priv,
				   struct v4l2_capability *cap)
{
	strscpy(cap->driver, IMX_CSC_SCALER_NAME, sizeof(cap->driver));
	strscpy(cap->card, IMX_CSC_SCALER_NAME, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "platform:%s", IMX_CSC_SCALER_NAME);

	return 0;
}
/* Format enumeration: the same YUV/RGB set is supported on both queues */
static int ipu_csc_scaler_enum_fmt(struct file *file, void *fh,
				   struct v4l2_fmtdesc *f)
{
	u32 fourcc;
	int ret;

	ret = imx_media_enum_pixel_formats(&fourcc, f->index,
					   PIXFMT_SEL_YUV_RGB, 0);
	if (!ret)
		f->pixelformat = fourcc;

	return ret;
}
/* Report the currently configured pixel format of the queue */
static int ipu_csc_scaler_g_fmt(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);

	f->fmt.pix = get_q_data(ctx, f->type)->cur_fmt;

	return 0;
}
static int ipu_csc_scaler_try_fmt(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
	struct ipu_csc_scaler_q_data *q_data = get_q_data(ctx, f->type);
	struct ipu_image test_in, test_out;
	enum v4l2_field field;

	/*
	 * Only progressive frames are supported.
	 * NOTE(review): the normalized field value is never written back
	 * to f->fmt.pix.field — confirm callers expect that.
	 */
	field = f->fmt.pix.field;
	if (field == V4L2_FIELD_ANY)
		field = V4L2_FIELD_NONE;
	else if (field != V4L2_FIELD_NONE)
		return -EINVAL;

	/*
	 * Pair the requested format with the current format of the
	 * opposite queue and let the IPU image converter adjust both.
	 */
	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		struct ipu_csc_scaler_q_data *q_data_in =
			get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);

		test_out.pix = f->fmt.pix;
		test_in.pix = q_data_in->cur_fmt;
	} else {
		struct ipu_csc_scaler_q_data *q_data_out =
			get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);

		test_in.pix = f->fmt.pix;
		test_out.pix = q_data_out->cur_fmt;
	}

	ipu_image_convert_adjust(&test_in, &test_out, ctx->rot_mode);

	f->fmt.pix = (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
		test_out.pix : test_in.pix;

	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		/* Capture colorimetry always mirrors the current setting */
		f->fmt.pix.colorspace = q_data->cur_fmt.colorspace;
		f->fmt.pix.ycbcr_enc = q_data->cur_fmt.ycbcr_enc;
		f->fmt.pix.xfer_func = q_data->cur_fmt.xfer_func;
		f->fmt.pix.quantization = q_data->cur_fmt.quantization;
	} else if (f->fmt.pix.colorspace == V4L2_COLORSPACE_DEFAULT) {
		/* Default unspecified output colorimetry to sRGB */
		f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
		f->fmt.pix.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
		f->fmt.pix.xfer_func = V4L2_XFER_FUNC_DEFAULT;
		f->fmt.pix.quantization = V4L2_QUANTIZATION_DEFAULT;
	}

	return 0;
}
static int ipu_csc_scaler_s_fmt(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct ipu_csc_scaler_q_data *q_data;
	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
	struct vb2_queue *vq;
	int ret;

	/* The format cannot change while buffers are allocated */
	vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
	if (vb2_is_busy(vq)) {
		v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s: queue busy\n",
			 __func__);
		return -EBUSY;
	}

	q_data = get_q_data(ctx, f->type);

	/* Adjust the requested format to hardware constraints first */
	ret = ipu_csc_scaler_try_fmt(file, priv, f);
	if (ret < 0)
		return ret;

	q_data->cur_fmt.width = f->fmt.pix.width;
	q_data->cur_fmt.height = f->fmt.pix.height;
	q_data->cur_fmt.pixelformat = f->fmt.pix.pixelformat;
	q_data->cur_fmt.field = f->fmt.pix.field;
	q_data->cur_fmt.bytesperline = f->fmt.pix.bytesperline;
	q_data->cur_fmt.sizeimage = f->fmt.pix.sizeimage;

	/* Reset cropping/composing rectangle */
	q_data->rect.left = 0;
	q_data->rect.top = 0;
	q_data->rect.width = q_data->cur_fmt.width;
	q_data->rect.height = q_data->cur_fmt.height;

	if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		/* Set colorimetry on the output queue */
		q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
		q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
		q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
		q_data->cur_fmt.quantization = f->fmt.pix.quantization;
		/* Propagate colorimetry to the capture queue */
		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
		q_data->cur_fmt.colorspace = f->fmt.pix.colorspace;
		q_data->cur_fmt.ycbcr_enc = f->fmt.pix.ycbcr_enc;
		q_data->cur_fmt.xfer_func = f->fmt.pix.xfer_func;
		q_data->cur_fmt.quantization = f->fmt.pix.quantization;
	}

	/*
	 * TODO: Setting colorimetry on the capture queue is currently not
	 * supported by the V4L2 API
	 */

	return 0;
}
static int ipu_csc_scaler_g_selection(struct file *file, void *priv,
				      struct v4l2_selection *s)
{
	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
	struct ipu_csc_scaler_q_data *q_data;

	/* Crop applies to the OUTPUT queue, compose to the CAPTURE queue */
	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
		break;
	default:
		return -EINVAL;
	}

	if (s->target == V4L2_SEL_TGT_CROP ||
	    s->target == V4L2_SEL_TGT_COMPOSE) {
		/* Active rectangle */
		s->r = q_data->rect;
	} else {
		/* DEFAULT/BOUNDS targets cover the whole frame */
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = q_data->cur_fmt.width;
		s->r.height = q_data->cur_fmt.height;
	}

	return 0;
}
static int ipu_csc_scaler_s_selection(struct file *file, void *priv,
				      struct v4l2_selection *s)
{
	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(priv);
	struct ipu_csc_scaler_q_data *q_data;

	/* Crop applies to the OUTPUT queue, compose to the CAPTURE queue */
	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE &&
	    s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;

	q_data = get_q_data(ctx, s->type);

	/* The input's frame width to the IC must be a multiple of 8 pixels
	 * When performing resizing the frame width must be multiple of burst
	 * size - 8 or 16 pixels as defined by CB#_BURST_16 parameter.
	 */
	if (s->flags & V4L2_SEL_FLAG_GE)
		s->r.width = round_up(s->r.width, 8);
	if (s->flags & V4L2_SEL_FLAG_LE)
		s->r.width = round_down(s->r.width, 8);
	/* Clamp the rectangle inside the current frame */
	s->r.width = clamp_t(unsigned int, s->r.width, 8,
			     round_down(q_data->cur_fmt.width, 8));
	s->r.height = clamp_t(unsigned int, s->r.height, 1,
			      q_data->cur_fmt.height);
	s->r.left = clamp_t(unsigned int, s->r.left, 0,
			    q_data->cur_fmt.width - s->r.width);
	s->r.top = clamp_t(unsigned int, s->r.top, 0,
			   q_data->cur_fmt.height - s->r.height);

	/* V4L2_SEL_FLAG_KEEP_CONFIG is only valid for subdevices */
	q_data->rect = s->r;

	return 0;
}
/* V4L2 ioctl operations; buffer handling is delegated to the m2m helpers */
static const struct v4l2_ioctl_ops ipu_csc_scaler_ioctl_ops = {
	.vidioc_querycap		= ipu_csc_scaler_querycap,

	.vidioc_enum_fmt_vid_cap	= ipu_csc_scaler_enum_fmt,
	.vidioc_g_fmt_vid_cap		= ipu_csc_scaler_g_fmt,
	.vidioc_try_fmt_vid_cap		= ipu_csc_scaler_try_fmt,
	.vidioc_s_fmt_vid_cap		= ipu_csc_scaler_s_fmt,

	.vidioc_enum_fmt_vid_out	= ipu_csc_scaler_enum_fmt,
	.vidioc_g_fmt_vid_out		= ipu_csc_scaler_g_fmt,
	.vidioc_try_fmt_vid_out		= ipu_csc_scaler_try_fmt,
	.vidioc_s_fmt_vid_out		= ipu_csc_scaler_s_fmt,

	.vidioc_g_selection		= ipu_csc_scaler_g_selection,
	.vidioc_s_selection		= ipu_csc_scaler_s_selection,

	.vidioc_reqbufs			= v4l2_m2m_ioctl_reqbufs,
	.vidioc_querybuf		= v4l2_m2m_ioctl_querybuf,

	.vidioc_qbuf			= v4l2_m2m_ioctl_qbuf,
	.vidioc_expbuf			= v4l2_m2m_ioctl_expbuf,
	.vidioc_dqbuf			= v4l2_m2m_ioctl_dqbuf,
	.vidioc_create_bufs		= v4l2_m2m_ioctl_create_bufs,
	.vidioc_prepare_buf		= v4l2_m2m_ioctl_prepare_buf,

	.vidioc_streamon		= v4l2_m2m_ioctl_streamon,
	.vidioc_streamoff		= v4l2_m2m_ioctl_streamoff,

	.vidioc_subscribe_event		= v4l2_ctrl_subscribe_event,
	.vidioc_unsubscribe_event	= v4l2_event_unsubscribe,
};
/*
* Queue operations
*/
/*
 * vb2 .queue_setup: validate/announce the plane count and sizes for the
 * queue's currently configured format.
 */
static int ipu_csc_scaler_queue_setup(struct vb2_queue *vq,
				      unsigned int *nbuffers,
				      unsigned int *nplanes,
				      unsigned int sizes[],
				      struct device *alloc_devs[])
{
	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vq);
	struct ipu_csc_scaler_q_data *q_data;
	unsigned int size;

	q_data = get_q_data(ctx, vq->type);
	size = q_data->cur_fmt.sizeimage;

	/*
	 * Non-zero *nplanes means VIDIOC_CREATE_BUFS: only validate that
	 * the caller-provided plane size is large enough.
	 */
	if (*nplanes)
		return sizes[0] < size ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = size;

	/*
	 * Cleanup: the original copied *nbuffers into a local and wrote
	 * the unchanged value straight back — dead code, removed.
	 */
	dev_dbg(ctx->priv->dev, "get %d buffer(s) of size %d each.\n",
		*nbuffers, size);

	return 0;
}
static int ipu_csc_scaler_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *vq = vb->vb2_queue;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vq);
	struct ipu_csc_scaler_q_data *q_data;
	unsigned long size;

	dev_dbg(ctx->priv->dev, "type: %d\n", vq->type);

	/* Only progressive buffers are accepted on the OUTPUT queue */
	if (V4L2_TYPE_IS_OUTPUT(vq->type)) {
		if (vbuf->field == V4L2_FIELD_ANY)
			vbuf->field = V4L2_FIELD_NONE;
		if (vbuf->field != V4L2_FIELD_NONE) {
			dev_dbg(ctx->priv->dev, "%s: field isn't supported\n",
				__func__);
			return -EINVAL;
		}
	}

	q_data = get_q_data(ctx, vq->type);
	size = q_data->cur_fmt.sizeimage;

	/* The plane must be large enough for the configured format */
	if (vb2_plane_size(vb, 0) < size) {
		dev_dbg(ctx->priv->dev,
			"%s: data will not fit into plane (%lu < %lu)\n",
			__func__, vb2_plane_size(vb, 0), size);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);

	return 0;
}
static void ipu_csc_scaler_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	/* Hand the buffer over to the mem2mem framework */
	v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}
static void ipu_image_from_q_data(struct ipu_image *im,
struct ipu_csc_scaler_q_data *q_data)
{
struct v4l2_pix_format *fmt = &q_data->cur_fmt;
im->pix = *fmt;
if (fmt->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
im->pix.ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
if (fmt->quantization == V4L2_QUANTIZATION_DEFAULT)
im->pix.ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(fmt->colorspace);
im->rect = q_data->rect;
}
static int ipu_csc_scaler_start_streaming(struct vb2_queue *q,
					  unsigned int count)
{
	const enum ipu_ic_task ic_task = IC_TASK_POST_PROCESSOR;
	struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
	struct ipu_csc_scaler_priv *priv = ctx->priv;
	struct ipu_soc *ipu = priv->md->ipu[0];
	struct ipu_csc_scaler_q_data *q_data;
	struct vb2_queue *other_q;
	struct ipu_image in, out;

	/*
	 * The conversion context is prepared only once both queues are
	 * streaming; the first queue to start simply returns success.
	 */
	other_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
				  (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
				  V4L2_BUF_TYPE_VIDEO_OUTPUT :
				  V4L2_BUF_TYPE_VIDEO_CAPTURE);
	if (!vb2_is_streaming(other_q))
		return 0;

	/* Drop a stale conversion context left from a previous run */
	if (ctx->icc) {
		v4l2_warn(ctx->priv->vdev.vfd->v4l2_dev, "removing old ICC\n");
		ipu_image_convert_unprepare(ctx->icc);
	}

	q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
	ipu_image_from_q_data(&in, q_data);

	q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
	ipu_image_from_q_data(&out, q_data);

	ctx->icc = ipu_image_convert_prepare(ipu, ic_task, &in, &out,
					     ctx->rot_mode,
					     ipu_ic_pp_complete, ctx);
	if (IS_ERR(ctx->icc)) {
		struct vb2_v4l2_buffer *buf;
		int ret = PTR_ERR(ctx->icc);

		ctx->icc = NULL;
		v4l2_err(ctx->priv->vdev.vfd->v4l2_dev, "%s: error %d\n",
			 __func__, ret);
		/* Return all queued buffers to the QUEUED state on failure */
		while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
		while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
			v4l2_m2m_buf_done(buf, VB2_BUF_STATE_QUEUED);
		return ret;
	}

	return 0;
}
static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q)
{
struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q);
struct vb2_v4l2_buffer *buf;
if (ctx->icc) {
ipu_image_convert_unprepare(ctx->icc);
ctx->icc = NULL;
}
ctx->sequence = 0;
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
} else {
while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx)))
v4l2_m2m_buf_done(buf, VB2_BUF_STATE_ERROR);
}
}
/* videobuf2 queue operations, shared by the OUTPUT and CAPTURE queues */
static const struct vb2_ops ipu_csc_scaler_qops = {
	.queue_setup		= ipu_csc_scaler_queue_setup,
	.buf_prepare		= ipu_csc_scaler_buf_prepare,
	.buf_queue		= ipu_csc_scaler_buf_queue,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
	.start_streaming	= ipu_csc_scaler_start_streaming,
	.stop_streaming		= ipu_csc_scaler_stop_streaming,
};
static int ipu_csc_scaler_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq)
{
struct ipu_csc_scaler_ctx *ctx = priv;
int ret;
memset(src_vq, 0, sizeof(*src_vq));
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &ipu_csc_scaler_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->priv->mutex;
src_vq->dev = ctx->priv->dev;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
memset(dst_vq, 0, sizeof(*dst_vq));
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &ipu_csc_scaler_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->priv->mutex;
dst_vq->dev = ctx->priv->dev;
return vb2_queue_init(dst_vq);
}
static int ipu_csc_scaler_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct ipu_csc_scaler_ctx *ctx = container_of(ctrl->handler,
						      struct ipu_csc_scaler_ctx,
						      ctrl_hdlr);
	enum ipu_rotate_mode rot_mode;
	int rotate;
	bool hflip, vflip;
	int ret = 0;

	/* Start from the current state and overlay the changed control */
	rotate = ctx->rotate;
	hflip = ctx->hflip;
	vflip = ctx->vflip;

	switch (ctrl->id) {
	case V4L2_CID_HFLIP:
		hflip = ctrl->val;
		break;
	case V4L2_CID_VFLIP:
		vflip = ctrl->val;
		break;
	case V4L2_CID_ROTATE:
		rotate = ctrl->val;
		break;
	default:
		return -EINVAL;
	}

	/* Combine rotation and flips into a single hardware mode */
	ret = ipu_degrees_to_rot_mode(&rot_mode, rotate, hflip, vflip);
	if (ret)
		return ret;

	if (rot_mode != ctx->rot_mode) {
		struct v4l2_pix_format *in_fmt, *out_fmt;
		struct ipu_image test_in, test_out;

		in_fmt = &ctx->q_data[V4L2_M2M_SRC].cur_fmt;
		out_fmt = &ctx->q_data[V4L2_M2M_DST].cur_fmt;

		test_in.pix = *in_fmt;
		test_out.pix = *out_fmt;

		if (ipu_rot_mode_is_irt(rot_mode) !=
		    ipu_rot_mode_is_irt(ctx->rot_mode)) {
			/* Switch width & height to keep aspect ratio intact */
			test_out.pix.width = out_fmt->height;
			test_out.pix.height = out_fmt->width;
		}

		/*
		 * NOTE(review): the adjustment is done with the old
		 * ctx->rot_mode rather than the new rot_mode — confirm
		 * this is intentional.
		 */
		ipu_image_convert_adjust(&test_in, &test_out, ctx->rot_mode);

		/* Check if output format needs to be changed */
		if (test_in.pix.width != in_fmt->width ||
		    test_in.pix.height != in_fmt->height ||
		    test_in.pix.bytesperline != in_fmt->bytesperline ||
		    test_in.pix.sizeimage != in_fmt->sizeimage) {
			struct vb2_queue *out_q;

			out_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
						V4L2_BUF_TYPE_VIDEO_OUTPUT);
			/* Cannot change the format while buffers exist */
			if (vb2_is_busy(out_q))
				return -EBUSY;
		}

		/* Check if capture format needs to be changed */
		if (test_out.pix.width != out_fmt->width ||
		    test_out.pix.height != out_fmt->height ||
		    test_out.pix.bytesperline != out_fmt->bytesperline ||
		    test_out.pix.sizeimage != out_fmt->sizeimage) {
			struct vb2_queue *cap_q;

			cap_q = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
						V4L2_BUF_TYPE_VIDEO_CAPTURE);
			if (vb2_is_busy(cap_q))
				return -EBUSY;
		}

		/* Commit the adjusted formats and the new rotation state */
		*in_fmt = test_in.pix;
		*out_fmt = test_out.pix;

		ctx->rot_mode = rot_mode;
		ctx->rotate = rotate;
		ctx->hflip = hflip;
		ctx->vflip = vflip;
	}

	return 0;
}
/* All controls (hflip/vflip/rotate) are applied by ipu_csc_scaler_s_ctrl() */
static const struct v4l2_ctrl_ops ipu_csc_scaler_ctrl_ops = {
	.s_ctrl = ipu_csc_scaler_s_ctrl,
};
/*
 * Register the per-context V4L2 controls (HFLIP, VFLIP, ROTATE) and apply
 * their default values. Returns 0 on success or the handler's error code,
 * freeing the handler on failure.
 */
static int ipu_csc_scaler_init_controls(struct ipu_csc_scaler_ctx *ctx)
{
	struct v4l2_ctrl_handler *hdlr = &ctx->ctrl_hdlr;

	/* 3 = number of controls registered below */
	v4l2_ctrl_handler_init(hdlr, 3);

	/* boolean flips: min 0, max 1, step 1, default 0 */
	v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_HFLIP,
			  0, 1, 1, 0);
	v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_VFLIP,
			  0, 1, 1, 0);
	/* rotation in 90-degree steps, 0..270, default 0 */
	v4l2_ctrl_new_std(hdlr, &ipu_csc_scaler_ctrl_ops, V4L2_CID_ROTATE,
			  0, 270, 90, 0);

	if (hdlr->error) {
		v4l2_ctrl_handler_free(hdlr);
		return hdlr->error;
	}

	/* push default control values into the context via s_ctrl */
	v4l2_ctrl_handler_setup(hdlr);

	return 0;
}
#define DEFAULT_WIDTH 720
#define DEFAULT_HEIGHT 576
/*
 * Default queue format used for both the source and destination queues of a
 * freshly opened context: 720x576 planar YUV 4:2:0 (12 bits per pixel, hence
 * sizeimage = W * H * 3 / 2).
 */
static const struct ipu_csc_scaler_q_data ipu_csc_scaler_q_data_default = {
	.cur_fmt = {
		.width = DEFAULT_WIDTH,
		.height = DEFAULT_HEIGHT,
		.pixelformat = V4L2_PIX_FMT_YUV420,
		.field = V4L2_FIELD_NONE,
		.bytesperline = DEFAULT_WIDTH,
		.sizeimage = DEFAULT_WIDTH * DEFAULT_HEIGHT * 3 / 2,
		.colorspace = V4L2_COLORSPACE_SRGB,
	},
	.rect = {
		.width = DEFAULT_WIDTH,
		.height = DEFAULT_HEIGHT,
	},
};
/*
* File operations
*/
/*
 * open() file operation: allocate a per-open mem2mem context, attach it to
 * the file handle, create the m2m context and the controls, and seed both
 * queues with the default format. On any failure the partially constructed
 * state is torn down in reverse order.
 */
static int ipu_csc_scaler_open(struct file *file)
{
	struct ipu_csc_scaler_priv *priv = video_drvdata(file);
	struct ipu_csc_scaler_ctx *ctx = NULL;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->rot_mode = IPU_ROTATE_NONE;

	v4l2_fh_init(&ctx->fh, video_devdata(file));
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);
	ctx->priv = priv;

	ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(priv->m2m_dev, ctx,
					    &ipu_csc_scaler_queue_init);
	if (IS_ERR(ctx->fh.m2m_ctx)) {
		ret = PTR_ERR(ctx->fh.m2m_ctx);
		goto err_ctx;
	}

	ret = ipu_csc_scaler_init_controls(ctx);
	if (ret)
		goto err_ctrls;

	ctx->fh.ctrl_handler = &ctx->ctrl_hdlr;

	/* both queues start out with the same default 720x576 YUV420 format */
	ctx->q_data[V4L2_M2M_SRC] = ipu_csc_scaler_q_data_default;
	ctx->q_data[V4L2_M2M_DST] = ipu_csc_scaler_q_data_default;

	dev_dbg(priv->dev, "Created instance %p, m2m_ctx: %p\n", ctx,
		ctx->fh.m2m_ctx);

	return 0;

err_ctrls:
	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
err_ctx:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	return ret;
}
/*
 * release() file operation: tear down the per-open context created by
 * ipu_csc_scaler_open() in reverse order of construction.
 */
static int ipu_csc_scaler_release(struct file *file)
{
	struct ipu_csc_scaler_priv *priv = video_drvdata(file);
	struct ipu_csc_scaler_ctx *ctx = fh_to_ctx(file->private_data);

	dev_dbg(priv->dev, "Releasing instance %p\n", ctx);

	v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);

	return 0;
}
/* File operations: poll/mmap are delegated to the generic m2m helpers. */
static const struct v4l2_file_operations ipu_csc_scaler_fops = {
	.owner		= THIS_MODULE,
	.open		= ipu_csc_scaler_open,
	.release	= ipu_csc_scaler_release,
	.poll		= v4l2_m2m_fop_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= v4l2_m2m_fop_mmap,
};

/* mem2mem job hooks; device_run/job_abort are defined earlier in this file */
static const struct v4l2_m2m_ops m2m_ops = {
	.device_run	= device_run,
	.job_abort	= job_abort,
};
/*
 * video_device release callback: runs when the last reference to the video
 * device is dropped. Frees the m2m device, the video_device itself, and the
 * driver private data allocated in imx_media_csc_scaler_device_init().
 */
static void ipu_csc_scaler_video_device_release(struct video_device *vdev)
{
	struct ipu_csc_scaler_priv *priv = video_get_drvdata(vdev);

	v4l2_m2m_release(priv->m2m_dev);
	video_device_release(vdev);
	kfree(priv);
}
/* Template copied into the freshly allocated video_device at init time. */
static const struct video_device ipu_csc_scaler_videodev_template = {
	.name		= "ipu_ic_pp csc/scaler",
	.fops		= &ipu_csc_scaler_fops,
	.ioctl_ops	= &ipu_csc_scaler_ioctl_ops,
	.minor		= -1,	/* let the core pick a minor number */
	.release	= ipu_csc_scaler_video_device_release,
	.vfl_dir	= VFL_DIR_M2M,
	.device_caps	= V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
};
/*
 * Register the scaler video device with the V4L2 core, binding it to the
 * media device's v4l2_dev. Returns 0 on success or a negative errno.
 */
int imx_media_csc_scaler_device_register(struct imx_media_video_dev *vdev)
{
	struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
	struct video_device *vfd = vdev->vfd;
	int ret;

	vfd->v4l2_dev = &priv->md->v4l2_dev;

	ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
	if (ret) {
		v4l2_err(vfd->v4l2_dev, "Failed to register video device\n");
		return ret;
	}

	v4l2_info(vfd->v4l2_dev, "Registered %s as /dev/%s\n", vfd->name,
		  video_device_node_name(vfd));

	return 0;
}
/*
 * Unregister the video device; final cleanup happens in the device's
 * release callback once the last reference is gone.
 */
void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev)
{
	struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
	struct video_device *vfd = priv->vdev.vfd;

	video_unregister_device(vfd);
}
/*
 * Allocate and initialize the CSC/scaler device state: private data, the
 * video_device (from the template above) and the mem2mem device. Returns a
 * pointer to the embedded imx_media_video_dev, or ERR_PTR() on failure.
 * Registration with the V4L2 core is done separately in
 * imx_media_csc_scaler_device_register().
 */
struct imx_media_video_dev *
imx_media_csc_scaler_device_init(struct imx_media_dev *md)
{
	struct ipu_csc_scaler_priv *priv;
	struct video_device *vfd;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->md = md;
	priv->dev = md->md.dev;

	mutex_init(&priv->mutex);

	vfd = video_device_alloc();
	if (!vfd) {
		ret = -ENOMEM;
		goto err_vfd;
	}

	*vfd = ipu_csc_scaler_videodev_template;
	vfd->lock = &priv->mutex;
	priv->vdev.vfd = vfd;

	INIT_LIST_HEAD(&priv->vdev.list);
	video_set_drvdata(vfd, priv);

	priv->m2m_dev = v4l2_m2m_init(&m2m_ops);
	if (IS_ERR(priv->m2m_dev)) {
		ret = PTR_ERR(priv->m2m_dev);
		v4l2_err(&md->v4l2_dev, "Failed to init mem2mem device: %d\n",
			 ret);
		goto err_m2m;
	}

	return &priv->vdev;

err_m2m:
	video_set_drvdata(vfd, NULL);
err_vfd:
	kfree(priv);
	return ERR_PTR(ret);
}
MODULE_DESCRIPTION("i.MX IPUv3 mem2mem scaler/CSC driver");
MODULE_AUTHOR("Sascha Hauer <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/imx/imx-media-csc-scaler.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* V4L2 Capture IC Preprocess Subdev for Freescale i.MX5/6 SOC
*
* This subdevice handles capture of video frames from the CSI or VDIC,
* which are routed directly to the Image Converter preprocess tasks,
* for resizing, colorspace conversion, and rotation.
*
* Copyright (c) 2012-2017 Mentor Graphics Inc.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-subdev.h>
#include <media/imx.h>
#include "imx-media.h"
#include "imx-ic.h"
/*
* Min/Max supported width and heights.
*/
#define MIN_W 32
#define MIN_H 32
#define MAX_W 4096
#define MAX_H 4096
#define W_ALIGN 4 /* multiple of 16 pixels */
#define H_ALIGN 1 /* multiple of 2 lines */
#define S_ALIGN 1 /* multiple of 2 */
/* Per-task private state for the IC pre-process (PRP) subdev. */
struct prp_priv {
	struct imx_ic_priv *ic_priv;	/* back-pointer to owning IC */
	struct media_pad pad[PRP_NUM_PADS];

	/* lock to protect all members below */
	struct mutex lock;

	struct v4l2_subdev *src_sd;		/* linked upstream source */
	struct v4l2_subdev *sink_sd_prpenc;	/* sink on the PRPENC pad */
	struct v4l2_subdev *sink_sd_prpvf;	/* sink on the PRPVF pad */

	/* the CSI id at link validate */
	int csi_id;

	struct v4l2_mbus_framefmt format_mbus;	/* active sink-pad format */
	struct v4l2_fract frame_interval;
	int stream_count;	/* nested s_stream(1) minus s_stream(0) calls */
};
/* Map a subdev back to its PRP task state via the IC private data. */
static inline struct prp_priv *sd_to_priv(struct v4l2_subdev *sd)
{
	return ((struct imx_ic_priv *)v4l2_get_subdevdata(sd))->task_priv;
}
/*
 * Start streaming: route the IC input mux to the CSI or the VDIC depending
 * on which entity is linked as the source. Always succeeds.
 */
static int prp_start(struct prp_priv *priv)
{
	struct imx_ic_priv *ic_priv = priv->ic_priv;
	bool src_is_vdic;

	/* set IC to receive from CSI or VDI depending on source */
	src_is_vdic = !!(priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC);
	ipu_set_ic_src_mux(ic_priv->ipu, priv->csi_id, src_is_vdic);

	return 0;
}

/* Nothing to undo on stream stop; kept for symmetry with prp_start(). */
static void prp_stop(struct prp_priv *priv)
{
}
/*
 * Return the pad format for the requested "which": TRY formats live in the
 * subdev state, the ACTIVE format in our private copy (sink pad only).
 */
static struct v4l2_mbus_framefmt *
__prp_get_fmt(struct prp_priv *priv, struct v4l2_subdev_state *sd_state,
	      unsigned int pad, enum v4l2_subdev_format_whence which)
{
	return (which == V4L2_SUBDEV_FORMAT_TRY) ?
		v4l2_subdev_get_try_format(&priv->ic_priv->sd, sd_state, pad) :
		&priv->format_mbus;
}
/*
* V4L2 subdev operations.
*/
/*
 * Enumerate media bus codes per pad: the sink pad accepts any IPU YUV/RGB
 * format; the source pads only mirror whatever code is currently set on
 * the sink pad (index 0 only).
 */
static int prp_enum_mbus_code(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_mbus_code_enum *code)
{
	struct prp_priv *priv = sd_to_priv(sd);
	struct v4l2_mbus_framefmt *infmt;
	int ret = 0;

	mutex_lock(&priv->lock);

	switch (code->pad) {
	case PRP_SINK_PAD:
		ret = imx_media_enum_ipu_formats(&code->code, code->index,
						 PIXFMT_SEL_YUV_RGB);
		break;
	case PRP_SRC_PAD_PRPENC:
	case PRP_SRC_PAD_PRPVF:
		if (code->index != 0) {
			ret = -EINVAL;
			goto out;
		}
		/* source pads propagate the sink-pad code unchanged */
		infmt = __prp_get_fmt(priv, sd_state, PRP_SINK_PAD,
				      code->which);
		code->code = infmt->code;
		break;
	default:
		ret = -EINVAL;
	}
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * get_fmt pad op: copy the requested pad's TRY or ACTIVE format to the
 * caller under the private lock.
 */
static int prp_get_fmt(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *sd_state,
		       struct v4l2_subdev_format *sdformat)
{
	struct prp_priv *priv = sd_to_priv(sd);
	struct v4l2_mbus_framefmt *fmt;
	int ret = 0;

	if (sdformat->pad >= PRP_NUM_PADS)
		return -EINVAL;

	mutex_lock(&priv->lock);

	fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
	if (!fmt) {
		ret = -EINVAL;
		goto out;
	}

	sdformat->format = *fmt;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * set_fmt pad op. On the sink pad the requested size is clamped/aligned to
 * hardware limits and the code forced to a supported IPU YUV/RGB format;
 * source pads are read-only mirrors of the sink. Rejected with -EBUSY
 * while streaming.
 */
static int prp_set_fmt(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *sd_state,
		       struct v4l2_subdev_format *sdformat)
{
	struct prp_priv *priv = sd_to_priv(sd);
	struct v4l2_mbus_framefmt *fmt, *infmt;
	const struct imx_media_pixfmt *cc;
	int ret = 0;
	u32 code;

	if (sdformat->pad >= PRP_NUM_PADS)
		return -EINVAL;

	mutex_lock(&priv->lock);

	if (priv->stream_count > 0) {
		ret = -EBUSY;
		goto out;
	}

	infmt = __prp_get_fmt(priv, sd_state, PRP_SINK_PAD, sdformat->which);

	switch (sdformat->pad) {
	case PRP_SINK_PAD:
		v4l_bound_align_image(&sdformat->format.width, MIN_W, MAX_W,
				      W_ALIGN, &sdformat->format.height,
				      MIN_H, MAX_H, H_ALIGN, S_ALIGN);

		cc = imx_media_find_ipu_format(sdformat->format.code,
					       PIXFMT_SEL_YUV_RGB);
		if (!cc) {
			/* unsupported code: fall back to the first one */
			imx_media_enum_ipu_formats(&code, 0,
						   PIXFMT_SEL_YUV_RGB);
			cc = imx_media_find_ipu_format(code,
						       PIXFMT_SEL_YUV_RGB);
			sdformat->format.code = cc->codes[0];
		}

		if (sdformat->format.field == V4L2_FIELD_ANY)
			sdformat->format.field = V4L2_FIELD_NONE;
		break;
	case PRP_SRC_PAD_PRPENC:
	case PRP_SRC_PAD_PRPVF:
		/* Output pads mirror input pad */
		sdformat->format = *infmt;
		break;
	}

	imx_media_try_colorimetry(&sdformat->format, true);

	fmt = __prp_get_fmt(priv, sd_state, sdformat->pad, sdformat->which);
	*fmt = sdformat->format;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * Media link setup: track which subdevs are connected on the sink and the
 * two source pads. Enforces mutual exclusion: only one source at a time
 * (-EBUSY), and the VDIC source is incompatible with a PRPENC sink
 * (-EINVAL), in both link directions.
 */
static int prp_link_setup(struct media_entity *entity,
			  const struct media_pad *local,
			  const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
	struct prp_priv *priv = ic_priv->task_priv;
	struct v4l2_subdev *remote_sd;
	int ret = 0;

	dev_dbg(ic_priv->ipu_dev, "%s: link setup %s -> %s",
		ic_priv->sd.name, remote->entity->name, local->entity->name);

	remote_sd = media_entity_to_v4l2_subdev(remote->entity);

	mutex_lock(&priv->lock);

	if (local->flags & MEDIA_PAD_FL_SINK) {
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (priv->src_sd) {
				ret = -EBUSY;
				goto out;
			}
			/* VDIC source cannot coexist with a PRPENC sink */
			if (priv->sink_sd_prpenc &&
			    (remote_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC)) {
				ret = -EINVAL;
				goto out;
			}
			priv->src_sd = remote_sd;
		} else {
			priv->src_sd = NULL;
		}

		goto out;
	}

	/* this is a source pad */
	if (flags & MEDIA_LNK_FL_ENABLED) {
		switch (local->index) {
		case PRP_SRC_PAD_PRPENC:
			if (priv->sink_sd_prpenc) {
				ret = -EBUSY;
				goto out;
			}
			/* PRPENC sink cannot coexist with a VDIC source */
			if (priv->src_sd && (priv->src_sd->grp_id &
					     IMX_MEDIA_GRP_ID_IPU_VDIC)) {
				ret = -EINVAL;
				goto out;
			}
			priv->sink_sd_prpenc = remote_sd;
			break;
		case PRP_SRC_PAD_PRPVF:
			if (priv->sink_sd_prpvf) {
				ret = -EBUSY;
				goto out;
			}
			priv->sink_sd_prpvf = remote_sd;
			break;
		default:
			ret = -EINVAL;
		}
	} else {
		switch (local->index) {
		case PRP_SRC_PAD_PRPENC:
			priv->sink_sd_prpenc = NULL;
			break;
		case PRP_SRC_PAD_PRPVF:
			priv->sink_sd_prpvf = NULL;
			break;
		default:
			ret = -EINVAL;
		}
	}

out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * Link validation: beyond the default format checks, verify the source
 * topology (a VDIC source may not feed PRPENC; a non-VDIC source must
 * trace back to a CSI) and record which CSI (0 or 1) feeds the pipeline
 * for use by prp_start().
 */
static int prp_link_validate(struct v4l2_subdev *sd,
			     struct media_link *link,
			     struct v4l2_subdev_format *source_fmt,
			     struct v4l2_subdev_format *sink_fmt)
{
	struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
	struct prp_priv *priv = ic_priv->task_priv;
	struct v4l2_subdev *csi;
	int ret;

	ret = v4l2_subdev_link_validate_default(sd, link,
						source_fmt, sink_fmt);
	if (ret)
		return ret;

	/* find the CSI upstream in the enabled pipeline, if any */
	csi = imx_media_pipeline_subdev(&ic_priv->sd.entity,
					IMX_MEDIA_GRP_ID_IPU_CSI, true);
	if (IS_ERR(csi))
		csi = NULL;

	mutex_lock(&priv->lock);

	if (priv->src_sd->grp_id & IMX_MEDIA_GRP_ID_IPU_VDIC) {
		/*
		 * the ->PRPENC link cannot be enabled if the source
		 * is the VDIC
		 */
		if (priv->sink_sd_prpenc) {
			ret = -EINVAL;
			goto out;
		}
	} else {
		/* the source is a CSI */
		if (!csi) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (csi) {
		switch (csi->grp_id) {
		case IMX_MEDIA_GRP_ID_IPU_CSI0:
			priv->csi_id = 0;
			break;
		case IMX_MEDIA_GRP_ID_IPU_CSI1:
			priv->csi_id = 1;
			break;
		default:
			ret = -EINVAL;
		}
	} else {
		/* VDIC source with no CSI found: default to CSI0 */
		priv->csi_id = 0;
	}
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/*
 * s_stream video op with reference counting: the hardware is only touched
 * on the 0->1 and 1->0 transitions of stream_count. Requires a source and
 * at least one sink link (-EPIPE otherwise). Upstream s_stream failures
 * roll back our own start.
 */
static int prp_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct imx_ic_priv *ic_priv = v4l2_get_subdevdata(sd);
	struct prp_priv *priv = ic_priv->task_priv;
	int ret = 0;

	mutex_lock(&priv->lock);

	if (!priv->src_sd || (!priv->sink_sd_prpenc && !priv->sink_sd_prpvf)) {
		ret = -EPIPE;
		goto out;
	}

	/*
	 * enable/disable streaming only if stream_count is
	 * going from 0 to 1 / 1 to 0.
	 */
	if (priv->stream_count != !enable)
		goto update_count;

	dev_dbg(ic_priv->ipu_dev, "%s: stream %s\n", sd->name,
		enable ? "ON" : "OFF");

	if (enable)
		ret = prp_start(priv);
	else
		prp_stop(priv);
	if (ret)
		goto out;

	/* start/stop upstream */
	ret = v4l2_subdev_call(priv->src_sd, video, s_stream, enable);
	ret = (ret && ret != -ENOIOCTLCMD) ? ret : 0;
	if (ret) {
		if (enable)
			prp_stop(priv);
		goto out;
	}

update_count:
	priv->stream_count += enable ? 1 : -1;
	if (priv->stream_count < 0)
		priv->stream_count = 0;
out:
	mutex_unlock(&priv->lock);
	return ret;
}
/* Report the current frame interval for any valid pad. */
static int prp_g_frame_interval(struct v4l2_subdev *sd,
				struct v4l2_subdev_frame_interval *fi)
{
	struct prp_priv *priv = sd_to_priv(sd);

	if (fi->pad >= PRP_NUM_PADS)
		return -EINVAL;

	mutex_lock(&priv->lock);
	fi->interval = priv->frame_interval;
	mutex_unlock(&priv->lock);

	return 0;
}

/*
 * Set the frame interval; any non-degenerate fraction is accepted, a
 * zero numerator or denominator is answered with the current interval.
 */
static int prp_s_frame_interval(struct v4l2_subdev *sd,
				struct v4l2_subdev_frame_interval *fi)
{
	struct prp_priv *priv = sd_to_priv(sd);

	if (fi->pad >= PRP_NUM_PADS)
		return -EINVAL;

	mutex_lock(&priv->lock);

	/* No limits on valid frame intervals */
	if (fi->interval.numerator == 0 || fi->interval.denominator == 0)
		fi->interval = priv->frame_interval;
	else
		priv->frame_interval = fi->interval;

	mutex_unlock(&priv->lock);

	return 0;
}
/*
 * Subdev registered callback: initialize defaults — 30 fps frame interval
 * and the first supported IPU YUV mbus format at the default resolution.
 */
static int prp_registered(struct v4l2_subdev *sd)
{
	struct prp_priv *priv = sd_to_priv(sd);
	u32 code;

	/* init default frame interval */
	priv->frame_interval.numerator = 1;
	priv->frame_interval.denominator = 30;

	/* set a default mbus format */
	imx_media_enum_ipu_formats(&code, 0, PIXFMT_SEL_YUV);

	return imx_media_init_mbus_fmt(&priv->format_mbus,
				       IMX_MEDIA_DEF_PIX_WIDTH,
				       IMX_MEDIA_DEF_PIX_HEIGHT, code,
				       V4L2_FIELD_NONE, NULL);
}
/* Subdev pad operations for the PRP task. */
static const struct v4l2_subdev_pad_ops prp_pad_ops = {
	.init_cfg = imx_media_init_cfg,
	.enum_mbus_code = prp_enum_mbus_code,
	.get_fmt = prp_get_fmt,
	.set_fmt = prp_set_fmt,
	.link_validate = prp_link_validate,
};

/* Subdev video operations. */
static const struct v4l2_subdev_video_ops prp_video_ops = {
	.g_frame_interval = prp_g_frame_interval,
	.s_frame_interval = prp_s_frame_interval,
	.s_stream = prp_s_stream,
};

/* Media entity operations. */
static const struct media_entity_operations prp_entity_ops = {
	.link_setup = prp_link_setup,
	.link_validate = v4l2_subdev_link_validate,
};

static const struct v4l2_subdev_ops prp_subdev_ops = {
	.video = &prp_video_ops,
	.pad = &prp_pad_ops,
};

static const struct v4l2_subdev_internal_ops prp_internal_ops = {
	.registered = prp_registered,
};
/*
 * Task init: allocate the private state (devm-managed, so no kfree needed
 * in prp_remove), cross-link it with the IC, and register the media pads
 * (one sink, the rest sources).
 */
static int prp_init(struct imx_ic_priv *ic_priv)
{
	struct prp_priv *priv;
	int i;

	priv = devm_kzalloc(ic_priv->ipu_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	mutex_init(&priv->lock);
	ic_priv->task_priv = priv;
	priv->ic_priv = ic_priv;

	for (i = 0; i < PRP_NUM_PADS; i++)
		priv->pad[i].flags = (i == PRP_SINK_PAD) ?
			MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;

	return media_entity_pads_init(&ic_priv->sd.entity, PRP_NUM_PADS,
				      priv->pad);
}

/* Task teardown: only the mutex needs explicit destruction. */
static void prp_remove(struct imx_ic_priv *ic_priv)
{
	struct prp_priv *priv = ic_priv->task_priv;

	mutex_destroy(&priv->lock);
}
/* Exported ops bundle consumed by the generic imx-ic task framework. */
struct imx_ic_ops imx_ic_prp_ops = {
	.subdev_ops = &prp_subdev_ops,
	.internal_ops = &prp_internal_ops,
	.entity_ops = &prp_entity_ops,
	.init = prp_init,
	.remove = prp_remove,
};
| linux-master | drivers/staging/media/imx/imx-ic-prp.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include "dvb_filter.h"
/* AC-3 sample-rate codes, in units of 100 Hz: 48 kHz, 44.1 kHz, 32 kHz. */
static u32 freq[4] = {480, 441, 320, 0};

/* AC-3 bitrates in kbit/s, indexed by frmsizecod >> 1. */
static unsigned int ac3_bitrates[32] =
    {32,40,48,56,64,80,96,112,128,160,192,224,256,320,384,448,512,576,640,
     0,0,0,0,0,0,0,0,0,0,0,0,0};

/*
 * AC-3 syncframe sizes in 16-bit words, indexed [fscod][frmsizecod >> 1]
 * (rows: 48 kHz, 44.1 kHz, 32 kHz).
 */
static u32 ac3_frames[3][32] =
    {{64,80,96,112,128,160,192,224,256,320,384,448,512,640,768,896,1024,
      1152,1280,0,0,0,0,0,0,0,0,0,0,0,0,0},
     {69,87,104,121,139,174,208,243,278,348,417,487,557,696,835,975,1114,
      1253,1393,0,0,0,0,0,0,0,0,0,0,0,0,0},
     {96,120,144,168,192,240,288,336,384,480,576,672,768,960,1152,1344,
      1536,1728,1920,0,0,0,0,0,0,0,0,0,0,0,0,0}};
/*
 * Scan @mbuf (@count bytes) for an AC-3 syncword (0x0b 0x77) and fill @ai
 * with the stream parameters decoded from the syncframe header: offset of
 * the syncword, bitrate (bit/s), sample frequency (Hz) and frame size in
 * bytes. If @pr is non-zero, the findings are also printk'd.
 *
 * Returns 0 on success, -1 if no syncword is found or the header is
 * truncated.
 */
int dvb_filter_get_ac3info(u8 *mbuf, int count, struct dvb_audio_info *ai, int pr)
{
	u8 *headr;
	int found = 0;
	int c = 0;
	u8 frame = 0;
	int fr = 0;

	/* search for the 16-bit AC-3 syncword */
	while (!found && c < count) {
		u8 *b = mbuf + c;

		if (b[0] == 0x0b && b[1] == 0x77)
			found = 1;
		else
			c++;
	}

	if (!found)
		return -1;
	if (pr)
		printk(KERN_DEBUG "Audiostream: AC3");

	ai->off = c;
	/* need the 3 header bytes following syncword + CRC (bytes c+2..c+4) */
	if (c + 5 >= count)
		return -1;

	ai->layer = 0;  // 0 for AC3

	headr = mbuf + c + 2;

	/* headr[2] = fscod (2 bits) | frmsizecod (6 bits) */
	frame = (headr[2] & 0x3f);
	ai->bit_rate = ac3_bitrates[frame >> 1] * 1000;

	if (pr)
		printk(KERN_CONT " BRate: %d kb/s", (int) ai->bit_rate / 1000);

	/*
	 * Extract fscod once and translate it via freq[] (units of 100 Hz);
	 * previously the raw code was stored in ai->frequency and then
	 * immediately overwritten (dead store).
	 */
	fr = (headr[2] & 0xc0) >> 6;
	ai->frequency = freq[fr] * 100;

	if (pr)
		printk(KERN_CONT " Freq: %d Hz\n", (int) ai->frequency);

	/* frame size table is in 16-bit words; 44.1 kHz may need +1 word */
	ai->framesize = ac3_frames[fr][frame >> 1];
	if ((frame & 1) && (fr == 1))
		ai->framesize++;
	ai->framesize = ai->framesize << 1;

	if (pr)
		printk(KERN_DEBUG " Framesize %d\n", (int) ai->framesize);

	return 0;
}
/*
 * Initialize a PES->TS converter: pre-build the constant part of the TS
 * header (sync byte 0x47 plus the 13-bit PID), reset the continuity
 * counter and remember the per-packet callback and its context.
 */
void dvb_filter_pes2ts_init(struct dvb_filter_pes2ts *p2ts, unsigned short pid,
			    dvb_filter_pes2ts_cb_t *cb, void *priv)
{
	unsigned char *buf=p2ts->buf;

	buf[0]=0x47;		/* TS sync byte */
	buf[1]=(pid>>8);	/* PID high bits (TEI/PUSI/priority = 0) */
	buf[2]=pid&0xff;	/* PID low byte */
	p2ts->cc=0;		/* continuity counter starts at 0 */
	p2ts->cb=cb;
	p2ts->priv=priv;
}
/*
 * Packetize @len bytes of a PES stream into 188-byte TS packets, invoking
 * the callback registered in dvb_filter_pes2ts_init() for each packet.
 * Full 184-byte payloads are emitted first; the remainder is sent in a
 * final packet padded with an adaptation field (stuffing bytes 0xff) so
 * the packet is exactly 188 bytes. @payload_start sets the PUSI flag on
 * the first packet. Returns 0, or the first non-zero callback result.
 */
int dvb_filter_pes2ts(struct dvb_filter_pes2ts *p2ts, unsigned char *pes,
		      int len, int payload_start)
{
	unsigned char *buf=p2ts->buf;
	int ret=0, rest;

	//len=6+((pes[4]<<8)|pes[5]);

	if (payload_start)
		buf[1]|=0x40;	/* set payload_unit_start_indicator */
	else
		buf[1]&=~0x40;

	/* full packets: payload only (adaptation_field_control = 01) */
	while (len>=184) {
		buf[3]=0x10|((p2ts->cc++)&0x0f);
		memcpy(buf+4, pes, 184);
		if ((ret=p2ts->cb(p2ts->priv, buf)))
			return ret;
		len-=184; pes+=184;
		buf[1]&=~0x40;	/* PUSI only on the first packet */
	}
	if (!len)
		return 0;

	/* last packet: adaptation field + payload (control = 11) */
	buf[3]=0x30|((p2ts->cc++)&0x0f);
	rest=183-len;	/* adaptation field length needed for padding */
	if (rest) {
		buf[5]=0x00;	/* adaptation flags: none */
		if (rest-1)
			memset(buf+6, 0xff, rest-1);	/* stuffing bytes */
	}
	buf[4]=rest;	/* adaptation_field_length */
	memcpy(buf+5+rest, pes, len);
	return p2ts->cb(p2ts->priv, buf);
}
| linux-master | drivers/staging/media/av7110/dvb_filter.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the remote control of SAA7146 based AV7110 cards
*
* Copyright (C) 1999-2003 Holger Waechtler <[email protected]>
* Copyright (C) 2003-2007 Oliver Endriss <[email protected]>
* Copyright (C) 2019 Sean Young <[email protected]>
*/
#include <linux/kernel.h>
#include <media/rc-core.h>
#include "av7110.h"
#include "av7110_hw.h"
#define IR_RC5 0
#define IR_RCMM 1
#define IR_RC5_EXT 2 /* internal only */
/* interrupt handler */
/*
 * Interrupt handler for received IR words: decode @ircom according to the
 * currently configured protocol (RC5, extended RC5 or RCMM) into a
 * scancode + toggle bit and feed it to the rc-core device. Words arriving
 * before the rc device is registered are dropped.
 */
void av7110_ir_handler(struct av7110 *av7110, u32 ircom)
{
	struct rc_dev *rcdev = av7110->ir.rcdev;
	enum rc_proto proto;
	u32 command, addr, scancode;
	u32 toggle;

	dprintk(4, "ir command = %08x\n", ircom);

	if (rcdev) {
		switch (av7110->ir.ir_config) {
		case IR_RC5: /* RC5: 5 bits device address, 6 bits command */
			command = ircom & 0x3f;
			addr = (ircom >> 6) & 0x1f;
			scancode = RC_SCANCODE_RC5(addr, command);
			toggle = ircom & 0x0800;
			proto = RC_PROTO_RC5;
			break;

		case IR_RCMM: /* RCMM: 32 bits scancode */
			scancode = ircom & ~0x8000;
			toggle = ircom & 0x8000;
			proto = RC_PROTO_RCMM32;
			break;

		case IR_RC5_EXT:
			/*
			 * extended RC5: 5 bits device address, 7 bits command
			 *
			 * Extended RC5 uses only one start bit. The second
			 * start bit is re-assigned bit 6 of the command bit.
			 */
			command = ircom & 0x3f;
			addr = (ircom >> 6) & 0x1f;
			if (!(ircom & 0x1000))
				command |= 0x40;
			scancode = RC_SCANCODE_RC5(addr, command);
			toggle = ircom & 0x0800;
			proto = RC_PROTO_RC5;
			break;

		default:
			dprintk(2, "unknown ir config %d\n",
				av7110->ir.ir_config);
			return;
		}

		rc_keydown(rcdev, proto, scancode, toggle != 0);
	}
}
/* Push the selected IR protocol configuration to the AV7110 firmware. */
int av7110_set_ir_config(struct av7110 *av7110)
{
	dprintk(4, "ir config = %08x\n", av7110->ir.ir_config);

	return av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetIR, 1,
			     av7110->ir.ir_config);
}
/*
 * rc-core change_protocol callback: map the requested protocol mask to a
 * firmware IR config. Extended RC5 is used in place of plain RC5 when the
 * firmware is recent enough (>= 0x2620). Narrows *rc_type to the single
 * protocol actually selected. No-op if the config is already active.
 */
static int change_protocol(struct rc_dev *rcdev, u64 *rc_type)
{
	struct av7110 *av7110 = rcdev->priv;
	u32 ir_config;

	if (*rc_type & RC_PROTO_BIT_RCMM32) {
		ir_config = IR_RCMM;
		*rc_type = RC_PROTO_BIT_RCMM32;
	} else if (*rc_type & RC_PROTO_BIT_RC5) {
		if (FW_VERSION(av7110->arm_app) >= 0x2620)
			ir_config = IR_RC5_EXT;
		else
			ir_config = IR_RC5;
		*rc_type = RC_PROTO_BIT_RC5;
	} else {
		return -EINVAL;
	}

	if (ir_config == av7110->ir.ir_config)
		return 0;

	av7110->ir.ir_config = ir_config;

	return av7110_set_ir_config(av7110);
}
/*
 * Allocate, configure and register the rc-core device for the card's IR
 * receiver. Input identity is taken from the PCI (sub)system IDs; the
 * default protocol is RC5. Returns 0 on success or a negative errno; on
 * registration failure the allocated device is freed.
 */
int av7110_ir_init(struct av7110 *av7110)
{
	struct rc_dev *rcdev;
	struct pci_dev *pci;
	int ret;

	rcdev = rc_allocate_device(RC_DRIVER_SCANCODE);
	if (!rcdev)
		return -ENOMEM;

	pci = av7110->dev->pci;

	snprintf(av7110->ir.input_phys, sizeof(av7110->ir.input_phys),
		 "pci-%s/ir0", pci_name(pci));

	rcdev->device_name = av7110->card_name;
	rcdev->driver_name = KBUILD_MODNAME;
	rcdev->input_phys = av7110->ir.input_phys;
	rcdev->input_id.bustype = BUS_PCI;
	rcdev->input_id.version = 2;
	if (pci->subsystem_vendor) {
		rcdev->input_id.vendor	= pci->subsystem_vendor;
		rcdev->input_id.product = pci->subsystem_device;
	} else {
		rcdev->input_id.vendor	= pci->vendor;
		rcdev->input_id.product = pci->device;
	}

	rcdev->dev.parent = &pci->dev;
	rcdev->allowed_protocols = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RCMM32;
	rcdev->change_protocol = change_protocol;
	rcdev->map_name = RC_MAP_HAUPPAUGE;
	rcdev->priv = av7110;

	av7110->ir.rcdev = rcdev;
	av7110->ir.ir_config = IR_RC5;
	/* NOTE(review): return value ignored here — firmware config failure
	 * does not prevent device registration */
	av7110_set_ir_config(av7110);

	ret = rc_register_device(rcdev);
	if (ret) {
		av7110->ir.rcdev = NULL;
		rc_free_device(rcdev);
	}

	return ret;
}
/* Unregister (and thereby free) the rc-core device created at init. */
void av7110_ir_exit(struct av7110 *av7110)
{
	rc_unregister_device(av7110->ir.rcdev);
}
//MODULE_AUTHOR("Holger Waechtler <[email protected]>, Oliver Endriss <[email protected]>");
//MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/av7110/av7110_ir.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* budget-patch.c: driver for Budget Patch,
* hardware modification of DVB-S cards enabling full TS
*
* Written by Emard <[email protected]>
*
* Original idea by Roberto Deza <[email protected]>
*
* Special thanks to Holger Waechtler, Michael Hunold, Marian Durkovic
* and Metzlerbros
*
* the project's page is at https://linuxtv.org
*/
#include "av7110.h"
#include "av7110_hw.h"
#include "budget.h"
#include "stv0299.h"
#include "ves1x93.h"
#include "tda8083.h"
#include "bsru6.h"
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
#define budget_patch budget
static struct saa7146_extension budget_extension;
MAKE_BUDGET_INFO(ttbp, "TT-Budget/Patch DVB-S 1.x PCI", BUDGET_PATCH);
//MAKE_BUDGET_INFO(satel,"TT-Budget/Patch SATELCO PCI", BUDGET_TT_HW_DISEQC);
static const struct pci_device_id pci_tbl[] = {
MAKE_EXTENSION_PCI(ttbp,0x13c2, 0x0000),
// MAKE_EXTENSION_PCI(satel, 0x13c2, 0x1013),
{
.vendor = 0,
}
};
/* those lines are for budget-patch to be tried
** on a true budget card and observe the
** behaviour of VSYNC generated by rps1.
** this code was shamelessly copy/pasted from budget.c
*/
/* Drive GPIO 3 high/low to switch the 22 kHz LNB tone on or off. */
static void gpio_Set22K (struct budget *budget, int state)
{
	struct saa7146_dev *dev=budget->dev;

	dprintk(2, "budget: %p\n", budget);

	saa7146_setgpio(dev, 3, (state ? SAA7146_GPIO_OUTHI : SAA7146_GPIO_OUTLO));
}
/* Diseqc functions only for TT Budget card */
/* taken from the Skyvision DVB driver by
Ralph Metzler <[email protected]> */
/*
 * Bit-bang one DiSEqC bit on GPIO 3: a '1' is 0.5 ms high / 1 ms low, a
 * '0' is 1 ms high / 0.5 ms low (PWK modulation of the 22 kHz tone).
 */
static void DiseqcSendBit (struct budget *budget, int data)
{
	struct saa7146_dev *dev=budget->dev;

	dprintk(2, "budget: %p\n", budget);

	saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
	udelay(data ? 500 : 1000);
	saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
	udelay(data ? 1000 : 500);
}
/*
 * Send one DiSEqC byte MSB first, followed by its odd parity bit
 * (par starts at 1 and is XOR-ed with every data bit).
 */
static void DiseqcSendByte (struct budget *budget, int data)
{
	int i, par=1, d;

	dprintk(2, "budget: %p\n", budget);

	for (i=7; i>=0; i--) {
		d = (data>>i)&1;
		par ^= d;
		DiseqcSendBit(budget, d);
	}

	DiseqcSendBit(budget, par);
}
/*
 * Transmit a complete DiSEqC message of @len bytes, framed by the
 * mandatory >= 15 ms quiet periods. @burst selects the trailing mini-DiSEqC
 * signalling: -1 = none, non-zero = 0xff data burst (sat B), 0 = plain
 * 12.5 ms tone burst (sat A). Always returns 0.
 */
static int SendDiSEqCMsg (struct budget *budget, int len, u8 *msg, unsigned long burst)
{
	struct saa7146_dev *dev=budget->dev;
	int i;

	dprintk(2, "budget: %p\n", budget);

	saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
	mdelay(16);

	for (i=0; i<len; i++)
		DiseqcSendByte(budget, msg[i]);

	mdelay(16);

	if (burst!=-1) {
		if (burst)
			DiseqcSendByte(budget, 0xff);
		else {
			saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
			mdelay(12);
			udelay(500);
			saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
		}
		msleep(20);
	}

	return 0;
}
/* shamelessly copy/pasted from budget.c */
/* SEC ops for plain budget cards: tone/DiSEqC via direct GPIO bit-bang. */
static int budget_set_tone(struct dvb_frontend *fe,
			   enum fe_sec_tone_mode tone)
{
	struct budget* budget = (struct budget*) fe->dvb->priv;

	switch (tone) {
	case SEC_TONE_ON:
		gpio_Set22K (budget, 1);
		break;

	case SEC_TONE_OFF:
		gpio_Set22K (budget, 0);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* Send a full DiSEqC master command, no trailing burst. */
static int budget_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd)
{
	struct budget* budget = (struct budget*) fe->dvb->priv;

	SendDiSEqCMsg (budget, cmd->msg_len, cmd->msg, 0);

	return 0;
}

/* Send only the mini-DiSEqC burst (sat A/B selection). */
static int budget_diseqc_send_burst(struct dvb_frontend *fe,
				    enum fe_sec_mini_cmd minicmd)
{
	struct budget* budget = (struct budget*) fe->dvb->priv;

	SendDiSEqCMsg (budget, 0, NULL, minicmd);

	return 0;
}
/*
 * Send a firmware command to the AV7110 over the DEBI bus. The payload
 * words (index >= 2) are written first, then buf[1], and the opcode word
 * buf[0] is written last so the firmware only starts processing once the
 * full command is in place. Each write is followed by a 5 ms settle delay.
 */
static int budget_av7110_send_fw_cmd(struct budget_patch *budget, u16* buf, int length)
{
	int i;

	dprintk(2, "budget: %p\n", budget);

	for (i = 2; i < length; i++)
	{
		ttpci_budget_debiwrite(budget, DEBINOSWAP, COMMAND + 2*i, 2, (u32) buf[i], 0,0);
		msleep(5);
	}
	if (length)
		ttpci_budget_debiwrite(budget, DEBINOSWAP, COMMAND + 2, 2, (u32) buf[1], 0,0);
	else
		ttpci_budget_debiwrite(budget, DEBINOSWAP, COMMAND + 2, 2, 0, 0,0);
	msleep(5);

	ttpci_budget_debiwrite(budget, DEBINOSWAP, COMMAND, 2, (u32) buf[0], 0,0);
	msleep(5);
	return 0;
}
/* Switch the 22 kHz tone on/off via an AV7110 audio-DAC firmware command. */
static void av7110_set22k(struct budget_patch *budget, int state)
{
	u16 buf[2] = {( COMTYPE_AUDIODAC << 8) | (state ? ON22K : OFF22K), 0};

	dprintk(2, "budget: %p\n", budget);
	budget_av7110_send_fw_cmd(budget, buf, 2);
}

/*
 * Send a DiSEqC message (up to 10 bytes, longer messages are truncated)
 * through the AV7110 firmware. @burst: -1 = no burst, otherwise 0/1
 * selects the mini-DiSEqC burst type. Always returns 0.
 */
static int av7110_send_diseqc_msg(struct budget_patch *budget, int len, u8 *msg, int burst)
{
	int i;
	u16 buf[18] = { ((COMTYPE_AUDIODAC << 8) | SendDiSEqC),
			16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

	dprintk(2, "budget: %p\n", budget);

	if (len>10)
		len=10;

	buf[1] = len+2;
	buf[2] = len;

	if (burst != -1)
		buf[3]=burst ? 0x01 : 0x00;
	else
		buf[3]=0xffff;	/* marker for "no burst" (truncated to u16) */

	for (i=0; i<len; i++)
		buf[i+4]=msg[i];

	budget_av7110_send_fw_cmd(budget, buf, 18);
	return 0;
}
/* SEC ops for budget-patch cards: tone/DiSEqC via AV7110 firmware. */
static int budget_patch_set_tone(struct dvb_frontend *fe,
				 enum fe_sec_tone_mode tone)
{
	struct budget_patch* budget = (struct budget_patch*) fe->dvb->priv;

	switch (tone) {
	case SEC_TONE_ON:
		av7110_set22k (budget, 1);
		break;

	case SEC_TONE_OFF:
		av7110_set22k (budget, 0);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* Send a full DiSEqC master command via firmware, no trailing burst. */
static int budget_patch_diseqc_send_master_cmd(struct dvb_frontend* fe, struct dvb_diseqc_master_cmd* cmd)
{
	struct budget_patch* budget = (struct budget_patch*) fe->dvb->priv;

	av7110_send_diseqc_msg (budget, cmd->msg_len, cmd->msg, 0);

	return 0;
}

/* Send only the mini-DiSEqC burst via firmware. */
static int budget_patch_diseqc_send_burst(struct dvb_frontend *fe,
					  enum fe_sec_mini_cmd minicmd)
{
	struct budget_patch* budget = (struct budget_patch*) fe->dvb->priv;

	av7110_send_diseqc_msg (budget, 0, NULL, minicmd);

	return 0;
}
/*
 * Program the ALPS BSRV2 PLL tuner over I2C: compute the divider from the
 * requested frequency (62.5 kHz steps, see note below) and pick a power /
 * band setting based on the frequency range. Returns 0 or -EIO on I2C
 * failure.
 */
static int alps_bsrv2_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct budget_patch* budget = (struct budget_patch*) fe->dvb->priv;
	u8 pwr = 0;
	u8 buf[4];
	struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };
	u32 div = (p->frequency + 479500) / 125;

	/* band/power selection by frequency (kHz) */
	if (p->frequency > 2000000)
		pwr = 3;
	else if (p->frequency > 1800000)
		pwr = 2;
	else if (p->frequency > 1600000)
		pwr = 1;
	else if (p->frequency > 1200000)
		pwr = 0;
	else if (p->frequency >= 1100000)
		pwr = 1;
	else pwr = 2;

	buf[0] = (div >> 8) & 0x7f;
	buf[1] = div & 0xff;
	buf[2] = ((div & 0x18000) >> 10) | 0x95;
	buf[3] = (pwr << 6) | 0x30;

	// NOTE: since we're using a prescaler of 2, we set the
	// divisor frequency to 62.5kHz and divide by 125 above

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1)
		return -EIO;
	return 0;
}

/* VES1893 demodulator configuration for the ALPS BSRV2 front end. */
static struct ves1x93_config alps_bsrv2_config = {
	.demod_address = 0x08,
	.xin = 90100000UL,
	.invert_pwm = 0,
};
/*
 * Program the Grundig 29504-451 PLL tuner over I2C: simple 125 kHz step
 * divider with fixed control bytes. Returns 0 or -EIO on I2C failure.
 */
static int grundig_29504_451_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct budget_patch* budget = (struct budget_patch*) fe->dvb->priv;
	u32 div;
	u8 data[4];
	struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };

	div = p->frequency / 125;
	data[0] = (div >> 8) & 0x7f;
	data[1] = div & 0xff;
	data[2] = 0x8e;
	data[3] = 0x00;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer (&budget->i2c_adap, &msg, 1) != 1)
		return -EIO;
	return 0;
}

/* TDA8083 demodulator configuration for the Grundig 29504-451 front end. */
static struct tda8083_config grundig_29504_451_config = {
	.demod_address = 0x68,
};
/*
 * Probe for an attached frontend: try the ALPS BSRV2, then the ALPS BSRU6,
 * then the Grundig 29504-451, wiring up the matching tuner and SEC ops for
 * the first one that attaches, and register it with the DVB adapter. On
 * complete failure only a diagnostic is printed (budget->dvb_frontend
 * stays NULL).
 */
static void frontend_init(struct budget_patch* budget)
{
	switch(budget->dev->pci->subsystem_device) {
	case 0x0000: // Hauppauge/TT WinTV DVB-S rev1.X
	case 0x1013: // SATELCO Multimedia PCI

		// try the ALPS BSRV2 first of all
		budget->dvb_frontend = dvb_attach(ves1x93_attach, &alps_bsrv2_config, &budget->i2c_adap);
		if (budget->dvb_frontend) {
			budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsrv2_tuner_set_params;
			budget->dvb_frontend->ops.diseqc_send_master_cmd = budget_patch_diseqc_send_master_cmd;
			budget->dvb_frontend->ops.diseqc_send_burst = budget_patch_diseqc_send_burst;
			budget->dvb_frontend->ops.set_tone = budget_patch_set_tone;
			break;
		}

		// try the ALPS BSRU6 now
		budget->dvb_frontend = dvb_attach(stv0299_attach, &alps_bsru6_config, &budget->i2c_adap);
		if (budget->dvb_frontend) {
			budget->dvb_frontend->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
			budget->dvb_frontend->tuner_priv = &budget->i2c_adap;
			budget->dvb_frontend->ops.diseqc_send_master_cmd = budget_diseqc_send_master_cmd;
			budget->dvb_frontend->ops.diseqc_send_burst = budget_diseqc_send_burst;
			budget->dvb_frontend->ops.set_tone = budget_set_tone;
			break;
		}

		// Try the grundig 29504-451
		budget->dvb_frontend = dvb_attach(tda8083_attach, &grundig_29504_451_config, &budget->i2c_adap);
		if (budget->dvb_frontend) {
			budget->dvb_frontend->ops.tuner_ops.set_params = grundig_29504_451_tuner_set_params;
			budget->dvb_frontend->ops.diseqc_send_master_cmd = budget_diseqc_send_master_cmd;
			budget->dvb_frontend->ops.diseqc_send_burst = budget_diseqc_send_burst;
			budget->dvb_frontend->ops.set_tone = budget_set_tone;
			break;
		}
		break;
	}

	if (budget->dvb_frontend == NULL) {
		printk("dvb-ttpci: A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
		       budget->dev->pci->vendor,
		       budget->dev->pci->device,
		       budget->dev->pci->subsystem_vendor,
		       budget->dev->pci->subsystem_device);
	} else {
		if (dvb_register_frontend(&budget->dvb_adapter, budget->dvb_frontend)) {
			printk("budget-av: Frontend registration failed!\n");
			dvb_frontend_detach(budget->dvb_frontend);
			budget->dvb_frontend = NULL;
		}
	}
}
/* written by Emard */
/*
 * Probe/attach for the "budget patch" hardware modification (by Emard).
 *
 * Step 1 (autodetection): upload a small RPS1 program that clears GPIO3
 * when a VBI_B event (vertical blank on video port B) occurs, then drive
 * GPIO3 high from the CPU.  On a patched card GPIO3 is wired to the port B
 * VSYNC input, so the RPS program fires and pulls GPIO3 low again, which
 * is read back to set 'detected'.
 *
 * Step 2: install the real RPS1 program that mirrors the internal HS
 * (horizontal/line-counter threshold) event onto GPIO3, generating the
 * VSYNC needed to clock the full-TS DMA transfer, then allocate and
 * initialize the budget device.
 *
 * Returns 0 on success or a negative errno from ttpci_budget_init()/-ENOMEM.
 */
static int budget_patch_attach (struct saa7146_dev* dev, struct saa7146_pci_extension_data *info)
{
	struct budget_patch *budget;
	int err;
	int count = 0;
	int detected = 0;

/* compile-time switches for optional bring-up/debug code below */
#define PATCH_RESET 0
#define RPS_IRQ 0
#define HPS_SETUP 0

#if PATCH_RESET
	saa7146_write(dev, MC1, MASK_31);
	msleep(40);
#endif
#if HPS_SETUP
	// initialize registers. Better to have it like this
	// than leaving something unconfigured
	saa7146_write(dev, DD1_STREAM_B, 0);
	// port B VSYNC at rising edge
	saa7146_write(dev, DD1_INIT, 0x00000200); // have this in budget-core too!
	saa7146_write(dev, BRS_CTRL, 0x00000000); // VBI
	// debi config
	// saa7146_write(dev, DEBI_CONFIG, MASK_30|MASK_28|MASK_18);
	// zero all HPS registers
	saa7146_write(dev, HPS_H_PRESCALE, 0); // r68
	saa7146_write(dev, HPS_H_SCALE, 0); // r6c
	saa7146_write(dev, BCS_CTRL, 0); // r70
	saa7146_write(dev, HPS_V_SCALE, 0); // r60
	saa7146_write(dev, HPS_V_GAIN, 0); // r64
	saa7146_write(dev, CHROMA_KEY_RANGE, 0); // r74
	saa7146_write(dev, CLIP_FORMAT_CTRL, 0); // r78
	// Set HPS prescaler for port B input
	saa7146_write(dev, HPS_CTRL, (1<<30) | (0<<29) | (1<<28) | (0<<12) );
	saa7146_write(dev, MC2,
		0 * (MASK_08 | MASK_24) | // BRS control
		0 * (MASK_09 | MASK_25) | // a
		0 * (MASK_10 | MASK_26) | // b
		1 * (MASK_06 | MASK_22) | // HPS_CTRL1
		1 * (MASK_05 | MASK_21) | // HPS_CTRL2
		0 * (MASK_01 | MASK_15) // DEBI
	);
#endif
	// Disable RPS1 and RPS0
	saa7146_write(dev, MC1, ( MASK_29 | MASK_28));
	// RPS1 timeout disable
	saa7146_write(dev, RPS_TOV1, 0);

	// code for autodetection
	// will wait for VBI_B event (vertical blank at port B)
	// and will reset GPIO3 after VBI_B is detected.
	// (GPIO3 should be raised high by CPU to
	// test if GPIO3 will generate vertical blank signal
	// in budget patch GPIO3 is connected to VSYNC_B
	count = 0;
#if 0
	WRITE_RPS1(CMD_UPLOAD |
		MASK_10 | MASK_09 | MASK_08 | MASK_06 | MASK_05 | MASK_04 | MASK_03 | MASK_02 );
#endif
	WRITE_RPS1(CMD_PAUSE | EVT_VBI_B);
	WRITE_RPS1(CMD_WR_REG_MASK | (GPIO_CTRL>>2));
	WRITE_RPS1(GPIO3_MSK);
	WRITE_RPS1(SAA7146_GPIO_OUTLO<<24);
#if RPS_IRQ
	// issue RPS1 interrupt to increment counter
	WRITE_RPS1(CMD_INTERRUPT);
	// at least a NOP is neede between two interrupts
	WRITE_RPS1(CMD_NOP);
	// interrupt again
	WRITE_RPS1(CMD_INTERRUPT);
#endif
	WRITE_RPS1(CMD_STOP);

#if RPS_IRQ
	// set event counter 1 source as RPS1 interrupt (0x03) (rE4 p53)
	// use 0x03 to track RPS1 interrupts - increase by 1 every gpio3 is toggled
	// use 0x15 to track VPE  interrupts - increase by 1 every vpeirq() is called
	saa7146_write(dev, EC1SSR, (0x03<<2) | 3 );
	// set event counter 1 threshold to maximum allowed value (rEC p55)
	saa7146_write(dev, ECT1R,  0x3fff );
#endif
	// Fix VSYNC level
	saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
	// Set RPS1 Address register to point to RPS code               (r108 p42)
	saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
	// Enable RPS1,                                                 (rFC p33)
	saa7146_write(dev, MC1, (MASK_13 | MASK_29 ));

	mdelay(50);
	saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
	mdelay(150);

	/* GPIO3 still high => the RPS program never fired => no patch wiring */
	if( (saa7146_read(dev, GPIO_CTRL) & 0x10000000) == 0)
		detected = 1;

#if RPS_IRQ
	printk("Event Counter 1 0x%04x\n", saa7146_read(dev, EC1R) & 0x3fff );
#endif
	// Disable RPS1
	saa7146_write(dev, MC1, ( MASK_29 ));

	if(detected == 0)
		printk("budget-patch not detected or saa7146 in non-default state.\n"
			"try enabling resetting of 7146 with MASK_31 in MC1 register\n");
	else
		printk("BUDGET-PATCH DETECTED.\n");

/* OLD (Original design by Roberto Deza):
** This code will setup the SAA7146_RPS1 to generate a square
** wave on GPIO3, changing when a field (TS_HEIGHT/2 "lines" of
** TS_WIDTH packets) has been acquired on SAA7146_D1B video port;
** then, this GPIO3 output which is connected to the D1B_VSYNC
** input, will trigger the acquisition of the alternate field
** and so on.
** Currently, the TT_budget / WinTV_Nova cards have two ICs
** (74HCT4040, LVC74) for the generation of this VSYNC signal,
** which seems that can be done perfectly without this :-)).
*/

/* New design (By Emard)
** this rps1 code will copy internal HS event to GPIO3 pin.
** GPIO3 is in budget-patch hardware connected to port B VSYNC
** HS is an internal event of 7146, accessible with RPS
** and temporarily raised high every n lines
** (n in defined in the RPS_THRESH1 counter threshold)
** I think HS is raised high on the beginning of the n-th line
** and remains high until this n-th line that triggered
** it is completely received. When the reception of n-th line
** ends, HS is lowered.
** To transmit data over DMA, 7146 needs changing state at
** port B VSYNC pin. Any changing of port B VSYNC will
** cause some DMA data transfer, with more or less packets loss.
** It depends on the phase and frequency of VSYNC and
** the way of 7146 is instructed to trigger on port B (defined
** in DD1_INIT register, 3rd nibble from the right valid
** numbers are 0-7, see datasheet)
**
** The correct triggering can minimize packet loss,
** dvbtraffic should give this stable bandwidths:
**   22k transponder = 33814 kbit/s
** 27.5k transponder = 38045 kbit/s
** by experiment it is found that the best results
** (stable bandwidths and almost no packet loss)
** are obtained using DD1_INIT triggering number 2
** (Va at rising edge of VS Fa = HS x VS-failing forced toggle)
** and a VSYNC phase that occurs in the middle of DMA transfer
** (about byte 188*512=96256 in the DMA window).
**
** Phase of HS is still not clear to me how to control,
** It just happens to be so. It can be seen if one enables
** RPS_IRQ and print Event Counter 1 in vpeirq(). Every
** time RPS_INTERRUPT is called, the Event Counter 1 will
** increment. That's how the 7146 is programmed to do event
** counting in this budget-patch.c
** I *think* HPS setting has something to do with the phase
** of HS but I can't be 100% sure in that.
** hardware debug note: a working budget card (including budget patch)
** with vpeirq() interrupt setup in mode "0x90" (every 64K) will
** generate 3 interrupts per 25-Hz DMA frame of 2*188*512 bytes
** and that means 3*25=75 Hz of interrupt frequency, as seen by
** watch cat /proc/interrupts
**
** If this frequency is 3x lower (and data received in the DMA
** buffer don't start with 0x47, but in the middle of packets,
** whose lengths appear to be like 188 292 188 104 etc.
** this means VSYNC line is not connected in the hardware.
** (check soldering pcb and pins)
** The same behaviour of missing VSYNC can be duplicated on budget
** cards, by setting DD1_INIT trigger mode 7 in 3rd nibble.
*/
	// Setup RPS1 "program" (p35)
	count = 0;

	// Wait Source Line Counter Threshold                           (p36)
	WRITE_RPS1(CMD_PAUSE | EVT_HS);
	// Set GPIO3=1                                                  (p42)
	WRITE_RPS1(CMD_WR_REG_MASK | (GPIO_CTRL>>2));
	WRITE_RPS1(GPIO3_MSK);
	WRITE_RPS1(SAA7146_GPIO_OUTHI<<24);
#if RPS_IRQ
	// issue RPS1 interrupt
	WRITE_RPS1(CMD_INTERRUPT);
#endif
	// Wait reset Source Line Counter Threshold                     (p36)
	WRITE_RPS1(CMD_PAUSE | RPS_INV | EVT_HS);
	// Set GPIO3=0                                                  (p42)
	WRITE_RPS1(CMD_WR_REG_MASK | (GPIO_CTRL>>2));
	WRITE_RPS1(GPIO3_MSK);
	WRITE_RPS1(SAA7146_GPIO_OUTLO<<24);
#if RPS_IRQ
	// issue RPS1 interrupt
	WRITE_RPS1(CMD_INTERRUPT);
#endif
	// Jump to begin of RPS program                                 (p37)
	WRITE_RPS1(CMD_JUMP);
	WRITE_RPS1(dev->d_rps1.dma_handle);

	// Fix VSYNC level
	saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
	// Set RPS1 Address register to point to RPS code               (r108 p42)
	saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);

	/* NOTE(review): the hardware has already been programmed at this point;
	 * on allocation failure the RPS setup is simply left armed but unused. */
	if (!(budget = kmalloc (sizeof(struct budget_patch), GFP_KERNEL)))
		return -ENOMEM;

	dprintk(2, "budget: %p\n", budget);

	err = ttpci_budget_init(budget, dev, info, THIS_MODULE, adapter_nr);
	if (err) {
		kfree(budget);
		return err;
	}

	// Set Source Line Counter Threshold, using BRS                 (rCC p43)
	// It generates HS event every TS_HEIGHT lines
	// this is related to TS_WIDTH set in register
	// NUM_LINE_BYTE3 in budget-core.c. If NUM_LINE_BYTE
	// low 16 bits are set to TS_WIDTH bytes (TS_WIDTH=2*188
	//,then RPS_THRESH1
	// should be set to trigger every TS_HEIGHT (512) lines.
	//
	saa7146_write(dev, RPS_THRESH1, budget->buffer_height | MASK_12 );

	// saa7146_write(dev, RPS_THRESH0, ((TS_HEIGHT/2)<<16) |MASK_28| (TS_HEIGHT/2) |MASK_12 );
	// Enable RPS1                                                  (rFC p33)
	saa7146_write(dev, MC1, (MASK_13 | MASK_29));

	dev->ext_priv = budget;

	budget->dvb_adapter.priv = budget;
	frontend_init(budget);

	ttpci_budget_init_hooks(budget);

	return 0;
}
/*
 * Tear down a budget-patch device: drop the frontend (if one was
 * registered) and release the core budget state.
 */
static int budget_patch_detach (struct saa7146_dev* dev)
{
	struct budget_patch *budget = (struct budget_patch*) dev->ext_priv;
	int ret;

	if (budget->dvb_frontend) {
		dvb_unregister_frontend(budget->dvb_frontend);
		dvb_frontend_detach(budget->dvb_frontend);
	}

	ret = ttpci_budget_deinit(budget);
	kfree(budget);

	return ret;
}
/* Module entry point: register this driver as a saa7146 extension. */
static int __init budget_patch_init(void)
{
	return saa7146_register_extension(&budget_extension);
}
/* Module exit point: unregister the saa7146 extension. */
static void __exit budget_patch_exit(void)
{
	saa7146_unregister_extension(&budget_extension);
}
/* saa7146 extension descriptor: hooks this driver's attach/detach and
 * IRQ handler (budget-core's irq10 handler) into the saa7146 core. */
static struct saa7146_extension budget_extension = {
	.name = "budget_patch dvb",
	.flags = 0,

	.module = THIS_MODULE,
	.pci_tbl = pci_tbl,
	.attach = budget_patch_attach,
	.detach = budget_patch_detach,

	.irq_mask = MASK_10,
	.irq_func = ttpci_budget_irq10_handler,
};
module_init(budget_patch_init);
module_exit(budget_patch_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Emard, Roberto Deza, Holger Waechtler, Michael Hunold, others");
MODULE_DESCRIPTION("Driver for full TS modified DVB-S SAA7146+AV7110 based so-called Budget Patch cards");
| linux-master | drivers/staging/media/av7110/budget-patch.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* driver for the SAA7146 based AV110 cards (like the Fujitsu-Siemens DVB)
* av7110.c: initialization and demux stuff
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* originally based on code by:
* Copyright (C) 1998,1999 Christian Theiss <[email protected]>
*
* the project's page is at https://linuxtv.org
*/
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/firmware.h>
#include <linux/crc32.h>
#include <linux/i2c.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <asm/byteorder.h>
#include <linux/dvb/frontend.h>
#include <media/dvb_frontend.h>
#include "ttpci-eeprom.h"
#include "av7110.h"
#include "av7110_hw.h"
#include "av7110_av.h"
#include "av7110_ca.h"
#include "av7110_ipack.h"
#include "bsbe1.h"
#include "lnbp21.h"
#include "bsru6.h"
#define TS_WIDTH 376
#define TS_HEIGHT 512
#define TS_BUFLEN (TS_WIDTH*TS_HEIGHT)
#define TS_MAX_PACKETS (TS_BUFLEN/TS_SIZE)
int av7110_debug;
static int vidmode = CVBS_RGB_OUT;
static int pids_off;
static int adac = DVB_ADAC_TI;
static int hw_sections;
static int rgb_on;
static int volume = 255;
static int budgetpatch;
static int wss_cfg_4_3 = 0x4008;
static int wss_cfg_16_9 = 0x0007;
static int tv_standard;
static int full_ts;
module_param_named(debug, av7110_debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (bitmask, default 0)");
module_param(vidmode, int, 0444);
MODULE_PARM_DESC(vidmode,"analog video out: 0 off, 1 CVBS+RGB (default), 2 CVBS+YC, 3 YC");
module_param(pids_off, int, 0444);
MODULE_PARM_DESC(pids_off,"clear video/audio/PCR PID filters when demux is closed");
module_param(adac, int, 0444);
MODULE_PARM_DESC(adac,"audio DAC type: 0 TI, 1 CRYSTAL, 2 MSP (use if autodetection fails)");
module_param(hw_sections, int, 0444);
MODULE_PARM_DESC(hw_sections, "0 use software section filter, 1 use hardware");
module_param(rgb_on, int, 0444);
MODULE_PARM_DESC(rgb_on, "For Siemens DVB-C cards only: Enable RGB control signal on SCART pin 16 to switch SCART video mode from CVBS to RGB");
module_param(volume, int, 0444);
MODULE_PARM_DESC(volume, "initial volume: default 255 (range 0-255)");
module_param(budgetpatch, int, 0444);
MODULE_PARM_DESC(budgetpatch, "use budget-patch hardware modification: default 0 (0 no, 1 autodetect, 2 always)");
module_param(full_ts, int, 0444);
MODULE_PARM_DESC(full_ts, "enable code for full-ts hardware modification: 0 disable (default), 1 enable");
module_param(wss_cfg_4_3, int, 0444);
MODULE_PARM_DESC(wss_cfg_4_3, "WSS 4:3 - default 0x4008 - bit 15: disable, 14: burst mode, 13..0: wss data");
module_param(wss_cfg_16_9, int, 0444);
MODULE_PARM_DESC(wss_cfg_16_9, "WSS 16:9 - default 0x0007 - bit 15: disable, 14: burst mode, 13..0: wss data");
module_param(tv_standard, int, 0444);
MODULE_PARM_DESC(tv_standard, "TV standard: 0 PAL (default), 1 NTSC");
DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static void restart_feeds(struct av7110 *av7110);
static int budget_start_feed(struct dvb_demux_feed *feed);
static int budget_stop_feed(struct dvb_demux_feed *feed);
static int av7110_num;
/* Hook a frontend ops callback: save the original pointer in av7110_copy
 * and replace it with the av7110 wrapper, but only if the frontend driver
 * actually provided the callback. */
#define FE_FUNC_OVERRIDE(fe_func, av7110_copy, av7110_func) \
{\
	if (fe_func != NULL) { \
		av7110_copy = fe_func; \
		fe_func = av7110_func; \
	} \
}
/*
 * One-time A/V setup after firmware boot: set volume, aspect ratio,
 * pan-scan, WSS and video mode via firmware commands, then detect the
 * audio DAC type and configure SCART/SPDIF routing per card subsystem ID.
 * Failures are logged but not fatal.
 */
static void init_av7110_av(struct av7110 *av7110)
{
	int ret;
	struct saa7146_dev *dev = av7110->dev;

	/* set internal volume control to maximum */
	av7110->adac_type = DVB_ADAC_TI;
	ret = av7110_set_volume(av7110, av7110->mixer.volume_left, av7110->mixer.volume_right);
	if (ret < 0)
		printk("dvb-ttpci:cannot set internal volume to maximum:%d\n",ret);

	ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetMonitorType,
			    1, (u16) av7110->display_ar);
	if (ret < 0)
		printk("dvb-ttpci: unable to set aspect ratio\n");
	ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetPanScanType,
			    1, av7110->display_panscan);
	if (ret < 0)
		printk("dvb-ttpci: unable to set pan scan\n");

	ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 2, wss_cfg_4_3);
	if (ret < 0)
		printk("dvb-ttpci: unable to configure 4:3 wss\n");
	ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 3, wss_cfg_16_9);
	if (ret < 0)
		printk("dvb-ttpci: unable to configure 16:9 wss\n");

	ret = av7710_set_video_mode(av7110, vidmode);
	if (ret < 0)
		printk("dvb-ttpci:cannot set video mode:%d\n",ret);

	/* handle different card types */
	/* remaining inits according to card and frontend type */
	av7110->analog_tuner_flags = 0;
	av7110->current_input = 0;
	if (dev->pci->subsystem_vendor == 0x13c2 && dev->pci->subsystem_device == 0x000a)
		av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, 0); // SPDIF on

	/* probe for a Crystal DAC by writing register 0 over i2c */
	if (i2c_writereg(av7110, 0x20, 0x00, 0x00) == 1) {
		printk ("dvb-ttpci: Crystal audio DAC @ card %d detected\n",
			av7110->dvb_adapter.num);
		av7110->adac_type = DVB_ADAC_CRYSTAL;
		i2c_writereg(av7110, 0x20, 0x01, 0xd2);
		i2c_writereg(av7110, 0x20, 0x02, 0x49);
		i2c_writereg(av7110, 0x20, 0x03, 0x00);
		i2c_writereg(av7110, 0x20, 0x04, 0x00);

	/**
	 * some special handling for the Siemens DVB-C cards...
	 */
	} else if (0 == av7110_init_analog_module(av7110)) {
		/* done. */
	}
	else if (dev->pci->subsystem_vendor == 0x110a) {
		printk("dvb-ttpci: DVB-C w/o analog module @ card %d detected\n",
			av7110->dvb_adapter.num);
		av7110->adac_type = DVB_ADAC_NONE;
	}
	else {
		/* fall back to the module parameter */
		av7110->adac_type = adac;
		printk("dvb-ttpci: adac type set to %d @ card %d\n",
			av7110->adac_type, av7110->dvb_adapter.num);
	}

	if (av7110->adac_type == DVB_ADAC_NONE || av7110->adac_type == DVB_ADAC_MSP34x0) {
		// switch DVB SCART on
		ret = av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, MainSwitch, 1, 0);
		if (ret < 0)
			printk("dvb-ttpci:cannot switch on SCART(Main):%d\n",ret);
		ret = av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, 1);
		if (ret < 0)
			printk("dvb-ttpci:cannot switch on SCART(AD):%d\n",ret);
		if (rgb_on &&
		    ((av7110->dev->pci->subsystem_vendor == 0x110a) ||
		     (av7110->dev->pci->subsystem_vendor == 0x13c2)) &&
		     (av7110->dev->pci->subsystem_device == 0x0000)) {
			saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); // RGB on, SCART pin 16
			//saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); // SCARTpin 8
		}
	}

	if (dev->pci->subsystem_vendor == 0x13c2 && dev->pci->subsystem_device == 0x000e)
		av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, SpdifSwitch, 1, 0); // SPDIF on

	ret = av7110_set_volume(av7110, av7110->mixer.volume_left, av7110->mixer.volume_right);
	if (ret < 0)
		printk("dvb-ttpci:cannot set volume :%d\n",ret);
}
/*
 * Recover from an ARM firmware crash: re-boot the firmware, redo the A/V
 * init, run any card-specific recovery hook and restart all demux feeds.
 * Called from arm_thread() when the firmware loop counter stalls.
 */
static void recover_arm(struct av7110 *av7110)
{
	dprintk(4, "%p\n",av7110);

	av7110_bootarm(av7110);
	msleep(100);

	init_av7110_av(av7110);

	/* card-specific recovery */
	if (av7110->recover)
		av7110->recover(av7110);

	restart_feeds(av7110);

#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
	av7110_set_ir_config(av7110);
#endif
}
/* Stop the ARM watchdog thread (if running) and forget its handle. */
static void av7110_arm_sync(struct av7110 *av7110)
{
	struct task_struct *watchdog = av7110->arm_thread;

	if (watchdog)
		kthread_stop(watchdog);

	av7110->arm_thread = NULL;
}
/*
 * ARM firmware watchdog thread.  Every 5 seconds it reads the firmware's
 * loop counter (STATUS_LOOPS in DPRAM); if the counter has not advanced
 * since the last check, or too many communication errors accumulated,
 * the firmware is assumed crashed and recover_arm() is invoked.
 * Exits on a signal or kthread_stop().
 */
static int arm_thread(void *data)
{
	struct av7110 *av7110 = data;
	u16 newloops = 0;
	int timeout;

	dprintk(4, "%p\n",av7110);

	for (;;) {
		timeout = wait_event_interruptible_timeout(av7110->arm_wait,
			kthread_should_stop(), 5 * HZ);

		if (-ERESTARTSYS == timeout || kthread_should_stop()) {
			/* got signal or told to quit*/
			break;
		}

		if (!av7110->arm_ready)
			continue;

		if (mutex_lock_interruptible(&av7110->dcomlock))
			break;
		newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2);
		mutex_unlock(&av7110->dcomlock);

		if (newloops == av7110->arm_loops || av7110->arm_errors > 3) {
			printk(KERN_ERR "dvb-ttpci: ARM crashed @ card %d\n",
			       av7110->dvb_adapter.num);

			recover_arm(av7110);

			/* re-read and offset by one so the next check passes */
			if (mutex_lock_interruptible(&av7110->dcomlock))
				break;
			newloops = rdebi(av7110, DEBINOSWAP, STATUS_LOOPS, 0, 2) - 1;
			mutex_unlock(&av7110->dcomlock);
		}
		av7110->arm_loops = newloops;
		av7110->arm_errors = 0;
	}

	return 0;
}
/****************************************************************************
* IRQ handling
****************************************************************************/
/*
 * Deliver data received from the hardware PID filter to the software
 * demux.  For section filters the section length is validated and the
 * "not-equal" filter mode is evaluated in software before invoking the
 * section callback; for TS filters the data is either passed through or
 * repacketized via the PES-to-TS converter.
 * Returns the demux callback's result, or 0 when the data is dropped.
 */
static int DvbDmxFilterCallback(u8 *buffer1, size_t buffer1_len,
				u8 *buffer2, size_t buffer2_len,
				struct dvb_demux_filter *dvbdmxfilter,
				struct av7110 *av7110)
{
	if (!dvbdmxfilter->feed->demux->dmx.frontend)
		return 0;
	if (dvbdmxfilter->feed->demux->dmx.frontend->source == DMX_MEMORY_FE)
		return 0;

	switch (dvbdmxfilter->type) {
	case DMX_TYPE_SEC:
		/* validate against the section_length field (bytes 1-2) */
		if ((((buffer1[1] << 8) | buffer1[2]) & 0xfff) + 3 != buffer1_len)
			return 0;

		if (dvbdmxfilter->doneq) {
			struct dmx_section_filter *filter = &dvbdmxfilter->filter;
			int i;
			u8 xor, neq = 0;

			/* drop section unless at least one "not-equal" byte differs */
			for (i = 0; i < DVB_DEMUX_MASK_MAX; i++) {
				xor = filter->filter_value[i] ^ buffer1[i];
				neq |= dvbdmxfilter->maskandnotmode[i] & xor;
			}
			if (!neq)
				return 0;
		}
		return dvbdmxfilter->feed->cb.sec(buffer1, buffer1_len,
						  buffer2, buffer2_len,
						  &dvbdmxfilter->filter, NULL);
	case DMX_TYPE_TS:
		if (!(dvbdmxfilter->feed->ts_type & TS_PACKET))
			return 0;
		if (dvbdmxfilter->feed->ts_type & TS_PAYLOAD_ONLY)
			return dvbdmxfilter->feed->cb.ts(buffer1, buffer1_len,
							 buffer2, buffer2_len,
							 &dvbdmxfilter->feed->feed.ts,
							 NULL);
		else
			/* rebuild TS packets from the PES stream */
			av7110_p2t_write(buffer1, buffer1_len,
					 dvbdmxfilter->feed->pid,
					 &av7110->p2t_filter[dvbdmxfilter->index]);
		return 0;
	default:
		return 0;
	}
}
//#define DEBUG_TIMING
/* Debug helper: print a wall-clock timestamp tagged with 's'.
 * Compiles to a no-op unless DEBUG_TIMING is defined. */
static inline void print_time(char *s)
{
#ifdef DEBUG_TIMING
	struct timespec64 ts;
	ktime_get_real_ts64(&ts);
	printk("%s: %lld.%09ld\n", s, (s64)ts.tv_sec, ts.tv_nsec);
#endif
}
#define DEBI_READ 0
#define DEBI_WRITE 1
/*
 * Kick off a DEBI DMA transfer to/from the DPRAM at 'addr' of 'len'
 * bytes.  Waits for any previous DEBI transaction to finish, enables the
 * DEBI-done interrupt (MASK_19) and issues the transfer; completion is
 * handled in debiirq().
 */
static inline void start_debi_dma(struct av7110 *av7110, int dir,
				  unsigned long addr, unsigned int len)
{
	dprintk(8, "%c %08lx %u\n", dir == DEBI_READ ? 'R' : 'W', addr, len);
	if (saa7146_wait_for_debi_done(av7110->dev, 0)) {
		printk(KERN_ERR "%s: saa7146_wait_for_debi_done timed out\n", __func__);
		return;
	}

	SAA7146_ISR_CLEAR(av7110->dev, MASK_19); /* for good measure */
	SAA7146_IER_ENABLE(av7110->dev, MASK_19);

	if (len < 5)
		len = 5; /* we want a real DEBI DMA */
	if (dir == DEBI_WRITE)
		iwdebi(av7110, DEBISWAB, addr, 0, (len + 3) & ~3);
	else
		irdebi(av7110, DEBISWAB, addr, 0, len);
}
/*
 * Tasklet run when a DEBI DMA transfer (started in gpioirq/start_debi_dma)
 * completes.  Dispatches the received data according to the transfer type
 * recorded in av7110->debitype, then acknowledges the buffer back to the
 * firmware (RX_BUFF for device->host transfers, TX_BUFF for host->device)
 * and clears the ARM mailbox.
 */
static void debiirq(struct tasklet_struct *t)
{
	struct av7110 *av7110 = from_tasklet(av7110, t, debi_tasklet);
	int type = av7110->debitype;
	int handle = (type >> 8) & 0x1f;
	unsigned int xfer = 0;

	print_time("debi");
	dprintk(4, "type 0x%04x\n", type);

	if (type == -1) {
		/* spurious completion: no transfer was pending */
		printk("DEBI irq oops @ %ld, psr:0x%08x, ssr:0x%08x\n",
		       jiffies, saa7146_read(av7110->dev, PSR),
		       saa7146_read(av7110->dev, SSR));
		goto debi_done;
	}
	av7110->debitype = -1;

	switch (type & 0xff) {
	case DATA_TS_RECORD:
		dvb_dmx_swfilter_packets(&av7110->demux,
					 (const u8 *) av7110->debi_virt,
					 av7110->debilen / 188);
		xfer = RX_BUFF;
		break;

	case DATA_PES_RECORD:
		if (av7110->demux.recording)
			av7110_record_cb(&av7110->p2t[handle],
					 (u8 *) av7110->debi_virt,
					 av7110->debilen);
		xfer = RX_BUFF;
		break;

	case DATA_IPMPE:
	case DATA_FSECTION:
	case DATA_PIPING:
		if (av7110->handle2filter[handle])
			DvbDmxFilterCallback((u8 *)av7110->debi_virt,
					     av7110->debilen, NULL, 0,
					     av7110->handle2filter[handle],
					     av7110);
		xfer = RX_BUFF;
		break;

	case DATA_CI_GET:
	{
		u8 *data = av7110->debi_virt;
		u8 data_0 = data[0];

		/* slot-status message vs. ordinary CA data */
		if (data_0 < 2 && data[2] == 0xff) {
			int flags = 0;
			if (data[5] > 0)
				flags |= CA_CI_MODULE_PRESENT;
			if (data[5] > 5)
				flags |= CA_CI_MODULE_READY;
			av7110->ci_slot[data_0].flags = flags;
		} else
			ci_get_data(&av7110->ci_rbuffer,
				    av7110->debi_virt,
				    av7110->debilen);
		xfer = RX_BUFF;
		break;
	}

	case DATA_COMMON_INTERFACE:
		CI_handle(av7110, (u8 *)av7110->debi_virt, av7110->debilen);
		xfer = RX_BUFF;
		break;

	case DATA_DEBUG_MESSAGE:
		/* firmware debug string: force NUL termination before printing */
		((s8*)av7110->debi_virt)[Reserved_SIZE - 1] = 0;
		printk("%s\n", (s8 *) av7110->debi_virt);
		xfer = RX_BUFF;
		break;

	case DATA_CI_PUT:
		dprintk(4, "debi DATA_CI_PUT\n");
		xfer = TX_BUFF;
		break;
	case DATA_MPEG_PLAY:
		dprintk(4, "debi DATA_MPEG_PLAY\n");
		xfer = TX_BUFF;
		break;
	case DATA_BMP_LOAD:
		dprintk(4, "debi DATA_BMP_LOAD\n");
		xfer = TX_BUFF;
		break;
	default:
		break;
	}
debi_done:
	spin_lock(&av7110->debilock);
	if (xfer)
		iwdebi(av7110, DEBINOSWAP, xfer, 0, 2);
	ARM_ClearMailBox(av7110);
	spin_unlock(&av7110->debilock);
}
/* irq from av7110 firmware writing the mailbox register in the DPRAM */
/*
 * GPIO0 tasklet: the av7110 firmware raised its mailbox interrupt.
 * Reads what the firmware wants (IRQ_STATE/IRQ_STATE_EXT) and either
 * answers it inline or starts a DEBI DMA transfer, which will be
 * completed by debiirq().
 *
 * Locking protocol: debilock is taken here and, on the paths that start
 * a DMA, released just before returning WITHOUT clearing the mailbox —
 * debiirq() clears it when the transfer completes.  On all other paths
 * the mailbox is cleared at the bottom of this function.
 */
static void gpioirq(struct tasklet_struct *t)
{
	struct av7110 *av7110 = from_tasklet(av7110, t, gpio_tasklet);
	u32 rxbuf, txbuf;
	int len;

	if (av7110->debitype != -1)
		/* we shouldn't get any irq while a debi xfer is running */
		printk("dvb-ttpci: GPIO0 irq oops @ %ld, psr:0x%08x, ssr:0x%08x\n",
		       jiffies, saa7146_read(av7110->dev, PSR),
		       saa7146_read(av7110->dev, SSR));

	if (saa7146_wait_for_debi_done(av7110->dev, 0)) {
		printk(KERN_ERR "%s: saa7146_wait_for_debi_done timed out\n", __func__);
		BUG(); /* maybe we should try resetting the debi? */
	}

	spin_lock(&av7110->debilock);
	ARM_ClearIrq(av7110);

	/* see what the av7110 wants */
	av7110->debitype = irdebi(av7110, DEBINOSWAP, IRQ_STATE, 0, 2);
	av7110->debilen = irdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
	rxbuf = irdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
	txbuf = irdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
	len = (av7110->debilen + 3) & ~3;

	print_time("gpio");
	dprintk(8, "GPIO0 irq 0x%04x %d\n", av7110->debitype, av7110->debilen);

	switch (av7110->debitype & 0xff) {
	case DATA_TS_PLAY:
	case DATA_PES_PLAY:
		break;

	case DATA_MPEG_VIDEO_EVENT:
	{
		u32 h_ar;
		struct video_event event;

		av7110->video_size.w = irdebi(av7110, DEBINOSWAP, STATUS_MPEG_WIDTH, 0, 2);
		h_ar = irdebi(av7110, DEBINOSWAP, STATUS_MPEG_HEIGHT_AR, 0, 2);

		iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
		iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);

		av7110->video_size.h = h_ar & 0xfff;

		event.type = VIDEO_EVENT_SIZE_CHANGED;
		event.u.size.w = av7110->video_size.w;
		event.u.size.h = av7110->video_size.h;

		/* aspect ratio is encoded in bits 12-15 */
		switch ((h_ar >> 12) & 0xf)
		{
		case 3:
			av7110->video_size.aspect_ratio = VIDEO_FORMAT_16_9;
			event.u.size.aspect_ratio = VIDEO_FORMAT_16_9;
			av7110->videostate.video_format = VIDEO_FORMAT_16_9;
			break;
		case 4:
			av7110->video_size.aspect_ratio = VIDEO_FORMAT_221_1;
			event.u.size.aspect_ratio = VIDEO_FORMAT_221_1;
			av7110->videostate.video_format = VIDEO_FORMAT_221_1;
			break;
		default:
			av7110->video_size.aspect_ratio = VIDEO_FORMAT_4_3;
			event.u.size.aspect_ratio = VIDEO_FORMAT_4_3;
			av7110->videostate.video_format = VIDEO_FORMAT_4_3;
		}

		dprintk(8, "GPIO0 irq: DATA_MPEG_VIDEO_EVENT: w/h/ar = %u/%u/%u\n",
			av7110->video_size.w, av7110->video_size.h,
			av7110->video_size.aspect_ratio);

		dvb_video_add_event(av7110, &event);
		break;
	}

	case DATA_CI_PUT:
	{
		int avail;
		struct dvb_ringbuffer *cibuf = &av7110->ci_wbuffer;

		avail = dvb_ringbuffer_avail(cibuf);
		if (avail <= 2) {
			/* nothing queued: tell the firmware there is no data */
			iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
			iwdebi(av7110, DEBINOSWAP, TX_LEN, 0, 2);
			iwdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
			break;
		}
		/* 2-byte big-endian length header precedes each CI message */
		len = DVB_RINGBUFFER_PEEK(cibuf, 0) << 8;
		len |= DVB_RINGBUFFER_PEEK(cibuf, 1);
		if (avail < len + 2) {
			/* message not fully queued yet */
			iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
			iwdebi(av7110, DEBINOSWAP, TX_LEN, 0, 2);
			iwdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
			break;
		}
		DVB_RINGBUFFER_SKIP(cibuf, 2);

		dvb_ringbuffer_read(cibuf, av7110->debi_virt, len);

		iwdebi(av7110, DEBINOSWAP, TX_LEN, len, 2);
		iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, len, 2);
		dprintk(8, "DMA: CI\n");
		start_debi_dma(av7110, DEBI_WRITE, DPRAM_BASE + txbuf, len);
		spin_unlock(&av7110->debilock);
		wake_up(&cibuf->queue);
		return;
	}

	case DATA_MPEG_PLAY:
		if (!av7110->playing) {
			iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
			iwdebi(av7110, DEBINOSWAP, TX_LEN, 0, 2);
			iwdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
			break;
		}
		len = 0;
		/* bit 8: firmware wants audio PES, bit 9: video PES */
		if (av7110->debitype & 0x100) {
			spin_lock(&av7110->aout.lock);
			len = av7110_pes_play(av7110->debi_virt, &av7110->aout, 2048);
			spin_unlock(&av7110->aout.lock);
		}
		if (len <= 0 && (av7110->debitype & 0x200)
		    && av7110->videostate.play_state != VIDEO_FREEZED) {
			spin_lock(&av7110->avout.lock);
			len = av7110_pes_play(av7110->debi_virt, &av7110->avout, 2048);
			spin_unlock(&av7110->avout.lock);
		}
		if (len <= 0) {
			iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
			iwdebi(av7110, DEBINOSWAP, TX_LEN, 0, 2);
			iwdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
			break;
		}
		dprintk(8, "GPIO0 PES_PLAY len=%04x\n", len);
		iwdebi(av7110, DEBINOSWAP, TX_LEN, len, 2);
		iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, len, 2);
		dprintk(8, "DMA: MPEG_PLAY\n");
		start_debi_dma(av7110, DEBI_WRITE, DPRAM_BASE + txbuf, len);
		spin_unlock(&av7110->debilock);
		return;

	case DATA_BMP_LOAD:
		len = av7110->debilen;
		dprintk(8, "gpio DATA_BMP_LOAD len %d\n", len);
		if (!len) {
			av7110->bmp_state = BMP_LOADED;
			iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, 0, 2);
			iwdebi(av7110, DEBINOSWAP, TX_LEN, 0, 2);
			iwdebi(av7110, DEBINOSWAP, TX_BUFF, 0, 2);
			wake_up(&av7110->bmpq);
			dprintk(8, "gpio DATA_BMP_LOAD done\n");
			break;
		}
		/* clamp to remaining bitmap data and the 2K DEBI window */
		if (len > av7110->bmplen)
			len = av7110->bmplen;
		if (len > 2 * 1024)
			len = 2 * 1024;
		iwdebi(av7110, DEBINOSWAP, TX_LEN, len, 2);
		iwdebi(av7110, DEBINOSWAP, IRQ_STATE_EXT, len, 2);
		memcpy(av7110->debi_virt, av7110->bmpbuf+av7110->bmpp, len);
		av7110->bmpp += len;
		av7110->bmplen -= len;
		dprintk(8, "gpio DATA_BMP_LOAD DMA len %d\n", len);
		start_debi_dma(av7110, DEBI_WRITE, DPRAM_BASE+txbuf, len);
		spin_unlock(&av7110->debilock);
		return;

	case DATA_CI_GET:
	case DATA_COMMON_INTERFACE:
	case DATA_FSECTION:
	case DATA_IPMPE:
	case DATA_PIPING:
		if (!len || len > 4 * 1024) {
			iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
			break;
		}
		fallthrough;

	case DATA_TS_RECORD:
	case DATA_PES_RECORD:
		dprintk(8, "DMA: TS_REC etc.\n");
		start_debi_dma(av7110, DEBI_READ, DPRAM_BASE+rxbuf, len);
		spin_unlock(&av7110->debilock);
		return;

	case DATA_DEBUG_MESSAGE:
		if (!len || len > 0xff) {
			iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
			break;
		}
		start_debi_dma(av7110, DEBI_READ, Reserved, len);
		spin_unlock(&av7110->debilock);
		return;

	case DATA_IRCOMMAND:
#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
		av7110_ir_handler(av7110,
				  swahw32(irdebi(av7110, DEBINOSWAP, Reserved,
						 0, 4)));
#endif
		iwdebi(av7110, DEBINOSWAP, RX_BUFF, 0, 2);
		break;

	default:
		printk("dvb-ttpci: gpioirq unknown type=%d len=%d\n",
		       av7110->debitype, av7110->debilen);
		break;
	}
	av7110->debitype = -1;
	ARM_ClearMailBox(av7110);
	spin_unlock(&av7110->debilock);
}
#ifdef CONFIG_DVB_AV7110_OSD
/* OSD device ioctl: dispatch the two supported commands to the OSD
 * firmware helpers; anything else is -EINVAL. */
static int dvb_osd_ioctl(struct file *file,
			 unsigned int cmd, void *parg)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;

	dprintk(4, "%p\n", av7110);

	switch (cmd) {
	case OSD_SEND_CMD:
		return av7110_osd_cmd(av7110, (osd_cmd_t *) parg);
	case OSD_GET_CAPABILITY:
		return av7110_osd_capability(av7110, (osd_cap_t *) parg);
	default:
		return -EINVAL;
	}
}
/* File operations for the OSD device node; all requests go through the
 * generic DVB ioctl path into dvb_osd_ioctl(). */
static const struct file_operations dvb_osd_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= dvb_generic_ioctl,
	.open		= dvb_generic_open,
	.release	= dvb_generic_release,
	.llseek		= noop_llseek,
};
/* Template for the registered OSD DVB device (one user, one writer);
 * .priv is filled in at registration time. */
static struct dvb_device dvbdev_osd = {
	.priv		= NULL,
	.users		= 1,
	.writers	= 1,
	.fops		= &dvb_osd_fops,
	.kernel_ioctl	= dvb_osd_ioctl,
};
#endif /* CONFIG_DVB_AV7110_OSD */
/*
 * Program the firmware MultiPID filter with the given video/audio/
 * teletext/subtitle/PCR PIDs.  A PID of 0x1fff (the TS null PID) in any
 * slot disables all of them; bit 15 of aflags requests audio bypass mode.
 * Returns the firmware command result.
 */
static inline int SetPIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
			  u16 subpid, u16 pcrpid)
{
	u16 aflags = 0;

	dprintk(4, "%p\n", av7110);

	if (vpid == 0x1fff || apid == 0x1fff ||
	    ttpid == 0x1fff || subpid == 0x1fff || pcrpid == 0x1fff) {
		vpid = apid = ttpid = subpid = pcrpid = 0;
		av7110->pids[DMX_PES_VIDEO] = 0;
		av7110->pids[DMX_PES_AUDIO] = 0;
		av7110->pids[DMX_PES_TELETEXT] = 0;
		av7110->pids[DMX_PES_PCR] = 0;
	}

	if (av7110->audiostate.bypass_mode)
		aflags |= 0x8000;

	return av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, MultiPID, 6,
			     pcrpid, vpid, apid, ttpid, subpid, aflags);
}
/*
 * Update the cached decoder PIDs and, if the frontend is locked, push
 * them to the firmware via SetPIDs().  A PID with bit 15 set means
 * "leave this slot unchanged" in the cache.
 * Returns 0 or a negative error; -ERESTARTSYS if interrupted on the lock.
 */
int ChangePIDs(struct av7110 *av7110, u16 vpid, u16 apid, u16 ttpid,
	       u16 subpid, u16 pcrpid)
{
	int ret = 0;

	dprintk(4, "%p\n", av7110);

	if (mutex_lock_interruptible(&av7110->pid_mutex))
		return -ERESTARTSYS;

	if (!(vpid & 0x8000))
		av7110->pids[DMX_PES_VIDEO] = vpid;
	if (!(apid & 0x8000))
		av7110->pids[DMX_PES_AUDIO] = apid;
	if (!(ttpid & 0x8000))
		av7110->pids[DMX_PES_TELETEXT] = ttpid;
	if (!(pcrpid & 0x8000))
		av7110->pids[DMX_PES_PCR] = pcrpid;

	av7110->pids[DMX_PES_SUBTITLE] = 0;

	if (av7110->fe_synced) {
		pcrpid = av7110->pids[DMX_PES_PCR];
		ret = SetPIDs(av7110, vpid, apid, ttpid, subpid, pcrpid);
	}

	mutex_unlock(&av7110->pid_mutex);
	return ret;
}
/******************************************************************************
* hardware filter functions
******************************************************************************/
/*
 * Install a hardware PID filter in the firmware for the given demux
 * filter.  For section filters with hw_sections enabled, the first bytes
 * of the section filter value/mask are packed into the request; for
 * recorded TS feeds the PES-to-TS converter is primed instead.
 * On success the firmware-assigned handle (< 32) is stored in the filter
 * and the reverse map handle2filter[].  Returns 0 on success, negative
 * (or -1) on failure; a no-op returning 0 on full-TS cards.
 */
static int StartHWFilter(struct dvb_demux_filter *dvbdmxfilter)
{
	struct dvb_demux_feed *dvbdmxfeed = dvbdmxfilter->feed;
	struct av7110 *av7110 = dvbdmxfeed->demux->priv;
	/* zero-init: the full 20-word request is handed to the firmware,
	 * and buf[4..19] is only filled on the hw_sections path — never
	 * send uninitialized kernel stack contents to the device */
	u16 buf[20] = { 0 };
	int ret, i;
	u16 handle;
//	u16 mode = 0x0320;
	u16 mode = 0xb96a;

	dprintk(4, "%p\n", av7110);

	/* full-TS cards bypass the on-board PID filter entirely */
	if (av7110->full_ts)
		return 0;

	if (dvbdmxfilter->type == DMX_TYPE_SEC) {
		if (hw_sections) {
			/* pack filter value/mask bytes into the request words */
			buf[4] = (dvbdmxfilter->filter.filter_value[0] << 8) |
				dvbdmxfilter->maskandmode[0];
			for (i = 3; i < 18; i++)
				buf[i + 4 - 2] =
					(dvbdmxfilter->filter.filter_value[i] << 8) |
					dvbdmxfilter->maskandmode[i];
			mode = 4;
		}
	} else if ((dvbdmxfeed->ts_type & TS_PACKET) &&
		   !(dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY)) {
		av7110_p2t_init(&av7110->p2t_filter[dvbdmxfilter->index], dvbdmxfeed);
	}

	buf[0] = (COMTYPE_PID_FILTER << 8) + AddPIDFilter;
	buf[1] = 16;	/* number of parameter words that follow */
	buf[2] = dvbdmxfeed->pid;
	buf[3] = mode;

	ret = av7110_fw_request(av7110, buf, 20, &handle, 1);
	if (ret != 0 || handle >= 32) {
		printk(KERN_ERR "dvb-ttpci: %s error buf %04x %04x %04x %04x ret %d handle %04x\n",
		       __func__, buf[0], buf[1], buf[2], buf[3],
		       ret, handle);
		dvbdmxfilter->hw_handle = 0xffff;
		if (!ret)
			ret = -1;
		return ret;
	}

	av7110->handle2filter[handle] = dvbdmxfilter;
	dvbdmxfilter->hw_handle = handle;

	return ret;
}
/*
 * Remove a previously-installed hardware PID filter from the firmware
 * and clear the handle2filter[] reverse map.  A no-op returning 0 on
 * full-TS cards.  Returns 0 on success, -EINVAL for an invalid handle,
 * or a negative (or -1) error from the firmware request.
 */
static int StopHWFilter(struct dvb_demux_filter *dvbdmxfilter)
{
	struct av7110 *av7110 = dvbdmxfilter->feed->demux->priv;
	u16 buf[3];
	/* zero-init: answ[] is printed in the error path even when the
	 * firmware request failed before filling it in */
	u16 answ[2] = { 0 };
	int ret;
	u16 handle;

	dprintk(4, "%p\n", av7110);

	if (av7110->full_ts)
		return 0;

	handle = dvbdmxfilter->hw_handle;
	if (handle >= 32) {
		printk("%s tried to stop invalid filter %04x, filter type = %x\n",
		       __func__, handle, dvbdmxfilter->type);
		return -EINVAL;
	}

	av7110->handle2filter[handle] = NULL;

	buf[0] = (COMTYPE_PID_FILTER << 8) + DelPIDFilter;
	buf[1] = 1;	/* one parameter word follows */
	buf[2] = handle;
	ret = av7110_fw_request(av7110, buf, 3, answ, 2);
	if (ret != 0 || answ[1] != handle) {
		printk(KERN_ERR "dvb-ttpci: %s error cmd %04x %04x %04x ret %x resp %04x %04x pid %d\n",
		       __func__, buf[0], buf[1], buf[2], ret,
		       answ[0], answ[1], dvbdmxfilter->feed->pid);
		if (!ret)
			ret = -1;
	}
	return ret;
}
/*
 * Start a decoder feed: program the firmware PID filter for the feed's
 * PES type, start a hardware filter for recorded teletext, kick a
 * channel scan for A/V PIDs when the frontend is locked, and start A/V
 * recording when the feed also requests TS packets.
 * pes_type indexing: 0 = audio, 1 = video, 2 = teletext, 4 = PCR.
 */
static int dvb_feed_start_pid(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
	struct av7110 *av7110 = dvbdmx->priv;
	u16 *pid = dvbdmx->pids, npids[5];
	int i;
	int ret = 0;

	dprintk(4, "%p\n", av7110);

	npids[0] = npids[1] = npids[2] = npids[3] = npids[4] = 0xffff;
	i = dvbdmxfeed->pes_type;
	npids[i] = (pid[i]&0x8000) ? 0 : pid[i];
	/* teletext that is also recorded goes through a HW filter instead */
	if ((i == 2) && npids[i] && (dvbdmxfeed->ts_type & TS_PACKET)) {
		npids[i] = 0;
		ret = ChangePIDs(av7110, npids[1], npids[0], npids[2], npids[3], npids[4]);
		if (!ret)
			ret = StartHWFilter(dvbdmxfeed->filter);
		return ret;
	}
	if (dvbdmxfeed->pes_type <= 2 || dvbdmxfeed->pes_type == 4) {
		ret = ChangePIDs(av7110, npids[1], npids[0], npids[2], npids[3], npids[4]);
		if (ret)
			return ret;
	}

	if (dvbdmxfeed->pes_type < 2 && npids[0])
		if (av7110->fe_synced)
		{
			ret = av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, Scan, 0);
			if (ret)
				return ret;
		}

	if ((dvbdmxfeed->ts_type & TS_PACKET) && !av7110->full_ts) {
		if (dvbdmxfeed->pes_type == 0 && !(dvbdmx->pids[0] & 0x8000))
			ret = av7110_av_start_record(av7110, RP_AUDIO, dvbdmxfeed);
		if (dvbdmxfeed->pes_type == 1 && !(dvbdmx->pids[1] & 0x8000))
			ret = av7110_av_start_record(av7110, RP_VIDEO, dvbdmxfeed);
	}
	return ret;
}
/*
 * Stop a decoder feed: halt A/V playback/recording for audio/video
 * feeds, stop the teletext hardware filter, and (if the pids_off module
 * parameter is set) clear the corresponding PID in the firmware filter.
 */
static int dvb_feed_stop_pid(struct dvb_demux_feed *dvbdmxfeed)
{
	struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
	struct av7110 *av7110 = dvbdmx->priv;
	u16 *pid = dvbdmx->pids, npids[5];
	int i;

	int ret = 0;

	dprintk(4, "%p\n", av7110);

	if (dvbdmxfeed->pes_type <= 1) {
		ret = av7110_av_stop(av7110, dvbdmxfeed->pes_type ?  RP_VIDEO : RP_AUDIO);
		if (ret)
			return ret;
		if (!av7110->rec_mode)
			dvbdmx->recording = 0;
		if (!av7110->playing)
			dvbdmx->playing = 0;
	}
	npids[0] = npids[1] = npids[2] = npids[3] = npids[4] = 0xffff;
	i = dvbdmxfeed->pes_type;
	switch (i) {
	case 2: //teletext
		if (dvbdmxfeed->ts_type & TS_PACKET)
			ret = StopHWFilter(dvbdmxfeed->filter);
		npids[2] = 0;
		break;
	case 0:
	case 1:
	case 4:
		/* keep the firmware filter programmed unless pids_off asks
		 * for PID filters to be cleared on demux close */
		if (!pids_off)
			return 0;
		npids[i] = (pid[i]&0x8000) ? 0 : pid[i];
		break;
	}
	if (!ret)
		ret = ChangePIDs(av7110, npids[1], npids[0], npids[2], npids[3], npids[4]);
	return ret;
}
/*
 * av7110_start_feed() - dvb_demux start_feed hook for the AV7110.
 *
 * Routes decoder feeds (TS_DECODER, pes_type up to DMX_PES_PCR) either to
 * memory playback (DMX_MEMORY_FE) or to the hardware PID filters, arms
 * plain TS-packet hardware filters, hands full-TS cards over to the budget
 * capture path, and starts any READY section filters that belong to a
 * DMX_TYPE_SEC feed.  Returns 0 or a negative error code.
 */
static int av7110_start_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
struct av7110 *av7110 = demux->priv;
int ret = 0;
dprintk(4, "%p\n", av7110);
if (!demux->dmx.frontend)
return -EINVAL;
/* Hardware PID filtering only covers the 13-bit PID space. */
if (!av7110->full_ts && feed->pid > 0x1fff)
return -EINVAL;
if (feed->type == DMX_TYPE_TS) {
if ((feed->ts_type & TS_DECODER) &&
(feed->pes_type <= DMX_PES_PCR)) {
switch (demux->dmx.frontend->source) {
case DMX_MEMORY_FE:
/* Memory frontend: start A/V playback once both the
 * audio (pids[0]) and video (pids[1]) slots are enabled. */
if (feed->ts_type & TS_DECODER)
if (feed->pes_type < 2 &&
!(demux->pids[0] & 0x8000) &&
!(demux->pids[1] & 0x8000)) {
dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
dvb_ringbuffer_flush_spinlock_wakeup(&av7110->aout);
ret = av7110_av_start_play(av7110,RP_AV);
if (!ret)
demux->playing = 1;
}
break;
default:
ret = dvb_feed_start_pid(feed);
break;
}
} else if ((feed->ts_type & TS_PACKET) &&
(demux->dmx.frontend->source != DMX_MEMORY_FE)) {
ret = StartHWFilter(feed->filter);
}
}
/* Full-TS cards additionally capture everything via the budget path. */
if (av7110->full_ts) {
budget_start_feed(feed);
return ret;
}
if (feed->type == DMX_TYPE_SEC) {
int i;
/* Arm every READY section filter attached to this feed. */
for (i = 0; i < demux->filternum; i++) {
if (demux->filter[i].state != DMX_STATE_READY)
continue;
if (demux->filter[i].type != DMX_TYPE_SEC)
continue;
if (demux->filter[i].filter.parent != &feed->feed.sec)
continue;
demux->filter[i].state = DMX_STATE_GO;
if (demux->dmx.frontend->source != DMX_MEMORY_FE) {
ret = StartHWFilter(&demux->filter[i]);
if (ret)
break;
}
}
}
return ret;
}
/*
 * av7110_stop_feed() - dvb_demux stop_feed hook for the AV7110.
 *
 * Disables the decoder PID slot for TS_DECODER feeds, stops the matching
 * hardware filters, hands full-TS cards to the budget capture teardown,
 * and stops all GO section filters belonging to a DMX_TYPE_SEC feed,
 * continuing past individual failures so as many filters as possible are
 * released.  Returns 0, the first error seen, or -EINVAL on bad state.
 */
static int av7110_stop_feed(struct dvb_demux_feed *feed)
{
struct dvb_demux *demux = feed->demux;
struct av7110 *av7110 = demux->priv;
int i, rc, ret = 0;
dprintk(4, "%p\n", av7110);
if (feed->type == DMX_TYPE_TS) {
if (feed->ts_type & TS_DECODER) {
if (feed->pes_type >= DMX_PES_OTHER ||
!demux->pesfilter[feed->pes_type])
return -EINVAL;
/* Bit 15 marks the PID slot as disabled. */
demux->pids[feed->pes_type] |= 0x8000;
demux->pesfilter[feed->pes_type] = NULL;
}
if (feed->ts_type & TS_DECODER &&
feed->pes_type < DMX_PES_OTHER) {
ret = dvb_feed_stop_pid(feed);
} else
if ((feed->ts_type & TS_PACKET) &&
(demux->dmx.frontend->source != DMX_MEMORY_FE))
ret = StopHWFilter(feed->filter);
}
if (av7110->full_ts) {
budget_stop_feed(feed);
return ret;
}
if (feed->type == DMX_TYPE_SEC) {
for (i = 0; i<demux->filternum; i++) {
if (demux->filter[i].state == DMX_STATE_GO &&
demux->filter[i].filter.parent == &feed->feed.sec) {
demux->filter[i].state = DMX_STATE_READY;
if (demux->dmx.frontend->source != DMX_MEMORY_FE) {
rc = StopHWFilter(&demux->filter[i]);
if (!ret)
ret = rc;
/* keep going, stop as many filters as possible */
}
}
}
}
return ret;
}
/*
 * restart_feeds() - re-arm all active feeds (e.g. after firmware recovery).
 *
 * Saves and clears the playing/recording state, re-runs av7110_start_feed()
 * for every feed currently in DMX_STATE_GO (GO section filters are reset to
 * READY first so av7110_start_feed() re-arms them), restores the budget
 * feed count, and finally resumes playback if it was active.
 */
static void restart_feeds(struct av7110 *av7110)
{
struct dvb_demux *dvbdmx = &av7110->demux;
struct dvb_demux_feed *feed;
int mode;
int feeding;
int i, j;
dprintk(4, "%p\n", av7110);
mode = av7110->playing;
av7110->playing = 0;
av7110->rec_mode = 0;
feeding = av7110->feeding1; /* full_ts mod */
for (i = 0; i < dvbdmx->feednum; i++) {
feed = &dvbdmx->feed[i];
if (feed->state == DMX_STATE_GO) {
if (feed->type == DMX_TYPE_SEC) {
/* Drop this feed's section filters back to READY so they
 * are re-armed by av7110_start_feed() below. */
for (j = 0; j < dvbdmx->filternum; j++) {
if (dvbdmx->filter[j].type != DMX_TYPE_SEC)
continue;
if (dvbdmx->filter[j].filter.parent != &feed->feed.sec)
continue;
if (dvbdmx->filter[j].state == DMX_STATE_GO)
dvbdmx->filter[j].state = DMX_STATE_READY;
}
}
av7110_start_feed(feed);
}
}
av7110->feeding1 = feeding; /* full_ts mod */
if (mode)
av7110_av_start_play(av7110, mode);
}
/*
 * dvb_get_stc() - dmx_demux get_stc hook: read the System Time Clock.
 *
 * Queries the firmware (COMTYPE_REQUEST/ReqSTC) for four 16-bit words and
 * assembles the 33-bit STC as:
 *   bit 32      = fwstc[3] bit 15
 *   bits 16..31 = fwstc[1]
 *   bits  0..15 = fwstc[0]
 * (fwstc[2] is not used in the result.)  *base is always set to 1 and only
 * STC number 0 is supported.  Returns 0 or a negative error code.
 */
static int dvb_get_stc(struct dmx_demux *demux, unsigned int num,
uint64_t *stc, unsigned int *base)
{
int ret;
u16 fwstc[4];
u16 tag = ((COMTYPE_REQUEST << 8) + ReqSTC);
struct dvb_demux *dvbdemux;
struct av7110 *av7110;
/* pointer casting paranoia... */
if (WARN_ON(!demux))
return -EIO;
dvbdemux = demux->priv;
if (WARN_ON(!dvbdemux))
return -EIO;
av7110 = dvbdemux->priv;
dprintk(4, "%p\n", av7110);
if (num != 0)
return -EINVAL;
ret = av7110_fw_request(av7110, &tag, 0, fwstc, 4);
if (ret) {
printk(KERN_ERR "%s: av7110_fw_request error\n", __func__);
return ret;
}
dprintk(2, "fwstc = %04hx %04hx %04hx %04hx\n",
fwstc[0], fwstc[1], fwstc[2], fwstc[3]);
*stc = (((uint64_t) ((fwstc[3] & 0x8000) >> 15)) << 32) |
(((uint64_t) fwstc[1]) << 16) | ((uint64_t) fwstc[0]);
*base = 1;
dprintk(4, "stc = %lu\n", (unsigned long)*stc);
return 0;
}
/******************************************************************************
* SEC device file operations
******************************************************************************/
/* Map the DVB API 22kHz tone mode onto the firmware Set22K command. */
static int av7110_set_tone(struct dvb_frontend *fe, enum fe_sec_tone_mode tone)
{
	struct av7110 *av7110 = fe->dvb->priv;

	if (tone == SEC_TONE_ON)
		return Set22K(av7110, 1);
	if (tone == SEC_TONE_OFF)
		return Set22K(av7110, 0);
	return -EINVAL;
}
/* Forward a DiSEqC master command to the firmware (burst = -1: none). */
static int av7110_diseqc_send_master_cmd(struct dvb_frontend *fe,
					 struct dvb_diseqc_master_cmd *cmd)
{
	struct av7110 *av7110 = fe->dvb->priv;

	return av7110_diseqc_send(av7110, cmd->msg_len, cmd->msg, -1);
}
/* Send a DiSEqC mini command (tone burst) with an empty message body. */
static int av7110_diseqc_send_burst(struct dvb_frontend *fe,
				    enum fe_sec_mini_cmd minicmd)
{
	struct av7110 *av7110 = fe->dvb->priv;

	return av7110_diseqc_send(av7110, 0, NULL, minicmd);
}
/* simplified code from budget-core.c */
/* Drop one capture reference; stop DMA3 and mask VPE when it hits zero. */
static int stop_ts_capture(struct av7110 *budget)
{
	dprintk(2, "budget: %p\n", budget);

	budget->feeding1--;
	if (budget->feeding1)
		return budget->feeding1;

	saa7146_write(budget->dev, MC1, MASK_20); /* DMA3 off */
	SAA7146_IER_DISABLE(budget->dev, MASK_10);
	SAA7146_ISR_CLEAR(budget->dev, MASK_10);
	return 0;
}
/* Take a capture reference; on the first one, clear the grab buffer and
 * (re)arm the VPE interrupt plus DMA3. Returns the new reference count. */
static int start_ts_capture(struct av7110 *budget)
{
	unsigned int line;

	dprintk(2, "budget: %p\n", budget);

	if (budget->feeding1)
		return ++budget->feeding1;

	for (line = 0; line < TS_HEIGHT; line++)
		memset(budget->grabbing + line * TS_WIDTH, 0x00, TS_WIDTH);

	budget->ttbp = 0;
	SAA7146_ISR_CLEAR(budget->dev, MASK_10);		/* VPE */
	SAA7146_IER_ENABLE(budget->dev, MASK_10);		/* VPE */
	saa7146_write(budget->dev, MC1, (MASK_04 | MASK_20));	/* DMA3 on */

	return ++budget->feeding1;
}
/* Start budget-style TS capture for a feed, under feedlock1. */
static int budget_start_feed(struct dvb_demux_feed *feed)
{
	struct av7110 *budget = feed->demux->priv;
	int ret;

	dprintk(2, "av7110: %p\n", budget);

	spin_lock(&budget->feedlock1);
	feed->pusi_seen = false; /* have a clean section start */
	ret = start_ts_capture(budget);
	spin_unlock(&budget->feedlock1);

	return ret;
}
/* Stop budget-style TS capture for a feed, under feedlock1. */
static int budget_stop_feed(struct dvb_demux_feed *feed)
{
	struct av7110 *budget = feed->demux->priv;
	int ret;

	dprintk(2, "budget: %p\n", budget);

	spin_lock(&budget->feedlock1);
	ret = stop_ts_capture(budget);
	spin_unlock(&budget->feedlock1);

	return ret;
}
/*
 * vpeirq() - VPE tasklet: push newly DMA'ed TS data into the software demux.
 *
 * Reads how far the SAA7146 DMA3 engine has advanced (PCI_VDP3), rounds
 * down to a whole 188-byte TS packet boundary, and feeds the completed
 * packets between the previous position (olddma) and the new one (newdma)
 * to dvb_dmx_swfilter_packets(), handling ring-buffer wraparound.
 */
static void vpeirq(struct tasklet_struct *t)
{
struct av7110 *budget = from_tasklet(budget, t, vpe_tasklet);
u8 *mem = (u8 *) (budget->grabbing);
u32 olddma = budget->ttbp;
u32 newdma = saa7146_read(budget->dev, PCI_VDP3);
struct dvb_demux *demux = budget->full_ts ? &budget->demux : &budget->demux1;
/* nearest lower position divisible by 188 */
newdma -= newdma % 188;
if (newdma >= TS_BUFLEN)
return;
budget->ttbp = newdma;
/* Nothing to do if no feed is active or the DMA pointer did not move. */
if (!budget->feeding1 || (newdma == olddma))
return;
/* Ensure streamed PCI data is synced to CPU */
dma_sync_sg_for_cpu(&budget->dev->pci->dev, budget->pt.slist,
budget->pt.nents, DMA_FROM_DEVICE);
#if 0
/* track rps1 activity */
printk("vpeirq: %02x Event Counter 1 0x%04x\n",
mem[olddma],
saa7146_read(budget->dev, EC1R) & 0x3fff);
#endif
if (newdma > olddma)
/* no wraparound, dump olddma..newdma */
dvb_dmx_swfilter_packets(demux, mem + olddma, (newdma - olddma) / 188);
else {
/* wraparound, dump olddma..buflen and 0..newdma */
dvb_dmx_swfilter_packets(demux, mem + olddma, (TS_BUFLEN - olddma) / 188);
dvb_dmx_swfilter_packets(demux, mem, newdma / 188);
}
}
/*
 * av7110_register() - register demux, dmxdev, A/V, CA, OSD and net devices.
 *
 * Sets up the software demux (256 filters/feeds in full-TS mode, else 32),
 * attaches the hardware and memory frontends, connects the hardware
 * frontend, and registers the auxiliary devices.  With budgetpatch a second
 * demux (demux1), driven by the budget capture path, is registered too.
 *
 * NOTE(review): the early error returns after dvb_dmx_init()/
 * dvb_dmxdev_init()/add_frontend() do not unwind the registrations already
 * made — presumably dvb_unregister() handles cleanup; confirm callers
 * invoke it on failure.
 */
static int av7110_register(struct av7110 *av7110)
{
int ret, i;
struct dvb_demux *dvbdemux = &av7110->demux;
struct dvb_demux *dvbdemux1 = &av7110->demux1;
dprintk(4, "%p\n", av7110);
if (av7110->registered)
return -1;
av7110->registered = 1;
dvbdemux->priv = (void *) av7110;
/* Clear the 32-entry hardware-handle -> filter lookup table. */
for (i = 0; i < 32; i++)
av7110->handle2filter[i] = NULL;
dvbdemux->filternum = (av7110->full_ts) ? 256 : 32;
dvbdemux->feednum = (av7110->full_ts) ? 256 : 32;
dvbdemux->start_feed = av7110_start_feed;
dvbdemux->stop_feed = av7110_stop_feed;
dvbdemux->write_to_decoder = av7110_write_to_decoder;
dvbdemux->dmx.capabilities = (DMX_TS_FILTERING | DMX_SECTION_FILTERING |
DMX_MEMORY_BASED_FILTERING);
dvb_dmx_init(&av7110->demux);
av7110->demux.dmx.get_stc = dvb_get_stc;
av7110->dmxdev.filternum = (av7110->full_ts) ? 256 : 32;
av7110->dmxdev.demux = &dvbdemux->dmx;
av7110->dmxdev.capabilities = 0;
dvb_dmxdev_init(&av7110->dmxdev, &av7110->dvb_adapter);
av7110->hw_frontend.source = DMX_FRONTEND_0;
ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &av7110->hw_frontend);
if (ret < 0)
return ret;
av7110->mem_frontend.source = DMX_MEMORY_FE;
ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &av7110->mem_frontend);
if (ret < 0)
return ret;
ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx,
&av7110->hw_frontend);
if (ret < 0)
return ret;
av7110_av_register(av7110);
av7110_ca_register(av7110);
#ifdef CONFIG_DVB_AV7110_OSD
dvb_register_device(&av7110->dvb_adapter, &av7110->osd_dev,
&dvbdev_osd, av7110, DVB_DEVICE_OSD, 0);
#endif
dvb_net_init(&av7110->dvb_adapter, &av7110->dvb_net, &dvbdemux->dmx);
if (budgetpatch) {
/* initialize software demux1 without its own frontend
 * demux1 hardware is connected to frontend0 of demux0
 */
dvbdemux1->priv = (void *) av7110;
dvbdemux1->filternum = 256;
dvbdemux1->feednum = 256;
dvbdemux1->start_feed = budget_start_feed;
dvbdemux1->stop_feed = budget_stop_feed;
dvbdemux1->write_to_decoder = NULL;
dvbdemux1->dmx.capabilities = (DMX_TS_FILTERING | DMX_SECTION_FILTERING |
DMX_MEMORY_BASED_FILTERING);
dvb_dmx_init(&av7110->demux1);
av7110->dmxdev1.filternum = 256;
av7110->dmxdev1.demux = &dvbdemux1->dmx;
av7110->dmxdev1.capabilities = 0;
dvb_dmxdev_init(&av7110->dmxdev1, &av7110->dvb_adapter);
dvb_net_init(&av7110->dvb_adapter, &av7110->dvb_net1, &dvbdemux1->dmx);
printk("dvb-ttpci: additional demux1 for budget-patch registered\n");
}
return 0;
}
/*
 * dvb_unregister() - undo av7110_register(): release all DVB sub-devices.
 *
 * Tears down the budgetpatch demux1 (if registered), the main demux and
 * its frontends, the frontend driver, OSD, A/V and CA devices.
 *
 * NOTE(review): the budgetpatch branch calls dvbdemux->dmx.close() on
 * dvbdemux1->dmx — presumably both demuxes share the same close op so this
 * is harmless; verify, otherwise it should read dvbdemux1->dmx.close().
 */
static void dvb_unregister(struct av7110 *av7110)
{
struct dvb_demux *dvbdemux = &av7110->demux;
struct dvb_demux *dvbdemux1 = &av7110->demux1;
dprintk(4, "%p\n", av7110);
if (!av7110->registered)
return;
if (budgetpatch) {
dvb_net_release(&av7110->dvb_net1);
dvbdemux->dmx.close(&dvbdemux1->dmx);
dvb_dmxdev_release(&av7110->dmxdev1);
dvb_dmx_release(&av7110->demux1);
}
dvb_net_release(&av7110->dvb_net);
dvbdemux->dmx.close(&dvbdemux->dmx);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &av7110->hw_frontend);
dvbdemux->dmx.remove_frontend(&dvbdemux->dmx, &av7110->mem_frontend);
dvb_dmxdev_release(&av7110->dmxdev);
dvb_dmx_release(&av7110->demux);
if (av7110->fe != NULL) {
dvb_unregister_frontend(av7110->fe);
dvb_frontend_detach(av7110->fe);
}
dvb_unregister_device(av7110->osd_dev);
av7110_av_unregister(av7110);
av7110_ca_unregister(av7110);
}
/****************************************************************************
* I2C client commands
****************************************************************************/
/* Write one byte to register 'reg' of the i2c device with 8-bit id 'id'.
 * Returns the i2c_transfer() result (1 on success, negative on error). */
int i2c_writereg(struct av7110 *av7110, u8 id, u8 reg, u8 val)
{
	u8 buf[2] = { reg, val };
	struct i2c_msg msg = {
		.addr = id / 2,	/* 8-bit chip id -> 7-bit i2c address */
		.flags = 0,
		.len = sizeof(buf),
		.buf = buf,
	};

	return i2c_transfer(&av7110->i2c_adap, &msg, 1);
}
/* Read one byte from register 'reg' of the i2c device with 8-bit id 'id'.
 * NOTE: the transfer result is not checked; on failure the returned value
 * is the 0x00 the buffer was initialized with. */
u8 i2c_readreg(struct av7110 *av7110, u8 id, u8 reg)
{
	u8 regbuf[1] = { reg };
	u8 valbuf[1] = { 0x00 };
	struct i2c_msg msgs[2] = {
		{ .addr = id / 2, .flags = 0,        .len = 1, .buf = regbuf },
		{ .addr = id / 2, .flags = I2C_M_RD, .len = 1, .buf = valbuf },
	};

	i2c_transfer(&av7110->i2c_adap, msgs, 2);

	return valbuf[0];
}
/****************************************************************************
* INITIALIZATION
****************************************************************************/
/*
 * check_firmware() - validate the layout of the loaded av7110 firmware.
 *
 * The image starts with the magic "AVFW", followed by two sections —
 * dpram, then root — each preceded by a big-endian CRC32 and length.
 * On success the section pointers/sizes are stored in *av7110 and 0 is
 * returned; any mismatch yields -EINVAL.
 *
 * NOTE(review): the final printk formats the u32 'len' with %d; %u would
 * be the matching specifier.
 */
static int check_firmware(struct av7110* av7110)
{
u32 crc = 0, len = 0;
unsigned char *ptr;
/* check for firmware magic */
ptr = av7110->bin_fw;
if (ptr[0] != 'A' || ptr[1] != 'V' ||
ptr[2] != 'F' || ptr[3] != 'W') {
printk("dvb-ttpci: this is not an av7110 firmware\n");
return -EINVAL;
}
ptr += 4;
/* check dpram file */
crc = get_unaligned_be32(ptr);
ptr += 4;
len = get_unaligned_be32(ptr);
ptr += 4;
if (len >= 512) {
printk("dvb-ttpci: dpram file is way too big.\n");
return -EINVAL;
}
if (crc != crc32_le(0, ptr, len)) {
printk("dvb-ttpci: crc32 of dpram file does not match.\n");
return -EINVAL;
}
av7110->bin_dpram = ptr;
av7110->size_dpram = len;
ptr += len;
/* check root file */
crc = get_unaligned_be32(ptr);
ptr += 4;
len = get_unaligned_be32(ptr);
ptr += 4;
/* plausibility: root section must fit inside the loaded image */
if (len <= 200000 || len >= 300000 ||
len > ((av7110->bin_fw + av7110->size_fw) - ptr)) {
printk("dvb-ttpci: root file has strange size (%d). aborting.\n", len);
return -EINVAL;
}
if( crc != crc32_le(0, ptr, len)) {
printk("dvb-ttpci: crc32 of root file does not match.\n");
return -EINVAL;
}
av7110->bin_root = ptr;
av7110->size_root = len;
return 0;
}
/* Release the vmalloc'ed firmware image obtained by get_firmware(). */
static void put_firmware(struct av7110 *av7110)
{
	vfree(av7110->bin_fw);
}
/*
 * get_firmware() - load and validate the av7110 firmware image.
 *
 * Requests "dvb-ttpci-01.fw" (blocks until user space provides it), copies
 * it into a vmalloc'ed buffer (av7110->bin_fw/size_fw) and validates the
 * layout with check_firmware().  Returns 0 on success, -EINVAL/-ENOMEM or
 * the check_firmware() error otherwise.
 *
 * Fix: on check_firmware() failure the old code vfree'd bin_fw but left
 * the stale pointer (and size) in place; a later put_firmware() would
 * double-free it.  Clear both after freeing.
 */
static int get_firmware(struct av7110* av7110)
{
	int ret;
	const struct firmware *fw;

	/* request the av7110 firmware, this will block until someone uploads it */
	ret = request_firmware(&fw, "dvb-ttpci-01.fw", &av7110->dev->pci->dev);
	if (ret) {
		if (ret == -ENOENT) {
			printk(KERN_ERR "dvb-ttpci: could not load firmware, file not found: dvb-ttpci-01.fw\n");
			printk(KERN_ERR "dvb-ttpci: usually this should be in /usr/lib/hotplug/firmware or /lib/firmware\n");
			printk(KERN_ERR "dvb-ttpci: and can be downloaded from https://linuxtv.org/download/dvb/firmware/\n");
		} else
			printk(KERN_ERR "dvb-ttpci: cannot request firmware (error %i)\n",
			       ret);
		return -EINVAL;
	}

	if (fw->size <= 200000) {
		printk("dvb-ttpci: this firmware is way too small.\n");
		release_firmware(fw);
		return -EINVAL;
	}

	/* check if the firmware is available */
	av7110->bin_fw = vmalloc(fw->size);
	if (NULL == av7110->bin_fw) {
		dprintk(1, "out of memory\n");
		release_firmware(fw);
		return -ENOMEM;
	}

	memcpy(av7110->bin_fw, fw->data, fw->size);
	av7110->size_fw = fw->size;

	ret = check_firmware(av7110);
	if (ret) {
		/* don't leave a dangling pointer for put_firmware() */
		vfree(av7110->bin_fw);
		av7110->bin_fw = NULL;
		av7110->size_fw = 0;
	}

	release_firmware(fw);
	return ret;
}
/* Program the ALPS BSRV2 tuner PLL: 62.5kHz step (prescaler 2, hence the
 * divide by 125) plus a frequency-dependent power-level nibble. */
static int alps_bsrv2_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct av7110 *av7110 = fe->dvb->priv;
	u32 freq = p->frequency;
	u32 div = (freq + 479500) / 125;
	u8 pwr;
	u8 buf[4];
	struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf) };

	/* Power level by frequency band (same ladder as the original). */
	if (freq > 2000000)
		pwr = 3;
	else if (freq > 1800000)
		pwr = 2;
	else if (freq > 1600000)
		pwr = 1;
	else if (freq > 1200000)
		pwr = 0;
	else if (freq >= 1100000)
		pwr = 1;
	else
		pwr = 2;

	buf[0] = (div >> 8) & 0x7f;
	buf[1] = div & 0xff;
	buf[2] = ((div & 0x18000) >> 10) | 0x95;
	buf[3] = (pwr << 6) | 0x30;

	/* NOTE: since we're using a prescaler of 2, we set the
	 * divisor frequency to 62.5kHz and divide by 125 above */
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
		return -EIO;
	return 0;
}
/* VES1x93 demod config used with the ALPS BSRV2 front end
 * (see alps_bsrv2_tuner_set_params). */
static struct ves1x93_config alps_bsrv2_config = {
.demod_address = 0x08,
.xin = 90100000UL,
.invert_pwm = 0,
};
/* Program the ALPS TDBE2 tuner PLL: 62.5kHz step with 35.9375MHz IF and a
 * band byte selected by the carrier frequency. */
static int alps_tdbe2_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct av7110 *av7110 = fe->dvb->priv;
	u32 freq = p->frequency;
	u32 div = (freq + 35937500 + 31250) / 62500;
	u8 band;
	u8 data[4];
	struct i2c_msg msg = { .addr = 0x62, .flags = 0, .buf = data, .len = sizeof(data) };

	if (freq < 174000000)
		band = 0x88;
	else if (freq < 470000000)
		band = 0x84;
	else
		band = 0x81;

	data[0] = (div >> 8) & 0x7f;
	data[1] = div & 0xff;
	data[2] = 0x85 | ((div >> 10) & 0x60);
	data[3] = band;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
		return -EIO;
	return 0;
}
/* VES1820 demod config used with the ALPS TDBE2 front end
 * (see alps_tdbe2_tuner_set_params). */
static struct ves1820_config alps_tdbe2_config = {
.demod_address = 0x09,
.xin = 57840000UL,
.invert = 1,
.selagc = VES1820_SELAGC_SIGNAMPERR,
};
/* Program the Grundig 29504-451 tuner PLL (125kHz step, fixed config
 * bytes 0x8e/0x00). */
static int grundig_29504_451_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct av7110 *av7110 = fe->dvb->priv;
	u32 divider = p->frequency / 125;
	u8 data[4];
	struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };

	data[0] = (divider >> 8) & 0x7f;
	data[1] = divider & 0xff;
	data[2] = 0x8e;
	data[3] = 0x00;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
		return -EIO;
	return 0;
}
/* TDA8083 demod config used with the Grundig 29504-451 front end
 * (see grundig_29504_451_tuner_set_params). */
static struct tda8083_config grundig_29504_451_config = {
.demod_address = 0x68,
};
/* Program the Philips CD1516 tuner PLL: 62.5kHz step with 36.125MHz IF and
 * a band byte selected by the carrier frequency. */
static int philips_cd1516_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct av7110 *av7110 = fe->dvb->priv;
	u32 freq = p->frequency;
	u32 div = (freq + 36125000 + 31250) / 62500;
	u8 band;
	u8 data[4];
	struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };

	if (freq < 174000000)
		band = 0xa1;
	else if (freq < 470000000)
		band = 0x92;
	else
		band = 0x34;

	data[0] = (div >> 8) & 0x7f;
	data[1] = div & 0xff;
	data[2] = 0x8e;
	data[3] = band;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
		return -EIO;
	return 0;
}
/* VES1820 demod config used with the Philips CD1516 front end
 * (see philips_cd1516_tuner_set_params). */
static struct ves1820_config philips_cd1516_config = {
.demod_address = 0x09,
.xin = 57840000UL,
.invert = 1,
.selagc = VES1820_SELAGC_SIGNAMPERR,
};
/* Program the ALPS TDLB7 tuner PLL: 166.666kHz step with 36.2MHz IF; the
 * power bits depend on whether the carrier is above 782MHz. */
static int alps_tdlb7_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct av7110 *av7110 = fe->dvb->priv;
	u32 div = (p->frequency + 36200000) / 166666;
	u32 pwr = (p->frequency <= 782000000) ? 1 : 2;
	u8 data[4];
	struct i2c_msg msg = { .addr = 0x60, .flags = 0, .buf = data, .len = sizeof(data) };

	data[0] = (div >> 8) & 0x7f;
	data[1] = div & 0xff;
	data[2] = 0x85;
	data[3] = pwr << 6;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
		return -EIO;
	return 0;
}
/*
 * alps_tdlb7_request_firmware() - sp8870 demod firmware-load callback.
 *
 * Only functional when the SP8870 driver is enabled in the kernel config;
 * otherwise the request fails with -EINVAL.
 */
static int alps_tdlb7_request_firmware(struct dvb_frontend* fe, const struct firmware **fw, char* name)
{
#if IS_ENABLED(CONFIG_DVB_SP8870)
struct av7110* av7110 = fe->dvb->priv;
return request_firmware(fw, name, &av7110->dev->pci->dev);
#else
return -EINVAL;
#endif
}
/* SP8870 demod config used with the ALPS TDLB7 front end; firmware is
 * loaded through alps_tdlb7_request_firmware(). */
static const struct sp8870_config alps_tdlb7_config = {
.demod_address = 0x71,
.request_firmware = alps_tdlb7_request_firmware,
};
/*
 * STV0297 register init table for the Nexus-CA: flat list of
 * (register, value) byte pairs, consumed by the stv0297 driver via
 * nexusca_stv0297_config.inittab.  The trailing 0xff, 0xff pair
 * presumably acts as the end-of-table sentinel — per stv0297 driver
 * convention; confirm there.
 */
static u8 nexusca_stv0297_inittab[] = {
0x80, 0x01,
0x80, 0x00,
0x81, 0x01,
0x81, 0x00,
0x00, 0x09,
0x01, 0x69,
0x03, 0x00,
0x04, 0x00,
0x07, 0x00,
0x08, 0x00,
0x20, 0x00,
0x21, 0x40,
0x22, 0x00,
0x23, 0x00,
0x24, 0x40,
0x25, 0x88,
0x30, 0xff,
0x31, 0x00,
0x32, 0xff,
0x33, 0x00,
0x34, 0x50,
0x35, 0x7f,
0x36, 0x00,
0x37, 0x20,
0x38, 0x00,
0x40, 0x1c,
0x41, 0xff,
0x42, 0x29,
0x43, 0x00,
0x44, 0xff,
0x45, 0x00,
0x46, 0x00,
0x49, 0x04,
0x4a, 0x00,
0x4b, 0x7b,
0x52, 0x30,
0x55, 0xae,
0x56, 0x47,
0x57, 0xe1,
0x58, 0x3a,
0x5a, 0x1e,
0x5b, 0x34,
0x60, 0x00,
0x63, 0x00,
0x64, 0x00,
0x65, 0x00,
0x66, 0x00,
0x67, 0x00,
0x68, 0x00,
0x69, 0x00,
0x6a, 0x02,
0x6b, 0x00,
0x70, 0xff,
0x71, 0x00,
0x72, 0x00,
0x73, 0x00,
0x74, 0x0c,
0x80, 0x00,
0x81, 0x00,
0x82, 0x00,
0x83, 0x00,
0x84, 0x04,
0x85, 0x80,
0x86, 0x24,
0x87, 0x78,
0x88, 0x10,
0x89, 0x00,
0x90, 0x01,
0x91, 0x01,
0xa0, 0x04,
0xa1, 0x00,
0xa2, 0x00,
0xb0, 0x91,
0xb1, 0x0b,
0xc0, 0x53,
0xc1, 0x70,
0xc2, 0x12,
0xd0, 0x00,
0xd1, 0x00,
0xd2, 0x00,
0xd3, 0x00,
0xd4, 0x00,
0xd5, 0x00,
0xde, 0x00,
0xdf, 0x00,
0x61, 0x49,
0x62, 0x0b,
0x53, 0x08,
0x59, 0x08,
0xff, 0xff,
};
/*
 * nexusca_stv0297_tuner_set_params() - program the Nexus-CA tuner PLL.
 *
 * Computes the PLL word for a 62.5kHz step with 36.15MHz IF, picks the
 * band byte from the frequency (45..860MHz supported, -EINVAL outside),
 * writes the 4-byte word to the tuner at i2c address 0x63, and then polls
 * the tuner status byte for up to ~200ms waiting for bit 0x40 to be set
 * (PLL lock indication).  Returns 0 even if lock is never observed.
 */
static int nexusca_stv0297_tuner_set_params(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct av7110* av7110 = fe->dvb->priv;
u32 div;
u8 data[4];
struct i2c_msg msg = { .addr = 0x63, .flags = 0, .buf = data, .len = sizeof(data) };
struct i2c_msg readmsg = { .addr = 0x63, .flags = I2C_M_RD, .buf = data, .len = 1 };
int i;
div = (p->frequency + 36150000 + 31250) / 62500;
data[0] = (div >> 8) & 0x7f;
data[1] = div & 0xff;
data[2] = 0xce;
/* Band byte by frequency range; out-of-range tuning is rejected. */
if (p->frequency < 45000000)
return -EINVAL;
else if (p->frequency < 137000000)
data[3] = 0x01;
else if (p->frequency < 403000000)
data[3] = 0x02;
else if (p->frequency < 860000000)
data[3] = 0x04;
else
return -EINVAL;
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1) {
printk("nexusca: pll transfer failed!\n");
return -EIO;
}
// wait for PLL lock
/* The gate must be re-opened before each status read. */
for(i = 0; i < 20; i++) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
if (i2c_transfer(&av7110->i2c_adap, &readmsg, 1) == 1)
if (data[0] & 0x40) break;
msleep(10);
}
return 0;
}
/* STV0297 demod config for the Nexus-CA; the register init sequence comes
 * from nexusca_stv0297_inittab above. */
static struct stv0297_config nexusca_stv0297_config = {
.demod_address = 0x1C,
.inittab = nexusca_stv0297_inittab,
.invert = 1,
.stop_during_read = 1,
};
/* Program the Grundig 29504-401 tuner PLL: 166.666kHz step with 36.125MHz
 * IF, plus frequency-dependent charge-pump and band-select bits. */
static int grundig_29504_401_tuner_set_params(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct av7110 *av7110 = fe->dvb->priv;
	u32 freq = p->frequency;
	u32 div = (36125000 + freq) / 166666;
	u8 cfg = 0x88;
	u8 cpump;
	u8 band_select;
	u8 data[4];
	struct i2c_msg msg = { .addr = 0x61, .flags = 0, .buf = data, .len = sizeof(data) };

	/* Charge-pump current per frequency range (same ladder as before). */
	if (freq < 175000000)
		cpump = 2;
	else if (freq < 390000000)
		cpump = 1;
	else if (freq < 470000000)
		cpump = 2;
	else if (freq < 750000000)
		cpump = 1;
	else
		cpump = 3;

	/* Band-select bits per frequency range. */
	if (freq < 175000000)
		band_select = 0x0e;
	else if (freq < 470000000)
		band_select = 0x05;
	else
		band_select = 0x03;

	data[0] = (div >> 8) & 0x7f;
	data[1] = div & 0xff;
	data[2] = ((div >> 10) & 0x60) | cfg;
	data[3] = (cpump << 6) | band_select;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);
	if (i2c_transfer(&av7110->i2c_adap, &msg, 1) != 1)
		return -EIO;
	return 0;
}
/* L64781 demod config used with the Grundig 29504-401 front end
 * (see grundig_29504_401_tuner_set_params). */
static struct l64781_config grundig_29504_401_config = {
.demod_address = 0x55,
};
/*
 * av7110_fe_lock_fix() - resync the firmware PID filters with lock state.
 *
 * Called from the frontend wrappers whenever FE_HAS_LOCK may have changed.
 * On gaining lock, the stored video/audio/teletext/PCR PIDs are written
 * back and a filter Scan is triggered; on losing lock, all PIDs are
 * zeroed, the TS queue is flushed and we wait for the firmware message
 * queue to drain.  While playback is active only the bookkeeping flag is
 * updated.  fe_synced is only committed once the firmware commands
 * succeeded.  Serialized by pid_mutex; returns 0 or a negative error.
 */
static int av7110_fe_lock_fix(struct av7110 *av7110, enum fe_status status)
{
int ret = 0;
int synced = (status & FE_HAS_LOCK) ? 1 : 0;
av7110->fe_status = status;
if (av7110->fe_synced == synced)
return 0;
if (av7110->playing) {
av7110->fe_synced = synced;
return 0;
}
if (mutex_lock_interruptible(&av7110->pid_mutex))
return -ERESTARTSYS;
if (synced) {
ret = SetPIDs(av7110, av7110->pids[DMX_PES_VIDEO],
av7110->pids[DMX_PES_AUDIO],
av7110->pids[DMX_PES_TELETEXT], 0,
av7110->pids[DMX_PES_PCR]);
if (!ret)
ret = av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, Scan, 0);
} else {
ret = SetPIDs(av7110, 0, 0, 0, 0, 0);
if (!ret) {
ret = av7110_fw_cmd(av7110, COMTYPE_PID_FILTER, FlushTSQueue, 0);
if (!ret)
ret = av7110_wait_msgstate(av7110, GPMQBusy);
}
}
if (!ret)
av7110->fe_synced = synced;
mutex_unlock(&av7110->pid_mutex);
return ret;
}
/* Wrapper: reset the PID filter sync state, then call the real driver. */
static int av7110_fe_set_frontend(struct dvb_frontend *fe)
{
	struct av7110 *av7110 = fe->dvb->priv;
	int ret;

	ret = av7110_fe_lock_fix(av7110, 0);
	if (ret)
		return ret;
	return av7110->fe_set_frontend(fe);
}
/* Wrapper: reset the PID filter sync state, then init the real driver. */
static int av7110_fe_init(struct dvb_frontend *fe)
{
	struct av7110 *av7110 = fe->dvb->priv;
	int ret;

	ret = av7110_fe_lock_fix(av7110, 0);
	if (ret)
		return ret;
	return av7110->fe_init(fe);
}
/* Wrapper: read status from the real driver, and re-run the PID filter
 * fixup when FE_HAS_LOCK was just (re)gained. */
static int av7110_fe_read_status(struct dvb_frontend *fe,
				 enum fe_status *status)
{
	struct av7110 *av7110 = fe->dvb->priv;
	int ret;

	/* call the real implementation */
	ret = av7110->fe_read_status(fe, status);
	if (ret)
		return ret;

	if ((*status & FE_HAS_LOCK) &&
	    ((*status ^ av7110->fe_status) & FE_HAS_LOCK))
		ret = av7110_fe_lock_fix(av7110, *status);

	return ret;
}
/* Wrapper: reset the PID filter sync state, then forward to the driver. */
static int av7110_fe_diseqc_reset_overload(struct dvb_frontend *fe)
{
	struct av7110 *av7110 = fe->dvb->priv;
	int ret;

	ret = av7110_fe_lock_fix(av7110, 0);
	if (ret)
		return ret;
	return av7110->fe_diseqc_reset_overload(fe);
}
/* Wrapper: remember the command so dvb_s_recover() can replay it, then
 * forward to the real driver. */
static int av7110_fe_diseqc_send_master_cmd(struct dvb_frontend *fe,
					    struct dvb_diseqc_master_cmd *cmd)
{
	struct av7110 *av7110 = fe->dvb->priv;
	int ret;

	ret = av7110_fe_lock_fix(av7110, 0);
	if (ret)
		return ret;

	av7110->saved_master_cmd = *cmd;
	return av7110->fe_diseqc_send_master_cmd(fe, cmd);
}
/* Wrapper: remember the mini command for dvb_s_recover(), then forward. */
static int av7110_fe_diseqc_send_burst(struct dvb_frontend *fe,
				       enum fe_sec_mini_cmd minicmd)
{
	struct av7110 *av7110 = fe->dvb->priv;
	int ret;

	ret = av7110_fe_lock_fix(av7110, 0);
	if (ret)
		return ret;

	av7110->saved_minicmd = minicmd;
	return av7110->fe_diseqc_send_burst(fe, minicmd);
}
/* Wrapper: remember the tone setting for dvb_s_recover(), then forward. */
static int av7110_fe_set_tone(struct dvb_frontend *fe,
			      enum fe_sec_tone_mode tone)
{
	struct av7110 *av7110 = fe->dvb->priv;
	int ret;

	ret = av7110_fe_lock_fix(av7110, 0);
	if (ret)
		return ret;

	av7110->saved_tone = tone;
	return av7110->fe_set_tone(fe, tone);
}
/* Wrapper: remember the LNB voltage for dvb_s_recover(), then forward. */
static int av7110_fe_set_voltage(struct dvb_frontend *fe,
				 enum fe_sec_voltage voltage)
{
	struct av7110 *av7110 = fe->dvb->priv;
	int ret;

	ret = av7110_fe_lock_fix(av7110, 0);
	if (ret)
		return ret;

	av7110->saved_voltage = voltage;
	return av7110->fe_set_voltage(fe, voltage);
}
/* Wrapper: reset the PID filter sync state, then forward to the driver. */
static int av7110_fe_dishnetwork_send_legacy_command(struct dvb_frontend *fe, unsigned long cmd)
{
	struct av7110 *av7110 = fe->dvb->priv;
	int ret;

	ret = av7110_fe_lock_fix(av7110, 0);
	if (ret)
		return ret;
	return av7110->fe_dishnetwork_send_legacy_command(fe, cmd);
}
/* Re-initialize the frontend and replay the saved SEC state (voltage,
 * DiSEqC command, burst, tone, tuning) with small settle delays. */
static void dvb_s_recover(struct av7110 *av7110)
{
	struct dvb_frontend *fe = av7110->fe;

	av7110_fe_init(fe);

	av7110_fe_set_voltage(fe, av7110->saved_voltage);

	/* Replay the last DiSEqC master command, if one was sent. */
	if (av7110->saved_master_cmd.msg_len) {
		msleep(20);
		av7110_fe_diseqc_send_master_cmd(fe, &av7110->saved_master_cmd);
	}

	msleep(20);
	av7110_fe_diseqc_send_burst(fe, av7110->saved_minicmd);

	msleep(20);
	av7110_fe_set_tone(fe, av7110->saved_tone);

	av7110_fe_set_frontend(fe);
}
/* Read the tuner PWM value from the EEPROM at i2c address 0x50 (offset
 * 0xff); fall back to 0x48 on transfer failure or an erased (0xff) cell. */
static u8 read_pwm(struct av7110 *av7110)
{
	u8 offset = 0xff;
	u8 pwm;
	struct i2c_msg msg[] = {
		{ .addr = 0x50, .flags = 0,        .buf = &offset, .len = 1 },
		{ .addr = 0x50, .flags = I2C_M_RD, .buf = &pwm,    .len = 1 }
	};

	if ((i2c_transfer(&av7110->i2c_adap, msg, 2) != 2) || (pwm == 0xff))
		pwm = 0x48;

	return pwm;
}
static int frontend_init(struct av7110 *av7110)
{
int ret;
if (av7110->dev->pci->subsystem_vendor == 0x110a) {
switch(av7110->dev->pci->subsystem_device) {
case 0x0000: // Fujitsu/Siemens DVB-Cable (ves1820/Philips CD1516(??))
av7110->fe = dvb_attach(ves1820_attach, &philips_cd1516_config,
&av7110->i2c_adap, read_pwm(av7110));
if (av7110->fe) {
av7110->fe->ops.tuner_ops.set_params = philips_cd1516_tuner_set_params;
}
break;
}
} else if (av7110->dev->pci->subsystem_vendor == 0x13c2) {
switch(av7110->dev->pci->subsystem_device) {
case 0x0000: // Hauppauge/TT WinTV DVB-S rev1.X
case 0x0003: // Hauppauge/TT WinTV Nexus-S Rev 2.X
case 0x1002: // Hauppauge/TT WinTV DVB-S rev1.3SE
// try the ALPS BSRV2 first of all
av7110->fe = dvb_attach(ves1x93_attach, &alps_bsrv2_config, &av7110->i2c_adap);
if (av7110->fe) {
av7110->fe->ops.tuner_ops.set_params = alps_bsrv2_tuner_set_params;
av7110->fe->ops.diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
av7110->fe->ops.diseqc_send_burst = av7110_diseqc_send_burst;
av7110->fe->ops.set_tone = av7110_set_tone;
av7110->recover = dvb_s_recover;
break;
}
// try the ALPS BSRU6 now
av7110->fe = dvb_attach(stv0299_attach, &alps_bsru6_config, &av7110->i2c_adap);
if (av7110->fe) {
av7110->fe->ops.tuner_ops.set_params = alps_bsru6_tuner_set_params;
av7110->fe->tuner_priv = &av7110->i2c_adap;
av7110->fe->ops.diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
av7110->fe->ops.diseqc_send_burst = av7110_diseqc_send_burst;
av7110->fe->ops.set_tone = av7110_set_tone;
av7110->recover = dvb_s_recover;
break;
}
// Try the grundig 29504-451
av7110->fe = dvb_attach(tda8083_attach, &grundig_29504_451_config, &av7110->i2c_adap);
if (av7110->fe) {
av7110->fe->ops.tuner_ops.set_params = grundig_29504_451_tuner_set_params;
av7110->fe->ops.diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
av7110->fe->ops.diseqc_send_burst = av7110_diseqc_send_burst;
av7110->fe->ops.set_tone = av7110_set_tone;
av7110->recover = dvb_s_recover;
break;
}
/* Try DVB-C cards */
switch(av7110->dev->pci->subsystem_device) {
case 0x0000:
/* Siemens DVB-C (full-length card) VES1820/Philips CD1516 */
av7110->fe = dvb_attach(ves1820_attach, &philips_cd1516_config, &av7110->i2c_adap,
read_pwm(av7110));
if (av7110->fe) {
av7110->fe->ops.tuner_ops.set_params = philips_cd1516_tuner_set_params;
}
break;
case 0x0003:
/* Hauppauge DVB-C 2.1 VES1820/ALPS TDBE2 */
av7110->fe = dvb_attach(ves1820_attach, &alps_tdbe2_config, &av7110->i2c_adap,
read_pwm(av7110));
if (av7110->fe) {
av7110->fe->ops.tuner_ops.set_params = alps_tdbe2_tuner_set_params;
}
break;
}
break;
case 0x0001: // Hauppauge/TT Nexus-T premium rev1.X
{
struct dvb_frontend *fe;
// try ALPS TDLB7 first, then Grundig 29504-401
fe = dvb_attach(sp8870_attach, &alps_tdlb7_config, &av7110->i2c_adap);
if (fe) {
fe->ops.tuner_ops.set_params = alps_tdlb7_tuner_set_params;
av7110->fe = fe;
break;
}
}
fallthrough;
case 0x0008: // Hauppauge/TT DVB-T
// Grundig 29504-401
av7110->fe = dvb_attach(l64781_attach, &grundig_29504_401_config, &av7110->i2c_adap);
if (av7110->fe)
av7110->fe->ops.tuner_ops.set_params = grundig_29504_401_tuner_set_params;
break;
case 0x0002: // Hauppauge/TT DVB-C premium rev2.X
av7110->fe = dvb_attach(ves1820_attach, &alps_tdbe2_config, &av7110->i2c_adap, read_pwm(av7110));
if (av7110->fe) {
av7110->fe->ops.tuner_ops.set_params = alps_tdbe2_tuner_set_params;
}
break;
case 0x0004: // Galaxis DVB-S rev1.3
/* ALPS BSRV2 */
av7110->fe = dvb_attach(ves1x93_attach, &alps_bsrv2_config, &av7110->i2c_adap);
if (av7110->fe) {
av7110->fe->ops.tuner_ops.set_params = alps_bsrv2_tuner_set_params;
av7110->fe->ops.diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
av7110->fe->ops.diseqc_send_burst = av7110_diseqc_send_burst;
av7110->fe->ops.set_tone = av7110_set_tone;
av7110->recover = dvb_s_recover;
}
break;
case 0x0006: /* Fujitsu-Siemens DVB-S rev 1.6 */
/* Grundig 29504-451 */
av7110->fe = dvb_attach(tda8083_attach, &grundig_29504_451_config, &av7110->i2c_adap);
if (av7110->fe) {
av7110->fe->ops.tuner_ops.set_params = grundig_29504_451_tuner_set_params;
av7110->fe->ops.diseqc_send_master_cmd = av7110_diseqc_send_master_cmd;
av7110->fe->ops.diseqc_send_burst = av7110_diseqc_send_burst;
av7110->fe->ops.set_tone = av7110_set_tone;
av7110->recover = dvb_s_recover;
}
break;
case 0x000A: // Hauppauge/TT Nexus-CA rev1.X
av7110->fe = dvb_attach(stv0297_attach, &nexusca_stv0297_config, &av7110->i2c_adap);
if (av7110->fe) {
av7110->fe->ops.tuner_ops.set_params = nexusca_stv0297_tuner_set_params;
/* set TDA9819 into DVB mode */
saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD)
saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF)
/* tuner on this needs a slower i2c bus speed */
av7110->dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240;
break;
}
break;
case 0x000E: /* Hauppauge/TT Nexus-S rev 2.3 */
/* ALPS BSBE1 */
av7110->fe = dvb_attach(stv0299_attach, &alps_bsbe1_config, &av7110->i2c_adap);
if (av7110->fe) {
av7110->fe->ops.tuner_ops.set_params = alps_bsbe1_tuner_set_params;
av7110->fe->tuner_priv = &av7110->i2c_adap;
if (dvb_attach(lnbp21_attach, av7110->fe, &av7110->i2c_adap, 0, 0) == NULL) {
printk("dvb-ttpci: LNBP21 not found!\n");
if (av7110->fe->ops.release)
av7110->fe->ops.release(av7110->fe);
av7110->fe = NULL;
} else {
av7110->fe->ops.dishnetwork_send_legacy_command = NULL;
av7110->recover = dvb_s_recover;
}
}
break;
}
}
if (!av7110->fe) {
/* FIXME: propagate the failure code from the lower layers */
ret = -ENOMEM;
printk("dvb-ttpci: A frontend driver was not found for device [%04x:%04x] subsystem [%04x:%04x]\n",
av7110->dev->pci->vendor,
av7110->dev->pci->device,
av7110->dev->pci->subsystem_vendor,
av7110->dev->pci->subsystem_device);
} else {
FE_FUNC_OVERRIDE(av7110->fe->ops.init, av7110->fe_init, av7110_fe_init);
FE_FUNC_OVERRIDE(av7110->fe->ops.read_status, av7110->fe_read_status, av7110_fe_read_status);
FE_FUNC_OVERRIDE(av7110->fe->ops.diseqc_reset_overload, av7110->fe_diseqc_reset_overload, av7110_fe_diseqc_reset_overload);
FE_FUNC_OVERRIDE(av7110->fe->ops.diseqc_send_master_cmd, av7110->fe_diseqc_send_master_cmd, av7110_fe_diseqc_send_master_cmd);
FE_FUNC_OVERRIDE(av7110->fe->ops.diseqc_send_burst, av7110->fe_diseqc_send_burst, av7110_fe_diseqc_send_burst);
FE_FUNC_OVERRIDE(av7110->fe->ops.set_tone, av7110->fe_set_tone, av7110_fe_set_tone);
FE_FUNC_OVERRIDE(av7110->fe->ops.set_voltage, av7110->fe_set_voltage, av7110_fe_set_voltage);
FE_FUNC_OVERRIDE(av7110->fe->ops.dishnetwork_send_legacy_command, av7110->fe_dishnetwork_send_legacy_command, av7110_fe_dishnetwork_send_legacy_command);
FE_FUNC_OVERRIDE(av7110->fe->ops.set_frontend, av7110->fe_set_frontend, av7110_fe_set_frontend);
ret = dvb_register_frontend(&av7110->dvb_adapter, av7110->fe);
if (ret < 0) {
printk("av7110: Frontend registration failed!\n");
dvb_frontend_detach(av7110->fe);
av7110->fe = NULL;
}
}
return ret;
}
/* Budgetpatch note:
* Original hardware design by Roberto Deza:
* There is a DVB_Wiki at
* https://linuxtv.org
*
* New software triggering design by Emard that works on
* original Roberto Deza's hardware:
*
* rps1 code for budgetpatch will copy internal HS event to GPIO3 pin.
* GPIO3 is in budget-patch hardware connected to port B VSYNC
* HS is an internal event of 7146, accessible with RPS
* and temporarily raised high every n lines
* (n is defined in the RPS_THRESH1 counter threshold)
* I think HS is raised high on the beginning of the n-th line
* and remains high until this n-th line that triggered
* it is completely received. When the reception of n-th line
* ends, HS is lowered.
*
* To transmit data over DMA, 7146 needs changing state at
* port B VSYNC pin. Any changing of port B VSYNC will
* cause some DMA data transfer, with more or less packets loss.
* It depends on the phase and frequency of VSYNC and
* the way of 7146 is instructed to trigger on port B (defined
* in DD1_INIT register, 3rd nibble from the right valid
* numbers are 0-7, see datasheet)
*
* The correct triggering can minimize packet loss,
* dvbtraffic should give this stable bandwidths:
* 22k transponder = 33814 kbit/s
* 27.5k transponder = 38045 kbit/s
* by experiment it is found that the best results
* (stable bandwidths and almost no packet loss)
* are obtained using DD1_INIT triggering number 2
* (Va at rising edge of VS Fa = HS x VS-failing forced toggle)
* and a VSYNC phase that occurs in the middle of DMA transfer
* (about byte 188*512=96256 in the DMA window).
*
* Phase of HS is still not clear to me how to control,
* It just happens to be so. It can be seen if one enables
* RPS_IRQ and print Event Counter 1 in vpeirq(). Every
* time RPS_INTERRUPT is called, the Event Counter 1 will
* increment. That's how the 7146 is programmed to do event
* counting in this budget-patch.c
* I *think* HPS setting has something to do with the phase
* of HS but I can't be 100% sure in that.
*
* hardware debug note: a working budget card (including budget patch)
* with vpeirq() interrupt setup in mode "0x90" (every 64K) will
* generate 3 interrupts per 25-Hz DMA frame of 2*188*512 bytes
* and that means 3*25=75 Hz of interrupt frequency, as seen by
* watch cat /proc/interrupts
*
* If this frequency is 3x lower (and data received in the DMA
* buffer don't start with 0x47, but in the middle of packets,
* whose lengths appear to be like 188 292 188 104 etc.
* this means VSYNC line is not connected in the hardware.
* (check soldering pcb and pins)
* The same behaviour of missing VSYNC can be duplicated on budget
* cards, by setting DD1_INIT trigger mode 7 in 3rd nibble.
*/
/* Attach callback invoked by the saa7146 core for each matching PCI card.
 *
 * Probes for optional "budgetpatch" hardware, allocates and initializes the
 * av7110 device state, programs the saa7146 DMA/RPS registers for the chosen
 * transport-stream mode (full-ts, budgetpatch or classic), uploads the ARM
 * firmware and registers the DVB adapter, frontends and V4L interfaces.
 *
 * Returns 0 on success or a negative errno; on failure everything set up so
 * far is torn down through the goto-cleanup chain at the bottom.
 */
static int av7110_attach(struct saa7146_dev* dev,
			 struct saa7146_pci_extension_data *pci_ext)
{
	const int length = TS_WIDTH * TS_HEIGHT;	/* TS grab buffer size in bytes */
	struct pci_dev *pdev = dev->pci;
	struct av7110 *av7110;
	struct task_struct *thread;
	int ret, count = 0;	/* count indexes the RPS instruction words via WRITE_RPS1() */

	dprintk(4, "dev: %p\n", dev);

	/* Set RPS_IRQ to 1 to track rps1 activity.
	 * Enabling this won't send any interrupt to PC CPU.
	 */
#define RPS_IRQ 0

	if (budgetpatch == 1) {
		budgetpatch = 0;
		/* autodetect the presence of budget patch
		 * this only works if saa7146 has been recently
		 * reset with MASK_31 to MC1
		 *
		 * will wait for VBI_B event (vertical blank at port B)
		 * and will reset GPIO3 after VBI_B is detected.
		 * (GPIO3 should be raised high by CPU to
		 * test if GPIO3 will generate vertical blank signal
		 * in budget patch GPIO3 is connected to VSYNC_B
		 */

		/* RESET SAA7146 */
		saa7146_write(dev, MC1, MASK_31);
		/* autodetection success seems to be time-dependent after reset */

		/* Fix VSYNC level */
		saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
		/* set vsync_b triggering */
		saa7146_write(dev, DD1_STREAM_B, 0);
		/* port B VSYNC at rising edge */
		saa7146_write(dev, DD1_INIT, 0x00000200);
		saa7146_write(dev, BRS_CTRL, 0x00000000);  // VBI
		saa7146_write(dev, MC2,
			      1 * (MASK_08 | MASK_24) |   // BRS control
			      0 * (MASK_09 | MASK_25) |   // a
			      1 * (MASK_10 | MASK_26) |   // b
			      0 * (MASK_06 | MASK_22) |   // HPS_CTRL1
			      0 * (MASK_05 | MASK_21) |   // HPS_CTRL2
			      0 * (MASK_01 | MASK_15)     // DEBI
		);

		/* start writing RPS1 code from beginning */
		count = 0;
		/* Disable RPS1 */
		saa7146_write(dev, MC1, MASK_29);
		/* RPS1 timeout disable */
		saa7146_write(dev, RPS_TOV1, 0);

		/* RPS detection program: on VBI_B, drive GPIO3 low and stop */
		WRITE_RPS1(CMD_PAUSE | EVT_VBI_B);
		WRITE_RPS1(CMD_WR_REG_MASK | (GPIO_CTRL>>2));
		WRITE_RPS1(GPIO3_MSK);
		WRITE_RPS1(SAA7146_GPIO_OUTLO<<24);
#if RPS_IRQ
		/* issue RPS1 interrupt to increment counter */
		WRITE_RPS1(CMD_INTERRUPT);
#endif
		WRITE_RPS1(CMD_STOP);
		/* Jump to begin of RPS program as safety measure (p37) */
		WRITE_RPS1(CMD_JUMP);
		WRITE_RPS1(dev->d_rps1.dma_handle);

#if RPS_IRQ
		/* set event counter 1 source as RPS1 interrupt (0x03) (rE4 p53)
		 * use 0x03 to track RPS1 interrupts - increase by 1 every gpio3 is toggled
		 * use 0x15 to track VPE interrupts - increase by 1 every vpeirq() is called
		 */
		saa7146_write(dev, EC1SSR, (0x03<<2) | 3 );
		/* set event counter 1 threshold to maximum allowed value (rEC p55) */
		saa7146_write(dev, ECT1R, 0x3fff );
#endif
		/* Set RPS1 Address register to point to RPS code (r108 p42) */
		saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
		/* Enable RPS1, (rFC p33) */
		saa7146_write(dev, MC1, (MASK_13 | MASK_29 ));

		mdelay(10);
		/* now send VSYNC_B to rps1 by rising GPIO3 */
		saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI);
		mdelay(10);
		/* if rps1 responded by lowering the GPIO3,
		 * then we have budgetpatch hardware
		 */
		if ((saa7146_read(dev, GPIO_CTRL) & 0x10000000) == 0) {
			budgetpatch = 1;
			printk("dvb-ttpci: BUDGET-PATCH DETECTED.\n");
		}
		/* Disable RPS1 */
		saa7146_write(dev, MC1, ( MASK_29 ));
#if RPS_IRQ
		printk("dvb-ttpci: Event Counter 1 0x%04x\n", saa7146_read(dev, EC1R) & 0x3fff );
#endif
	}

	/* prepare the av7110 device struct */
	av7110 = kzalloc(sizeof(struct av7110), GFP_KERNEL);
	if (!av7110) {
		dprintk(1, "out of memory\n");
		return -ENOMEM;
	}

	av7110->card_name = (char*) pci_ext->ext_priv;
	av7110->dev = dev;
	dev->ext_priv = av7110;

	ret = get_firmware(av7110);
	if (ret < 0)
		goto err_kfree_0;

	ret = dvb_register_adapter(&av7110->dvb_adapter, av7110->card_name,
				   THIS_MODULE, &dev->pci->dev, adapter_nr);
	if (ret < 0)
		goto err_put_firmware_1;

	/* the Siemens DVB needs this if you want to have the i2c chips
	   get recognized before the main driver is fully loaded */
	saa7146_write(dev, GPIO_CTRL, 0x500000);

	strscpy(av7110->i2c_adap.name, pci_ext->ext_priv,
		sizeof(av7110->i2c_adap.name));

	saa7146_i2c_adapter_prepare(dev, &av7110->i2c_adap, SAA7146_I2C_BUS_BIT_RATE_120); /* 275 kHz */

	ret = i2c_add_adapter(&av7110->i2c_adap);
	if (ret < 0)
		goto err_dvb_unregister_adapter_2;

	ttpci_eeprom_parse_mac(&av7110->i2c_adap,
			       av7110->dvb_adapter.proposed_mac);
	ret = -ENOMEM;

	/* full-ts mod? */
	if (full_ts)
		av7110->full_ts = true;

	/* check for full-ts flag in eeprom ("OE" signature at 0xaa, bit 0 of byte 2) */
	if (i2c_readreg(av7110, 0xaa, 0) == 0x4f && i2c_readreg(av7110, 0xaa, 1) == 0x45) {
		u8 flags = i2c_readreg(av7110, 0xaa, 2);
		if (flags != 0xff && (flags & 0x01))
			av7110->full_ts = true;
	}

	if (av7110->full_ts) {
		/* full TS on saa7146 port B via video DMA3 */
		printk(KERN_INFO "dvb-ttpci: full-ts mode enabled for saa7146 port B\n");
		spin_lock_init(&av7110->feedlock1);
		av7110->grabbing = saa7146_vmalloc_build_pgtable(pdev, length,
								 &av7110->pt);
		if (!av7110->grabbing)
			goto err_i2c_del_3;

		saa7146_write(dev, DD1_STREAM_B, 0x00000000);
		saa7146_write(dev, MC2, (MASK_10 | MASK_26));

		saa7146_write(dev, DD1_INIT, 0x00000600);
		saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));

		saa7146_write(dev, BRS_CTRL, 0x60000000);
		saa7146_write(dev, MC2, MASK_08 | MASK_24);

		/* dma3 */
		saa7146_write(dev, PCI_BT_V1, 0x001c0000 | (saa7146_read(dev, PCI_BT_V1) & ~0x001f0000));
		saa7146_write(dev, BASE_ODD3, 0);
		saa7146_write(dev, BASE_EVEN3, 0);
		saa7146_write(dev, PROT_ADDR3, TS_WIDTH * TS_HEIGHT);
		saa7146_write(dev, PITCH3, TS_WIDTH);
		saa7146_write(dev, BASE_PAGE3, av7110->pt.dma | ME1 | 0x90);
		saa7146_write(dev, NUM_LINE_BYTE3, (TS_HEIGHT << 16) | TS_WIDTH);
		saa7146_write(dev, MC2, MASK_04 | MASK_20);

		tasklet_setup(&av7110->vpe_tasklet, vpeirq);

	} else if (budgetpatch) {
		spin_lock_init(&av7110->feedlock1);
		av7110->grabbing = saa7146_vmalloc_build_pgtable(pdev, length,
								 &av7110->pt);
		if (!av7110->grabbing)
			goto err_i2c_del_3;

		saa7146_write(dev, PCI_BT_V1, 0x1c1f101f);
		saa7146_write(dev, BCS_CTRL, 0x80400040);
		/* set dd1 stream a & b */
		saa7146_write(dev, DD1_STREAM_B, 0x00000000);
		saa7146_write(dev, DD1_INIT, 0x03000200);
		saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
		saa7146_write(dev, BRS_CTRL, 0x60000000);
		saa7146_write(dev, BASE_ODD3, 0);
		saa7146_write(dev, BASE_EVEN3, 0);
		saa7146_write(dev, PROT_ADDR3, TS_WIDTH * TS_HEIGHT);
		saa7146_write(dev, BASE_PAGE3, av7110->pt.dma | ME1 | 0x90);
		saa7146_write(dev, PITCH3, TS_WIDTH);
		saa7146_write(dev, NUM_LINE_BYTE3, (TS_HEIGHT << 16) | TS_WIDTH);

		/* upload all */
		saa7146_write(dev, MC2, 0x077c077c);
		saa7146_write(dev, GPIO_CTRL, 0x000000);
#if RPS_IRQ
		/* set event counter 1 source as RPS1 interrupt (0x03) (rE4 p53)
		 * use 0x03 to track RPS1 interrupts - increase by 1 every gpio3 is toggled
		 * use 0x15 to track VPE interrupts - increase by 1 every vpeirq() is called
		 */
		saa7146_write(dev, EC1SSR, (0x03<<2) | 3 );
		/* set event counter 1 threshold to maximum allowed value (rEC p55) */
		saa7146_write(dev, ECT1R, 0x3fff );
#endif
		/* Setup BUDGETPATCH MAIN RPS1 "program" (p35) */
		count = 0;
		/* Wait Source Line Counter Threshold (p36) */
		WRITE_RPS1(CMD_PAUSE | EVT_HS);
		/* Set GPIO3=1 (p42) */
		WRITE_RPS1(CMD_WR_REG_MASK | (GPIO_CTRL>>2));
		WRITE_RPS1(GPIO3_MSK);
		WRITE_RPS1(SAA7146_GPIO_OUTHI<<24);
#if RPS_IRQ
		/* issue RPS1 interrupt */
		WRITE_RPS1(CMD_INTERRUPT);
#endif
		/* Wait reset Source Line Counter Threshold (p36) */
		WRITE_RPS1(CMD_PAUSE | RPS_INV | EVT_HS);
		/* Set GPIO3=0 (p42) */
		WRITE_RPS1(CMD_WR_REG_MASK | (GPIO_CTRL>>2));
		WRITE_RPS1(GPIO3_MSK);
		WRITE_RPS1(SAA7146_GPIO_OUTLO<<24);
#if RPS_IRQ
		/* issue RPS1 interrupt */
		WRITE_RPS1(CMD_INTERRUPT);
#endif
		/* Jump to begin of RPS program (p37) */
		WRITE_RPS1(CMD_JUMP);
		WRITE_RPS1(dev->d_rps1.dma_handle);

		/* Fix VSYNC level */
		saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO);
		/* Set RPS1 Address register to point to RPS code (r108 p42) */
		saa7146_write(dev, RPS_ADDR1, dev->d_rps1.dma_handle);
		/* Set Source Line Counter Threshold, using BRS (rCC p43)
		 * It generates HS event every TS_HEIGHT lines
		 * this is related to TS_WIDTH set in register
		 * NUM_LINE_BYTE3. If NUM_LINE_BYTE low 16 bits
		 * are set to TS_WIDTH bytes (TS_WIDTH=2*188),
		 * then RPS_THRESH1 should be set to trigger
		 * every TS_HEIGHT (512) lines.
		 */
		saa7146_write(dev, RPS_THRESH1, (TS_HEIGHT*1) | MASK_12 );
		/* Enable RPS1 (rFC p33) */
		saa7146_write(dev, MC1, (MASK_13 | MASK_29));

		/* end of budgetpatch register initialization */
		tasklet_setup(&av7110->vpe_tasklet, vpeirq);
	} else {
		/* classic mode: the ARM delivers TS data over DEBI, no port B DMA */
		saa7146_write(dev, PCI_BT_V1, 0x1c00101f);
		saa7146_write(dev, BCS_CTRL, 0x80400040);

		/* set dd1 stream a & b */
		saa7146_write(dev, DD1_STREAM_B, 0x00000000);
		saa7146_write(dev, DD1_INIT, 0x03000000);
		saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));

		/* upload all */
		saa7146_write(dev, MC2, 0x077c077c);
		saa7146_write(dev, GPIO_CTRL, 0x000000);
	}

	tasklet_setup(&av7110->debi_tasklet, debiirq);
	tasklet_setup(&av7110->gpio_tasklet, gpioirq);

	mutex_init(&av7110->pid_mutex);

	/* locks for data transfers from/to AV7110 */
	spin_lock_init(&av7110->debilock);
	mutex_init(&av7110->dcomlock);
	av7110->debitype = -1;

	/* default OSD window */
	av7110->osdwin = 1;
	mutex_init(&av7110->osd_mutex);

	/* TV standard */
	av7110->vidmode = tv_standard == 1 ? AV7110_VIDEO_MODE_NTSC
					   : AV7110_VIDEO_MODE_PAL;

	/* ARM "watchdog" */
	init_waitqueue_head(&av7110->arm_wait);
	av7110->arm_thread = NULL;

	/* allocate and init buffers */
	av7110->debi_virt = dma_alloc_coherent(&pdev->dev, 8192,
					       &av7110->debi_bus, GFP_KERNEL);
	if (!av7110->debi_virt)
		goto err_saa71466_vfree_4;

	av7110->iobuf = vmalloc(AVOUTLEN+AOUTLEN+BMPLEN+4*IPACKS);
	if (!av7110->iobuf)
		goto err_pci_free_5;

	ret = av7110_av_init(av7110);
	if (ret < 0)
		goto err_iobuf_vfree_6;

	/* init BMP buffer */
	av7110->bmpbuf = av7110->iobuf+AVOUTLEN+AOUTLEN;
	init_waitqueue_head(&av7110->bmpq);

	ret = av7110_ca_init(av7110);
	if (ret < 0)
		goto err_av7110_av_exit_7;

	/* load firmware into AV7110 cards */
	ret = av7110_bootarm(av7110);
	if (ret < 0)
		goto err_av7110_ca_exit_8;

	ret = av7110_firmversion(av7110);
	if (ret < 0)
		goto err_stop_arm_9;

	if (FW_VERSION(av7110->arm_app)<0x2501)
		printk(KERN_WARNING
		       "dvb-ttpci: Warning, firmware version 0x%04x is too old. System might be unstable!\n",
		       FW_VERSION(av7110->arm_app));

	/* ARM monitor thread: watches the firmware and restarts it if it dies */
	thread = kthread_run(arm_thread, (void *) av7110, "arm_mon");
	if (IS_ERR(thread)) {
		ret = PTR_ERR(thread);
		goto err_stop_arm_9;
	}
	av7110->arm_thread = thread;

	/* set initial volume in mixer struct */
	av7110->mixer.volume_left = volume;
	av7110->mixer.volume_right = volume;

	ret = av7110_register(av7110);
	if (ret < 0)
		goto err_arm_thread_stop_10;

	init_av7110_av(av7110);

	/* special case DVB-C: these cards have an analog tuner
	   plus need some special handling, so we have separate
	   saa7146_ext_vv data for these... */
	ret = av7110_init_v4l(av7110);
	if (ret < 0)
		goto err_av7110_unregister_11;

	av7110->dvb_adapter.priv = av7110;
	ret = frontend_init(av7110);
	if (ret < 0)
		goto err_av7110_exit_v4l_12;

	mutex_init(&av7110->ioctl_mutex);

#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
	av7110_ir_init(av7110);
#endif
	printk(KERN_INFO "dvb-ttpci: found av7110-%d.\n", av7110_num);
	av7110_num++;
out:
	return ret;

	/* unwind in strict reverse order of construction */
err_av7110_exit_v4l_12:
	av7110_exit_v4l(av7110);
err_av7110_unregister_11:
	dvb_unregister(av7110);
err_arm_thread_stop_10:
	av7110_arm_sync(av7110);
err_stop_arm_9:
	/* Nothing to do. Rejoice. */
err_av7110_ca_exit_8:
	av7110_ca_exit(av7110);
err_av7110_av_exit_7:
	av7110_av_exit(av7110);
err_iobuf_vfree_6:
	vfree(av7110->iobuf);
err_pci_free_5:
	dma_free_coherent(&pdev->dev, 8192, av7110->debi_virt,
			  av7110->debi_bus);
err_saa71466_vfree_4:
	if (av7110->grabbing)
		saa7146_vfree_destroy_pgtable(pdev, av7110->grabbing, &av7110->pt);
err_i2c_del_3:
	i2c_del_adapter(&av7110->i2c_adap);
err_dvb_unregister_adapter_2:
	dvb_unregister_adapter(&av7110->dvb_adapter);
err_put_firmware_1:
	put_firmware(av7110);
err_kfree_0:
	kfree(av7110);
	goto out;
}
/* Detach callback: tear down everything av7110_attach() set up, in reverse
 * order.  DMA/RPS hardware is quiesced and the associated tasklets killed
 * before the software interfaces are unregistered and memory freed.
 * Always returns 0.
 */
static int av7110_detach(struct saa7146_dev* saa)
{
	struct av7110 *av7110 = saa->ext_priv;

	dprintk(4, "%p\n", av7110);

#if IS_ENABLED(CONFIG_DVB_AV7110_IR)
	av7110_ir_exit(av7110);
#endif
	if (budgetpatch || av7110->full_ts) {
		if (budgetpatch) {
			/* Disable RPS1 */
			saa7146_write(saa, MC1, MASK_29);
			/* VSYNC LOW (inactive) */
			saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO);
		}
		saa7146_write(saa, MC1, MASK_20);	/* DMA3 off */
		SAA7146_IER_DISABLE(saa, MASK_10);
		SAA7146_ISR_CLEAR(saa, MASK_10);
		/* let an already-scheduled vpe tasklet drain before killing it */
		msleep(50);
		tasklet_kill(&av7110->vpe_tasklet);
		saa7146_vfree_destroy_pgtable(saa->pci, av7110->grabbing, &av7110->pt);
	}
	av7110_exit_v4l(av7110);

	av7110_arm_sync(av7110);

	tasklet_kill(&av7110->debi_tasklet);
	tasklet_kill(&av7110->gpio_tasklet);

	dvb_unregister(av7110);

	/* mask and acknowledge the DEBI and GPIO interrupts */
	SAA7146_IER_DISABLE(saa, MASK_19 | MASK_03);
	SAA7146_ISR_CLEAR(saa, MASK_19 | MASK_03);

	av7110_ca_exit(av7110);
	av7110_av_exit(av7110);

	vfree(av7110->iobuf);
	dma_free_coherent(&saa->pci->dev, 8192, av7110->debi_virt,
			  av7110->debi_bus);

	i2c_del_adapter(&av7110->i2c_adap);

	dvb_unregister_adapter (&av7110->dvb_adapter);

	av7110_num--;

	put_firmware(av7110);

	kfree(av7110);

	saa->ext_priv = NULL;

	return 0;
}
/* Interrupt dispatch callback registered with the saa7146 core.  All real
 * work is deferred to tasklets; this top half only masks/acknowledges the
 * level-triggered DEBI irq and schedules the matching tasklet per source.
 */
static void av7110_irq(struct saa7146_dev* dev, u32 *isr)
{
	struct av7110 *av7110 = dev->ext_priv;

	//print_time("av7110_irq");

	/* Note: Don't try to handle the DEBI error irq (MASK_18), in
	 * intel mode the timeout is asserted all the time...
	 */

	if (*isr & MASK_19) {
		//printk("av7110_irq: DEBI\n");
		/* Note 1: The DEBI irq is level triggered: We must enable it
		 * only after we started a DMA xfer, and disable it here
		 * immediately, or it will be signalled all the time while
		 * DEBI is idle.
		 * Note 2: You would think that an irq which is masked is
		 * not signalled by the hardware. Not so for the SAA7146:
		 * An irq is signalled as long as the corresponding bit
		 * in the ISR is set, and disabling irqs just prevents the
		 * hardware from setting the ISR bit. This means a) that we
		 * must clear the ISR *after* disabling the irq (which is why
		 * we must do it here even though saa7146_core did it already),
		 * and b) that if we were to disable an edge triggered irq
		 * (like the gpio irqs sadly are) temporarily we would likely
		 * lose some. This sucks :-(
		 */
		SAA7146_IER_DISABLE(av7110->dev, MASK_19);
		SAA7146_ISR_CLEAR(av7110->dev, MASK_19);
		tasklet_schedule(&av7110->debi_tasklet);
	}

	if (*isr & MASK_03) {
		//printk("av7110_irq: GPIO\n");
		tasklet_schedule(&av7110->gpio_tasklet);
	}

	/* video port B DMA (full-ts / budgetpatch transport stream) */
	if (*isr & MASK_10)
		tasklet_schedule(&av7110->vpe_tasklet);
}
/* forward declaration: referenced by the per-card extension data below */
static struct saa7146_extension av7110_extension_driver;

/* Bind a human-readable card name to the shared extension driver; the
 * saa7146 core hands the name back via pci_ext->ext_priv at attach time.
 */
#define MAKE_AV7110_INFO(x_var,x_name) \
static struct saa7146_pci_extension_data x_var = { \
	.ext_priv = x_name, \
	.ext = &av7110_extension_driver }

/* one entry per supported card variant */
MAKE_AV7110_INFO(tts_1_X_fsc,"Technotrend/Hauppauge WinTV DVB-S rev1.X or Fujitsu Siemens DVB-C");
MAKE_AV7110_INFO(ttt_1_X, "Technotrend/Hauppauge WinTV DVB-T rev1.X");
MAKE_AV7110_INFO(ttc_1_X, "Technotrend/Hauppauge WinTV Nexus-CA rev1.X");
MAKE_AV7110_INFO(ttc_2_X, "Technotrend/Hauppauge WinTV DVB-C rev2.X");
MAKE_AV7110_INFO(tts_2_X, "Technotrend/Hauppauge WinTV Nexus-S rev2.X");
MAKE_AV7110_INFO(tts_2_3, "Technotrend/Hauppauge WinTV Nexus-S rev2.3");
MAKE_AV7110_INFO(tts_1_3se, "Technotrend/Hauppauge WinTV DVB-S rev1.3 SE");
MAKE_AV7110_INFO(ttt, "Technotrend/Hauppauge DVB-T");
MAKE_AV7110_INFO(fsc, "Fujitsu Siemens DVB-C");
MAKE_AV7110_INFO(fss, "Fujitsu Siemens DVB-S rev1.6");
MAKE_AV7110_INFO(gxs_1_3, "Galaxis DVB-S rev1.3");
/* PCI id table: matched on (subsystem) vendor/device by MAKE_EXTENSION_PCI;
 * the bound saa7146_pci_extension_data carries the card name.
 */
static const struct pci_device_id pci_tbl[] = {
	MAKE_EXTENSION_PCI(fsc, 0x110a, 0x0000),
	MAKE_EXTENSION_PCI(tts_1_X_fsc, 0x13c2, 0x0000),
	MAKE_EXTENSION_PCI(ttt_1_X, 0x13c2, 0x0001),
	MAKE_EXTENSION_PCI(ttc_2_X, 0x13c2, 0x0002),
	MAKE_EXTENSION_PCI(tts_2_X, 0x13c2, 0x0003),
	MAKE_EXTENSION_PCI(gxs_1_3, 0x13c2, 0x0004),
	MAKE_EXTENSION_PCI(fss, 0x13c2, 0x0006),
	MAKE_EXTENSION_PCI(ttt, 0x13c2, 0x0008),
	MAKE_EXTENSION_PCI(ttc_1_X, 0x13c2, 0x000a),
	MAKE_EXTENSION_PCI(tts_2_3, 0x13c2, 0x000e),
	MAKE_EXTENSION_PCI(tts_1_3se, 0x13c2, 0x1002),

	/* MAKE_EXTENSION_PCI(???, 0x13c2, 0x0005), UNDEFINED CARD */ // Technisat SkyStar1
	/* MAKE_EXTENSION_PCI(???, 0x13c2, 0x0009), UNDEFINED CARD */ // TT/Hauppauge WinTV Nexus-CA v????

	{
		.vendor = 0,	/* sentinel */
	}
};

MODULE_DEVICE_TABLE(pci, pci_tbl);
/* Extension descriptor handed to the saa7146 core: attach/detach entry
 * points, the PCI match table and the interrupt sources we handle
 * (DEBI = MASK_19, GPIO = MASK_03, video port B DMA = MASK_10).
 */
static struct saa7146_extension av7110_extension_driver = {
	.name = "av7110",
	.flags = SAA7146_USE_I2C_IRQ,

	.module = THIS_MODULE,
	.pci_tbl = &pci_tbl[0],
	.attach = av7110_attach,
	.detach = av7110_detach,

	.irq_mask = MASK_19 | MASK_03 | MASK_10,
	.irq_func = av7110_irq,
};
/* Module entry point: register with the saa7146 core, which then calls
 * av7110_attach() for every matching PCI card found.
 */
static int __init av7110_init(void)
{
	return saa7146_register_extension(&av7110_extension_driver);
}
/* Module exit point: unregister from the saa7146 core, which triggers
 * av7110_detach() for every attached card.
 */
static void __exit av7110_exit(void)
{
	saa7146_unregister_extension(&av7110_extension_driver);
}
module_init(av7110_init);
module_exit(av7110_exit);

/* module metadata */
MODULE_DESCRIPTION("driver for the SAA7146 based AV110 PCI DVB cards by Siemens, Technotrend, Hauppauge");
MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, others");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/av7110/av7110.c |
// SPDX-License-Identifier: GPL-2.0
#include "dvb_filter.h"
#include "av7110_ipack.h"
#include <linux/string.h> /* for memcpy() */
#include <linux/vmalloc.h>
/* Return the PES repacker to its initial "searching for start code"
 * state.  Only the parser state is cleared; the output buffer pointer,
 * its size and the registered callback are left untouched.
 */
void av7110_ipack_reset(struct ipack *p)
{
	/* start-code / header search progress */
	p->found = 0;
	p->done = 0;
	/* current packet bookkeeping */
	p->cid = 0;
	p->plength = 0;
	p->hlength = 0;
	p->mpeg = 0;
	/* header parsing scratch state */
	p->flag1 = 0;
	p->flag2 = 0;
	p->check = 0;
	p->which = 0;
	/* bytes accumulated in the output buffer */
	p->count = 0;
}
/* Allocate the repack output buffer and register the packet-complete
 * callback.  Returns 0 on success or -ENOMEM when allocation fails.
 */
int av7110_ipack_init(struct ipack *p, int size,
		      void (*func)(u8 *buf, int size, void *priv))
{
	void *buf = vmalloc(size);

	if (!buf) {
		printk(KERN_WARNING "Couldn't allocate memory for ipack\n");
		return -ENOMEM;
	}

	p->buf = buf;
	p->size = size;
	p->func = func;
	p->repack_subids = 0;
	av7110_ipack_reset(p);
	return 0;
}
/* Release the output buffer allocated by av7110_ipack_init(). */
void av7110_ipack_free(struct ipack *p)
{
	vfree(p->buf);
}
/* Finalize the PES packet currently in p->buf (patch the stream id and the
 * 16-bit PES_packet_length field), optionally rewrite the AC3 substream
 * frame-offset header, hand the packet to the registered callback and
 * prime the buffer for the next packet (the 00 00 01 prefix in bytes 0-2
 * is kept; write_ipack() appends after the rebuilt header).
 */
static void send_ipack(struct ipack *p)
{
	int off;
	struct dvb_audio_info ai;
	int ac3_off = 0;
	int streamid = 0;
	int nframes = 0;
	int f = 0;

	switch (p->mpeg) {
	case 2:
		if (p->count < 10)	/* incomplete MPEG-2 PES header: nothing to send */
			return;
		p->buf[3] = p->cid;
		/* PES_packet_length = total size minus the 6 fixed header bytes */
		p->buf[4] = (u8)(((p->count - 6) & 0xff00) >> 8);
		p->buf[5] = (u8)((p->count - 6) & 0x00ff);
		if (p->repack_subids && p->cid == PRIVATE_STREAM1) {
			/* private stream 1: locate the substream header after
			 * the PES header data (length in buf[8]) */
			off = 9 + p->buf[8];
			streamid = p->buf[off];
			if ((streamid & 0xf8) == 0x80) {	/* AC3 substream */
				ai.off = 0;
				ac3_off = ((p->buf[off + 2] << 8)|
					   p->buf[off + 3]);
				if (ac3_off < p->count)
					f = dvb_filter_get_ac3info(p->buf + off + 3 + ac3_off,
								   p->count - ac3_off, &ai, 0);
				if (!f) {
					/* recompute frame count and first-frame
					 * offset for the repacked payload */
					nframes = (p->count - off - 3 - ac3_off) /
						ai.framesize + 1;
					p->buf[off + 2] = (ac3_off >> 8) & 0xff;
					p->buf[off + 3] = (ac3_off) & 0xff;
					p->buf[off + 1] = nframes;

					/* offset of the first frame start in the NEXT packet */
					ac3_off += nframes * ai.framesize - p->count;
				}
			}
		}
		p->func(p->buf, p->count, p->data);

		/* rebuild a minimal MPEG-2 PES header for the next packet */
		p->buf[6] = 0x80;
		p->buf[7] = 0x00;
		p->buf[8] = 0x00;
		p->count = 9;
		if (p->repack_subids && p->cid == PRIVATE_STREAM1
		    && (streamid & 0xf8) == 0x80) {
			/* carry the AC3 substream header over into the next packet */
			p->count += 4;
			p->buf[9] = streamid;
			p->buf[10] = (ac3_off >> 8) & 0xff;
			p->buf[11] = (ac3_off) & 0xff;
			p->buf[12] = 0;
		}
		break;

	case 1:
		if (p->count < 8)	/* incomplete MPEG-1 packet header */
			return;
		p->buf[3] = p->cid;
		p->buf[4] = (u8)(((p->count - 6) & 0xff00) >> 8);
		p->buf[5] = (u8)((p->count - 6) & 0x00ff);
		p->func(p->buf, p->count, p->data);

		/* MPEG-1: single "no PTS/DTS" marker byte starts the next header */
		p->buf[6] = 0x0f;
		p->count = 7;
		break;
	}
}
/* Force out a packet whose length field was unknown (open-ended PES,
 * plength set to the MMAX_PLENGTH - 6 placeholder).  Acts only when the
 * parser is mid-packet and has accumulated more than the 6 header bytes;
 * the real length is derived from what was actually received.
 */
void av7110_ipack_flush(struct ipack *p)
{
	if (p->plength == MMAX_PLENGTH - 6 && p->found > 6) {
		p->plength = p->found - 6;
		p->found = 0;
		send_ipack(p);
		av7110_ipack_reset(p);
	}
}
/* Append @count payload bytes to the PES packet under construction.
 * When the buffer is empty a fresh packet is started with the 00 00 01
 * start-code prefix (stream id and length in bytes 3-5 are filled in
 * later by send_ipack()).  When the buffer would overflow, the full
 * packet is sent and the remainder written via tail recursion.
 */
static void write_ipack(struct ipack *p, const u8 *data, int count)
{
	u8 headr[3] = { 0x00, 0x00, 0x01 };

	if (p->count < 6) {
		memcpy(p->buf, headr, 3);
		p->count = 6;
	}

	if (p->count + count < p->size){
		memcpy(p->buf+p->count, data, count);
		p->count += count;
	} else {
		int rest = p->size - p->count;
		memcpy(p->buf+p->count, data, rest);
		p->count += rest;
		send_ipack(p);
		/* send_ipack() left a fresh header in p->buf; recurse for the rest */
		if (count - rest > 0)
			write_ipack(p, data + rest, count - rest);
	}
}
/* Incremental PES repacketizer: feed @count bytes of an MPEG-1/MPEG-2 PES
 * stream and re-emit it as packets bounded by the ipack buffer size.
 * p->found tracks how many header bytes have been recognized so far, so
 * the input may be delivered in arbitrarily small fragments across calls.
 * Always returns @count (all input is consumed).
 */
int av7110_ipack_instant_repack (const u8 *buf, int count, struct ipack *p)
{
	int l;
	int c = 0;

	/* phase 1: scan for the 00 00 01 start code and parse the fixed
	 * header bytes (stream id, packet length, MPEG-1/2 flag bytes) */
	while (c < count && (p->mpeg == 0 ||
			     (p->mpeg == 1 && p->found < 7) ||
			     (p->mpeg == 2 && p->found < 9))
	       && (p->found < 5 || !p->done)) {
		switch (p->found) {
		case 0:
		case 1:
			/* two leading zero bytes of the start code */
			if (buf[c] == 0x00)
				p->found++;
			else
				p->found = 0;
			c++;
			break;
		case 2:
			/* third start-code byte; a run of zeros keeps us at state 2 */
			if (buf[c] == 0x01)
				p->found++;
			else if (buf[c] == 0)
				p->found = 2;
			else
				p->found = 0;
			c++;
			break;
		case 3:
			/* stream id byte */
			p->cid = 0;
			switch (buf[c]) {
			case PROG_STREAM_MAP:
			case PRIVATE_STREAM2:
			case PROG_STREAM_DIR:
			case ECM_STREAM     :
			case EMM_STREAM     :
			case PADDING_STREAM :
			case DSM_CC_STREAM  :
			case ISO13522_STREAM:
				/* streams without a parsed header: copy through */
				p->done = 1;
				fallthrough;
			case PRIVATE_STREAM1:
			case VIDEO_STREAM_S ... VIDEO_STREAM_E:
			case AUDIO_STREAM_S ... AUDIO_STREAM_E:
				p->found++;
				p->cid = buf[c];
				c++;
				break;
			default:
				p->found = 0;
				break;
			}
			break;
		case 4:
			/* 16-bit PES packet length, possibly split across calls */
			if (count-c > 1) {
				p->plen[0] = buf[c];
				c++;
				p->plen[1] = buf[c];
				c++;
				p->found += 2;
				p->plength = (p->plen[0] << 8) | p->plen[1];
			} else {
				p->plen[0] = buf[c];
				p->found++;
				return count;
			}
			break;
		case 5:
			/* second length byte arriving in a later call */
			p->plen[1] = buf[c];
			c++;
			p->found++;
			p->plength = (p->plen[0] << 8) | p->plen[1];
			break;
		case 6:
			/* first flag byte decides MPEG-1 vs MPEG-2 syntax */
			if (!p->done) {
				p->flag1 = buf[c];
				c++;
				p->found++;
				if ((p->flag1 & 0xc0) == 0x80)
					p->mpeg = 2;
				else {
					p->hlength = 0;
					p->which = 0;
					p->mpeg = 1;
					p->flag2 = 0;
				}
			}
			break;
		case 7:
			/* MPEG-2: second flag byte (PTS/DTS indicators) */
			if (!p->done && p->mpeg == 2) {
				p->flag2 = buf[c];
				c++;
				p->found++;
			}
			break;
		case 8:
			/* MPEG-2: PES header data length */
			if (!p->done && p->mpeg == 2) {
				p->hlength = buf[c];
				c++;
				p->found++;
			}
			break;
		}
	}

	if (c == count)
		return count;

	/* open-ended packet (length 0): use the maximum as a placeholder,
	 * av7110_ipack_flush() fixes it up when the stream ends */
	if (!p->plength)
		p->plength = MMAX_PLENGTH - 6;

	/* phase 2: header complete (or pass-through stream) - copy payload */
	if (p->done || ((p->mpeg == 2 && p->found >= 9) ||
			(p->mpeg == 1 && p->found >= 7))) {
		switch (p->cid) {
		case AUDIO_STREAM_S ... AUDIO_STREAM_E:
		case VIDEO_STREAM_S ... VIDEO_STREAM_E:
		case PRIVATE_STREAM1:
			/* replay the parsed header bytes into the output packet */
			if (p->mpeg == 2 && p->found == 9) {
				write_ipack(p, &p->flag1, 1);
				write_ipack(p, &p->flag2, 1);
				write_ipack(p, &p->hlength, 1);
			}

			if (p->mpeg == 1 && p->found == 7)
				write_ipack(p, &p->flag1, 1);

			/* MPEG-2 with PTS: capture the 5 PTS bytes (header
			 * positions 9..13) while copying them through */
			if (p->mpeg == 2 && (p->flag2 & PTS_ONLY) &&
			    p->found < 14) {
				while (c < count && p->found < 14) {
					p->pts[p->found - 9] = buf[c];
					write_ipack(p, buf + c, 1);
					c++;
					p->found++;
				}
				if (c == count)
					return count;
			}

			/* MPEG-1: walk the variable-length header (stuffing
			 * bytes, STD buffer fields, PTS/DTS); p->which encodes
			 * the sub-state, 2000 = header fully consumed */
			if (p->mpeg == 1 && p->which < 2000) {

				if (p->found == 7) {
					p->check = p->flag1;
					p->hlength = 1;
				}

				/* skip 0xff stuffing bytes */
				while (!p->which && c < count &&
				       p->check == 0xff){
					p->check = buf[c];
					write_ipack(p, buf + c, 1);
					c++;
					p->found++;
					p->hlength++;
				}

				if (c == count)
					return count;

				/* STD buffer field: two bytes */
				if ((p->check & 0xc0) == 0x40 && !p->which) {
					p->check = buf[c];
					write_ipack(p, buf + c, 1);
					c++;
					p->found++;
					p->hlength++;

					p->which = 1;
					if (c == count)
						return count;
					p->check = buf[c];
					write_ipack(p, buf + c, 1);
					c++;
					p->found++;
					p->hlength++;
					p->which = 2;
					if (c == count)
						return count;
				}

				/* second STD byte arriving in a later call */
				if (p->which == 1) {
					p->check = buf[c];
					write_ipack(p, buf + c, 1);
					c++;
					p->found++;
					p->hlength++;
					p->which = 2;
					if (c == count)
						return count;
				}

				/* first PTS/DTS byte seen */
				if ((p->check & 0x30) && p->check != 0xff) {
					p->flag2 = (p->check & 0xf0) << 2;
					p->pts[0] = p->check;
					p->which = 3;
				}

				if (c == count)
					return count;
				if (p->which > 2){
					if ((p->flag2 & PTS_DTS_FLAGS) == PTS_ONLY) {
						/* remaining 4 PTS bytes */
						while (c < count && p->which < 7) {
							p->pts[p->which - 2] = buf[c];
							write_ipack(p, buf + c, 1);
							c++;
							p->found++;
							p->which++;
							p->hlength++;
						}
						if (c == count)
							return count;
					} else if ((p->flag2 & PTS_DTS_FLAGS) == PTS_DTS) {
						/* 4 PTS bytes plus 5 DTS bytes */
						while (c < count && p->which < 12) {
							if (p->which < 7)
								p->pts[p->which - 2] = buf[c];
							write_ipack(p, buf + c, 1);
							c++;
							p->found++;
							p->which++;
							p->hlength++;
						}
						if (c == count)
							return count;
					}
					p->which = 2000;
				}

			}

			/* bulk-copy payload up to the end of this PES packet */
			while (c < count && p->found < p->plength + 6) {
				l = count - c;
				if (l + p->found > p->plength + 6)
					l = p->plength + 6 - p->found;
				write_ipack(p, buf + c, l);
				p->found += l;
				c += l;
			}

			break;
		}

		/* pass-through streams: just account for the skipped bytes */
		if (p->done) {
			if (p->found + count - c < p->plength + 6) {
				p->found += count - c;
				c = count;
			} else {
				c += p->plength + 6 - p->found;
				p->found = p->plength + 6;
			}
		}

		/* packet complete: emit it and reparse any trailing input */
		if (p->plength && p->found == p->plength + 6) {
			send_ipack(p);
			av7110_ipack_reset(p);
			if (c < count)
				av7110_ipack_instant_repack(buf + c, count - c, p);
		}
	}
	return count;
}
| linux-master | drivers/staging/media/av7110/av7110_ipack.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* av7110_ca.c: CA and CI stuff
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* originally based on code by:
* Copyright (C) 1998,1999 Christian Theiss <[email protected]>
*
* the project's page is at https://linuxtv.org
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include <linux/gfp.h>
#include "av7110.h"
#include "av7110_hw.h"
#include "av7110_ca.h"
/* Handle a CI status message received from the firmware.
 * Layout: data[0] = message type, data[1] = module state (0 = removed,
 * 1 = present, 2 = ready), data[2] = slot number (1 or 2).
 */
void CI_handle(struct av7110 *av7110, u8 *data, u16 len)
{
	unsigned int slot;

	dprintk(8, "av7110:%p\n", av7110);

	if (len < 3)
		return;

	/* only slot-info messages require action here;
	 * CI_SWITCH_PRG_REPLY and everything else is ignored */
	if (data[0] != CI_MSG_CI_INFO)
		return;

	if (data[2] != 1 && data[2] != 2)
		return;
	slot = data[2] - 1;

	if (data[1] == 0)
		av7110->ci_slot[slot].flags = 0;
	else if (data[1] == 1)
		av7110->ci_slot[slot].flags |= CA_CI_MODULE_PRESENT;
	else if (data[1] == 2)
		av7110->ci_slot[slot].flags |= CA_CI_MODULE_READY;
}
/* Queue a length-prefixed CI packet into the link-layer ring buffer and
 * wake any reader blocked on it.  The packet is silently dropped when the
 * buffer cannot hold the payload plus its 2-byte length header.
 */
void ci_get_data(struct dvb_ringbuffer *cibuf, u8 *data, int len)
{
	int needed = len + 2;

	if (dvb_ringbuffer_free(cibuf) < needed)
		return;

	/* 16-bit big-endian length header, then the payload */
	DVB_RINGBUFFER_WRITE_BYTE(cibuf, len >> 8);
	DVB_RINGBUFFER_WRITE_BYTE(cibuf, len & 0xff);
	dvb_ringbuffer_write(cibuf, data, len);

	wake_up_interruptible(&cibuf->queue);
}
/******************************************************************************
* CI link layer file ops
******************************************************************************/
/* Allocate and initialize the CI read and write ring buffers (@size bytes
 * each).  On allocation failure all buffers set up so far are released
 * again and -ENOMEM is returned.
 */
static int ci_ll_init(struct dvb_ringbuffer *cirbuf, struct dvb_ringbuffer *ciwbuf, int size)
{
	struct dvb_ringbuffer *tab[] = { cirbuf, ciwbuf, NULL }, **p;
	void *data;

	for (p = tab; *p; p++) {
		data = vmalloc(size);
		if (!data) {
			/* roll back the buffers from earlier iterations */
			while (p-- != tab) {
				vfree(p[0]->data);
				p[0]->data = NULL;
			}
			return -ENOMEM;
		}
		dvb_ringbuffer_init(*p, data, size);
	}
	return 0;
}
/* Discard any pending data in both CI ring buffers and wake waiters. */
static void ci_ll_flush(struct dvb_ringbuffer *cirbuf, struct dvb_ringbuffer *ciwbuf)
{
	struct dvb_ringbuffer *bufs[] = { cirbuf, ciwbuf };
	int i;

	for (i = 0; i < 2; i++)
		dvb_ringbuffer_flush_spinlock_wakeup(bufs[i]);
}
/* Free the backing store of both CI ring buffers; counterpart of ci_ll_init(). */
static void ci_ll_release(struct dvb_ringbuffer *cirbuf, struct dvb_ringbuffer *ciwbuf)
{
	struct dvb_ringbuffer *bufs[] = { cirbuf, ciwbuf };
	int i;

	for (i = 0; i < 2; i++) {
		vfree(bufs[i]->data);
		bufs[i]->data = NULL;	/* guard against use-after-free */
	}
}
/*
 * Queue an 8-byte reset message for every slot selected in the @slots
 * bitmask (bit 0 = slot 0, bit 1 = slot 1) and clear that slot's flags.
 * Returns -EBUSY without queueing anything if @cibuf cannot hold all
 * of the requested messages.
 */
static int ci_ll_reset(struct dvb_ringbuffer *cibuf, struct file *file,
		       int slots, struct ca_slot_info *slot)
{
	int i;
	int len = 0;
	u8 msg[8] = { 0x00, 0x06, 0x00, 0x00, 0xff, 0x02, 0x00, 0x00 };

	/* first pass: compute the total space required */
	for (i = 0; i < 2; i++) {
		if (slots & (1 << i))
			len += 8;
	}

	if (dvb_ringbuffer_free(cibuf) < len)
		return -EBUSY;

	/* second pass: write one message per selected slot */
	for (i = 0; i < 2; i++) {
		if (slots & (1 << i)) {
			msg[2] = i;	/* slot number goes into byte 2 */
			dvb_ringbuffer_write(cibuf, msg, 8);
			slot[i].flags = 0;
		}
	}
	return 0;
}
/*
 * Copy one CI message (max 2048 bytes) from user space into @cibuf,
 * prefixed with a two-byte big-endian length header.  With O_NONBLOCK
 * set, -EWOULDBLOCK is returned if the buffer is too full; otherwise
 * the caller sleeps until header plus payload fit.  Returns the number
 * of payload bytes written or a negative error code.
 */
static ssize_t ci_ll_write(struct dvb_ringbuffer *cibuf, struct file *file,
			   const char __user *buf, size_t count, loff_t *ppos)
{
	int free;
	int non_blocking = file->f_flags & O_NONBLOCK;
	/* bounce buffer so copy_from_user() happens before touching the ring */
	u8 *page = (u8 *)__get_free_page(GFP_USER);
	int res;

	if (!page)
		return -ENOMEM;

	res = -EINVAL;
	if (count > 2048)	/* driver's per-message size limit */
		goto out;

	res = -EFAULT;
	if (copy_from_user(page, buf, count))
		goto out;

	free = dvb_ringbuffer_free(cibuf);
	if (count + 2 > free) {
		res = -EWOULDBLOCK;
		if (non_blocking)
			goto out;
		res = -ERESTARTSYS;
		/* sleep until header + payload fit into the ring buffer */
		if (wait_event_interruptible(cibuf->queue,
					     (dvb_ringbuffer_free(cibuf) >= count + 2)))
			goto out;
	}

	DVB_RINGBUFFER_WRITE_BYTE(cibuf, count >> 8);
	DVB_RINGBUFFER_WRITE_BYTE(cibuf, count & 0xff);
	res = dvb_ringbuffer_write(cibuf, page, count);
out:
	free_page((unsigned long)page);
	return res;
}
/*
 * Read one length-prefixed CI message from @cibuf into user space.
 * Blocks until data is available unless O_NONBLOCK is set.  Returns
 * the number of payload bytes copied, 0 when no complete header is
 * available, or a negative error code.
 */
static ssize_t ci_ll_read(struct dvb_ringbuffer *cibuf, struct file *file,
			  char __user *buf, size_t count, loff_t *ppos)
{
	int avail;
	int non_blocking = file->f_flags & O_NONBLOCK;
	ssize_t len;

	if (!cibuf->data || !count)
		return 0;
	if (non_blocking && (dvb_ringbuffer_empty(cibuf)))
		return -EWOULDBLOCK;
	if (wait_event_interruptible(cibuf->queue,
				     !dvb_ringbuffer_empty(cibuf)))
		return -ERESTARTSYS;

	avail = dvb_ringbuffer_avail(cibuf);
	if (avail < 4)
		return 0;

	/* reconstruct the big-endian 16-bit length from the header */
	len = DVB_RINGBUFFER_PEEK(cibuf, 0) << 8;
	len |= DVB_RINGBUFFER_PEEK(cibuf, 1);
	/* reject truncated messages or undersized user buffers */
	if (avail < len + 2 || count < len)
		return -EINVAL;

	DVB_RINGBUFFER_SKIP(cibuf, 2);	/* consume the header */
	return dvb_ringbuffer_read_user(cibuf, buf, len);
}
/* Open the CA device node and start with empty CI ring buffers. */
static int dvb_ca_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;
	int ret;

	ret = dvb_generic_open(inode, file);
	dprintk(8, "av7110:%p\n",av7110);
	if (ret < 0)
		return ret;

	/* discard stale data from a previous user */
	ci_ll_flush(&av7110->ci_rbuffer, &av7110->ci_wbuffer);
	return 0;
}
/*
 * Report readability when the read ring buffer holds data and
 * writability when the write ring buffer has more than 1024 free bytes.
 */
static __poll_t dvb_ca_poll (struct file *file, poll_table *wait)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;
	struct dvb_ringbuffer *rbuf = &av7110->ci_rbuffer;
	struct dvb_ringbuffer *wbuf = &av7110->ci_wbuffer;
	__poll_t res = 0;

	dprintk(8, "av7110:%p\n",av7110);

	poll_wait(file, &rbuf->queue, wait);
	poll_wait(file, &wbuf->queue, wait);

	if (!dvb_ringbuffer_empty(rbuf))
		res = EPOLLIN | EPOLLRDNORM;
	if (dvb_ringbuffer_free(wbuf) > 1024)
		res |= EPOLLOUT | EPOLLWRNORM;

	return res;
}
/*
 * CA device ioctl handler, serialized by av7110->ioctl_mutex.
 *
 * Fix: the CA_GET_SLOT_INFO and CA_SET_DESCR error paths used to
 * duplicate "mutex_unlock(); return -EINVAL;" inside the switch.
 * They now set ret and break, so the mutex is released on exactly
 * one path at the bottom of the function (same observable behavior,
 * no duplicated unlock logic to keep in sync).
 */
static int dvb_ca_ioctl(struct file *file, unsigned int cmd, void *parg)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;
	unsigned long arg = (unsigned long) parg;
	int ret = 0;

	dprintk(8, "av7110:%p\n",av7110);

	if (mutex_lock_interruptible(&av7110->ioctl_mutex))
		return -ERESTARTSYS;

	switch (cmd) {
	case CA_RESET:
		/* arg is a slot bitmask; queues reset messages */
		ret = ci_ll_reset(&av7110->ci_wbuffer, file, arg,
				  &av7110->ci_slot[0]);
		break;
	case CA_GET_CAP:
	{
		struct ca_caps cap;

		cap.slot_num = 2;
		/* link-layer CI only if the ARM firmware supports it */
		cap.slot_type = (FW_CI_LL_SUPPORT(av7110->arm_app) ?
				 CA_CI_LINK : CA_CI) | CA_DESCR;
		cap.descr_num = 16;
		cap.descr_type = CA_ECD;
		memcpy(parg, &cap, sizeof(cap));
		break;
	}
	case CA_GET_SLOT_INFO:
	{
		struct ca_slot_info *info = (struct ca_slot_info *)parg;

		if (info->num < 0 || info->num > 1) {
			ret = -EINVAL;
			break;
		}
		av7110->ci_slot[info->num].num = info->num;
		av7110->ci_slot[info->num].type = FW_CI_LL_SUPPORT(av7110->arm_app) ?
						  CA_CI_LINK : CA_CI;
		memcpy(info, &av7110->ci_slot[info->num], sizeof(struct ca_slot_info));
		break;
	}
	case CA_GET_MSG:
		break;
	case CA_SEND_MSG:
		break;
	case CA_GET_DESCR_INFO:
	{
		struct ca_descr_info info;

		info.num = 16;
		info.type = CA_ECD;
		memcpy(parg, &info, sizeof(info));
		break;
	}
	case CA_SET_DESCR:
	{
		struct ca_descr *descr = (struct ca_descr *)parg;

		if (descr->index >= 16 || descr->parity > 1) {
			ret = -EINVAL;
			break;
		}
		/* load the 8-byte control word into the firmware descrambler */
		av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, SetDescr, 5,
			      (descr->index<<8)|descr->parity,
			      (descr->cw[0]<<8)|descr->cw[1],
			      (descr->cw[2]<<8)|descr->cw[3],
			      (descr->cw[4]<<8)|descr->cw[5],
			      (descr->cw[6]<<8)|descr->cw[7]);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&av7110->ioctl_mutex);
	return ret;
}
/* write() on the CA node: forward to the CI link-layer write buffer. */
static ssize_t dvb_ca_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct av7110 *av7110 = ((struct dvb_device *)file->private_data)->priv;

	dprintk(8, "av7110:%p\n",av7110);
	return ci_ll_write(&av7110->ci_wbuffer, file, buf, count, ppos);
}
/* read() on the CA node: forward to the CI link-layer read buffer. */
static ssize_t dvb_ca_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct av7110 *av7110 = ((struct dvb_device *)file->private_data)->priv;

	dprintk(8, "av7110:%p\n",av7110);
	return ci_ll_read(&av7110->ci_rbuffer, file, buf, count, ppos);
}
/* File operations for the CA device node. */
static const struct file_operations dvb_ca_fops = {
	.owner		= THIS_MODULE,
	.read		= dvb_ca_read,
	.write		= dvb_ca_write,
	.unlocked_ioctl	= dvb_generic_ioctl,
	.open		= dvb_ca_open,
	.release	= dvb_generic_release,
	.poll		= dvb_ca_poll,
	.llseek		= default_llseek,
};
/* Template for the CA dvb_device; .priv is filled in at registration. */
static struct dvb_device dvbdev_ca = {
	.priv		= NULL,
	.users		= 1,	/* single reader */
	.writers	= 1,	/* single writer */
	.fops		= &dvb_ca_fops,
	.kernel_ioctl	= dvb_ca_ioctl,
};
/* Register the CA device node for this adapter. */
int av7110_ca_register(struct av7110 *av7110)
{
	return dvb_register_device(&av7110->dvb_adapter, &av7110->ca_dev,
				   &dvbdev_ca, av7110, DVB_DEVICE_CA, 0);
}
/* Unregister the CA device node; counterpart of av7110_ca_register(). */
void av7110_ca_unregister(struct av7110 *av7110)
{
	dvb_unregister_device(av7110->ca_dev);
}
/* Allocate the two CI ring buffers (8 KiB each). */
int av7110_ca_init(struct av7110* av7110)
{
	return ci_ll_init(&av7110->ci_rbuffer, &av7110->ci_wbuffer, 8192);
}
/* Release the CI ring buffers; counterpart of av7110_ca_init(). */
void av7110_ca_exit(struct av7110* av7110)
{
	ci_ll_release(&av7110->ci_rbuffer, &av7110->ci_wbuffer);
}
| linux-master | drivers/staging/media/av7110/av7110_ca.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* av7110_v4l.c: av7110 video4linux interface for DVB and Siemens DVB-C analog module
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* originally based on code by:
* Copyright (C) 1998,1999 Christian Theiss <[email protected]>
*
* the project's page is at https://linuxtv.org
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/poll.h>
#include "av7110.h"
#include "av7110_hw.h"
#include "av7110_av.h"
/*
 * Write a 16-bit value to an MSP34xx audio-processor register over I2C.
 * @dev selects the register bank (e.g. MSP_WR_DSP / MSP_WR_DEM).
 * A no-op (returning 0) when no supported ADAC is present.
 */
int msp_writereg(struct av7110 *av7110, u8 dev, u16 reg, u16 val)
{
	/* wire format: bank, reg high/low, value high/low */
	u8 msg[5] = { dev, reg >> 8, reg & 0xff, val >> 8 , val & 0xff };
	struct i2c_msg msgs = { .flags = 0, .len = 5, .buf = msg };

	switch (av7110->adac_type) {
	case DVB_ADAC_MSP34x0:
		msgs.addr = 0x40;
		break;
	case DVB_ADAC_MSP34x5:
		msgs.addr = 0x42;
		break;
	default:
		return 0;
	}

	if (i2c_transfer(&av7110->i2c_adap, &msgs, 1) != 1) {
		dprintk(1, "dvb-ttpci: failed @ card %d, %u = %u\n",
			av7110->dvb_adapter.num, reg, val);
		return -EIO;
	}
	return 0;
}
/*
 * Read a 16-bit MSP34xx register via a write-address/read-data I2C
 * transaction pair.  *val is only written on success.  A no-op
 * (returning 0, *val untouched) when no supported ADAC is present.
 */
static int msp_readreg(struct av7110 *av7110, u8 dev, u16 reg, u16 *val)
{
	u8 msg1[3] = { dev, reg >> 8, reg & 0xff };	/* bank + register */
	u8 msg2[2];					/* 16-bit result */
	struct i2c_msg msgs[2] = {
		{ .flags = 0	   , .len = 3, .buf = msg1 },
		{ .flags = I2C_M_RD, .len = 2, .buf = msg2 }
	};

	switch (av7110->adac_type) {
	case DVB_ADAC_MSP34x0:
		msgs[0].addr = 0x40;
		msgs[1].addr = 0x40;
		break;
	case DVB_ADAC_MSP34x5:
		msgs[0].addr = 0x42;
		msgs[1].addr = 0x42;
		break;
	default:
		return 0;
	}

	if (i2c_transfer(&av7110->i2c_adap, &msgs[0], 2) != 2) {
		dprintk(1, "dvb-ttpci: failed @ card %d, %u\n",
			av7110->dvb_adapter.num, reg);
		return -EIO;
	}
	*val = (msg2[0] << 8) | msg2[1];	/* big-endian on the wire */
	return 0;
}
/*
 * Selectable video inputs.  Input 0 (DVB pass-through) always exists;
 * inputs 1-3 are only enumerated on cards with an analog tuner
 * (see vidioc_enum_input()).
 */
static struct v4l2_input inputs[4] = {
	{
		.index		= 0,
		.name		= "DVB",
		.type		= V4L2_INPUT_TYPE_CAMERA,
		.audioset	= 1,
		.tuner		= 0, /* ignored */
		.std		= V4L2_STD_PAL_BG|V4L2_STD_NTSC_M,
		.status		= 0,
		.capabilities	= V4L2_IN_CAP_STD,
	}, {
		.index		= 1,
		.name		= "Television",
		.type		= V4L2_INPUT_TYPE_TUNER,
		.audioset	= 1,
		.tuner		= 0,
		.std		= V4L2_STD_PAL_BG|V4L2_STD_NTSC_M,
		.status		= 0,
		.capabilities	= V4L2_IN_CAP_STD,
	}, {
		.index		= 2,
		.name		= "Video",
		.type		= V4L2_INPUT_TYPE_CAMERA,
		.audioset	= 0,
		.tuner		= 0,
		.std		= V4L2_STD_PAL_BG|V4L2_STD_NTSC_M,
		.status		= 0,
		.capabilities	= V4L2_IN_CAP_STD,
	}, {
		.index		= 3,
		.name		= "Y/C",
		.type		= V4L2_INPUT_TYPE_CAMERA,
		.audioset	= 0,
		.tuner		= 0,
		.std		= V4L2_STD_PAL_BG|V4L2_STD_NTSC_M,
		.status		= 0,
		.capabilities	= V4L2_IN_CAP_STD,
	}
};
/*
 * Write one register of the VES1820 demodulator over I2C.
 * Returns 0 on success, -1 on transfer failure.
 */
static int ves1820_writereg(struct saa7146_dev *dev, u8 addr, u8 reg, u8 data)
{
	struct av7110 *av7110 = dev->ext_priv;
	u8 buf[] = { 0x00, reg, data };
	struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = buf, .len = 3 };

	dprintk(4, "dev: %p\n", dev);

	return i2c_transfer(&av7110->i2c_adap, &msg, 1) == 1 ? 0 : -1;
}
/*
 * Send a 4-byte command to the analog tuner at I2C address @addr.
 * Returns 0 on success, -1 on transfer failure.
 */
static int tuner_write(struct saa7146_dev *dev, u8 addr, u8 data [4])
{
	struct av7110 *av7110 = dev->ext_priv;
	struct i2c_msg msg = { .addr = addr, .flags = 0, .buf = data, .len = 4 };

	dprintk(4, "dev: %p\n", dev);

	return i2c_transfer(&av7110->i2c_adap, &msg, 1) == 1 ? 0 : -1;
}
/*
 * Tune the analog tuner behind the VES1820 to @freq (in units of
 * 62.5 kHz, as delivered by V4L2).  Selects the tuner band from the
 * frequency and writes the 4-byte divider/config sequence.
 */
static int ves1820_set_tv_freq(struct saa7146_dev *dev, u32 freq)
{
	u32 div;
	u8 config;
	u8 buf[4];

	dprintk(4, "freq: 0x%08x\n", freq);

	/* magic number: 614. tuning with the frequency given by v4l2
	   is always off by 614*62.5 = 38375 kHz...*/
	div = freq + 614;

	buf[0] = (div >> 8) & 0x7f;
	buf[1] = div & 0xff;
	buf[2] = 0x8e;

	/* band selection thresholds: 168.25 MHz and 447.25 MHz */
	if (freq < 16U * 16825 / 100)
		config = 0xa0;
	else if (freq < 16U * 44725 / 100)
		config = 0x90;
	else
		config = 0x30;
	config &= ~0x02;

	buf[3] = config;

	return tuner_write(dev, 0x61, buf);
}
/*
 * Tune the analog tuner behind the STV0297 to @freq (in Hz).  Valid
 * range is 45 MHz .. 860 MHz; band selection byte depends on the
 * frequency.  Opens the demodulator's I2C gate before tuning.
 */
static int stv0297_set_tv_freq(struct saa7146_dev *dev, u32 freq)
{
	struct av7110 *av7110 = (struct av7110*)dev->ext_priv;
	u32 div;
	u8 data[4];

	/* divider: (freq + 38.9 MHz IF) / 62.5 kHz, rounded */
	div = (freq + 38900000 + 31250) / 62500;

	data[0] = (div >> 8) & 0x7f;
	data[1] = div & 0xff;
	data[2] = 0xce;

	if (freq < 45000000)
		return -EINVAL;
	else if (freq < 137000000)
		data[3] = 0x01;
	else if (freq < 403000000)
		data[3] = 0x02;
	else if (freq < 860000000)
		data[3] = 0x04;
	else
		return -EINVAL;

	/* let the tuner command pass through the demodulator */
	if (av7110->fe->ops.i2c_gate_ctrl)
		av7110->fe->ops.i2c_gate_ctrl(av7110->fe, 1);
	return tuner_write(dev, 0x63, data);
}
static struct saa7146_standard analog_standard[];
static struct saa7146_standard dvb_standard[];
static struct saa7146_standard standard[];
/* The single audio input reported via VIDIOC_ENUMAUDIO/G_AUDIO. */
static const struct v4l2_audio msp3400_v4l2_audio = {
	.index = 0,
	.name = "Television",
	.capability = V4L2_AUDCAP_STEREO
};
/*
 * Switch the card between DVB pass-through (current_input == 0) and
 * one of the analog inputs (tuner / CVBS / Y-C).  Reconfigures the
 * SAA7146 video source, the MSP34xx audio routing, the SAA7113 video
 * decoder input and (for analog tuner input) the demodulator band,
 * then notifies the ARM firmware.
 */
static int av7110_dvb_c_switch(struct saa7146_dev *dev)
{
	struct av7110 *av7110 = (struct av7110*)dev->ext_priv;
	u16 adswitch;
	int source, sync;

	dprintk(4, "%p\n", av7110);

	if (0 != av7110->current_input) {
		dprintk(1, "switching to analog TV:\n");
		adswitch = 1;
		source = SAA7146_HPS_SOURCE_PORT_B;
		sync = SAA7146_HPS_SYNC_PORT_B;
		/* use analog timing parameters while in analog mode */
		memcpy(standard, analog_standard, sizeof(struct saa7146_standard) * 2);

		switch (av7110->current_input) {
		case 1:
			dprintk(1, "switching SAA7113 to Analog Tuner Input\n");
			msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0000); // loudspeaker source
			msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0000); // headphone source
			msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0000); // SCART 1 source
			msp_writereg(av7110, MSP_WR_DSP, 0x000e, 0x3000); // FM matrix, mono
			msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x4f00); // loudspeaker + headphone
			msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x4f00); // SCART 1 volume

			if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
				if (ves1820_writereg(dev, 0x09, 0x0f, 0x60))
					dprintk(1, "setting band in demodulator failed\n");
			} else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
				saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTHI); // TDA9819 pin9(STD)
				saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTHI); // TDA9819 pin30(VIF)
			}
			if (i2c_writereg(av7110, 0x48, 0x02, 0xd0) != 1)
				dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
			break;
		case 2:
			dprintk(1, "switching SAA7113 to Video AV CVBS Input\n");
			if (i2c_writereg(av7110, 0x48, 0x02, 0xd2) != 1)
				dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
			break;
		case 3:
			dprintk(1, "switching SAA7113 to Video AV Y/C Input\n");
			if (i2c_writereg(av7110, 0x48, 0x02, 0xd9) != 1)
				dprintk(1, "saa7113 write failed @ card %d", av7110->dvb_adapter.num);
			break;
		default:
			dprintk(1, "switching SAA7113 to Input: AV7110: SAA7113: invalid input\n");
		}
	} else {
		adswitch = 0;
		source = SAA7146_HPS_SOURCE_PORT_A;
		sync = SAA7146_HPS_SYNC_PORT_A;
		/* restore DVB timing parameters */
		memcpy(standard, dvb_standard, sizeof(struct saa7146_standard) * 2);
		dprintk(1, "switching DVB mode\n");

		msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); // loudspeaker source
		msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0220); // headphone source
		msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source
		msp_writereg(av7110, MSP_WR_DSP, 0x000e, 0x3000); // FM matrix, mono
		msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone
		msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume

		if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
			if (ves1820_writereg(dev, 0x09, 0x0f, 0x20))
				dprintk(1, "setting band in demodulator failed\n");
		} else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
			saa7146_setgpio(dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD)
			saa7146_setgpio(dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF)
		}
	}

	/* hmm, this does not do anything!? */
	if (av7110_fw_cmd(av7110, COMTYPE_AUDIODAC, ADSwitch, 1, adswitch))
		dprintk(1, "ADSwitch error\n");

	saa7146_set_hps_source_and_sync(dev, source, sync);

	return 0;
}
/*
 * VIDIOC_G_TUNER: report capabilities and the stereo mode detected by
 * the MSP34xx.  Only tuner 0 on cards with an analog tuner is valid.
 */
static int vidioc_g_tuner(struct file *file, void *fh, struct v4l2_tuner *t)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
	u16 stereo_det;
	s8 stereo;

	dprintk(2, "VIDIOC_G_TUNER: %d\n", t->index);

	if (!av7110->analog_tuner_flags || t->index != 0)
		return -EINVAL;

	/* note: clears the whole struct, including the validated index */
	memset(t, 0, sizeof(*t));
	strscpy((char *)t->name, "Television", sizeof(t->name));

	t->type = V4L2_TUNER_ANALOG_TV;
	t->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
		V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
	t->rangelow = 772;	/* 48.25 MHZ / 62.5 kHz = 772, see fi1216mk2-specs, page 2 */
	t->rangehigh = 13684;	/* 855.25 MHz / 62.5 kHz = 13684 */
	/* FIXME: add the real signal strength here */
	t->signal = 0xffff;
	t->afc = 0;

	/* FIXME: standard / stereo detection is still broken */
	msp_readreg(av7110, MSP_RD_DEM, 0x007e, &stereo_det);
	dprintk(1, "VIDIOC_G_TUNER: msp3400 TV standard detection: 0x%04x\n", stereo_det);
	msp_readreg(av7110, MSP_RD_DSP, 0x0018, &stereo_det);
	dprintk(1, "VIDIOC_G_TUNER: msp3400 stereo detection: 0x%04x\n", stereo_det);
	/* signed high byte: positive = stereo, negative = bilingual */
	stereo = (s8)(stereo_det >> 8);

	if (stereo > 0x10) {
		/* stereo */
		t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
		t->audmode = V4L2_TUNER_MODE_STEREO;
	} else if (stereo < -0x10) {
		/* bilingual */
		t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
		t->audmode = V4L2_TUNER_MODE_LANG1;
	} else /* mono */
		t->rxsubchans = V4L2_TUNER_SUB_MONO;

	return 0;
}
/*
 * VIDIOC_S_TUNER: program the MSP34xx FM matrix and source routing for
 * the requested audio mode.  Only valid while the analog tuner input
 * (input 1) is selected.
 */
static int vidioc_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *t)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
	u16 fm_matrix, src;

	dprintk(2, "VIDIOC_S_TUNER: %d\n", t->index);

	if (!av7110->analog_tuner_flags || av7110->current_input != 1)
		return -EINVAL;

	switch (t->audmode) {
	case V4L2_TUNER_MODE_STEREO:
		dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_STEREO\n");
		fm_matrix = 0x3001;	/* stereo */
		src = 0x0020;
		break;
	case V4L2_TUNER_MODE_LANG1_LANG2:
		dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG1_LANG2\n");
		fm_matrix = 0x3000;	/* bilingual */
		src = 0x0020;
		break;
	case V4L2_TUNER_MODE_LANG1:
		dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG1\n");
		fm_matrix = 0x3000;	/* mono */
		src = 0x0000;
		break;
	case V4L2_TUNER_MODE_LANG2:
		dprintk(2, "VIDIOC_S_TUNER: V4L2_TUNER_MODE_LANG2\n");
		fm_matrix = 0x3000;	/* mono */
		src = 0x0010;
		break;
	default: /* case V4L2_TUNER_MODE_MONO: */
		dprintk(2, "VIDIOC_S_TUNER: TDA9840_SET_MONO\n");
		fm_matrix = 0x3000;	/* mono */
		src = 0x0030;
		break;
	}

	/* apply matrix, then route loudspeaker, headphone and SCART 1 */
	msp_writereg(av7110, MSP_WR_DSP, 0x000e, fm_matrix);
	msp_writereg(av7110, MSP_WR_DSP, 0x0008, src);
	msp_writereg(av7110, MSP_WR_DSP, 0x0009, src);
	msp_writereg(av7110, MSP_WR_DSP, 0x000a, src);
	return 0;
}
static int vidioc_g_frequency(struct file *file, void *fh, struct v4l2_frequency *f)
{
struct saa7146_dev *dev = video_drvdata(file);
struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
dprintk(2, "VIDIOC_G_FREQ: freq:0x%08x\n", f->frequency);
if (!av7110->analog_tuner_flags || av7110->current_input != 1)
return -EINVAL;
memset(f, 0, sizeof(*f));
f->type = V4L2_TUNER_ANALOG_TV;
f->frequency = av7110->current_freq;
return 0;
}
/*
 * VIDIOC_S_FREQUENCY: mute, retune the analog tuner (VES1820 or
 * STV0297 variant), restart stereo detection and unmute.  Only valid
 * while input 1 (analog tuner) is selected.
 */
static int vidioc_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *f)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;

	dprintk(2, "VIDIOC_S_FREQUENCY: freq:0x%08x\n", f->frequency);

	if (!av7110->analog_tuner_flags || av7110->current_input != 1)
		return -EINVAL;

	if (V4L2_TUNER_ANALOG_TV != f->type)
		return -EINVAL;

	msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0xffe0);	/* fast mute */
	msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0xffe0);

	/* tune in desired frequency */
	if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820)
		ves1820_set_tv_freq(dev, f->frequency);
	else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297)
		stv0297_set_tv_freq(dev, f->frequency);
	av7110->current_freq = f->frequency;	/* cache for G_FREQUENCY */

	msp_writereg(av7110, MSP_WR_DSP, 0x0015, 0x003f);	/* start stereo detection */
	msp_writereg(av7110, MSP_WR_DSP, 0x0015, 0x0000);
	msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x4f00);	/* loudspeaker + headphone */
	msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x4f00);	/* SCART 1 volume */
	return 0;
}
/*
 * VIDIOC_ENUMINPUT: cards with an analog tuner expose all four inputs,
 * everything else only the DVB input (index 0).
 */
static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
	unsigned int num_inputs = av7110->analog_tuner_flags ? 4 : 1;

	dprintk(2, "VIDIOC_ENUMINPUT: %d\n", i->index);

	if (i->index >= num_inputs)
		return -EINVAL;

	*i = inputs[i->index];
	return 0;
}
/* VIDIOC_G_INPUT: return the currently selected input index. */
static int vidioc_g_input(struct file *file, void *fh, unsigned int *input)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;

	*input = av7110->current_input;
	dprintk(2, "VIDIOC_G_INPUT: %d\n", *input);
	return 0;
}
/*
 * VIDIOC_S_INPUT: select a video input and reroute the hardware.
 * Without an analog tuner only input 0 is accepted (and is a no-op).
 */
static int vidioc_s_input(struct file *file, void *fh, unsigned int input)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;

	dprintk(2, "VIDIOC_S_INPUT: %d\n", input);

	if (!av7110->analog_tuner_flags) {
		if (input != 0)
			return -EINVAL;
		return 0;
	}

	if (input > 3)
		return -EINVAL;

	av7110->current_input = input;
	return av7110_dvb_c_switch(dev);
}
/* VIDIOC_ENUMOUTPUT: a single fixed analog output exists. */
static int vidioc_enum_output(struct file *file, void *fh, struct v4l2_output *o)
{
	if (o->index != 0)
		return -EINVAL;

	strscpy(o->name, "Video Output", sizeof(o->name));
	o->type = V4L2_OUTPUT_TYPE_ANALOG;
	o->std = V4L2_STD_PAL_BG | V4L2_STD_NTSC_M;
	o->capabilities = V4L2_OUT_CAP_STD;
	return 0;
}
/* VIDIOC_G_OUTPUT: only output 0 exists. */
static int vidioc_g_output(struct file *file, void *fh, unsigned int *output)
{
	*output = 0;

	return 0;
}
/* VIDIOC_S_OUTPUT: only output 0 is accepted; selecting it is a no-op. */
static int vidioc_s_output(struct file *file, void *fh, unsigned int output)
{
	if (output != 0)
		return -EINVAL;
	return 0;
}
/* VIDIOC_ENUMAUDIO: a single audio input (the MSP3400 "Television"). */
static int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *a)
{
	dprintk(2, "VIDIOC_G_AUDIO: %d\n", a->index);
	if (a->index)
		return -EINVAL;
	*a = msp3400_v4l2_audio;
	return 0;
}
/*
 * VIDIOC_G_AUDIO: report the MSP3400 audio input; only valid on the
 * DVB (0) and analog tuner (1) inputs.
 */
static int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *a)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;

	dprintk(2, "VIDIOC_G_AUDIO: %d\n", a->index);
	if (a->index != 0 || av7110->current_input >= 2)
		return -EINVAL;
	*a = msp3400_v4l2_audio;
	return 0;
}
/*
 * VIDIOC_S_AUDIO: only audio input 0 on inputs 0/1 is accepted;
 * nothing needs to be programmed.
 */
static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *a)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;

	dprintk(2, "VIDIOC_S_AUDIO: %d\n", a->index);
	if (av7110->current_input >= 2 || a->index != 0)
		return -EINVAL;
	return 0;
}
/*
 * VIDIOC_G_SLICED_VBI_CAP: only WSS on line 23 of the output stream is
 * supported, and only with ARM firmware >= 0x2623.
 */
static int vidioc_g_sliced_vbi_cap(struct file *file, void *fh,
				   struct v4l2_sliced_vbi_cap *cap)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;

	dprintk(2, "VIDIOC_G_SLICED_VBI_CAP\n");

	if (cap->type != V4L2_BUF_TYPE_SLICED_VBI_OUTPUT)
		return -EINVAL;
	if (FW_VERSION(av7110->arm_app) >= 0x2623) {
		cap->service_set = V4L2_SLICED_WSS_625;
		cap->service_lines[0][23] = V4L2_SLICED_WSS_625;
	}
	return 0;
}
/*
 * VIDIOC_G_FMT (sliced VBI out): report whether WSS insertion is
 * currently under user-space control (wssMode set).
 */
static int vidioc_g_fmt_sliced_vbi_out(struct file *file, void *fh,
				       struct v4l2_format *f)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;

	dprintk(2, "VIDIOC_G_FMT:\n");

	if (FW_VERSION(av7110->arm_app) < 0x2623)
		return -EINVAL;
	memset(&f->fmt.sliced, 0, sizeof f->fmt.sliced);
	if (av7110->wssMode) {
		f->fmt.sliced.service_set = V4L2_SLICED_WSS_625;
		f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
	}
	f->fmt.sliced.io_size = sizeof(struct v4l2_sliced_vbi_data);
	return 0;
}
/*
 * VIDIOC_TRY_FMT (sliced VBI out): normalize the request.  WSS is
 * selected either explicitly via service_set or via line 23 of field 0;
 * everything else is cleared.
 */
static int vidioc_try_fmt_sliced_vbi_out(struct file *file, void *fh,
					 struct v4l2_format *f)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;
	bool want_wss = (f->fmt.sliced.service_set & V4L2_SLICED_WSS_625) ||
			(!f->fmt.sliced.service_set &&
			 f->fmt.sliced.service_lines[0][23] == V4L2_SLICED_WSS_625);

	dprintk(2, "VIDIOC_G_FMT:\n");

	if (FW_VERSION(av7110->arm_app) < 0x2623)
		return -EINVAL;
	memset(&f->fmt.sliced, 0, sizeof(f->fmt.sliced));
	if (want_wss) {
		f->fmt.sliced.service_set = V4L2_SLICED_WSS_625;
		f->fmt.sliced.service_lines[0][23] = V4L2_SLICED_WSS_625;
	}
	f->fmt.sliced.io_size = sizeof(struct v4l2_sliced_vbi_data);
	return 0;
}
/*
 * VIDIOC_S_FMT (sliced VBI out): hand WSS insertion either to user
 * space (wssMode = 1; data then arrives via av7110_vbi_write()) or
 * back to the firmware.
 */
static int vidioc_s_fmt_sliced_vbi_out(struct file *file, void *fh,
				       struct v4l2_format *f)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110 *)dev->ext_priv;

	dprintk(2, "VIDIOC_S_FMT\n");

	if (vidioc_try_fmt_sliced_vbi_out(file, fh, f))
		return -EINVAL;
	if (f->fmt.sliced.service_set & V4L2_SLICED_WSS_625) {
		/* WSS controlled by userspace */
		av7110->wssMode = 1;
		av7110->wssData = 0;
	} else {
		/* WSS controlled by firmware */
		av7110->wssMode = 0;
		av7110->wssData = 0;
		return av7110_fw_cmd(av7110, COMTYPE_ENCODER,
				     SetWSSConfig, 1, 0);
	}
	return 0;
}
/*
 * write() on the VBI device: accept exactly one v4l2_sliced_vbi_data
 * record carrying WSS for field 0, line 23, and forward it to the
 * firmware.  A record with id == 0 disables WSS (0x8000 sentinel).
 * Only usable when user-space WSS mode was enabled via S_FMT.
 */
static ssize_t av7110_vbi_write(struct file *file, const char __user *data, size_t count, loff_t *ppos)
{
	struct saa7146_dev *dev = video_drvdata(file);
	struct av7110 *av7110 = (struct av7110*) dev->ext_priv;
	struct v4l2_sliced_vbi_data d;
	int rc;

	dprintk(2, "%s\n", __func__);
	if (FW_VERSION(av7110->arm_app) < 0x2623 || !av7110->wssMode || count != sizeof d)
		return -EINVAL;
	if (copy_from_user(&d, data, count))
		return -EFAULT;
	if ((d.id != 0 && d.id != V4L2_SLICED_WSS_625) || d.field != 0 || d.line != 23)
		return -EINVAL;
	if (d.id)
		/* 14-bit WSS payload from the first two data bytes */
		av7110->wssData = ((d.data[1] << 8) & 0x3f00) | d.data[0];
	else
		av7110->wssData = 0x8000;
	rc = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetWSSConfig, 2, 1, av7110->wssData);
	return (rc < 0) ? rc : count;
}
/****************************************************************************
* INITIALIZATION
****************************************************************************/
/*
 * SAA7113 initialization sequence: flat list of register/value pairs,
 * terminated by a single 0xff (consumed by the loop in
 * av7110_init_analog_module()).
 */
static u8 saa7113_init_regs[] = {
	0x02, 0xd0,
	0x03, 0x23,
	0x04, 0x00,
	0x05, 0x00,
	0x06, 0xe9,
	0x07, 0x0d,
	0x08, 0x98,
	0x09, 0x02,
	0x0a, 0x80,
	0x0b, 0x40,
	0x0c, 0x40,
	0x0d, 0x00,
	0x0e, 0x01,
	0x0f, 0x7c,
	0x10, 0x48,
	0x11, 0x0c,
	0x12, 0x8b,
	0x13, 0x1a,
	0x14, 0x00,
	0x15, 0x00,
	0x16, 0x00,
	0x17, 0x00,
	0x18, 0x00,
	0x19, 0x00,
	0x1a, 0x00,
	0x1b, 0x00,
	0x1c, 0x00,
	0x1d, 0x00,
	0x1e, 0x00,
	0x41, 0x77,
	0x42, 0x77,
	0x43, 0x77,
	0x44, 0x77,
	0x45, 0x77,
	0x46, 0x77,
	0x47, 0x77,
	0x48, 0x77,
	0x49, 0x77,
	0x4a, 0x77,
	0x4b, 0x77,
	0x4c, 0x77,
	0x4d, 0x77,
	0x4e, 0x77,
	0x4f, 0x77,
	0x50, 0x77,
	0x51, 0x77,
	0x52, 0x77,
	0x53, 0x77,
	0x54, 0x77,
	0x55, 0x77,
	0x56, 0x77,
	0x57, 0xff,

	0xff	/* end-of-table marker */
};
static struct saa7146_ext_vv av7110_vv_data_st;
static struct saa7146_ext_vv av7110_vv_data_c;
/*
 * Probe and initialize the DVB-C analog module: detect the MSP34x0/x5
 * audio processor, program its default routing, identify the analog
 * tuner variant from the PCI subsystem IDs, initialize the SAA7113
 * video decoder and configure the MSP demodulator for B/G dual FM.
 * Returns -ENODEV when no module responds on I2C.
 */
int av7110_init_analog_module(struct av7110 *av7110)
{
	u16 version1, version2;

	/* probe the MSP at either of its two possible I2C addresses */
	if (i2c_writereg(av7110, 0x80, 0x0, 0x80) == 1 &&
	    i2c_writereg(av7110, 0x80, 0x0, 0) == 1) {
		pr_info("DVB-C analog module @ card %d detected, initializing MSP3400\n",
			av7110->dvb_adapter.num);
		av7110->adac_type = DVB_ADAC_MSP34x0;
	} else if (i2c_writereg(av7110, 0x84, 0x0, 0x80) == 1 &&
		   i2c_writereg(av7110, 0x84, 0x0, 0) == 1) {
		pr_info("DVB-C analog module @ card %d detected, initializing MSP3415\n",
			av7110->dvb_adapter.num);
		av7110->adac_type = DVB_ADAC_MSP34x5;
	} else
		return -ENODEV;

	msleep(100); // the probing above resets the msp...
	msp_readreg(av7110, MSP_RD_DSP, 0x001e, &version1);
	msp_readreg(av7110, MSP_RD_DSP, 0x001f, &version2);
	dprintk(1, "dvb-ttpci: @ card %d MSP34xx version 0x%04x 0x%04x\n",
		av7110->dvb_adapter.num, version1, version2);

	/* default audio routing and volumes */
	msp_writereg(av7110, MSP_WR_DSP, 0x0013, 0x0c00);
	msp_writereg(av7110, MSP_WR_DSP, 0x0000, 0x7f00); // loudspeaker + headphone
	msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220); // loudspeaker source
	msp_writereg(av7110, MSP_WR_DSP, 0x0009, 0x0220); // headphone source
	msp_writereg(av7110, MSP_WR_DSP, 0x0004, 0x7f00); // loudspeaker volume
	msp_writereg(av7110, MSP_WR_DSP, 0x000a, 0x0220); // SCART 1 source
	msp_writereg(av7110, MSP_WR_DSP, 0x0007, 0x7f00); // SCART 1 volume
	msp_writereg(av7110, MSP_WR_DSP, 0x000d, 0x1900); // prescale SCART

	if (i2c_writereg(av7110, 0x48, 0x01, 0x00)!=1) {
		pr_info("saa7113 not accessible\n");
	} else {
		u8 *i = saa7113_init_regs;

		/* pick the analog tuner type from the PCI subsystem IDs */
		if ((av7110->dev->pci->subsystem_vendor == 0x110a) && (av7110->dev->pci->subsystem_device == 0x0000)) {
			/* Fujitsu/Siemens DVB-Cable */
			av7110->analog_tuner_flags |= ANALOG_TUNER_VES1820;
		} else if ((av7110->dev->pci->subsystem_vendor == 0x13c2) && (av7110->dev->pci->subsystem_device == 0x0002)) {
			/* Hauppauge/TT DVB-C premium */
			av7110->analog_tuner_flags |= ANALOG_TUNER_VES1820;
		} else if ((av7110->dev->pci->subsystem_vendor == 0x13c2) && (av7110->dev->pci->subsystem_device == 0x000A)) {
			/* Hauppauge/TT DVB-C premium */
			av7110->analog_tuner_flags |= ANALOG_TUNER_STV0297;
		}

		/* setup for DVB by default */
		if (av7110->analog_tuner_flags & ANALOG_TUNER_VES1820) {
			if (ves1820_writereg(av7110->dev, 0x09, 0x0f, 0x20))
				dprintk(1, "setting band in demodulator failed\n");
		} else if (av7110->analog_tuner_flags & ANALOG_TUNER_STV0297) {
			saa7146_setgpio(av7110->dev, 1, SAA7146_GPIO_OUTLO); // TDA9819 pin9(STD)
			saa7146_setgpio(av7110->dev, 3, SAA7146_GPIO_OUTLO); // TDA9819 pin30(VIF)
		}

		/* init the saa7113 */
		while (*i != 0xff) {
			if (i2c_writereg(av7110, 0x48, i[0], i[1]) != 1) {
				dprintk(1, "saa7113 initialization failed @ card %d", av7110->dvb_adapter.num);
				break;
			}
			i += 2;
		}
		/* setup msp for analog sound: B/G Dual-FM */
		msp_writereg(av7110, MSP_WR_DEM, 0x00bb, 0x02d0); // AD_CV
		msp_writereg(av7110, MSP_WR_DEM, 0x0001,  3); // FIR1
		msp_writereg(av7110, MSP_WR_DEM, 0x0001, 18); // FIR1
		msp_writereg(av7110, MSP_WR_DEM, 0x0001, 27); // FIR1
		msp_writereg(av7110, MSP_WR_DEM, 0x0001, 48); // FIR1
		msp_writereg(av7110, MSP_WR_DEM, 0x0001, 66); // FIR1
		msp_writereg(av7110, MSP_WR_DEM, 0x0001, 72); // FIR1
		msp_writereg(av7110, MSP_WR_DEM, 0x0005,  4); // FIR2
		msp_writereg(av7110, MSP_WR_DEM, 0x0005, 64); // FIR2
		msp_writereg(av7110, MSP_WR_DEM, 0x0005,  0); // FIR2
		msp_writereg(av7110, MSP_WR_DEM, 0x0005,  3); // FIR2
		msp_writereg(av7110, MSP_WR_DEM, 0x0005, 18); // FIR2
		msp_writereg(av7110, MSP_WR_DEM, 0x0005, 27); // FIR2
		msp_writereg(av7110, MSP_WR_DEM, 0x0005, 48); // FIR2
		msp_writereg(av7110, MSP_WR_DEM, 0x0005, 66); // FIR2
		msp_writereg(av7110, MSP_WR_DEM, 0x0005, 72); // FIR2
		msp_writereg(av7110, MSP_WR_DEM, 0x0083, 0xa000); // MODE_REG
		msp_writereg(av7110, MSP_WR_DEM, 0x0093, 0x00aa); // DCO1_LO 5.74MHz
		msp_writereg(av7110, MSP_WR_DEM, 0x009b, 0x04fc); // DCO1_HI
		msp_writereg(av7110, MSP_WR_DEM, 0x00a3, 0x038e); // DCO2_LO 5.5MHz
		msp_writereg(av7110, MSP_WR_DEM, 0x00ab, 0x04c6); // DCO2_HI
		msp_writereg(av7110, MSP_WR_DEM, 0x0056, 0); // LOAD_REG 1/2
	}

	memcpy(standard, dvb_standard, sizeof(struct saa7146_standard) * 2);
	/* set dd1 stream a & b */
	saa7146_write(av7110->dev, DD1_STREAM_B, 0x00000000);
	saa7146_write(av7110->dev, DD1_INIT, 0x03000700);
	saa7146_write(av7110->dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));

	return 0;
}
/*
 * Register the V4L2 side of the driver: pick the DVB-C or plain
 * saa7146_ext_vv template, hook up our ioctl handlers and register the
 * video (and, with new enough firmware, VBI) device nodes.
 */
int av7110_init_v4l(struct av7110 *av7110)
{
	struct saa7146_dev* dev = av7110->dev;
	struct saa7146_ext_vv *vv_data;
	int ret;

	/* special case DVB-C: these cards have an analog tuner
	   plus need some special handling, so we have separate
	   saa7146_ext_vv data for these... */
	if (av7110->analog_tuner_flags)
		vv_data = &av7110_vv_data_c;
	else
		vv_data = &av7110_vv_data_st;
	ret = saa7146_vv_init(dev, vv_data);

	if (ret) {
		ERR("cannot init capture device. skipping\n");
		return -ENODEV;
	}
	/* override the generic saa7146 ioctl handlers with ours */
	vv_data->vid_ops.vidioc_enum_input = vidioc_enum_input;
	vv_data->vid_ops.vidioc_g_input = vidioc_g_input;
	vv_data->vid_ops.vidioc_s_input = vidioc_s_input;
	vv_data->vid_ops.vidioc_g_tuner = vidioc_g_tuner;
	vv_data->vid_ops.vidioc_s_tuner = vidioc_s_tuner;
	vv_data->vid_ops.vidioc_g_frequency = vidioc_g_frequency;
	vv_data->vid_ops.vidioc_s_frequency = vidioc_s_frequency;
	vv_data->vid_ops.vidioc_enumaudio = vidioc_enumaudio;
	vv_data->vid_ops.vidioc_g_audio = vidioc_g_audio;
	vv_data->vid_ops.vidioc_s_audio = vidioc_s_audio;
	vv_data->vid_ops.vidioc_g_fmt_vbi_cap = NULL;

	/* the VBI node is output-only (WSS insertion); raw capture is disabled */
	vv_data->vbi_ops.vidioc_enum_output = vidioc_enum_output;
	vv_data->vbi_ops.vidioc_g_output = vidioc_g_output;
	vv_data->vbi_ops.vidioc_s_output = vidioc_s_output;
	vv_data->vbi_ops.vidioc_g_parm = NULL;
	vv_data->vbi_ops.vidioc_g_fmt_vbi_cap = NULL;
	vv_data->vbi_ops.vidioc_try_fmt_vbi_cap = NULL;
	vv_data->vbi_ops.vidioc_s_fmt_vbi_cap = NULL;
	vv_data->vbi_ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
	vv_data->vbi_ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
	vv_data->vbi_ops.vidioc_try_fmt_sliced_vbi_out = vidioc_try_fmt_sliced_vbi_out;
	vv_data->vbi_ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;

	/* sliced VBI output needs firmware >= 0x2623 */
	if (FW_VERSION(av7110->arm_app) < 0x2623)
		vv_data->capabilities &= ~V4L2_CAP_SLICED_VBI_OUTPUT;

	if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_VIDEO)) {
		ERR("cannot register capture device. skipping\n");
		saa7146_vv_release(dev);
		return -ENODEV;
	}
	if (FW_VERSION(av7110->arm_app) >= 0x2623) {
		if (saa7146_register_device(&av7110->vbi_dev, dev, "av7110", VFL_TYPE_VBI))
			ERR("cannot register vbi v4l2 device. skipping\n");
	}
	return 0;
}
/* Tear down the V4L2 devices; counterpart of av7110_init_v4l(). */
int av7110_exit_v4l(struct av7110 *av7110)
{
	struct saa7146_dev* dev = av7110->dev;

	saa7146_unregister_device(&av7110->v4l_dev, dev);
	saa7146_unregister_device(&av7110->vbi_dev, dev);

	saa7146_vv_release(dev);
	return 0;
}
/* FIXME: these values are experimental values that look better than the
values from the latest "official" driver -- at least for me... (MiHu) */
/*
 * Active timing table.  Its contents are overwritten at runtime with
 * either analog_standard[] or dvb_standard[] when the input switches
 * (see av7110_dvb_c_switch()).
 */
static struct saa7146_standard standard[] = {
	{
		.name	= "PAL",	.id	= V4L2_STD_PAL_BG,
		.v_offset	= 0x15,	.v_field	= 288,
		.h_offset	= 0x48,	.h_pixels	= 708,
		.v_max_out	= 576,	.h_max_out	= 768,
	}, {
		.name	= "NTSC",	.id	= V4L2_STD_NTSC_M,
		.v_offset	= 0x10,	.v_field	= 244,
		.h_offset	= 0x40,	.h_pixels	= 708,
		.v_max_out	= 480,	.h_max_out	= 640,
	}
};
/* Timing parameters used while an analog input is selected. */
static struct saa7146_standard analog_standard[] = {
	{
		.name	= "PAL",	.id	= V4L2_STD_PAL_BG,
		.v_offset	= 0x1b,	.v_field	= 288,
		.h_offset	= 0x08,	.h_pixels	= 708,
		.v_max_out	= 576,	.h_max_out	= 768,
	}, {
		.name	= "NTSC",	.id	= V4L2_STD_NTSC_M,
		.v_offset	= 0x10,	.v_field	= 244,
		.h_offset	= 0x40,	.h_pixels	= 708,
		.v_max_out	= 480,	.h_max_out	= 640,
	}
};
static struct saa7146_standard dvb_standard[] = {
{
.name = "PAL", .id = V4L2_STD_PAL_BG,
.v_offset = 0x14, .v_field = 288,
.h_offset = 0x48, .h_pixels = 708,
.v_max_out = 576, .h_max_out = 768,
}, {
.name = "NTSC", .id = V4L2_STD_NTSC_M,
.v_offset = 0x10, .v_field = 244,
.h_offset = 0x40, .h_pixels = 708,
.v_max_out = 480, .h_max_out = 640,
}
};
static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std)
{
struct av7110 *av7110 = (struct av7110*) dev->ext_priv;
if (std->id & V4L2_STD_PAL) {
av7110->vidmode = AV7110_VIDEO_MODE_PAL;
av7110_set_vidmode(av7110, av7110->vidmode);
}
else if (std->id & V4L2_STD_NTSC) {
av7110->vidmode = AV7110_VIDEO_MODE_NTSC;
av7110_set_vidmode(av7110, av7110->vidmode);
}
else
return -1;
return 0;
}
/* Extension-vv description for boards without an analog tuner. */
static struct saa7146_ext_vv av7110_vv_data_st = {
.inputs = 1,
.audios = 1,
.capabilities = V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO,
.flags = 0,
.stds = &standard[0],
.num_stds = ARRAY_SIZE(standard),
.std_callback = &std_callback,
.vbi_fops.write = av7110_vbi_write,
};
/* Extension-vv description for boards with an analog tuner:
 * adds V4L2_CAP_TUNER and routes VBI through saa7146 port B. */
static struct saa7146_ext_vv av7110_vv_data_c = {
.inputs = 1,
.audios = 1,
.capabilities = V4L2_CAP_TUNER | V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO,
.flags = SAA7146_USE_PORT_B_FOR_VBI,
.stds = &standard[0],
.num_stds = ARRAY_SIZE(standard),
.std_callback = &std_callback,
.vbi_fops.write = av7110_vbi_write,
};
| linux-master | drivers/staging/media/av7110/av7110_v4l.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* av7110_av.c: audio and video MPEG decoder stuff
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* originally based on code by:
* Copyright (C) 1998,1999 Christian Theiss <[email protected]>
*
* the project's page is at https://linuxtv.org
*/
#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include "av7110.h"
#include "av7110_hw.h"
#include "av7110_av.h"
#include "av7110_ipack.h"
/* MPEG-2 (ISO 13818 / H.222.0) stream types */
#define PROG_STREAM_MAP 0xBC
#define PRIVATE_STREAM1 0xBD
#define PADDING_STREAM 0xBE
#define PRIVATE_STREAM2 0xBF
#define AUDIO_STREAM_S 0xC0
#define AUDIO_STREAM_E 0xDF
#define VIDEO_STREAM_S 0xE0
#define VIDEO_STREAM_E 0xEF
#define ECM_STREAM 0xF0
#define EMM_STREAM 0xF1
#define DSM_CC_STREAM 0xF2
#define ISO13522_STREAM 0xF3
#define PROG_STREAM_DIR 0xFF
#define PTS_DTS_FLAGS 0xC0
/* PES header PTS/DTS flag values */
#define PTS_ONLY 0x80
#define PTS_DTS 0xC0
#define TS_SIZE 188
/* TS header byte 1 flags */
#define TRANS_ERROR 0x80
#define PAY_START 0x40
#define TRANS_PRIO 0x20
#define PID_MASK_HI 0x1F
/* TS header byte 3 flags */
#define TRANS_SCRMBL1 0x80
#define TRANS_SCRMBL2 0x40
#define ADAPT_FIELD 0x20
#define PAYLOAD 0x10
#define COUNT_MASK 0x0F
/* adaptation field flags */
#define DISCON_IND 0x80
#define RAND_ACC_IND 0x40
#define ES_PRI_IND 0x20
#define PCR_FLAG 0x10
#define OPCR_FLAG 0x08
#define SPLICE_FLAG 0x04
#define TRANS_PRIV 0x02
#define ADAP_EXT_FLAG 0x01
/* adaptation extension flags */
#define LTW_FLAG 0x80
#define PIECE_RATE 0x40
#define SEAM_SPLICE 0x20
static void p_to_t(u8 const *buf, long int length, u16 pid,
u8 *counter, struct dvb_demux_feed *feed);
static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, size_t len);
/*
 * Record callback: receive a PES packet from the decoder and forward it
 * to the demux feed, either as raw PES payload or repacked into TS.
 * Returns the feed callback's result, or 0 if the feed takes no TS.
 */
int av7110_record_cb(struct dvb_filter_pes2ts *p2t, u8 *buf, size_t len)
{
struct dvb_demux_feed *dvbdmxfeed = p2t->priv;
if (!(dvbdmxfeed->ts_type & TS_PACKET))
return 0;
if (buf[3] == 0xe0) // video PES do not have a length in TS
buf[4] = buf[5] = 0; /* clear the PES packet-length field */
if (dvbdmxfeed->ts_type & TS_PAYLOAD_ONLY)
return dvbdmxfeed->cb.ts(buf, len, NULL, 0,
&dvbdmxfeed->feed.ts, NULL);
else
return dvb_filter_pes2ts(p2t, buf, len, 1);
}
/*
 * pes2ts output callback: hand one freshly built 188-byte TS packet
 * to the demux feed stored in priv. Always returns 0.
 */
static int dvb_filter_pes2ts_cb(void *priv, unsigned char *data)
{
	struct dvb_demux_feed *feed = priv;

	feed->cb.ts(data, 188, NULL, 0, &feed->feed.ts, NULL);
	return 0;
}
/*
 * Start firmware recording of audio, video or both (av = RP_* mask).
 * Sets up a pes2ts repacker per recorded stream so firmware PES output
 * can be delivered as TS to the demux. Fails with -EBUSY while a
 * playback is running or the stream is already being recorded.
 */
int av7110_av_start_record(struct av7110 *av7110, int av,
struct dvb_demux_feed *dvbdmxfeed)
{
int ret = 0;
struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
dprintk(2, "av7110:%p, dvb_demux_feed:%p\n", av7110, dvbdmxfeed);
if (av7110->playing || (av7110->rec_mode & av))
return -EBUSY;
av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0);
dvbdmx->recording = 1;
av7110->rec_mode |= av;
switch (av7110->rec_mode) {
case RP_AUDIO:
dvb_filter_pes2ts_init(&av7110->p2t[0],
dvbdmx->pesfilter[0]->pid,
dvb_filter_pes2ts_cb,
(void *) dvbdmx->pesfilter[0]);
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, AudioPES, 0);
break;
case RP_VIDEO:
dvb_filter_pes2ts_init(&av7110->p2t[1],
dvbdmx->pesfilter[1]->pid,
dvb_filter_pes2ts_cb,
(void *) dvbdmx->pesfilter[1]);
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, VideoPES, 0);
break;
case RP_AV:
/* both streams: initialise audio (p2t[0]) and video (p2t[1]) repackers */
dvb_filter_pes2ts_init(&av7110->p2t[0],
dvbdmx->pesfilter[0]->pid,
dvb_filter_pes2ts_cb,
(void *) dvbdmx->pesfilter[0]);
dvb_filter_pes2ts_init(&av7110->p2t[1],
dvbdmx->pesfilter[1]->pid,
dvb_filter_pes2ts_cb,
(void *) dvbdmx->pesfilter[1]);
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, AV_PES, 0);
break;
}
return ret;
}
/*
 * Start firmware playback of audio, video or both (av = RP_* mask).
 * Resets both ipack repackers when starting from idle. Fails with
 * -EBUSY while recording or if the stream is already playing.
 */
int av7110_av_start_play(struct av7110 *av7110, int av)
{
int ret = 0;
dprintk(2, "av7110:%p, \n", av7110);
if (av7110->rec_mode)
return -EBUSY;
if (av7110->playing & av)
return -EBUSY;
av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0);
if (av7110->playing == RP_NONE) {
av7110_ipack_reset(&av7110->ipack[0]);
av7110_ipack_reset(&av7110->ipack[1]);
}
av7110->playing |= av;
switch (av7110->playing) {
case RP_AUDIO:
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AudioPES, 0);
break;
case RP_VIDEO:
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, VideoPES, 0);
/* force re-detection of the stream's video format */
av7110->sinfo = 0;
break;
case RP_AV:
av7110->sinfo = 0;
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AV_PES, 0);
break;
}
return ret;
}
/*
 * Stop playback or recording of the given stream(s) (av = RP_* mask).
 * If the other stream of a pair is still active, restart it alone with
 * the appropriate single-stream firmware command; when playback drops
 * to RP_NONE the video mode is reprogrammed.
 */
int av7110_av_stop(struct av7110 *av7110, int av)
{
int ret = 0;
dprintk(2, "av7110:%p, \n", av7110);
if (!(av7110->playing & av) && !(av7110->rec_mode & av))
return 0;
av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0);
if (av7110->playing) {
av7110->playing &= ~av;
switch (av7110->playing) {
case RP_AUDIO:
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, AudioPES, 0);
break;
case RP_VIDEO:
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Play, 2, VideoPES, 0);
break;
case RP_NONE:
ret = av7110_set_vidmode(av7110, av7110->vidmode);
break;
}
} else {
av7110->rec_mode &= ~av;
switch (av7110->rec_mode) {
case RP_AUDIO:
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, AudioPES, 0);
break;
case RP_VIDEO:
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Record, 2, VideoPES, 0);
break;
case RP_NONE:
break;
}
}
return ret;
}
/*
 * Pull one complete PES packet out of the ring buffer into dest.
 * Resynchronises on a video (0x000001ex), audio (0x000001cx) or
 * private-stream-1 (0x000001bd) start code, skipping garbage bytes.
 * Returns the packet length (PES length field + 6 header bytes), or
 * -1 if no complete packet of at most dlen bytes is available yet.
 * Wakes ring-buffer waiters on every exit path.
 */
int av7110_pes_play(void *dest, struct dvb_ringbuffer *buf, int dlen)
{
int len;
u32 sync;
u16 blen;
if (!dlen) {
wake_up(&buf->queue);
return -1;
}
while (1) {
len = dvb_ringbuffer_avail(buf);
if (len < 6) {
/* not even a full PES header present */
wake_up(&buf->queue);
return -1;
}
sync = DVB_RINGBUFFER_PEEK(buf, 0) << 24;
sync |= DVB_RINGBUFFER_PEEK(buf, 1) << 16;
sync |= DVB_RINGBUFFER_PEEK(buf, 2) << 8;
sync |= DVB_RINGBUFFER_PEEK(buf, 3);
if (((sync &~ 0x0f) == 0x000001e0) ||
((sync &~ 0x1f) == 0x000001c0) ||
(sync == 0x000001bd))
break;
printk("resync\n");
DVB_RINGBUFFER_SKIP(buf, 1);
}
blen = DVB_RINGBUFFER_PEEK(buf, 4) << 8;
blen |= DVB_RINGBUFFER_PEEK(buf, 5);
blen += 6;
if (len < blen || blen > dlen) {
//printk("buffer empty - avail %d blen %u dlen %d\n", len, blen, dlen);
wake_up(&buf->queue);
return -1;
}
dvb_ringbuffer_read(buf, dest, (size_t) blen);
dprintk(2, "pread=0x%08lx, pwrite=0x%08lx\n",
(unsigned long) buf->pread, (unsigned long) buf->pwrite);
wake_up(&buf->queue);
return blen;
}
/*
 * Program the audio DAC with the requested left/right volume (0..255
 * scale as used by the mixer state). The scaling and register layout
 * differ per DAC type; MSP34xx parts take a single volume plus a
 * balance value derived from the channel difference.
 */
int av7110_set_volume(struct av7110 *av7110, unsigned int volleft,
unsigned int volright)
{
unsigned int vol, val, balance = 0;
int err;
dprintk(2, "av7110:%p, \n", av7110);
av7110->mixer.volume_left = volleft;
av7110->mixer.volume_right = volright;
switch (av7110->adac_type) {
case DVB_ADAC_TI:
/* scale to the TI DAC's 0..0x3f range */
volleft = (volleft * 256) / 1036;
volright = (volright * 256) / 1036;
if (volleft > 0x3f)
volleft = 0x3f;
if (volright > 0x3f)
volright = 0x3f;
if ((err = SendDAC(av7110, 3, 0x80 + volleft)))
return err;
return SendDAC(av7110, 4, volright);
case DVB_ADAC_CRYSTAL:
/* Crystal registers are attenuation values: 0 = loudest */
volleft = 127 - volleft / 2;
volright = 127 - volright / 2;
i2c_writereg(av7110, 0x20, 0x03, volleft);
i2c_writereg(av7110, 0x20, 0x04, volright);
return 0;
case DVB_ADAC_MSP34x0:
vol = (volleft > volright) ? volleft : volright;
val = (vol * 0x73 / 255) << 8;
if (vol > 0)
balance = ((volright - volleft) * 127) / vol;
msp_writereg(av7110, MSP_WR_DSP, 0x0001, balance << 8);
msp_writereg(av7110, MSP_WR_DSP, 0x0000, val); /* loudspeaker */
msp_writereg(av7110, MSP_WR_DSP, 0x0006, val); /* headphones */
return 0;
case DVB_ADAC_MSP34x5:
vol = (volleft > volright) ? volleft : volright;
val = (vol * 0x73 / 255) << 8;
if (vol > 0)
balance = ((volright - volleft) * 127) / vol;
msp_writereg(av7110, MSP_WR_DSP, 0x0001, balance << 8);
msp_writereg(av7110, MSP_WR_DSP, 0x0000, val); /* loudspeaker */
return 0;
}
return 0;
}
/*
 * Load the firmware video code for the given mode (PAL/NTSC). When not
 * playing from memory, also reprogram the PID filters and restart the
 * firmware scan so live decoding picks up the new mode.
 */
int av7110_set_vidmode(struct av7110 *av7110, enum av7110_video_mode mode)
{
int ret;
dprintk(2, "av7110:%p, \n", av7110);
ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, LoadVidCode, 1, mode);
if (!ret && !av7110->playing) {
ret = ChangePIDs(av7110, av7110->pids[DMX_PES_VIDEO],
av7110->pids[DMX_PES_AUDIO],
av7110->pids[DMX_PES_TELETEXT],
0, av7110->pids[DMX_PES_PCR]);
if (!ret)
ret = av7110_fw_cmd(av7110, COMTYPE_PIDFILTER, Scan, 0);
}
return ret;
}
/* Video mode lookup indexed by the low nibble of the byte following an
 * MPEG sequence header (printed as "fr" in get_video_format()) —
 * presumably the frame-rate code; NTSC entries cover the ~24/30 Hz
 * codes, PAL everything else. */
static enum av7110_video_mode sw2mode[16] = {
AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_NTSC,
AV7110_VIDEO_MODE_NTSC, AV7110_VIDEO_MODE_PAL,
AV7110_VIDEO_MODE_NTSC, AV7110_VIDEO_MODE_NTSC,
AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_NTSC,
AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL,
AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL,
AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL,
AV7110_VIDEO_MODE_PAL, AV7110_VIDEO_MODE_PAL,
};
/*
 * Scan a buffer for an MPEG sequence header (00 00 01 b3) and, on the
 * first one found, derive horizontal/vertical size and switch the
 * decoder video mode via sw2mode[]. Runs only once per playback
 * (guarded by av7110->sinfo). Returns av7110_set_vidmode()'s result.
 */
static int get_video_format(struct av7110 *av7110, u8 *buf, int count)
{
int i;
int hsize, vsize;
int sw;
u8 *p;
int ret = 0;
dprintk(2, "av7110:%p, \n", av7110);
if (av7110->sinfo)
return 0;
/* start at offset 7 to skip the PES header prefix */
for (i = 7; i < count - 10; i++) {
p = buf + i;
if (p[0] || p[1] || p[2] != 0x01 || p[3] != 0xb3)
continue;
p += 4;
/* 12-bit width and height split across the first three bytes */
hsize = ((p[1] &0xF0) >> 4) | (p[0] << 4);
vsize = ((p[1] &0x0F) << 8) | (p[2]);
sw = (p[3] & 0x0F);
ret = av7110_set_vidmode(av7110, sw2mode[sw]);
if (!ret) {
dprintk(2, "playback %dx%d fr=%d\n", hsize, vsize, sw);
av7110->sinfo = 1;
}
break;
}
return ret;
}
/****************************************************************************
 * I/O buffer management and control
 ****************************************************************************/
/*
 * Blocking write into a decoder ring buffer. Sleeps (interruptibly)
 * until at least 2048 bytes are free before each chunk. Returns the
 * number of bytes actually written; may be short if interrupted.
 */
static inline long aux_ring_buffer_write(struct dvb_ringbuffer *rbuf,
const u8 *buf, unsigned long count)
{
unsigned long todo = count;
int free;
while (todo > 0) {
if (dvb_ringbuffer_free(rbuf) < 2048) {
if (wait_event_interruptible(rbuf->queue,
(dvb_ringbuffer_free(rbuf) >= 2048)))
return count - todo;
}
free = dvb_ringbuffer_free(rbuf);
if (free > todo)
free = todo;
dvb_ringbuffer_write(rbuf, buf, free);
todo -= free;
buf += free;
}
return count - todo;
}
/*
 * ipack output callback for the video channel: video PES packets
 * (stream id 0xe0-0xef) go to the A/V output ring after a one-time
 * format probe; anything else is treated as audio.
 */
static void play_video_cb(u8 *buf, int count, void *priv)
{
	struct av7110 *av7110 = priv;

	dprintk(2, "av7110:%p, \n", av7110);
	if ((buf[3] & 0xe0) != 0xe0) {
		aux_ring_buffer_write(&av7110->aout, buf, count);
		return;
	}
	get_video_format(av7110, buf, count);
	aux_ring_buffer_write(&av7110->avout, buf, count);
}
/*
 * ipack output callback for the audio channel: queue the repacked
 * PES data on the audio output ring buffer.
 */
static void play_audio_cb(u8 *buf, int count, void *priv)
{
	struct av7110 *av = priv;

	dprintk(2, "av7110:%p, \n", av);
	aux_ring_buffer_write(&av->aout, buf, count);
}
/* enough room in the selected ring buffer for more TS data */
#define FREE_COND_TS (dvb_ringbuffer_free(rb) >= 4096)
/*
 * Copy whole 188-byte TS packets from user space into the decoder.
 * type selects video (1, avout ring) or audio (0, aout ring) for the
 * space check; each packet is bounced through the per-type kernel
 * buffer and fed to write_ts_to_decoder(). Returns bytes consumed,
 * -EWOULDBLOCK / -EFAULT / -ENOBUFS on error.
 */
static ssize_t ts_play(struct av7110 *av7110, const char __user *buf,
unsigned long count, int nonblock, int type)
{
struct dvb_ringbuffer *rb;
u8 *kb;
unsigned long todo = count;
dprintk(2, "%s: type %d cnt %lu\n", __func__, type, count);
rb = (type) ? &av7110->avout : &av7110->aout;
kb = av7110->kbuf[type];
if (!kb)
return -ENOBUFS;
if (nonblock && !FREE_COND_TS)
return -EWOULDBLOCK;
while (todo >= TS_SIZE) {
if (!FREE_COND_TS) {
if (nonblock)
return count - todo;
if (wait_event_interruptible(rb->queue, FREE_COND_TS))
return count - todo;
}
if (copy_from_user(kb, buf, TS_SIZE))
return -EFAULT;
write_ts_to_decoder(av7110, type, kb, TS_SIZE);
todo -= TS_SIZE;
buf += TS_SIZE;
}
/* a trailing partial packet (< TS_SIZE) is left unconsumed */
return count - todo;
}
/* room in BOTH output rings — PES may demux to either */
#define FREE_COND (dvb_ringbuffer_free(&av7110->avout) >= 20 * 1024 && \
dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024)
/*
 * Copy a PES stream from user space and feed it through the ipack
 * repacker for the given type. Data is consumed in chunks of at most
 * IPACKS * 2 bytes. Returns bytes consumed, or -EWOULDBLOCK /
 * -EFAULT / -ENOBUFS on error.
 */
static ssize_t dvb_play(struct av7110 *av7110, const char __user *buf,
unsigned long count, int nonblock, int type)
{
unsigned long todo = count, n;
dprintk(2, "av7110:%p, \n", av7110);
if (!av7110->kbuf[type])
return -ENOBUFS;
if (nonblock && !FREE_COND)
return -EWOULDBLOCK;
while (todo > 0) {
if (!FREE_COND) {
if (nonblock)
return count - todo;
if (wait_event_interruptible(av7110->avout.queue,
FREE_COND))
return count - todo;
}
n = todo;
if (n > IPACKS * 2)
n = IPACKS * 2;
if (copy_from_user(av7110->kbuf[type], buf, n))
return -EFAULT;
av7110_ipack_instant_repack(av7110->kbuf[type], n,
&av7110->ipack[type]);
todo -= n;
buf += n;
}
return count - todo;
}
/*
 * Kernel-space counterpart of dvb_play(): same repack loop but the
 * source buffer is already in kernel memory, so no copy_from_user.
 * Used e.g. to inject the still-picture I-frame header.
 */
static ssize_t dvb_play_kernel(struct av7110 *av7110, const u8 *buf,
unsigned long count, int nonblock, int type)
{
unsigned long todo = count, n;
dprintk(2, "av7110:%p, \n", av7110);
if (!av7110->kbuf[type])
return -ENOBUFS;
if (nonblock && !FREE_COND)
return -EWOULDBLOCK;
while (todo > 0) {
if (!FREE_COND) {
if (nonblock)
return count - todo;
if (wait_event_interruptible(av7110->avout.queue,
FREE_COND))
return count - todo;
}
n = todo;
if (n > IPACKS * 2)
n = IPACKS * 2;
av7110_ipack_instant_repack(buf, n, &av7110->ipack[type]);
todo -= n;
buf += n;
}
return count - todo;
}
/*
 * Audio-only variant of dvb_play(): waits on free space in the audio
 * output ring only (>= 20 KiB) instead of both rings. Returns bytes
 * consumed, or -EWOULDBLOCK / -EFAULT / -ENOBUFS on error.
 */
static ssize_t dvb_aplay(struct av7110 *av7110, const char __user *buf,
unsigned long count, int nonblock, int type)
{
unsigned long todo = count, n;
dprintk(2, "av7110:%p, \n", av7110);
if (!av7110->kbuf[type])
return -ENOBUFS;
if (nonblock && dvb_ringbuffer_free(&av7110->aout) < 20 * 1024)
return -EWOULDBLOCK;
while (todo > 0) {
if (dvb_ringbuffer_free(&av7110->aout) < 20 * 1024) {
if (nonblock)
return count - todo;
if (wait_event_interruptible(av7110->aout.queue,
(dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024)))
return count-todo;
}
n = todo;
if (n > IPACKS * 2)
n = IPACKS * 2;
if (copy_from_user(av7110->kbuf[type], buf, n))
return -EFAULT;
av7110_ipack_instant_repack(av7110->kbuf[type], n,
&av7110->ipack[type]);
todo -= n;
buf += n;
}
return count - todo;
}
/*
 * Initialise a PES->TS repacker: clear the pending-packet buffer and
 * all positional state (including the continuity counter). A NULL
 * feed keeps whatever feed is currently set.
 */
void av7110_p2t_init(struct av7110_p2t *p, struct dvb_demux_feed *feed)
{
	if (feed)
		p->feed = feed;
	p->counter = 0;
	p->pos = 0;
	p->frags = 0;
	memset(p->pes, 0, TS_SIZE);
}
/*
 * Discard any partially assembled PES data between packets. The TS
 * continuity counter is deliberately preserved.
 */
static void clear_p2t(struct av7110_p2t *p)
{
	p->pos = 0;
	p->frags = 0;
	memset(p->pes, 0, TS_SIZE);
}
/*
 * Locate the next PES start code (00 00 01 <known stream id>) in buf.
 * Returns its offset, or -1 if none is found. On -1, *frags is set to
 * 1..3 when the buffer ends with a prefix of a start code (00 /
 * 00 00 / 00 00 01) that may continue in the next buffer; the caller
 * (av7110_p2t_write) completes the match across the boundary.
 */
static int find_pes_header(u8 const *buf, long int length, int *frags)
{
int c = 0;
int found = 0;
*frags = 0;
while (c < length - 3 && !found) {
if (buf[c] == 0x00 && buf[c + 1] == 0x00 &&
buf[c + 2] == 0x01) {
switch ( buf[c + 3] ) {
case PROG_STREAM_MAP:
case PRIVATE_STREAM2:
case PROG_STREAM_DIR:
case ECM_STREAM:
case EMM_STREAM:
case PADDING_STREAM:
case DSM_CC_STREAM:
case ISO13522_STREAM:
case PRIVATE_STREAM1:
case AUDIO_STREAM_S ... AUDIO_STREAM_E:
case VIDEO_STREAM_S ... VIDEO_STREAM_E:
found = 1;
break;
default:
c++;
break;
}
} else
c++;
}
/* reached the end without a match: check for a split start code */
if (c == length - 3 && !found) {
if (buf[length - 1] == 0x00)
*frags = 1;
if (buf[length - 2] == 0x00 &&
buf[length - 1] == 0x00)
*frags = 2;
if (buf[length - 3] == 0x00 &&
buf[length - 2] == 0x00 &&
buf[length - 1] == 0x01)
*frags = 3;
return -1;
}
return c;
}
/*
 * Feed a chunk of PES data into the repacker, emitting 188-byte TS
 * packets via p_to_t(). Handles three carry-over situations between
 * calls: (1) a start code split across the buffer boundary (p->frags),
 * (2) a partially buffered packet head (p->pos), and (3) a trailing
 * remainder smaller than one TS payload, which is stashed in p->pes.
 */
void av7110_p2t_write(u8 const *buf, long int length, u16 pid, struct av7110_p2t *p)
{
int c, c2, l, add;
int check, rest;
c = 0;
c2 = 0;
if (p->frags){
check = 0;
/* complete the start-code prefix left over from the last buffer */
switch(p->frags) {
case 1:
if (buf[c] == 0x00 && buf[c + 1] == 0x01) {
check = 1;
c += 2;
}
break;
case 2:
if (buf[c] == 0x01) {
check = 1;
c++;
}
break;
case 3:
check = 1;
}
if (check) {
/* buf[c] is now the stream id; accept only known PES types */
switch (buf[c]) {
case PROG_STREAM_MAP:
case PRIVATE_STREAM2:
case PROG_STREAM_DIR:
case ECM_STREAM:
case EMM_STREAM:
case PADDING_STREAM:
case DSM_CC_STREAM:
case ISO13522_STREAM:
case PRIVATE_STREAM1:
case AUDIO_STREAM_S ... AUDIO_STREAM_E:
case VIDEO_STREAM_S ... VIDEO_STREAM_E:
/* re-synthesise the start code and flush one TS payload */
p->pes[0] = 0x00;
p->pes[1] = 0x00;
p->pes[2] = 0x01;
p->pes[3] = buf[c];
p->pos = 4;
memcpy(p->pes + p->pos, buf + c, (TS_SIZE - 4) - p->pos);
c += (TS_SIZE - 4) - p->pos;
p_to_t(p->pes, (TS_SIZE - 4), pid, &p->counter, p->feed);
clear_p2t(p);
break;
default:
c = 0;
break;
}
}
p->frags = 0;
}
if (p->pos) {
/* top up the partially buffered packet, at most one TS payload */
c2 = find_pes_header(buf + c, length - c, &p->frags);
if (c2 >= 0 && c2 < (TS_SIZE - 4) - p->pos)
l = c2+c;
else
l = (TS_SIZE - 4) - p->pos;
memcpy(p->pes + p->pos, buf, l);
c += l;
p->pos += l;
p_to_t(p->pes, p->pos, pid, &p->counter, p->feed);
clear_p2t(p);
}
add = 0;
while (c < length) {
c2 = find_pes_header(buf + c + add, length - c - add, &p->frags);
if (c2 >= 0) {
c2 += c + add;
if (c2 > c){
/* emit everything up to the next start code */
p_to_t(buf + c, c2 - c, pid, &p->counter, p->feed);
c = c2;
clear_p2t(p);
add = 0;
} else
add = 1; /* header at current pos: step past it next scan */
} else {
/* no further header: emit whole TS payloads, stash the rest */
l = length - c;
rest = l % (TS_SIZE - 4);
l -= rest;
p_to_t(buf + c, l, pid, &p->counter, p->feed);
memcpy(p->pes, buf + c + l, rest);
p->pos = rest;
c = length;
}
}
}
/*
 * Build a TS header (plus adaptation-field stuffing when the payload
 * is short) into buf. length is the number of payload bytes that will
 * follow; pes_start sets the payload_unit_start_indicator. Increments
 * *counter (continuity counter). Returns the number of header bytes
 * written, so header + payload always total TS_SIZE bytes.
 */
static int write_ts_header2(u16 pid, u8 *counter, int pes_start, u8 *buf, u8 length)
{
int i;
int c = 0;
int fill;
u8 tshead[4] = { 0x47, 0x00, 0x00, 0x10 };
fill = (TS_SIZE - 4) - length;
if (pes_start)
tshead[1] = 0x40;
if (fill)
tshead[3] = 0x30; /* adaptation field + payload */
tshead[1] |= (u8)((pid & 0x1F00) >> 8);
tshead[2] |= (u8)(pid & 0x00FF);
tshead[3] |= ((*counter)++ & 0x0F);
memcpy(buf, tshead, 4);
c += 4;
if (fill) {
buf[4] = fill - 1; /* adaptation_field_length */
c++;
if (fill > 1) {
buf[5] = 0x00; /* adaptation flags: none */
c++;
}
for (i = 6; i < fill + 4; i++) {
buf[i] = 0xFF; /* stuffing bytes */
c++;
}
}
return c;
}
/*
 * Split a PES byte range into 188-byte TS packets and deliver each to
 * the demux feed. The payload_unit_start flag is set on the first
 * packet only, and only when buf begins with a recognised PES start
 * code. The final short packet is padded via write_ts_header2().
 */
static void p_to_t(u8 const *buf, long int length, u16 pid, u8 *counter,
struct dvb_demux_feed *feed)
{
int l, pes_start;
u8 obuf[TS_SIZE];
long c = 0;
pes_start = 0;
if (length > 3 &&
buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0x01)
switch (buf[3]) {
case PROG_STREAM_MAP:
case PRIVATE_STREAM2:
case PROG_STREAM_DIR:
case ECM_STREAM:
case EMM_STREAM:
case PADDING_STREAM:
case DSM_CC_STREAM:
case ISO13522_STREAM:
case PRIVATE_STREAM1:
case AUDIO_STREAM_S ... AUDIO_STREAM_E:
case VIDEO_STREAM_S ... VIDEO_STREAM_E:
pes_start = 1;
break;
default:
break;
}
while (c < length) {
memset(obuf, 0, TS_SIZE);
if (length - c >= (TS_SIZE - 4)){
/* full payload: 4-byte header + 184 payload bytes */
l = write_ts_header2(pid, counter, pes_start,
obuf, (TS_SIZE - 4));
memcpy(obuf + l, buf + c, TS_SIZE - l);
c += TS_SIZE - l;
} else {
/* short tail: header grows stuffing so the packet stays 188 bytes */
l = write_ts_header2(pid, counter, pes_start,
obuf, length - c);
memcpy(obuf + l, buf + c, TS_SIZE - l);
c = length;
}
feed->cb.ts(obuf, 188, NULL, 0, &feed->feed.ts, NULL);
pes_start = 0;
}
}
/*
 * Feed one TS packet's payload into the decoder's ipack repacker.
 * Drops errored packets (resetting the repacker), packets without a
 * payload, and packets whose adaptation field overruns the packet.
 * A payload-start flushes any pending PES data first.
 * NOTE(review): len is size_t; callers in this file pass TS_SIZE, but
 * for len < 5 the `len - 1 - 4` comparison would wrap — confirm all
 * call sites guarantee len >= 5.
 */
static int write_ts_to_decoder(struct av7110 *av7110, int type, const u8 *buf, size_t len)
{
struct ipack *ipack = &av7110->ipack[type];
if (buf[1] & TRANS_ERROR) {
av7110_ipack_reset(ipack);
return -1;
}
if (!(buf[3] & PAYLOAD))
return -1;
if (buf[1] & PAY_START)
av7110_ipack_flush(ipack);
if (buf[3] & ADAPT_FIELD) {
/* skip adaptation field: length byte + contents */
if (buf[4] > len - 1 - 4)
return 0;
len -= buf[4] + 1;
buf += buf[4] + 1;
}
av7110_ipack_instant_repack(buf + 4, len - 4, ipack);
return 0;
}
/*
 * Demux write callback: route a TS packet from the demux into the
 * decoder. Ignored in full-TS mode unless the data comes from the
 * memory frontend; rejected when the corresponding audio/video source
 * is set to memory (user space owns that path then).
 */
int av7110_write_to_decoder(struct dvb_demux_feed *feed, const u8 *buf, size_t len)
{
struct dvb_demux *demux = feed->demux;
struct av7110 *av7110 = demux->priv;
dprintk(2, "av7110:%p, \n", av7110);
if (av7110->full_ts && demux->dmx.frontend->source != DMX_MEMORY_FE)
return 0;
switch (feed->pes_type) {
case 0: /* audio */
if (av7110->audiostate.stream_source == AUDIO_SOURCE_MEMORY)
return -EINVAL;
break;
case 1: /* video */
if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY)
return -EINVAL;
break;
default:
return -1;
}
return write_ts_to_decoder(av7110, feed->pes_type, buf, len);
}
/******************************************************************************
 * Video MPEG decoder events
 ******************************************************************************/
/*
 * Push a decoder event into the fixed-size event ring. If the ring is
 * full the oldest event is dropped and the overflow flag is set so the
 * next reader gets -EOVERFLOW. Wakes any poll/read waiters.
 */
void dvb_video_add_event(struct av7110 *av7110, struct video_event *event)
{
struct dvb_video_events *events = &av7110->video_events;
int wp;
spin_lock_bh(&events->lock);
wp = (events->eventw + 1) % MAX_VIDEO_EVENT;
if (wp == events->eventr) {
events->overflow = 1;
events->eventr = (events->eventr + 1) % MAX_VIDEO_EVENT;
}
//FIXME: timestamp?
memcpy(&events->events[events->eventw], event, sizeof(struct video_event));
events->eventw = wp;
spin_unlock_bh(&events->lock);
wake_up_interruptible(&events->wait_queue);
}
/*
 * Pop one event from the ring into *event. Reports a prior overflow
 * exactly once via -EOVERFLOW. With O_NONBLOCK returns -EWOULDBLOCK
 * when empty, otherwise sleeps interruptibly for the next event.
 * NOTE(review): the empty-check runs outside events->lock; the
 * writer-side lock presumably makes this benign — confirm.
 */
static int dvb_video_get_event (struct av7110 *av7110, struct video_event *event, int flags)
{
struct dvb_video_events *events = &av7110->video_events;
if (events->overflow) {
events->overflow = 0;
return -EOVERFLOW;
}
if (events->eventw == events->eventr) {
int ret;
if (flags & O_NONBLOCK)
return -EWOULDBLOCK;
ret = wait_event_interruptible(events->wait_queue,
events->eventw != events->eventr);
if (ret < 0)
return ret;
}
spin_lock_bh(&events->lock);
memcpy(event, &events->events[events->eventr],
sizeof(struct video_event));
events->eventr = (events->eventr + 1) % MAX_VIDEO_EVENT;
spin_unlock_bh(&events->lock);
return 0;
}
/******************************************************************************
 * DVB device file operations
 ******************************************************************************/
/*
 * poll() for the video device: EPOLLPRI when a decoder event is
 * pending; for writable opens, EPOLLOUT when both output rings have
 * room (or unconditionally when nothing is playing yet).
 */
static __poll_t dvb_video_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
__poll_t mask = 0;
dprintk(2, "av7110:%p, \n", av7110);
if ((file->f_flags & O_ACCMODE) != O_RDONLY)
poll_wait(file, &av7110->avout.queue, wait);
poll_wait(file, &av7110->video_events.wait_queue, wait);
if (av7110->video_events.eventw != av7110->video_events.eventr)
mask = EPOLLPRI;
if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
if (av7110->playing) {
if (FREE_COND)
mask |= (EPOLLOUT | EPOLLWRNORM);
} else {
/* if not playing: may play if asked for */
mask |= (EPOLLOUT | EPOLLWRNORM);
}
}
return mask;
}
/*
 * write() for the video device. Requires a writable open and the
 * video source set to memory. Auto-detects TS input by the 0x47 sync
 * byte plus packet-aligned count; everything else goes down the PES
 * path.
 */
static ssize_t dvb_video_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
unsigned char c;
dprintk(2, "av7110:%p, \n", av7110);
if ((file->f_flags & O_ACCMODE) == O_RDONLY)
return -EPERM;
if (av7110->videostate.stream_source != VIDEO_SOURCE_MEMORY)
return -EPERM;
if (get_user(c, buf))
return -EFAULT;
if (c == 0x47 && count % TS_SIZE == 0)
return ts_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
else
return dvb_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 1);
}
/*
 * poll() for the audio device: EPOLLOUT when the audio output ring
 * has at least 20 KiB free, or unconditionally while idle.
 */
static __poll_t dvb_audio_poll(struct file *file, poll_table *wait)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
__poll_t mask = 0;
dprintk(2, "av7110:%p, \n", av7110);
poll_wait(file, &av7110->aout.queue, wait);
if (av7110->playing) {
if (dvb_ringbuffer_free(&av7110->aout) >= 20 * 1024)
mask |= (EPOLLOUT | EPOLLWRNORM);
} else /* if not playing: may play if asked for */
mask = (EPOLLOUT | EPOLLWRNORM);
return mask;
}
/*
 * write() for the audio device. Requires the audio source set to
 * memory. Same TS-vs-PES autodetection as dvb_video_write(), routed
 * through the audio (type 0) path.
 */
static ssize_t dvb_audio_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
unsigned char c;
dprintk(2, "av7110:%p, \n", av7110);
if (av7110->audiostate.stream_source != AUDIO_SOURCE_MEMORY) {
printk(KERN_ERR "not audio source memory\n");
return -EPERM;
}
if (get_user(c, buf))
return -EFAULT;
if (c == 0x47 && count % TS_SIZE == 0)
return ts_play(av7110, buf, count, file->f_flags & O_NONBLOCK, 0);
else
return dvb_aplay(av7110, buf, count, file->f_flags & O_NONBLOCK, 0);
}
/* minimal video PES header (stream id 0xe0, zero length) prepended
 * before still-picture data; MIN_IFRAME sets how many total bytes to
 * replay so short frames are repeated */
static u8 iframe_header[] = { 0x00, 0x00, 0x01, 0xe0, 0x00, 0x00, 0x80, 0x00, 0x00 };
#define MIN_IFRAME 400000
/*
 * Display a still picture from a user-space buffer. Starts video
 * playback if needed, scans the data for a sequence-extension start
 * code (00 00 01 b5 1x) to learn whether the stream is progressive,
 * then injects a PES header plus the frame repeated enough times to
 * exceed MIN_IFRAME bytes. Progressive content is frozen afterwards.
 */
static int play_iframe(struct av7110 *av7110, char __user *buf, unsigned int len, int nonblock)
{
unsigned i, n;
int progressive = 0;
int match = 0;
dprintk(2, "av7110:%p, \n", av7110);
if (len == 0)
return 0;
if (!(av7110->playing & RP_VIDEO)) {
if (av7110_av_start_play(av7110, RP_VIDEO) < 0)
return -EBUSY;
}
/* search in buf for instances of 00 00 01 b5 1? */
for (i = 0; i < len; i++) {
unsigned char c;
if (get_user(c, buf + i))
return -EFAULT;
if (match == 5) {
progressive = c & 0x08;
match = 0;
}
if (c == 0x00) {
match = (match == 1 || match == 2) ? 2 : 1;
continue;
}
switch (match++) {
case 2: if (c == 0x01)
continue;
break;
case 3: if (c == 0xb5)
continue;
break;
case 4: if ((c & 0xf0) == 0x10)
continue;
break;
}
match = 0;
}
/* setting n always > 1, fixes problems when playing stillframes
consisting of I- and P-Frames */
n = MIN_IFRAME / len + 1;
/* FIXME: nonblock? */
dvb_play_kernel(av7110, iframe_header, sizeof(iframe_header), 0, 1);
for (i = 0; i < n; i++)
dvb_play(av7110, buf, len, 0, 1);
av7110_ipack_flush(&av7110->ipack[1]);
if (progressive)
return vidcom(av7110, AV_VIDEO_CMD_FREEZE, 1);
else
return 0;
}
#ifdef CONFIG_COMPAT
/* 32-bit layout of struct video_still_picture for compat ioctls */
struct compat_video_still_picture {
compat_uptr_t iFrame;
int32_t size;
};
#define VIDEO_STILLPICTURE32 _IOW('o', 30, struct compat_video_still_picture)
/* 32-bit layout of struct video_event for compat ioctls */
struct compat_video_event {
__s32 type;
/* unused, make sure to use atomic time for y2038 if it ever gets used */
compat_long_t timestamp;
union {
video_size_t size;
unsigned int frame_rate; /* in frames per 1000sec */
unsigned char vsync_field; /* unknown/odd/even/progressive */
} u;
};
#define VIDEO_GET_EVENT32 _IOR('o', 28, struct compat_video_event)
/*
 * Compat wrapper around dvb_video_get_event(): fetch a native event
 * and repack it into the 32-bit layout. Returns the wrapped call's
 * result; *event is written even on error (from a zeroed native ev
 * only if the callee filled it — matches original behavior).
 */
static int dvb_compat_video_get_event(struct av7110 *av7110,
struct compat_video_event *event, int flags)
{
struct video_event ev;
int ret;
ret = dvb_video_get_event(av7110, &ev, flags);
*event = (struct compat_video_event) {
.type = ev.type,
.timestamp = ev.timestamp,
.u.size = ev.u.size,
};
return ret;
}
#endif
/*
 * ioctl handler for the MPEG video decoder device. Read-only opens
 * may only query status/events/size. All commands run under
 * ioctl_mutex. Most commands either drive the firmware (av7110_fw_cmd
 * for memory playback) or the video command interface (vidcom for
 * live decoding), mirroring the play/record state kept in
 * av7110->playing and av7110->videostate.
 */
static int dvb_video_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
unsigned long arg = (unsigned long) parg;
int ret = 0;
dprintk(1, "av7110:%p, cmd=%04x\n", av7110,cmd);
if ((file->f_flags & O_ACCMODE) == O_RDONLY) {
if ( cmd != VIDEO_GET_STATUS && cmd != VIDEO_GET_EVENT &&
cmd != VIDEO_GET_SIZE ) {
return -EPERM;
}
}
if (mutex_lock_interruptible(&av7110->ioctl_mutex))
return -ERESTARTSYS;
switch (cmd) {
case VIDEO_STOP:
av7110->videostate.play_state = VIDEO_STOPPED;
if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY)
ret = av7110_av_stop(av7110, RP_VIDEO);
else
ret = vidcom(av7110, AV_VIDEO_CMD_STOP,
av7110->videostate.video_blank ? 0 : 1);
if (!ret)
av7110->trickmode = TRICK_NONE;
break;
case VIDEO_PLAY:
av7110->trickmode = TRICK_NONE;
if (av7110->videostate.play_state == VIDEO_FREEZED) {
av7110->videostate.play_state = VIDEO_PLAYING;
ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
if (ret)
break;
}
if (av7110->videostate.stream_source == VIDEO_SOURCE_MEMORY) {
/* restarting video-only playback requires a full firmware stop */
if (av7110->playing == RP_AV) {
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Stop, 0);
if (ret)
break;
av7110->playing &= ~RP_VIDEO;
}
ret = av7110_av_start_play(av7110, RP_VIDEO);
}
if (!ret)
ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
if (!ret)
av7110->videostate.play_state = VIDEO_PLAYING;
break;
case VIDEO_FREEZE:
av7110->videostate.play_state = VIDEO_FREEZED;
if (av7110->playing & RP_VIDEO)
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Pause, 0);
else
ret = vidcom(av7110, AV_VIDEO_CMD_FREEZE, 1);
if (!ret)
av7110->trickmode = TRICK_FREEZE;
break;
case VIDEO_CONTINUE:
if (av7110->playing & RP_VIDEO)
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Continue, 0);
if (!ret)
ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
if (!ret) {
av7110->videostate.play_state = VIDEO_PLAYING;
av7110->trickmode = TRICK_NONE;
}
break;
case VIDEO_SELECT_SOURCE:
av7110->videostate.stream_source = (video_stream_source_t) arg;
break;
case VIDEO_SET_BLANK:
av7110->videostate.video_blank = (int) arg;
break;
case VIDEO_GET_STATUS:
memcpy(parg, &av7110->videostate, sizeof(struct video_status));
break;
#ifdef CONFIG_COMPAT
case VIDEO_GET_EVENT32:
ret = dvb_compat_video_get_event(av7110, parg, file->f_flags);
break;
#endif
case VIDEO_GET_EVENT:
ret = dvb_video_get_event(av7110, parg, file->f_flags);
break;
case VIDEO_GET_SIZE:
memcpy(parg, &av7110->video_size, sizeof(video_size_t));
break;
case VIDEO_SET_DISPLAY_FORMAT:
{
video_displayformat_t format = (video_displayformat_t) arg;
switch (format) {
case VIDEO_PAN_SCAN:
av7110->display_panscan = VID_PAN_SCAN_PREF;
break;
case VIDEO_LETTER_BOX:
av7110->display_panscan = VID_VC_AND_PS_PREF;
break;
case VIDEO_CENTER_CUT_OUT:
av7110->display_panscan = VID_CENTRE_CUT_PREF;
break;
default:
ret = -EINVAL;
}
if (ret < 0)
break;
av7110->videostate.display_format = format;
ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetPanScanType,
1, av7110->display_panscan);
break;
}
case VIDEO_SET_FORMAT:
if (arg > 1) {
ret = -EINVAL;
break;
}
av7110->display_ar = arg;
ret = av7110_fw_cmd(av7110, COMTYPE_ENCODER, SetMonitorType,
1, (u16) arg);
break;
#ifdef CONFIG_COMPAT
case VIDEO_STILLPICTURE32:
{
struct compat_video_still_picture *pic =
(struct compat_video_still_picture *) parg;
av7110->videostate.stream_source = VIDEO_SOURCE_MEMORY;
dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
ret = play_iframe(av7110, compat_ptr(pic->iFrame),
pic->size, file->f_flags & O_NONBLOCK);
break;
}
#endif
case VIDEO_STILLPICTURE:
{
struct video_still_picture *pic =
(struct video_still_picture *) parg;
av7110->videostate.stream_source = VIDEO_SOURCE_MEMORY;
dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
ret = play_iframe(av7110, pic->iFrame, pic->size,
file->f_flags & O_NONBLOCK);
break;
}
case VIDEO_FAST_FORWARD:
//note: arg is ignored by firmware
if (av7110->playing & RP_VIDEO)
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
__Scan_I, 2, AV_PES, 0);
else
ret = vidcom(av7110, AV_VIDEO_CMD_FFWD, arg);
if (!ret) {
av7110->trickmode = TRICK_FAST;
av7110->videostate.play_state = VIDEO_PLAYING;
}
break;
case VIDEO_SLOWMOTION:
if (av7110->playing&RP_VIDEO) {
if (av7110->trickmode != TRICK_SLOW)
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY, __Slow, 2, 0, 0);
if (!ret)
ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg);
} else {
ret = vidcom(av7110, AV_VIDEO_CMD_PLAY, 0);
if (!ret)
ret = vidcom(av7110, AV_VIDEO_CMD_STOP, 0);
if (!ret)
ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg);
}
if (!ret) {
av7110->trickmode = TRICK_SLOW;
av7110->videostate.play_state = VIDEO_PLAYING;
}
break;
case VIDEO_GET_CAPABILITIES:
*(int *)parg = VIDEO_CAP_MPEG1 | VIDEO_CAP_MPEG2 |
VIDEO_CAP_SYS | VIDEO_CAP_PROG;
break;
case VIDEO_CLEAR_BUFFER:
/* flush queued data, then re-enter the current trick mode */
dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
av7110_ipack_reset(&av7110->ipack[1]);
if (av7110->playing == RP_AV) {
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
__Play, 2, AV_PES, 0);
if (ret)
break;
if (av7110->trickmode == TRICK_FAST)
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
__Scan_I, 2, AV_PES, 0);
if (av7110->trickmode == TRICK_SLOW) {
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
__Slow, 2, 0, 0);
if (!ret)
ret = vidcom(av7110, AV_VIDEO_CMD_SLOW, arg);
}
if (av7110->trickmode == TRICK_FREEZE)
ret = vidcom(av7110, AV_VIDEO_CMD_STOP, 1);
}
break;
case VIDEO_SET_STREAMTYPE:
break;
default:
ret = -ENOIOCTLCMD;
break;
}
mutex_unlock(&av7110->ioctl_mutex);
return ret;
}
/*
 * ioctl() backend for the av7110 audio decoder device.
 *
 * For small scalar arguments the DVB core passes the value itself in
 * parg, recovered via the unsigned long cast below.  All commands are
 * serialized through ioctl_mutex.  Returns 0 or a negative errno.
 */
static int dvb_audio_ioctl(struct file *file,
unsigned int cmd, void *parg)
{
struct dvb_device *dvbdev = file->private_data;
struct av7110 *av7110 = dvbdev->priv;
unsigned long arg = (unsigned long) parg;
int ret = 0;
dprintk(1, "av7110:%p, cmd=%04x\n", av7110,cmd);
/* read-only openers may only query status */
if (((file->f_flags & O_ACCMODE) == O_RDONLY) &&
(cmd != AUDIO_GET_STATUS))
return -EPERM;
if (mutex_lock_interruptible(&av7110->ioctl_mutex))
return -ERESTARTSYS;
switch (cmd) {
case AUDIO_STOP:
/* memory replay is stopped; demux playback is merely muted */
if (av7110->audiostate.stream_source == AUDIO_SOURCE_MEMORY)
ret = av7110_av_stop(av7110, RP_AUDIO);
else
ret = audcom(av7110, AUDIO_CMD_MUTE);
if (!ret)
av7110->audiostate.play_state = AUDIO_STOPPED;
break;
case AUDIO_PLAY:
if (av7110->audiostate.stream_source == AUDIO_SOURCE_MEMORY)
ret = av7110_av_start_play(av7110, RP_AUDIO);
if (!ret)
ret = audcom(av7110, AUDIO_CMD_UNMUTE);
if (!ret)
av7110->audiostate.play_state = AUDIO_PLAYING;
break;
case AUDIO_PAUSE:
/* pause is implemented as a mute of the audio output */
ret = audcom(av7110, AUDIO_CMD_MUTE);
if (!ret)
av7110->audiostate.play_state = AUDIO_PAUSED;
break;
case AUDIO_CONTINUE:
/* NOTE(review): unlike the other cases, play_state is updated
 * before audcom() reports success -- confirm intentional */
if (av7110->audiostate.play_state == AUDIO_PAUSED) {
av7110->audiostate.play_state = AUDIO_PLAYING;
ret = audcom(av7110, AUDIO_CMD_UNMUTE | AUDIO_CMD_PCM16);
}
break;
case AUDIO_SELECT_SOURCE:
av7110->audiostate.stream_source = (audio_stream_source_t) arg;
break;
case AUDIO_SET_MUTE:
{
ret = audcom(av7110, arg ? AUDIO_CMD_MUTE : AUDIO_CMD_UNMUTE);
if (!ret)
av7110->audiostate.mute_state = (int) arg;
break;
}
case AUDIO_SET_AV_SYNC:
av7110->audiostate.AV_sync_state = (int) arg;
ret = audcom(av7110, arg ? AUDIO_CMD_SYNC_ON : AUDIO_CMD_SYNC_OFF);
break;
case AUDIO_SET_BYPASS_MODE:
/* NOTE(review): on firmware older than 0x2621 this returns
 * -EINVAL yet still records bypass_mode -- verify intended */
if (FW_VERSION(av7110->arm_app) < 0x2621)
ret = -EINVAL;
av7110->audiostate.bypass_mode = (int)arg;
break;
case AUDIO_CHANNEL_SELECT:
/* besides the firmware command, external DACs (Crystal or
 * MSP34x5) are reprogrammed directly over i2c */
av7110->audiostate.channel_select = (audio_channel_select_t) arg;
switch(av7110->audiostate.channel_select) {
case AUDIO_STEREO:
ret = audcom(av7110, AUDIO_CMD_STEREO);
if (!ret) {
if (av7110->adac_type == DVB_ADAC_CRYSTAL)
i2c_writereg(av7110, 0x20, 0x02, 0x49);
else if (av7110->adac_type == DVB_ADAC_MSP34x5)
msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0220);
}
break;
case AUDIO_MONO_LEFT:
ret = audcom(av7110, AUDIO_CMD_MONO_L);
if (!ret) {
if (av7110->adac_type == DVB_ADAC_CRYSTAL)
i2c_writereg(av7110, 0x20, 0x02, 0x4a);
else if (av7110->adac_type == DVB_ADAC_MSP34x5)
msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0200);
}
break;
case AUDIO_MONO_RIGHT:
ret = audcom(av7110, AUDIO_CMD_MONO_R);
if (!ret) {
if (av7110->adac_type == DVB_ADAC_CRYSTAL)
i2c_writereg(av7110, 0x20, 0x02, 0x45);
else if (av7110->adac_type == DVB_ADAC_MSP34x5)
msp_writereg(av7110, MSP_WR_DSP, 0x0008, 0x0210);
}
break;
default:
ret = -EINVAL;
break;
}
break;
case AUDIO_GET_STATUS:
memcpy(parg, &av7110->audiostate, sizeof(struct audio_status));
break;
case AUDIO_GET_CAPABILITIES:
/* DTS/AC3 pass-through needs firmware >= 0x2621 */
if (FW_VERSION(av7110->arm_app) < 0x2621)
*(unsigned int *)parg = AUDIO_CAP_LPCM | AUDIO_CAP_MP1 | AUDIO_CAP_MP2;
else
*(unsigned int *)parg = AUDIO_CAP_LPCM | AUDIO_CAP_DTS | AUDIO_CAP_AC3 |
AUDIO_CAP_MP1 | AUDIO_CAP_MP2;
break;
case AUDIO_CLEAR_BUFFER:
/* discard queued audio and re-arm replay if an AV replay runs */
dvb_ringbuffer_flush_spinlock_wakeup(&av7110->aout);
av7110_ipack_reset(&av7110->ipack[0]);
if (av7110->playing == RP_AV)
ret = av7110_fw_cmd(av7110, COMTYPE_REC_PLAY,
__Play, 2, AV_PES, 0);
break;
case AUDIO_SET_ID:
break;
case AUDIO_SET_MIXER:
{
struct audio_mixer *amix = (struct audio_mixer *)parg;
ret = av7110_set_volume(av7110, amix->volume_left, amix->volume_right);
break;
}
case AUDIO_SET_STREAMTYPE:
break;
default:
ret = -ENOIOCTLCMD;
}
mutex_unlock(&av7110->ioctl_mutex);
return ret;
}
/*
 * open() for the DVB video device.  Performs the generic DVB open and,
 * when the device is opened writable, resets the decoder replay state so
 * a fresh writer starts from demux input with empty output buffers.
 */
static int dvb_video_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;
	int err;

	dprintk(2, "av7110:%p, \n", av7110);
	err = dvb_generic_open(inode, file);
	if (err < 0)
		return err;
	if ((file->f_flags & O_ACCMODE) == O_RDONLY)
		return 0;
	/* writable open: drop any stale audio/video output data */
	dvb_ringbuffer_flush_spinlock_wakeup(&av7110->aout);
	dvb_ringbuffer_flush_spinlock_wakeup(&av7110->avout);
	av7110->video_blank = 1;
	av7110->audiostate.AV_sync_state = 1;
	av7110->videostate.stream_source = VIDEO_SOURCE_DEMUX;
	/* empty event queue */
	av7110->video_events.eventr = av7110->video_events.eventw = 0;
	return 0;
}
/*
 * release() for the DVB video device.  A writable opener owns the video
 * decoder, so stop any memory replay before the generic DVB release.
 */
static int dvb_video_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;

	dprintk(2, "av7110:%p, \n", av7110);
	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
		av7110_av_stop(av7110, RP_VIDEO);
	return dvb_generic_release(inode, file);
}
/*
 * open() for the DVB audio device.  After the generic open succeeds the
 * audio output ring buffer is flushed and the source reset to the demux.
 */
static int dvb_audio_open(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;
	int ret = dvb_generic_open(inode, file);

	dprintk(2, "av7110:%p, \n", av7110);
	if (ret < 0)
		return ret;
	/* start from a clean audio output buffer, fed by the demux */
	dvb_ringbuffer_flush_spinlock_wakeup(&av7110->aout);
	av7110->audiostate.stream_source = AUDIO_SOURCE_DEMUX;
	return 0;
}
/*
 * release() for the DVB audio device: stop audio replay unconditionally,
 * then hand over to the generic DVB release.
 */
static int dvb_audio_release(struct inode *inode, struct file *file)
{
	struct dvb_device *dvbdev = file->private_data;
	struct av7110 *av7110 = dvbdev->priv;

	dprintk(2, "av7110:%p, \n", av7110);
	av7110_av_stop(av7110, RP_AUDIO);
	return dvb_generic_release(inode, file);
}
/******************************************************************************
 * driver registration
 ******************************************************************************/
/* file operations for /dev/dvb/adapterX/videoY */
static const struct file_operations dvb_video_fops = {
.owner = THIS_MODULE,
.write = dvb_video_write,
.unlocked_ioctl = dvb_generic_ioctl,
.compat_ioctl = dvb_generic_ioctl,
.open = dvb_video_open,
.release = dvb_video_release,
.poll = dvb_video_poll,
.llseek = noop_llseek,
};
/* video device template; many readers may watch events, one writer feeds data */
static struct dvb_device dvbdev_video = {
.priv = NULL,
.users = 6,
.readers = 5, /* arbitrary */
.writers = 1,
.fops = &dvb_video_fops,
.kernel_ioctl = dvb_video_ioctl,
};
/* file operations for /dev/dvb/adapterX/audioY */
static const struct file_operations dvb_audio_fops = {
.owner = THIS_MODULE,
.write = dvb_audio_write,
.unlocked_ioctl = dvb_generic_ioctl,
.compat_ioctl = dvb_generic_ioctl,
.open = dvb_audio_open,
.release = dvb_audio_release,
.poll = dvb_audio_poll,
.llseek = noop_llseek,
};
/* audio device template; a single user that may write */
static struct dvb_device dvbdev_audio = {
.priv = NULL,
.users = 1,
.writers = 1,
.fops = &dvb_audio_fops,
.kernel_ioctl = dvb_audio_ioctl,
};
int av7110_av_register(struct av7110 *av7110)
{
av7110->audiostate.AV_sync_state = 0;
av7110->audiostate.mute_state = 0;
av7110->audiostate.play_state = AUDIO_STOPPED;
av7110->audiostate.stream_source = AUDIO_SOURCE_DEMUX;
av7110->audiostate.channel_select = AUDIO_STEREO;
av7110->audiostate.bypass_mode = 0;
av7110->videostate.video_blank = 0;
av7110->videostate.play_state = VIDEO_STOPPED;
av7110->videostate.stream_source = VIDEO_SOURCE_DEMUX;
av7110->videostate.video_format = VIDEO_FORMAT_4_3;
av7110->videostate.display_format = VIDEO_LETTER_BOX;
av7110->display_ar = VIDEO_FORMAT_4_3;
av7110->display_panscan = VID_VC_AND_PS_PREF;
init_waitqueue_head(&av7110->video_events.wait_queue);
spin_lock_init(&av7110->video_events.lock);
av7110->video_events.eventw = av7110->video_events.eventr = 0;
av7110->video_events.overflow = 0;
memset(&av7110->video_size, 0, sizeof (video_size_t));
dvb_register_device(&av7110->dvb_adapter, &av7110->video_dev,
&dvbdev_video, av7110, DVB_DEVICE_VIDEO, 0);
dvb_register_device(&av7110->dvb_adapter, &av7110->audio_dev,
&dvbdev_audio, av7110, DVB_DEVICE_AUDIO, 0);
return 0;
}
/* Tear down the devices created by av7110_av_register(), in reverse
 * order of registration. */
void av7110_av_unregister(struct av7110 *av7110)
{
dvb_unregister_device(av7110->audio_dev);
dvb_unregister_device(av7110->video_dev);
}
int av7110_av_init(struct av7110 *av7110)
{
void (*play[])(u8 *, int, void *) = { play_audio_cb, play_video_cb };
int i, ret;
for (i = 0; i < 2; i++) {
struct ipack *ipack = av7110->ipack + i;
ret = av7110_ipack_init(ipack, IPACKS, play[i]);
if (ret < 0) {
if (i)
av7110_ipack_free(--ipack);
goto out;
}
ipack->data = av7110;
}
dvb_ringbuffer_init(&av7110->avout, av7110->iobuf, AVOUTLEN);
dvb_ringbuffer_init(&av7110->aout, av7110->iobuf + AVOUTLEN, AOUTLEN);
av7110->kbuf[0] = (u8 *)(av7110->iobuf + AVOUTLEN + AOUTLEN + BMPLEN);
av7110->kbuf[1] = av7110->kbuf[0] + 2 * IPACKS;
out:
return ret;
}
/* Free both ipack instances allocated by av7110_av_init(). */
void av7110_av_exit(struct av7110 *av7110)
{
	int i;

	for (i = 0; i < 2; i++)
		av7110_ipack_free(&av7110->ipack[i]);
}
| linux-master | drivers/staging/media/av7110/av7110_av.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* av7110_hw.c: av7110 low level hardware access and firmware interface
*
* Copyright (C) 1999-2002 Ralph Metzler
* & Marcus Metzler for convergence integrated media GmbH
*
* originally based on code by:
* Copyright (C) 1998,1999 Christian Theiss <[email protected]>
*
* the project's page is at https://linuxtv.org
*/
/* for debugging ARM communication: */
//#define COM_DEBUG
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include "av7110.h"
#include "av7110_hw.h"
#define _NOHANDSHAKE
/*
* Max transfer size done by av7110_fw_cmd()
*
* The maximum size passed to this function is 6 bytes. The buffer also
* uses two additional ones for type and size. So, 8 bytes is enough.
*/
#define MAX_XFER_SIZE 8
/****************************************************************************
* DEBI functions
****************************************************************************/
/* This DEBI code is based on the Stradis driver
by Nathan Laredo <[email protected]> */
/*
 * Write to the saa7146 DEBI bus: values of up to 4 bytes are written as
 * an immediate transfer, larger counts as a block transfer from the
 * preconfigured debi_bus address.  Returns 0 on success, -1 on a bad
 * count or if the previous DEBI transfer did not complete in time.
 *
 * Fix: the error printk()s carried no log level (checkpatch warning);
 * tag them KERN_ERR like the rest of this file.
 */
int av7110_debiwrite(struct av7110 *av7110, u32 config,
		     int addr, u32 val, unsigned int count)
{
	struct saa7146_dev *dev = av7110->dev;

	if (count > 32764) {
		printk(KERN_ERR "%s: invalid count %d\n", __func__, count);
		return -1;
	}
	if (saa7146_wait_for_debi_done(av7110->dev, 0) < 0) {
		printk(KERN_ERR "%s: wait_for_debi_done failed\n", __func__);
		return -1;
	}
	saa7146_write(dev, DEBI_CONFIG, config);
	if (count <= 4)		/* immediate transfer */
		saa7146_write(dev, DEBI_AD, val);
	else			/* block transfer */
		saa7146_write(dev, DEBI_AD, av7110->debi_bus);
	saa7146_write(dev, DEBI_COMMAND, (count << 17) | (addr & 0xffff));
	saa7146_write(dev, MC2, (2 << 16) | 2);
	return 0;
}
/*
 * Read from the saa7146 DEBI bus.  For block transfers (count > 4) the
 * data lands at debi_bus and the count is returned; for immediate reads
 * the value is fetched from DEBI_AD and masked to @count bytes.
 * Returns 0 on a bad count or a DEBI timeout.
 *
 * Fix: the error printk()s carried no log level (checkpatch warning);
 * tag them KERN_ERR like the rest of this file.
 */
u32 av7110_debiread(struct av7110 *av7110, u32 config, int addr, unsigned int count)
{
	struct saa7146_dev *dev = av7110->dev;
	u32 result = 0;

	if (count > 32764) {
		printk(KERN_ERR "%s: invalid count %d\n", __func__, count);
		return 0;
	}
	if (saa7146_wait_for_debi_done(av7110->dev, 0) < 0) {
		printk(KERN_ERR "%s: wait_for_debi_done #1 failed\n", __func__);
		return 0;
	}
	saa7146_write(dev, DEBI_AD, av7110->debi_bus);
	saa7146_write(dev, DEBI_COMMAND, (count << 17) | 0x10000 | (addr & 0xffff));
	saa7146_write(dev, DEBI_CONFIG, config);
	saa7146_write(dev, MC2, (2 << 16) | 2);
	if (count > 4)		/* block transfer: data arrives via debi_bus */
		return count;
	if (saa7146_wait_for_debi_done(av7110->dev, 0) < 0) {
		printk(KERN_ERR "%s: wait_for_debi_done #2 failed\n", __func__);
		return 0;
	}
	result = saa7146_read(dev, DEBI_AD);
	result &= (0xffffffffUL >> ((4 - count) * 8));
	return result;
}
/* av7110 ARM core boot stuff */
#if 0
/* Disabled alternative ARM reset path, kept for reference only. */
void av7110_reset_arm(struct av7110 *av7110)
{
saa7146_setgpio(av7110->dev, RESET_LINE, SAA7146_GPIO_OUTLO);
/* Disable DEBI and GPIO irq */
SAA7146_IER_DISABLE(av7110->dev, MASK_19 | MASK_03);
SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
saa7146_setgpio(av7110->dev, RESET_LINE, SAA7146_GPIO_OUTHI);
msleep(30); /* the firmware needs some time to initialize */
ARM_ResetMailBox(av7110);
SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
SAA7146_IER_ENABLE(av7110->dev, MASK_03);
av7110->arm_ready = 1;
dprintk(1, "reset ARM\n");
}
#endif /* 0 */
/*
 * Poll a 16-bit DEBI location until it reads @state, trying up to 100
 * times with a 5us pause between attempts (~500us worst case).
 * Returns 0 on match, -ETIMEDOUT otherwise.
 */
static int waitdebi(struct av7110 *av7110, int adr, int state)
{
	int tries = 100;

	dprintk(4, "%p\n", av7110);
	while (tries--) {
		if (irdebi(av7110, DEBINOSWAP, adr, 0, 2) == state)
			return 0;
		udelay(5);
	}
	return -ETIMEDOUT;
}
/*
 * Stream the ARM application image into AV7110 DRAM through the
 * bootloader's double-buffered DPRAM protocol: for each block, wait for
 * BOOTSTATE_BUFFER_EMPTY, write the data, publish base/size, flag
 * BOOTSTATE_BUFFER_FULL, and flip to the other boot buffer.
 * Returns 0 on success or -ETIMEDOUT.
 */
static int load_dram(struct av7110 *av7110, u32 *data, int len)
{
int i;
int blocks, rest;
u32 base, bootblock = AV7110_BOOT_BLOCK;
dprintk(4, "%p\n", av7110);
blocks = len / AV7110_BOOT_MAX_SIZE;
rest = len % AV7110_BOOT_MAX_SIZE;
base = DRAM_START_CODE;
for (i = 0; i < blocks; i++) {
if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) {
printk(KERN_ERR "dvb-ttpci: load_dram(): timeout at block %d\n", i);
return -ETIMEDOUT;
}
dprintk(4, "writing DRAM block %d\n", i);
mwdebi(av7110, DEBISWAB, bootblock,
((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, AV7110_BOOT_MAX_SIZE);
/* alternate between the two DPRAM boot buffers */
bootblock ^= 0x1400;
iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, AV7110_BOOT_MAX_SIZE, 2);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
base += AV7110_BOOT_MAX_SIZE;
}
if (rest > 0) {
if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) {
printk(KERN_ERR "dvb-ttpci: load_dram(): timeout at last block\n");
return -ETIMEDOUT;
}
/* tiny tails (<= 4 bytes) are re-aligned back by one word */
if (rest > 4)
mwdebi(av7110, DEBISWAB, bootblock,
((u8 *)data) + i * AV7110_BOOT_MAX_SIZE, rest);
else
mwdebi(av7110, DEBISWAB, bootblock,
((u8 *)data) + i * AV7110_BOOT_MAX_SIZE - 4, rest + 4);
iwdebi(av7110, DEBISWAB, AV7110_BOOT_BASE, swab32(base), 4);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, rest, 2);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
}
if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_EMPTY) < 0) {
printk(KERN_ERR "dvb-ttpci: load_dram(): timeout after last block\n");
return -ETIMEDOUT;
}
/* a zero-length block tells the loader the download is complete */
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_SIZE, 0, 2);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
if (waitdebi(av7110, AV7110_BOOT_STATE, BOOTSTATE_AV7110_BOOT_COMPLETE) < 0) {
printk(KERN_ERR "dvb-ttpci: load_dram(): final handshake timeout\n");
return -ETIMEDOUT;
}
return 0;
}
/* we cannot write av7110 DRAM directly, so load a bootloader into
 * the DPRAM which implements a simple boot protocol */
/*
 * Full ARM bring-up: hold the ARM in reset, verify DEBI access, load
 * the DPRAM bootloader firmware, stream the DRAM image via load_dram(),
 * load the DPRAM part of the application, then release reset and
 * re-enable the ARM interrupt.  Returns 0 or a negative error.
 */
int av7110_bootarm(struct av7110 *av7110)
{
const struct firmware *fw;
const char *fw_name = "av7110/bootcode.bin";
struct saa7146_dev *dev = av7110->dev;
u32 ret;
int i;
dprintk(4, "%p\n", av7110);
av7110->arm_ready = 0;
/* hold the ARM in reset while we set everything up */
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTLO);
/* Disable DEBI and GPIO irq */
SAA7146_IER_DISABLE(av7110->dev, MASK_03 | MASK_19);
SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
/* enable DEBI */
saa7146_write(av7110->dev, MC1, 0x08800880);
saa7146_write(av7110->dev, DD1_STREAM_B, 0x00000000);
saa7146_write(av7110->dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
/* test DEBI: a swapped write must read back byte-reversed */
iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4);
/* FIXME: Why does Nexus CA require 2x iwdebi for first init? */
iwdebi(av7110, DEBISWAP, DPRAM_BASE, 0x76543210, 4);
if ((ret=irdebi(av7110, DEBINOSWAP, DPRAM_BASE, 0, 4)) != 0x10325476) {
printk(KERN_ERR "dvb-ttpci: debi test in av7110_bootarm() failed: %08x != %08x (check your BIOS 'Plug&Play OS' settings)\n",
ret, 0x10325476);
return -1;
}
/* clear the whole 8k DPRAM before loading the bootloader */
for (i = 0; i < 8192; i += 4)
iwdebi(av7110, DEBISWAP, DPRAM_BASE + i, 0x00, 4);
dprintk(2, "debi test OK\n");
/* boot */
dprintk(1, "load boot code\n");
saa7146_setgpio(dev, ARM_IRQ_LINE, SAA7146_GPIO_IRQLO);
//saa7146_setgpio(dev, DEBI_DONE_LINE, SAA7146_GPIO_INPUT);
//saa7146_setgpio(dev, 3, SAA7146_GPIO_INPUT);
ret = request_firmware(&fw, fw_name, &dev->pci->dev);
if (ret) {
printk(KERN_ERR "dvb-ttpci: Failed to load firmware \"%s\"\n",
fw_name);
return ret;
}
mwdebi(av7110, DEBISWAB, DPRAM_BASE, fw->data, fw->size);
release_firmware(fw);
iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2);
if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): saa7146_wait_for_debi_done() timed out\n");
return -ETIMEDOUT;
}
/* release reset so the bootloader starts executing */
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
mdelay(1);
dprintk(1, "load dram code\n");
if (load_dram(av7110, (u32 *)av7110->bin_root, av7110->size_root) < 0) {
printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): load_dram() failed\n");
return -1;
}
/* stop the ARM again to install the DPRAM part of the application */
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTLO);
mdelay(1);
dprintk(1, "load dpram code\n");
mwdebi(av7110, DEBISWAB, DPRAM_BASE, av7110->bin_dpram, av7110->size_dpram);
if (saa7146_wait_for_debi_done(av7110->dev, 1)) {
printk(KERN_ERR "dvb-ttpci: av7110_bootarm(): saa7146_wait_for_debi_done() timed out after loading DRAM\n");
return -ETIMEDOUT;
}
saa7146_setgpio(dev, RESET_LINE, SAA7146_GPIO_OUTHI);
msleep(30); /* the firmware needs some time to initialize */
//ARM_ClearIrq(av7110);
ARM_ResetMailBox(av7110);
SAA7146_ISR_CLEAR(av7110->dev, MASK_19 | MASK_03);
SAA7146_IER_ENABLE(av7110->dev, MASK_03);
av7110->arm_errors = 0;
av7110->arm_ready = 1;
return 0;
}
MODULE_FIRMWARE("av7110/bootcode.bin");
/****************************************************************************
* DEBI command polling
****************************************************************************/
/*
 * Wait until none of @flags is set in the ARM MSGSTATE register.
 * Old firmware (<= 0x261c) cannot report the state, so just sleep.
 * Returns 0 on success, -ERESTARTSYS if interrupted, -ETIMEDOUT on
 * timeout.
 */
int av7110_wait_msgstate(struct av7110 *av7110, u16 flags)
{
	unsigned long deadline;
	u32 stat;
	int expired;

	if (FW_VERSION(av7110->arm_app) <= 0x261c) {
		/* not supported by old firmware */
		msleep(50);
		return 0;
	}
	/* new firmware */
	deadline = jiffies + ARM_WAIT_FREE;
	for (;;) {
		/* sample the timeout before polling so a slow register read
		 * cannot turn a successful poll into a spurious timeout */
		expired = time_after(jiffies, deadline);
		if (mutex_lock_interruptible(&av7110->dcomlock))
			return -ERESTARTSYS;
		stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
		mutex_unlock(&av7110->dcomlock);
		if ((stat & flags) == 0)
			break;
		if (expired) {
			printk(KERN_ERR "%s: timeout waiting for MSGSTATE %04x\n",
			       __func__, stat & flags);
			return -ETIMEDOUT;
		}
		msleep(1);
	}
	return 0;
}
/*
 * Transfer one firmware command (buf[0] = type/sub-command, buf[1] =
 * parameter count, buf[2..] = parameters) into the ARM's COMMAND area.
 * Caller must hold dcomlock.  The COMMAND word is written last: writing
 * it non-zero is what hands the command to the firmware, so parameters
 * must be in place before that.  Returns 0 or a negative error.
 */
static int __av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length)
{
int i;
unsigned long start;
char *type = NULL;
u16 flags[2] = {0, 0};
u32 stat;
int err;
// dprintk(4, "%p\n", av7110);
if (!av7110->arm_ready) {
dprintk(1, "arm not ready.\n");
return -ENXIO;
}
/* wait until the previous command has been consumed (COMMAND == 0) */
start = jiffies;
while (1) {
err = time_after(jiffies, start + ARM_WAIT_FREE);
if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0)
break;
if (err) {
printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for COMMAND idle\n", __func__);
av7110->arm_errors++;
return -ETIMEDOUT;
}
msleep(1);
}
/* older firmware needs the shared interface locked during setup */
if (FW_VERSION(av7110->arm_app) <= 0x261f)
wdebi(av7110, DEBINOSWAP, COM_IF_LOCK, 0xffff, 2);
#ifndef _NOHANDSHAKE
start = jiffies;
while (1) {
err = time_after(jiffies, start + ARM_WAIT_SHAKE);
if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0)
break;
if (err) {
printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for HANDSHAKE_REG\n", __func__);
return -ETIMEDOUT;
}
msleep(1);
}
#endif
/* queued (non-immediate) command types must also wait until the
 * corresponding firmware queue is neither full nor overflowed */
switch ((buf[0] >> 8) & 0xff) {
case COMTYPE_PIDFILTER:
case COMTYPE_ENCODER:
case COMTYPE_REC_PLAY:
case COMTYPE_MPEGDECODER:
type = "MSG";
flags[0] = GPMQOver;
flags[1] = GPMQFull;
break;
case COMTYPE_OSD:
type = "OSD";
flags[0] = OSDQOver;
flags[1] = OSDQFull;
break;
case COMTYPE_MISC:
if (FW_VERSION(av7110->arm_app) >= 0x261d) {
type = "MSG";
flags[0] = GPMQOver;
flags[1] = GPMQBusy;
}
break;
default:
break;
}
if (type != NULL) {
/* non-immediate COMMAND type */
start = jiffies;
for (;;) {
err = time_after(jiffies, start + ARM_WAIT_FREE);
stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
if (stat & flags[0]) {
printk(KERN_ERR "%s: %s QUEUE overflow\n",
__func__, type);
return -1;
}
if ((stat & flags[1]) == 0)
break;
if (err) {
printk(KERN_ERR "%s: timeout waiting on busy %s QUEUE\n",
__func__, type);
av7110->arm_errors++;
return -ETIMEDOUT;
}
msleep(1);
}
}
/* parameters first, COMMAND word last -- that ordering triggers the ARM */
for (i = 2; i < length; i++)
wdebi(av7110, DEBINOSWAP, COMMAND + 2 * i, (u32) buf[i], 2);
if (length)
wdebi(av7110, DEBINOSWAP, COMMAND + 2, (u32) buf[1], 2);
else
wdebi(av7110, DEBINOSWAP, COMMAND + 2, 0, 2);
wdebi(av7110, DEBINOSWAP, COMMAND, (u32) buf[0], 2);
if (FW_VERSION(av7110->arm_app) <= 0x261f)
wdebi(av7110, DEBINOSWAP, COM_IF_LOCK, 0x0000, 2);
#ifdef COM_DEBUG
/* debug build: synchronously wait for completion and check queues */
start = jiffies;
while (1) {
err = time_after(jiffies, start + ARM_WAIT_FREE);
if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0)
break;
if (err) {
printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for COMMAND %d to complete\n",
__func__, (buf[0] >> 8) & 0xff);
return -ETIMEDOUT;
}
msleep(1);
}
stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
if (stat & GPMQOver) {
printk(KERN_ERR "dvb-ttpci: %s(): GPMQOver\n", __func__);
return -ENOSPC;
}
else if (stat & OSDQOver) {
printk(KERN_ERR "dvb-ttpci: %s(): OSDQOver\n", __func__);
return -ENOSPC;
}
#endif
return 0;
}
/*
 * Locked wrapper around __av7110_send_fw_cmd(): serializes firmware
 * commands through dcomlock and logs failures other than -ERESTARTSYS.
 */
static int av7110_send_fw_cmd(struct av7110 *av7110, u16* buf, int length)
{
	int ret;

	if (!av7110->arm_ready) {
		dprintk(1, "arm not ready.\n");
		return -1;
	}
	if (mutex_lock_interruptible(&av7110->dcomlock))
		return -ERESTARTSYS;
	ret = __av7110_send_fw_cmd(av7110, buf, length);
	mutex_unlock(&av7110->dcomlock);
	if (ret && ret != -ERESTARTSYS)
		printk(KERN_ERR "dvb-ttpci: %s(): av7110_send_fw_cmd error %d\n",
		       __func__, ret);
	return ret;
}
/*
 * Build a firmware command from varargs and submit it.  cmd[0] holds
 * type/sub-command, cmd[1] the parameter count, followed by up to
 * MAX_XFER_SIZE - 2 u16 parameters.  Returns the av7110_send_fw_cmd()
 * result or -EINVAL if @num does not fit the transfer buffer.
 */
int av7110_fw_cmd(struct av7110 *av7110, int type, int com, int num, ...)
{
	u16 cmd[MAX_XFER_SIZE];
	int ret;

	if (num + 2 > ARRAY_SIZE(cmd)) {
		printk(KERN_WARNING
		       "%s: %s len=%d is too big!\n",
		       KBUILD_MODNAME, __func__, num);
		return -EINVAL;
	}
	cmd[0] = ((type << 8) | com);
	cmd[1] = num;
	if (num) {
		va_list ap;
		int i;

		va_start(ap, num);
		for (i = 0; i < num; i++)
			cmd[i + 2] = va_arg(ap, u32);
		va_end(ap);
	}
	ret = av7110_send_fw_cmd(av7110, cmd, num + 2);
	if (ret && ret != -ERESTARTSYS)
		printk(KERN_ERR "dvb-ttpci: av7110_fw_cmd error %d\n", ret);
	return ret;
}
#if 0
/* Disabled CI command helper, kept for reference: packs @len bytes
 * (capped at 32) into big-endian u16 pairs behind the common-interface
 * sub-command header and sends the fixed 18-word command. */
int av7110_send_ci_cmd(struct av7110 *av7110, u8 subcom, u8 *buf, u8 len)
{
int i, ret;
u16 cmd[18] = { ((COMTYPE_COMMON_IF << 8) + subcom),
16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
dprintk(4, "%p\n", av7110);
for(i = 0; i < len && i < 32; i++)
{
if(i % 2 == 0)
cmd[(i / 2) + 2] = (u16)(buf[i]) << 8;
else
cmd[(i / 2) + 2] |= buf[i];
}
ret = av7110_send_fw_cmd(av7110, cmd, 18);
if (ret && ret != -ERESTARTSYS)
printk(KERN_ERR "dvb-ttpci: av7110_send_ci_cmd error %d\n", ret);
return ret;
}
#endif /* 0 */
/*
 * Send a firmware request and read back @reply_buf_len 16-bit words of
 * reply from the COM_BUFF area.  dcomlock is held across the whole
 * send/wait/read sequence so no other command can interleave.
 * Returns 0, -ERESTARTSYS, -ETIMEDOUT or a send error.
 */
int av7110_fw_request(struct av7110 *av7110, u16 *request_buf,
int request_buf_len, u16 *reply_buf, int reply_buf_len)
{
int err;
s16 i;
unsigned long start;
#ifdef COM_DEBUG
u32 stat;
#endif
dprintk(4, "%p\n", av7110);
if (!av7110->arm_ready) {
dprintk(1, "arm not ready.\n");
return -1;
}
if (mutex_lock_interruptible(&av7110->dcomlock))
return -ERESTARTSYS;
if ((err = __av7110_send_fw_cmd(av7110, request_buf, request_buf_len)) < 0) {
mutex_unlock(&av7110->dcomlock);
printk(KERN_ERR "dvb-ttpci: av7110_fw_request error %d\n", err);
return err;
}
/* wait for the firmware to consume the command (COMMAND reads 0) */
start = jiffies;
while (1) {
err = time_after(jiffies, start + ARM_WAIT_FREE);
if (rdebi(av7110, DEBINOSWAP, COMMAND, 0, 2) == 0)
break;
if (err) {
printk(KERN_ERR "%s: timeout waiting for COMMAND to complete\n", __func__);
mutex_unlock(&av7110->dcomlock);
return -ETIMEDOUT;
}
#ifdef _NOHANDSHAKE
msleep(1);
#endif
}
#ifndef _NOHANDSHAKE
/* with handshake enabled, additionally wait for HANDSHAKE_REG */
start = jiffies;
while (1) {
err = time_after(jiffies, start + ARM_WAIT_SHAKE);
if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0)
break;
if (err) {
printk(KERN_ERR "%s: timeout waiting for HANDSHAKE_REG\n", __func__);
mutex_unlock(&av7110->dcomlock);
return -ETIMEDOUT;
}
msleep(1);
}
#endif
#ifdef COM_DEBUG
stat = rdebi(av7110, DEBINOSWAP, MSGSTATE, 0, 2);
if (stat & GPMQOver) {
printk(KERN_ERR "%s: GPMQOver\n", __func__);
mutex_unlock(&av7110->dcomlock);
return -1;
}
else if (stat & OSDQOver) {
printk(KERN_ERR "%s: OSDQOver\n", __func__);
mutex_unlock(&av7110->dcomlock);
return -1;
}
#endif
/* copy the reply words out of the shared communication buffer */
for (i = 0; i < reply_buf_len; i++)
reply_buf[i] = rdebi(av7110, DEBINOSWAP, COM_BUFF + 2 * i, 0, 2);
mutex_unlock(&av7110->dcomlock);
return 0;
}
/*
 * Convenience wrapper: send a parameterless request identified by @tag
 * and read @length words of reply into @buf, logging any failure.
 */
static int av7110_fw_query(struct av7110 *av7110, u16 tag, u16* buf, s16 length)
{
	int err = av7110_fw_request(av7110, &tag, 0, buf, length);

	if (err)
		printk(KERN_ERR "dvb-ttpci: av7110_fw_query error %d\n", err);
	return err;
}
/****************************************************************************
 * Firmware commands
 ****************************************************************************/
/* get version of the firmware ROM, RTSL, video ucode and ARM application */
int av7110_firmversion(struct av7110 *av7110)
{
u16 buf[20];
u16 tag = ((COMTYPE_REQUEST << 8) + ReqVersion);
dprintk(4, "%p\n", av7110);
/* failure here means the ARM never came up properly */
if (av7110_fw_query(av7110, tag, buf, 16)) {
printk("dvb-ttpci: failed to boot firmware @ card %d\n",
av7110->dvb_adapter.num);
return -EIO;
}
/* each version is returned as two 16-bit words, high word first */
av7110->arm_fw = (buf[0] << 16) + buf[1];
av7110->arm_rtsl = (buf[2] << 16) + buf[3];
av7110->arm_vid = (buf[4] << 16) + buf[5];
av7110->arm_app = (buf[6] << 16) + buf[7];
av7110->avtype = (buf[8] << 16) + buf[9];
printk("dvb-ttpci: info @ card %d: firm %08x, rtsl %08x, vid %08x, app %08x\n",
av7110->dvb_adapter.num, av7110->arm_fw,
av7110->arm_rtsl, av7110->arm_vid, av7110->arm_app);
/* print firmware capabilities */
if (FW_CI_LL_SUPPORT(av7110->arm_app))
printk("dvb-ttpci: firmware @ card %d supports CI link layer interface\n",
av7110->dvb_adapter.num);
else
printk("dvb-ttpci: no firmware support for CI link layer interface @ card %d\n",
av7110->dvb_adapter.num);
return 0;
}
/*
 * Send a DiSEqC message (at most 10 bytes) via the firmware's audio DAC
 * command channel.  @burst selects the tone burst: 0 or 1 for a burst,
 * -1 to send none (encoded as 0xffff).  Returns the firmware command
 * result.
 */
int av7110_diseqc_send(struct av7110 *av7110, int len, u8 *msg, unsigned long burst)
{
	u16 cmd[18] = { ((COMTYPE_AUDIODAC << 8) + SendDiSEqC),
			16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int ret, i;

	dprintk(4, "%p\n", av7110);
	if (len > 10)
		len = 10;	/* firmware limit */
	cmd[1] = len + 2;
	cmd[2] = len;
	cmd[3] = (burst != -1) ? (burst ? 0x01 : 0x00) : 0xffff;
	for (i = 0; i < len; i++)
		cmd[4 + i] = msg[i];
	ret = av7110_send_fw_cmd(av7110, cmd, 18);
	if (ret && ret!=-ERESTARTSYS)
		printk(KERN_ERR "dvb-ttpci: av7110_diseqc_send error %d\n", ret);
	return ret;
}
#ifdef CONFIG_DVB_AV7110_OSD
/* Thin wrappers around av7110_fw_cmd() for the firmware OSD commands. */
/* activate the color/blend settings of @windownr */
static inline int SetColorBlend(struct av7110 *av7110, u8 windownr)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, SetCBlend, 1, windownr);
}
/* set the blend factor of one palette entry */
static inline int SetBlend_(struct av7110 *av7110, u8 windownr,
enum av7110_osd_palette_type colordepth, u16 index, u8 blending)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, SetBlend, 4,
windownr, colordepth, index, blending);
}
/* program one palette entry; the YCbCr value is split into hi/lo words */
static inline int SetColor_(struct av7110 *av7110, u8 windownr,
enum av7110_osd_palette_type colordepth, u16 index, u16 colorhi, u16 colorlo)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, SetColor, 5,
windownr, colordepth, index, colorhi, colorlo);
}
/* select font size and fore-/background color for text output */
static inline int SetFont(struct av7110 *av7110, u8 windownr, u8 fontsize,
u16 colorfg, u16 colorbg)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, Set_Font, 4,
windownr, fontsize, colorfg, colorbg);
}
/*
 * Wait until the firmware has consumed the text buffer (BUFF1_BASE
 * reads 0).  Returns 0, -ERESTARTSYS if interrupted while acquiring
 * dcomlock, or -ETIMEDOUT.
 */
static int FlushText(struct av7110 *av7110)
{
	unsigned long deadline;
	int expired;

	if (mutex_lock_interruptible(&av7110->dcomlock))
		return -ERESTARTSYS;
	deadline = jiffies + ARM_WAIT_OSD;
	for (;;) {
		/* check the deadline before polling, matching the poll loops
		 * elsewhere in this file */
		expired = time_after(jiffies, deadline);
		if (rdebi(av7110, DEBINOSWAP, BUFF1_BASE, 0, 2) == 0)
			break;
		if (expired) {
			printk(KERN_ERR "dvb-ttpci: %s(): timeout waiting for BUFF1_BASE == 0\n",
			       __func__);
			mutex_unlock(&av7110->dcomlock);
			return -ETIMEDOUT;
		}
		msleep(1);
	}
	mutex_unlock(&av7110->dcomlock);
	return 0;
}
/*
 * Write the NUL-terminated string @buf at (@x, @y) into OSD window @win:
 * wait for the firmware text buffer to become free, copy the string into
 * BUFF1_BASE as byte-swapped 16-bit words, then issue the DText command.
 * Returns 0 or a negative error.
 */
static int WriteText(struct av7110 *av7110, u8 win, u16 x, u16 y, char *buf)
{
int i, ret;
unsigned long start;
int length = strlen(buf) + 1;
u16 cbuf[5] = { (COMTYPE_OSD << 8) + DText, 3, win, x, y };
if (mutex_lock_interruptible(&av7110->dcomlock))
return -ERESTARTSYS;
/* wait for the previous text buffer to be consumed */
start = jiffies;
while (1) {
ret = time_after(jiffies, start + ARM_WAIT_OSD);
if (rdebi(av7110, DEBINOSWAP, BUFF1_BASE, 0, 2) == 0)
break;
if (ret) {
printk(KERN_ERR "dvb-ttpci: %s: timeout waiting for BUFF1_BASE == 0\n",
__func__);
mutex_unlock(&av7110->dcomlock);
return -ETIMEDOUT;
}
msleep(1);
}
#ifndef _NOHANDSHAKE
start = jiffies;
while (1) {
ret = time_after(jiffies, start + ARM_WAIT_SHAKE);
if (rdebi(av7110, DEBINOSWAP, HANDSHAKE_REG, 0, 2) == 0)
break;
if (ret) {
printk(KERN_ERR "dvb-ttpci: %s: timeout waiting for HANDSHAKE_REG\n",
__func__);
mutex_unlock(&av7110->dcomlock);
return -ETIMEDOUT;
}
msleep(1);
}
#endif
/* copy the string as swapped u16 words; pad an odd tail with zero */
for (i = 0; i < length / 2; i++)
wdebi(av7110, DEBINOSWAP, BUFF1_BASE + i * 2,
swab16(*(u16 *)(buf + 2 * i)), 2);
if (length & 1)
wdebi(av7110, DEBINOSWAP, BUFF1_BASE + i * 2, 0, 2);
ret = __av7110_send_fw_cmd(av7110, cbuf, 5);
mutex_unlock(&av7110->dcomlock);
if (ret && ret!=-ERESTARTSYS)
printk(KERN_ERR "dvb-ttpci: WriteText error %d\n", ret);
return ret;
}
/* draw a line of the given color inside window @windownr */
static inline int DrawLine(struct av7110 *av7110, u8 windownr,
u16 x, u16 y, u16 dx, u16 dy, u16 color)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, DLine, 6,
windownr, x, y, dx, dy, color);
}
/* draw a filled rectangle of the given color */
static inline int DrawBlock(struct av7110 *av7110, u8 windownr,
u16 x, u16 y, u16 dx, u16 dy, u16 color)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, DBox, 6,
windownr, x, y, dx, dy, color);
}
/* make window @windownr invisible without destroying it */
static inline int HideWindow(struct av7110 *av7110, u8 windownr)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, WHide, 1, windownr);
}
/* move a window by a relative offset */
static inline int MoveWindowRel(struct av7110 *av7110, u8 windownr, u16 x, u16 y)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, WMoveD, 3, windownr, x, y);
}
/* move a window to an absolute screen position */
static inline int MoveWindowAbs(struct av7110 *av7110, u8 windownr, u16 x, u16 y)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, WMoveA, 3, windownr, x, y);
}
/* destroy window @windownr and free its firmware resources */
static inline int DestroyOSDWindow(struct av7110 *av7110, u8 windownr)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, WDestroy, 1, windownr);
}
/* create a window of the given display type and size */
static inline int CreateOSDWindow(struct av7110 *av7110, u8 windownr,
osd_raw_window_t disptype,
u16 width, u16 height)
{
return av7110_fw_cmd(av7110, COMTYPE_OSD, WCreate, 4,
windownr, disptype, width, height);
}
/* lookup tables indexed by (bits-per-pixel - 1); unused depths are 0 */
static enum av7110_osd_palette_type bpp2pal[8] = {
Pal1Bit, Pal2Bit, 0, Pal4Bit, 0, 0, 0, Pal8Bit
};
static osd_raw_window_t bpp2bit[8] = {
OSD_BITMAP1, OSD_BITMAP2, 0, OSD_BITMAP4, 0, 0, 0, OSD_BITMAP8
};
/* Block (up to 10s) until the interrupt handler marks the bitmap upload
 * as finished; on timeout the state is reset and -ETIMEDOUT returned. */
static inline int WaitUntilBmpLoaded(struct av7110 *av7110)
{
int ret = wait_event_timeout(av7110->bmpq,
av7110->bmp_state != BMP_LOADING, 10*HZ);
if (ret == 0) {
printk("dvb-ttpci: warning: timeout waiting in LoadBitmap: %d, %d\n",
ret, av7110->bmp_state);
av7110->bmp_state = BMP_NONE;
return -ETIMEDOUT;
}
return 0;
}
/*
 * Copy a @dx x @dy user-space bitmap into bmpbuf (after a 1024-byte
 * header area), pack sub-byte pixel formats down to their target depth,
 * and start the firmware LoadBmp transfer.  @inc is the caller's line
 * stride in bytes.  Returns 0 or a negative error.
 */
static inline int LoadBitmap(struct av7110 *av7110,
u16 dx, u16 dy, int inc, u8 __user * data)
{
u16 format;
int bpp;
int i;
int d, delta;
u8 c;
int ret;
dprintk(4, "%p\n", av7110);
format = bpp2bit[av7110->osdbpp[av7110->osdwin]];
av7110->bmp_state = BMP_LOADING;
/* delta = input bytes (one pixel each) packed into one output byte */
if (format == OSD_BITMAP8) {
bpp=8; delta = 1;
} else if (format == OSD_BITMAP4) {
bpp=4; delta = 2;
} else if (format == OSD_BITMAP2) {
bpp=2; delta = 4;
} else if (format == OSD_BITMAP1) {
bpp=1; delta = 8;
} else {
av7110->bmp_state = BMP_NONE;
return -EINVAL;
}
av7110->bmplen = ((dx * dy * bpp + 7) & ~7) / 8;
av7110->bmpp = 0;
if (av7110->bmplen > 32768) {
av7110->bmp_state = BMP_NONE;
return -EINVAL;
}
/* copy line by line to honour the caller's stride */
for (i = 0; i < dy; i++) {
if (copy_from_user(av7110->bmpbuf + 1024 + i * dx, data + i * dx, dx)) {
av7110->bmp_state = BMP_NONE;
return -EINVAL;
}
}
/* pack 'delta' one-byte pixels into a single byte, MSB first */
if (format != OSD_BITMAP8) {
for (i = 0; i < dx * dy / delta; i++) {
c = ((u8 *)av7110->bmpbuf)[1024 + i * delta + delta - 1];
for (d = delta - 2; d >= 0; d--) {
c |= (((u8 *)av7110->bmpbuf)[1024 + i * delta + d]
<< ((delta - d - 1) * bpp));
((u8 *)av7110->bmpbuf)[1024 + i] = c;
}
}
}
av7110->bmplen += 1024;
dprintk(4, "av7110_fw_cmd: LoadBmp size %d\n", av7110->bmplen);
ret = av7110_fw_cmd(av7110, COMTYPE_OSD, LoadBmp, 3, format, dx, dy);
if (!ret)
ret = WaitUntilBmpLoaded(av7110);
return ret;
}
/* Blit the previously loaded bitmap into the current OSD window at (x, y). */
static int BlitBitmap(struct av7110 *av7110, u16 x, u16 y)
{
dprintk(4, "%p\n", av7110);
return av7110_fw_cmd(av7110, COMTYPE_OSD, BlitBmp, 4, av7110->osdwin, x, y, 0);
}
/* Release the firmware-side bitmap buffer.  Firmware older than 0x261e
 * can only release a fully loaded bitmap. */
static inline int ReleaseBitmap(struct av7110 *av7110)
{
dprintk(4, "%p\n", av7110);
if (av7110->bmp_state != BMP_LOADED && FW_VERSION(av7110->arm_app) < 0x261e)
return -1;
if (av7110->bmp_state == BMP_LOADING)
dprintk(1,"ReleaseBitmap called while BMP_LOADING\n");
av7110->bmp_state = BMP_NONE;
return av7110_fw_cmd(av7110, COMTYPE_OSD, ReleaseBmp, 0);
}
/*
 * Convert an 8-bit RGB triple to the packed word the OSD firmware
 * expects: Cr in bits 0..7, Y in bits 8..15, Cb in bits 16..23.
 * Luma uses the integer approximation 0.299R + 0.587G + 0.114B scaled
 * to 0..65535; the chroma terms stay within 0..4095 before the /16, so
 * no intermediate can wrap.
 */
static u32 RGB2YUV(u16 R, u16 G, u16 B)
{
	u16 luma = R * 77 + G * 150 + B * 29; /* 0..65535 */
	u16 Y = luma / 256;                   /* 8-bit luma */
	u16 Cb = (2048 + B * 8 - (luma >> 5)) / 16;
	u16 Cr = (2048 + R * 8 - (luma >> 5)) / 16;

	return Cr | (Cb << 16) | (Y << 8);
}
/*
 * Program palette entry @color of the current OSD window: the RGB value
 * is converted to YCbCr (or zero when fully transparent) and the upper
 * nibble of @blend becomes the 4-bit blend factor.
 */
static int OSDSetColor(struct av7110 *av7110, u8 color, u8 r, u8 g, u8 b, u8 blend)
{
	u32 yuv = blend ? RGB2YUV(r, g, b) : 0;
	u16 cl = yuv & 0xffff;
	u16 ch = (yuv >> 16) & 0xffff;
	int ret;

	ret = SetColor_(av7110, av7110->osdwin, bpp2pal[av7110->osdbpp[av7110->osdwin]],
			color, ch, cl);
	if (!ret)
		ret = SetBlend_(av7110, av7110->osdwin, bpp2pal[av7110->osdbpp[av7110->osdwin]],
				color, (blend >> 4) & 0x0f);
	return ret;
}
/* Upload palette entries @first..@last from a user-space array of 32-bit
 * ARGB-style values (byte order: R, G, B, blend) into the firmware data
 * buffer, then issue Set_Palette. Returns 0, -EINVAL on oversized range,
 * or -EFAULT on a bad user pointer. */
static int OSDSetPalette(struct av7110 *av7110, u32 __user * colors, u8 first, u8 last)
{
	int i;
	int length = last - first + 1;

	/* each entry occupies 4 bytes in the firmware buffer */
	if (length * 4 > DATA_BUFF3_SIZE)
		return -EINVAL;

	for (i = 0; i < length; i++) {
		u32 color, blend, yuv;

		if (get_user(color, colors + i))
			return -EFAULT;
		/* high nibble of the user value is the blend level */
		blend = (color & 0xF0000000) >> 4;
		yuv = blend ? RGB2YUV(color & 0xFF, (color >> 8) & 0xFF,
				      (color >> 16) & 0xFF) | blend : 0;
		/* swap the 16-bit halves for the firmware's word order */
		yuv = ((yuv & 0xFFFF0000) >> 16) | ((yuv & 0x0000FFFF) << 16);
		wdebi(av7110, DEBINOSWAP, DATA_BUFF3_BASE + i * 4, yuv, 4);
	}
	return av7110_fw_cmd(av7110, COMTYPE_OSD, Set_Palette, 4,
			     av7110->osdwin,
			     bpp2pal[av7110->osdbpp[av7110->osdwin]],
			     first, last);
}
/* Transfer a user-space pixel block into the OSD window covering
 * (x0,y0)-(x1,y1). The data is moved in chunks of at most 32 KiB: each chunk
 * is loaded into firmware memory (LoadBitmap) and then blitted at the right
 * vertical offset (BlitBitmap). @inc is the caller's line stride in bytes;
 * <= 0 means "tightly packed" (stride == width). */
static int OSDSetBlock(struct av7110 *av7110, int x0, int y0,
		       int x1, int y1, int inc, u8 __user * data)
{
	uint w, h, bpp, bpl, size, lpb, bnum, brest;
	int i;
	int rc,release_rc;

	w = x1 - x0 + 1;
	h = y1 - y0 + 1;
	if (inc <= 0)
		inc = w;
	if (w <= 0 || w > 720 || h <= 0 || h > 576)
		return -EINVAL;

	bpp = av7110->osdbpp[av7110->osdwin] + 1;	/* bits per pixel */
	bpl = ((w * bpp + 7) & ~7) / 8;			/* bytes per line, byte-aligned */
	size = h * bpl;					/* total payload */
	lpb = (32 * 1024) / bpl;			/* lines fitting in one 32k chunk */
	bnum = size / (lpb * bpl);			/* number of full chunks */
	brest = size - bnum * lpb * bpl;		/* bytes left for the last chunk */

	if (av7110->bmp_state == BMP_LOADING) {
		/* possible if syscall is repeated by -ERESTARTSYS and if firmware cannot abort */
		if (WARN_ON(FW_VERSION(av7110->arm_app) >= 0x261e))
			return -EIO;
		rc = WaitUntilBmpLoaded(av7110);
		if (rc)
			return rc;
		/* just continue. This should work for all fw versions
		 * if bnum==1 && !brest && LoadBitmap was successful
		 */
	}

	rc = 0;
	for (i = 0; i < bnum; i++) {
		rc = LoadBitmap(av7110, w, lpb, inc, data);
		if (rc)
			break;
		rc = BlitBitmap(av7110, x0, y0 + i * lpb);
		if (rc)
			break;
		data += lpb * inc;
	}
	/* partial trailing chunk, if the height was not a multiple of lpb */
	if (!rc && brest) {
		rc = LoadBitmap(av7110, w, brest / bpl, inc, data);
		if (!rc)
			rc = BlitBitmap(av7110, x0, y0 + bnum * lpb);
	}
	/* always release the firmware buffer; keep the first error seen */
	release_rc = ReleaseBitmap(av7110);
	if (!rc)
		rc = release_rc;
	if (rc)
		dprintk(1,"returns %d\n",rc);
	return rc;
}
/* Dispatch one OSD ioctl command (osd_cmd_t) to the firmware helpers.
 * All OSD traffic is serialized by osd_mutex; returns 0 or a negative
 * errno. Note: the meaning of dc->x0/y0/x1/y1/color depends on dc->cmd. */
int av7110_osd_cmd(struct av7110 *av7110, osd_cmd_t *dc)
{
	int ret;

	if (mutex_lock_interruptible(&av7110->osd_mutex))
		return -ERESTARTSYS;

	switch (dc->cmd) {
	case OSD_Close:
		ret = DestroyOSDWindow(av7110, av7110->osdwin);
		break;
	case OSD_Open:
		/* dc->color carries the bit depth; store it as (bpp - 1) */
		av7110->osdbpp[av7110->osdwin] = (dc->color - 1) & 7;
		ret = CreateOSDWindow(av7110, av7110->osdwin,
				bpp2bit[av7110->osdbpp[av7110->osdwin]],
				dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1);
		if (ret)
			break;
		if (!dc->data) {
			ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0);
			if (ret)
				break;
			ret = SetColorBlend(av7110, av7110->osdwin);
		}
		break;
	case OSD_Show:
		/* a relative move by (0,0) makes the window visible */
		ret = MoveWindowRel(av7110, av7110->osdwin, 0, 0);
		break;
	case OSD_Hide:
		ret = HideWindow(av7110, av7110->osdwin);
		break;
	case OSD_Clear:
		/* fill the full 720x576 area with color index 0 */
		ret = DrawBlock(av7110, av7110->osdwin, 0, 0, 720, 576, 0);
		break;
	case OSD_Fill:
		ret = DrawBlock(av7110, av7110->osdwin, 0, 0, 720, 576, dc->color);
		break;
	case OSD_SetColor:
		/* x0/y0/x1 carry r/g/b and y1 carries the blend value */
		ret = OSDSetColor(av7110, dc->color, dc->x0, dc->y0, dc->x1, dc->y1);
		break;
	case OSD_SetPalette:
		if (FW_VERSION(av7110->arm_app) >= 0x2618)
			ret = OSDSetPalette(av7110, dc->data, dc->color, dc->x0);
		else {
			/* old firmware: program the entries one at a time */
			int i, len = dc->x0-dc->color+1;
			u8 __user *colors = (u8 __user *)dc->data;
			u8 r, g = 0, b = 0, blend = 0;
			ret = 0;
			for (i = 0; i<len; i++) {
				if (get_user(r, colors + i * 4) ||
				    get_user(g, colors + i * 4 + 1) ||
				    get_user(b, colors + i * 4 + 2) ||
				    get_user(blend, colors + i * 4 + 3)) {
					ret = -EFAULT;
					break;
				}
				ret = OSDSetColor(av7110, dc->color + i, r, g, b, blend);
				if (ret)
					break;
			}
		}
		break;
	case OSD_SetPixel:
		/* a zero-length line draws a single pixel */
		ret = DrawLine(av7110, av7110->osdwin,
			       dc->x0, dc->y0, 0, 0, dc->color);
		break;
	case OSD_SetRow:
		/* a row is a one-line block */
		dc->y1 = dc->y0;
		fallthrough;
	case OSD_SetBlock:
		ret = OSDSetBlock(av7110, dc->x0, dc->y0, dc->x1, dc->y1, dc->color, dc->data);
		break;
	case OSD_FillRow:
		ret = DrawBlock(av7110, av7110->osdwin, dc->x0, dc->y0,
				dc->x1-dc->x0+1, dc->y1, dc->color);
		break;
	case OSD_FillBlock:
		ret = DrawBlock(av7110, av7110->osdwin, dc->x0, dc->y0,
				dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1, dc->color);
		break;
	case OSD_Line:
		ret = DrawLine(av7110, av7110->osdwin,
			       dc->x0, dc->y0, dc->x1 - dc->x0, dc->y1 - dc->y0, dc->color);
		break;
	case OSD_Text:
	{
		char textbuf[240];

		if (strncpy_from_user(textbuf, dc->data, 240) < 0) {
			ret = -EFAULT;
			break;
		}
		/* strncpy_from_user does not terminate on overlong input */
		textbuf[239] = 0;
		/* dc->x1 selects the font, clamped to 3 — NOTE(review):
		 * valid font range assumed from this clamp, confirm */
		if (dc->x1 > 3)
			dc->x1 = 3;
		ret = SetFont(av7110, av7110->osdwin, dc->x1,
			(u16) (dc->color & 0xffff), (u16) (dc->color >> 16));
		if (!ret)
			ret = FlushText(av7110);
		if (!ret)
			ret = WriteText(av7110, av7110->osdwin, dc->x0, dc->y0, textbuf);
		break;
	}
	case OSD_SetWindow:
		/* window ids 1..7 are valid */
		if (dc->x0 < 1 || dc->x0 > 7)
			ret = -EINVAL;
		else {
			av7110->osdwin = dc->x0;
			ret = 0;
		}
		break;
	case OSD_MoveWindow:
		ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0);
		if (!ret)
			ret = SetColorBlend(av7110, av7110->osdwin);
		break;
	case OSD_OpenRaw:
		if (dc->color < OSD_BITMAP1 || dc->color > OSD_CURSOR) {
			ret = -EINVAL;
			break;
		}
		/* bitmap window types encode bpp in their low two bits */
		if (dc->color >= OSD_BITMAP1 && dc->color <= OSD_BITMAP8HR)
			av7110->osdbpp[av7110->osdwin] = (1 << (dc->color & 3)) - 1;
		else
			av7110->osdbpp[av7110->osdwin] = 0;
		ret = CreateOSDWindow(av7110, av7110->osdwin, (osd_raw_window_t)dc->color,
				dc->x1 - dc->x0 + 1, dc->y1 - dc->y0 + 1);
		if (ret)
			break;
		if (!dc->data) {
			ret = MoveWindowAbs(av7110, av7110->osdwin, dc->x0, dc->y0);
			if (!ret)
				ret = SetColorBlend(av7110, av7110->osdwin);
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&av7110->osd_mutex);
	if (ret==-ERESTARTSYS)
		dprintk(1, "av7110_osd_cmd(%d) returns with -ERESTARTSYS\n",dc->cmd);
	else if (ret)
		dprintk(1, "av7110_osd_cmd(%d) returns with %d\n",dc->cmd,ret);
	return ret;
}
/* Report an OSD capability value. Only OSD_CAP_MEMSIZE is implemented:
 * the usable OSD memory depends on whether the board has 4M SDRAM.
 * Returns 0 on success, -EINVAL for unknown capability commands. */
int av7110_osd_capability(struct av7110 *av7110, osd_cap_t *cap)
{
	if (cap->cmd != OSD_CAP_MEMSIZE)
		return -EINVAL;

	cap->val = FW_4M_SDRAM(av7110->arm_app) ? 1000000 : 92000;
	return 0;
}
#endif /* CONFIG_DVB_AV7110_OSD */
| linux-master | drivers/staging/media/av7110/av7110_hw.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Driver for Spase SP8870 demodulator
Copyright (C) 1999 Juergen Peitz
*/
/*
* This driver needs external firmware. Please use the command
* "<kerneldir>/scripts/get_dvb_firmware alps_tdlb7" to
* download/extract it, and then copy it to /usr/lib/hotplug/firmware
* or /lib/firmware (depending on configuration of firmware hotplug).
*/
#define SP8870_DEFAULT_FIRMWARE "dvb-fe-sp8870.fw"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <media/dvb_frontend.h>
#include "sp8870.h"
struct sp8870_state {
struct i2c_adapter* i2c;
const struct sp8870_config* config;
struct dvb_frontend frontend;
/* demodulator private data */
u8 initialised:1;
};
static int debug;
#define dprintk(args...) \
do { \
if (debug) printk(KERN_DEBUG "sp8870: " args); \
} while (0)
/* firmware size for sp8870 */
#define SP8870_FIRMWARE_SIZE 16382
/* starting point for firmware in file 'Sc_main.mc' */
#define SP8870_FIRMWARE_OFFSET 0x0A
/* Write a 16-bit value to a 16-bit demod register in one 4-byte I2C
 * message (reg hi, reg lo, data hi, data lo).
 * Returns 0 on success or -EREMOTEIO on I2C failure. */
static int sp8870_writereg (struct sp8870_state* state, u16 reg, u16 data)
{
	u8 buf [] = { reg >> 8, reg & 0xff, data >> 8, data & 0xff };
	struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 4 };
	int err;

	if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) {
		dprintk ("%s: writereg error (err == %i, reg == 0x%02x, data == 0x%02x)\n", __func__, err, reg, data);
		return -EREMOTEIO;
	}

	return 0;
}
/* Read a 16-bit demod register via a write-address/read-data I2C pair.
 * Returns the (non-negative) register value, or -1 on I2C failure —
 * callers test "< 0" rather than a specific errno. */
static int sp8870_readreg (struct sp8870_state* state, u16 reg)
{
	int ret;
	u8 b0 [] = { reg >> 8 , reg & 0xff };
	u8 b1 [] = { 0, 0 };
	struct i2c_msg msg [] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 2 },
				  { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 2 } };

	ret = i2c_transfer (state->i2c, msg, 2);

	if (ret != 2) {
		dprintk("%s: readreg error (ret == %i)\n", __func__, ret);
		return -1;
	}

	return (b1[0] << 8 | b1[1]);
}
/* Upload the demod microcode to instruction RAM in chunks of up to 252
 * payload bytes per I2C message (each prefixed with register 0xCF0A).
 * The firmware image starts SP8870_FIRMWARE_OFFSET bytes into the blob.
 * Returns 0 on success, -EINVAL for a short blob, or the i2c_transfer
 * result on bus failure.
 *
 * Fixes vs. the previous version: fw_buf now matches the const u8 * type
 * of fw->data (was const char *, a pointer-sign mismatch), and the stray
 * semicolon after the function body has been removed.
 */
static int sp8870_firmware_upload (struct sp8870_state* state, const struct firmware *fw)
{
	struct i2c_msg msg;
	const u8 *fw_buf = fw->data;
	int fw_pos;
	u8 tx_buf[255];
	int tx_len;
	int err = 0;

	dprintk ("%s: ...\n", __func__);

	/* sanity check: blob must contain the full image past the header */
	if (fw->size < SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET)
		return -EINVAL;

	// system controller stop
	sp8870_writereg(state, 0x0F00, 0x0000);

	// instruction RAM register hiword
	sp8870_writereg(state, 0x8F08, ((SP8870_FIRMWARE_SIZE / 2) & 0xFFFF));

	// instruction RAM MWR
	sp8870_writereg(state, 0x8F0A, ((SP8870_FIRMWARE_SIZE / 2) >> 16));

	// do firmware upload
	fw_pos = SP8870_FIRMWARE_OFFSET;
	while (fw_pos < SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET){
		/* full 252-byte chunk, or whatever remains at the end */
		tx_len = (fw_pos <= SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET - 252) ? 252 : SP8870_FIRMWARE_SIZE + SP8870_FIRMWARE_OFFSET - fw_pos;
		// write register 0xCF0A
		tx_buf[0] = 0xCF;
		tx_buf[1] = 0x0A;
		memcpy(&tx_buf[2], fw_buf + fw_pos, tx_len);
		msg.addr = state->config->demod_address;
		msg.flags = 0;
		msg.buf = tx_buf;
		msg.len = tx_len + 2;
		if ((err = i2c_transfer (state->i2c, &msg, 1)) != 1) {
			printk("%s: firmware upload failed!\n", __func__);
			printk ("%s: i2c error (err == %i)\n", __func__, err);
			return err;
		}
		fw_pos += tx_len;
	}

	dprintk ("%s: done!\n", __func__);
	return 0;
}
/* Halt the demod's internal microcontroller (clears 0x0F08/0x0F09, then
 * writes the STOP command to 0x0F00). */
static void sp8870_microcontroller_stop (struct sp8870_state* state)
{
	sp8870_writereg(state, 0x0F08, 0x000);
	sp8870_writereg(state, 0x0F09, 0x000);

	// microcontroller STOP
	sp8870_writereg(state, 0x0F00, 0x000);
}
/* Start the demod's internal microcontroller. The trailing read of 0x0D01
 * is an undocumented quirk needed for a correct data-valid signal. */
static void sp8870_microcontroller_start (struct sp8870_state* state)
{
	sp8870_writereg(state, 0x0F08, 0x000);
	sp8870_writereg(state, 0x0F09, 0x000);

	// microcontroller START
	sp8870_writereg(state, 0x0F00, 0x001);
	// not documented but if we don't read 0x0D01 out here
	// we don't get a correct data valid signal
	sp8870_readreg(state, 0x0D01);
}
/* Return non-zero when the demod reports valid data (register 0x0D02 > 0).
 * A failed read (-1) also yields 0, i.e. "not valid". */
static int sp8870_read_data_valid_signal(struct sp8870_state* state)
{
	return (sp8870_readreg(state, 0x0D02) > 0);
}
/* Build the value for demod register 0xC05 from the tuning properties:
 * bits [11:10] constellation, bits [9:7] hierarchy, bits [5:3] HP code
 * rate, bits [2:1] "use given parameters" vs. "autoprobe". If any of the
 * three fields is AUTO, the demod is told to autoprobe. Returns 0, or
 * -EINVAL for a value the hardware cannot represent. */
static int configure_reg0xc05 (struct dtv_frontend_properties *p, u16 *reg0xc05)
{
	int have_all_params = 1;

	*reg0xc05 = 0x000;

	/* bits [11:10]: constellation */
	switch (p->modulation) {
	case QPSK:
		break;
	case QAM_16:
		*reg0xc05 |= (1 << 10);
		break;
	case QAM_64:
		*reg0xc05 |= (2 << 10);
		break;
	case QAM_AUTO:
		have_all_params = 0;
		break;
	default:
		return -EINVAL;
	}

	/* bits [9:7]: hierarchy */
	switch (p->hierarchy) {
	case HIERARCHY_NONE:
		break;
	case HIERARCHY_1:
		*reg0xc05 |= (1 << 7);
		break;
	case HIERARCHY_2:
		*reg0xc05 |= (2 << 7);
		break;
	case HIERARCHY_4:
		*reg0xc05 |= (3 << 7);
		break;
	case HIERARCHY_AUTO:
		have_all_params = 0;
		break;
	default:
		return -EINVAL;
	}

	/* bits [5:3]: high-priority stream code rate */
	switch (p->code_rate_HP) {
	case FEC_1_2:
		break;
	case FEC_2_3:
		*reg0xc05 |= (1 << 3);
		break;
	case FEC_3_4:
		*reg0xc05 |= (2 << 3);
		break;
	case FEC_5_6:
		*reg0xc05 |= (3 << 3);
		break;
	case FEC_7_8:
		*reg0xc05 |= (4 << 3);
		break;
	case FEC_AUTO:
		have_all_params = 0;
		break;
	default:
		return -EINVAL;
	}

	/* bits [2:1]: fixed parameters (2) or autoprobing (1) */
	*reg0xc05 |= have_all_params ? (2 << 1) : (1 << 1);

	return 0;
}
/* Wake the demod: enable the TS output and interface pins via register
 * 0xC18 (counterpart of sp8870_sleep, which writes 0x000). */
static int sp8870_wake_up(struct sp8870_state* state)
{
	// enable TS output and interface pins
	return sp8870_writereg(state, 0xC18, 0x00D);
}
/* Program one complete tuning attempt: stop the microcontroller, let the
 * tuner set its parameters, configure sample rate / carrier offset /
 * bandwidth filter / scan order / reg 0xC05, clear pending IRQs, and
 * restart the microcontroller. Returns 0 or a negative error. */
static int sp8870_set_frontend_parameters(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct sp8870_state* state = fe->demodulator_priv;
	int err;
	u16 reg0xc05;

	if ((err = configure_reg0xc05(p, &reg0xc05)))
		return err;

	// system controller stop
	sp8870_microcontroller_stop(state);

	// set tuner parameters
	if (fe->ops.tuner_ops.set_params) {
		fe->ops.tuner_ops.set_params(fe);
		/* close the I2C gate again after talking to the tuner */
		if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0);
	}

	// sample rate correction bit [23..17]
	sp8870_writereg(state, 0x0319, 0x000A);

	// sample rate correction bit [16..0]
	sp8870_writereg(state, 0x031A, 0x0AAB);

	// integer carrier offset
	sp8870_writereg(state, 0x0309, 0x0400);

	// fractional carrier offset
	sp8870_writereg(state, 0x030A, 0x0000);

	// filter for 6/7/8 Mhz channel
	if (p->bandwidth_hz == 6000000)
		sp8870_writereg(state, 0x0311, 0x0002);
	else if (p->bandwidth_hz == 7000000)
		sp8870_writereg(state, 0x0311, 0x0001);
	else
		sp8870_writereg(state, 0x0311, 0x0000);

	// scan order: 2k first = 0x0000, 8k first = 0x0001
	if (p->transmission_mode == TRANSMISSION_MODE_2K)
		sp8870_writereg(state, 0x0338, 0x0000);
	else
		sp8870_writereg(state, 0x0338, 0x0001);

	sp8870_writereg(state, 0xc05, reg0xc05);

	// read status reg in order to clear pending irqs
	err = sp8870_readreg(state, 0x200);
	if (err < 0)
		return err;

	// system controller start
	sp8870_microcontroller_start(state);

	return 0;
}
/* One-time frontend init: wake the demod, request and upload the external
 * firmware (blocking until user space provides it), then program the basic
 * operating registers. Subsequent calls only re-enable the TS output.
 * Returns 0 or -EIO on firmware failure. */
static int sp8870_init (struct dvb_frontend* fe)
{
	struct sp8870_state* state = fe->demodulator_priv;
	const struct firmware *fw = NULL;

	sp8870_wake_up(state);
	if (state->initialised) return 0;
	state->initialised = 1;

	dprintk ("%s\n", __func__);


	/* request the firmware, this will block until someone uploads it */
	printk("sp8870: waiting for firmware upload (%s)...\n", SP8870_DEFAULT_FIRMWARE);
	if (state->config->request_firmware(fe, &fw, SP8870_DEFAULT_FIRMWARE)) {
		printk("sp8870: no firmware upload (timeout or file not found?)\n");
		return -EIO;
	}

	if (sp8870_firmware_upload(state, fw)) {
		printk("sp8870: writing firmware to device failed\n");
		release_firmware(fw);
		return -EIO;
	}
	release_firmware(fw);
	printk("sp8870: firmware upload complete\n");

	/* enable TS output and interface pins */
	sp8870_writereg(state, 0xc18, 0x00d);

	// system controller stop
	sp8870_microcontroller_stop(state);

	// ADC mode
	sp8870_writereg(state, 0x0301, 0x0003);

	// Reed Solomon parity bytes passed to output
	sp8870_writereg(state, 0x0C13, 0x0001);

	// MPEG clock is suppressed if no valid data
	sp8870_writereg(state, 0x0C14, 0x0001);

	/* bit 0x010: enable data valid signal */
	sp8870_writereg(state, 0x0D00, 0x010);
	sp8870_writereg(state, 0x0D01, 0x000);

	return 0;
}
/* Report lock status: signal strength comes from register 0x0303 (any value
 * above 0x0F counts as "has signal"), sync and lock from bits 3 and 2 of
 * status register 0x0200. Returns 0, or -EIO on a failed register read. */
static int sp8870_read_status(struct dvb_frontend *fe,
			      enum fe_status *fe_status)
{
	struct sp8870_state* state = fe->demodulator_priv;
	int status;
	int signal;

	*fe_status = 0;

	status = sp8870_readreg (state, 0x0200);
	if (status < 0)
		return -EIO;

	signal = sp8870_readreg (state, 0x0303);
	if (signal < 0)
		return -EIO;

	if (signal > 0x0F)
		*fe_status |= FE_HAS_SIGNAL;
	if (status & 0x08)
		*fe_status |= FE_HAS_SYNC;
	if (status & 0x04)
		/* lock implies carrier and viterbi as well */
		*fe_status |= FE_HAS_LOCK | FE_HAS_CARRIER | FE_HAS_VITERBI;

	return 0;
}
/* Read the bit error rate counter, which is split over two registers:
 * 0xC08 holds bits [5:0] and 0xC07 the bits above. A (nearly) saturated
 * counter is reported as ~0 ("out of range"). Returns 0 or -EIO.
 *
 * Bug fix: the high part was previously assigned with "tmp = ret << 6",
 * overwriting — and thereby discarding — the low 6 bits read from 0xC08.
 * It must be OR-ed in instead.
 */
static int sp8870_read_ber (struct dvb_frontend* fe, u32 * ber)
{
	struct sp8870_state* state = fe->demodulator_priv;
	int ret;
	u32 tmp;

	*ber = 0;

	ret = sp8870_readreg(state, 0xC08);
	if (ret < 0)
		return -EIO;
	tmp = ret & 0x3F;		/* counter bits [5:0] */

	ret = sp8870_readreg(state, 0xC07);
	if (ret < 0)
		return -EIO;
	tmp |= ret << 6;		/* combine with the upper bits */

	/* saturated counter means the BER is out of measurable range */
	if (tmp >= 0x3FFF0)
		tmp = ~0;

	*ber = tmp;
	return 0;
}
/* Read signal strength from registers 0x306 (high byte) and 0x303 (low
 * byte). The raw value is inverted (0xFFFF - tmp) so that bigger means
 * stronger; a raw 0 is reported as 0. Returns 0 or -EIO. */
static int sp8870_read_signal_strength(struct dvb_frontend* fe,  u16 * signal)
{
	struct sp8870_state* state = fe->demodulator_priv;
	int ret;
	u16 tmp;

	*signal = 0;

	ret = sp8870_readreg (state, 0x306);
	if (ret < 0)
		return -EIO;
	tmp = ret << 8;

	ret = sp8870_readreg (state, 0x303);
	if (ret < 0)
		return -EIO;
	tmp |= ret;

	if (tmp)
		*signal = 0xFFFF - tmp;

	return 0;
}
/* Read the uncorrected-blocks counter from register 0xC0C. A saturated
 * hardware counter (0xFFFF) is reported as ~0. Returns 0 or -EIO. */
static int sp8870_read_uncorrected_blocks (struct dvb_frontend* fe, u32* ublocks)
{
	struct sp8870_state* state = fe->demodulator_priv;
	int ret;

	*ublocks = 0;

	ret = sp8870_readreg(state, 0xC0C);
	if (ret < 0)
		return -EIO;
	if (ret == 0xFFFF)
		ret = ~0;

	*ublocks = ret;
	return 0;
}
/* number of trials to recover from lockup */
#define MAXTRIALS 5
/* maximum checks for data valid signal */
#define MAXCHECKS 100
/* only for debugging: counter for detected lockups */
static int lockups;
/* only for debugging: counter for channel switches */
static int switches;
/* Tune the frontend with lockup recovery.
 *
 * The firmware of the sp8870 sometimes locks up after setting frontend
 * parameters. We detect this by polling the data valid signal: if it is
 * not set after MAXCHECKS polls (10 us apart), the parameters are set
 * again, up to MAXTRIALS times. Returns 0 on success, a parameter-setting
 * error, or -EIO when the firmware never comes back.
 *
 * Cleanup vs. the previous version: the inner "if (valid)" inside the
 * debug block was dead code — valid is guaranteed true there, because the
 * !valid case already returned -EIO above. A stale commented-out register
 * poll was removed as well.
 */
static int sp8870_set_frontend(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *p = &fe->dtv_property_cache;
	struct sp8870_state* state = fe->demodulator_priv;
	int err = 0;
	int valid = 0;
	int trials = 0;
	int check_count = 0;

	dprintk("%s: frequency = %i\n", __func__, p->frequency);

	for (trials = 1; trials <= MAXTRIALS; trials++) {
		err = sp8870_set_frontend_parameters(fe);
		if (err)
			return err;

		/* poll the data valid signal for up to MAXCHECKS * 10 us */
		for (check_count = 0; check_count < MAXCHECKS; check_count++) {
			valid = sp8870_read_data_valid_signal(state);
			if (valid) {
				dprintk("%s: delay = %i usec\n",
					__func__, check_count * 10);
				break;
			}
			udelay(10);
		}
		if (valid)
			break;
	}

	if (!valid) {
		printk("%s: firmware crash!!!!!!\n", __func__);
		return -EIO;
	}

	if (debug) {
		/* valid is necessarily true here; a lockup occurred iff
		 * more than one trial was needed */
		if (trials > 1) {
			printk("%s: firmware lockup!!!\n", __func__);
			printk("%s: recovered after %i trial(s))\n", __func__, trials - 1);
			lockups++;
		}
		switches++;
		printk("%s: switches = %i lockups = %i\n", __func__, switches, lockups);
	}

	return 0;
}
/* Put the demod to sleep: tristate the TS output and disable the interface
 * pins (register 0xC18; sp8870_wake_up writes 0x00D to re-enable). */
static int sp8870_sleep(struct dvb_frontend* fe)
{
	struct sp8870_state* state = fe->demodulator_priv;

	// tristate TS output and disable interface pins
	return sp8870_writereg(state, 0xC18, 0x000);
}
/* Advise the tuning algorithm: wait 350 ms per attempt, no zig-zag
 * stepping (step_size and max_drift are zero). Always returns 0. */
static int sp8870_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings* fesettings)
{
	fesettings->min_delay_ms = 350;
	fesettings->step_size = 0;
	fesettings->max_drift = 0;
	return 0;
}
/* Open (enable != 0) or close the I2C gate to the tuner behind the demod.
 * Register 0x206 controls the gate; returns the writereg status. */
static int sp8870_i2c_gate_ctrl(struct dvb_frontend* fe, int enable)
{
	struct sp8870_state* state = fe->demodulator_priv;

	return sp8870_writereg(state, 0x206, enable ? 0x001 : 0x000);
}
/* Free the frontend's private state (called when the frontend is
 * detached; the dvb_frontend itself is embedded in the state). */
static void sp8870_release(struct dvb_frontend* fe)
{
	struct sp8870_state* state = fe->demodulator_priv;
	kfree(state);
}
static const struct dvb_frontend_ops sp8870_ops;
/* Allocate the driver state, probe for the demod on the I2C bus (one test
 * read of register 0x0200), and hand back the embedded dvb_frontend.
 * Returns NULL on allocation failure or if no demod answers. */
struct dvb_frontend* sp8870_attach(const struct sp8870_config* config,
				   struct i2c_adapter* i2c)
{
	struct sp8870_state* state = NULL;

	/* allocate memory for the internal state */
	state = kzalloc(sizeof(struct sp8870_state), GFP_KERNEL);
	if (state == NULL) goto error;

	/* setup the state */
	state->config = config;
	state->i2c = i2c;
	state->initialised = 0;

	/* check if the demod is there */
	if (sp8870_readreg(state, 0x0200) < 0) goto error;

	/* create dvb_frontend */
	memcpy(&state->frontend.ops, &sp8870_ops, sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	return &state->frontend;

error:
	/* kfree(NULL) is a no-op, so a failed allocation is safe here */
	kfree(state);
	return NULL;
}
static const struct dvb_frontend_ops sp8870_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "Spase SP8870 DVB-T",
.frequency_min_hz = 470 * MHz,
.frequency_max_hz = 860 * MHz,
.frequency_stepsize_hz = 166666,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 |
FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 |
FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER
},
.release = sp8870_release,
.init = sp8870_init,
.sleep = sp8870_sleep,
.i2c_gate_ctrl = sp8870_i2c_gate_ctrl,
.set_frontend = sp8870_set_frontend,
.get_tune_settings = sp8870_get_tune_settings,
.read_status = sp8870_read_status,
.read_ber = sp8870_read_ber,
.read_signal_strength = sp8870_read_signal_strength,
.read_ucblocks = sp8870_read_uncorrected_blocks,
};
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("Spase SP8870 DVB-T Demodulator driver");
MODULE_AUTHOR("Juergen Peitz");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL_GPL(sp8870_attach);
| linux-master | drivers/staging/media/av7110/sp8870.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* TI OMAP4 ISS V4L2 Driver - ISP IPIPE module
*
* Copyright (C) 2012 Texas Instruments, Inc.
*
* Author: Sergio Aguirre <[email protected]>
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include "iss.h"
#include "iss_regs.h"
#include "iss_ipipe.h"
static struct v4l2_mbus_framefmt *
__ipipe_get_format(struct iss_ipipe_device *ipipe,
struct v4l2_subdev_state *sd_state,
unsigned int pad,
enum v4l2_subdev_format_whence which);
static const unsigned int ipipe_fmts[] = {
MEDIA_BUS_FMT_SGRBG10_1X10,
MEDIA_BUS_FMT_SRGGB10_1X10,
MEDIA_BUS_FMT_SBGGR10_1X10,
MEDIA_BUS_FMT_SGBRG10_1X10,
};
/*
* ipipe_print_status - Print current IPIPE Module register values.
* @ipipe: Pointer to ISS ISP IPIPE device.
*
* Also prints other debug information stored in the IPIPE module.
*/
#define IPIPE_PRINT_REGISTER(iss, name)\
dev_dbg(iss->dev, "###IPIPE " #name "=0x%08x\n", \
iss_reg_read(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_##name))
/* Dump the IPIPE source-configuration registers via dev_dbg (no output
 * unless dynamic debug is enabled for this device). */
static void ipipe_print_status(struct iss_ipipe_device *ipipe)
{
	struct iss_device *iss = to_iss_device(ipipe);

	dev_dbg(iss->dev, "-------------IPIPE Register dump-------------\n");

	IPIPE_PRINT_REGISTER(iss, SRC_EN);
	IPIPE_PRINT_REGISTER(iss, SRC_MODE);
	IPIPE_PRINT_REGISTER(iss, SRC_FMT);
	IPIPE_PRINT_REGISTER(iss, SRC_COL);
	IPIPE_PRINT_REGISTER(iss, SRC_VPS);
	IPIPE_PRINT_REGISTER(iss, SRC_VSZ);
	IPIPE_PRINT_REGISTER(iss, SRC_HPS);
	IPIPE_PRINT_REGISTER(iss, SRC_HSZ);
	IPIPE_PRINT_REGISTER(iss, GCK_MMR);
	IPIPE_PRINT_REGISTER(iss, YUV_PHS);

	dev_dbg(iss->dev, "-----------------------------------------------\n");
}
/*
* ipipe_enable - Enable/Disable IPIPE.
* @enable: enable flag
*
*/
/* Set or clear the IPIPE_SRC_EN_EN bit, starting or stopping the IPIPE
 * processing block. */
static void ipipe_enable(struct iss_ipipe_device *ipipe, u8 enable)
{
	struct iss_device *iss = to_iss_device(ipipe);

	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_EN,
		       IPIPE_SRC_EN_EN, enable ? IPIPE_SRC_EN_EN : 0);
}
/* -----------------------------------------------------------------------------
* Format- and pipeline-related configuration helpers
*/
/* Program the IPIPE source registers from the active sink-pad format:
 * RAW->YUV conversion, YUV444->422 downsampling, crop offsets, and the
 * frame size. Only the RGB-in / YUV422-out pipeline is supported. */
static void ipipe_configure(struct iss_ipipe_device *ipipe)
{
	struct iss_device *iss = to_iss_device(ipipe);
	struct v4l2_mbus_framefmt *format;

	/* IPIPE_PAD_SINK */
	format = &ipipe->formats[IPIPE_PAD_SINK];

	/* NOTE: Currently just supporting pipeline IN: RGB, OUT: YUV422 */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_FMT,
		      IPIPE_SRC_FMT_RAW2YUV);

	/* Enable YUV444 -> YUV422 conversion */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_YUV_PHS,
		      IPIPE_YUV_PHS_LPF);

	/* no crop offset; sizes are programmed as (value - 1) / (value - 2)
	 * per the register definitions */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_VPS, 0);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_HPS, 0);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_VSZ,
		      (format->height - 2) & IPIPE_SRC_VSZ_MASK);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_HSZ,
		      (format->width - 1) & IPIPE_SRC_HSZ_MASK);

	/* Ignore ipipeif_wrt signal, and operate on-the-fly.  */
	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_MODE,
		    IPIPE_SRC_MODE_WRT | IPIPE_SRC_MODE_OST);

	/* HACK: Values tuned for Ducati SW (OV) */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_SRC_COL,
		      IPIPE_SRC_COL_EE_B | IPIPE_SRC_COL_EO_GB |
		      IPIPE_SRC_COL_OE_GR | IPIPE_SRC_COL_OO_R);

	/* IPIPE_PAD_SOURCE_VP */
	format = &ipipe->formats[IPIPE_PAD_SOURCE_VP];
	/* Do nothing? */
}
/* -----------------------------------------------------------------------------
* V4L2 subdev operations
*/
/*
* ipipe_set_stream - Enable/Disable streaming on the IPIPE module
* @sd: ISP IPIPE V4L2 subdevice
* @enable: Enable/disable stream
*/
static int ipipe_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
	struct iss_device *iss = to_iss_device(ipipe);
	int ret = 0;

	if (ipipe->state == ISS_PIPELINE_STREAM_STOPPED) {
		/* stopping an already-stopped module is a no-op */
		if (enable == ISS_PIPELINE_STREAM_STOPPED)
			return 0;

		/* bring up the module clocks before touching registers */
		omap4iss_isp_subclk_enable(iss, OMAP4_ISS_ISP_SUBCLK_IPIPE);

		/* Enable clk_arm_g0 */
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_GCK_MMR,
			      IPIPE_GCK_MMR_REG);

		/* Enable clk_pix_g[3:0] */
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_IPIPE, IPIPE_GCK_PIX,
			      IPIPE_GCK_PIX_G3 | IPIPE_GCK_PIX_G2 |
			      IPIPE_GCK_PIX_G1 | IPIPE_GCK_PIX_G0);
	}

	switch (enable) {
	case ISS_PIPELINE_STREAM_CONTINUOUS:

		ipipe_configure(ipipe);
		ipipe_print_status(ipipe);

		atomic_set(&ipipe->stopping, 0);
		ipipe_enable(ipipe, 1);
		break;

	case ISS_PIPELINE_STREAM_STOPPED:
		if (ipipe->state == ISS_PIPELINE_STREAM_STOPPED)
			return 0;
		/* wait for the module to go idle before cutting clocks */
		if (omap4iss_module_sync_idle(&sd->entity, &ipipe->wait,
					      &ipipe->stopping))
			ret = -ETIMEDOUT;

		ipipe_enable(ipipe, 0);
		omap4iss_isp_subclk_disable(iss, OMAP4_ISS_ISP_SUBCLK_IPIPE);
		break;
	}

	ipipe->state = enable;
	return ret;
}
/* Return the format backing store for @pad: the per-filehandle try format
 * for V4L2_SUBDEV_FORMAT_TRY, the device's active format otherwise. */
static struct v4l2_mbus_framefmt *
__ipipe_get_format(struct iss_ipipe_device *ipipe,
		   struct v4l2_subdev_state *sd_state,
		   unsigned int pad,
		   enum v4l2_subdev_format_whence which)
{
	return which == V4L2_SUBDEV_FORMAT_TRY
	     ? v4l2_subdev_get_try_format(&ipipe->subdev, sd_state, pad)
	     : &ipipe->formats[pad];
}
/*
* ipipe_try_format - Try video format on a pad
* @ipipe: ISS IPIPE device
* @cfg: V4L2 subdev pad config
* @pad: Pad number
* @fmt: Format
*/
static void
ipipe_try_format(struct iss_ipipe_device *ipipe,
		 struct v4l2_subdev_state *sd_state,
		 unsigned int pad,
		 struct v4l2_mbus_framefmt *fmt,
		 enum v4l2_subdev_format_whence which)
{
	struct v4l2_mbus_framefmt *format;
	unsigned int width = fmt->width;
	unsigned int height = fmt->height;
	unsigned int i;

	switch (pad) {
	case IPIPE_PAD_SINK:
		/* only the supported Bayer codes are accepted on the sink */
		for (i = 0; i < ARRAY_SIZE(ipipe_fmts); i++) {
			if (fmt->code == ipipe_fmts[i])
				break;
		}

		/* If not found, use SGRBG10 as default */
		if (i >= ARRAY_SIZE(ipipe_fmts))
			fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;

		/* Clamp the input size. */
		fmt->width = clamp_t(u32, width, 1, 8192);
		fmt->height = clamp_t(u32, height, 1, 8192);
		fmt->colorspace = V4L2_COLORSPACE_SRGB;
		break;

	case IPIPE_PAD_SOURCE_VP:
		/* the source inherits the sink format, converted to YUV422;
		 * it can only shrink the frame, never grow it */
		format = __ipipe_get_format(ipipe, sd_state, IPIPE_PAD_SINK,
					    which);
		memcpy(fmt, format, sizeof(*fmt));

		fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
		fmt->width = clamp_t(u32, width, 32, fmt->width);
		fmt->height = clamp_t(u32, height, 32, fmt->height);
		fmt->colorspace = V4L2_COLORSPACE_JPEG;
		break;
	}

	fmt->field = V4L2_FIELD_NONE;
}
/*
* ipipe_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
* @cfg : V4L2 subdev pad config
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int ipipe_enum_mbus_code(struct v4l2_subdev *sd,
				struct v4l2_subdev_state *sd_state,
				struct v4l2_subdev_mbus_code_enum *code)
{
	switch (code->pad) {
	case IPIPE_PAD_SINK:
		/* sink accepts the supported Bayer codes */
		if (code->index >= ARRAY_SIZE(ipipe_fmts))
			return -EINVAL;

		code->code = ipipe_fmts[code->index];
		break;

	case IPIPE_PAD_SOURCE_VP:
		/* FIXME: Forced format conversion inside IPIPE ? */
		if (code->index != 0)
			return -EINVAL;

		code->code = MEDIA_BUS_FMT_UYVY8_1X16;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/* Enumerate the supported frame-size range for a pad by probing
 * ipipe_try_format() with minimal (1x1) and maximal ((u32)-1) sizes and
 * letting the clamping determine the actual bounds. */
static int ipipe_enum_frame_size(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_frame_size_enum *fse)
{
	struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	format.code = fse->code;
	format.width = 1;
	format.height = 1;
	ipipe_try_format(ipipe, sd_state, fse->pad, &format, fse->which);
	fse->min_width = format.width;
	fse->min_height = format.height;

	/* the code must have survived try_format for this pad */
	if (format.code != fse->code)
		return -EINVAL;

	format.code = fse->code;
	format.width = -1;
	format.height = -1;
	ipipe_try_format(ipipe, sd_state, fse->pad, &format, fse->which);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}
/*
* ipipe_get_format - Retrieve the video format on a pad
* @sd : ISP IPIPE V4L2 subdevice
* @cfg: V4L2 subdev pad config
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
static int ipipe_get_format(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *sd_state,
			    struct v4l2_subdev_format *fmt)
{
	struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	/* copy out the try or active format for the requested pad */
	format = __ipipe_get_format(ipipe, sd_state, fmt->pad, fmt->which);
	if (!format)
		return -EINVAL;

	fmt->format = *format;
	return 0;
}
/*
* ipipe_set_format - Set the video format on a pad
* @sd : ISP IPIPE V4L2 subdevice
* @cfg: V4L2 subdev pad config
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
static int ipipe_set_format(struct v4l2_subdev *sd,
			    struct v4l2_subdev_state *sd_state,
			    struct v4l2_subdev_format *fmt)
{
	struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __ipipe_get_format(ipipe, sd_state, fmt->pad, fmt->which);
	if (!format)
		return -EINVAL;

	/* adjust the requested format to hardware constraints, then store it */
	ipipe_try_format(ipipe, sd_state, fmt->pad, &fmt->format, fmt->which);
	*format = fmt->format;

	/* Propagate the format from sink to source */
	if (fmt->pad == IPIPE_PAD_SINK) {
		format = __ipipe_get_format(ipipe, sd_state,
					    IPIPE_PAD_SOURCE_VP,
					    fmt->which);
		*format = fmt->format;
		ipipe_try_format(ipipe, sd_state, IPIPE_PAD_SOURCE_VP, format,
				 fmt->which);
	}

	return 0;
}
static int ipipe_link_validate(struct v4l2_subdev *sd, struct media_link *link,
struct v4l2_subdev_format *source_fmt,
struct v4l2_subdev_format *sink_fmt)
{
/* Check if the two ends match */
if (source_fmt->format.width != sink_fmt->format.width ||
source_fmt->format.height != sink_fmt->format.height)
return -EPIPE;
if (source_fmt->format.code != sink_fmt->format.code)
return -EPIPE;
return 0;
}
/*
* ipipe_init_formats - Initialize formats on all pads
* @sd: ISP IPIPE V4L2 subdevice
* @fh: V4L2 subdev file handle
*
* Initialize all pad formats with default values. If fh is not NULL, try
* formats are initialized on the file handle. Otherwise active formats are
* initialized on the device.
*/
static int ipipe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_subdev_format format;

	/* default sink format: 4096x4096 SGRBG10; set_format propagates it
	 * to the source pad as well */
	memset(&format, 0, sizeof(format));
	format.pad = IPIPE_PAD_SINK;
	format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
	format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
	format.format.width = 4096;
	format.format.height = 4096;
	ipipe_set_format(sd, fh ? fh->state : NULL, &format);

	return 0;
}
/* V4L2 subdev video operations */
static const struct v4l2_subdev_video_ops ipipe_v4l2_video_ops = {
.s_stream = ipipe_set_stream,
};
/* V4L2 subdev pad operations */
static const struct v4l2_subdev_pad_ops ipipe_v4l2_pad_ops = {
.enum_mbus_code = ipipe_enum_mbus_code,
.enum_frame_size = ipipe_enum_frame_size,
.get_fmt = ipipe_get_format,
.set_fmt = ipipe_set_format,
.link_validate = ipipe_link_validate,
};
/* V4L2 subdev operations */
static const struct v4l2_subdev_ops ipipe_v4l2_ops = {
.video = &ipipe_v4l2_video_ops,
.pad = &ipipe_v4l2_pad_ops,
};
/* V4L2 subdev internal operations */
static const struct v4l2_subdev_internal_ops ipipe_v4l2_internal_ops = {
.open = ipipe_init_formats,
};
/* -----------------------------------------------------------------------------
* Media entity operations
*/
/*
* ipipe_link_setup - Setup IPIPE connections
* @entity: IPIPE media entity
* @local: Pad at the local end of the link
* @remote: Pad at the remote end of the link
* @flags: Link flags
*
* return -EINVAL or zero on success
*/
static int ipipe_link_setup(struct media_entity *entity,
			    const struct media_pad *local,
			    const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct iss_ipipe_device *ipipe = v4l2_get_subdevdata(sd);
	struct iss_device *iss = to_iss_device(ipipe);

	if (!is_media_entity_v4l2_subdev(remote->entity))
		return -EINVAL;

	switch (local->index) {
	case IPIPE_PAD_SINK:
		/* Read from IPIPEIF. */
		if (!(flags & MEDIA_LNK_FL_ENABLED)) {
			ipipe->input = IPIPE_INPUT_NONE;
			break;
		}

		/* only one active input link at a time */
		if (ipipe->input != IPIPE_INPUT_NONE)
			return -EBUSY;

		if (remote->entity == &iss->ipipeif.subdev.entity)
			ipipe->input = IPIPE_INPUT_IPIPEIF;

		break;

	case IPIPE_PAD_SOURCE_VP:
		/* Send to RESIZER */
		if (flags & MEDIA_LNK_FL_ENABLED) {
			/* refuse if another output is already active */
			if (ipipe->output & ~IPIPE_OUTPUT_VP)
				return -EBUSY;
			ipipe->output |= IPIPE_OUTPUT_VP;
		} else {
			ipipe->output &= ~IPIPE_OUTPUT_VP;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/* media operations */
static const struct media_entity_operations ipipe_media_ops = {
.link_setup = ipipe_link_setup,
.link_validate = v4l2_subdev_link_validate,
};
/*
* ipipe_init_entities - Initialize V4L2 subdev and media entity
* @ipipe: ISS ISP IPIPE module
*
* Return 0 on success and a negative error code on failure.
*/
static int ipipe_init_entities(struct iss_ipipe_device *ipipe)
{
	struct v4l2_subdev *sd = &ipipe->subdev;
	struct media_pad *pads = ipipe->pads;
	struct media_entity *me = &sd->entity;
	int ret;

	ipipe->input = IPIPE_INPUT_NONE;

	/* set up the subdev: ops, name, group id, private data, devnode */
	v4l2_subdev_init(sd, &ipipe_v4l2_ops);
	sd->internal_ops = &ipipe_v4l2_internal_ops;
	strscpy(sd->name, "OMAP4 ISS ISP IPIPE", sizeof(sd->name));
	sd->grp_id = BIT(16);	/* group ID for iss subdevs */
	v4l2_set_subdevdata(sd, ipipe);
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;

	/* one sink pad (from IPIPEIF), one source pad (to the resizer) */
	pads[IPIPE_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	pads[IPIPE_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;

	me->ops = &ipipe_media_ops;
	ret = media_entity_pads_init(me, IPIPE_PADS_NUM, pads);
	if (ret < 0)
		return ret;

	ipipe_init_formats(sd, NULL);

	return 0;
}
/* Unregister the IPIPE subdev from its V4L2 device. */
void omap4iss_ipipe_unregister_entities(struct iss_ipipe_device *ipipe)
{
	struct v4l2_subdev *sd = &ipipe->subdev;

	v4l2_device_unregister_subdev(sd);
}
/*
 * Register the IPIPE subdev with the given V4L2 device. On failure the
 * partially-registered state is torn down and the error code is returned.
 */
int omap4iss_ipipe_register_entities(struct iss_ipipe_device *ipipe,
				     struct v4l2_device *vdev)
{
	int ret = v4l2_device_register_subdev(vdev, &ipipe->subdev);

	if (ret < 0) {
		omap4iss_ipipe_unregister_entities(ipipe);
		return ret;
	}

	return 0;
}
/* -----------------------------------------------------------------------------
* ISP IPIPE initialisation and cleanup
*/
/*
* omap4iss_ipipe_init - IPIPE module initialization.
* @iss: Device pointer specific to the OMAP4 ISS.
*
* TODO: Get the initialisation values from platform data.
*
* Return 0 on success or a negative error code otherwise.
*/
int omap4iss_ipipe_init(struct iss_device *iss)
{
	struct iss_ipipe_device *ipipe = &iss->ipipe;

	/* Start stopped; streaming is enabled later through the pipeline. */
	init_waitqueue_head(&ipipe->wait);
	ipipe->state = ISS_PIPELINE_STREAM_STOPPED;

	return ipipe_init_entities(ipipe);
}
/*
* omap4iss_ipipe_cleanup - IPIPE module cleanup.
* @iss: Device pointer specific to the OMAP4 ISS.
*/
/* Release the media entity resources of the IPIPE module. */
void omap4iss_ipipe_cleanup(struct iss_device *iss)
{
	media_entity_cleanup(&iss->ipipe.subdev.entity);
}
| linux-master | drivers/staging/media/omap4iss/iss_ipipe.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* TI OMAP4 ISS V4L2 Driver - CSI PHY module
*
* Copyright (C) 2012 Texas Instruments, Inc.
*
* Author: Sergio Aguirre <[email protected]>
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/regmap.h>
#include "../../../../arch/arm/mach-omap2/control.h"
#include "iss.h"
#include "iss_regs.h"
#include "iss_csiphy.h"
/*
* csiphy_lanes_config - Configuration of CSIPHY lanes.
*
* Updates HW configuration.
* Called with phy->mutex taken.
*/
static void csiphy_lanes_config(struct iss_csiphy *phy)
{
	unsigned int i;
	u32 reg;

	/* Read-modify-write the complex IO configuration register. */
	reg = iss_reg_read(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG);

	/* Program polarity and position for every data lane (1-based). */
	for (i = 0; i < phy->max_data_lanes; i++) {
		reg &= ~(CSI2_COMPLEXIO_CFG_DATA_POL(i + 1) |
			 CSI2_COMPLEXIO_CFG_DATA_POSITION_MASK(i + 1));
		reg |= (phy->lanes.data[i].pol ?
			CSI2_COMPLEXIO_CFG_DATA_POL(i + 1) : 0);
		reg |= (phy->lanes.data[i].pos <<
			CSI2_COMPLEXIO_CFG_DATA_POSITION_SHIFT(i + 1));
	}

	/* Same for the clock lane. */
	reg &= ~(CSI2_COMPLEXIO_CFG_CLOCK_POL |
		 CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK);
	reg |= phy->lanes.clk.pol ? CSI2_COMPLEXIO_CFG_CLOCK_POL : 0;
	reg |= phy->lanes.clk.pos << CSI2_COMPLEXIO_CFG_CLOCK_POSITION_SHIFT;

	iss_reg_write(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG, reg);
}
/*
* csiphy_set_power
* @power: Power state to be set.
*
* Returns 0 if successful, or -EBUSY if the retry count is exceeded.
*/
static int csiphy_set_power(struct iss_csiphy *phy, u32 power)
{
	u32 reg;
	u8 retry_count;

	/* Issue the power command with automatic power management enabled. */
	iss_reg_update(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG,
		       CSI2_COMPLEXIO_CFG_PWD_CMD_MASK,
		       power | CSI2_COMPLEXIO_CFG_PWR_AUTO);

	/*
	 * Busy-wait until the power status field matches the requested
	 * command. The "power >> 2" comparison implies the status field is
	 * encoded two bits below the command field — NOTE(review): verify
	 * against the CSI2 register layout. Give up after 250 polls of 1 us.
	 */
	retry_count = 0;
	do {
		udelay(1);
		reg = iss_reg_read(phy->iss, phy->cfg_regs, CSI2_COMPLEXIO_CFG)
		    & CSI2_COMPLEXIO_CFG_PWD_STATUS_MASK;

		if (reg != power >> 2)
			retry_count++;
	} while ((reg != power >> 2) && (retry_count < 250));

	if (retry_count == 250) {
		dev_err(phy->iss->dev, "CSI2 CIO set power failed!\n");
		return -EBUSY;
	}

	return 0;
}
/*
* csiphy_dphy_config - Configure CSI2 D-PHY parameters.
*
* Called with phy->mutex taken.
*/
static void csiphy_dphy_config(struct iss_csiphy *phy)
{
	u32 reg;

	/* Set up REGISTER0: data lane termination and settle timings. */
	reg = phy->dphy.ths_term << REGISTER0_THS_TERM_SHIFT;
	reg |= phy->dphy.ths_settle << REGISTER0_THS_SETTLE_SHIFT;

	iss_reg_write(phy->iss, phy->phy_regs, REGISTER0, reg);

	/* Set up REGISTER1: clock lane timings and the HS sync pattern. */
	reg = phy->dphy.tclk_term << REGISTER1_TCLK_TERM_SHIFT;
	reg |= phy->dphy.tclk_miss << REGISTER1_CTRLCLK_DIV_FACTOR_SHIFT;
	reg |= phy->dphy.tclk_settle << REGISTER1_TCLK_SETTLE_SHIFT;
	/* 0xb8 is the D-PHY high-speed sync pattern programmed here. */
	reg |= 0xb8 << REGISTER1_DPHY_HS_SYNC_PATTERN_SHIFT;

	iss_reg_write(phy->iss, phy->phy_regs, REGISTER1, reg);
}
/*
* TCLK values are OK at their reset values
*/
#define TCLK_TERM 0
#define TCLK_MISS 1
#define TCLK_SETTLE 14
/*
 * omap4iss_csiphy_config - Validate lane configuration and compute D-PHY
 * timings for the CSI PHY feeding @csi2_subdev.
 *
 * Enables the PHY lanes in the SCM CONTROL_CAMERA_RX register, validates the
 * platform-provided lane positions/polarities, derives the D-PHY timing
 * parameters from the external pixel rate, and stores the result in the PHY
 * object (under phy->mutex) for later programming by omap4iss_csiphy_acquire.
 *
 * Return 0 on success or -EINVAL on an invalid lane configuration.
 */
int omap4iss_csiphy_config(struct iss_device *iss,
			   struct v4l2_subdev *csi2_subdev)
{
	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(csi2_subdev);
	struct iss_pipeline *pipe = to_iss_pipeline(&csi2_subdev->entity);
	struct iss_v4l2_subdevs_group *subdevs = pipe->external->host_priv;
	struct iss_csiphy_dphy_cfg csi2phy;
	int csi2_ddrclk_khz;
	struct iss_csiphy_lanes_cfg *lanes;
	unsigned int used_lanes = 0;
	u32 cam_rx_ctrl;
	unsigned int i;

	lanes = &subdevs->bus.csi2.lanecfg;

	/*
	 * SCM.CONTROL_CAMERA_RX
	 * - bit [31]    : CSIPHY2 lane 2 enable (4460+ only)
	 * - bit [30:29] : CSIPHY2 per-lane enable (1 to 0)
	 * - bit [28:24] : CSIPHY1 per-lane enable (4 to 0)
	 * - bit [21]    : CSIPHY2 CTRLCLK enable
	 * - bit [20:19] : CSIPHY2 config: 00 d-phy, 01/10 ccp2
	 * - bit [18]    : CSIPHY1 CTRLCLK enable
	 * - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2
	 */
	/*
	 * TODO: When implementing DT support specify the CONTROL_CAMERA_RX
	 * register offset in the syscon property instead of hardcoding it.
	 */
	regmap_read(iss->syscon, 0x68, &cam_rx_ctrl);

	if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) {
		cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK |
				OMAP4_CAMERARX_CSI21_CAMMODE_MASK);

		/* NOTE: Leave CSIPHY1 config to 0x0: D-PHY mode */
		/* Enable all lanes for now */
		cam_rx_ctrl |=
			0x1f << OMAP4_CAMERARX_CSI21_LANEENABLE_SHIFT;
		/* Enable CTRLCLK */
		cam_rx_ctrl |= OMAP4_CAMERARX_CSI21_CTRLCLKEN_MASK;
	}

	if (subdevs->interface == ISS_INTERFACE_CSI2B_PHY2) {
		cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI22_LANEENABLE_MASK |
				OMAP4_CAMERARX_CSI22_CAMMODE_MASK);

		/* NOTE: Leave CSIPHY2 config to 0x0: D-PHY mode */
		/* Enable all lanes for now */
		cam_rx_ctrl |=
			0x3 << OMAP4_CAMERARX_CSI22_LANEENABLE_SHIFT;
		/* Enable CTRLCLK */
		cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK;
	}

	regmap_write(iss->syscon, 0x68, cam_rx_ctrl);

	/* Reset used lane count */
	csi2->phy->used_data_lanes = 0;

	/*
	 * Clock and data lanes verification: each enabled lane must have a
	 * valid polarity (0/1) and a unique, in-range position. Position 0
	 * marks an unused data lane.
	 */
	for (i = 0; i < csi2->phy->max_data_lanes; i++) {
		if (lanes->data[i].pos == 0)
			continue;

		if (lanes->data[i].pol > 1 ||
		    lanes->data[i].pos > (csi2->phy->max_data_lanes + 1))
			return -EINVAL;

		if (used_lanes & (1 << lanes->data[i].pos))
			return -EINVAL;

		used_lanes |= 1 << lanes->data[i].pos;
		csi2->phy->used_data_lanes++;
	}

	if (lanes->clk.pol > 1 ||
	    lanes->clk.pos > (csi2->phy->max_data_lanes + 1))
		return -EINVAL;

	/* The clock lane is mandatory and must not collide with a data lane. */
	if (lanes->clk.pos == 0 || used_lanes & (1 << lanes->clk.pos))
		return -EINVAL;

	/* DDR clock in kHz derived from pixel rate, bpp and lane count. */
	csi2_ddrclk_khz = pipe->external_rate / 1000
		/ (2 * csi2->phy->used_data_lanes)
		* pipe->external_bpp;

	/*
	 * THS_TERM: Programmed value = ceil(12.5 ns/DDRClk period) - 1.
	 * THS_SETTLE: Programmed value = ceil(90 ns/DDRClk period) + 3.
	 */
	csi2phy.ths_term = DIV_ROUND_UP(25 * csi2_ddrclk_khz, 2000000) - 1;
	csi2phy.ths_settle = DIV_ROUND_UP(90 * csi2_ddrclk_khz, 1000000) + 3;
	csi2phy.tclk_term = TCLK_TERM;
	csi2phy.tclk_miss = TCLK_MISS;
	csi2phy.tclk_settle = TCLK_SETTLE;

	/* Publish the validated configuration under the PHY lock. */
	mutex_lock(&csi2->phy->mutex);
	csi2->phy->dphy = csi2phy;
	csi2->phy->lanes = *lanes;
	mutex_unlock(&csi2->phy->mutex);

	return 0;
}
/*
 * omap4iss_csiphy_acquire - Reset the CSI2 receiver, program the PHY
 * (D-PHY timings and lane layout) and power it on. Marks the PHY in use
 * on success. Returns 0 or a negative error code.
 */
int omap4iss_csiphy_acquire(struct iss_csiphy *phy)
{
	int rval;

	mutex_lock(&phy->mutex);

	/* Reset the attached CSI2 receiver before (re)configuring the PHY. */
	rval = omap4iss_csi2_reset(phy->csi2);
	if (rval)
		goto done;

	csiphy_dphy_config(phy);
	csiphy_lanes_config(phy);

	rval = csiphy_set_power(phy, CSI2_COMPLEXIO_CFG_PWD_CMD_ON);
	if (rval)
		goto done;

	phy->phy_in_use = 1;

done:
	mutex_unlock(&phy->mutex);
	return rval;
}
/* Power the PHY off and mark it unused. Safe to call when not acquired. */
void omap4iss_csiphy_release(struct iss_csiphy *phy)
{
	mutex_lock(&phy->mutex);

	if (!phy->phy_in_use)
		goto unlock;

	csiphy_set_power(phy, CSI2_COMPLEXIO_CFG_PWD_CMD_OFF);
	phy->phy_in_use = 0;

unlock:
	mutex_unlock(&phy->mutex);
}
/*
* omap4iss_csiphy_init - Initialize the CSI PHY frontends
*/
int omap4iss_csiphy_init(struct iss_device *iss)
{
	struct iss_csiphy *first = &iss->csiphy1;
	struct iss_csiphy *second = &iss->csiphy2;

	/* PHY1 serves the CSI2-A receiver. */
	mutex_init(&first->mutex);
	first->iss = iss;
	first->csi2 = &iss->csi2a;
	first->max_data_lanes = ISS_CSIPHY1_NUM_DATA_LANES;
	first->used_data_lanes = 0;
	first->cfg_regs = OMAP4_ISS_MEM_CSI2_A_REGS1;
	first->phy_regs = OMAP4_ISS_MEM_CAMERARX_CORE1;

	/* PHY2 serves the CSI2-B receiver. */
	mutex_init(&second->mutex);
	second->iss = iss;
	second->csi2 = &iss->csi2b;
	second->max_data_lanes = ISS_CSIPHY2_NUM_DATA_LANES;
	second->used_data_lanes = 0;
	second->cfg_regs = OMAP4_ISS_MEM_CSI2_B_REGS1;
	second->phy_regs = OMAP4_ISS_MEM_CAMERARX_CORE2;

	return 0;
}
| linux-master | drivers/staging/media/omap4iss/iss_csiphy.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* TI OMAP4 ISS V4L2 Driver - Generic video node
*
* Copyright (C) 2012 Texas Instruments, Inc.
*
* Author: Sergio Aguirre <[email protected]>
*/
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
#include "iss_video.h"
#include "iss.h"
/* -----------------------------------------------------------------------------
* Helper functions
*/
/*
 * Supported format table. Each entry maps a media bus code to its related
 * bus codes and the corresponding V4L2 pixel format and bits per pixel —
 * see struct iss_format_info for the exact field meanings (the per-column
 * semantics are defined there, not here).
 */
static struct iss_format_info formats[] = {
	{ MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  MEDIA_BUS_FMT_Y8_1X8, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_GREY, 8, },
	{ MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y10_1X10, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y10, 10, },
	{ MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y10_1X10,
	  MEDIA_BUS_FMT_Y12_1X12, MEDIA_BUS_FMT_Y8_1X8,
	  V4L2_PIX_FMT_Y12, 12, },
	{ MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  MEDIA_BUS_FMT_SBGGR8_1X8, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR8, 8, },
	{ MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  MEDIA_BUS_FMT_SGBRG8_1X8, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG8, 8, },
	{ MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  MEDIA_BUS_FMT_SGRBG8_1X8, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG8, 8, },
	{ MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  MEDIA_BUS_FMT_SRGGB8_1X8, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB8, 8, },
	{ MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
	  MEDIA_BUS_FMT_SGRBG10_1X10, 0,
	  V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
	{ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR10, 10, },
	{ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG10, 10, },
	{ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG10, 10, },
	{ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB10, 10, },
	{ MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR10_1X10,
	  MEDIA_BUS_FMT_SBGGR12_1X12, MEDIA_BUS_FMT_SBGGR8_1X8,
	  V4L2_PIX_FMT_SBGGR12, 12, },
	{ MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG10_1X10,
	  MEDIA_BUS_FMT_SGBRG12_1X12, MEDIA_BUS_FMT_SGBRG8_1X8,
	  V4L2_PIX_FMT_SGBRG12, 12, },
	{ MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG10_1X10,
	  MEDIA_BUS_FMT_SGRBG12_1X12, MEDIA_BUS_FMT_SGRBG8_1X8,
	  V4L2_PIX_FMT_SGRBG12, 12, },
	{ MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB10_1X10,
	  MEDIA_BUS_FMT_SRGGB12_1X12, MEDIA_BUS_FMT_SRGGB8_1X8,
	  V4L2_PIX_FMT_SRGGB12, 12, },
	{ MEDIA_BUS_FMT_UYVY8_1X16, MEDIA_BUS_FMT_UYVY8_1X16,
	  MEDIA_BUS_FMT_UYVY8_1X16, 0,
	  V4L2_PIX_FMT_UYVY, 16, },
	{ MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV8_1X16,
	  MEDIA_BUS_FMT_YUYV8_1X16, 0,
	  V4L2_PIX_FMT_YUYV, 16, },
	/* Last entry doubles as the fallback in the lookup loops below. */
	{ MEDIA_BUS_FMT_YUYV8_1_5X8, MEDIA_BUS_FMT_YUYV8_1_5X8,
	  MEDIA_BUS_FMT_YUYV8_1_5X8, 0,
	  V4L2_PIX_FMT_NV12, 8, },
};
/*
 * omap4iss_video_format_info - Look up format information by media bus code
 * @code: media bus format code
 *
 * Return the matching table entry, or NULL when the code is unknown.
 */
const struct iss_format_info *
omap4iss_video_format_info(u32 code)
{
	const struct iss_format_info *info = formats;
	const struct iss_format_info *const end = formats + ARRAY_SIZE(formats);

	for (; info < end; ++info)
		if (info->code == code)
			return info;

	return NULL;
}
/*
* iss_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
* @video: ISS video instance
* @mbus: v4l2_mbus_framefmt format (input)
* @pix: v4l2_pix_format format (output)
*
* Fill the output pix structure with information from the input mbus format.
* The bytesperline and sizeimage fields are computed from the requested bytes
* per line value in the pix format and information from the video instance.
*
* Return the number of padding bytes at end of line.
*/
static unsigned int iss_video_mbus_to_pix(const struct iss_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	unsigned int bpl = pix->bytesperline;	/* caller-requested value */
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	/*
	 * Skip the last format in the loop so that it will be selected if no
	 * match is found.
	 */
	for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	/* Minimum line size in bytes for the matched format. */
	min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;

	/*
	 * Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	/* Align the line size unless zero padding at exactly min_bpl is OK. */
	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	/* FIXME: Special case for NV12! We should make this nicer... */
	if (pix->pixelformat == V4L2_PIX_FMT_NV12)
		pix->sizeimage += (pix->bytesperline * pix->height) / 2;

	/* Number of padding bytes at the end of each line. */
	return bpl - min_bpl;
}
/*
 * iss_video_pix_to_mbus - Convert a v4l2_pix_format to a v4l2_mbus_framefmt
 * @pix: pixel format (input)
 * @mbus: media bus format (output)
 *
 * The last entry of the format table acts as the fallback when the pixel
 * format matches no entry.
 */
static void iss_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int idx = 0;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	/* Scan all entries but the last; stop at the first match. */
	while (idx < ARRAY_SIZE(formats) - 1 &&
	       formats[idx].pixelformat != pix->pixelformat)
		idx++;

	mbus->code = formats[idx].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}
/*
 * iss_video_remote_subdev - Return the subdev connected to the video node
 * @video: ISS video node
 * @pad: optional output for the remote pad index
 *
 * Return NULL when no subdev is connected to the node's pad.
 */
static struct v4l2_subdev *
iss_video_remote_subdev(struct iss_video *video, u32 *pad)
{
	struct media_pad *peer = media_pad_remote_pad_first(&video->pad);

	if (!peer || !is_media_entity_v4l2_subdev(peer->entity))
		return NULL;

	if (pad)
		*pad = peer->index;

	return media_entity_to_v4l2_subdev(peer->entity);
}
/*
 * iss_video_far_end - Return the ISS video node at the far end of the pipeline
 * @video: ISS video node at the near end
 * @pipe: pipeline to walk
 *
 * Walk every entity in @pipe and return the first ISS video node whose
 * direction (capture vs. output) differs from @video's, or NULL when the
 * pipeline contains no such node. Return an ERR_PTR() when the entity
 * iterator cannot be initialized.
 */
static struct iss_video *
iss_video_far_end(struct iss_video *video, struct iss_pipeline *pipe)
{
	struct media_pipeline_entity_iter iter;
	struct media_entity *entity;
	struct iss_video *far_end = NULL;
	int ret;

	ret = media_pipeline_entity_iter_init(&pipe->pipe, &iter);
	if (ret)
		/* Propagate the real error instead of hardcoding -ENOMEM. */
		return ERR_PTR(ret);

	media_pipeline_for_each_entity(&pipe->pipe, &iter, entity) {
		struct iss_video *other;

		if (entity == &video->video.entity)
			continue;

		if (!is_media_entity_v4l2_video_device(entity))
			continue;

		/* A video node of the opposite direction is the far end. */
		other = to_iss_video(media_entity_to_video_device(entity));
		if (other->type != video->type) {
			far_end = other;
			break;
		}
	}

	media_pipeline_entity_iter_cleanup(&iter);

	return far_end;
}
/*
 * __iss_video_get_format - Retrieve the active format of the remote subdev
 * @video: ISS video node
 * @format: filled with the remote pad's active media bus format
 *
 * Return 0 on success, -EINVAL when no subdev is connected, or the error
 * returned by the subdev's get_fmt operation.
 */
static int
__iss_video_get_format(struct iss_video *video,
		       struct v4l2_mbus_framefmt *format)
{
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *sd;
	u32 pad;
	int ret;

	sd = iss_video_remote_subdev(video, &pad);
	if (!sd)
		return -EINVAL;

	fmt.pad = pad;

	/* Serialize against format changes on this video node. */
	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
	mutex_unlock(&video->mutex);

	if (!ret)
		*format = fmt.format;

	return ret;
}
/*
 * iss_video_check_format - Verify the file-handle format against the subdev
 *
 * Recompute the pixel format from the connected subdev's active media bus
 * format and compare it with the format stored in @vfh. On success the
 * return value is the line padding reported by iss_video_mbus_to_pix()
 * (a non-negative byte count); on mismatch -EINVAL is returned.
 */
static int
iss_video_check_format(struct iss_video *video, struct iss_video_fh *vfh)
{
	struct v4l2_mbus_framefmt format;
	struct v4l2_pix_format pixfmt;
	int ret;

	ret = __iss_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	/* Request the minimum line size by passing bytesperline = 0. */
	pixfmt.bytesperline = 0;
	ret = iss_video_mbus_to_pix(video, &format, &pixfmt);

	if (vfh->format.fmt.pix.pixelformat != pixfmt.pixelformat ||
	    vfh->format.fmt.pix.height != pixfmt.height ||
	    vfh->format.fmt.pix.width != pixfmt.width ||
	    vfh->format.fmt.pix.bytesperline != pixfmt.bytesperline ||
	    vfh->format.fmt.pix.sizeimage != pixfmt.sizeimage)
		return -EINVAL;

	/* ret now carries the padding byte count computed above. */
	return ret;
}
/* -----------------------------------------------------------------------------
* Video queue operations
*/
/*
 * vb2 queue_setup: report one plane of the configured image size and cap
 * the buffer count by the available capture memory budget.
 */
static int iss_video_queue_setup(struct vb2_queue *vq,
				 unsigned int *count, unsigned int *num_planes,
				 unsigned int sizes[],
				 struct device *alloc_devs[])
{
	struct iss_video_fh *vfh = vb2_get_drv_priv(vq);
	struct iss_video *video = vfh->video;

	/* Revisit multi-planar support for NV12 */
	*num_planes = 1;

	sizes[0] = vfh->format.fmt.pix.sizeimage;
	if (sizes[0] == 0)
		return -EINVAL;

	/* Limit buffers so their total fits in video->capture_mem. */
	*count = min(*count, video->capture_mem / PAGE_ALIGN(sizes[0]));

	return 0;
}
/* vb2 buf_cleanup: drop the cached DMA address, if one was recorded. */
static void iss_video_buf_cleanup(struct vb2_buffer *vb)
{
	struct iss_buffer *buffer =
		container_of(to_vb2_v4l2_buffer(vb), struct iss_buffer, vb);

	if (buffer->iss_addr)
		buffer->iss_addr = 0;
}
/*
 * vb2 buf_prepare: validate the plane size and DMA alignment, then cache
 * the buffer's DMA address for the queue handler.
 */
static int iss_video_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
	struct iss_buffer *buffer = container_of(vbuf, struct iss_buffer, vb);
	struct iss_video *video = vfh->video;
	unsigned long size = vfh->format.fmt.pix.sizeimage;
	dma_addr_t addr;

	/* The buffer must be large enough for a full image. */
	if (vb2_plane_size(vb, 0) < size)
		return -ENOBUFS;

	addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	/* The hardware requires 32-byte aligned DMA addresses. */
	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->iss->dev,
			"Buffer address must be aligned to 32 bytes boundary.\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);
	buffer->iss_addr = addr;

	return 0;
}
/*
 * vb2 buf_queue: add the buffer to the DMA queue. If the queue was empty,
 * hand the buffer to the hardware and, when the whole pipeline is ready,
 * kick off a single-shot stream.
 */
static void iss_video_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
	struct iss_video *video = vfh->video;
	struct iss_buffer *buffer = container_of(vbuf, struct iss_buffer, vb);
	struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
	unsigned long flags;
	bool empty;

	spin_lock_irqsave(&video->qlock, flags);

	/*
	 * Mark the buffer is faulty and give it back to the queue immediately
	 * if the video node has registered an error. vb2 will perform the same
	 * check when preparing the buffer, but that is inherently racy, so we
	 * need to handle the race condition with an authoritative check here.
	 */
	if (unlikely(video->error)) {
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&video->qlock, flags);
		return;
	}

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->list, &video->dmaqueue);

	spin_unlock_irqrestore(&video->qlock, flags);

	if (empty) {
		enum iss_pipeline_state state;
		unsigned int start;

		/* A buffer is now available on this end of the pipeline. */
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISS_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISS_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_QUEUED;

		/* Start streaming only once the whole pipeline is ready. */
		start = iss_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISS_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap4iss_pipeline_set_stream(pipe,
						ISS_PIPELINE_STREAM_SINGLESHOT);
	}
}
/* videobuf2 queue operations for the ISS video nodes */
static const struct vb2_ops iss_video_vb2ops = {
	.queue_setup = iss_video_queue_setup,
	.buf_prepare = iss_video_buf_prepare,
	.buf_queue = iss_video_buf_queue,
	.buf_cleanup = iss_video_buf_cleanup,
};
/*
* omap4iss_video_buffer_next - Complete the current buffer and return the next
* @video: ISS video object
*
* Remove the current video buffer from the DMA queue and fill its timestamp,
* field count and state fields before waking up its completion handler.
*
* For capture video nodes, the buffer state is set to VB2_BUF_STATE_DONE if no
* error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
*
* The DMA queue is expected to contain at least one buffer.
*
* Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
* empty.
*/
struct iss_buffer *omap4iss_video_buffer_next(struct iss_video *video)
{
	struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
	enum iss_pipeline_state state;
	struct iss_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&video->qlock, flags);
	/* The caller guarantees at least one buffer is queued. */
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&video->qlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
			       list);
	list_del(&buf->list);
	spin_unlock_irqrestore(&video->qlock, flags);

	buf->vb.vb2_buf.timestamp = ktime_get_ns();

	/*
	 * Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vb.sequence =
			atomic_inc_return(&pipe->frame_number);
	else
		buf->vb.sequence = atomic_read(&pipe->frame_number);

	/* Complete the buffer; any pipeline error is consumed here. */
	vb2_buffer_done(&buf->vb.vb2_buf, pipe->error ?
			VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
	pipe->error = false;

	spin_lock_irqsave(&video->qlock, flags);
	if (list_empty(&video->dmaqueue)) {
		spin_unlock_irqrestore(&video->qlock, flags);
		/* No buffer left: this end of the pipeline goes idle. */
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISS_PIPELINE_QUEUE_OUTPUT
			      | ISS_PIPELINE_STREAM;
		else
			state = ISS_PIPELINE_QUEUE_INPUT
			      | ISS_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISS_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	/*
	 * In mem-to-mem capture, clear the stream bit so the next frame is
	 * restarted explicitly (single-shot operation).
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input) {
		spin_lock(&pipe->lock);
		pipe->state &= ~ISS_PIPELINE_STREAM;
		spin_unlock(&pipe->lock);
	}

	buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
			       list);
	spin_unlock_irqrestore(&video->qlock, flags);
	buf->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
	return buf;
}
/*
* omap4iss_video_cancel_stream - Cancel stream on a video node
* @video: ISS video object
*
* Cancelling a stream mark all buffers on the video node as erroneous and makes
* sure no new buffer can be queued.
*/
void omap4iss_video_cancel_stream(struct iss_video *video)
{
	unsigned long flags;

	spin_lock_irqsave(&video->qlock, flags);

	/* Return every queued buffer to userspace in the error state. */
	while (!list_empty(&video->dmaqueue)) {
		struct iss_buffer *buf;

		buf = list_first_entry(&video->dmaqueue, struct iss_buffer,
				       list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}

	vb2_queue_error(video->queue);
	/* The flag is checked in buf_queue() to reject further buffers. */
	video->error = true;

	spin_unlock_irqrestore(&video->qlock, flags);
}
/* -----------------------------------------------------------------------------
* V4L2 ioctls
*/
/* VIDIOC_QUERYCAP: report driver name, card name, bus info and caps. */
static int
iss_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct iss_video *video = video_drvdata(file);

	/* Advertise both capture and output capabilities for the device. */
	cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
			  | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT;

	strscpy(cap->driver, ISS_VIDEO_DRIVER_NAME, sizeof(cap->driver));
	strscpy(cap->card, video->video.name, sizeof(cap->card));
	strscpy(cap->bus_info, "media", sizeof(cap->bus_info));

	return 0;
}
/*
 * VIDIOC_ENUM_FMT: enumerate pixel formats compatible with the current
 * media bus format of the connected subdev.
 */
static int
iss_video_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
	struct iss_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt format;
	unsigned int index = f->index;
	unsigned int i;
	int ret;

	if (f->type != video->type)
		return -EINVAL;

	ret = __iss_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	/* Walk the table counting only entries matching the bus code. */
	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		const struct iss_format_info *info = &formats[i];

		if (format.code != info->code)
			continue;

		if (index == 0) {
			f->pixelformat = info->pixelformat;
			return 0;
		}

		index--;
	}

	return -EINVAL;
}
/* VIDIOC_G_FMT: return the format stored in the file handle. */
static int
iss_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct iss_video *video = video_drvdata(file);
	struct iss_video_fh *vfh = to_iss_video_fh(fh);

	if (format->type != video->type)
		return -EINVAL;

	/* Copy the per-file-handle format under the node lock. */
	mutex_lock(&video->mutex);
	*format = vfh->format;
	mutex_unlock(&video->mutex);

	return 0;
}
/*
 * VIDIOC_S_FMT: normalize the requested format and store it in the file
 * handle. No hardware is touched here; the format is validated against the
 * pipeline at stream-on time.
 */
static int
iss_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct iss_video_fh *vfh = to_iss_video_fh(fh);
	struct iss_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);

	/*
	 * Fill the bytesperline and sizeimage fields by converting to media bus
	 * format and back to pixel format.
	 */
	iss_video_pix_to_mbus(&format->fmt.pix, &fmt);
	iss_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

	vfh->format = *format;

	mutex_unlock(&video->mutex);

	return 0;
}
/*
 * VIDIOC_TRY_FMT: replace the requested format with one derived from the
 * connected subdev's current active format. Note that get_fmt (not a "try"
 * set) is used, so the result reflects the subdev's active configuration.
 */
static int
iss_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct iss_video *video = video_drvdata(file);
	struct v4l2_subdev_format fmt = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	if (format->type != video->type)
		return -EINVAL;

	subdev = iss_video_remote_subdev(video, &pad);
	if (!subdev)
		return -EINVAL;

	iss_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

	fmt.pad = pad;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret;

	iss_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
	return 0;
}
/*
 * VIDIOC_G_SELECTION: forward the request to the connected subdev. Crop
 * targets are only valid on capture nodes, compose targets only on output
 * nodes. Falls back to the subdev format size when get_selection is not
 * implemented.
 */
static int
iss_video_get_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
	struct iss_video *video = video_drvdata(file);
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
	};
	u32 pad;
	int ret;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	subdev = iss_video_remote_subdev(video, &pad);
	if (!subdev)
		return -EINVAL;

	/*
	 * Try the get selection operation first and fallback to get format if
	 * not implemented.
	 */
	sdsel.pad = pad;
	ret = v4l2_subdev_call(subdev, pad, get_selection, NULL, &sdsel);
	if (!ret)
		sel->r = sdsel.r;
	/* Any error other than "not implemented" is returned as-is. */
	if (ret != -ENOIOCTLCMD)
		return ret;

	format.pad = pad;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -ENOTTY : ret;

	/* Fallback: the full frame from the subdev's active format. */
	sel->r.left = 0;
	sel->r.top = 0;
	sel->r.width = format.format.width;
	sel->r.height = format.format.height;

	return 0;
}
/*
 * VIDIOC_S_SELECTION: forward the request to the connected subdev. Crop is
 * only valid on capture nodes, compose only on output nodes.
 */
static int
iss_video_set_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
	struct iss_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	struct v4l2_subdev_selection sdsel = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
		.target = sel->target,
		.flags = sel->flags,
		.r = sel->r,
	};
	u32 pad;
	int ret;

	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
			return -EINVAL;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
	subdev = iss_video_remote_subdev(video, &pad);
	if (!subdev)
		return -EINVAL;

	sdsel.pad = pad;
	/* Serialize with format changes on this node. */
	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, pad, set_selection, NULL, &sdsel);
	mutex_unlock(&video->mutex);
	if (!ret)
		/* Report the rectangle actually applied by the subdev. */
		sel->r = sdsel.r;

	return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
}
/*
 * VIDIOC_G_PARM: report the time-per-frame stored in the file handle.
 * Only supported on output video nodes.
 */
static int
iss_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct iss_video_fh *vfh = to_iss_video_fh(fh);
	struct iss_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe = vfh->timeperframe;

	return 0;
}
static int
iss_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
struct iss_video_fh *vfh = to_iss_video_fh(fh);
struct iss_video *video = video_drvdata(file);
if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
video->type != a->type)
return -EINVAL;
if (a->parm.output.timeperframe.denominator == 0)
a->parm.output.timeperframe.denominator = 1;
vfh->timeperframe = a->parm.output.timeperframe;
return 0;
}
/*
 * Buffer management ioctls: these all delegate directly to the vb2 queue
 * owned by the file handle.
 */
static int
iss_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
	struct iss_video_fh *vfh = to_iss_video_fh(fh);

	return vb2_reqbufs(&vfh->queue, rb);
}

static int
iss_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct iss_video_fh *vfh = to_iss_video_fh(fh);

	return vb2_querybuf(&vfh->queue, b);
}

static int
iss_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct iss_video *video = video_drvdata(file);
	struct iss_video_fh *vfh = to_iss_video_fh(fh);

	return vb2_qbuf(&vfh->queue, video->video.v4l2_dev->mdev, b);
}

static int
iss_video_expbuf(struct file *file, void *fh, struct v4l2_exportbuffer *e)
{
	struct iss_video_fh *vfh = to_iss_video_fh(fh);

	return vb2_expbuf(&vfh->queue, e);
}

static int
iss_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct iss_video_fh *vfh = to_iss_video_fh(fh);

	/* Honour O_NONBLOCK when dequeuing. */
	return vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK);
}
/*
* Stream management
*
* Every ISS pipeline has a single input and a single output. The input can be
* either a sensor or a video node. The output is always a video node.
*
* As every pipeline has an output video node, the ISS video objects at the
* pipeline output stores the pipeline state. It tracks the streaming state of
* both the input and output, as well as the availability of buffers.
*
* In sensor-to-memory mode, frames are always available at the pipeline input.
* Starting the sensor usually requires I2C transfers and must be done in
* interruptible context. The pipeline is started and stopped synchronously
* to the stream on/off commands. All modules in the pipeline will get their
* subdev set stream handler called. The module at the end of the pipeline must
* delay starting the hardware until buffers are available at its output.
*
* In memory-to-memory mode, starting/stopping the stream requires
* synchronization between the input and output. ISS modules can't be stopped
* in the middle of a frame, and at least some of the modules seem to become
* busy as soon as they're started, even if they don't receive a frame start
* event. For that reason frames need to be processed in single-shot mode. The
* driver needs to wait until a frame is completely processed and written to
* memory before restarting the pipeline for the next frame. Pipelined
* processing might be possible but requires more testing.
*
* Stream start must be delayed until buffers are available at both the input
* and output. The pipeline must be started in the vb2 queue callback with
* the buffers queue spinlock held. The modules subdev set stream operation must
* not sleep.
*/
/*
 * iss_video_streamon - Handle VIDIOC_STREAMON on an ISS video node
 * @file: V4L2 file handle
 * @fh: per-file-handle data (an iss_video_fh)
 * @type: buffer type the caller wants to stream on
 *
 * Build and start the media pipeline this node belongs to, verify that the
 * configured format matches the connected subdev, and start the vb2 queue.
 * In capture (sensor-to-memory) mode the hardware stream is started here;
 * in memory-to-memory mode it is deferred until buffers are queued on both
 * the input and the output (see the mode description above this function).
 *
 * Return 0 on success or a negative error code on failure.
 */
static int
iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct iss_video_fh *vfh = to_iss_video_fh(fh);
	struct iss_video *video = video_drvdata(file);
	struct media_device *mdev = video->video.entity.graph_obj.mdev;
	struct media_pipeline_pad_iter iter;
	enum iss_pipeline_state state;
	struct iss_pipeline *pipe;
	struct iss_video *far_end;
	struct media_pad *pad;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/*
	 * Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = to_iss_pipeline(&video->video.entity) ? : &video->pipe;
	pipe->external = NULL;
	pipe->external_rate = 0;
	pipe->external_bpp = 0;

	ret = media_entity_enum_init(&pipe->ent_enum, mdev);
	if (ret)
		goto err_entity_enum_init;

	if (video->iss->pdata->set_constraints)
		video->iss->pdata->set_constraints(video->iss, true);

	ret = video_device_pipeline_start(&video->video, &pipe->pipe);
	if (ret < 0)
		goto err_media_pipeline_start;

	/* Record every entity belonging to the pipeline in the enumeration. */
	media_pipeline_for_each_pad(&pipe->pipe, &iter, pad)
		media_entity_enum_set(&pipe->ent_enum, pad->entity);

	/*
	 * Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = iss_video_check_format(video, vfh);
	if (ret < 0)
		goto err_iss_video_check_format;

	/* On success the check returns the line padding in bytes. */
	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	/*
	 * Find the ISS video node connected at the far end of the pipeline and
	 * update the pipeline.
	 */
	far_end = iss_video_far_end(video, pipe);
	if (IS_ERR(far_end)) {
		ret = PTR_ERR(far_end);
		goto err_iss_video_check_format;
	}

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		state = ISS_PIPELINE_STREAM_OUTPUT | ISS_PIPELINE_IDLE_OUTPUT;
		pipe->input = far_end;
		pipe->output = video;
	} else {
		/* Memory-to-memory needs a capture node at the far end. */
		if (!far_end) {
			ret = -EPIPE;
			goto err_iss_video_check_format;
		}
		state = ISS_PIPELINE_STREAM_INPUT | ISS_PIPELINE_IDLE_INPUT;
		pipe->input = video;
		pipe->output = far_end;
	}

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISS_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/*
	 * Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	video->error = false;
	/* First frame interrupt will bump this to 0. */
	atomic_set(&pipe->frame_number, -1);

	ret = vb2_streamon(&vfh->queue, type);
	if (ret < 0)
		goto err_iss_video_check_format;

	/*
	 * In sensor-to-memory mode, the stream can be started synchronously
	 * to the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (!pipe->input) {
		unsigned long flags;

		ret = omap4iss_pipeline_set_stream(pipe,
					ISS_PIPELINE_STREAM_CONTINUOUS);
		if (ret < 0)
			goto err_omap4iss_set_stream;
		/*
		 * No buffer queued yet: flag the underrun so the buffer
		 * queue handler starts the hardware when one arrives.
		 */
		spin_lock_irqsave(&video->qlock, flags);
		if (list_empty(&video->dmaqueue))
			video->dmaqueue_flags |= ISS_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&video->qlock, flags);
	}

	mutex_unlock(&video->stream_lock);

	return 0;

err_omap4iss_set_stream:
	vb2_streamoff(&vfh->queue, type);
err_iss_video_check_format:
	video_device_pipeline_stop(&video->video);
err_media_pipeline_start:
	if (video->iss->pdata->set_constraints)
		video->iss->pdata->set_constraints(video->iss, false);
	video->queue = NULL;
err_entity_enum_init:
	media_entity_enum_cleanup(&pipe->ent_enum);
	mutex_unlock(&video->stream_lock);

	return ret;
}
/*
 * iss_video_streamoff - Handle VIDIOC_STREAMOFF on an ISS video node
 * @file: V4L2 file handle
 * @fh: per-file-handle data (an iss_video_fh)
 * @type: buffer type the caller wants to stop streaming on
 *
 * Clear the pipeline state bits owned by this node, stop the hardware
 * stream, stop the vb2 queue and release the pipeline. A no-op if the
 * file handle isn't currently streaming.
 *
 * Always returns 0 (only -EINVAL for a mismatched buffer type).
 */
static int
iss_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct iss_video_fh *vfh = to_iss_video_fh(fh);
	struct iss_video *video = video_drvdata(file);
	struct iss_pipeline *pipe = to_iss_pipeline(&video->video.entity);
	enum iss_pipeline_state state;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	if (!vb2_is_streaming(&vfh->queue))
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISS_PIPELINE_STREAM_OUTPUT
		      | ISS_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISS_PIPELINE_STREAM_INPUT
		      | ISS_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap4iss_pipeline_set_stream(pipe, ISS_PIPELINE_STREAM_STOPPED);
	vb2_streamoff(&vfh->queue, type);
	video->queue = NULL;

	media_entity_enum_cleanup(&pipe->ent_enum);

	if (video->iss->pdata->set_constraints)
		video->iss->pdata->set_constraints(video->iss, false);
	video_device_pipeline_stop(&video->video);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}
/*
 * iss_video_enum_input - Enumerate the single "camera" input
 *
 * Only input index 0 exists; any other index is rejected with -EINVAL.
 */
static int
iss_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index != 0)
		return -EINVAL;

	input->type = V4L2_INPUT_TYPE_CAMERA;
	strscpy(input->name, "camera", sizeof(input->name));

	return 0;
}
/*
 * iss_video_g_input - Report the currently selected input
 *
 * A single input exists, so index 0 is always reported.
 */
static int
iss_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}
/*
 * iss_video_s_input - Select an input
 *
 * Index 0 is the only selectable input; everything else is -EINVAL.
 */
static int
iss_video_s_input(struct file *file, void *fh, unsigned int input)
{
	if (input != 0)
		return -EINVAL;

	return 0;
}
/* V4L2 ioctl operations shared by all ISS video device nodes. */
static const struct v4l2_ioctl_ops iss_video_ioctl_ops = {
	.vidioc_querycap = iss_video_querycap,
	.vidioc_enum_fmt_vid_cap = iss_video_enum_format,
	.vidioc_g_fmt_vid_cap = iss_video_get_format,
	.vidioc_s_fmt_vid_cap = iss_video_set_format,
	.vidioc_try_fmt_vid_cap = iss_video_try_format,
	/* Capture and output nodes share the same format handlers. */
	.vidioc_g_fmt_vid_out = iss_video_get_format,
	.vidioc_s_fmt_vid_out = iss_video_set_format,
	.vidioc_try_fmt_vid_out = iss_video_try_format,
	.vidioc_g_selection = iss_video_get_selection,
	.vidioc_s_selection = iss_video_set_selection,
	.vidioc_g_parm = iss_video_get_param,
	.vidioc_s_parm = iss_video_set_param,
	.vidioc_reqbufs = iss_video_reqbufs,
	.vidioc_querybuf = iss_video_querybuf,
	.vidioc_qbuf = iss_video_qbuf,
	.vidioc_expbuf = iss_video_expbuf,
	.vidioc_dqbuf = iss_video_dqbuf,
	.vidioc_streamon = iss_video_streamon,
	.vidioc_streamoff = iss_video_streamoff,
	.vidioc_enum_input = iss_video_enum_input,
	.vidioc_g_input = iss_video_g_input,
	.vidioc_s_input = iss_video_s_input,
};
/* -----------------------------------------------------------------------------
* V4L2 file operations
*/
/*
 * iss_video_open - Open an ISS video device node
 * @file: V4L2 file handle
 *
 * Allocate a per-file handle, take a reference on the ISS module and on the
 * pipeline power state, and initialise the vb2 queue and default format.
 *
 * Return 0 on success or a negative error code on failure. On failure the
 * file handle is freed and all references taken here are released.
 */
static int iss_video_open(struct file *file)
{
	struct iss_video *video = video_drvdata(file);
	struct iss_video_fh *handle;
	struct vb2_queue *q;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (!omap4iss_get(video->iss)) {
		ret = -EBUSY;
		goto done;
	}

	ret = v4l2_pipeline_pm_get(&video->video.entity);
	if (ret < 0) {
		omap4iss_put(video->iss);
		goto done;
	}

	q = &handle->queue;

	q->type = video->type;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = handle;
	q->ops = &iss_video_vb2ops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->buf_struct_size = sizeof(struct iss_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->dev = video->iss->dev;

	ret = vb2_queue_init(q);
	if (ret) {
		/*
		 * Balance the v4l2_pipeline_pm_get() above; without this the
		 * pipeline power use count would leak on queue init failure.
		 */
		v4l2_pipeline_pm_put(&video->video.entity);
		omap4iss_put(video->iss);
		goto done;
	}

	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		v4l2_fh_exit(&handle->vfh);
		kfree(handle);
	}

	return ret;
}
/*
 * iss_video_release - Release an ISS video device node file handle
 * @file: V4L2 file handle
 *
 * Stop streaming if needed, release the vb2 queue, and drop the pipeline
 * power and ISS module references taken in iss_video_open().
 */
static int iss_video_release(struct file *file)
{
	struct iss_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct iss_video_fh *handle = to_iss_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	iss_video_streamoff(file, vfh, video->type);

	v4l2_pipeline_pm_put(&video->video.entity);

	/* Release the videobuf2 queue */
	vb2_queue_release(&handle->queue);

	v4l2_fh_del(vfh);
	v4l2_fh_exit(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap4iss_put(video->iss);

	return 0;
}
/* Poll for buffer availability by delegating to the handle's vb2 queue. */
static __poll_t iss_video_poll(struct file *file, poll_table *wait)
{
	struct iss_video_fh *handle = to_iss_video_fh(file->private_data);

	return vb2_poll(&handle->queue, file, wait);
}
/* Map vb2 buffers into userspace by delegating to the handle's queue. */
static int iss_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct iss_video_fh *handle = to_iss_video_fh(file->private_data);

	return vb2_mmap(&handle->queue, vma);
}
/* File operations shared by all ISS video device nodes. */
static const struct v4l2_file_operations iss_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = iss_video_open,
	.release = iss_video_release,
	.poll = iss_video_poll,
	.mmap = iss_video_mmap,
};
/* -----------------------------------------------------------------------------
 * ISS video core
 */

/* Fallback operations used when a module doesn't provide its own. */
static const struct iss_video_operations iss_video_dummy_ops = {
};
/*
 * omap4iss_video_init - Initialize an ISS video device node
 * @video: ISS video device to initialize
 * @name: module name used to build the video device name
 *
 * Set up the media pad direction from the buffer type, initialize locks and
 * the embedded video_device structure.
 *
 * Return 0 on success or a negative error code on failure.
 */
int omap4iss_video_init(struct iss_video *video, const char *name)
{
	const char *direction;
	int ret;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		/* Capture nodes sit at the pipeline output -> sink pad. */
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK;
	} else if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		/* Output nodes feed the pipeline input -> source pad. */
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
	} else {
		return -EINVAL;
	}

	ret = media_entity_pads_init(&video->video.entity, 1, &video->pad);
	if (ret < 0)
		return ret;

	spin_lock_init(&video->qlock);
	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);

	/* Initialize the video device. */
	if (!video->ops)
		video->ops = &iss_video_dummy_ops;

	video->video.fops = &iss_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP4 ISS %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_VIDEO;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &iss_video_ioctl_ops;
	video->pipe.stream_state = ISS_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}
/*
 * omap4iss_video_cleanup - Release resources allocated by omap4iss_video_init
 * @video: ISS video device to clean up
 */
void omap4iss_video_cleanup(struct iss_video *video)
{
	media_entity_cleanup(&video->video.entity);
	mutex_destroy(&video->stream_lock);
	mutex_destroy(&video->mutex);
}
/*
 * omap4iss_video_register - Register an ISS video device node with V4L2
 * @video: ISS video device to register
 * @vdev: V4L2 device the node belongs to
 *
 * Return 0 on success or the negative error code from
 * video_register_device() on failure.
 */
int omap4iss_video_register(struct iss_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;
	/* Streaming plus the capture or output capability of this node. */
	video->video.device_caps = V4L2_CAP_STREAMING |
		(video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ?
		 V4L2_CAP_VIDEO_CAPTURE : V4L2_CAP_VIDEO_OUTPUT);

	ret = video_register_device(&video->video, VFL_TYPE_VIDEO, -1);
	if (ret < 0)
		dev_err(video->iss->dev,
			"could not register video device (%d)\n", ret);

	return ret;
}
/*
 * omap4iss_video_unregister - Unregister an ISS video device node
 * @video: ISS video device to unregister
 */
void omap4iss_video_unregister(struct iss_video *video)
{
	video_unregister_device(&video->video);
}
| linux-master | drivers/staging/media/omap4iss/iss_video.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* TI OMAP4 ISS V4L2 Driver - ISP IPIPEIF module
*
* Copyright (C) 2012 Texas Instruments, Inc.
*
* Author: Sergio Aguirre <[email protected]>
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include "iss.h"
#include "iss_regs.h"
#include "iss_ipipeif.h"
/* Media bus codes accepted on the IPIPEIF sink pad. */
static const unsigned int ipipeif_fmts[] = {
	MEDIA_BUS_FMT_SGRBG10_1X10,
	MEDIA_BUS_FMT_SRGGB10_1X10,
	MEDIA_BUS_FMT_SBGGR10_1X10,
	MEDIA_BUS_FMT_SGBRG10_1X10,
	MEDIA_BUS_FMT_UYVY8_1X16,
	MEDIA_BUS_FMT_YUYV8_1X16,
};
/*
 * ipipeif_print_status - Print current IPIPEIF Module register values.
 * @ipipeif: Pointer to ISS ISP IPIPEIF device.
 *
 * Also prints other debug information stored in the IPIPEIF module.
 */

/* Helpers to dump one register from each register block via dev_dbg(). */
#define IPIPEIF_PRINT_REGISTER(iss, name)\
	dev_dbg(iss->dev, "###IPIPEIF " #name "=0x%08x\n", \
		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_##name))

#define ISIF_PRINT_REGISTER(iss, name)\
	dev_dbg(iss->dev, "###ISIF " #name "=0x%08x\n", \
		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_##name))

#define ISP5_PRINT_REGISTER(iss, name)\
	dev_dbg(iss->dev, "###ISP5 " #name "=0x%08x\n", \
		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_##name))
/* Dump the IPIPEIF, ISIF and ISP5 registers relevant to this module. */
static void ipipeif_print_status(struct iss_ipipeif_device *ipipeif)
{
	struct iss_device *iss = to_iss_device(ipipeif);

	dev_dbg(iss->dev, "-------------IPIPEIF Register dump-------------\n");

	IPIPEIF_PRINT_REGISTER(iss, CFG1);
	IPIPEIF_PRINT_REGISTER(iss, CFG2);

	ISIF_PRINT_REGISTER(iss, SYNCEN);
	ISIF_PRINT_REGISTER(iss, CADU);
	ISIF_PRINT_REGISTER(iss, CADL);
	ISIF_PRINT_REGISTER(iss, MODESET);
	ISIF_PRINT_REGISTER(iss, CCOLP);
	ISIF_PRINT_REGISTER(iss, SPH);
	ISIF_PRINT_REGISTER(iss, LNH);
	ISIF_PRINT_REGISTER(iss, LNV);
	ISIF_PRINT_REGISTER(iss, VDINT(0));
	ISIF_PRINT_REGISTER(iss, HSIZE);

	ISP5_PRINT_REGISTER(iss, SYSCONFIG);
	ISP5_PRINT_REGISTER(iss, CTRL);
	ISP5_PRINT_REGISTER(iss, IRQSTATUS(0));
	ISP5_PRINT_REGISTER(iss, IRQENABLE_SET(0));
	ISP5_PRINT_REGISTER(iss, IRQENABLE_CLR(0));

	dev_dbg(iss->dev, "-----------------------------------------------\n");
}
/*
 * ipipeif_write_enable - Enable/disable the ISIF memory write path
 * @ipipeif: Pointer to ISS ISP IPIPEIF device.
 * @enable: non-zero to enable data writes, 0 to disable them
 *
 * Toggles only the DWEN (data write enable) bit in the ISIF SYNCEN
 * register; the module enable bit (SYEN) is controlled separately by
 * ipipeif_enable().
 */
static void ipipeif_write_enable(struct iss_ipipeif_device *ipipeif, u8 enable)
{
	struct iss_device *iss = to_iss_device(ipipeif);

	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SYNCEN,
		       ISIF_SYNCEN_DWEN, enable ? ISIF_SYNCEN_DWEN : 0);
}
/*
 * ipipeif_enable - Enable/Disable IPIPEIF.
 * @ipipeif: Pointer to ISS ISP IPIPEIF device.
 * @enable: enable flag
 *
 * Toggles the SYEN (sync enable) bit in the ISIF SYNCEN register.
 */
static void ipipeif_enable(struct iss_ipipeif_device *ipipeif, u8 enable)
{
	struct iss_device *iss = to_iss_device(ipipeif);

	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SYNCEN,
		       ISIF_SYNCEN_SYEN, enable ? ISIF_SYNCEN_SYEN : 0);
}
/* -----------------------------------------------------------------------------
* Format- and pipeline-related configuration helpers
*/
/*
 * ipipeif_set_outaddr - Set memory address to save output image
 * @ipipeif: Pointer to ISP IPIPEIF device.
 * @addr: 32-bit memory address aligned on 32 byte boundary.
 *
 * Sets the memory address where the output will be saved.
 */
static void ipipeif_set_outaddr(struct iss_ipipeif_device *ipipeif, u32 addr)
{
	struct iss_device *iss = to_iss_device(ipipeif);

	/*
	 * Save address split in Base Address H & L. The registers hold the
	 * address shifted right by 5 (32-byte units), upper and lower halves.
	 */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CADU,
		      (addr >> (16 + 5)) & ISIF_CADU_MASK);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CADL,
		      (addr >> 5) & ISIF_CADL_MASK);
}
/*
 * ipipeif_configure - Program the ISIF/IPIPEIF registers for the current
 * pipeline configuration
 * @ipipeif: Pointer to ISS ISP IPIPEIF device.
 *
 * Configures the ISS bridge for the selected input, selects the input
 * format (YUV422 or 10-bit raw Bayer with the matching color pattern),
 * and programs the frame geometry and output line size.
 */
static void ipipeif_configure(struct iss_ipipeif_device *ipipeif)
{
	struct iss_device *iss = to_iss_device(ipipeif);
	const struct iss_format_info *info;
	struct v4l2_mbus_framefmt *format;
	u32 isif_ccolp = 0;

	omap4iss_configure_bridge(iss, ipipeif->input);

	/* IPIPEIF_PAD_SINK */
	format = &ipipeif->formats[IPIPEIF_PAD_SINK];

	/* IPIPEIF with YUV422 input from ISIF */
	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG1,
		    IPIPEIF_CFG1_INPSRC1_MASK | IPIPEIF_CFG1_INPSRC2_MASK);

	/* Select ISIF/IPIPEIF input format */
	switch (format->code) {
	case MEDIA_BUS_FMT_UYVY8_1X16:
	case MEDIA_BUS_FMT_YUYV8_1X16:
		iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
			       ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
			       ISIF_MODESET_CCDW_MASK,
			       ISIF_MODESET_INPMOD_YCBCR16);

		iss_reg_update(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG2,
			       IPIPEIF_CFG2_YUV8, IPIPEIF_CFG2_YUV16);

		break;
	/*
	 * For the raw Bayer variants, program the CFA color pattern
	 * matching the media bus code, then fall through to the common
	 * raw configuration below.
	 */
	case MEDIA_BUS_FMT_SGRBG10_1X10:
		isif_ccolp = ISIF_CCOLP_CP0_F0_GR |
			ISIF_CCOLP_CP1_F0_R |
			ISIF_CCOLP_CP2_F0_B |
			ISIF_CCOLP_CP3_F0_GB;
		goto cont_raw;
	case MEDIA_BUS_FMT_SRGGB10_1X10:
		isif_ccolp = ISIF_CCOLP_CP0_F0_R |
			ISIF_CCOLP_CP1_F0_GR |
			ISIF_CCOLP_CP2_F0_GB |
			ISIF_CCOLP_CP3_F0_B;
		goto cont_raw;
	case MEDIA_BUS_FMT_SBGGR10_1X10:
		isif_ccolp = ISIF_CCOLP_CP0_F0_B |
			ISIF_CCOLP_CP1_F0_GB |
			ISIF_CCOLP_CP2_F0_GR |
			ISIF_CCOLP_CP3_F0_R;
		goto cont_raw;
	case MEDIA_BUS_FMT_SGBRG10_1X10:
		isif_ccolp = ISIF_CCOLP_CP0_F0_GB |
			ISIF_CCOLP_CP1_F0_B |
			ISIF_CCOLP_CP2_F0_R |
			ISIF_CCOLP_CP3_F0_GR;
cont_raw:
		iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_IPIPEIF, IPIPEIF_CFG2,
			    IPIPEIF_CFG2_YUV16);

		iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_MODESET,
			       ISIF_MODESET_CCDMD | ISIF_MODESET_INPMOD_MASK |
			       ISIF_MODESET_CCDW_MASK, ISIF_MODESET_INPMOD_RAW |
			       ISIF_MODESET_CCDW_2BIT);

		info = omap4iss_video_format_info(format->code);
		iss_reg_update(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CGAMMAWD,
			       ISIF_CGAMMAWD_GWDI_MASK,
			       ISIF_CGAMMAWD_GWDI(info->bpp));

		/* Set RAW Bayer pattern */
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_CCOLP,
			      isif_ccolp);
		break;
	}

	/* Frame geometry: start pixel 0, width-1 and height-1 registers. */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_SPH, 0 & ISIF_SPH_MASK);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_LNH,
		      (format->width - 1) & ISIF_LNH_MASK);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_LNV,
		      (format->height - 1) & ISIF_LNV_MASK);

	/* Generate ISIF0 on the last line of the image */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_VDINT(0),
		      format->height - 1);

	/* IPIPEIF_PAD_SOURCE_ISIF_SF */
	format = &ipipeif->formats[IPIPEIF_PAD_SOURCE_ISIF_SF];

	/* Output line size in 32-byte units (bpl >> 5). */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_ISIF, ISIF_HSIZE,
		      (ipipeif->video_out.bpl_value >> 5) &
		      ISIF_HSIZE_HSIZE_MASK);

	/* IPIPEIF_PAD_SOURCE_VP */
	/* Do nothing? */
}
/* -----------------------------------------------------------------------------
* Interrupt handling
*/
/*
 * ipipeif_isr_buffer - Complete the current buffer and arm the next one
 * @ipipeif: Pointer to ISS ISP IPIPEIF device.
 *
 * Called from the VD0 interrupt when the IPIPEIF writes to memory.
 */
static void ipipeif_isr_buffer(struct iss_ipipeif_device *ipipeif)
{
	struct iss_buffer *buffer;

	/*
	 * The ISIF generates VD0 interrupts even when writes are disabled;
	 * disabling the ISIF when no buffer is available is thus not
	 * enough, we need to handle the situation explicitly.
	 */
	if (list_empty(&ipipeif->video_out.dmaqueue))
		return;

	ipipeif_write_enable(ipipeif, 0);

	buffer = omap4iss_video_buffer_next(&ipipeif->video_out);
	if (!buffer)
		return;

	ipipeif_set_outaddr(ipipeif, buffer->iss_addr);

	ipipeif_write_enable(ipipeif, 1);
}
/*
 * omap4iss_ipipeif_isr - Configure ipipeif during interframe time.
 * @ipipeif: Pointer to ISP IPIPEIF device.
 * @events: IPIPEIF events
 *
 * Ignores events while the module is being synchronously stopped; otherwise
 * handles buffer completion on ISIF VD0 when writing to memory.
 */
void omap4iss_ipipeif_isr(struct iss_ipipeif_device *ipipeif, u32 events)
{
	if (omap4iss_module_sync_is_stopping(&ipipeif->wait,
					     &ipipeif->stopping))
		return;

	if ((events & ISP5_IRQ_ISIF_INT(0)) &&
	    (ipipeif->output & IPIPEIF_OUTPUT_MEMORY))
		ipipeif_isr_buffer(ipipeif);
}
/* -----------------------------------------------------------------------------
* ISP video operations
*/
/*
 * ipipeif_video_queue - Queue a buffer on the IPIPEIF memory output
 * @video: ISS video node the buffer was queued on
 * @buffer: buffer to program as the next DMA target
 *
 * Return 0 on success, or -ENODEV if the memory output isn't active.
 */
static int ipipeif_video_queue(struct iss_video *video,
			       struct iss_buffer *buffer)
{
	struct iss_ipipeif_device *ipipeif = container_of(video,
				struct iss_ipipeif_device, video_out);

	if (!(ipipeif->output & IPIPEIF_OUTPUT_MEMORY))
		return -ENODEV;

	ipipeif_set_outaddr(ipipeif, buffer->iss_addr);

	/*
	 * If streaming was enabled before there was a buffer queued
	 * or underrun happened in the ISR, the hardware was not enabled
	 * and DMA queue flag ISS_VIDEO_DMAQUEUE_UNDERRUN is still set.
	 * Enable it now.
	 */
	if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
		if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
			ipipeif_write_enable(ipipeif, 1);
		ipipeif_enable(ipipeif, 1);
		iss_video_dmaqueue_flags_clr(video);
	}

	return 0;
}
/* Video operations for the IPIPEIF memory output node. */
static const struct iss_video_operations ipipeif_video_ops = {
	.queue = ipipeif_video_queue,
};

/* -----------------------------------------------------------------------------
 * V4L2 subdev operations
 */

/* Sub-clocks that must be running for the IPIPEIF/ISIF path. */
#define IPIPEIF_DRV_SUBCLK_MASK	(OMAP4_ISS_ISP_SUBCLK_IPIPEIF |\
				 OMAP4_ISS_ISP_SUBCLK_ISIF)
/*
 * ipipeif_set_stream - Enable/Disable streaming on the IPIPEIF module
 * @sd: ISP IPIPEIF V4L2 subdevice
 * @enable: Enable/disable stream (an iss_pipeline_stream_state value)
 *
 * Return 0 on success, or -ETIMEDOUT if the module failed to idle when
 * stopping.
 */
static int ipipeif_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
	struct iss_device *iss = to_iss_device(ipipeif);
	struct iss_video *video_out = &ipipeif->video_out;
	int ret = 0;

	if (ipipeif->state == ISS_PIPELINE_STREAM_STOPPED) {
		if (enable == ISS_PIPELINE_STREAM_STOPPED)
			return 0;

		/* Power up the sub-clocks on the stopped -> running edge. */
		omap4iss_isp_subclk_enable(iss, IPIPEIF_DRV_SUBCLK_MASK);
	}

	switch (enable) {
	case ISS_PIPELINE_STREAM_CONTINUOUS:

		ipipeif_configure(ipipeif);
		ipipeif_print_status(ipipeif);

		/*
		 * When outputting to memory with no buffer available, let the
		 * buffer queue handler start the hardware. A DMA queue flag
		 * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
		 * a buffer available.
		 */
		if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY &&
		    !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
			break;

		atomic_set(&ipipeif->stopping, 0);
		if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
			ipipeif_write_enable(ipipeif, 1);
		ipipeif_enable(ipipeif, 1);
		iss_video_dmaqueue_flags_clr(video_out);
		break;

	case ISS_PIPELINE_STREAM_STOPPED:
		if (ipipeif->state == ISS_PIPELINE_STREAM_STOPPED)
			return 0;
		/* Wait for the module to go idle before disabling it. */
		if (omap4iss_module_sync_idle(&sd->entity, &ipipeif->wait,
					      &ipipeif->stopping))
			ret = -ETIMEDOUT;

		if (ipipeif->output & IPIPEIF_OUTPUT_MEMORY)
			ipipeif_write_enable(ipipeif, 0);
		ipipeif_enable(ipipeif, 0);
		omap4iss_isp_subclk_disable(iss, IPIPEIF_DRV_SUBCLK_MASK);
		iss_video_dmaqueue_flags_clr(video_out);
		break;
	}

	ipipeif->state = enable;
	return ret;
}
static struct v4l2_mbus_framefmt *
__ipipeif_get_format(struct iss_ipipeif_device *ipipeif,
struct v4l2_subdev_state *sd_state, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&ipipeif->subdev, sd_state,
pad);
return &ipipeif->formats[pad];
}
/*
 * ipipeif_try_format - Try video format on a pad
 * @ipipeif: ISS IPIPEIF device
 * @sd_state: V4L2 subdev state
 * @pad: Pad number
 * @fmt: Format (adjusted in place to the closest supported value)
 * @which: format type (try or active)
 */
static void
ipipeif_try_format(struct iss_ipipeif_device *ipipeif,
		   struct v4l2_subdev_state *sd_state, unsigned int pad,
		   struct v4l2_mbus_framefmt *fmt,
		   enum v4l2_subdev_format_whence which)
{
	struct v4l2_mbus_framefmt *format;
	unsigned int width = fmt->width;
	unsigned int height = fmt->height;
	unsigned int i;

	switch (pad) {
	case IPIPEIF_PAD_SINK:
		/* TODO: If the IPIPEIF output formatter pad is connected
		 * directly to the resizer, only YUV formats can be used.
		 */
		for (i = 0; i < ARRAY_SIZE(ipipeif_fmts); i++) {
			if (fmt->code == ipipeif_fmts[i])
				break;
		}

		/* If not found, use SGRBG10 as default */
		if (i >= ARRAY_SIZE(ipipeif_fmts))
			fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;

		/* Clamp the input size. */
		fmt->width = clamp_t(u32, width, 1, 8192);
		fmt->height = clamp_t(u32, height, 1, 8192);
		break;

	case IPIPEIF_PAD_SOURCE_ISIF_SF:
		/* Source formats inherit the sink format's code. */
		format = __ipipeif_get_format(ipipeif, sd_state,
					      IPIPEIF_PAD_SINK,
					      which);
		memcpy(fmt, format, sizeof(*fmt));

		/* The data formatter truncates the number of horizontal output
		 * pixels to a multiple of 16. To avoid clipping data, allow
		 * callers to request an output size bigger than the input size
		 * up to the nearest multiple of 16.
		 */
		fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
		fmt->width &= ~15;
		fmt->height = clamp_t(u32, height, 32, fmt->height);
		break;

	case IPIPEIF_PAD_SOURCE_VP:
		format = __ipipeif_get_format(ipipeif, sd_state,
					      IPIPEIF_PAD_SINK,
					      which);
		memcpy(fmt, format, sizeof(*fmt));

		fmt->width = clamp_t(u32, width, 32, fmt->width);
		fmt->height = clamp_t(u32, height, 32, fmt->height);
		break;
	}

	/* Data is written to memory unpacked, each 10-bit or 12-bit pixel is
	 * stored on 2 bytes.
	 */
	fmt->colorspace = V4L2_COLORSPACE_SRGB;
	fmt->field = V4L2_FIELD_NONE;
}
/*
 * ipipeif_enum_mbus_code - Handle pixel format enumeration
 * @sd : pointer to v4l2 subdev structure
 * @sd_state : V4L2 subdev state
 * @code : pointer to v4l2_subdev_mbus_code_enum structure
 * return -EINVAL or zero on success
 */
static int ipipeif_enum_mbus_code(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_mbus_code_enum *code)
{
	struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	switch (code->pad) {
	case IPIPEIF_PAD_SINK:
		/* The sink pad accepts any entry of the ipipeif_fmts table. */
		if (code->index >= ARRAY_SIZE(ipipeif_fmts))
			return -EINVAL;

		code->code = ipipeif_fmts[code->index];
		break;

	case IPIPEIF_PAD_SOURCE_ISIF_SF:
	case IPIPEIF_PAD_SOURCE_VP:
		/* No format conversion inside IPIPEIF */
		if (code->index != 0)
			return -EINVAL;

		format = __ipipeif_get_format(ipipeif, sd_state,
					      IPIPEIF_PAD_SINK,
					      code->which);

		code->code = format->code;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * ipipeif_enum_frame_size - Enumerate the supported frame size range
 * @sd: ISP IPIPEIF V4L2 subdevice
 * @sd_state: V4L2 subdev state
 * @fse: frame size enumeration, filled with the min/max for @fse->code
 *
 * Probe ipipeif_try_format() at its extremes to discover the clamping
 * range for the requested pad and code. Return 0 on success or -EINVAL.
 */
static int ipipeif_enum_frame_size(struct v4l2_subdev *sd,
				   struct v4l2_subdev_state *sd_state,
				   struct v4l2_subdev_frame_size_enum *fse)
{
	struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt format;

	if (fse->index != 0)
		return -EINVAL;

	/* Minimum: try the smallest possible size and see what sticks. */
	format.code = fse->code;
	format.width = 1;
	format.height = 1;
	ipipeif_try_format(ipipeif, sd_state, fse->pad, &format, fse->which);
	fse->min_width = format.width;
	fse->min_height = format.height;

	if (format.code != fse->code)
		return -EINVAL;

	/* Maximum: try the largest possible size (u32 wrap of -1). */
	format.code = fse->code;
	format.width = -1;
	format.height = -1;
	ipipeif_try_format(ipipeif, sd_state, fse->pad, &format, fse->which);
	fse->max_width = format.width;
	fse->max_height = format.height;

	return 0;
}
/*
* ipipeif_get_format - Retrieve the video format on a pad
* @sd : ISP IPIPEIF V4L2 subdevice
* @cfg: V4L2 subdev pad config
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
static int ipipeif_get_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
format = __ipipeif_get_format(ipipeif, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
fmt->format = *format;
return 0;
}
/*
 * ipipeif_set_format - Set the video format on a pad
 * @sd : ISP IPIPEIF V4L2 subdevice
 * @sd_state: V4L2 subdev state
 * @fmt: Format
 *
 * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
 * to the format type.
 */
static int ipipeif_set_format(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_format *fmt)
{
	struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __ipipeif_get_format(ipipeif, sd_state, fmt->pad, fmt->which);
	if (!format)
		return -EINVAL;

	ipipeif_try_format(ipipeif, sd_state, fmt->pad, &fmt->format,
			   fmt->which);
	*format = fmt->format;

	/* Propagate the format from sink to source */
	if (fmt->pad == IPIPEIF_PAD_SINK) {
		/* ISIF_SF source pad: seed with the sink format, then adjust. */
		format = __ipipeif_get_format(ipipeif, sd_state,
					      IPIPEIF_PAD_SOURCE_ISIF_SF,
					      fmt->which);
		*format = fmt->format;
		ipipeif_try_format(ipipeif, sd_state,
				   IPIPEIF_PAD_SOURCE_ISIF_SF,
				   format, fmt->which);

		/* VP source pad: same seeding and adjustment. */
		format = __ipipeif_get_format(ipipeif, sd_state,
					      IPIPEIF_PAD_SOURCE_VP,
					      fmt->which);
		*format = fmt->format;
		ipipeif_try_format(ipipeif, sd_state, IPIPEIF_PAD_SOURCE_VP,
				   format,
				   fmt->which);
	}

	return 0;
}
static int ipipeif_link_validate(struct v4l2_subdev *sd,
struct media_link *link,
struct v4l2_subdev_format *source_fmt,
struct v4l2_subdev_format *sink_fmt)
{
/* Check if the two ends match */
if (source_fmt->format.width != sink_fmt->format.width ||
source_fmt->format.height != sink_fmt->format.height)
return -EPIPE;
if (source_fmt->format.code != sink_fmt->format.code)
return -EPIPE;
return 0;
}
/*
* ipipeif_init_formats - Initialize formats on all pads
* @sd: ISP IPIPEIF V4L2 subdevice
* @fh: V4L2 subdev file handle
*
* Initialize all pad formats with default values. If fh is not NULL, try
* formats are initialized on the file handle. Otherwise active formats are
* initialized on the device.
*/
static int ipipeif_init_formats(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
struct v4l2_subdev_format format;
memset(&format, 0, sizeof(format));
format.pad = IPIPEIF_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
ipipeif_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
/* V4L2 subdev video operations */
static const struct v4l2_subdev_video_ops ipipeif_v4l2_video_ops = {
	.s_stream = ipipeif_set_stream,
};

/* V4L2 subdev pad operations */
static const struct v4l2_subdev_pad_ops ipipeif_v4l2_pad_ops = {
	.enum_mbus_code = ipipeif_enum_mbus_code,
	.enum_frame_size = ipipeif_enum_frame_size,
	.get_fmt = ipipeif_get_format,
	.set_fmt = ipipeif_set_format,
	.link_validate = ipipeif_link_validate,
};

/* V4L2 subdev operations */
static const struct v4l2_subdev_ops ipipeif_v4l2_ops = {
	.video = &ipipeif_v4l2_video_ops,
	.pad = &ipipeif_v4l2_pad_ops,
};

/* V4L2 subdev internal operations: set up default formats on first open. */
static const struct v4l2_subdev_internal_ops ipipeif_v4l2_internal_ops = {
	.open = ipipeif_init_formats,
};
/* -----------------------------------------------------------------------------
* Media entity operations
*/
/*
 * ipipeif_link_setup - Setup IPIPEIF connections
 * @entity: IPIPEIF media entity
 * @local: Pad at the local end of the link
 * @remote: Pad at the remote end of the link
 * @flags: Link flags
 *
 * return -EINVAL or zero on success
 */
static int ipipeif_link_setup(struct media_entity *entity,
			      const struct media_pad *local,
			      const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct iss_ipipeif_device *ipipeif = v4l2_get_subdevdata(sd);
	struct iss_device *iss = to_iss_device(ipipeif);
	unsigned int index = local->index;

	/*
	 * FIXME: this is actually a hack! Encode "remote is a subdev" in
	 * bit 17 of the index so subdev and video-node links to the same
	 * pad can be distinguished in the switch below.
	 */
	if (is_media_entity_v4l2_subdev(remote->entity))
		index |= 2 << 16;

	switch (index) {
	case IPIPEIF_PAD_SINK | 2 << 16:
		/* Read from the sensor CSI2a or CSI2b. */
		if (!(flags & MEDIA_LNK_FL_ENABLED)) {
			ipipeif->input = IPIPEIF_INPUT_NONE;
			break;
		}

		/* Only one input may be active at a time. */
		if (ipipeif->input != IPIPEIF_INPUT_NONE)
			return -EBUSY;

		if (remote->entity == &iss->csi2a.subdev.entity)
			ipipeif->input = IPIPEIF_INPUT_CSI2A;
		else if (remote->entity == &iss->csi2b.subdev.entity)
			ipipeif->input = IPIPEIF_INPUT_CSI2B;

		break;

	case IPIPEIF_PAD_SOURCE_ISIF_SF:
		/* Write to memory */
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (ipipeif->output & ~IPIPEIF_OUTPUT_MEMORY)
				return -EBUSY;
			ipipeif->output |= IPIPEIF_OUTPUT_MEMORY;
		} else {
			ipipeif->output &= ~IPIPEIF_OUTPUT_MEMORY;
		}
		break;

	case IPIPEIF_PAD_SOURCE_VP | 2 << 16:
		/* Send to IPIPE/RESIZER */
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (ipipeif->output & ~IPIPEIF_OUTPUT_VP)
				return -EBUSY;
			ipipeif->output |= IPIPEIF_OUTPUT_VP;
		} else {
			ipipeif->output &= ~IPIPEIF_OUTPUT_VP;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/* media operations */
static const struct media_entity_operations ipipeif_media_ops = {
	.link_setup = ipipeif_link_setup,
	.link_validate = v4l2_subdev_link_validate,
};
/*
 * ipipeif_init_entities - Initialize V4L2 subdev and media entity
 * @ipipeif: ISS ISP IPIPEIF module
 *
 * Return 0 on success and a negative error code on failure.
 */
static int ipipeif_init_entities(struct iss_ipipeif_device *ipipeif)
{
	struct v4l2_subdev *sd = &ipipeif->subdev;
	struct media_pad *pads = ipipeif->pads;
	struct media_entity *me = &sd->entity;
	int ret;

	ipipeif->input = IPIPEIF_INPUT_NONE;

	v4l2_subdev_init(sd, &ipipeif_v4l2_ops);
	sd->internal_ops = &ipipeif_v4l2_internal_ops;
	strscpy(sd->name, "OMAP4 ISS ISP IPIPEIF", sizeof(sd->name));
	sd->grp_id = BIT(16);	/* group ID for iss subdevs */
	v4l2_set_subdevdata(sd, ipipeif);
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;

	/* One sink pad and two source pads (memory and video port). */
	pads[IPIPEIF_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	pads[IPIPEIF_PAD_SOURCE_ISIF_SF].flags = MEDIA_PAD_FL_SOURCE;
	pads[IPIPEIF_PAD_SOURCE_VP].flags = MEDIA_PAD_FL_SOURCE;

	me->ops = &ipipeif_media_ops;
	ret = media_entity_pads_init(me, IPIPEIF_PADS_NUM, pads);
	if (ret < 0)
		return ret;

	ipipeif_init_formats(sd, NULL);

	/* Set up the memory output video node. */
	ipipeif->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ipipeif->video_out.ops = &ipipeif_video_ops;
	ipipeif->video_out.iss = to_iss_device(ipipeif);
	ipipeif->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
	ipipeif->video_out.bpl_alignment = 32;
	ipipeif->video_out.bpl_zero_padding = 1;
	ipipeif->video_out.bpl_max = 0x1ffe0;

	return omap4iss_video_init(&ipipeif->video_out, "ISP IPIPEIF");
}
/*
 * omap4iss_ipipeif_unregister_entities - Unregister the IPIPEIF subdev and
 * its memory output video node
 * @ipipeif: ISS ISP IPIPEIF module
 */
void omap4iss_ipipeif_unregister_entities(struct iss_ipipeif_device *ipipeif)
{
	v4l2_device_unregister_subdev(&ipipeif->subdev);
	omap4iss_video_unregister(&ipipeif->video_out);
}
/*
 * omap4iss_ipipeif_register_entities - Register the IPIPEIF subdev and its
 * memory output video node with the V4L2 device
 * @ipipeif: ISS ISP IPIPEIF module
 * @vdev: V4L2 device to register with
 *
 * Return 0 on success or a negative error code on failure; on failure any
 * partially registered entities are unregistered again.
 */
int omap4iss_ipipeif_register_entities(struct iss_ipipeif_device *ipipeif,
				       struct v4l2_device *vdev)
{
	int ret;

	/* Register the subdev and video node. */
	ret = v4l2_device_register_subdev(vdev, &ipipeif->subdev);
	if (ret < 0)
		goto error;

	ret = omap4iss_video_register(&ipipeif->video_out, vdev);
	if (ret < 0)
		goto error;

	return 0;

error:
	omap4iss_ipipeif_unregister_entities(ipipeif);
	return ret;
}
/* -----------------------------------------------------------------------------
* ISP IPIPEIF initialisation and cleanup
*/
/*
 * omap4iss_ipipeif_init - IPIPEIF module initialization.
 * @iss: Device pointer specific to the OMAP4 ISS.
 *
 * TODO: Get the initialisation values from platform data.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap4iss_ipipeif_init(struct iss_device *iss)
{
	struct iss_ipipeif_device *ipipeif = &iss->ipipeif;

	ipipeif->state = ISS_PIPELINE_STREAM_STOPPED;
	/* Wait queue used to synchronize module stop with the ISR. */
	init_waitqueue_head(&ipipeif->wait);

	return ipipeif_init_entities(ipipeif);
}
/*
* omap4iss_ipipeif_create_links() - IPIPEIF pads links creation
* @iss: Pointer to ISS device
*
* return negative error code or zero on success
*/
int omap4iss_ipipeif_create_links(struct iss_device *iss)
{
	struct iss_ipipeif_device *ipipeif = &iss->ipipeif;

	/* Connect the IPIPEIF subdev (ISIF source pad) to the video node. */
	return media_create_pad_link(&ipipeif->subdev.entity,
				     IPIPEIF_PAD_SOURCE_ISIF_SF,
				     &ipipeif->video_out.video.entity, 0, 0);
}
/*
* omap4iss_ipipeif_cleanup - IPIPEIF module cleanup.
* @iss: Device pointer specific to the OMAP4 ISS.
*/
void omap4iss_ipipeif_cleanup(struct iss_device *iss)
{
	struct iss_ipipeif_device *ipipeif = &iss->ipipeif;

	/* Release media entity resources allocated by ipipeif_init_entities(). */
	media_entity_cleanup(&ipipeif->subdev.entity);
}
| linux-master | drivers/staging/media/omap4iss/iss_ipipeif.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* TI OMAP4 ISS V4L2 Driver
*
* Copyright (C) 2012, Texas Instruments
*
* Author: Sergio Aguirre <[email protected]>
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
#include "iss.h"
#include "iss_regs.h"
#define ISS_PRINT_REGISTER(iss, name)\
dev_dbg(iss->dev, "###ISS " #name "=0x%08x\n", \
iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_##name))
/* Dump the ISS high-level registers to the kernel log (debug level only). */
static void iss_print_status(struct iss_device *iss)
{
	dev_dbg(iss->dev, "-------------ISS HL Register dump-------------\n");

	ISS_PRINT_REGISTER(iss, HL_REVISION);
	ISS_PRINT_REGISTER(iss, HL_SYSCONFIG);
	ISS_PRINT_REGISTER(iss, HL_IRQSTATUS(5));
	ISS_PRINT_REGISTER(iss, HL_IRQENABLE_SET(5));
	ISS_PRINT_REGISTER(iss, HL_IRQENABLE_CLR(5));
	ISS_PRINT_REGISTER(iss, CTRL);
	ISS_PRINT_REGISTER(iss, CLKCTRL);
	ISS_PRINT_REGISTER(iss, CLKSTAT);

	dev_dbg(iss->dev, "-----------------------------------------------\n");
}
/*
* omap4iss_flush - Post pending L3 bus writes by doing a register readback
* @iss: OMAP4 ISS device
*
* In order to force posting of pending writes, we need to write and
* readback the same register, in this case the revision register.
*
* See this link for reference:
* https://www.mail-archive.com/[email protected]/msg08149.html
*/
static void omap4iss_flush(struct iss_device *iss)
{
	/* Write-then-read the revision register to post pending L3 writes. */
	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION, 0);
	iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
}
/*
 * omap4iss_isp_enable_interrupts - Enable ISS ISP interrupts.
 * @iss: OMAP4 ISS device
 */
static void omap4iss_isp_enable_interrupts(struct iss_device *iss)
{
	static const u32 isp_irq = ISP5_IRQ_OCP_ERR |
				   ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
				   ISP5_IRQ_RSZ_FIFO_OVF |
				   ISP5_IRQ_RSZ_INT_DMA |
				   ISP5_IRQ_ISIF_INT(0);

	/* Clear any stale status bits first, then enable ISP interrupts. */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQSTATUS(0), isp_irq);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQENABLE_SET(0),
		      isp_irq);
}
/*
 * omap4iss_isp_disable_interrupts - Disable ISS ISP interrupts.
 * @iss: OMAP4 ISS device
 */
static void omap4iss_isp_disable_interrupts(struct iss_device *iss)
{
	/* Mask all ISP-level interrupt sources. */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQENABLE_CLR(0), ~0);
}
/*
* iss_enable_interrupts - Enable ISS interrupts.
* @iss: OMAP4 ISS device
*/
static void iss_enable_interrupts(struct iss_device *iss)
{
	static const u32 hl_irq = ISS_HL_IRQ_CSIA | ISS_HL_IRQ_CSIB
				| ISS_HL_IRQ_ISP(0);

	/* Clear stale status, then enable the high-level interrupt sources. */
	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5), hl_irq);
	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQENABLE_SET(5), hl_irq);

	/* ISP interrupts can only be touched once its region is mapped. */
	if (iss->regs[OMAP4_ISS_MEM_ISP_SYS1])
		omap4iss_isp_enable_interrupts(iss);
}
/*
* iss_disable_interrupts - Disable ISS interrupts.
* @iss: OMAP4 ISS device
*/
static void iss_disable_interrupts(struct iss_device *iss)
{
	/* Disable ISP-level interrupts first, then mask all HL sources. */
	if (iss->regs[OMAP4_ISS_MEM_ISP_SYS1])
		omap4iss_isp_disable_interrupts(iss);

	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQENABLE_CLR(5), ~0);
}
/*
 * omap4iss_get_external_info - Cache format/rate info of the external sensor
 * @pipe: ISS pipeline
 * @link: Media link from the external subdev to the ISS
 *
 * Queries the external subdev for its active format (to get bits per pixel)
 * and its V4L2_CID_PIXEL_RATE control, caching both in the pipeline. Does
 * nothing when the pipeline has no external entity or the rate is already
 * known. Returns 0 on success or -EPIPE on failure.
 */
int omap4iss_get_external_info(struct iss_pipeline *pipe,
			       struct media_link *link)
{
	struct iss_device *iss =
		container_of(pipe, struct iss_video, pipe)->iss;
	struct v4l2_subdev_format fmt;
	struct v4l2_ctrl *ctrl;
	int ret;

	if (!pipe->external)
		return 0;

	if (pipe->external_rate)
		return 0;

	memset(&fmt, 0, sizeof(fmt));

	/* Query the active format on the ISS-side (sink) end of the link. */
	fmt.pad = link->source->index;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(link->sink->entity),
			       pad, get_fmt, NULL, &fmt);
	if (ret < 0)
		return -EPIPE;

	pipe->external_bpp = omap4iss_video_format_info(fmt.format.code)->bpp;

	ctrl = v4l2_ctrl_find(pipe->external->ctrl_handler,
			      V4L2_CID_PIXEL_RATE);
	if (!ctrl) {
		dev_warn(iss->dev, "no pixel rate control in subdev %s\n",
			 pipe->external->name);
		return -EPIPE;
	}

	pipe->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);

	return 0;
}
/*
* Configure the bridge. Valid inputs are
*
* IPIPEIF_INPUT_CSI2A: CSI2a receiver
* IPIPEIF_INPUT_CSI2B: CSI2b receiver
*
* The bridge and lane shifter are configured according to the selected input
* and the ISP platform data.
*/
void omap4iss_configure_bridge(struct iss_device *iss,
			       enum ipipeif_input_entity input)
{
	u32 issctrl_val;
	u32 isp5ctrl_val;

	/* Read-modify-write: clear the input select and clock divider fields. */
	issctrl_val = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_CTRL);
	issctrl_val &= ~ISS_CTRL_INPUT_SEL_MASK;
	issctrl_val &= ~ISS_CTRL_CLK_DIV_MASK;

	isp5ctrl_val = iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL);

	switch (input) {
	case IPIPEIF_INPUT_CSI2A:
		issctrl_val |= ISS_CTRL_INPUT_SEL_CSI2A;
		break;

	case IPIPEIF_INPUT_CSI2B:
		issctrl_val |= ISS_CTRL_INPUT_SEL_CSI2B;
		break;

	default:
		/* Unsupported input: leave the registers untouched. */
		return;
	}

	issctrl_val |= ISS_CTRL_SYNC_DETECT_VS_RAISING;

	isp5ctrl_val |= ISP5_CTRL_VD_PULSE_EXT | ISP5_CTRL_PSYNC_CLK_SEL |
			ISP5_CTRL_SYNC_ENABLE;

	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_CTRL, issctrl_val);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL, isp5ctrl_val);
}
#ifdef ISS_ISR_DEBUG
/* Log the names of the asserted bits in the HL IRQ status (debug builds). */
static void iss_isr_dbg(struct iss_device *iss, u32 irqstatus)
{
	/* One name per HL IRQ status bit, index == bit position. */
	static const char * const name[] = {
		"ISP_0",
		"ISP_1",
		"ISP_2",
		"ISP_3",
		"CSIA",
		"CSIB",
		"CCP2_0",
		"CCP2_1",
		"CCP2_2",
		"CCP2_3",
		"CBUFF",
		"BTE",
		"SIMCOP_0",
		"SIMCOP_1",
		"SIMCOP_2",
		"SIMCOP_3",
		"CCP2_8",
		"HS_VS",
		"18",
		"19",
		"20",
		"21",
		"22",
		"23",
		"24",
		"25",
		"26",
		"27",
		"28",
		"29",
		"30",
		"31",
	};
	unsigned int i;

	dev_dbg(iss->dev, "ISS IRQ: ");

	for (i = 0; i < ARRAY_SIZE(name); i++) {
		if ((1 << i) & irqstatus)
			pr_cont("%s ", name[i]);
	}
	pr_cont("\n");
}
/* Log the names of the asserted bits in the ISP5 IRQ status (debug builds). */
static void iss_isp_isr_dbg(struct iss_device *iss, u32 irqstatus)
{
	/* One name per ISP5 IRQ status bit, index == bit position. */
	static const char * const name[] = {
		"ISIF_0",
		"ISIF_1",
		"ISIF_2",
		"ISIF_3",
		"IPIPEREQ",
		"IPIPELAST_PIX",
		"IPIPEDMA",
		"IPIPEBSC",
		"IPIPEHST",
		"IPIPEIF",
		"AEW",
		"AF",
		"H3A",
		"RSZ_REG",
		"RSZ_LAST_PIX",
		"RSZ_DMA",
		"RSZ_CYC_RZA",
		"RSZ_CYC_RZB",
		"RSZ_FIFO_OVF",
		"RSZ_FIFO_IN_BLK_ERR",
		"20",
		"21",
		"RSZ_EOF0",
		"RSZ_EOF1",
		"H3A_EOF",
		"IPIPE_EOF",
		"26",
		"IPIPE_DPC_INI",
		"IPIPE_DPC_RNEW0",
		"IPIPE_DPC_RNEW1",
		"30",
		"OCP_ERR",
	};
	unsigned int i;

	dev_dbg(iss->dev, "ISP IRQ: ");

	for (i = 0; i < ARRAY_SIZE(name); i++) {
		if ((1 << i) & irqstatus)
			pr_cont("%s ", name[i]);
	}
	pr_cont("\n");
}
#endif
/*
* iss_isr - Interrupt Service Routine for ISS module.
* @irq: Not used currently.
* @_iss: Pointer to the OMAP4 ISS device
*
* Handles the corresponding callback if plugged in.
*
* Returns IRQ_HANDLED when IRQ was correctly handled, or IRQ_NONE when the
* IRQ wasn't handled.
*/
static irqreturn_t iss_isr(int irq, void *_iss)
{
	/* ISP-level events routed to the IPIPEIF handler. */
	static const u32 ipipeif_events = ISP5_IRQ_IPIPEIF_IRQ |
					  ISP5_IRQ_ISIF_INT(0);
	/* ISP-level events routed to the resizer handler. */
	static const u32 resizer_events = ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
					  ISP5_IRQ_RSZ_FIFO_OVF |
					  ISP5_IRQ_RSZ_INT_DMA;
	struct iss_device *iss = _iss;
	u32 irqstatus;

	/* Read and acknowledge the high-level status before dispatching. */
	irqstatus = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5));
	iss_reg_write(iss, OMAP4_ISS_MEM_TOP, ISS_HL_IRQSTATUS(5), irqstatus);

	if (irqstatus & ISS_HL_IRQ_CSIA)
		omap4iss_csi2_isr(&iss->csi2a);

	if (irqstatus & ISS_HL_IRQ_CSIB)
		omap4iss_csi2_isr(&iss->csi2b);

	if (irqstatus & ISS_HL_IRQ_ISP(0)) {
		/* Demultiplex the second-level (ISP5) interrupt status. */
		u32 isp_irqstatus = iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1,
						 ISP5_IRQSTATUS(0));

		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_IRQSTATUS(0),
			      isp_irqstatus);

		if (isp_irqstatus & ISP5_IRQ_OCP_ERR)
			dev_dbg(iss->dev, "ISP5 OCP Error!\n");

		if (isp_irqstatus & ipipeif_events) {
			omap4iss_ipipeif_isr(&iss->ipipeif,
					     isp_irqstatus & ipipeif_events);
		}

		if (isp_irqstatus & resizer_events)
			omap4iss_resizer_isr(&iss->resizer,
					     isp_irqstatus & resizer_events);
#ifdef ISS_ISR_DEBUG
		iss_isp_isr_dbg(iss, isp_irqstatus);
#endif
	}

	/* Post the IRQ acknowledge writes before leaving the handler. */
	omap4iss_flush(iss);

#ifdef ISS_ISR_DEBUG
	iss_isr_dbg(iss, irqstatus);
#endif

	return IRQ_HANDLED;
}
/* Media device operations: route link setup through the V4L2 helper. */
static const struct media_device_ops iss_media_ops = {
	.link_notify = v4l2_pipeline_link_notify,
};
/* -----------------------------------------------------------------------------
* Pipeline stream management
*/
/*
* iss_pipeline_disable - Disable streaming on a pipeline
* @pipe: ISS pipeline
* @until: entity at which to stop pipeline walk
*
* Walk the entities chain starting at the pipeline output video node and stop
* all modules in the chain. Wait synchronously for the modules to be stopped if
* necessary.
*
* If the until argument isn't NULL, stop the pipeline walk when reaching the
* until entity. This is used to disable a partially started pipeline due to a
* subdev start error.
*/
static int iss_pipeline_disable(struct iss_pipeline *pipe,
				struct media_entity *until)
{
	struct iss_device *iss = pipe->output->iss;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int failure = 0;
	int ret;

	/* Walk upstream from the output video node, stopping each subdev. */
	entity = &pipe->output->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_pad_remote_pad_first(pad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			break;

		entity = pad->entity;
		/* Stop before the given entity when unwinding a failed start. */
		if (entity == until)
			break;

		subdev = media_entity_to_v4l2_subdev(entity);
		ret = v4l2_subdev_call(subdev, video, s_stream, 0);
		if (ret < 0) {
			dev_warn(iss->dev, "%s: module stop timeout.\n",
				 subdev->name);
			/* If the entity failed to stopped, assume it has
			 * crashed. Mark it as such, the ISS will be reset when
			 * applications will release it.
			 */
			media_entity_enum_set(&iss->crashed, &subdev->entity);
			failure = -ETIMEDOUT;
		}
	}

	/* Keep walking after a failure; report the first timeout seen. */
	return failure;
}
/*
* iss_pipeline_enable - Enable streaming on a pipeline
* @pipe: ISS pipeline
* @mode: Stream mode (single shot or continuous)
*
* Walk the entities chain starting at the pipeline output video node and start
* all modules in the chain in the given mode.
*
* Return 0 if successful, or the return value of the failed video::s_stream
* operation otherwise.
*/
static int iss_pipeline_enable(struct iss_pipeline *pipe,
			       enum iss_pipeline_stream_state mode)
{
	struct iss_device *iss = pipe->output->iss;
	struct media_entity *entity;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	unsigned long flags;
	int ret;

	/* If one of the entities in the pipeline has crashed it will not work
	 * properly. Refuse to start streaming in that case. This check must be
	 * performed before the loop below to avoid starting entities if the
	 * pipeline won't start anyway (those entities would then likely fail to
	 * stop, making the problem worse).
	 */
	if (media_entity_enum_intersects(&pipe->ent_enum, &iss->crashed))
		return -EIO;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~(ISS_PIPELINE_IDLE_INPUT | ISS_PIPELINE_IDLE_OUTPUT);
	spin_unlock_irqrestore(&pipe->lock, flags);

	pipe->do_propagation = false;

	/* Hold the graph mutex so the topology can't change while starting. */
	mutex_lock(&iss->media_dev.graph_mutex);

	/* Walk upstream from the output video node, starting each subdev. */
	entity = &pipe->output->video.entity;
	while (1) {
		pad = &entity->pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		pad = media_pad_remote_pad_first(pad);
		if (!pad || !is_media_entity_v4l2_subdev(pad->entity))
			break;

		entity = pad->entity;
		subdev = media_entity_to_v4l2_subdev(entity);

		ret = v4l2_subdev_call(subdev, video, s_stream, mode);
		if (ret < 0 && ret != -ENOIOCTLCMD) {
			/* Unwind: stop every subdev started so far. */
			iss_pipeline_disable(pipe, entity);
			mutex_unlock(&iss->media_dev.graph_mutex);
			return ret;
		}

		/* Frame number propagation is driven by the CSI2 receivers. */
		if (subdev == &iss->csi2a.subdev ||
		    subdev == &iss->csi2b.subdev)
			pipe->do_propagation = true;
	}

	mutex_unlock(&iss->media_dev.graph_mutex);
	iss_print_status(pipe->output->iss);

	return 0;
}
/*
* omap4iss_pipeline_set_stream - Enable/disable streaming on a pipeline
* @pipe: ISS pipeline
* @state: Stream state (stopped, single shot or continuous)
*
* Set the pipeline to the given stream state. Pipelines can be started in
* single-shot or continuous mode.
*
* Return 0 if successful, or the return value of the failed video::s_stream
* operation otherwise. The pipeline state is not updated when the operation
* fails, except when stopping the pipeline.
*/
int omap4iss_pipeline_set_stream(struct iss_pipeline *pipe,
enum iss_pipeline_stream_state state)
{
int ret;
if (state == ISS_PIPELINE_STREAM_STOPPED)
ret = iss_pipeline_disable(pipe, NULL);
else
ret = iss_pipeline_enable(pipe, state);
if (ret == 0 || state == ISS_PIPELINE_STREAM_STOPPED)
pipe->stream_state = state;
return ret;
}
/*
* omap4iss_pipeline_cancel_stream - Cancel stream on a pipeline
* @pipe: ISS pipeline
*
* Cancelling a stream mark all buffers on all video nodes in the pipeline as
* erroneous and makes sure no new buffer can be queued. This function is called
* when a fatal error that prevents any further operation on the pipeline
* occurs.
*/
void omap4iss_pipeline_cancel_stream(struct iss_pipeline *pipe)
{
	/* Cancel on both ends of the pipeline, skipping absent video nodes. */
	struct iss_video *nodes[] = { pipe->input, pipe->output };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(nodes); i++) {
		if (nodes[i])
			omap4iss_video_cancel_stream(nodes[i]);
	}
}
/*
* iss_pipeline_is_last - Verify if entity has an enabled link to the output
* video node
* @me: ISS module's media entity
*
* Returns 1 if the entity has an enabled link to the output video node or 0
* otherwise. It's true only while pipeline can have no more than one output
* node.
*/
static int iss_pipeline_is_last(struct media_entity *me)
{
	struct iss_pipeline *pipe;
	struct media_pad *pad;

	pipe = to_iss_pipeline(me);
	/* A stopped (or absent) pipeline has no active output link. */
	if (!pipe || pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED)
		return 0;

	/* The entity feeding the output video node is the last one. */
	pad = media_pad_remote_pad_first(&pipe->output->pad);
	return pad->entity == me;
}
/*
 * iss_reset - Soft-reset the whole ISS and clear the crashed-entities mask
 * @iss: OMAP4 ISS device
 *
 * Return 0 on success or -ETIMEDOUT if the reset bit does not self-clear.
 */
static int iss_reset(struct iss_device *iss)
{
	unsigned int timeout;

	iss_reg_set(iss, OMAP4_ISS_MEM_TOP, ISS_HL_SYSCONFIG,
		    ISS_HL_SYSCONFIG_SOFTRESET);

	/* Hardware clears SOFTRESET when the reset completes. */
	timeout = iss_poll_condition_timeout(
		!(iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_SYSCONFIG) &
		ISS_HL_SYSCONFIG_SOFTRESET), 1000, 10, 100);
	if (timeout) {
		dev_err(iss->dev, "ISS reset timeout\n");
		return -ETIMEDOUT;
	}

	/* All modules were reset; none can be considered crashed any more. */
	media_entity_enum_zero(&iss->crashed);

	return 0;
}
/*
 * iss_isp_reset - Soft-reset the ISP5 subsystem
 * @iss: OMAP4 ISS device
 *
 * The ISP must first be put in standby (idle, no outstanding transactions)
 * before the soft reset is triggered. Return 0 on success or -ETIMEDOUT if
 * either the standby handshake or the reset completion times out.
 */
static int iss_isp_reset(struct iss_device *iss)
{
	unsigned int timeout;

	/* First, ensure that the ISP is IDLE (no transactions happening) */
	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG,
		       ISP5_SYSCONFIG_STANDBYMODE_MASK,
		       ISP5_SYSCONFIG_STANDBYMODE_SMART);

	iss_reg_set(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL, ISP5_CTRL_MSTANDBY);

	timeout = iss_poll_condition_timeout(
		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL) &
		ISP5_CTRL_MSTANDBY_WAIT, 1000000, 1000, 1500);
	if (timeout) {
		dev_err(iss->dev, "ISP5 standby timeout\n");
		return -ETIMEDOUT;
	}

	/* Now finally, do the reset */
	iss_reg_set(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG,
		    ISP5_SYSCONFIG_SOFTRESET);

	/* Hardware clears SOFTRESET when the reset completes. */
	timeout = iss_poll_condition_timeout(
		!(iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_SYSCONFIG) &
		ISP5_SYSCONFIG_SOFTRESET), 1000000, 1000, 1500);
	if (timeout) {
		dev_err(iss->dev, "ISP5 reset timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}
/*
* iss_module_sync_idle - Helper to sync module with its idle state
* @me: ISS submodule's media entity
* @wait: ISS submodule's wait queue for streamoff/interrupt synchronization
* @stopping: flag which tells module wants to stop
*
* This function checks if ISS submodule needs to wait for next interrupt. If
* yes, makes the caller to sleep while waiting for such event.
*/
int omap4iss_module_sync_idle(struct media_entity *me, wait_queue_head_t *wait,
			      atomic_t *stopping)
{
	struct iss_pipeline *pipe = to_iss_pipeline(me);
	struct iss_video *video = pipe->output;
	unsigned long flags;

	/* Nothing to wait for when stopped, or in single-shot mode with an
	 * unfinished pipeline (no frame in flight).
	 */
	if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED ||
	    (pipe->stream_state == ISS_PIPELINE_STREAM_SINGLESHOT &&
	     !iss_pipeline_ready(pipe)))
		return 0;

	/*
	 * atomic_set() doesn't include memory barrier on ARM platform for SMP
	 * scenario. We'll call it here to avoid race conditions.
	 */
	atomic_set(stopping, 1);
	smp_wmb();

	/*
	 * If module is the last one, it's writing to memory. In this case,
	 * it's necessary to check if the module is already paused due to
	 * DMA queue underrun or if it has to wait for next interrupt to be
	 * idle.
	 * If it isn't the last one, the function won't sleep but *stopping
	 * will still be set to warn next submodule caller's interrupt the
	 * module wants to be idle.
	 */
	if (!iss_pipeline_is_last(me))
		return 0;

	spin_lock_irqsave(&video->qlock, flags);
	if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
		/* Already paused on DMA underrun: no interrupt will come. */
		spin_unlock_irqrestore(&video->qlock, flags);
		atomic_set(stopping, 0);
		smp_wmb();
		return 0;
	}
	spin_unlock_irqrestore(&video->qlock, flags);

	/* Sleep until the ISR clears *stopping, or give up after 1 second. */
	if (!wait_event_timeout(*wait, !atomic_read(stopping),
				msecs_to_jiffies(1000))) {
		atomic_set(stopping, 0);
		smp_wmb();
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * omap4iss_module_sync_is_stopping - Helper to verify if module was stopping
 * @wait: ISS submodule's wait queue for streamoff/interrupt synchronization
 * @stopping: flag which tells module wants to stop
 *
 * This function checks if ISS submodule was stopping. If so, it notifies the
 * caller by setting stopping to 0 and waking up the wait queue.
 * Returns 1 if it was stopping or 0 otherwise.
 */
int omap4iss_module_sync_is_stopping(wait_queue_head_t *wait,
				     atomic_t *stopping)
{
	/* Atomically consume the stop request, if one was pending. */
	int was_stopping = atomic_cmpxchg(stopping, 1, 0);

	if (!was_stopping)
		return 0;

	/* Wake the thread sleeping in omap4iss_module_sync_idle(). */
	wake_up(wait);
	return 1;
}
/* --------------------------------------------------------------------------
* Clock management
*/
#define ISS_CLKCTRL_MASK (ISS_CLKCTRL_CSI2_A |\
ISS_CLKCTRL_CSI2_B |\
ISS_CLKCTRL_ISP)
/*
 * __iss_subclk_update - Program ISS_CLKCTRL from the cached resource mask
 * @iss: OMAP4 ISS device
 *
 * Translates iss->subclk_resources into ISS_CLKCTRL bits and busy-waits
 * (up to ~1ms) for ISS_CLKSTAT to reflect the change. Return 0 on success
 * or -EBUSY if the hardware never acknowledged.
 */
static int __iss_subclk_update(struct iss_device *iss)
{
	u32 clk = 0;
	int ret = 0, timeout = 1000;

	if (iss->subclk_resources & OMAP4_ISS_SUBCLK_CSI2_A)
		clk |= ISS_CLKCTRL_CSI2_A;

	if (iss->subclk_resources & OMAP4_ISS_SUBCLK_CSI2_B)
		clk |= ISS_CLKCTRL_CSI2_B;

	if (iss->subclk_resources & OMAP4_ISS_SUBCLK_ISP)
		clk |= ISS_CLKCTRL_ISP;

	iss_reg_update(iss, OMAP4_ISS_MEM_TOP, ISS_CLKCTRL,
		       ISS_CLKCTRL_MASK, clk);

	/* Wait for HW assertion */
	while (--timeout > 0) {
		udelay(1);
		if ((iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_CLKSTAT) &
		    ISS_CLKCTRL_MASK) == clk)
			break;
	}

	if (!timeout)
		ret = -EBUSY;

	return ret;
}
/* Mark the given sub-clock resource(s) in use and reprogram ISS_CLKCTRL. */
int omap4iss_subclk_enable(struct iss_device *iss,
			   enum iss_subclk_resource res)
{
	iss->subclk_resources |= res;

	return __iss_subclk_update(iss);
}
/* Mark the given sub-clock resource(s) unused and reprogram ISS_CLKCTRL. */
int omap4iss_subclk_disable(struct iss_device *iss,
			    enum iss_subclk_resource res)
{
	iss->subclk_resources &= ~res;

	return __iss_subclk_update(iss);
}
#define ISS_ISP5_CLKCTRL_MASK (ISP5_CTRL_BL_CLK_ENABLE |\
ISP5_CTRL_ISIF_CLK_ENABLE |\
ISP5_CTRL_H3A_CLK_ENABLE |\
ISP5_CTRL_RSZ_CLK_ENABLE |\
ISP5_CTRL_IPIPE_CLK_ENABLE |\
ISP5_CTRL_IPIPEIF_CLK_ENABLE)
/*
 * __iss_isp_subclk_update - Program ISP5_CTRL clock enables from the cache
 * @iss: OMAP4 ISS device
 *
 * Translates iss->isp_subclk_resources into ISP5_CTRL clock-enable bits.
 */
static void __iss_isp_subclk_update(struct iss_device *iss)
{
	u32 clk = 0;

	if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_ISIF)
		clk |= ISP5_CTRL_ISIF_CLK_ENABLE;

	if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_H3A)
		clk |= ISP5_CTRL_H3A_CLK_ENABLE;

	if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_RSZ)
		clk |= ISP5_CTRL_RSZ_CLK_ENABLE;

	if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_IPIPE)
		clk |= ISP5_CTRL_IPIPE_CLK_ENABLE;

	if (iss->isp_subclk_resources & OMAP4_ISS_ISP_SUBCLK_IPIPEIF)
		clk |= ISP5_CTRL_IPIPEIF_CLK_ENABLE;

	/* The buffer logic clock is needed whenever any submodule runs. */
	if (clk)
		clk |= ISP5_CTRL_BL_CLK_ENABLE;

	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_CTRL,
		       ISS_ISP5_CLKCTRL_MASK, clk);
}
/* Mark the given ISP sub-clock resource(s) in use and reprogram ISP5_CTRL. */
void omap4iss_isp_subclk_enable(struct iss_device *iss,
				enum iss_isp_subclk_resource res)
{
	iss->isp_subclk_resources |= res;

	__iss_isp_subclk_update(iss);
}
/* Mark the given ISP sub-clock resource(s) unused and reprogram ISP5_CTRL. */
void omap4iss_isp_subclk_disable(struct iss_device *iss,
				 enum iss_isp_subclk_resource res)
{
	iss->isp_subclk_resources &= ~res;

	__iss_isp_subclk_update(iss);
}
/*
 * iss_enable_clocks - Enable ISS clocks
 * @iss: OMAP4 ISS device
 *
 * Return 0 if successful, or clk_enable return value if any of them fails.
 */
static int iss_enable_clocks(struct iss_device *iss)
{
	int ret;

	ret = clk_enable(iss->iss_fck);
	if (ret) {
		dev_err(iss->dev, "clk_enable iss_fck failed\n");
		return ret;
	}

	ret = clk_enable(iss->iss_ctrlclk);
	if (ret) {
		dev_err(iss->dev, "clk_enable iss_ctrlclk failed\n");
		/* Undo the first enable so the clocks stay balanced. */
		clk_disable(iss->iss_fck);
		return ret;
	}

	return 0;
}
/*
* iss_disable_clocks - Disable ISS clocks
* @iss: OMAP4 ISS device
*/
static void iss_disable_clocks(struct iss_device *iss)
{
	/* Disable in reverse order of iss_enable_clocks(). */
	clk_disable(iss->iss_ctrlclk);
	clk_disable(iss->iss_fck);
}
/*
 * iss_get_clocks - Look up the ISS functional and control clocks
 * @iss: OMAP4 ISS device
 *
 * Uses devm_clk_get() so no explicit release is needed. Return 0 on success
 * or the clock framework's error code otherwise.
 */
static int iss_get_clocks(struct iss_device *iss)
{
	iss->iss_fck = devm_clk_get(iss->dev, "iss_fck");
	if (IS_ERR(iss->iss_fck)) {
		dev_err(iss->dev, "Unable to get iss_fck clock info\n");
		return PTR_ERR(iss->iss_fck);
	}

	iss->iss_ctrlclk = devm_clk_get(iss->dev, "iss_ctrlclk");
	if (IS_ERR(iss->iss_ctrlclk)) {
		dev_err(iss->dev, "Unable to get iss_ctrlclk clock info\n");
		return PTR_ERR(iss->iss_ctrlclk);
	}

	return 0;
}
/*
* omap4iss_get - Acquire the ISS resource.
*
* Initializes the clocks for the first acquire.
*
* Increment the reference count on the ISS. If the first reference is taken,
* enable clocks and power-up all submodules.
*
* Return a pointer to the ISS device structure, or NULL if an error occurred.
*/
struct iss_device *omap4iss_get(struct iss_device *iss)
{
	struct iss_device *__iss = iss;

	if (!iss)
		return NULL;

	mutex_lock(&iss->iss_mutex);
	/* Only the first reference powers the hardware up. */
	if (iss->ref_count > 0)
		goto out;

	if (iss_enable_clocks(iss) < 0) {
		/* Signal failure; the reference count stays untouched. */
		__iss = NULL;
		goto out;
	}

	iss_enable_interrupts(iss);

out:
	if (__iss)
		iss->ref_count++;
	mutex_unlock(&iss->iss_mutex);

	return __iss;
}
/*
* omap4iss_put - Release the ISS
*
* Decrement the reference count on the ISS. If the last reference is released,
* power-down all submodules, disable clocks and free temporary buffers.
*/
void omap4iss_put(struct iss_device *iss)
{
	if (!iss)
		return;

	mutex_lock(&iss->iss_mutex);
	WARN_ON(iss->ref_count == 0);
	/* Only the last reference powers the hardware down. */
	if (--iss->ref_count == 0) {
		iss_disable_interrupts(iss);
		/* Reset the ISS if an entity has failed to stop. This is the
		 * only way to recover from such conditions, although it would
		 * be worth investigating whether resetting the ISP only can't
		 * fix the problem in some cases.
		 */
		if (!media_entity_enum_empty(&iss->crashed))
			iss_reset(iss);
		iss_disable_clocks(iss);
	}
	mutex_unlock(&iss->iss_mutex);
}
/*
 * iss_map_mem_resource - ioremap one ISS memory region
 * @pdev: Platform device providing the memory resources
 * @iss: OMAP4 ISS device
 * @res: Region index; doubles as the platform resource index
 *
 * Return 0 on success or the ioremap error code otherwise.
 */
static int iss_map_mem_resource(struct platform_device *pdev,
				struct iss_device *iss,
				enum iss_mem_resources res)
{
	iss->regs[res] = devm_platform_ioremap_resource(pdev, res);

	return PTR_ERR_OR_ZERO(iss->regs[res]);
}
/* Unregister all submodule entities, then the V4L2 and media devices. */
static void iss_unregister_entities(struct iss_device *iss)
{
	omap4iss_resizer_unregister_entities(&iss->resizer);
	omap4iss_ipipe_unregister_entities(&iss->ipipe);
	omap4iss_ipipeif_unregister_entities(&iss->ipipeif);
	omap4iss_csi2_unregister_entities(&iss->csi2a);
	omap4iss_csi2_unregister_entities(&iss->csi2b);

	v4l2_device_unregister(&iss->v4l2_dev);
	media_device_unregister(&iss->media_dev);
}
/*
* iss_register_subdev_group - Register a group of subdevices
* @iss: OMAP4 ISS device
* @board_info: I2C subdevs board information array
*
* Register all I2C subdevices in the board_info array. The array must be
* terminated by a NULL entry, and the first entry must be the sensor.
*
* Return a pointer to the sensor media entity if it has been successfully
* registered, or NULL otherwise.
*/
static struct v4l2_subdev *
iss_register_subdev_group(struct iss_device *iss,
			  struct iss_subdev_i2c_board_info *board_info)
{
	struct v4l2_subdev *sensor = NULL;
	unsigned int first;

	if (!board_info->board_info)
		return NULL;

	/* Register every subdev; the first successful one is the sensor. */
	for (first = 1; board_info->board_info; ++board_info, first = 0) {
		struct v4l2_subdev *subdev;
		struct i2c_adapter *adapter;

		adapter = i2c_get_adapter(board_info->i2c_adapter_id);
		if (!adapter) {
			/* Skip this device but keep registering the others. */
			dev_err(iss->dev,
				"%s: Unable to get I2C adapter %d for device %s\n",
				__func__, board_info->i2c_adapter_id,
				board_info->board_info->type);
			continue;
		}

		subdev = v4l2_i2c_new_subdev_board(&iss->v4l2_dev, adapter,
						   board_info->board_info, NULL);
		if (!subdev) {
			dev_err(iss->dev, "Unable to register subdev %s\n",
				board_info->board_info->type);
			continue;
		}

		if (first)
			sensor = subdev;
	}

	return sensor;
}
/*
 * iss_register_entities - Register the media/V4L2 devices and all entities
 * @iss: OMAP4 ISS device
 *
 * Registers the media device, the V4L2 device, all internal submodule
 * entities and the external sensors described in the platform data, then
 * creates the sensor-to-receiver links. Return 0 on success or a negative
 * error code otherwise; on failure everything registered so far is
 * unregistered.
 */
static int iss_register_entities(struct iss_device *iss)
{
	struct iss_platform_data *pdata = iss->pdata;
	struct iss_v4l2_subdevs_group *subdevs;
	int ret;

	iss->media_dev.dev = iss->dev;
	strscpy(iss->media_dev.model, "TI OMAP4 ISS",
		sizeof(iss->media_dev.model));
	iss->media_dev.hw_revision = iss->revision;
	iss->media_dev.ops = &iss_media_ops;
	ret = media_device_register(&iss->media_dev);
	if (ret < 0) {
		dev_err(iss->dev, "Media device registration failed (%d)\n",
			ret);
		return ret;
	}

	iss->v4l2_dev.mdev = &iss->media_dev;
	ret = v4l2_device_register(iss->dev, &iss->v4l2_dev);
	if (ret < 0) {
		dev_err(iss->dev, "V4L2 device registration failed (%d)\n",
			ret);
		goto done;
	}

	/* Register internal entities */
	ret = omap4iss_csi2_register_entities(&iss->csi2a, &iss->v4l2_dev);
	if (ret < 0)
		goto done;

	ret = omap4iss_csi2_register_entities(&iss->csi2b, &iss->v4l2_dev);
	if (ret < 0)
		goto done;

	ret = omap4iss_ipipeif_register_entities(&iss->ipipeif, &iss->v4l2_dev);
	if (ret < 0)
		goto done;

	ret = omap4iss_ipipe_register_entities(&iss->ipipe, &iss->v4l2_dev);
	if (ret < 0)
		goto done;

	ret = omap4iss_resizer_register_entities(&iss->resizer, &iss->v4l2_dev);
	if (ret < 0)
		goto done;

	/* Register external entities */
	for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
		struct v4l2_subdev *sensor;
		struct media_entity *input;
		unsigned int flags;
		unsigned int pad;

		sensor = iss_register_subdev_group(iss, subdevs->subdevs);
		if (!sensor)
			continue;

		sensor->host_priv = subdevs;

		/* Connect the sensor to the correct interface module.
		 * CSI2a receiver through CSIPHY1, or
		 * CSI2b receiver through CSIPHY2
		 */
		switch (subdevs->interface) {
		case ISS_INTERFACE_CSI2A_PHY1:
			input = &iss->csi2a.subdev.entity;
			pad = CSI2_PAD_SINK;
			flags = MEDIA_LNK_FL_IMMUTABLE
				| MEDIA_LNK_FL_ENABLED;
			break;

		case ISS_INTERFACE_CSI2B_PHY2:
			input = &iss->csi2b.subdev.entity;
			pad = CSI2_PAD_SINK;
			flags = MEDIA_LNK_FL_IMMUTABLE
				| MEDIA_LNK_FL_ENABLED;
			break;

		default:
			dev_err(iss->dev, "invalid interface type %u\n",
				subdevs->interface);
			ret = -EINVAL;
			goto done;
		}

		ret = media_create_pad_link(&sensor->entity, 0, input, pad,
					    flags);
		if (ret < 0)
			goto done;
	}

	ret = v4l2_device_register_subdev_nodes(&iss->v4l2_dev);

done:
	if (ret < 0)
		iss_unregister_entities(iss);

	return ret;
}
/*
* iss_create_links() - Pads links creation for the subdevices
* @iss : Pointer to ISS device
*
* return negative error code or zero on success
*/
/*
 * iss_create_links() - Pads links creation for the subdevices
 * @iss: Pointer to ISS device
 *
 * Creates the per-submodule links (CSI2, IPIPEIF, resizer) and then the
 * inter-submodule links: both CSI2 receivers feed the IPIPEIF, whose video
 * port feeds either the resizer directly or the IPIPE, which in turn feeds
 * the resizer.
 *
 * Return negative error code or zero on success.
 */
static int iss_create_links(struct iss_device *iss)
{
	int ret;

	ret = omap4iss_csi2_create_links(iss);
	if (ret < 0) {
		dev_err(iss->dev, "CSI2 pads links creation failed\n");
		return ret;
	}

	ret = omap4iss_ipipeif_create_links(iss);
	if (ret < 0) {
		dev_err(iss->dev, "ISP IPIPEIF pads links creation failed\n");
		return ret;
	}

	ret = omap4iss_resizer_create_links(iss);
	if (ret < 0) {
		dev_err(iss->dev, "ISP RESIZER pads links creation failed\n");
		return ret;
	}

	/* Connect the submodules. */
	ret = media_create_pad_link(
			&iss->csi2a.subdev.entity, CSI2_PAD_SOURCE,
			&iss->ipipeif.subdev.entity, IPIPEIF_PAD_SINK, 0);
	if (ret < 0)
		return ret;

	ret = media_create_pad_link(
			&iss->csi2b.subdev.entity, CSI2_PAD_SOURCE,
			&iss->ipipeif.subdev.entity, IPIPEIF_PAD_SINK, 0);
	if (ret < 0)
		return ret;

	ret = media_create_pad_link(
			&iss->ipipeif.subdev.entity, IPIPEIF_PAD_SOURCE_VP,
			&iss->resizer.subdev.entity, RESIZER_PAD_SINK, 0);
	if (ret < 0)
		return ret;

	ret = media_create_pad_link(
			&iss->ipipeif.subdev.entity, IPIPEIF_PAD_SOURCE_VP,
			&iss->ipipe.subdev.entity, IPIPE_PAD_SINK, 0);
	if (ret < 0)
		return ret;

	ret = media_create_pad_link(
			&iss->ipipe.subdev.entity, IPIPE_PAD_SOURCE_VP,
			&iss->resizer.subdev.entity, RESIZER_PAD_SINK, 0);
	if (ret < 0)
		return ret;

	return 0;
}
/* Clean up all submodules initialized by iss_initialize_modules(). */
static void iss_cleanup_modules(struct iss_device *iss)
{
	omap4iss_csi2_cleanup(iss);
	omap4iss_ipipeif_cleanup(iss);
	omap4iss_ipipe_cleanup(iss);
	omap4iss_resizer_cleanup(iss);
}
/*
 * iss_initialize_modules - Initialize all ISS submodules
 * @iss: OMAP4 ISS device
 *
 * Initializes the CSI PHY, CSI2 receivers, IPIPEIF, IPIPE and resizer in
 * order. On failure the modules initialized so far are cleaned up in
 * reverse order. Return 0 on success or a negative error code otherwise.
 */
static int iss_initialize_modules(struct iss_device *iss)
{
	int ret;

	ret = omap4iss_csiphy_init(iss);
	if (ret < 0) {
		dev_err(iss->dev, "CSI PHY initialization failed\n");
		goto error_csiphy;
	}

	ret = omap4iss_csi2_init(iss);
	if (ret < 0) {
		dev_err(iss->dev, "CSI2 initialization failed\n");
		goto error_csi2;
	}

	ret = omap4iss_ipipeif_init(iss);
	if (ret < 0) {
		dev_err(iss->dev, "ISP IPIPEIF initialization failed\n");
		goto error_ipipeif;
	}

	ret = omap4iss_ipipe_init(iss);
	if (ret < 0) {
		dev_err(iss->dev, "ISP IPIPE initialization failed\n");
		goto error_ipipe;
	}

	ret = omap4iss_resizer_init(iss);
	if (ret < 0) {
		dev_err(iss->dev, "ISP RESIZER initialization failed\n");
		goto error_resizer;
	}

	return 0;

	/* Unwind in reverse initialization order. */
error_resizer:
	omap4iss_ipipe_cleanup(iss);
error_ipipe:
	omap4iss_ipipeif_cleanup(iss);
error_ipipeif:
	omap4iss_csi2_cleanup(iss);
error_csi2:
error_csiphy:
	return ret;
}
/*
 * iss_probe - Platform device probe handler
 * @pdev: Platform device
 *
 * Maps the register regions, resets the ISS and ISP, requests the IRQ,
 * initializes and registers all entities, and creates the internal links.
 * The device is held powered via omap4iss_get() for the duration of probe
 * and released before returning. Return 0 on success or a negative error
 * code otherwise.
 */
static int iss_probe(struct platform_device *pdev)
{
	struct iss_platform_data *pdata = pdev->dev.platform_data;
	struct iss_device *iss;
	unsigned int i;
	int ret;

	if (!pdata)
		return -EINVAL;

	iss = devm_kzalloc(&pdev->dev, sizeof(*iss), GFP_KERNEL);
	if (!iss)
		return -ENOMEM;

	mutex_init(&iss->iss_mutex);

	iss->dev = &pdev->dev;
	iss->pdata = pdata;

	iss->raw_dmamask = DMA_BIT_MASK(32);
	iss->dev->dma_mask = &iss->raw_dmamask;
	iss->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	platform_set_drvdata(pdev, iss);

	/*
	 * TODO: When implementing DT support switch to syscon regmap lookup by
	 * phandle.
	 */
	iss->syscon = syscon_regmap_lookup_by_compatible("syscon");
	if (IS_ERR(iss->syscon)) {
		ret = PTR_ERR(iss->syscon);
		goto error;
	}

	/* Clocks */
	ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP);
	if (ret < 0)
		goto error;

	ret = iss_get_clocks(iss);
	if (ret < 0)
		goto error;

	/* Power the device up for the rest of the probe sequence. */
	if (!omap4iss_get(iss)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iss_reset(iss);
	if (ret < 0)
		goto error_iss;

	iss->revision = iss_reg_read(iss, OMAP4_ISS_MEM_TOP, ISS_HL_REVISION);
	dev_info(iss->dev, "Revision %08x found\n", iss->revision);

	/* Map the remaining regions (index 0, MEM_TOP, is already mapped). */
	for (i = 1; i < OMAP4_ISS_MEM_LAST; i++) {
		ret = iss_map_mem_resource(pdev, iss, i);
		if (ret)
			goto error_iss;
	}

	/* Configure BTE BW_LIMITER field to max recommended value (1 GB) */
	iss_reg_update(iss, OMAP4_ISS_MEM_BTE, BTE_CTRL,
		       BTE_CTRL_BW_LIMITER_MASK,
		       18 << BTE_CTRL_BW_LIMITER_SHIFT);

	/* Perform ISP reset */
	ret = omap4iss_subclk_enable(iss, OMAP4_ISS_SUBCLK_ISP);
	if (ret < 0)
		goto error_iss;

	ret = iss_isp_reset(iss);
	if (ret < 0)
		goto error_iss;

	dev_info(iss->dev, "ISP Revision %08x found\n",
		 iss_reg_read(iss, OMAP4_ISS_MEM_ISP_SYS1, ISP5_REVISION));

	/* Interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		ret = -ENODEV;
		goto error_iss;
	}
	iss->irq_num = ret;

	if (devm_request_irq(iss->dev, iss->irq_num, iss_isr, IRQF_SHARED,
			     "OMAP4 ISS", iss)) {
		dev_err(iss->dev, "Unable to request IRQ\n");
		ret = -EINVAL;
		goto error_iss;
	}

	/* Entities */
	ret = iss_initialize_modules(iss);
	if (ret < 0)
		goto error_iss;

	ret = iss_register_entities(iss);
	if (ret < 0)
		goto error_modules;

	ret = media_entity_enum_init(&iss->crashed, &iss->media_dev);
	if (ret)
		goto error_entities;

	ret = iss_create_links(iss);
	if (ret < 0)
		goto error_entities;

	/* Drop the probe-time power reference. */
	omap4iss_put(iss);

	return 0;

error_entities:
	iss_unregister_entities(iss);
	media_entity_enum_cleanup(&iss->crashed);
error_modules:
	iss_cleanup_modules(iss);
error_iss:
	omap4iss_put(iss);
error:
	mutex_destroy(&iss->iss_mutex);

	return ret;
}
/* Platform device remove handler: tear down entities and submodules. */
static void iss_remove(struct platform_device *pdev)
{
	struct iss_device *iss = platform_get_drvdata(pdev);

	iss_unregister_entities(iss);
	media_entity_enum_cleanup(&iss->crashed);
	iss_cleanup_modules(iss);
}
/* Platform device IDs this driver binds to. */
static const struct platform_device_id omap4iss_id_table[] = {
	{ "omap4iss", 0 },
	{ },
};
MODULE_DEVICE_TABLE(platform, omap4iss_id_table);
/* Platform driver glue for the OMAP4 ISS. */
static struct platform_driver iss_driver = {
	.probe		= iss_probe,
	.remove_new	= iss_remove,
	.id_table	= omap4iss_id_table,
	.driver = {
		.name	= "omap4iss",
	},
};
module_platform_driver(iss_driver);
MODULE_DESCRIPTION("TI OMAP4 ISS driver");
MODULE_AUTHOR("Sergio Aguirre <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/omap4iss/iss.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* TI OMAP4 ISS V4L2 Driver - CSI PHY module
*
* Copyright (C) 2012 Texas Instruments, Inc.
*
* Author: Sergio Aguirre <[email protected]>
*/
#include <linux/delay.h>
#include <media/v4l2-common.h>
#include <linux/v4l2-mediabus.h>
#include <linux/mm.h>
#include "iss.h"
#include "iss_regs.h"
#include "iss_csi2.h"
/*
 * csi2_if_enable - Turn the CSI2 receiver interface on or off.
 * @csi2: CSI2 receiver instance
 * @enable: non-zero to enable, zero to disable
 */
static void csi2_if_enable(struct iss_csi2_device *csi2, u8 enable)
{
	u32 val = enable ? CSI2_CTRL_IF_EN : 0;

	/* Flip only the IF_EN bit in CSI2_CTRL, then cache the new state. */
	iss_reg_update(csi2->iss, csi2->regs1, CSI2_CTRL, CSI2_CTRL_IF_EN, val);
	csi2->ctrl.if_enable = enable;
}
/*
 * csi2_recv_config - Program the CSI2 receiver control register.
 * @csi2: CSI2 receiver instance
 * @currctrl: desired receiver configuration
 *
 * The register value is built up from zero, so optional bits are simply
 * OR-ed in when requested; no explicit clearing is needed.
 */
static void csi2_recv_config(struct iss_csi2_device *csi2,
			     struct iss_csi2_ctrl_cfg *currctrl)
{
	u32 reg = 0;

	if (currctrl->frame_mode)
		reg |= CSI2_CTRL_FRAME;
	if (currctrl->vp_clk_enable)
		reg |= CSI2_CTRL_VP_CLK_EN;
	if (currctrl->vp_only_enable)
		reg |= CSI2_CTRL_VP_ONLY_EN;

	reg |= currctrl->vp_out_ctrl << CSI2_CTRL_VP_OUT_CTRL_SHIFT;

	if (currctrl->ecc_enable)
		reg |= CSI2_CTRL_ECC_EN;

	/*
	 * MFlag assertion boundaries:
	 *   Low:  4/8 of FIFO size
	 *   High: 6/8 of FIFO size
	 */
	reg |= (2 << CSI2_CTRL_MFLAG_LEVH_SHIFT) |
	       (4 << CSI2_CTRL_MFLAG_LEVL_SHIFT);

	/* 16x64-bit bursts (recommended) */
	reg |= CSI2_CTRL_BURST_SIZE_EXPAND;
	/* Non-posted writes (recommended) */
	reg |= CSI2_CTRL_NON_POSTED_WRITE;
	/*
	 * Force little endian for all formats, including
	 * YUV4:2:2 8-bit and YUV4:2:0 Legacy.
	 */
	reg |= CSI2_CTRL_ENDIANNESS;

	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTRL, reg);
}
/* Media bus codes accepted on the CSI2 sink pad. */
static const unsigned int csi2_input_fmts[] = {
	MEDIA_BUS_FMT_SGRBG10_1X10,
	MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8,
	MEDIA_BUS_FMT_SRGGB10_1X10,
	MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8,
	MEDIA_BUS_FMT_SBGGR10_1X10,
	MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8,
	MEDIA_BUS_FMT_SGBRG10_1X10,
	MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8,
	MEDIA_BUS_FMT_SBGGR8_1X8,
	MEDIA_BUS_FMT_SGBRG8_1X8,
	MEDIA_BUS_FMT_SGRBG8_1X8,
	MEDIA_BUS_FMT_SRGGB8_1X8,
	MEDIA_BUS_FMT_UYVY8_1X16,
	MEDIA_BUS_FMT_YUYV8_1X16,
};
/* To set the format on the CSI2 requires a mapping function that takes
 * the following inputs:
 * - 4 different format families (RAW10, RAW10 DPCM8, RAW8, YUV422)
 * - 2 destinations (mem, vp+mem) (vp only handled separately)
 * - 2 decompression options (on, off)
 * Output should be CSI2 frame format code
 * Array indices as follows: [format][dest][decompr]
 * Not all combinations are valid. 0 means invalid.
 */
static const u16 __csi2_fmt_map[][2][2] = {
	/* RAW10 formats */
	{
		/* Output to memory */
		{
			/* No DPCM decompression */
			CSI2_PIX_FMT_RAW10_EXP16,
			/* DPCM decompression */
			0,
		},
		/* Output to both */
		{
			/* No DPCM decompression */
			CSI2_PIX_FMT_RAW10_EXP16_VP,
			/* DPCM decompression */
			0,
		},
	},
	/* RAW10 DPCM8 formats */
	{
		/* Output to memory */
		{
			/* No DPCM decompression */
			CSI2_USERDEF_8BIT_DATA1,
			/* DPCM decompression */
			CSI2_USERDEF_8BIT_DATA1_DPCM10,
		},
		/* Output to both */
		{
			/* No DPCM decompression */
			CSI2_PIX_FMT_RAW8_VP,
			/* DPCM decompression */
			CSI2_USERDEF_8BIT_DATA1_DPCM10_VP,
		},
	},
	/* RAW8 formats */
	{
		/* Output to memory */
		{
			/* No DPCM decompression */
			CSI2_PIX_FMT_RAW8,
			/* DPCM decompression */
			0,
		},
		/* Output to both */
		{
			/* No DPCM decompression */
			CSI2_PIX_FMT_RAW8_VP,
			/* DPCM decompression */
			0,
		},
	},
	/* YUV422 formats */
	{
		/* Output to memory */
		{
			/* No DPCM decompression */
			CSI2_PIX_FMT_YUV422_8BIT,
			/* DPCM decompression */
			0,
		},
		/* Output to both */
		{
			/* No DPCM decompression */
			CSI2_PIX_FMT_YUV422_8BIT_VP16,
			/* DPCM decompression */
			0,
		},
	},
};
/*
 * csi2_ctx_map_format - Map the CSI2 sink media bus format to a CSI2 format ID
 * @csi2: ISS CSI2 device
 *
 * Returns the CSI2 physical format id, 0 for unsupported codes, or
 * CSI2_PIX_FMT_OTHERS when no output is enabled.
 */
static u16 csi2_ctx_map_format(struct iss_csi2_device *csi2)
{
	const struct v4l2_mbus_framefmt *fmt = &csi2->formats[CSI2_PAD_SINK];
	int fmt_row, dest_col;

	/* Row of __csi2_fmt_map: format family of the sink pad code. */
	switch (fmt->code) {
	case MEDIA_BUS_FMT_SGRBG10_1X10:
	case MEDIA_BUS_FMT_SRGGB10_1X10:
	case MEDIA_BUS_FMT_SBGGR10_1X10:
	case MEDIA_BUS_FMT_SGBRG10_1X10:
		fmt_row = 0;
		break;
	case MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8:
	case MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8:
	case MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8:
	case MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8:
		fmt_row = 1;
		break;
	case MEDIA_BUS_FMT_SBGGR8_1X8:
	case MEDIA_BUS_FMT_SGBRG8_1X8:
	case MEDIA_BUS_FMT_SGRBG8_1X8:
	case MEDIA_BUS_FMT_SRGGB8_1X8:
		fmt_row = 2;
		break;
	case MEDIA_BUS_FMT_UYVY8_1X16:
	case MEDIA_BUS_FMT_YUYV8_1X16:
		fmt_row = 3;
		break;
	default:
		WARN(1, "CSI2: pixel format %08x unsupported!\n",
		     fmt->code);
		return 0;
	}

	/* Neither output enabled is not a valid combination. */
	if (!(csi2->output & (CSI2_OUTPUT_IPIPEIF | CSI2_OUTPUT_MEMORY)))
		return CSI2_PIX_FMT_OTHERS;

	/* If we need to skip frames at the beginning of the stream disable the
	 * video port to avoid sending the skipped frames to the IPIPEIF.
	 */
	dest_col = csi2->frame_skip ? 0
		 : !!(csi2->output & CSI2_OUTPUT_IPIPEIF);

	return __csi2_fmt_map[fmt_row][dest_col][csi2->dpcm_decompress];
}
/*
 * csi2_set_outaddr - Set memory address to save output image
 * @csi2: Pointer to ISS CSI2a device.
 * @addr: 32-bit memory address aligned on 32 byte boundary.
 *
 * Sets the memory address where the output will be saved. Both the ping
 * and the pong address of context 0 are pointed at the same buffer, so
 * the hardware keeps writing to it until a new address is programmed.
 */
static void csi2_set_outaddr(struct iss_csi2_device *csi2, u32 addr)
{
	struct iss_csi2_ctx_cfg *ctx = &csi2->contexts[0];

	ctx->ping_addr = addr;
	ctx->pong_addr = addr;
	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PING_ADDR(ctx->ctxnum),
		      ctx->ping_addr);
	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PONG_ADDR(ctx->ctxnum),
		      ctx->pong_addr);
}
/*
 * is_usr_def_mapping - Check whether USER_DEF_MAPPING should be enabled.
 * @format_id: mapped CSI2 format id
 *
 * Returns 1 for user-defined format ids (0x40-0x4f), 0 otherwise.
 */
static inline int is_usr_def_mapping(u32 format_id)
{
	/* The comparison already yields 0 or 1. */
	return (format_id & 0xf0) == 0x40;
}
/*
 * csi2_ctx_enable - Enable or disable a CSI2 context
 * @csi2: CSI2 receiver instance
 * @ctxnum: context number, 0 to 7
 * @enable: non-zero to enable, zero to disable
 */
static void csi2_ctx_enable(struct iss_csi2_device *csi2, u8 ctxnum, u8 enable)
{
	struct iss_csi2_ctx_cfg *ctx = &csi2->contexts[ctxnum];
	u32 reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctxnum));

	if (!enable) {
		reg &= ~CSI2_CTX_CTRL1_CTX_EN;
	} else {
		unsigned int skip;

		/*
		 * Program the frame skip count; when writing to memory at
		 * least one frame is always skipped.
		 */
		if (csi2->frame_skip)
			skip = csi2->frame_skip;
		else
			skip = (csi2->output & CSI2_OUTPUT_MEMORY) ? 1 : 0;

		reg = (reg & ~CSI2_CTX_CTRL1_COUNT_MASK)
		    | CSI2_CTX_CTRL1_COUNT_UNLOCK
		    | (skip << CSI2_CTX_CTRL1_COUNT_SHIFT)
		    | CSI2_CTX_CTRL1_CTX_EN;
	}

	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctxnum), reg);
	ctx->enabled = enable;
}
/*
 * csi2_ctx_config - Program a CSI2 context from its software configuration.
 * @csi2: CSI2 receiver instance
 * @ctx: context configuration
 */
static void csi2_ctx_config(struct iss_csi2_device *csi2,
			    struct iss_csi2_ctx_cfg *ctx)
{
	u32 ctrl1 = 0;
	u32 ctrl2;

	/* Restart software frame-number tracking for this context. */
	ctx->frame = 0;

	/* CSI2_CTx_CTRL1: end-of-frame / end-of-line / checksum options. */
	if (ctx->eof_enabled)
		ctrl1 |= CSI2_CTX_CTRL1_EOF_EN;
	if (ctx->eol_enabled)
		ctrl1 |= CSI2_CTX_CTRL1_EOL_EN;
	if (ctx->checksum_enabled)
		ctrl1 |= CSI2_CTX_CTRL1_CS_EN;
	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL1(ctx->ctxnum),
		      ctrl1);

	/* CSI2_CTx_CTRL2: virtual channel, format and DPCM predictor. */
	ctrl2 = (ctx->virtual_id << CSI2_CTX_CTRL2_VIRTUAL_ID_SHIFT)
	      | (ctx->format_id << CSI2_CTX_CTRL2_FORMAT_SHIFT);
	if (ctx->dpcm_decompress && ctx->dpcm_predictor)
		ctrl2 |= CSI2_CTX_CTRL2_DPCM_PRED;
	if (is_usr_def_mapping(ctx->format_id))
		ctrl2 |= 2 << CSI2_CTX_CTRL2_USER_DEF_MAP_SHIFT;
	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL2(ctx->ctxnum),
		      ctrl2);

	/* CSI2_CTx_CTRL3: alpha value. */
	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_CTRL3(ctx->ctxnum),
		      ctx->alpha << CSI2_CTX_CTRL3_ALPHA_SHIFT);

	/* CSI2_CTx_DAT_OFST: line offset, then the ping/pong addresses. */
	iss_reg_update(csi2->iss, csi2->regs1, CSI2_CTX_DAT_OFST(ctx->ctxnum),
		       CSI2_CTX_DAT_OFST_MASK, ctx->data_offset);
	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PING_ADDR(ctx->ctxnum),
		      ctx->ping_addr);
	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_PONG_ADDR(ctx->ctxnum),
		      ctx->pong_addr);
}
/*
 * csi2_timing_config - Program the CSI2 timing register for I/O block 1.
 * @csi2: CSI2 receiver instance
 * @timing: csi2_timing_cfg structure
 */
static void csi2_timing_config(struct iss_csi2_device *csi2,
			       struct iss_csi2_timing_cfg *timing)
{
	u32 reg = iss_reg_read(csi2->iss, csi2->regs1, CSI2_TIMING);

	/* Clear every IO1 field first, then set the requested bits. */
	reg &= ~(CSI2_TIMING_FORCE_RX_MODE_IO1 |
		 CSI2_TIMING_STOP_STATE_X16_IO1 |
		 CSI2_TIMING_STOP_STATE_X4_IO1 |
		 CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);

	if (timing->force_rx_mode)
		reg |= CSI2_TIMING_FORCE_RX_MODE_IO1;
	if (timing->stop_state_16x)
		reg |= CSI2_TIMING_STOP_STATE_X16_IO1;
	if (timing->stop_state_4x)
		reg |= CSI2_TIMING_STOP_STATE_X4_IO1;

	reg |= timing->stop_state_counter <<
	       CSI2_TIMING_STOP_STATE_COUNTER_IO1_SHIFT;

	iss_reg_write(csi2->iss, csi2->regs1, CSI2_TIMING, reg);
}
/*
 * csi2_irq_ctx_set - Enable or disable per-context CSI2 IRQs.
 * @csi2: CSI2 receiver instance
 * @enable: non-zero to enable, zero to disable
 *
 * Covers the frame-start and frame-end interrupts of all eight contexts.
 */
static void csi2_irq_ctx_set(struct iss_csi2_device *csi2, int enable)
{
	const u32 mask = CSI2_CTX_IRQ_FE | CSI2_CTX_IRQ_FS;
	unsigned int ctx;

	for (ctx = 0; ctx < 8; ctx++) {
		/* Ack anything pending before changing the enable bits. */
		iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(ctx),
			      mask);
		if (enable)
			iss_reg_set(csi2->iss, csi2->regs1,
				    CSI2_CTX_IRQENABLE(ctx), mask);
		else
			iss_reg_clr(csi2->iss, csi2->regs1,
				    CSI2_CTX_IRQENABLE(ctx), mask);
	}
}
/*
 * csi2_irq_complexio1_set - Enables CSI2 ComplexIO IRQs.
 * @enable: Enable/disable CSI2 ComplexIO #1 interrupts
 *
 * The mask covers the ULPM state changes plus the per-lane (1-5) control,
 * escape and start-of-transmission error conditions.
 */
static void csi2_irq_complexio1_set(struct iss_csi2_device *csi2, int enable)
{
	u32 reg;

	reg = CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT |
		CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER |
		CSI2_COMPLEXIO_IRQ_STATEULPM5 |
		CSI2_COMPLEXIO_IRQ_ERRCONTROL5 |
		CSI2_COMPLEXIO_IRQ_ERRESC5 |
		CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5 |
		CSI2_COMPLEXIO_IRQ_ERRSOTHS5 |
		CSI2_COMPLEXIO_IRQ_STATEULPM4 |
		CSI2_COMPLEXIO_IRQ_ERRCONTROL4 |
		CSI2_COMPLEXIO_IRQ_ERRESC4 |
		CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4 |
		CSI2_COMPLEXIO_IRQ_ERRSOTHS4 |
		CSI2_COMPLEXIO_IRQ_STATEULPM3 |
		CSI2_COMPLEXIO_IRQ_ERRCONTROL3 |
		CSI2_COMPLEXIO_IRQ_ERRESC3 |
		CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3 |
		CSI2_COMPLEXIO_IRQ_ERRSOTHS3 |
		CSI2_COMPLEXIO_IRQ_STATEULPM2 |
		CSI2_COMPLEXIO_IRQ_ERRCONTROL2 |
		CSI2_COMPLEXIO_IRQ_ERRESC2 |
		CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2 |
		CSI2_COMPLEXIO_IRQ_ERRSOTHS2 |
		CSI2_COMPLEXIO_IRQ_STATEULPM1 |
		CSI2_COMPLEXIO_IRQ_ERRCONTROL1 |
		CSI2_COMPLEXIO_IRQ_ERRESC1 |
		CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1 |
		CSI2_COMPLEXIO_IRQ_ERRSOTHS1;
	/* Ack pending events before touching the enable register. */
	iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQSTATUS, reg);
	if (enable)
		iss_reg_set(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQENABLE,
			    reg);
	else
		/* Disable masks everything, not only the bits above. */
		iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQENABLE,
			      0);
}
/*
 * csi2_irq_status_set - Enable or disable the main CSI2 status IRQs.
 * @csi2: CSI2 receiver instance
 * @enable: non-zero to enable, zero to disable
 */
static void csi2_irq_status_set(struct iss_csi2_device *csi2, int enable)
{
	const u32 mask = CSI2_IRQ_OCP_ERR |
			 CSI2_IRQ_SHORT_PACKET |
			 CSI2_IRQ_ECC_CORRECTION |
			 CSI2_IRQ_ECC_NO_CORRECTION |
			 CSI2_IRQ_COMPLEXIO_ERR |
			 CSI2_IRQ_FIFO_OVF |
			 CSI2_IRQ_CONTEXT0;

	/* Ack pending events before changing the enable register. */
	iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQSTATUS, mask);
	if (enable)
		iss_reg_set(csi2->iss, csi2->regs1, CSI2_IRQENABLE, mask);
	else
		/* Disable masks everything, not only the bits above. */
		iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQENABLE, 0);
}
/*
 * omap4iss_csi2_reset - Resets the CSI2 module.
 *
 * Must be called with the phy lock held.
 *
 * Returns 0 if successful, -ENODEV if the module is not available, or
 * -EBUSY if the PHY is in use or a reset step timed out.
 */
int omap4iss_csi2_reset(struct iss_csi2_device *csi2)
{
	unsigned int timeout;

	if (!csi2->available)
		return -ENODEV;

	/* Resetting would disturb the other user of the shared PHY. */
	if (csi2->phy->phy_in_use)
		return -EBUSY;

	/* Soft-reset the receiver and wait for it to complete. */
	iss_reg_set(csi2->iss, csi2->regs1, CSI2_SYSCONFIG,
		    CSI2_SYSCONFIG_SOFT_RESET);
	timeout = iss_poll_condition_timeout(
		iss_reg_read(csi2->iss, csi2->regs1, CSI2_SYSSTATUS) &
		CSI2_SYSSTATUS_RESET_DONE, 500, 100, 200);
	if (timeout) {
		dev_err(csi2->iss->dev, "CSI2: Soft reset timeout!\n");
		return -EBUSY;
	}

	/* Reset the ComplexIO and wait for the control clock reset. */
	iss_reg_set(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_CFG,
		    CSI2_COMPLEXIO_CFG_RESET_CTRL);
	timeout = iss_poll_condition_timeout(
		iss_reg_read(csi2->iss, csi2->phy->phy_regs, REGISTER1) &
		REGISTER1_RESET_DONE_CTRLCLK, 10000, 100, 500);
	if (timeout) {
		dev_err(csi2->iss->dev, "CSI2: CSI2_96M_FCLK reset timeout!\n");
		return -EBUSY;
	}

	/* No standby, no auto idle: keep the module fully powered. */
	iss_reg_update(csi2->iss, csi2->regs1, CSI2_SYSCONFIG,
		       CSI2_SYSCONFIG_MSTANDBY_MODE_MASK |
		       CSI2_SYSCONFIG_AUTO_IDLE,
		       CSI2_SYSCONFIG_MSTANDBY_MODE_NO);

	return 0;
}
/*
 * csi2_configure - Apply the full receiver configuration for streaming.
 * @csi2: CSI2 receiver instance
 *
 * Pulls the bus parameters from the connected sensor's platform data and
 * programs timings, the receiver control register and context 0.
 *
 * Returns 0 on success or -EBUSY if the interface or context is already
 * enabled (the hardware is not reconfigured dynamically).
 */
static int csi2_configure(struct iss_csi2_device *csi2)
{
	const struct iss_v4l2_subdevs_group *pdata;
	struct iss_csi2_timing_cfg *timing = &csi2->timing[0];
	struct v4l2_subdev *sensor;
	struct media_pad *pad;

	/*
	 * CSI2 fields that can be updated while the context has
	 * been enabled or the interface has been enabled are not
	 * updated dynamically currently. So we do not allow to
	 * reconfigure if either has been enabled
	 */
	if (csi2->contexts[0].enabled || csi2->ctrl.if_enable)
		return -EBUSY;

	/* Locate the sensor feeding the sink pad to read its bus config. */
	pad = media_pad_remote_pad_first(&csi2->pads[CSI2_PAD_SINK]);
	sensor = media_entity_to_v4l2_subdev(pad->entity);
	pdata = sensor->host_priv;

	/* Ask the sensor how many initial frames must be discarded. */
	csi2->frame_skip = 0;
	v4l2_subdev_call(sensor, sensor, g_skip_frames, &csi2->frame_skip);

	csi2->ctrl.vp_out_ctrl = pdata->bus.csi2.vpclk_div;
	csi2->ctrl.frame_mode = ISS_CSI2_FRAME_IMMEDIATE;
	csi2->ctrl.ecc_enable = pdata->bus.csi2.crc;

	/* Fixed D-PHY timing parameters. */
	timing->force_rx_mode = 1;
	timing->stop_state_16x = 1;
	timing->stop_state_4x = 1;
	timing->stop_state_counter = 0x1ff;

	/*
	 * The CSI2 receiver can't do any format conversion except DPCM
	 * decompression, so every set_format call configures both pads
	 * and enables DPCM decompression as a special case:
	 */
	if (csi2->formats[CSI2_PAD_SINK].code !=
	    csi2->formats[CSI2_PAD_SOURCE].code)
		csi2->dpcm_decompress = true;
	else
		csi2->dpcm_decompress = false;

	csi2->contexts[0].format_id = csi2_ctx_map_format(csi2);

	/* Line padding is implemented through the context data offset. */
	if (csi2->video_out.bpl_padding == 0)
		csi2->contexts[0].data_offset = 0;
	else
		csi2->contexts[0].data_offset = csi2->video_out.bpl_value;

	/*
	 * Enable end of frame and end of line signals generation for
	 * context 0. These signals are generated from CSI2 receiver to
	 * qualify the last pixel of a frame and the last pixel of a line.
	 * Without enabling the signals CSI2 receiver writes data to memory
	 * beyond buffer size and/or data line offset is not handled correctly.
	 */
	csi2->contexts[0].eof_enabled = 1;
	csi2->contexts[0].eol_enabled = 1;

	/* Enable all interrupt sources. */
	csi2_irq_complexio1_set(csi2, 1);
	csi2_irq_ctx_set(csi2, 1);
	csi2_irq_status_set(csi2, 1);

	/* Set configuration (timings, format and links) */
	csi2_timing_config(csi2, timing);
	csi2_recv_config(csi2, &csi2->ctrl);
	csi2_ctx_config(csi2, &csi2->contexts[0]);

	return 0;
}
/*
 * csi2_print_status - Prints CSI2 debug information.
 *
 * Dumps the main receiver registers and the context-0 registers at
 * dev_dbg level; a no-op unless dynamic debug is enabled.
 */
#define CSI2_PRINT_REGISTER(iss, regs, name)\
	dev_dbg(iss->dev, "###CSI2 " #name "=0x%08x\n", \
		iss_reg_read(iss, regs, CSI2_##name))

static void csi2_print_status(struct iss_csi2_device *csi2)
{
	struct iss_device *iss = csi2->iss;

	if (!csi2->available)
		return;

	dev_dbg(iss->dev, "-------------CSI2 Register dump-------------\n");

	CSI2_PRINT_REGISTER(iss, csi2->regs1, SYSCONFIG);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, SYSSTATUS);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, IRQENABLE);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, IRQSTATUS);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTRL);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, DBG_H);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_CFG);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_IRQSTATUS);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, SHORT_PACKET);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, COMPLEXIO_IRQENABLE);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, DBG_P);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, TIMING);
	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL1(0));
	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL2(0));
	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_DAT_OFST(0));
	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_PING_ADDR(0));
	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_PONG_ADDR(0));
	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_IRQENABLE(0));
	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_IRQSTATUS(0));
	CSI2_PRINT_REGISTER(iss, csi2->regs1, CTX_CTRL3(0));

	dev_dbg(iss->dev, "--------------------------------------------\n");
}
/* -----------------------------------------------------------------------------
 * Interrupt handling
 */

/*
 * csi2_isr_buffer - Buffer handling at end-of-frame when writing to memory.
 * @csi2: CSI2 receiver instance
 */
static void csi2_isr_buffer(struct iss_csi2_device *csi2)
{
	struct iss_buffer *next;

	/* Stop context 0 while swapping the DMA target. */
	csi2_ctx_enable(csi2, 0, 0);

	next = omap4iss_video_buffer_next(&csi2->video_out);
	if (next) {
		csi2_set_outaddr(csi2, next->iss_addr);
		csi2_ctx_enable(csi2, 0, 1);
	}
	/*
	 * Otherwise this is an underrun: leave the context disabled and let
	 * the video queue operation restart the engine.
	 */
}
/*
 * csi2_isr_ctx - Per-context interrupt handling.
 * @csi2: CSI2 receiver instance
 * @ctx: the context whose interrupt fired
 *
 * Acks the context IRQs, propagates the frame number on frame start and
 * hands completed buffers over on frame end.
 */
static void csi2_isr_ctx(struct iss_csi2_device *csi2,
			 struct iss_csi2_ctx_cfg *ctx)
{
	unsigned int n = ctx->ctxnum;
	u32 status;

	status = iss_reg_read(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n));
	iss_reg_write(csi2->iss, csi2->regs1, CSI2_CTX_IRQSTATUS(n), status);

	/* During a stop request only the stop handshake matters. */
	if (omap4iss_module_sync_is_stopping(&csi2->wait, &csi2->stopping))
		return;

	/* Propagate frame number */
	if (status & CSI2_CTX_IRQ_FS) {
		struct iss_pipeline *pipe =
				     to_iss_pipeline(&csi2->subdev.entity);
		u16 frame;
		u16 delta;

		frame = iss_reg_read(csi2->iss, csi2->regs1,
				     CSI2_CTX_CTRL2(ctx->ctxnum))
		      >> CSI2_CTX_CTRL2_FRAME_SHIFT;

		if (frame == 0) {
			/* A zero value means that the counter isn't implemented
			 * by the source. Increment the frame number in software
			 * in that case.
			 */
			atomic_inc(&pipe->frame_number);
		} else {
			/* Extend the 16 bit frame number to 32 bits by
			 * computing the delta between two consecutive CSI2
			 * frame numbers and adding it to the software frame
			 * number. The hardware counter starts at 1 and wraps
			 * from 0xffff to 1 without going through 0, so subtract
			 * 1 when the counter wraps.
			 */
			delta = frame - ctx->frame;
			if (frame < ctx->frame)
				delta--;
			ctx->frame = frame;
			atomic_add(delta, &pipe->frame_number);
		}
	}

	if (!(status & CSI2_CTX_IRQ_FE))
		return;

	/* Skip interrupts until we reach the frame skip count. The CSI2 will be
	 * automatically disabled, as the frame skip count has been programmed
	 * in the CSI2_CTx_CTRL1::COUNT field, so re-enable it.
	 *
	 * It would have been nice to rely on the FRAME_NUMBER interrupt instead
	 * but it turned out that the interrupt is only generated when the CSI2
	 * writes to memory (the CSI2_CTx_CTRL1::COUNT field is decreased
	 * correctly and reaches 0 when data is forwarded to the video port only
	 * but no interrupt arrives). Maybe a CSI2 hardware bug.
	 */
	if (csi2->frame_skip) {
		csi2->frame_skip--;
		if (csi2->frame_skip == 0) {
			/* Skipping done: switch to the normal format/dest. */
			ctx->format_id = csi2_ctx_map_format(csi2);
			csi2_ctx_config(csi2, ctx);
			csi2_ctx_enable(csi2, n, 1);
		}
		return;
	}

	if (csi2->output & CSI2_OUTPUT_MEMORY)
		csi2_isr_buffer(csi2);
}
/*
 * omap4iss_csi2_isr - CSI2 interrupt handling.
 *
 * Acks the top-level CSI2 IRQ status, records errors on the pipeline and
 * dispatches context-0 events to csi2_isr_ctx().
 */
void omap4iss_csi2_isr(struct iss_csi2_device *csi2)
{
	struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
	u32 csi2_irqstatus, cpxio1_irqstatus;
	struct iss_device *iss = csi2->iss;

	if (!csi2->available)
		return;

	csi2_irqstatus = iss_reg_read(csi2->iss, csi2->regs1, CSI2_IRQSTATUS);
	iss_reg_write(csi2->iss, csi2->regs1, CSI2_IRQSTATUS, csi2_irqstatus);

	/* Failure Cases */
	if (csi2_irqstatus & CSI2_IRQ_COMPLEXIO_ERR) {
		/* Read and ack the detailed ComplexIO error status. */
		cpxio1_irqstatus = iss_reg_read(csi2->iss, csi2->regs1,
						CSI2_COMPLEXIO_IRQSTATUS);
		iss_reg_write(csi2->iss, csi2->regs1, CSI2_COMPLEXIO_IRQSTATUS,
			      cpxio1_irqstatus);
		dev_dbg(iss->dev, "CSI2: ComplexIO Error IRQ %x\n",
			cpxio1_irqstatus);
		pipe->error = true;
	}

	if (csi2_irqstatus & (CSI2_IRQ_OCP_ERR |
			      CSI2_IRQ_SHORT_PACKET |
			      CSI2_IRQ_ECC_NO_CORRECTION |
			      CSI2_IRQ_COMPLEXIO_ERR |
			      CSI2_IRQ_FIFO_OVF)) {
		dev_dbg(iss->dev,
			"CSI2 Err: OCP:%d SHORT:%d ECC:%d CPXIO:%d OVF:%d\n",
			csi2_irqstatus & CSI2_IRQ_OCP_ERR ? 1 : 0,
			csi2_irqstatus & CSI2_IRQ_SHORT_PACKET ? 1 : 0,
			csi2_irqstatus & CSI2_IRQ_ECC_NO_CORRECTION ? 1 : 0,
			csi2_irqstatus & CSI2_IRQ_COMPLEXIO_ERR ? 1 : 0,
			csi2_irqstatus & CSI2_IRQ_FIFO_OVF ? 1 : 0);
		pipe->error = true;
	}

	/* Successful cases */
	if (csi2_irqstatus & CSI2_IRQ_CONTEXT0)
		csi2_isr_ctx(csi2, &csi2->contexts[0]);

	if (csi2_irqstatus & CSI2_IRQ_ECC_CORRECTION)
		dev_dbg(iss->dev, "CSI2: ECC correction done\n");
}
/* -----------------------------------------------------------------------------
 * ISS video operations
 */

/*
 * csi2_queue - Queue the first buffer when using memory output.
 * @video: The video node
 * @buffer: buffer to queue
 */
static int csi2_queue(struct iss_video *video, struct iss_buffer *buffer)
{
	struct iss_csi2_device *csi2 =
		container_of(video, struct iss_csi2_device, video_out);

	csi2_set_outaddr(csi2, buffer->iss_addr);

	/*
	 * Unless streaming started without a buffer (or an ISR underrun left
	 * ISS_VIDEO_DMAQUEUE_UNDERRUN set), the hardware is already running.
	 */
	if (!(csi2->video_out.dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN))
		return 0;

	/* (Re)start the interface and context 0 now that a buffer exists. */
	csi2_if_enable(csi2, 1);
	csi2_ctx_enable(csi2, 0, 1);
	iss_video_dmaqueue_flags_clr(&csi2->video_out);

	return 0;
}
/* ISS video operations for the CSI2 memory-output video node. */
static const struct iss_video_operations csi2_issvideo_ops = {
	.queue = csi2_queue,
};
/* -----------------------------------------------------------------------------
 * V4L2 subdev operations
 */

/*
 * __csi2_get_format - Return the TRY or ACTIVE format of a pad.
 * @csi2: CSI2 receiver instance
 * @sd_state: subdev state holding the TRY formats
 * @pad: pad index
 * @which: V4L2_SUBDEV_FORMAT_TRY or V4L2_SUBDEV_FORMAT_ACTIVE
 */
static struct v4l2_mbus_framefmt *
__csi2_get_format(struct iss_csi2_device *csi2,
		  struct v4l2_subdev_state *sd_state,
		  unsigned int pad,
		  enum v4l2_subdev_format_whence which)
{
	return which == V4L2_SUBDEV_FORMAT_TRY
	     ? v4l2_subdev_get_try_format(&csi2->subdev, sd_state, pad)
	     : &csi2->formats[pad];
}
/*
 * csi2_try_format - Adjust a requested format to hardware capabilities.
 * @csi2: CSI2 receiver instance
 * @sd_state: subdev state (for TRY formats)
 * @pad: pad the format applies to
 * @fmt: format to adjust in place
 * @which: V4L2_SUBDEV_FORMAT_TRY or V4L2_SUBDEV_FORMAT_ACTIVE
 */
static void
csi2_try_format(struct iss_csi2_device *csi2,
		struct v4l2_subdev_state *sd_state,
		unsigned int pad,
		struct v4l2_mbus_framefmt *fmt,
		enum v4l2_subdev_format_whence which)
{
	u32 pixelcode;
	struct v4l2_mbus_framefmt *format;
	const struct iss_format_info *info;
	unsigned int i;

	switch (pad) {
	case CSI2_PAD_SINK:
		/* Look the requested code up in the supported input list. */
		for (i = 0; i < ARRAY_SIZE(csi2_input_fmts); i++) {
			if (fmt->code == csi2_input_fmts[i])
				break;
		}

		/* If not found, use SGRBG10 as default */
		if (i >= ARRAY_SIZE(csi2_input_fmts))
			fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;

		/* Clamp the width and height to valid range (1-8191). */
		fmt->width = clamp_t(u32, fmt->width, 1, 8191);
		fmt->height = clamp_t(u32, fmt->height, 1, 8191);
		break;

	case CSI2_PAD_SOURCE:
		/* Source format same as sink format, except for DPCM
		 * compression.
		 */
		pixelcode = fmt->code;
		format = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
					   which);
		memcpy(fmt, format, sizeof(*fmt));

		/*
		 * Only Allow DPCM decompression, and check that the
		 * pattern is preserved
		 */
		info = omap4iss_video_format_info(fmt->code);
		if (info->uncompressed == pixelcode)
			fmt->code = pixelcode;
		break;
	}

	/* RGB, non-interlaced */
	fmt->colorspace = V4L2_COLORSPACE_SRGB;
	fmt->field = V4L2_FIELD_NONE;
}
/*
 * csi2_enum_mbus_code - Handle pixel format enumeration
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @code : pointer to v4l2_subdev_mbus_code_enum structure
 *
 * The sink pad enumerates the supported input codes; the source pad
 * exposes the sink code and, for compressed inputs, its uncompressed
 * counterpart.
 *
 * return -EINVAL or zero on success
 */
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	const struct iss_format_info *info;
	struct v4l2_mbus_framefmt *sinkfmt;

	if (code->pad == CSI2_PAD_SINK) {
		if (code->index >= ARRAY_SIZE(csi2_input_fmts))
			return -EINVAL;
		code->code = csi2_input_fmts[code->index];
		return 0;
	}

	sinkfmt = __csi2_get_format(csi2, sd_state, CSI2_PAD_SINK,
				    code->which);

	if (code->index == 0) {
		/* Pass the sink pad code through. */
		code->code = sinkfmt->code;
		return 0;
	}

	if (code->index == 1) {
		/* Uncompressed variant, only if the sink code is compressed. */
		info = omap4iss_video_format_info(sinkfmt->code);
		if (info->uncompressed == sinkfmt->code)
			return -EINVAL;
		code->code = info->uncompressed;
		return 0;
	}

	return -EINVAL;
}
static int csi2_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt format;
if (fse->index != 0)
return -EINVAL;
format.code = fse->code;
format.width = 1;
format.height = 1;
csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
if (format.code != fse->code)
return -EINVAL;
format.code = fse->code;
format.width = -1;
format.height = -1;
csi2_try_format(csi2, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
return 0;
}
/*
 * csi2_get_format - Handle get format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int csi2_get_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *cur;

	cur = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
	if (!cur)
		return -EINVAL;

	fmt->format = *cur;
	return 0;
}
/*
 * csi2_set_format - Handle set format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt: pointer to v4l2 subdev format structure
 * return -EINVAL or zero on success
 */
static int csi2_set_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *cur;

	cur = __csi2_get_format(csi2, sd_state, fmt->pad, fmt->which);
	if (!cur)
		return -EINVAL;

	csi2_try_format(csi2, sd_state, fmt->pad, &fmt->format, fmt->which);
	*cur = fmt->format;

	if (fmt->pad != CSI2_PAD_SINK)
		return 0;

	/* Propagate the sink format to the source pad. */
	cur = __csi2_get_format(csi2, sd_state, CSI2_PAD_SOURCE, fmt->which);
	*cur = fmt->format;
	csi2_try_format(csi2, sd_state, CSI2_PAD_SOURCE, cur, fmt->which);

	return 0;
}
/*
 * csi2_link_validate - Validate the sensor-to-CSI2 link.
 * @sd: CSI2 subdev
 * @link: link being validated
 * @source_fmt: format on the source end
 * @sink_fmt: format on the sink end
 *
 * Records the external (sensor) subdev on the pipeline and fetches its
 * bus information before running the default format validation.
 */
static int csi2_link_validate(struct v4l2_subdev *sd, struct media_link *link,
			      struct v4l2_subdev_format *source_fmt,
			      struct v4l2_subdev_format *sink_fmt)
{
	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct iss_pipeline *pipe = to_iss_pipeline(&csi2->subdev.entity);
	int ret;

	pipe->external = media_entity_to_v4l2_subdev(link->source->entity);

	ret = omap4iss_get_external_info(pipe, link);
	if (ret < 0)
		return ret;

	return v4l2_subdev_link_validate_default(sd, link, source_fmt,
						 sink_fmt);
}
/*
* csi2_init_formats - Initialize formats on all pads
* @sd: ISS CSI2 V4L2 subdevice
* @fh: V4L2 subdev file handle
*
* Initialize all pad formats with default values. If fh is not NULL, try
* formats are initialized on the file handle. Otherwise active formats are
* initialized on the device.
*/
static int csi2_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_subdev_format format;
memset(&format, 0, sizeof(format));
format.pad = CSI2_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
format.format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
format.format.width = 4096;
format.format.height = 4096;
csi2_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
/*
 * csi2_set_stream - Enable/Disable streaming on the CSI2 module
 * @sd: ISS CSI2 V4L2 subdevice
 * @enable: ISS pipeline stream state
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct iss_device *iss = csi2->iss;
	struct iss_video *video_out = &csi2->video_out;
	int ret = 0;

	if (csi2->state == ISS_PIPELINE_STREAM_STOPPED) {
		if (enable == ISS_PIPELINE_STREAM_STOPPED)
			return 0;

		/* Clock the module up only on a stopped -> running edge. */
		omap4iss_subclk_enable(iss, csi2->subclk);
	}

	switch (enable) {
	case ISS_PIPELINE_STREAM_CONTINUOUS: {
		ret = omap4iss_csiphy_config(iss, sd);
		if (ret < 0)
			return ret;

		if (omap4iss_csiphy_acquire(csi2->phy) < 0)
			return -ENODEV;

		csi2_configure(csi2);
		csi2_print_status(csi2);

		/*
		 * When outputting to memory with no buffer available, let the
		 * buffer queue handler start the hardware. A DMA queue flag
		 * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
		 * a buffer available.
		 */
		if (csi2->output & CSI2_OUTPUT_MEMORY &&
		    !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
			break;

		/* Enable context 0 and IRQs */
		atomic_set(&csi2->stopping, 0);
		csi2_ctx_enable(csi2, 0, 1);
		csi2_if_enable(csi2, 1);
		iss_video_dmaqueue_flags_clr(video_out);
		break;
	}
	case ISS_PIPELINE_STREAM_STOPPED:
		if (csi2->state == ISS_PIPELINE_STREAM_STOPPED)
			return 0;

		/* Wait for the current frame to complete before stopping. */
		if (omap4iss_module_sync_idle(&sd->entity, &csi2->wait,
					      &csi2->stopping))
			ret = -ETIMEDOUT;

		csi2_ctx_enable(csi2, 0, 0);
		csi2_if_enable(csi2, 0);
		csi2_irq_ctx_set(csi2, 0);
		omap4iss_csiphy_release(csi2->phy);
		omap4iss_subclk_disable(iss, csi2->subclk);
		iss_video_dmaqueue_flags_clr(video_out);
		break;
	}

	csi2->state = enable;
	return ret;
}
/* subdev video operations */
static const struct v4l2_subdev_video_ops csi2_video_ops = {
	.s_stream = csi2_set_stream,
};

/* subdev pad operations */
static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
	.enum_mbus_code = csi2_enum_mbus_code,
	.enum_frame_size = csi2_enum_frame_size,
	.get_fmt = csi2_get_format,
	.set_fmt = csi2_set_format,
	.link_validate = csi2_link_validate,
};

/* subdev operations */
static const struct v4l2_subdev_ops csi2_ops = {
	.video = &csi2_video_ops,
	.pad = &csi2_pad_ops,
};

/* subdev internal operations: default formats on each open file handle */
static const struct v4l2_subdev_internal_ops csi2_internal_ops = {
	.open = csi2_init_formats,
};
/* -----------------------------------------------------------------------------
 * Media entity operations
 */

/*
 * csi2_link_setup - Setup CSI2 connections.
 * @entity : Pointer to media entity structure
 * @local : Pointer to local pad array
 * @remote : Pointer to remote pad array
 * @flags : Link flags
 * return -EINVAL or zero on success
 */
static int csi2_link_setup(struct media_entity *entity,
			   const struct media_pad *local,
			   const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct iss_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct iss_csi2_ctrl_cfg *ctrl = &csi2->ctrl;
	unsigned int index = local->index;

	/*
	 * FIXME: this is actually a hack! The pad index alone cannot tell a
	 * link to memory from a link to the IPIPEIF, so the remote entity
	 * type is folded into the upper bits of the switch value.
	 */
	if (is_media_entity_v4l2_subdev(remote->entity))
		index |= 2 << 16;

	/*
	 * The ISS core doesn't support pipelines with multiple video outputs.
	 * Revisit this when it will be implemented, and return -EBUSY for now.
	 */
	switch (index) {
	case CSI2_PAD_SOURCE:
		/* Source pad linked to a video node: memory output. */
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (csi2->output & ~CSI2_OUTPUT_MEMORY)
				return -EBUSY;
			csi2->output |= CSI2_OUTPUT_MEMORY;
		} else {
			csi2->output &= ~CSI2_OUTPUT_MEMORY;
		}
		break;

	case CSI2_PAD_SOURCE | 2 << 16:
		/* Source pad linked to a subdev: IPIPEIF (video port). */
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (csi2->output & ~CSI2_OUTPUT_IPIPEIF)
				return -EBUSY;
			csi2->output |= CSI2_OUTPUT_IPIPEIF;
		} else {
			csi2->output &= ~CSI2_OUTPUT_IPIPEIF;
		}
		break;

	default:
		/* Link from camera to CSI2 is fixed... */
		return -EINVAL;
	}

	/* Derive the video-port control bits from the selected outputs. */
	ctrl->vp_only_enable = csi2->output & CSI2_OUTPUT_MEMORY ? false : true;
	ctrl->vp_clk_enable = !!(csi2->output & CSI2_OUTPUT_IPIPEIF);

	return 0;
}
/* media operations */
static const struct media_entity_operations csi2_media_ops = {
	.link_setup = csi2_link_setup,
	.link_validate = v4l2_subdev_link_validate,
};

/*
 * omap4iss_csi2_unregister_entities - Unregister the subdev and video node.
 * @csi2: CSI2 receiver instance
 */
void omap4iss_csi2_unregister_entities(struct iss_csi2_device *csi2)
{
	v4l2_device_unregister_subdev(&csi2->subdev);
	omap4iss_video_unregister(&csi2->video_out);
}
/*
 * omap4iss_csi2_register_entities - Register the subdev and video node.
 * @csi2: CSI2 receiver instance
 * @vdev: parent V4L2 device
 *
 * Returns 0 on success or a negative error code; on failure everything
 * is unregistered again.
 */
int omap4iss_csi2_register_entities(struct iss_csi2_device *csi2,
				    struct v4l2_device *vdev)
{
	int ret;

	ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
	if (ret >= 0)
		ret = omap4iss_video_register(&csi2->video_out, vdev);
	if (ret >= 0)
		return 0;

	omap4iss_csi2_unregister_entities(csi2);
	return ret;
}
/* -----------------------------------------------------------------------------
* ISS CSI2 initialisation and cleanup
*/
/*
* csi2_init_entities - Initialize subdev and media entity.
* @csi2: Pointer to csi2 structure.
* return -ENOMEM or zero on success
*/
static int csi2_init_entities(struct iss_csi2_device *csi2, const char *subname)
{
	struct v4l2_subdev *subdev = &csi2->subdev;
	struct media_entity *entity = &subdev->entity;
	struct media_pad *pads = csi2->pads;
	char name[V4L2_SUBDEV_NAME_SIZE];
	int ret;

	/* Subdev initialisation */
	v4l2_subdev_init(subdev, &csi2_ops);
	subdev->internal_ops = &csi2_internal_ops;
	snprintf(name, sizeof(name), "CSI2%s", subname);
	snprintf(subdev->name, sizeof(subdev->name), "OMAP4 ISS %s", name);
	subdev->grp_id = BIT(16);	/* group ID for iss subdevs */
	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	v4l2_set_subdevdata(subdev, csi2);

	/* Media entity: one sink pad (sensor side), one source pad. */
	pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	entity->ops = &csi2_media_ops;
	ret = media_entity_pads_init(entity, CSI2_PADS_NUM, pads);
	if (ret < 0)
		return ret;

	csi2_init_formats(subdev, NULL);

	/* Video device node */
	csi2->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	csi2->video_out.ops = &csi2_issvideo_ops;
	csi2->video_out.bpl_alignment = 32;
	csi2->video_out.bpl_zero_padding = 1;
	csi2->video_out.bpl_max = 0x1ffe0;
	csi2->video_out.iss = csi2->iss;
	csi2->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;

	ret = omap4iss_video_init(&csi2->video_out, name);
	if (ret < 0) {
		media_entity_cleanup(entity);
		return ret;
	}

	return 0;
}
/*
* omap4iss_csi2_init - Routine for module driver init
*/
/*
 * omap4iss_csi2_init - Routine for module driver init
 *
 * Initializes both CSI2 receiver instances (A and B). If the second
 * instance fails to initialize, the entities created for the first one
 * are released again so the caller does not leak them.
 *
 * Return 0 on success or a negative error code.
 */
int omap4iss_csi2_init(struct iss_device *iss)
{
	struct iss_csi2_device *csi2a = &iss->csi2a;
	struct iss_csi2_device *csi2b = &iss->csi2b;
	int ret;

	csi2a->iss = iss;
	csi2a->available = 1;
	csi2a->regs1 = OMAP4_ISS_MEM_CSI2_A_REGS1;
	csi2a->phy = &iss->csiphy1;
	csi2a->subclk = OMAP4_ISS_SUBCLK_CSI2_A;
	csi2a->state = ISS_PIPELINE_STREAM_STOPPED;
	init_waitqueue_head(&csi2a->wait);

	ret = csi2_init_entities(csi2a, "a");
	if (ret < 0)
		return ret;

	csi2b->iss = iss;
	csi2b->available = 1;
	csi2b->regs1 = OMAP4_ISS_MEM_CSI2_B_REGS1;
	csi2b->phy = &iss->csiphy2;
	csi2b->subclk = OMAP4_ISS_SUBCLK_CSI2_B;
	csi2b->state = ISS_PIPELINE_STREAM_STOPPED;
	init_waitqueue_head(&csi2b->wait);

	ret = csi2_init_entities(csi2b, "b");
	if (ret < 0) {
		/*
		 * Undo the CSI2a initialisation, mirroring what
		 * omap4iss_csi2_cleanup() does for one instance; the original
		 * code leaked csi2a's video node and media entity here.
		 */
		omap4iss_video_cleanup(&csi2a->video_out);
		media_entity_cleanup(&csi2a->subdev.entity);
		return ret;
	}

	return 0;
}
/*
* omap4iss_csi2_create_links() - CSI2 pads links creation
* @iss: Pointer to ISS device
*
* return negative error code or zero on success
*/
/*
 * omap4iss_csi2_create_links() - CSI2 pads links creation
 * @iss: Pointer to ISS device
 *
 * Connects the source pad of each CSI2 receiver (A and B) to the first
 * pad of its capture video node.
 *
 * return negative error code or zero on success
 */
int omap4iss_csi2_create_links(struct iss_device *iss)
{
	struct iss_csi2_device *instances[] = { &iss->csi2a, &iss->csi2b };
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(instances); i++) {
		ret = media_create_pad_link(&instances[i]->subdev.entity,
					    CSI2_PAD_SOURCE,
					    &instances[i]->video_out.video.entity,
					    0, 0);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/*
* omap4iss_csi2_cleanup - Routine for module driver cleanup
*/
/*
 * omap4iss_csi2_cleanup - Routine for module driver cleanup
 *
 * Releases the video node and media entity resources of both CSI2
 * receiver instances.
 */
void omap4iss_csi2_cleanup(struct iss_device *iss)
{
	struct iss_csi2_device *instances[] = { &iss->csi2a, &iss->csi2b };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(instances); i++) {
		omap4iss_video_cleanup(&instances[i]->video_out);
		media_entity_cleanup(&instances[i]->subdev.entity);
	}
}
| linux-master | drivers/staging/media/omap4iss/iss_csi2.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* TI OMAP4 ISS V4L2 Driver - ISP RESIZER module
*
* Copyright (C) 2012 Texas Instruments, Inc.
*
* Author: Sergio Aguirre <[email protected]>
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include "iss.h"
#include "iss_regs.h"
#include "iss_resizer.h"
/* Media bus codes accepted on the resizer sink pad (see resizer_try_format). */
static const unsigned int resizer_fmts[] = {
	MEDIA_BUS_FMT_UYVY8_1X16,
	MEDIA_BUS_FMT_YUYV8_1X16,
};
/*
* resizer_print_status - Print current RESIZER Module register values.
* @resizer: Pointer to ISS ISP RESIZER device.
*
* Also prints other debug information stored in the RESIZER module.
*/
/*
 * Helpers to dump a single shared (RSZ_*) or resizer-A (RZA_*) register
 * via dev_dbg(). The bare register name is pasted onto the prefix.
 */
#define RSZ_PRINT_REGISTER(iss, name)\
	dev_dbg(iss->dev, "###RSZ " #name "=0x%08x\n", \
		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_##name))

#define RZA_PRINT_REGISTER(iss, name)\
	dev_dbg(iss->dev, "###RZA " #name "=0x%08x\n", \
		iss_reg_read(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_##name))

static void resizer_print_status(struct iss_resizer_device *resizer)
{
	struct iss_device *iss = to_iss_device(resizer);

	dev_dbg(iss->dev, "-------------RESIZER Register dump-------------\n");

	/* Shared resizer registers */
	RSZ_PRINT_REGISTER(iss, SYSCONFIG);
	RSZ_PRINT_REGISTER(iss, IN_FIFO_CTRL);
	RSZ_PRINT_REGISTER(iss, FRACDIV);
	RSZ_PRINT_REGISTER(iss, SRC_EN);
	RSZ_PRINT_REGISTER(iss, SRC_MODE);
	RSZ_PRINT_REGISTER(iss, SRC_FMT0);
	RSZ_PRINT_REGISTER(iss, SRC_FMT1);
	RSZ_PRINT_REGISTER(iss, SRC_VPS);
	RSZ_PRINT_REGISTER(iss, SRC_VSZ);
	RSZ_PRINT_REGISTER(iss, SRC_HPS);
	RSZ_PRINT_REGISTER(iss, SRC_HSZ);
	RSZ_PRINT_REGISTER(iss, DMA_RZA);
	RSZ_PRINT_REGISTER(iss, DMA_RZB);
	RSZ_PRINT_REGISTER(iss, DMA_STA);
	RSZ_PRINT_REGISTER(iss, GCK_MMR);
	RSZ_PRINT_REGISTER(iss, GCK_SDR);
	RSZ_PRINT_REGISTER(iss, IRQ_RZA);
	RSZ_PRINT_REGISTER(iss, IRQ_RZB);
	RSZ_PRINT_REGISTER(iss, YUV_Y_MIN);
	RSZ_PRINT_REGISTER(iss, YUV_Y_MAX);
	RSZ_PRINT_REGISTER(iss, YUV_C_MIN);
	RSZ_PRINT_REGISTER(iss, YUV_C_MAX);
	RSZ_PRINT_REGISTER(iss, SEQ);

	/* Resizer A registers */
	RZA_PRINT_REGISTER(iss, EN);
	RZA_PRINT_REGISTER(iss, MODE);
	RZA_PRINT_REGISTER(iss, 420);
	RZA_PRINT_REGISTER(iss, I_VPS);
	RZA_PRINT_REGISTER(iss, I_HPS);
	RZA_PRINT_REGISTER(iss, O_VSZ);
	RZA_PRINT_REGISTER(iss, O_HSZ);
	RZA_PRINT_REGISTER(iss, V_PHS_Y);
	RZA_PRINT_REGISTER(iss, V_PHS_C);
	RZA_PRINT_REGISTER(iss, V_DIF);
	RZA_PRINT_REGISTER(iss, V_TYP);
	RZA_PRINT_REGISTER(iss, V_LPF);
	RZA_PRINT_REGISTER(iss, H_PHS);
	RZA_PRINT_REGISTER(iss, H_DIF);
	RZA_PRINT_REGISTER(iss, H_TYP);
	RZA_PRINT_REGISTER(iss, H_LPF);
	RZA_PRINT_REGISTER(iss, DWN_EN);
	RZA_PRINT_REGISTER(iss, SDR_Y_BAD_H);
	RZA_PRINT_REGISTER(iss, SDR_Y_BAD_L);
	RZA_PRINT_REGISTER(iss, SDR_Y_SAD_H);
	RZA_PRINT_REGISTER(iss, SDR_Y_SAD_L);
	RZA_PRINT_REGISTER(iss, SDR_Y_OFT);
	RZA_PRINT_REGISTER(iss, SDR_Y_PTR_S);
	RZA_PRINT_REGISTER(iss, SDR_Y_PTR_E);
	RZA_PRINT_REGISTER(iss, SDR_C_BAD_H);
	RZA_PRINT_REGISTER(iss, SDR_C_BAD_L);
	RZA_PRINT_REGISTER(iss, SDR_C_SAD_H);
	RZA_PRINT_REGISTER(iss, SDR_C_SAD_L);
	RZA_PRINT_REGISTER(iss, SDR_C_OFT);
	RZA_PRINT_REGISTER(iss, SDR_C_PTR_S);
	RZA_PRINT_REGISTER(iss, SDR_C_PTR_E);

	dev_dbg(iss->dev, "-----------------------------------------------\n");
}
/*
* resizer_enable - Enable/Disable RESIZER.
* @enable: enable flag
*
*/
/*
 * resizer_enable - Enable/Disable RESIZER.
 * @resizer: Pointer to ISS ISP RESIZER device.
 * @enable: enable flag
 */
static void resizer_enable(struct iss_resizer_device *resizer, u8 enable)
{
	struct iss_device *iss = to_iss_device(resizer);
	u32 src_bits = enable ? RSZ_SRC_EN_SRC_EN : 0;
	u32 rza_bits = enable ? RSZ_EN_EN : 0;

	/* Shared source enable, then resizer A. */
	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_EN,
		       RSZ_SRC_EN_SRC_EN, src_bits);

	/* TODO: Enable RSZB */
	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_EN, RSZ_EN_EN,
		       rza_bits);
}
/* -----------------------------------------------------------------------------
* Format- and pipeline-related configuration helpers
*/
/*
* resizer_set_outaddr - Set memory address to save output image
* @resizer: Pointer to ISP RESIZER device.
* @addr: 32-bit memory address aligned on 32 byte boundary.
*
* Sets the memory address where the output will be saved.
*/
static void resizer_set_outaddr(struct iss_resizer_device *resizer, u32 addr)
{
	struct iss_device *iss = to_iss_device(resizer);
	struct v4l2_mbus_framefmt *informat, *outformat;

	informat = &resizer->formats[RESIZER_PAD_SINK];
	outformat = &resizer->formats[RESIZER_PAD_SOURCE_MEM];

	/* Save address split in Base Address H & L */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_BAD_H,
		      (addr >> 16) & 0xffff);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_BAD_L,
		      addr & 0xffff);

	/* SAD = BAD (start address mirrors the base address) */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_SAD_H,
		      (addr >> 16) & 0xffff);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_SAD_L,
		      addr & 0xffff);

	/*
	 * Program UV buffer address... Hardcoded to be contiguous!
	 * The chroma plane is only programmed for the UYVY -> NV12 path and
	 * is placed right after the luma plane (bpl * height bytes).
	 */
	if ((informat->code == MEDIA_BUS_FMT_UYVY8_1X16) &&
	    (outformat->code == MEDIA_BUS_FMT_YUYV8_1_5X8)) {
		u32 c_addr = addr + resizer->video_out.bpl_value
			   * outformat->height;

		/*
		 * Ensure Y_BAD_L[6:0] = C_BAD_L[6:0]: round the chroma
		 * address up to the next 128-byte boundary, then copy the
		 * low 7 bits of the luma address into it.
		 * NOTE(review): presumably a hardware addressing constraint
		 * of the SDR interface — confirm against the TRM.
		 */
		if ((c_addr ^ addr) & 0x7f) {
			c_addr &= ~0x7f;
			c_addr += 0x80;
			c_addr |= addr & 0x7f;
		}

		/* Save address split in Base Address H & L */
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_BAD_H,
			      (c_addr >> 16) & 0xffff);
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_BAD_L,
			      c_addr & 0xffff);

		/* SAD = BAD */
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_SAD_H,
			      (c_addr >> 16) & 0xffff);
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_SAD_L,
			      c_addr & 0xffff);
	}
}
/*
 * resizer_configure - Program the resizer for the current formats.
 * @resizer: Pointer to ISS ISP RESIZER device.
 *
 * Configures input selection, operating mode, input/output sizes and the
 * memory output layout from the active sink and source-mem formats.
 * A 1:1 scaling ratio is programmed (V_DIF/H_DIF = 0x100).
 */
static void resizer_configure(struct iss_resizer_device *resizer)
{
	struct iss_device *iss = to_iss_device(resizer);
	struct v4l2_mbus_framefmt *informat, *outformat;

	informat = &resizer->formats[RESIZER_PAD_SINK];
	outformat = &resizer->formats[RESIZER_PAD_SOURCE_MEM];

	/* Disable pass-through mode. Despite its name, the BYPASS bit controls
	 * pass-through mode, not bypass mode.
	 */
	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_FMT0,
		    RSZ_SRC_FMT0_BYPASS);

	/* Select RSZ input */
	iss_reg_update(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_FMT0,
		       RSZ_SRC_FMT0_SEL,
		       resizer->input == RESIZER_INPUT_IPIPEIF ?
		       RSZ_SRC_FMT0_SEL : 0);

	/* RSZ ignores WEN signal from IPIPE/IPIPEIF */
	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_MODE,
		    RSZ_SRC_MODE_WRT);

	/* Set Resizer in free-running mode */
	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_MODE,
		    RSZ_SRC_MODE_OST);

	/* Init Resizer A */
	iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_MODE,
		    RZA_MODE_ONE_SHOT);

	/* Set size related things now.
	 * SRC_VSZ/SRC_HSZ and O_VSZ/O_HSZ hold size minus one (height uses
	 * minus two here — NOTE(review): presumably intentional cropping of
	 * one line; confirm against the TRM).
	 */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_VPS, 0);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_HPS, 0);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_VSZ,
		      informat->height - 2);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SRC_HSZ,
		      informat->width - 1);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_I_VPS, 0);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_I_HPS, 0);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_O_VSZ,
		      outformat->height - 2);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_O_HSZ,
		      outformat->width - 1);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_V_DIF, 0x100);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_H_DIF, 0x100);

	/* Buffer output settings */
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_PTR_S, 0);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_PTR_E,
		      outformat->height - 1);
	iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_Y_OFT,
		      resizer->video_out.bpl_value);

	/* UYVY -> NV12 conversion */
	if ((informat->code == MEDIA_BUS_FMT_UYVY8_1X16) &&
	    (outformat->code == MEDIA_BUS_FMT_YUYV8_1_5X8)) {
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420,
			      RSZ_420_CEN | RSZ_420_YEN);

		/* UV Buffer output settings */
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_PTR_S,
			      0);
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_PTR_E,
			      outformat->height - 1);
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_SDR_C_OFT,
			      resizer->video_out.bpl_value);
	} else {
		iss_reg_write(iss, OMAP4_ISS_MEM_ISP_RESIZER, RZA_420, 0);
	}
}
/* -----------------------------------------------------------------------------
* Interrupt handling
*/
/* Complete the current buffer and restart the resizer for the next one. */
static void resizer_isr_buffer(struct iss_resizer_device *resizer)
{
	struct iss_buffer *next;

	/*
	 * The whole resizer needs to be stopped. Disabling RZA only produces
	 * input FIFO overflows, most probably when the next frame is received.
	 */
	resizer_enable(resizer, 0);

	next = omap4iss_video_buffer_next(&resizer->video_out);
	if (next) {
		resizer_set_outaddr(resizer, next->iss_addr);
		resizer_enable(resizer, 1);
	}
}
/*
* omap4iss_resizer_isr - Configure resizer during interframe time.
* @resizer: Pointer to ISP RESIZER device.
* @events: RESIZER events
*/
/*
 * omap4iss_resizer_isr - Configure resizer during interframe time.
 * @resizer: Pointer to ISP RESIZER device.
 * @events: RESIZER events
 */
void omap4iss_resizer_isr(struct iss_resizer_device *resizer, u32 events)
{
	const u32 error_mask = ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR |
			       ISP5_IRQ_RSZ_FIFO_OVF;
	struct iss_device *iss = to_iss_device(resizer);
	struct iss_pipeline *pipe =
		to_iss_pipeline(&resizer->subdev.entity);

	/* FIFO errors are fatal for the stream: cancel it. */
	if (events & error_mask) {
		dev_dbg(iss->dev, "RSZ Err: FIFO_IN_BLK:%d, FIFO_OVF:%d\n",
			!!(events & ISP5_IRQ_RSZ_FIFO_IN_BLK_ERR),
			!!(events & ISP5_IRQ_RSZ_FIFO_OVF));
		omap4iss_pipeline_cancel_stream(pipe);
	}

	/* Nothing more to do while a stop request is being serviced. */
	if (omap4iss_module_sync_is_stopping(&resizer->wait,
					     &resizer->stopping))
		return;

	if (events & ISP5_IRQ_RSZ_INT_DMA)
		resizer_isr_buffer(resizer);
}
/* -----------------------------------------------------------------------------
* ISS video operations
*/
static int resizer_video_queue(struct iss_video *video,
struct iss_buffer *buffer)
{
struct iss_resizer_device *resizer = container_of(video,
struct iss_resizer_device, video_out);
if (!(resizer->output & RESIZER_OUTPUT_MEMORY))
return -ENODEV;
resizer_set_outaddr(resizer, buffer->iss_addr);
/*
* If streaming was enabled before there was a buffer queued
* or underrun happened in the ISR, the hardware was not enabled
* and DMA queue flag ISS_VIDEO_DMAQUEUE_UNDERRUN is still set.
* Enable it now.
*/
if (video->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_UNDERRUN) {
resizer_enable(resizer, 1);
iss_video_dmaqueue_flags_clr(video);
}
return 0;
}
static const struct iss_video_operations resizer_video_ops = {
.queue = resizer_video_queue,
};
/* -----------------------------------------------------------------------------
* V4L2 subdev operations
*/
/*
* resizer_set_stream - Enable/Disable streaming on the RESIZER module
* @sd: ISP RESIZER V4L2 subdevice
* @enable: Enable/disable stream
*/
static int resizer_set_stream(struct v4l2_subdev *sd, int enable)
{
	struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
	struct iss_device *iss = to_iss_device(resizer);
	struct iss_video *video_out = &resizer->video_out;
	int ret = 0;

	/* Coming out of the stopped state: ungate the module clocks first. */
	if (resizer->state == ISS_PIPELINE_STREAM_STOPPED) {
		if (enable == ISS_PIPELINE_STREAM_STOPPED)
			return 0;

		omap4iss_isp_subclk_enable(iss, OMAP4_ISS_ISP_SUBCLK_RSZ);
		iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_MMR,
			    RSZ_GCK_MMR_MMR);
		iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_SDR,
			    RSZ_GCK_SDR_CORE);

		/* FIXME: Enable RSZB also */
		iss_reg_set(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SYSCONFIG,
			    RSZ_SYSCONFIG_RSZA_CLK_EN);
	}

	switch (enable) {
	case ISS_PIPELINE_STREAM_CONTINUOUS:
		resizer_configure(resizer);
		resizer_print_status(resizer);

		/*
		 * When outputting to memory with no buffer available, let the
		 * buffer queue handler start the hardware. A DMA queue flag
		 * ISS_VIDEO_DMAQUEUE_QUEUED will be set as soon as there is
		 * a buffer available.
		 */
		if (resizer->output & RESIZER_OUTPUT_MEMORY &&
		    !(video_out->dmaqueue_flags & ISS_VIDEO_DMAQUEUE_QUEUED))
			break;

		atomic_set(&resizer->stopping, 0);
		resizer_enable(resizer, 1);
		iss_video_dmaqueue_flags_clr(video_out);
		break;

	case ISS_PIPELINE_STREAM_STOPPED:
		/* Stopping an already stopped module is a no-op. */
		if (resizer->state == ISS_PIPELINE_STREAM_STOPPED)
			return 0;

		/* Wait for the module to become idle before gating clocks. */
		if (omap4iss_module_sync_idle(&sd->entity, &resizer->wait,
					      &resizer->stopping))
			ret = -ETIMEDOUT;

		resizer_enable(resizer, 0);
		iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_SYSCONFIG,
			    RSZ_SYSCONFIG_RSZA_CLK_EN);
		iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_SDR,
			    RSZ_GCK_SDR_CORE);
		iss_reg_clr(iss, OMAP4_ISS_MEM_ISP_RESIZER, RSZ_GCK_MMR,
			    RSZ_GCK_MMR_MMR);
		omap4iss_isp_subclk_disable(iss, OMAP4_ISS_ISP_SUBCLK_RSZ);
		iss_video_dmaqueue_flags_clr(video_out);
		break;
	}

	resizer->state = enable;
	return ret;
}
static struct v4l2_mbus_framefmt *
__resizer_get_format(struct iss_resizer_device *resizer,
struct v4l2_subdev_state *sd_state, unsigned int pad,
enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&resizer->subdev, sd_state,
pad);
return &resizer->formats[pad];
}
/*
* resizer_try_format - Try video format on a pad
* @resizer: ISS RESIZER device
* @cfg: V4L2 subdev pad config
* @pad: Pad number
* @fmt: Format
*/
static void
resizer_try_format(struct iss_resizer_device *resizer,
		   struct v4l2_subdev_state *sd_state, unsigned int pad,
		   struct v4l2_mbus_framefmt *fmt,
		   enum v4l2_subdev_format_whence which)
{
	u32 pixelcode;
	struct v4l2_mbus_framefmt *format;
	unsigned int width = fmt->width;
	unsigned int height = fmt->height;
	unsigned int i;

	switch (pad) {
	case RESIZER_PAD_SINK:
		/* Accept only the codes listed in resizer_fmts. */
		for (i = 0; i < ARRAY_SIZE(resizer_fmts); i++) {
			if (fmt->code == resizer_fmts[i])
				break;
		}

		/* If not found, use UYVY as default */
		if (i >= ARRAY_SIZE(resizer_fmts))
			fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;

		/* Clamp the input size. */
		fmt->width = clamp_t(u32, width, 1, 8192);
		fmt->height = clamp_t(u32, height, 1, 8192);
		break;

	case RESIZER_PAD_SOURCE_MEM:
		/* Start from the sink format and only adjust what memory
		 * output supports changing.
		 */
		pixelcode = fmt->code;
		format = __resizer_get_format(resizer, sd_state,
					      RESIZER_PAD_SINK,
					      which);
		memcpy(fmt, format, sizeof(*fmt));

		/* Only the UYVY -> NV12-style conversion may change the code
		 * (see the RZA_420 programming in resizer_configure()).
		 */
		if ((pixelcode == MEDIA_BUS_FMT_YUYV8_1_5X8) &&
		    (fmt->code == MEDIA_BUS_FMT_UYVY8_1X16))
			fmt->code = pixelcode;

		/* The data formatter truncates the number of horizontal output
		 * pixels to a multiple of 16. To avoid clipping data, allow
		 * callers to request an output size bigger than the input size
		 * up to the nearest multiple of 16.
		 */
		fmt->width = clamp_t(u32, width, 32, (fmt->width + 15) & ~15);
		fmt->width &= ~15;
		fmt->height = clamp_t(u32, height, 32, fmt->height);
		break;
	}

	fmt->colorspace = V4L2_COLORSPACE_JPEG;
	fmt->field = V4L2_FIELD_NONE;
}
/*
* resizer_enum_mbus_code - Handle pixel format enumeration
* @sd : pointer to v4l2 subdev structure
* @cfg: V4L2 subdev pad config
* @code : pointer to v4l2_subdev_mbus_code_enum structure
* return -EINVAL or zero on success
*/
static int resizer_enum_mbus_code(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_mbus_code_enum *code)
{
	struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	switch (code->pad) {
	case RESIZER_PAD_SINK:
		if (code->index >= ARRAY_SIZE(resizer_fmts))
			return -EINVAL;

		code->code = resizer_fmts[code->index];
		return 0;

	case RESIZER_PAD_SOURCE_MEM:
		format = __resizer_get_format(resizer, sd_state,
					      RESIZER_PAD_SINK,
					      code->which);

		/* Index 0 always mirrors the current sink code. */
		if (code->index == 0) {
			code->code = format->code;
			return 0;
		}

		/* A UYVY input can additionally be written out converted. */
		if (code->index == 1 &&
		    format->code == MEDIA_BUS_FMT_UYVY8_1X16) {
			code->code = MEDIA_BUS_FMT_YUYV8_1_5X8;
			return 0;
		}

		return -EINVAL;

	default:
		return -EINVAL;
	}
}
static int resizer_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt format;
if (fse->index != 0)
return -EINVAL;
format.code = fse->code;
format.width = 1;
format.height = 1;
resizer_try_format(resizer, sd_state, fse->pad, &format, fse->which);
fse->min_width = format.width;
fse->min_height = format.height;
if (format.code != fse->code)
return -EINVAL;
format.code = fse->code;
format.width = -1;
format.height = -1;
resizer_try_format(resizer, sd_state, fse->pad, &format, fse->which);
fse->max_width = format.width;
fse->max_height = format.height;
return 0;
}
/*
* resizer_get_format - Retrieve the video format on a pad
* @sd : ISP RESIZER V4L2 subdevice
* @cfg: V4L2 subdev pad config
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
static int resizer_get_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *format;
format = __resizer_get_format(resizer, sd_state, fmt->pad, fmt->which);
if (!format)
return -EINVAL;
fmt->format = *format;
return 0;
}
/*
* resizer_set_format - Set the video format on a pad
* @sd : ISP RESIZER V4L2 subdevice
* @cfg: V4L2 subdev pad config
* @fmt: Format
*
* Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
* to the format type.
*/
static int resizer_set_format(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_format *fmt)
{
	struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *format;

	format = __resizer_get_format(resizer, sd_state, fmt->pad, fmt->which);
	if (!format)
		return -EINVAL;

	/* Adjust the requested format to hardware constraints and store it. */
	resizer_try_format(resizer, sd_state, fmt->pad, &fmt->format,
			   fmt->which);
	*format = fmt->format;

	/* Propagate the format from sink to source: the source pad starts
	 * from the new sink format, then gets re-adjusted by try_format.
	 */
	if (fmt->pad == RESIZER_PAD_SINK) {
		format = __resizer_get_format(resizer, sd_state,
					      RESIZER_PAD_SOURCE_MEM,
					      fmt->which);
		*format = fmt->format;
		resizer_try_format(resizer, sd_state, RESIZER_PAD_SOURCE_MEM,
				   format,
				   fmt->which);
	}

	return 0;
}
static int resizer_link_validate(struct v4l2_subdev *sd,
struct media_link *link,
struct v4l2_subdev_format *source_fmt,
struct v4l2_subdev_format *sink_fmt)
{
/* Check if the two ends match */
if (source_fmt->format.width != sink_fmt->format.width ||
source_fmt->format.height != sink_fmt->format.height)
return -EPIPE;
if (source_fmt->format.code != sink_fmt->format.code)
return -EPIPE;
return 0;
}
/*
* resizer_init_formats - Initialize formats on all pads
* @sd: ISP RESIZER V4L2 subdevice
* @fh: V4L2 subdev file handle
*
* Initialize all pad formats with default values. If fh is not NULL, try
* formats are initialized on the file handle. Otherwise active formats are
* initialized on the device.
*/
static int resizer_init_formats(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh)
{
struct v4l2_subdev_format format;
memset(&format, 0, sizeof(format));
format.pad = RESIZER_PAD_SINK;
format.which = fh ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
format.format.code = MEDIA_BUS_FMT_UYVY8_1X16;
format.format.width = 4096;
format.format.height = 4096;
resizer_set_format(sd, fh ? fh->state : NULL, &format);
return 0;
}
/* V4L2 subdev video operations */
static const struct v4l2_subdev_video_ops resizer_v4l2_video_ops = {
	.s_stream = resizer_set_stream,
};

/* V4L2 subdev pad operations */
static const struct v4l2_subdev_pad_ops resizer_v4l2_pad_ops = {
	.enum_mbus_code = resizer_enum_mbus_code,
	.enum_frame_size = resizer_enum_frame_size,
	.get_fmt = resizer_get_format,
	.set_fmt = resizer_set_format,
	.link_validate = resizer_link_validate,
};

/* V4L2 subdev operations */
static const struct v4l2_subdev_ops resizer_v4l2_ops = {
	.video = &resizer_v4l2_video_ops,
	.pad = &resizer_v4l2_pad_ops,
};

/* V4L2 subdev internal operations: set default try formats on devnode open */
static const struct v4l2_subdev_internal_ops resizer_v4l2_internal_ops = {
	.open = resizer_init_formats,
};
/* -----------------------------------------------------------------------------
* Media entity operations
*/
/*
* resizer_link_setup - Setup RESIZER connections
* @entity: RESIZER media entity
* @local: Pad at the local end of the link
* @remote: Pad at the remote end of the link
* @flags: Link flags
*
* return -EINVAL or zero on success
*/
static int resizer_link_setup(struct media_entity *entity,
			      const struct media_pad *local,
			      const struct media_pad *remote, u32 flags)
{
	struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
	struct iss_resizer_device *resizer = v4l2_get_subdevdata(sd);
	struct iss_device *iss = to_iss_device(resizer);
	unsigned int index = local->index;

	/* FIXME: this is actually a hack!
	 * Links whose remote is another subdev (rather than a video node)
	 * are tagged with bit 17 so one switch can handle both kinds.
	 */
	if (is_media_entity_v4l2_subdev(remote->entity))
		index |= 2 << 16;

	switch (index) {
	case RESIZER_PAD_SINK | 2 << 16:
		/* Read from IPIPE or IPIPEIF. */
		if (!(flags & MEDIA_LNK_FL_ENABLED)) {
			resizer->input = RESIZER_INPUT_NONE;
			break;
		}

		/* Only one enabled input link is allowed at a time. */
		if (resizer->input != RESIZER_INPUT_NONE)
			return -EBUSY;

		if (remote->entity == &iss->ipipeif.subdev.entity)
			resizer->input = RESIZER_INPUT_IPIPEIF;
		else if (remote->entity == &iss->ipipe.subdev.entity)
			resizer->input = RESIZER_INPUT_IPIPE;

		break;

	case RESIZER_PAD_SOURCE_MEM:
		/* Write to memory */
		if (flags & MEDIA_LNK_FL_ENABLED) {
			if (resizer->output & ~RESIZER_OUTPUT_MEMORY)
				return -EBUSY;
			resizer->output |= RESIZER_OUTPUT_MEMORY;
		} else {
			resizer->output &= ~RESIZER_OUTPUT_MEMORY;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/* media operations */
static const struct media_entity_operations resizer_media_ops = {
	/* resizer_link_setup tracks the selected input and enabled outputs */
	.link_setup = resizer_link_setup,
	/* Generic subdev format validation from the V4L2 core */
	.link_validate = v4l2_subdev_link_validate,
};
/*
* resizer_init_entities - Initialize V4L2 subdev and media entity
* @resizer: ISS ISP RESIZER module
*
* Return 0 on success and a negative error code on failure.
*/
static int resizer_init_entities(struct iss_resizer_device *resizer)
{
struct v4l2_subdev *sd = &resizer->subdev;
struct media_pad *pads = resizer->pads;
struct media_entity *me = &sd->entity;
int ret;
resizer->input = RESIZER_INPUT_NONE;
v4l2_subdev_init(sd, &resizer_v4l2_ops);
sd->internal_ops = &resizer_v4l2_internal_ops;
strscpy(sd->name, "OMAP4 ISS ISP resizer", sizeof(sd->name));
sd->grp_id = BIT(16); /* group ID for iss subdevs */
v4l2_set_subdevdata(sd, resizer);
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
pads[RESIZER_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
pads[RESIZER_PAD_SOURCE_MEM].flags = MEDIA_PAD_FL_SOURCE;
me->ops = &resizer_media_ops;
ret = media_entity_pads_init(me, RESIZER_PADS_NUM, pads);
if (ret < 0)
return ret;
resizer_init_formats(sd, NULL);
resizer->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
resizer->video_out.ops = &resizer_video_ops;
resizer->video_out.iss = to_iss_device(resizer);
resizer->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
resizer->video_out.bpl_alignment = 32;
resizer->video_out.bpl_zero_padding = 1;
resizer->video_out.bpl_max = 0x1ffe0;
return omap4iss_video_init(&resizer->video_out, "ISP resizer a");
}
/*
 * omap4iss_resizer_unregister_entities - Unregister the subdev and video node
 * @resizer: ISS ISP RESIZER module
 *
 * Reverses omap4iss_resizer_register_entities(); safe to call on entities
 * that were never registered, so it doubles as error-path cleanup.
 */
void omap4iss_resizer_unregister_entities(struct iss_resizer_device *resizer)
{
	v4l2_device_unregister_subdev(&resizer->subdev);
	omap4iss_video_unregister(&resizer->video_out);
}
/*
 * omap4iss_resizer_register_entities - Register the subdev and video node
 * @resizer: ISS ISP RESIZER module
 * @vdev: V4L2 device to register against
 *
 * On failure everything registered so far is unregistered again.
 * Returns 0 on success or a negative error code.
 */
int omap4iss_resizer_register_entities(struct iss_resizer_device *resizer,
				       struct v4l2_device *vdev)
{
	int ret;

	/* Register the subdev first, then the capture video node. */
	ret = v4l2_device_register_subdev(vdev, &resizer->subdev);
	if (ret >= 0)
		ret = omap4iss_video_register(&resizer->video_out, vdev);

	if (ret < 0) {
		omap4iss_resizer_unregister_entities(resizer);
		return ret;
	}

	return 0;
}
/* -----------------------------------------------------------------------------
* ISP RESIZER initialisation and cleanup
*/
/*
* omap4iss_resizer_init - RESIZER module initialization.
* @iss: Device pointer specific to the OMAP4 ISS.
*
* TODO: Get the initialisation values from platform data.
*
* Return 0 on success or a negative error code otherwise.
*/
/*
 * omap4iss_resizer_init - RESIZER module initialization.
 * @iss: Device pointer specific to the OMAP4 ISS.
 *
 * TODO: Get the initialisation values from platform data.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int omap4iss_resizer_init(struct iss_device *iss)
{
	struct iss_resizer_device *resizer = &iss->resizer;

	/* Start stopped, with the stop-synchronisation queue ready. */
	init_waitqueue_head(&resizer->wait);
	resizer->state = ISS_PIPELINE_STREAM_STOPPED;

	return resizer_init_entities(resizer);
}
/*
* omap4iss_resizer_create_links() - RESIZER pads links creation
* @iss: Pointer to ISS device
*
* return negative error code or zero on success
*/
/*
 * omap4iss_resizer_create_links() - RESIZER pads links creation
 * @iss: Pointer to ISS device
 *
 * return negative error code or zero on success
 */
int omap4iss_resizer_create_links(struct iss_device *iss)
{
	struct iss_resizer_device *resizer = &iss->resizer;
	struct media_entity *source = &resizer->subdev.entity;
	struct media_entity *sink = &resizer->video_out.video.entity;

	/* Connect the RESIZER subdev to the video node. */
	return media_create_pad_link(source, RESIZER_PAD_SOURCE_MEM,
				     sink, 0, 0);
}
/*
* omap4iss_resizer_cleanup - RESIZER module cleanup.
* @iss: Device pointer specific to the OMAP4 ISS.
*/
/*
 * omap4iss_resizer_cleanup - RESIZER module cleanup.
 * @iss: Device pointer specific to the OMAP4 ISS.
 */
void omap4iss_resizer_cleanup(struct iss_device *iss)
{
	/* Release the media entity resources of the resizer subdev. */
	media_entity_cleanup(&iss->resizer.subdev.entity);
}
| linux-master | drivers/staging/media/omap4iss/iss_resizer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "sh_css_param_dvs.h"
#include <assert_support.h>
#include <type_support.h>
#include <ia_css_err.h>
#include <ia_css_types.h>
#include "ia_css_debug.h"
/*
 * Allocate a DVS 6-axis configuration table.
 *
 * The table dimensions come from @dvs_config_src when provided, otherwise
 * they are derived from @frame_res. The four coordinate buffers (Y/UV x/y)
 * are allocated but left uninitialized; the caller fills them.
 *
 * Returns the new table, or NULL on allocation failure.
 */
static struct ia_css_dvs_6axis_config *
alloc_dvs_6axis_table(const struct ia_css_resolution *frame_res,
		      struct ia_css_dvs_6axis_config *dvs_config_src)
{
	unsigned int width_y = 0;
	unsigned int height_y = 0;
	unsigned int width_uv = 0;
	unsigned int height_uv = 0;
	int err = 0;
	struct ia_css_dvs_6axis_config *dvs_config = NULL;

	/*
	 * Zero the struct so a partially constructed table can safely go
	 * through free_dvs_6axis_table() on the error path. With plain
	 * kvmalloc(), a failure while allocating e.g. xcoords_y would leave
	 * the remaining coordinate pointers as garbage, and the cleanup
	 * would kvfree() those uninitialized pointers.
	 */
	dvs_config = kvzalloc(sizeof(struct ia_css_dvs_6axis_config),
			      GFP_KERNEL);
	if (!dvs_config) {
		IA_CSS_ERROR("out of memory");
		err = -ENOMEM;
	} else {
		/* Initialize new struct with latest config settings */
		if (dvs_config_src) {
			dvs_config->width_y = width_y = dvs_config_src->width_y;
			dvs_config->height_y = height_y = dvs_config_src->height_y;
			dvs_config->width_uv = width_uv = dvs_config_src->width_uv;
			dvs_config->height_uv = height_uv = dvs_config_src->height_uv;
			IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d", width_y, height_y);
		} else if (frame_res) {
			dvs_config->width_y = width_y = DVS_TABLE_IN_BLOCKDIM_X_LUMA(frame_res->width);
			dvs_config->height_y = height_y = DVS_TABLE_IN_BLOCKDIM_Y_LUMA(
							      frame_res->height);
			/* UV = Y/2, depends on colour format YUV 4.2.0 */
			dvs_config->width_uv = width_uv = DVS_TABLE_IN_BLOCKDIM_X_CHROMA(
							      frame_res->width / 2);
			dvs_config->height_uv = height_uv = DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(
								frame_res->height / 2);
			IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d", width_y, height_y);
		}

		/* Generate Y buffers */
		dvs_config->xcoords_y = kvmalloc(width_y * height_y * sizeof(uint32_t),
						 GFP_KERNEL);
		if (!dvs_config->xcoords_y) {
			IA_CSS_ERROR("out of memory");
			err = -ENOMEM;
			goto exit;
		}

		dvs_config->ycoords_y = kvmalloc(width_y * height_y * sizeof(uint32_t),
						 GFP_KERNEL);
		if (!dvs_config->ycoords_y) {
			IA_CSS_ERROR("out of memory");
			err = -ENOMEM;
			goto exit;
		}

		/* Generate UV buffers */
		IA_CSS_LOG("UV W %d H %d", width_uv, height_uv);

		dvs_config->xcoords_uv = kvmalloc(width_uv * height_uv * sizeof(uint32_t),
						  GFP_KERNEL);
		if (!dvs_config->xcoords_uv) {
			IA_CSS_ERROR("out of memory");
			err = -ENOMEM;
			goto exit;
		}

		dvs_config->ycoords_uv = kvmalloc(width_uv * height_uv * sizeof(uint32_t),
						  GFP_KERNEL);
		if (!dvs_config->ycoords_uv) {
			IA_CSS_ERROR("out of memory");
			err = -ENOMEM;
		}

exit:
		if (err) {
			/* we might have allocated some memory, release this */
			free_dvs_6axis_table(&dvs_config);
			dvs_config = NULL;
		}
	}

	IA_CSS_LEAVE("dvs_config=%p", dvs_config);
	return dvs_config;
}
/*
 * Fill a DVS 6-axis table with the identity mapping: every block maps to
 * its own position, offset by the DVS envelope.
 */
static void
init_dvs_6axis_table_from_default(struct ia_css_dvs_6axis_config *dvs_config,
				  const struct ia_css_resolution *dvs_offset)
{
	unsigned int width_y = dvs_config->width_y;
	unsigned int height_y = dvs_config->height_y;
	unsigned int width_uv = dvs_config->width_uv;
	unsigned int height_uv = dvs_config->height_uv;
	unsigned int x, y;

	IA_CSS_LOG("Env_X=%d, Env_Y=%d, width_y=%d, height_y=%d",
		   dvs_offset->width, dvs_offset->height, width_y, height_y);

	/* Luma: both coordinate planes share the same grid. */
	for (y = 0; y < height_y; y++) {
		for (x = 0; x < width_y; x++) {
			dvs_config->xcoords_y[y * width_y + x] =
			    (dvs_offset->width + x * DVS_BLOCKDIM_X)
			    << DVS_COORD_FRAC_BITS;
			dvs_config->ycoords_y[y * width_y + x] =
			    (dvs_offset->height + y * DVS_BLOCKDIM_Y_LUMA)
			    << DVS_COORD_FRAC_BITS;
		}
	}

	/* Chroma: envelope dimensions are in Y pixels, hence offset UV = offset Y / 2. */
	for (y = 0; y < height_uv; y++) {
		for (x = 0; x < width_uv; x++) {
			dvs_config->xcoords_uv[y * width_uv + x] =
			    ((dvs_offset->width / 2) + x * DVS_BLOCKDIM_X)
			    << DVS_COORD_FRAC_BITS;
			dvs_config->ycoords_uv[y * width_uv + x] =
			    ((dvs_offset->height / 2) + y * DVS_BLOCKDIM_Y_CHROMA)
			    << DVS_COORD_FRAC_BITS;
		}
	}
}
static void
init_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config *dvs_config,
				 struct ia_css_dvs_6axis_config *dvs_config_src)
{
	/*
	 * Copy all four coordinate grids from the source configuration.
	 * The caller guarantees both tables have identical dimensions.
	 */
	size_t luma_bytes = dvs_config->width_y * dvs_config->height_y *
			    sizeof(uint32_t);
	size_t chroma_bytes = dvs_config->width_uv * dvs_config->height_uv *
			      sizeof(uint32_t);

	memcpy(dvs_config->xcoords_y, dvs_config_src->xcoords_y, luma_bytes);
	memcpy(dvs_config->ycoords_y, dvs_config_src->ycoords_y, luma_bytes);
	memcpy(dvs_config->xcoords_uv, dvs_config_src->xcoords_uv, chroma_bytes);
	memcpy(dvs_config->ycoords_uv, dvs_config_src->ycoords_uv, chroma_bytes);
}
struct ia_css_dvs_6axis_config *
generate_dvs_6axis_table(const struct ia_css_resolution *frame_res,
			 const struct ia_css_resolution *dvs_offset)
{
	/*
	 * Allocate a 6-axis DVS table sized for frame_res and populate it
	 * with the default coordinate grid. Returns NULL when allocation
	 * fails.
	 */
	struct ia_css_dvs_6axis_config *table;

	assert(frame_res);
	assert(dvs_offset);

	table = alloc_dvs_6axis_table(frame_res, NULL);
	if (!table)
		return NULL;

	init_dvs_6axis_table_from_default(table, dvs_offset);
	return table;
}
struct ia_css_dvs_6axis_config *
generate_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config
				     *dvs_config_src)
{
	/*
	 * Allocate a 6-axis DVS table with the same dimensions as the
	 * source table and copy the source coordinates into it.
	 * Returns NULL when allocation fails.
	 */
	struct ia_css_dvs_6axis_config *table;

	assert(dvs_config_src);

	table = alloc_dvs_6axis_table(NULL, dvs_config_src);
	if (!table)
		return NULL;

	init_dvs_6axis_table_from_config(table, dvs_config_src);
	return table;
}
void
free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config)
{
	/*
	 * Release a 6-axis DVS table and all four coordinate grids, then
	 * clear the caller's pointer. Safe to call with NULL or with an
	 * already-cleared table pointer.
	 *
	 * kvfree(NULL) is a documented no-op, so the per-grid NULL guards
	 * of the original implementation were redundant and are dropped.
	 */
	if ((dvs_6axis_config) && (*dvs_6axis_config)) {
		IA_CSS_ENTER_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config));

		kvfree((*dvs_6axis_config)->xcoords_y);
		(*dvs_6axis_config)->xcoords_y = NULL;

		kvfree((*dvs_6axis_config)->ycoords_y);
		(*dvs_6axis_config)->ycoords_y = NULL;

		/* Free up UV buffers */
		kvfree((*dvs_6axis_config)->xcoords_uv);
		(*dvs_6axis_config)->xcoords_uv = NULL;

		kvfree((*dvs_6axis_config)->ycoords_uv);
		(*dvs_6axis_config)->ycoords_uv = NULL;

		IA_CSS_LEAVE_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config));
		kvfree(*dvs_6axis_config);
		*dvs_6axis_config = NULL;
	}
}
void copy_dvs_6axis_table(struct ia_css_dvs_6axis_config *dvs_config_dst,
			  const struct ia_css_dvs_6axis_config *dvs_config_src)
{
	/*
	 * Copy all four coordinate grids from src to dst. Both tables must
	 * already be allocated with identical dimensions (checked by the
	 * asserts below). The UV grid is half the Y grid width, which
	 * depends on the colour format (YUV 4:2:0).
	 */
	size_t luma_bytes;
	size_t chroma_bytes;

	assert(dvs_config_src);
	assert(dvs_config_dst);
	assert(dvs_config_src->xcoords_y);
	assert(dvs_config_src->xcoords_uv);
	assert(dvs_config_src->ycoords_y);
	assert(dvs_config_src->ycoords_uv);
	assert(dvs_config_src->width_y == dvs_config_dst->width_y);
	assert(dvs_config_src->width_uv == dvs_config_dst->width_uv);
	assert(dvs_config_src->height_y == dvs_config_dst->height_y);
	assert(dvs_config_src->height_uv == dvs_config_dst->height_uv);

	luma_bytes = dvs_config_src->width_y * dvs_config_src->height_y *
		     sizeof(uint32_t);
	chroma_bytes = dvs_config_src->width_uv * dvs_config_src->height_uv *
		       sizeof(uint32_t);

	memcpy(dvs_config_dst->xcoords_y, dvs_config_src->xcoords_y, luma_bytes);
	memcpy(dvs_config_dst->ycoords_y, dvs_config_src->ycoords_y, luma_bytes);
	memcpy(dvs_config_dst->xcoords_uv, dvs_config_src->xcoords_uv, chroma_bytes);
	memcpy(dvs_config_dst->ycoords_uv, dvs_config_src->ycoords_uv, chroma_bytes);
}
void
ia_css_dvs_statistics_get(enum dvs_statistics_type type,
			  union ia_css_dvs_statistics_host *host_stats,
			  const union ia_css_dvs_statistics_isp *isp_stats)
{
	/*
	 * Dispatch the ISP->host statistics conversion to the handler
	 * matching the requested statistics generation. Unknown types are
	 * silently ignored, exactly as before.
	 */
	switch (type) {
	case DVS_STATISTICS:
		ia_css_get_dvs_statistics(host_stats->p_dvs_statistics_host,
					  isp_stats->p_dvs_statistics_isp);
		break;
	case DVS2_STATISTICS:
		ia_css_get_dvs2_statistics(host_stats->p_dvs2_statistics_host,
					   isp_stats->p_dvs_statistics_isp);
		break;
	default:
		break;
	}
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_param_dvs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/slab.h>
#include <ia_css_host_data.h>
#include <sh_css_internal.h>
struct ia_css_host_data *ia_css_host_data_allocate(size_t size)
{
	/*
	 * Allocate a host-side data descriptor plus a backing buffer of
	 * the requested size. Returns NULL if either allocation fails
	 * (the descriptor is released again on buffer failure).
	 */
	struct ia_css_host_data *hd;

	hd = kmalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return NULL;

	hd->size = (uint32_t)size;
	hd->address = kvmalloc(size, GFP_KERNEL);
	if (!hd->address) {
		kfree(hd);
		return NULL;
	}
	return hd;
}
void ia_css_host_data_free(struct ia_css_host_data *me)
{
	/*
	 * Release the backing buffer and the descriptor itself.
	 * A NULL argument is a no-op.
	 */
	if (!me)
		return;

	kvfree(me->address);
	me->address = NULL;
	kfree(me);
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_host_data.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "ia_css_mipi.h"
#include "sh_css_mipi.h"
#include <type_support.h>
#include "system_global.h"
#include "ia_css_err.h"
#include "ia_css_pipe.h"
#include "ia_css_stream_format.h"
#include "sh_css_stream_format.h"
#include "ia_css_stream_public.h"
#include "ia_css_frame_public.h"
#include "ia_css_input_port.h"
#include "ia_css_debug.h"
#include "sh_css_struct.h"
#include "sh_css_defs.h"
#include "sh_css_sp.h" /* sh_css_update_host2sp_mipi_frame sh_css_update_host2sp_num_mipi_frames ... */
#include "sw_event_global.h" /* IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY */
static u32
ref_count_mipi_allocation[N_CSI_PORTS]; /* Initialized in mipi_init */
/* Assumptions:
* - A line is multiple of 4 bytes = 1 word.
* - Each frame has SOF and EOF (each 1 word).
* - Each line has format header and optionally SOL and EOL (each 1 word).
* - Odd and even lines of YUV420 format are different in bites per pixel size.
* - Custom size of embedded data.
* -- Interleaved frames are not taken into account.
* -- Lines are multiples of 8B, and not necessary of (custom 3B, or 7B
* etc.).
* Result is given in DDR mem words, 32B or 256 bits
*/
/*
 * Compute the size in DDR memory words (32B / 256 bits each) of one MIPI
 * frame of the given resolution and stream format, including MIPI packet
 * overhead (SOF/EOF, per-line packet header, optional SOL/EOL) and an
 * optional amount of embedded data.
 *
 * @width/@height: frame resolution in pixels.
 * @format:        MIPI/ATOMISP input format; determines bits per pixel.
 * @hasSOLandEOL:  whether each line carries SOL and EOL short packets.
 * @embedded_data_size_words: embedded-data size in 4-byte words.
 * @size_mem_words: out, resulting frame size in DDR memory words.
 *
 * Returns 0 on success, -EINVAL for unsupported formats.
 */
int
ia_css_mipi_frame_calculate_size(const unsigned int width,
				 const unsigned int height,
				 const enum atomisp_input_format format,
				 const bool hasSOLandEOL,
				 const unsigned int embedded_data_size_words,
				 unsigned int *size_mem_words)
{
	int err = 0;
	unsigned int bits_per_pixel = 0;
	unsigned int even_line_bytes = 0;
	unsigned int odd_line_bytes = 0;
	unsigned int words_per_odd_line = 0;
	unsigned int words_for_first_line = 0;
	unsigned int words_per_even_line = 0;
	unsigned int mem_words_per_even_line = 0;
	unsigned int mem_words_per_odd_line = 0;
	unsigned int mem_words_for_first_line = 0;
	unsigned int mem_words_for_EOF = 0;
	unsigned int mem_words = 0;
	unsigned int width_padded = width;

	/* The changes will be reverted as soon as RAW
	 * Buffers are deployed by the 2401 Input System
	 * in the non-continuous use scenario.
	 */
	if (IS_ISP2401)
		width_padded += (2 * ISP_VEC_NELEMS);

	IA_CSS_ENTER("padded_width=%d, height=%d, format=%d, hasSOLandEOL=%d, embedded_data_size_words=%d\n",
		     width_padded, height, format, hasSOLandEOL, embedded_data_size_words);

	/* Map the stream format to bits per (sub)pixel; comments give
	 * pixels-per-group / bytes-per-group / bits-per-group. */
	switch (format) {
	case ATOMISP_INPUT_FORMAT_RAW_6:	/* 4p, 3B, 24bits */
		bits_per_pixel = 6;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_7:	/* 8p, 7B, 56bits */
		bits_per_pixel = 7;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_8:	/* 1p, 1B, 8bits */
	case ATOMISP_INPUT_FORMAT_BINARY_8:	/* 8bits, TODO: check. */
	case ATOMISP_INPUT_FORMAT_YUV420_8:	/* odd 2p, 2B, 16bits, even 2p, 4B, 32bits */
		bits_per_pixel = 8;
		break;
	case ATOMISP_INPUT_FORMAT_YUV420_10:	/* odd 4p, 5B, 40bits, even 4p, 10B, 80bits */
	case ATOMISP_INPUT_FORMAT_RAW_10:	/* 4p, 5B, 40bits */
		/* The changes will be reverted as soon as RAW
		 * Buffers are deployed by the 2401 Input System
		 * in the non-continuous use scenario.
		 */
		bits_per_pixel = 10;
		break;
	case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:	/* 2p, 3B, 24bits */
	case ATOMISP_INPUT_FORMAT_RAW_12:	/* 2p, 3B, 24bits */
		bits_per_pixel = 12;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_14:	/* 4p, 7B, 56bits */
		bits_per_pixel = 14;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_444:	/* 1p, 2B, 16bits */
	case ATOMISP_INPUT_FORMAT_RGB_555:	/* 1p, 2B, 16bits */
	case ATOMISP_INPUT_FORMAT_RGB_565:	/* 1p, 2B, 16bits */
	case ATOMISP_INPUT_FORMAT_YUV422_8:	/* 2p, 4B, 32bits */
		bits_per_pixel = 16;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_666:	/* 4p, 9B, 72bits */
		bits_per_pixel = 18;
		break;
	case ATOMISP_INPUT_FORMAT_YUV422_10:	/* 2p, 5B, 40bits */
		bits_per_pixel = 20;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_888:	/* 1p, 3B, 24bits */
		bits_per_pixel = 24;
		break;
	case ATOMISP_INPUT_FORMAT_YUV420_16:	/* Not supported */
	case ATOMISP_INPUT_FORMAT_YUV422_16:	/* Not supported */
	case ATOMISP_INPUT_FORMAT_RAW_16:	/* TODO: not specified in MIPI SPEC, check */
	default:
		return -EINVAL;
	}

	odd_line_bytes = (width_padded * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */

	/* Even lines for YUV420 formats are double in bits_per_pixel. */
	if (format == ATOMISP_INPUT_FORMAT_YUV420_8
	    || format == ATOMISP_INPUT_FORMAT_YUV420_10
	    || format == ATOMISP_INPUT_FORMAT_YUV420_16) {
		even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >>
		    3; /* ceil ( bits per line / 8) */
	} else {
		even_line_bytes = odd_line_bytes;
	}

	/* a frame represented in memory: ()- optional; data - payload words.
	 * addr 0 1 2 3 4 5 6 7:
	 * first SOF (SOL) PACK_H data data data data data
	 *       data data data data data data data data
	 *       ...
	 *       data data 0 0 0 0 0 0
	 * second (EOL) (SOL) PACK_H data data data data data
	 *        data data data data data data data data
	 *        ...
	 *        data data 0 0 0 0 0 0
	 *        ...
	 * last (EOL) EOF 0 0 0 0 0 0
	 *
	 * Embedded lines are regular lines stored before the first and after
	 * payload lines.
	 */
	words_per_odd_line = (odd_line_bytes + 3) >> 2;
	/* ceil(odd_line_bytes/4); word = 4 bytes */
	words_per_even_line = (even_line_bytes + 3) >> 2;
	words_for_first_line = words_per_odd_line + 2 + (hasSOLandEOL ? 1 : 0);
	/* + SOF +packet header + optionally (SOL), but (EOL) is not in the first line */
	words_per_odd_line += (1 + (hasSOLandEOL ? 2 : 0));
	/* each non-first line has format header, and optionally (SOL) and (EOL). */
	words_per_even_line += (1 + (hasSOLandEOL ? 2 : 0));

	mem_words_per_odd_line = (words_per_odd_line + 7) >> 3;
	/* ceil(words_per_odd_line/8); mem_word = 32 bytes, 8 words */
	mem_words_for_first_line = (words_for_first_line + 7) >> 3;
	mem_words_per_even_line = (words_per_even_line + 7) >> 3;
	mem_words_for_EOF = 1; /* last line consists of the optional (EOL) and EOF */

	mem_words = ((embedded_data_size_words + 7) >> 3) +
	    mem_words_for_first_line +
	    (((height + 1) >> 1) - 1) * mem_words_per_odd_line +
	    /* ceil (height/2) - 1 (first line is calculated separately) */
	    (height >> 1) * mem_words_per_even_line + /* floor(height/2) */
	    mem_words_for_EOF;

	*size_mem_words = mem_words; /* ceil(words/8); mem word is 32B = 8words. */
	/* Check if the above is still needed. */

	IA_CSS_LEAVE_ERR(err);
	return err;
}
/*
 * Register a MIPI frame size to be checked against on the given CSI port
 * (2400 system only).
 */
#if !defined(ISP2401)
int
ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
				       const unsigned int size_mem_words)
{
	/*
	 * Store size_mem_words in the first free (zero) slot of the
	 * per-port size-check table. Returns 0 on success or -EBUSY when
	 * the table for this port is already full.
	 */
	u32 slot;

	OP___assert(port < N_CSI_PORTS);
	OP___assert(size_mem_words != 0);

	for (slot = 0; slot < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT;
	     slot++) {
		if (my_css.mipi_sizes_for_check[port][slot] == 0) {
			my_css.mipi_sizes_for_check[port][slot] = size_mem_words;
			return 0;
		}
	}

	return -EBUSY;
}
#endif
void
mipi_init(void)
{
	/* Reset the per-port MIPI buffer reference counts. */
	unsigned int port;

	for (port = 0; port < N_CSI_PORTS; port++)
		ref_count_mipi_allocation[port] = 0;
}
bool mipi_is_free(void)
{
	/* True when no CSI port currently holds MIPI buffer allocations. */
	unsigned int port = 0;

	while (port < N_CSI_PORTS) {
		if (ref_count_mipi_allocation[port] != 0)
			return false;
		port++;
	}

	return true;
}
/*
* @brief Calculate the required MIPI buffer sizes.
* Based on the stream configuration, calculate the
* required MIPI buffer sizes (in DDR words).
*
* @param[in] stream_cfg Point to the target stream configuration
* @param[out] size_mem_words MIPI buffer size in DDR words.
*
* @return
*/
static int calculate_mipi_buff_size(struct ia_css_stream_config *stream_cfg,
				    unsigned int *size_mem_words)
{
	/* Per-line MIPI buffer size calculation (2401 path): the buffer is
	 * sized as max(odd line, even line) DDR words times the height,
	 * without frame-level packet overhead (contrast with
	 * ia_css_mipi_frame_calculate_size()). */
	unsigned int width;
	unsigned int height;
	enum atomisp_input_format format;
	bool pack_raw_pixels;

	unsigned int width_padded;
	unsigned int bits_per_pixel = 0;

	unsigned int even_line_bytes = 0;
	unsigned int odd_line_bytes = 0;
	unsigned int words_per_odd_line = 0;
	unsigned int words_per_even_line = 0;
	unsigned int mem_words_per_even_line = 0;
	unsigned int mem_words_per_odd_line = 0;
	unsigned int mem_words_per_buff_line = 0;
	unsigned int mem_words_per_buff = 0;
	int err = 0;

	/**
	 * [email protected]
	 *
	 * NOTE
	 * - In the struct "ia_css_stream_config", there
	 *   are two members: "input_config" and "isys_config".
	 *   Both of them provide the same information, e.g.
	 *   input_res and format.
	 *
	 *   Question here is that: which one shall be used?
	 */
	width = stream_cfg->input_config.input_res.width;
	height = stream_cfg->input_config.input_res.height;
	format = stream_cfg->input_config.format;
	pack_raw_pixels = stream_cfg->pack_raw_pixels;
	/* end of NOTE */

	/**
	 * [email protected]
	 *
	 * NOTE
	 * - The following code is derived from the
	 *   existing code "ia_css_mipi_frame_calculate_size()".
	 *
	 *   Question here is: why adding "2 * ISP_VEC_NELEMS"
	 *   to "width_padded", but not making "width_padded"
	 *   aligned with "2 * ISP_VEC_NELEMS"?
	 */
	/* The changes will be reverted as soon as RAW
	 * Buffers are deployed by the 2401 Input System
	 * in the non-continuous use scenario.
	 */
	width_padded = width + (2 * ISP_VEC_NELEMS);
	/* end of NOTE */

	IA_CSS_ENTER("padded_width=%d, height=%d, format=%d\n",
		     width_padded, height, format);

	bits_per_pixel = sh_css_stream_format_2_bits_per_subpixel(format);
	/* Everything except packed RAW10 is stored as 16 bits per pixel. */
	bits_per_pixel =
	    (format == ATOMISP_INPUT_FORMAT_RAW_10 && pack_raw_pixels) ? bits_per_pixel : 16;
	if (bits_per_pixel == 0)
		return -EINVAL;

	odd_line_bytes = (width_padded * bits_per_pixel + 7) >> 3; /* ceil ( bits per line / 8) */

	/* Even lines for YUV420 formats are double in bits_per_pixel. */
	if (format == ATOMISP_INPUT_FORMAT_YUV420_8
	    || format == ATOMISP_INPUT_FORMAT_YUV420_10) {
		even_line_bytes = (width_padded * 2 * bits_per_pixel + 7) >>
		    3; /* ceil ( bits per line / 8) */
	} else {
		even_line_bytes = odd_line_bytes;
	}

	words_per_odd_line = (odd_line_bytes + 3) >> 2;
	/* ceil(odd_line_bytes/4); word = 4 bytes */
	words_per_even_line = (even_line_bytes + 3) >> 2;

	mem_words_per_odd_line = (words_per_odd_line + 7) >> 3;
	/* ceil(words_per_odd_line/8); mem_word = 32 bytes, 8 words */
	mem_words_per_even_line = (words_per_even_line + 7) >> 3;

	/* Size one buffer line for the wider of the two line kinds. */
	mem_words_per_buff_line =
	    (mem_words_per_odd_line > mem_words_per_even_line) ? mem_words_per_odd_line : mem_words_per_even_line;
	mem_words_per_buff = mem_words_per_buff_line * height;

	*size_mem_words = mem_words_per_buff;

	IA_CSS_LEAVE_ERR(err);
	return err;
}
int
allocate_mipi_frames(struct ia_css_pipe *pipe,
struct ia_css_stream_info *info)
{
int err = -EINVAL;
unsigned int port;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) enter:\n", pipe);
if (IS_ISP2401 && pipe->stream->config.online) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) exit: no buffers needed for 2401 pipe mode.\n",
pipe);
return 0;
}
if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) exit: no buffers needed for pipe mode.\n",
pipe);
return 0; /* AM TODO: Check */
}
port = (unsigned int)pipe->stream->config.source.port.port;
if (port >= N_CSI_PORTS) {
IA_CSS_ERROR("allocate_mipi_frames(%p) exit: port is not correct (port=%d).",
pipe, port);
return -EINVAL;
}
if (IS_ISP2401)
err = calculate_mipi_buff_size(&pipe->stream->config,
&my_css.mipi_frame_size[port]);
/*
* 2401 system allows multiple streams to use same physical port. This is not
* true for 2400 system. Currently 2401 uses MIPI buffers as a temporary solution.
* TODO AM: Once that is changed (removed) this code should be removed as well.
* In that case only 2400 related code should remain.
*/
if (ref_count_mipi_allocation[port] != 0) {
if (IS_ISP2401)
ref_count_mipi_allocation[port]++;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) leave: nothing to do, already allocated for this port (port=%d).\n",
pipe, port);
return 0;
}
ref_count_mipi_allocation[port]++;
/* AM TODO: mipi frames number should come from stream struct. */
my_css.num_mipi_frames[port] = NUM_MIPI_FRAMES_PER_STREAM;
/* Incremental allocation (per stream), not for all streams at once. */
{ /* limit the scope of i,j */
unsigned int i, j;
for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
/* free previous frame */
if (my_css.mipi_frames[port][i]) {
ia_css_frame_free(my_css.mipi_frames[port][i]);
my_css.mipi_frames[port][i] = NULL;
}
/* check if new frame is needed */
if (i < my_css.num_mipi_frames[port]) {
/* allocate new frame */
err = ia_css_frame_allocate_with_buffer_size(
&my_css.mipi_frames[port][i],
my_css.mipi_frame_size[port] * HIVE_ISP_DDR_WORD_BYTES);
if (err) {
for (j = 0; j < i; j++) {
if (my_css.mipi_frames[port][j]) {
ia_css_frame_free(my_css.mipi_frames[port][j]);
my_css.mipi_frames[port][j] = NULL;
}
}
IA_CSS_ERROR("allocate_mipi_frames(%p, %d) exit: allocation failed.",
pipe, port);
return err;
}
}
if (info->metadata_info.size > 0) {
/* free previous metadata buffer */
if (my_css.mipi_metadata[port][i]) {
ia_css_metadata_free(my_css.mipi_metadata[port][i]);
my_css.mipi_metadata[port][i] = NULL;
}
/* check if need to allocate a new metadata buffer */
if (i < my_css.num_mipi_frames[port]) {
/* allocate new metadata buffer */
my_css.mipi_metadata[port][i] = ia_css_metadata_allocate(&info->metadata_info);
if (!my_css.mipi_metadata[port][i]) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_metadata(%p, %d) failed.\n",
pipe, port);
return err;
}
}
}
}
}
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"allocate_mipi_frames(%p) exit:\n", pipe);
return err;
}
/*
 * Drop one reference on the MIPI buffers of the pipe's CSI port and free
 * the frames and metadata when the count reaches zero. Calling with
 * pipe == NULL frees all ports unconditionally (legacy cleanup path).
 *
 * Returns 0 on success or a negative errno for invalid arguments.
 *
 * Fix vs. the previous version: the '(!pipe) ||' test inside the
 * 'if (pipe)' branch was provably dead and has been removed.
 */
int
free_mipi_frames(struct ia_css_pipe *pipe)
{
	int err = -EINVAL;
	unsigned int port;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "free_mipi_frames(%p) enter:\n", pipe);

	/* assert(pipe != NULL); TEMP: TODO: Should be assert only. */
	if (pipe) {
		assert(pipe->stream);
		if (!pipe->stream) {
			IA_CSS_ERROR("free_mipi_frames(%p) exit: pipe or stream is null.",
				     pipe);
			return -EINVAL;
		}

		if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
			IA_CSS_ERROR("free_mipi_frames(%p) exit: wrong mode.",
				     pipe);
			return err;
		}

		port = (unsigned int)pipe->stream->config.source.port.port;
		if (port >= N_CSI_PORTS) {
			IA_CSS_ERROR("free_mipi_frames(%p, %d) exit: pipe port is not correct.",
				     pipe, port);
			return err;
		}

		if (ref_count_mipi_allocation[port] > 0) {
			if (!IS_ISP2401) {
				/* 2400: ports are not shared between streams */
				assert(ref_count_mipi_allocation[port] == 1);
				if (ref_count_mipi_allocation[port] != 1) {
					IA_CSS_ERROR("free_mipi_frames(%p) exit: wrong ref_count (ref_count=%d).",
						     pipe, ref_count_mipi_allocation[port]);
					return err;
				}
			}

			ref_count_mipi_allocation[port]--;

			if (ref_count_mipi_allocation[port] == 0) {
				/* no streams are using this buffer, so free it */
				unsigned int i;

				for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
					if (my_css.mipi_frames[port][i]) {
						ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
								    "free_mipi_frames(port=%d, num=%d).\n", port, i);
						ia_css_frame_free(my_css.mipi_frames[port][i]);
						my_css.mipi_frames[port][i] = NULL;
					}
					if (my_css.mipi_metadata[port][i]) {
						ia_css_metadata_free(my_css.mipi_metadata[port][i]);
						my_css.mipi_metadata[port][i] = NULL;
					}
				}

				ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
						    "free_mipi_frames(%p) exit (deallocated).\n", pipe);
			}
		}
	} else { /* pipe == NULL */
		/* AM TEMP: free-ing all mipi buffers just like a legacy code. */
		for (port = CSI_PORT0_ID; port < N_CSI_PORTS; port++) {
			unsigned int i;

			for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
				if (my_css.mipi_frames[port][i]) {
					ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
							    "free_mipi_frames(port=%d, num=%d).\n", port, i);
					ia_css_frame_free(my_css.mipi_frames[port][i]);
					my_css.mipi_frames[port][i] = NULL;
				}
				if (my_css.mipi_metadata[port][i]) {
					ia_css_metadata_free(my_css.mipi_metadata[port][i]);
					my_css.mipi_metadata[port][i] = NULL;
				}
			}
			ref_count_mipi_allocation[port] = 0;
		}
	}
	return 0;
}
/*
 * Hand the SP the per-port MIPI frame and metadata buffers and notify it
 * (via a PSYS software event) that all buffers are ready. Only applies to
 * buffered-sensor pipes; other modes return 0 without doing anything.
 *
 * Returns 0 on success, -EINVAL on an invalid port or when the SP is not
 * running (queues would be invalid).
 */
int
send_mipi_frames(struct ia_css_pipe *pipe)
{
	int err = -EINVAL;
	unsigned int i;
	unsigned int port;

	IA_CSS_ENTER_PRIVATE("pipe=%p", pipe);

	/* multi stream video needs mipi buffers */
	/* nothing to be done in other cases. */
	if (pipe->stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
		IA_CSS_LOG("nothing to be done for this mode");
		return 0;
		/* TODO: AM: maybe this should be returning an error. */
	}

	port = (unsigned int)pipe->stream->config.source.port.port;
	if (port >= N_CSI_PORTS) {
		IA_CSS_ERROR("send_mipi_frames(%p) exit: invalid port specified (port=%d).",
			     pipe, port);
		return err;
	}

	/* Hand-over the SP-internal mipi buffers */
	for (i = 0; i < my_css.num_mipi_frames[port]; i++) {
		/* Need to include the offset for port. */
		sh_css_update_host2sp_mipi_frame(port * NUM_MIPI_FRAMES_PER_STREAM + i,
						 my_css.mipi_frames[port][i]);
		sh_css_update_host2sp_mipi_metadata(port * NUM_MIPI_FRAMES_PER_STREAM + i,
						    my_css.mipi_metadata[port][i]);
	}
	sh_css_update_host2sp_num_mipi_frames(my_css.num_mipi_frames[port]);

	/**********************************
	 * Send an event to inform the SP
	 * that all MIPI frames are passed.
	 **********************************/
	if (!sh_css_sp_is_running()) {
		/* SP is not running. The queues are not valid */
		IA_CSS_ERROR("sp is not running");
		return err;
	}

	ia_css_bufq_enqueue_psys_event(
	    IA_CSS_PSYS_SW_EVENT_MIPI_BUFFERS_READY,
	    (uint8_t)port,
	    (uint8_t)my_css.num_mipi_frames[port],
	    0 /* not used */);
	IA_CSS_LEAVE_ERR_PRIVATE(0);
	return 0;
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_mipi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Medifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2010 Intel Corporation. All Rights Reserved.
*
* Copyright (c) 2010 Silicon Hive www.siliconhive.com.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
#include <asm/iosf_mbi.h>
#include <media/v4l2-event.h>
#define CREATE_TRACE_POINTS
#include "atomisp_trace_event.h"
#include "atomisp_cmd.h"
#include "atomisp_common.h"
#include "atomisp_fops.h"
#include "atomisp_internal.h"
#include "atomisp_ioctl.h"
#include "atomisp-regs.h"
#include "atomisp_tables.h"
#include "atomisp_compat.h"
#include "atomisp_subdev.h"
#include "atomisp_dfs_tables.h"
#include <hmm/hmm.h>
#include "sh_css_hrt.h"
#include "sh_css_defs.h"
#include "system_global.h"
#include "sh_css_internal.h"
#include "sh_css_sp.h"
#include "gp_device.h"
#include "device_access.h"
#include "irq.h"
#include "ia_css_types.h"
#include "ia_css_stream.h"
#include "ia_css_debug.h"
#include "bits.h"
/* We should never need to run the flash for more than 2 frames.
* At 15fps this means 133ms. We set the timeout a bit longer.
* Each flash driver is supposed to set its own timeout, but
* just in case someone else changed the timeout, we set it
* here to make sure we don't damage the flash hardware. */
#define FLASH_TIMEOUT 800 /* ms */
union host {
struct {
void *kernel_ptr;
void __user *user_ptr;
int size;
} scalar;
struct {
void *hmm_ptr;
} ptr;
};
/*
* get sensor:dis71430/ov2720 related info from v4l2_subdev->priv data field.
* subdev->priv is set in mrst.c
*/
struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd)
{
	/* The MIPI info lives in the subdev's host-private data (set at
	 * probe time); void * converts implicitly, no cast needed. */
	return v4l2_get_subdev_hostdata(sd);
}
/*
* get struct atomisp_video_pipe from v4l2 video_device
*/
struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev)
{
	/*
	 * Recover the owning atomisp_video_pipe from its embedded vdev.
	 * container_of() already evaluates to the correct pointer type,
	 * so the previous explicit cast was redundant and has been removed.
	 */
	return container_of(dev, struct atomisp_video_pipe, vdev);
}
static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd)
{
	/*
	 * Query the current sensor subdev for its frame interval and
	 * convert it to frames per second. Returns 0 when the query fails
	 * or the sensor reports no interval.
	 */
	struct v4l2_subdev_frame_interval fi = { 0 };
	struct atomisp_device *isp = asd->isp;
	int ret;

	ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
			       video, g_frame_interval, &fi);
	if (ret || !fi.interval.numerator)
		return 0;

	return fi.interval.denominator / fi.interval.numerator;
}
/*
* DFS progress is shown as follows:
* 1. Target frequency is calculated according to FPS/Resolution/ISP running
* mode.
* 2. Ratio is calculated using formula: 2 * HPLL / target frequency - 1
* with proper rounding.
* 3. Set ratio to ISPFREQ40, 1 to FREQVALID and ISPFREQGUAR40
* to 200MHz in ISPSSPM1.
* 4. Wait for FREQVALID to be cleared by P-Unit.
* 5. Wait for field ISPFREQSTAT40 in ISPSSPM1 turn to ratio set in 3.
*/
static int write_target_freq_to_hw(struct atomisp_device *isp,
				   unsigned int new_freq)
{
	/* Program the ISP clock ratio into ISPSSPM1 via the IOSF sideband
	 * and wait for the P-Unit to acknowledge it (see the DFS progress
	 * description above this function). Returns 0 on success,
	 * -EINVAL on HW timeout or missing HPLL frequency. */
	unsigned int ratio, timeout, guar_ratio;
	u32 isp_sspm1 = 0;
	int i;

	if (!isp->hpll_freq) {
		dev_err(isp->dev, "failed to get hpll_freq. no change to freq\n");
		return -EINVAL;
	}

	iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
	if (isp_sspm1 & ISP_FREQ_VALID_MASK) {
		/* A previous request is still pending; clear it first. */
		dev_dbg(isp->dev, "clearing ISPSSPM1 valid bit.\n");
		iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
			       isp_sspm1 & ~(1 << ISP_FREQ_VALID_OFFSET));
	}

	/* ratio = 2 * HPLL / target - 1, with rounding to nearest;
	 * guar_ratio uses a fixed 200 MHz guaranteed frequency. */
	ratio = (2 * isp->hpll_freq + new_freq / 2) / new_freq - 1;
	guar_ratio = (2 * isp->hpll_freq + 200 / 2) / 200 - 1;

	iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
	isp_sspm1 &= ~(0x1F << ISP_REQ_FREQ_OFFSET);

	/* NOTE(review): 'timeout' after this loop relies on
	 * ISP_DFS_TRY_TIMES >= 1; confirm the constant. */
	for (i = 0; i < ISP_DFS_TRY_TIMES; i++) {
		iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, ISPSSPM1,
			       isp_sspm1
			       | ratio << ISP_REQ_FREQ_OFFSET
			       | 1 << ISP_FREQ_VALID_OFFSET
			       | guar_ratio << ISP_REQ_GUAR_FREQ_OFFSET);

		iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
		timeout = 20;
		/* Wait for the P-Unit to clear the valid bit (= accepted). */
		while ((isp_sspm1 & ISP_FREQ_VALID_MASK) && timeout) {
			iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
			dev_dbg(isp->dev, "waiting for ISPSSPM1 valid bit to be 0.\n");
			udelay(100);
			timeout--;
		}

		if (timeout != 0)
			break;
	}

	if (timeout == 0) {
		dev_err(isp->dev, "DFS failed due to HW error.\n");
		return -EINVAL;
	}

	iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
	timeout = 10;
	/* Wait until the reported status ratio matches the request. */
	while (((isp_sspm1 >> ISP_FREQ_STAT_OFFSET) != ratio) && timeout) {
		iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM1, &isp_sspm1);
		dev_dbg(isp->dev, "waiting for ISPSSPM1 status bit to be 0x%x.\n",
			new_freq);
		udelay(100);
		timeout--;
	}
	if (timeout == 0) {
		dev_err(isp->dev, "DFS target freq is rejected by HW.\n");
		return -EINVAL;
	}

	return 0;
}
/*
 * Select and program a new ISP frequency.
 *
 * @mode:  LOW/MAX force the table's lowest/highest frequency; otherwise
 *         the frequency is looked up in the DFS rule table by current
 *         resolution, sensor FPS and run mode.
 * @force: program the frequency even if it equals the current one.
 *
 * Returns 0 on success or a negative errno from validation / HW write.
 */
int atomisp_freq_scaling(struct atomisp_device *isp,
			 enum atomisp_dfs_mode mode,
			 bool force)
{
	const struct atomisp_dfs_config *dfs;
	unsigned int new_freq;
	struct atomisp_freq_scaling_rule curr_rules;
	int i, ret;
	unsigned short fps = 0;

	dfs = isp->dfs;

	if (dfs->lowest_freq == 0 || dfs->max_freq_at_vmin == 0 ||
	    dfs->highest_freq == 0 || dfs->dfs_table_size == 0 ||
	    !dfs->dfs_table) {
		dev_err(isp->dev, "DFS configuration is invalid.\n");
		return -EINVAL;
	}

	if (mode == ATOMISP_DFS_MODE_LOW) {
		new_freq = dfs->lowest_freq;
		goto done;
	}

	if (mode == ATOMISP_DFS_MODE_MAX) {
		new_freq = dfs->highest_freq;
		goto done;
	}

	fps = atomisp_get_sensor_fps(&isp->asd);
	if (fps == 0) {
		dev_info(isp->dev,
			 "Sensor didn't report FPS. Using DFS max mode.\n");
		new_freq = dfs->highest_freq;
		goto done;
	}

	curr_rules.width = isp->asd.fmt[ATOMISP_SUBDEV_PAD_SOURCE].fmt.width;
	curr_rules.height = isp->asd.fmt[ATOMISP_SUBDEV_PAD_SOURCE].fmt.height;
	curr_rules.fps = fps;
	curr_rules.run_mode = isp->asd.run_mode->val;

	/* search for the target frequency by looping freq rules;
	 * ISP_FREQ_RULE_ANY entries act as wildcards. */
	for (i = 0; i < dfs->dfs_table_size; i++) {
		if (curr_rules.width != dfs->dfs_table[i].width &&
		    dfs->dfs_table[i].width != ISP_FREQ_RULE_ANY)
			continue;
		if (curr_rules.height != dfs->dfs_table[i].height &&
		    dfs->dfs_table[i].height != ISP_FREQ_RULE_ANY)
			continue;
		if (curr_rules.fps != dfs->dfs_table[i].fps &&
		    dfs->dfs_table[i].fps != ISP_FREQ_RULE_ANY)
			continue;
		if (curr_rules.run_mode != dfs->dfs_table[i].run_mode &&
		    dfs->dfs_table[i].run_mode != ISP_FREQ_RULE_ANY)
			continue;
		break;
	}

	/* no matching rule: fall back to the max-at-Vmin frequency */
	if (i == dfs->dfs_table_size)
		new_freq = dfs->max_freq_at_vmin;
	else
		new_freq = dfs->dfs_table[i].isp_freq;

done:
	dev_dbg(isp->dev, "DFS target frequency=%d.\n", new_freq);

	if ((new_freq == isp->running_freq) && !force)
		return 0;

	dev_dbg(isp->dev, "Programming DFS frequency to %d\n", new_freq);

	ret = write_target_freq_to_hw(isp, new_freq);
	if (!ret) {
		isp->running_freq = new_freq;
		trace_ipu_pstate(new_freq, -1);
	}
	return ret;
}
/*
* reset and restore ISP
*/
int atomisp_reset(struct atomisp_device *isp)
{
	/*
	 * Reset the ISP by power-cycling it. A failed power-on is fatal
	 * and is recorded in isp->isp_fatal_error.
	 */
	int err;

	dev_dbg(isp->dev, "%s\n", __func__);

	err = atomisp_power_off(isp->dev);
	if (err < 0)
		dev_err(isp->dev, "atomisp_power_off failed, %d\n", err);

	err = atomisp_power_on(isp->dev);
	if (err < 0) {
		dev_err(isp->dev, "atomisp_power_on failed, %d\n", err);
		isp->isp_fatal_error = true;
	}

	return err;
}
/*
* interrupt disable functions
*/
static void disable_isp_irq(enum hrt_isp_css_irq irq)
{
	/*
	 * Mask the given IRQ channel; for the SP interrupt, additionally
	 * disable interrupt generation on the SP side.
	 */
	irq_disable_channel(IRQ0_ID, irq);

	if (irq == hrt_isp_css_irq_sp)
		cnd_sp_irq_enable(SP0_ID, false);
}
/*
* interrupt clean function
*/
static void clear_isp_irq(enum hrt_isp_css_irq irq)
{
	/* NOTE(review): 'irq' is unused here — all pending interrupts on
	 * IRQ0 are cleared regardless of the requested channel. */
	irq_clear_all(IRQ0_ID);
}
void atomisp_msi_irq_init(struct atomisp_device *isp)
{
	/*
	 * Enable MSI delivery for the ISP: set the MSI enable bit, unmask
	 * the ISP interrupt control register, and enable memory access /
	 * bus mastering while disabling legacy INTx.
	 */
	struct pci_dev *pdev = to_pci_dev(isp->dev);
	u32 dword;
	u16 word;

	pci_read_config_dword(pdev, PCI_MSI_CAPID, &dword);
	dword |= 1 << MSI_ENABLE_BIT;
	pci_write_config_dword(pdev, PCI_MSI_CAPID, dword);

	dword = (1 << INTR_IER) | (1 << INTR_IIR);
	pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, dword);

	pci_read_config_word(pdev, PCI_COMMAND, &word);
	word |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(pdev, PCI_COMMAND, word);
}
void atomisp_msi_irq_uninit(struct atomisp_device *isp)
{
	/*
	 * Undo atomisp_msi_irq_init(): clear the MSI enable bit, mask the
	 * ISP interrupt control register, and drop bus mastering.
	 */
	struct pci_dev *pdev = to_pci_dev(isp->dev);
	u32 dword;
	u16 word;

	pci_read_config_dword(pdev, PCI_MSI_CAPID, &dword);
	dword &= ~(1 << MSI_ENABLE_BIT);
	pci_write_config_dword(pdev, PCI_MSI_CAPID, dword);

	pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, 0x0);

	pci_read_config_word(pdev, PCI_COMMAND, &word);
	word &= ~(PCI_COMMAND_MASTER);
	pci_write_config_word(pdev, PCI_COMMAND, word);
}
static void atomisp_sof_event(struct atomisp_sub_device *asd)
{
struct v4l2_event event = {0};
event.type = V4L2_EVENT_FRAME_SYNC;
event.u.frame_sync.frame_sequence = atomic_read(&asd->sof_count);
v4l2_event_queue(asd->subdev.devnode, &event);
}
void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id)
{
struct v4l2_event event = {0};
event.type = V4L2_EVENT_FRAME_END;
event.u.frame_sync.frame_sequence = exp_id;
v4l2_event_queue(asd->subdev.devnode, &event);
}
/* Notify userspace that 3A statistics for exposure @exp_id are ready. */
static void atomisp_3a_stats_ready_event(struct atomisp_sub_device *asd,
					 uint8_t exp_id)
{
	struct v4l2_event event = {
		.type = V4L2_EVENT_ATOMISP_3A_STATS_READY,
	};

	event.u.frame_sync.frame_sequence = exp_id;
	v4l2_event_queue(asd->subdev.devnode, &event);
}
static void atomisp_metadata_ready_event(struct atomisp_sub_device *asd,
enum atomisp_metadata_type md_type)
{
struct v4l2_event event = {0};
event.type = V4L2_EVENT_ATOMISP_METADATA_READY;
event.u.data[0] = md_type;
v4l2_event_queue(asd->subdev.devnode, &event);
}
static void atomisp_reset_event(struct atomisp_sub_device *asd)
{
struct v4l2_event event = {0};
event.type = V4L2_EVENT_ATOMISP_CSS_RESET;
v4l2_event_queue(asd->subdev.devnode, &event);
}
static void print_csi_rx_errors(enum mipi_port_id port,
struct atomisp_device *isp)
{
u32 infos = 0;
atomisp_css_rx_get_irq_info(port, &infos);
dev_err(isp->dev, "CSI Receiver port %d errors:\n", port);
if (infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
dev_err(isp->dev, " buffer overrun");
if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT)
dev_err(isp->dev, " start-of-transmission error");
if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
dev_err(isp->dev, " start-of-transmission sync error");
if (infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL)
dev_err(isp->dev, " control error");
if (infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
dev_err(isp->dev, " 2 or more ECC errors");
if (infos & IA_CSS_RX_IRQ_INFO_ERR_CRC)
dev_err(isp->dev, " CRC mismatch");
if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
dev_err(isp->dev, " unknown error");
if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
dev_err(isp->dev, " frame sync error");
if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
dev_err(isp->dev, " frame data error");
if (infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
dev_err(isp->dev, " data timeout");
if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
dev_err(isp->dev, " unknown escape command entry");
if (infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
dev_err(isp->dev, " line sync error");
}
/*
 * Clear irq reg: ack the ISP interrupt at the PCI level by setting the
 * IIR bit and writing the register back (the bit is presumably
 * write-1-to-clear — behavior defined by the hardware, not visible here).
 */
static void clear_irq_reg(struct atomisp_device *isp)
{
	struct pci_dev *pdev = to_pci_dev(isp->dev);
	u32 msg_ret;

	pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &msg_ret);
	msg_ret |= 1 << INTR_IIR;
	pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg_ret);
}
/*
 * Hard interrupt handler. Translates the CSS IRQ status into irq_infos,
 * acks the interrupt, handles SOF/ISYS events inline and wakes the
 * threaded handler (atomisp_isr_thread) for everything else.
 */
irqreturn_t atomisp_isr(int irq, void *dev)
{
	struct atomisp_device *isp = (struct atomisp_device *)dev;
	struct atomisp_css_event eof_event;
	unsigned int irq_infos = 0;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&isp->lock, flags);

	/* Early/spurious interrupt before the CSS is up: just swallow it. */
	if (!isp->css_initialized) {
		spin_unlock_irqrestore(&isp->lock, flags);
		return IRQ_HANDLED;
	}
	err = atomisp_css_irq_translate(isp, &irq_infos);
	if (err) {
		spin_unlock_irqrestore(&isp->lock, flags);
		return IRQ_NONE;
	}

	/* Ack at the PCI level so the interrupt can fire again. */
	clear_irq_reg(isp);

	if (!isp->asd.streaming)
		goto out_nowake;

	if (irq_infos & IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF) {
		atomic_inc(&isp->asd.sof_count);
		atomisp_sof_event(&isp->asd);

		/*
		 * If sequence_temp and sequence are the same there where no frames
		 * lost so we can increase sequence_temp.
		 * If not then processing of frame is still in progress and driver
		 * needs to keep old sequence_temp value.
		 * NOTE: There is assumption here that ISP will not start processing
		 * next frame from sensor before old one is completely done.
		 */
		if (atomic_read(&isp->asd.sequence) == atomic_read(&isp->asd.sequence_temp))
			atomic_set(&isp->asd.sequence_temp, atomic_read(&isp->asd.sof_count));

		dev_dbg_ratelimited(isp->dev, "irq:0x%x (SOF)\n", irq_infos);
		irq_infos &= ~IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF;
	}

	/* Latch the new sequence once CSS events are ready for dequeue. */
	if (irq_infos & IA_CSS_IRQ_INFO_EVENTS_READY)
		atomic_set(&isp->asd.sequence, atomic_read(&isp->asd.sequence_temp));

	if ((irq_infos & IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR) ||
	    (irq_infos & IA_CSS_IRQ_INFO_IF_ERROR)) {
		/* handle mipi receiver error */
		u32 rx_infos;
		enum mipi_port_id port;

		/* Log and clear receiver errors on every MIPI port. */
		for (port = MIPI_PORT0_ID; port <= MIPI_PORT2_ID;
		     port++) {
			print_csi_rx_errors(port, isp);
			atomisp_css_rx_get_irq_info(port, &rx_infos);
			atomisp_css_rx_clear_irq_info(port, rx_infos);
		}
	}

	if (irq_infos & IA_CSS_IRQ_INFO_ISYS_EVENTS_READY) {
		/* Drain all pending ISYS (EOF) events inline. */
		while (ia_css_dequeue_isys_event(&eof_event.event) == 0) {
			atomisp_eof_event(&isp->asd, eof_event.event.exp_id);
			dev_dbg_ratelimited(isp->dev, "ISYS event: EOF exp_id %d\n",
					    eof_event.event.exp_id);
		}

		irq_infos &= ~IA_CSS_IRQ_INFO_ISYS_EVENTS_READY;
		if (irq_infos == 0)
			goto out_nowake;
	}

	spin_unlock_irqrestore(&isp->lock, flags);

	dev_dbg_ratelimited(isp->dev, "irq:0x%x (unhandled)\n", irq_infos);

	/* Remaining work (buffer dequeue etc.) goes to the threaded handler. */
	return IRQ_WAKE_THREAD;

out_nowake:
	spin_unlock_irqrestore(&isp->lock, flags);

	if (irq_infos)
		dev_dbg_ratelimited(isp->dev, "irq:0x%x (ignored, as not streaming anymore)\n",
				    irq_infos);

	return IRQ_HANDLED;
}
/* Zero all per-pipe "buffers currently held by CSS" bookkeeping counters. */
void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd)
{
	int i;

	memset(asd->s3a_bufs_in_css, 0, sizeof(asd->s3a_bufs_in_css));
	/*
	 * Clear metadata counters per input stream. NOTE(review): this
	 * assumes metadata_bufs_in_css[i] is (or decays to) a pointer to a
	 * per-stream counter array — confirm against the struct definition.
	 */
	for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++)
		memset(asd->metadata_bufs_in_css[i], 0,
		       sizeof(asd->metadata_bufs_in_css[i]));
	asd->dis_bufs_in_css = 0;
}
/* 0x100000 is the start of dmem inside SP */
#define SP_DMEM_BASE 0x100000
/*
 * dump_sp_dmem - dump a region of SP data memory to the debug log
 * @isp: ISP device providing the MMIO base and log target
 * @addr: byte offset into SP dmem at which to start
 * @size: number of bytes to dump (rounded up to whole 32-bit words)
 *
 * Ranges extending beyond 0x4000 bytes of dmem are rejected with an error.
 */
void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
		  unsigned int size)
{
	unsigned int data = 0;
	unsigned int size32 = DIV_ROUND_UP(size, sizeof(u32));

	dev_dbg(isp->dev, "atomisp mmio base: %p\n", isp->base);
	dev_dbg(isp->dev, "%s, addr:0x%x, size: %d, size32: %d\n", __func__,
		addr, size, size32);
	if (size32 * 4 + addr > 0x4000) {
		dev_err(isp->dev, "illegal size (%d) or addr (0x%x)\n",
			size32, addr);
		return;
	}
	addr += SP_DMEM_BASE;
	addr &= 0x003FFFFF;

	/*
	 * Pre-test loop: the previous do/while decremented size32 before
	 * testing it, so a size of 0 underflowed the counter and dumped
	 * ~4G words. while (size32--) exits immediately in that case.
	 */
	while (size32--) {
		data = readl(isp->base + addr);
		dev_dbg(isp->dev, "%s, \t [0x%x]:0x%x\n", __func__, addr, data);
		addr += sizeof(u32);
	}
}
int atomisp_buffers_in_css(struct atomisp_video_pipe *pipe)
{
unsigned long irqflags;
struct list_head *pos;
int buffers_in_css = 0;
spin_lock_irqsave(&pipe->irq_lock, irqflags);
list_for_each(pos, &pipe->buffers_in_css)
buffers_in_css++;
spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
return buffers_in_css;
}
/*
 * Complete one vb2 buffer: stamp timestamp/field/sequence, remove it from
 * whichever driver list it is on and hand it to videobuf2 with @state.
 * Caller must hold pipe->irq_lock (asserted below).
 */
void atomisp_buffer_done(struct ia_css_frame *frame, enum vb2_buffer_state state)
{
	struct atomisp_video_pipe *pipe = vb_to_pipe(&frame->vb.vb2_buf);

	lockdep_assert_held(&pipe->irq_lock);

	frame->vb.vb2_buf.timestamp = ktime_get_ns();
	frame->vb.field = pipe->pix.field;
	frame->vb.sequence = atomic_read(&pipe->asd->sequence);
	list_del(&frame->queue);
	/* Payload is only meaningful for successfully filled buffers. */
	if (state == VB2_BUF_STATE_DONE)
		vb2_set_plane_payload(&frame->vb.vb2_buf, 0, pipe->pix.sizeimage);
	vb2_buffer_done(&frame->vb.vb2_buf, state);
}
/*
 * Return every buffer on all three of @pipe's lists (in-CSS, active,
 * waiting-for-param) to videobuf2 with @state. With @warn_on_css_frames
 * set, a warning is logged for each buffer that was still held by the CSS.
 */
void atomisp_flush_video_pipe(struct atomisp_video_pipe *pipe, enum vb2_buffer_state state,
			      bool warn_on_css_frames)
{
	struct ia_css_frame *frame, *_frame;
	unsigned long irqflags;

	spin_lock_irqsave(&pipe->irq_lock, irqflags);

	list_for_each_entry_safe(frame, _frame, &pipe->buffers_in_css, queue) {
		if (warn_on_css_frames)
			dev_warn(pipe->isp->dev, "Warning: CSS frames queued on flush\n");
		atomisp_buffer_done(frame, state);
	}

	list_for_each_entry_safe(frame, _frame, &pipe->activeq, queue)
		atomisp_buffer_done(frame, state);

	list_for_each_entry_safe(frame, _frame, &pipe->buffers_waiting_for_param, queue) {
		/* Drop the pending per-frame parameter request for this slot. */
		pipe->frame_request_config_id[frame->vb.vb2_buf.index] = 0;
		atomisp_buffer_done(frame, state);
	}

	spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
}
/* clean out the parameters that did not apply */
void atomisp_flush_params_queue(struct atomisp_video_pipe *pipe)
{
struct atomisp_css_params_with_list *param;
while (!list_empty(&pipe->per_frame_params)) {
param = list_entry(pipe->per_frame_params.next,
struct atomisp_css_params_with_list, list);
list_del(¶m->list);
atomisp_free_css_parameters(¶m->params);
kvfree(param);
}
}
/* Re-queue per-frame parameters */
static void atomisp_recover_params_queue(struct atomisp_video_pipe *pipe)
{
struct atomisp_css_params_with_list *param;
int i;
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
param = pipe->frame_params[i];
if (param)
list_add_tail(¶m->list, &pipe->per_frame_params);
pipe->frame_params[i] = NULL;
}
atomisp_handle_parameter_and_buffer(pipe);
}
/*
 * atomisp_buf_done - handle a buffer-completed event from the CSS
 * @asd: sub-device the buffer belongs to
 * @error: non-zero when the CSS flagged an error for this buffer
 * @buf_type: CSS buffer type being completed
 * @css_pipe_id: CSS pipe the buffer came from
 * @q_buffers: when true and no error occurred, queue fresh buffers to CSS
 * @stream_id: input stream to dequeue the buffer from
 *
 * Dequeues the completed buffer, moves the matching driver bookkeeping
 * entry (3A/DIS stats, metadata) to its "ready" list and emits the
 * corresponding V4L2 event; for output frames it also updates flash and
 * per-frame parameter state before returning the vb2 buffer to userspace.
 * Caller must hold isp->mutex (asserted below).
 */
void atomisp_buf_done(struct atomisp_sub_device *asd, int error,
		      enum ia_css_buffer_type buf_type,
		      enum ia_css_pipe_id css_pipe_id,
		      bool q_buffers, enum atomisp_input_stream_id stream_id)
{
	struct atomisp_video_pipe *pipe = NULL;
	struct atomisp_css_buffer buffer;
	bool requeue = false;
	unsigned long irqflags;
	struct ia_css_frame *frame = NULL;
	struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf_tmp, *s3a_iter;
	struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf_tmp, *dis_iter;
	struct atomisp_metadata_buf *md_buf = NULL, *_md_buf_tmp, *md_iter;
	enum atomisp_metadata_type md_type;
	struct atomisp_device *isp = asd->isp;
	struct v4l2_control ctrl;
	int i, err;

	lockdep_assert_held(&isp->mutex);

	/* Reject buffer types this handler does not know how to complete. */
	if (buf_type != IA_CSS_BUFFER_TYPE_METADATA &&
	    buf_type != IA_CSS_BUFFER_TYPE_3A_STATISTICS &&
	    buf_type != IA_CSS_BUFFER_TYPE_DIS_STATISTICS &&
	    buf_type != IA_CSS_BUFFER_TYPE_OUTPUT_FRAME &&
	    buf_type != IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME &&
	    buf_type != IA_CSS_BUFFER_TYPE_RAW_OUTPUT_FRAME &&
	    buf_type != IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME &&
	    buf_type != IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
		dev_err(isp->dev, "%s, unsupported buffer type: %d\n",
			__func__, buf_type);
		return;
	}

	memset(&buffer, 0, sizeof(struct atomisp_css_buffer));
	buffer.css_buffer.type = buf_type;
	err = atomisp_css_dequeue_buffer(asd, stream_id, css_pipe_id,
					 buf_type, &buffer);
	if (err) {
		dev_err(isp->dev,
			"atomisp_css_dequeue_buffer failed: 0x%x\n", err);
		return;
	}

	switch (buf_type) {
	case IA_CSS_BUFFER_TYPE_3A_STATISTICS:
		/* Find the matching 3A buffer and move it to the ready list. */
		list_for_each_entry_safe(s3a_iter, _s3a_buf_tmp,
					 &asd->s3a_stats_in_css, list) {
			if (s3a_iter->s3a_data ==
			    buffer.css_buffer.data.stats_3a) {
				list_del_init(&s3a_iter->list);
				list_add_tail(&s3a_iter->list,
					      &asd->s3a_stats_ready);
				s3a_buf = s3a_iter;
				break;
			}
		}

		asd->s3a_bufs_in_css[css_pipe_id]--;
		atomisp_3a_stats_ready_event(asd, buffer.css_buffer.exp_id);
		if (s3a_buf)
			dev_dbg(isp->dev, "%s: s3a stat with exp_id %d is ready\n",
				__func__, s3a_buf->s3a_data->exp_id);
		else
			dev_dbg(isp->dev, "%s: s3a stat is ready with no exp_id found\n",
				__func__);
		break;
	case IA_CSS_BUFFER_TYPE_METADATA:
		if (error)
			break;

		md_type = ATOMISP_MAIN_METADATA;
		/* Move the matching metadata buffer to its ready list. */
		list_for_each_entry_safe(md_iter, _md_buf_tmp,
					 &asd->metadata_in_css[md_type], list) {
			if (md_iter->metadata ==
			    buffer.css_buffer.data.metadata) {
				list_del_init(&md_iter->list);
				list_add_tail(&md_iter->list,
					      &asd->metadata_ready[md_type]);
				md_buf = md_iter;
				break;
			}
		}
		asd->metadata_bufs_in_css[stream_id][css_pipe_id]--;
		atomisp_metadata_ready_event(asd, md_type);
		if (md_buf)
			dev_dbg(isp->dev, "%s: metadata with exp_id %d is ready\n",
				__func__, md_buf->metadata->exp_id);
		else
			dev_dbg(isp->dev, "%s: metadata is ready with no exp_id found\n",
				__func__);
		break;
	case IA_CSS_BUFFER_TYPE_DIS_STATISTICS:
		/*
		 * Move the matching DIS buffer back to the stats list under
		 * dis_stats_lock and mark the projection data valid.
		 */
		list_for_each_entry_safe(dis_iter, _dis_buf_tmp,
					 &asd->dis_stats_in_css, list) {
			if (dis_iter->dis_data ==
			    buffer.css_buffer.data.stats_dvs) {
				spin_lock_irqsave(&asd->dis_stats_lock,
						  irqflags);
				list_del_init(&dis_iter->list);
				list_add(&dis_iter->list, &asd->dis_stats);
				asd->params.dis_proj_data_valid = true;
				spin_unlock_irqrestore(&asd->dis_stats_lock,
						       irqflags);
				dis_buf = dis_iter;
				break;
			}
		}
		asd->dis_bufs_in_css--;
		if (dis_buf)
			dev_dbg(isp->dev, "%s: dis stat with exp_id %d is ready\n",
				__func__, dis_buf->dis_data->exp_id);
		else
			dev_dbg(isp->dev, "%s: dis stat is ready with no exp_id found\n",
				__func__);
		break;
	case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
	case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
		frame = buffer.css_buffer.data.frame;
		if (!frame) {
			WARN_ON(1);
			break;
		}
		if (!frame->valid)
			error = true;

		pipe = vb_to_pipe(&frame->vb.vb2_buf);

		dev_dbg(isp->dev, "%s: vf frame with exp_id %d is ready\n",
			__func__, frame->exp_id);
		if (asd->params.flash_state == ATOMISP_FLASH_ONGOING) {
			if (frame->flash_state
			    == IA_CSS_FRAME_FLASH_STATE_PARTIAL)
				dev_dbg(isp->dev, "%s thumb partially flashed\n",
					__func__);
			else if (frame->flash_state
				 == IA_CSS_FRAME_FLASH_STATE_FULL)
				dev_dbg(isp->dev, "%s thumb completely flashed\n",
					__func__);
			else
				dev_dbg(isp->dev, "%s thumb no flash in this frame\n",
					__func__);
		}
		pipe->frame_config_id[frame->vb.vb2_buf.index] = frame->isp_config_id;
		break;
	case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME:
	case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
		frame = buffer.css_buffer.data.frame;
		if (!frame) {
			WARN_ON(1);
			break;
		}

		if (!frame->valid)
			error = true;

		pipe = vb_to_pipe(&frame->vb.vb2_buf);

		dev_dbg(isp->dev, "%s: main frame with exp_id %d is ready\n",
			__func__, frame->exp_id);

		i = frame->vb.vb2_buf.index;

		/* free the parameters */
		if (pipe->frame_params[i]) {
			/* Drop the shadow pointer if it aliases these params. */
			if (asd->params.dvs_6axis == pipe->frame_params[i]->params.dvs_6axis)
				asd->params.dvs_6axis = NULL;
			atomisp_free_css_parameters(&pipe->frame_params[i]->params);
			kvfree(pipe->frame_params[i]);
			pipe->frame_params[i] = NULL;
		}

		pipe->frame_config_id[i] = frame->isp_config_id;
		ctrl.id = V4L2_CID_FLASH_MODE;

		/* Record this frame's flash exposure status for userspace. */
		if (asd->params.flash_state == ATOMISP_FLASH_ONGOING) {
			if (frame->flash_state == IA_CSS_FRAME_FLASH_STATE_PARTIAL) {
				asd->frame_status[i] = ATOMISP_FRAME_STATUS_FLASH_PARTIAL;
				dev_dbg(isp->dev, "%s partially flashed\n", __func__);
			} else if (frame->flash_state == IA_CSS_FRAME_FLASH_STATE_FULL) {
				asd->frame_status[i] = ATOMISP_FRAME_STATUS_FLASH_EXPOSED;
				asd->params.num_flash_frames--;
				dev_dbg(isp->dev, "%s completely flashed\n", __func__);
			} else {
				asd->frame_status[i] = ATOMISP_FRAME_STATUS_OK;
				dev_dbg(isp->dev, "%s no flash in this frame\n", __func__);
			}

			/* Check if flashing sequence is done */
			if (asd->frame_status[i] == ATOMISP_FRAME_STATUS_FLASH_EXPOSED)
				asd->params.flash_state = ATOMISP_FLASH_DONE;
		} else if (isp->flash) {
			/* Torch mode with intensity > 0 counts as flash-exposed. */
			if (v4l2_g_ctrl(isp->flash->ctrl_handler, &ctrl) == 0 &&
			    ctrl.value == ATOMISP_FLASH_MODE_TORCH) {
				ctrl.id = V4L2_CID_FLASH_TORCH_INTENSITY;
				if (v4l2_g_ctrl(isp->flash->ctrl_handler, &ctrl) == 0 &&
				    ctrl.value > 0)
					asd->frame_status[i] = ATOMISP_FRAME_STATUS_FLASH_EXPOSED;
				else
					asd->frame_status[i] = ATOMISP_FRAME_STATUS_OK;
			} else {
				asd->frame_status[i] = ATOMISP_FRAME_STATUS_OK;
			}
		} else {
			asd->frame_status[i] = ATOMISP_FRAME_STATUS_OK;
		}

		asd->params.last_frame_status = asd->frame_status[i];

		if (asd->params.css_update_params_needed) {
			atomisp_apply_css_parameters(asd,
						     &asd->params.css_param);
			if (asd->params.css_param.update_flag.dz_config)
				asd->params.config.dz_config = &asd->params.css_param.dz_config;
			/* New global dvs 6axis config should be blocked
			 * here if there's a buffer with per-frame parameters
			 * pending in CSS frame buffer queue.
			 * This is to aviod zooming vibration since global
			 * parameters take effect immediately while
			 * per-frame parameters are taken after previous
			 * buffers in CSS got processed.
			 */
			if (asd->params.dvs_6axis)
				atomisp_css_set_dvs_6axis(asd,
							  asd->params.dvs_6axis);
			else
				asd->params.css_update_params_needed = false;
			/* The update flag should not be cleaned here
			 * since it is still going to be used to make up
			 * following per-frame parameters.
			 * This will introduce more copy work since each
			 * time when updating global parameters, the whole
			 * parameter set are applied.
			 * FIXME: A new set of parameter copy functions can
			 * be added to make up per-frame parameters based on
			 * solid structures stored in asd->params.css_param
			 * instead of using shadow pointers in update flag.
			 */
			atomisp_css_update_isp_params(asd);
		}
		break;
	default:
		break;
	}

	/* For frame buffers, hand the vb2 buffer back to userspace. */
	if (frame) {
		spin_lock_irqsave(&pipe->irq_lock, irqflags);
		atomisp_buffer_done(frame, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
		spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
	}

	/*
	 * Requeue should only be done for 3a and dis buffers.
	 * Queue/dequeue order will change if driver recycles image buffers.
	 * NOTE(review): requeue is never set true in this function as it
	 * stands; this branch appears to be dead — confirm before removing.
	 */
	if (requeue) {
		err = atomisp_css_queue_buffer(asd,
					       stream_id, css_pipe_id,
					       buf_type, &buffer);
		if (err)
			dev_err(isp->dev, "%s, q to css fails: %d\n",
				__func__, err);
		return;
	}
	if (!error && q_buffers)
		atomisp_qbuffers_to_css(asd);
}
/*
 * Workqueue handler that recovers from a CSS/ISP assert: stop the sensor
 * and CSS, reset the ISP hardware, rebuild the streams and restart.
 * The statement order below mirrors the hardware requirements — do not
 * reorder without consulting the recovery sequence.
 */
void atomisp_assert_recovery_work(struct work_struct *work)
{
	struct atomisp_device *isp = container_of(work, struct atomisp_device,
						  assert_recovery_work);
	struct pci_dev *pdev = to_pci_dev(isp->dev);
	unsigned long flags;
	int ret;

	mutex_lock(&isp->mutex);

	if (!isp->asd.streaming)
		goto out_unlock;

	/* Stop SOF interrupts while tearing things down. */
	atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false);

	spin_lock_irqsave(&isp->lock, flags);
	isp->asd.streaming = false;
	spin_unlock_irqrestore(&isp->lock, flags);

	/* stream off sensor */
	ret = v4l2_subdev_call(isp->inputs[isp->asd.input_curr].camera, video, s_stream, 0);
	if (ret)
		dev_warn(isp->dev, "Stopping sensor stream failed: %d\n", ret);

	atomisp_clear_css_buffer_counters(&isp->asd);
	atomisp_css_stop(&isp->asd, true);

	isp->asd.preview_exp_id = 1;
	isp->asd.postview_exp_id = 1;

	/* notify HAL the CSS reset */
	dev_dbg(isp->dev, "send reset event to %s\n", isp->asd.subdev.devnode->name);
	atomisp_reset_event(&isp->asd);

	/* clear irq */
	disable_isp_irq(hrt_isp_css_irq_sp);
	clear_isp_irq(hrt_isp_css_irq_sp);

	/* Set the SRSE to 3 before resetting */
	pci_write_config_dword(pdev, PCI_I_CONTROL,
			       isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);

	/* reset ISP and restore its state */
	atomisp_reset(isp);

	atomisp_css_input_set_mode(&isp->asd, IA_CSS_INPUT_MODE_BUFFERED_SENSOR);

	/* Recreate streams destroyed by atomisp_css_stop() */
	atomisp_create_pipes_stream(&isp->asd);

	/* Invalidate caches. FIXME: should flush only necessary buffers */
	wbinvd();

	if (atomisp_css_start(&isp->asd)) {
		dev_warn(isp->dev, "start SP failed, so do not set streaming to be enable!\n");
	} else {
		spin_lock_irqsave(&isp->lock, flags);
		isp->asd.streaming = true;
		spin_unlock_irqrestore(&isp->lock, flags);
	}

	atomisp_csi2_configure(&isp->asd);

	/* Re-enable SOF interrupts only if valid SOFs are expected. */
	atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF,
			       atomisp_css_valid_sof(isp));

	if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0)
		dev_dbg(isp->dev, "DFS auto failed while recovering!\n");

	/* Dequeueing buffers is not needed, CSS will recycle buffers that it has */
	atomisp_flush_video_pipe(&isp->asd.video_out, VB2_BUF_STATE_ERROR, false);

	/* Requeue unprocessed per-frame parameters. */
	atomisp_recover_params_queue(&isp->asd.video_out);

	ret = v4l2_subdev_call(isp->inputs[isp->asd.input_curr].camera, video, s_stream, 1);
	if (ret)
		dev_err(isp->dev, "Starting sensor stream failed: %d\n", ret);

out_unlock:
	mutex_unlock(&isp->mutex);
}
/*
 * Drive the flash state machine: when frames remain to be flash-exposed,
 * program the timeout and request the flash from the CSS stream;
 * otherwise drop back to idle.
 */
void atomisp_setup_flash(struct atomisp_sub_device *asd)
{
	struct atomisp_device *isp = asd->isp;
	struct v4l2_control ctrl;

	if (!isp->flash)
		return;

	if (asd->params.flash_state != ATOMISP_FLASH_REQUESTED &&
	    asd->params.flash_state != ATOMISP_FLASH_DONE)
		return;

	if (!asd->params.num_flash_frames) {
		asd->params.flash_state = ATOMISP_FLASH_IDLE;
		return;
	}

	/* make sure the timeout is set before setting flash mode */
	ctrl.id = V4L2_CID_FLASH_TIMEOUT;
	ctrl.value = FLASH_TIMEOUT;

	if (v4l2_s_ctrl(NULL, isp->flash->ctrl_handler, &ctrl)) {
		dev_err(isp->dev, "flash timeout configure failed\n");
		return;
	}

	ia_css_stream_request_flash(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream);
	asd->params.flash_state = ATOMISP_FLASH_ONGOING;
}
/*
 * Threaded half of the ISP interrupt handler: drains CSS events/buffers
 * and advances the flash state machine, all under isp->mutex.
 */
irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr)
{
	struct atomisp_device *isp = isp_ptr;
	unsigned long flags;

	dev_dbg(isp->dev, ">%s\n", __func__);

	spin_lock_irqsave(&isp->lock, flags);

	/* Streaming may have stopped between hard IRQ and this thread. */
	if (!isp->asd.streaming) {
		spin_unlock_irqrestore(&isp->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&isp->lock, flags);

	/*
	 * The standard CSS2.0 API tells the following calling sequence of
	 * dequeue ready buffers:
	 * while (ia_css_dequeue_psys_event(...)) {
	 *	switch (event.type) {
	 *	...
	 *	ia_css_pipe_dequeue_buffer()
	 *	}
	 * }
	 * That is, dequeue event and buffer are one after another.
	 *
	 * But the following implementation is to first dequeue all the events
	 * to a FIFO, then process the events in the FIFO.
	 * This will not have issue in single stream mode, but it does have some
	 * issue in multiple stream case. The issue is that
	 * ia_css_pipe_dequeue_buffer() will not return the correct buffer in
	 * a specific pipe.
	 *
	 * This is due to ia_css_pipe_dequeue_buffer() does not take the
	 * ia_css_pipe parameter.
	 *
	 * So:
	 * For CSS2.0: we change the way to not dequeue all the events at one
	 * time, instead, dequeue one and process one, then another
	 */
	mutex_lock(&isp->mutex);
	if (atomisp_css_isr_thread(isp))
		goto out;

	if (isp->asd.streaming)
		atomisp_setup_flash(&isp->asd);
out:
	mutex_unlock(&isp->mutex);
	dev_dbg(isp->dev, "<%s\n", __func__);
	return IRQ_HANDLED;
}
/*
 * Get internal fmt according to V4L2 fmt
 *
 * Maps a V4L2 fourcc to the corresponding CSS frame format. All Bayer
 * variants collapse to IA_CSS_FRAME_FORMAT_RAW. Unknown formats yield
 * -EINVAL smuggled through the enum return type — callers must treat a
 * negative value as an error before using it as a frame format.
 */
static enum ia_css_frame_format
v4l2_fmt_to_sh_fmt(u32 fmt)
{
	switch (fmt) {
	case V4L2_PIX_FMT_YUV420:
		return IA_CSS_FRAME_FORMAT_YUV420;
	case V4L2_PIX_FMT_YVU420:
		return IA_CSS_FRAME_FORMAT_YV12;
	case V4L2_PIX_FMT_YUV422P:
		return IA_CSS_FRAME_FORMAT_YUV422;
	case V4L2_PIX_FMT_YUV444:
		return IA_CSS_FRAME_FORMAT_YUV444;
	case V4L2_PIX_FMT_NV12:
		return IA_CSS_FRAME_FORMAT_NV12;
	case V4L2_PIX_FMT_NV21:
		return IA_CSS_FRAME_FORMAT_NV21;
	case V4L2_PIX_FMT_NV16:
		return IA_CSS_FRAME_FORMAT_NV16;
	case V4L2_PIX_FMT_NV61:
		return IA_CSS_FRAME_FORMAT_NV61;
	case V4L2_PIX_FMT_UYVY:
		return IA_CSS_FRAME_FORMAT_UYVY;
	case V4L2_PIX_FMT_YUYV:
		return IA_CSS_FRAME_FORMAT_YUYV;
	case V4L2_PIX_FMT_RGB24:
		return IA_CSS_FRAME_FORMAT_PLANAR_RGB888;
	case V4L2_PIX_FMT_RGB32:
		return IA_CSS_FRAME_FORMAT_RGBA888;
	case V4L2_PIX_FMT_RGB565:
		return IA_CSS_FRAME_FORMAT_RGB565;
	/* Disabled upstream: JPEG / M10MO raw mapping kept for reference. */
#if 0
	case V4L2_PIX_FMT_JPEG:
	case V4L2_PIX_FMT_CUSTOM_M10MO_RAW:
		return IA_CSS_FRAME_FORMAT_BINARY_8;
#endif
	case V4L2_PIX_FMT_SBGGR16:
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
		return IA_CSS_FRAME_FORMAT_RAW;
	default:
		return -EINVAL;
	}
}
/*
 * raw format match between SH format and V4L2 format
 *
 * Returns 0 when @output is a V4L2 Bayer fourcc whose bit depth matches
 * the raw input format @input, -EINVAL otherwise.
 */
static int raw_output_format_match_input(u32 input, u32 output)
{
	switch (input) {
	case ATOMISP_INPUT_FORMAT_RAW_12:
		if (output == V4L2_PIX_FMT_SRGGB12 ||
		    output == V4L2_PIX_FMT_SGRBG12 ||
		    output == V4L2_PIX_FMT_SBGGR12 ||
		    output == V4L2_PIX_FMT_SGBRG12)
			return 0;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_10:
		if (output == V4L2_PIX_FMT_SRGGB10 ||
		    output == V4L2_PIX_FMT_SGRBG10 ||
		    output == V4L2_PIX_FMT_SBGGR10 ||
		    output == V4L2_PIX_FMT_SGBRG10)
			return 0;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_8:
		if (output == V4L2_PIX_FMT_SRGGB8 ||
		    output == V4L2_PIX_FMT_SGRBG8 ||
		    output == V4L2_PIX_FMT_SBGGR8 ||
		    output == V4L2_PIX_FMT_SGBRG8)
			return 0;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_16:
		if (output == V4L2_PIX_FMT_SBGGR16)
			return 0;
		break;
	default:
		break;
	}

	return -EINVAL;
}
/*
 * Return the storage depth in bits per pixel for a V4L2 pixel format.
 * Note: 10/12-bit Bayer formats report 16 (stored one sample per 16-bit
 * word); unknown formats fall back to 16.
 */
u32 atomisp_get_pixel_depth(u32 pixelformat)
{
	switch (pixelformat) {
	case V4L2_PIX_FMT_YUV420:
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_YVU420:
		return 12;
	case V4L2_PIX_FMT_YUV422P:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_SBGGR16:
	case V4L2_PIX_FMT_SBGGR12:
	case V4L2_PIX_FMT_SGBRG12:
	case V4L2_PIX_FMT_SGRBG12:
	case V4L2_PIX_FMT_SRGGB12:
	case V4L2_PIX_FMT_SBGGR10:
	case V4L2_PIX_FMT_SGBRG10:
	case V4L2_PIX_FMT_SGRBG10:
	case V4L2_PIX_FMT_SRGGB10:
		return 16;
	case V4L2_PIX_FMT_RGB24:
	case V4L2_PIX_FMT_YUV444:
		return 24;
	case V4L2_PIX_FMT_RGB32:
		return 32;
	case V4L2_PIX_FMT_JPEG:
	case V4L2_PIX_FMT_CUSTOM_M10MO_RAW:
	case V4L2_PIX_FMT_SBGGR8:
	case V4L2_PIX_FMT_SGBRG8:
	case V4L2_PIX_FMT_SGRBG8:
	case V4L2_PIX_FMT_SRGGB8:
		return 8;
	default:
		return 8 * 2; /* raw type now */
	}
}
/*
 * True when @code lies in the raw/Bayer media-bus code range
 * [0x3000, 0x3fff].
 */
bool atomisp_is_mbuscode_raw(uint32_t code)
{
	/* Equivalent to: code >= 0x3000 && code < 0x4000 */
	return code / 0x1000 == 0x3;
}
/*
 * ISP features control function
 */

/*
 * Set ISP capture mode based on current settings.
 * Priority order: GDC/CAC enabled -> ADVANCED, low-light -> LOW_LIGHT,
 * raw output -> RAW, otherwise PRIMARY.
 */
static void atomisp_update_capture_mode(struct atomisp_sub_device *asd)
{
	if (asd->params.gdc_cac_en)
		atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_ADVANCED);
	else if (asd->params.low_light)
		atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_LOW_LIGHT);
	else if (asd->video_out.sh_fmt == IA_CSS_FRAME_FORMAT_RAW)
		atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_RAW);
	else
		atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_PRIMARY);
}
/* ISP2401 */
/*
 * Forward the requested run mode to the sensor's V4L2_CID_RUN_MODE
 * control, if the sensor exposes one. Returns 0 (silently) when the
 * sensor has no such control; -EINVAL for an invalid @runmode.
 */
int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd,
			       struct atomisp_s_runmode *runmode)
{
	struct atomisp_device *isp = asd->isp;
	struct v4l2_ctrl *c;
	int ret = 0;

	if (!(runmode && (runmode->mode & RUNMODE_MASK)))
		return -EINVAL;

	mutex_lock(asd->ctrl_handler.lock);
	c = v4l2_ctrl_find(isp->inputs[asd->input_curr].camera->ctrl_handler,
			   V4L2_CID_RUN_MODE);

	if (c)
		ret = v4l2_ctrl_s_ctrl(c, runmode->mode);

	mutex_unlock(asd->ctrl_handler.lock);
	return ret;
}
/*
 * Function to enable/disable lens geometry distortion correction (GDC)
 * and chromatic aberration correction (CAC).
 * flag == 0 reads the current state into *value; otherwise *value sets it.
 */
int atomisp_gdc_cac(struct atomisp_sub_device *asd, int flag,
		    __s32 *value)
{
	if (!flag) {
		*value = asd->params.gdc_cac_en;
		return 0;
	}

	asd->params.gdc_cac_en = !!*value;
	/* The morph table is only applied while GDC/CAC is enabled. */
	asd->params.config.morph_table = asd->params.gdc_cac_en ?
		asd->params.css_param.morph_table : NULL;
	asd->params.css_update_params_needed = true;
	atomisp_update_capture_mode(asd);
	return 0;
}
/*
 * Function to enable/disable low light mode including ANR.
 * flag == 0 reads the current state into *value; otherwise *value sets it.
 */
int atomisp_low_light(struct atomisp_sub_device *asd, int flag,
		      __s32 *value)
{
	if (!flag) {
		*value = asd->params.low_light;
		return 0;
	}

	asd->params.low_light = !!*value;
	atomisp_update_capture_mode(asd);
	return 0;
}
/*
 * Function to enable/disable extra noise reduction (XNR) in low light
 * condition.
 *
 * NOTE(review): the get path reads asd->params.xnr_en but the set path
 * only calls atomisp_css_capture_enable_xnr() — presumably that helper
 * updates xnr_en; confirm, otherwise get/set are out of sync.
 */
int atomisp_xnr(struct atomisp_sub_device *asd, int flag,
		int *xnr_enable)
{
	if (flag == 0) {
		*xnr_enable = asd->params.xnr_en;
		return 0;
	}

	atomisp_css_capture_enable_xnr(asd, !!*xnr_enable);

	return 0;
}
/*
 * Function to configure bayer noise reduction.
 * flag == 0 reads the active config into *arg; otherwise *arg is applied.
 */
int atomisp_nr(struct atomisp_sub_device *asd, int flag,
	       struct atomisp_nr_config *arg)
{
	if (flag) {
		/* Stage the new config and mark ISP params for update. */
		memcpy(&asd->params.css_param.nr_config, arg,
		       sizeof(struct ia_css_nr_config));
		asd->params.config.nr_config = &asd->params.css_param.nr_config;
		asd->params.css_update_params_needed = true;
		return 0;
	}

	/* Read back the currently active config. */
	if (atomisp_css_get_nr_config(asd, arg))
		return -EINVAL;

	return 0;
}
/*
 * Function to configure temporal noise reduction (TNR).
 * flag == 0 reads the active config into *config; otherwise it is applied.
 */
int atomisp_tnr(struct atomisp_sub_device *asd, int flag,
		struct atomisp_tnr_config *config)
{
	if (flag == 0) {
		/* Get tnr config from current setup */
		if (atomisp_css_get_tnr_config(asd, config))
			return -EINVAL;
	} else {
		/* Set tnr config to isp parameters */
		memcpy(&asd->params.css_param.tnr_config, config,
		       sizeof(struct ia_css_tnr_config));
		asd->params.config.tnr_config = &asd->params.css_param.tnr_config;
		asd->params.css_update_params_needed = true;
	}

	return 0;
}
/*
 * Function to configure black level compensation.
 * flag == 0 reads the active optical-black config; otherwise it is applied.
 */
int atomisp_black_level(struct atomisp_sub_device *asd, int flag,
			struct atomisp_ob_config *config)
{
	if (flag == 0)
		return atomisp_css_get_ob_config(asd, config) ? -EINVAL : 0;

	/* Stage the new config and mark ISP params for update. */
	memcpy(&asd->params.css_param.ob_config, config,
	       sizeof(struct ia_css_ob_config));
	asd->params.config.ob_config = &asd->params.css_param.ob_config;
	asd->params.css_update_params_needed = true;
	return 0;
}
/*
 * Function to configure edge enhancement.
 * flag == 0 reads the active config; otherwise *config is applied.
 */
int atomisp_ee(struct atomisp_sub_device *asd, int flag,
	       struct atomisp_ee_config *config)
{
	if (flag == 0)
		return atomisp_css_get_ee_config(asd, config) ? -EINVAL : 0;

	/* Stage the new config and mark ISP params for update. */
	memcpy(&asd->params.css_param.ee_config, config,
	       sizeof(asd->params.css_param.ee_config));
	asd->params.config.ee_config = &asd->params.css_param.ee_config;
	asd->params.css_update_params_needed = true;
	return 0;
}
/*
 * Function to update Gamma table for gamma, brightness and contrast
 * config. flag == 0 reads the active table; otherwise *config is stored
 * and applied through the config pointer.
 */
int atomisp_gamma(struct atomisp_sub_device *asd, int flag,
		  struct atomisp_gamma_table *config)
{
	if (flag) {
		memcpy(&asd->params.css_param.gamma_table, config,
		       sizeof(asd->params.css_param.gamma_table));
		asd->params.config.gamma_table = &asd->params.css_param.gamma_table;
		return 0;
	}

	return atomisp_css_get_gamma_table(asd, config) ? -EINVAL : 0;
}
/*
 * Function to update Ctc table for Chroma Enhancement.
 * flag == 0 reads the active table; otherwise *config is stored and
 * pushed to the CSS immediately.
 */
int atomisp_ctc(struct atomisp_sub_device *asd, int flag,
		struct atomisp_ctc_table *config)
{
	if (flag == 0) {
		if (atomisp_css_get_ctc_table(asd, config))
			return -EINVAL;
		return 0;
	}

	memcpy(&asd->params.css_param.ctc_table, config,
	       sizeof(asd->params.css_param.ctc_table));
	atomisp_css_set_ctc_table(asd, &asd->params.css_param.ctc_table);
	return 0;
}
/*
 * Function to update gamma correction parameters.
 * flag == 0 reads the active config; otherwise *config is applied.
 */
int atomisp_gamma_correction(struct atomisp_sub_device *asd, int flag,
			     struct atomisp_gc_config *config)
{
	if (flag) {
		/* Stage the new config and mark ISP params for update. */
		memcpy(&asd->params.css_param.gc_config, config,
		       sizeof(asd->params.css_param.gc_config));
		asd->params.config.gc_config = &asd->params.css_param.gc_config;
		asd->params.css_update_params_needed = true;
		return 0;
	}

	return atomisp_css_get_gc_config(asd, config) ? -EINVAL : 0;
}
/*
 * Function to update narrow gamma flag.
 * flag == 0 reads the active formats config; otherwise *config is stored
 * and applied through the config pointer.
 */
int atomisp_formats(struct atomisp_sub_device *asd, int flag,
		    struct atomisp_formats_config *config)
{
	if (flag) {
		memcpy(&asd->params.css_param.formats_config, config,
		       sizeof(asd->params.css_param.formats_config));
		asd->params.config.formats_config = &asd->params.css_param.formats_config;
		return 0;
	}

	return atomisp_css_get_formats_config(asd, config) ? -EINVAL : 0;
}
/* Release all CSS parameter buffers cached on the sub-device. */
void atomisp_free_internal_buffers(struct atomisp_sub_device *asd)
{
	atomisp_free_css_parameters(&asd->params.css_param);
}
/*
 * Refresh the statistics grid info for @pipe_id and reallocate every
 * statistics buffer (3A, DIS, metadata) to match the new grid size.
 * On any allocation failure all statistics buffers are freed again.
 */
static void atomisp_update_grid_info(struct atomisp_sub_device *asd,
				     enum ia_css_pipe_id pipe_id)
{
	struct atomisp_device *isp = asd->isp;
	int err;

	/* Non-zero return means the grid did not change; nothing to do. */
	if (atomisp_css_get_grid_info(asd, pipe_id))
		return;

	/*
	 * We must free all buffers because they no longer match
	 * the grid size.
	 */
	atomisp_css_free_stat_buffers(asd);

	err = atomisp_alloc_css_stat_bufs(asd, ATOMISP_INPUT_STREAM_GENERAL);
	if (err) {
		dev_err(isp->dev, "stat_buf allocate error\n");
		goto err;
	}

	if (atomisp_alloc_3a_output_buf(asd)) {
		/* Failure for 3A buffers does not influence DIS buffers */
		if (asd->params.s3a_output_bytes != 0) {
			/*
			 * For SOC sensor happens s3a_output_bytes == 0,
			 * using if condition to exclude false error log
			 */
			dev_err(isp->dev, "Failed to allocate memory for 3A statistics\n");
		}
		goto err;
	}

	if (atomisp_alloc_dis_coef_buf(asd)) {
		dev_err(isp->dev,
			"Failed to allocate memory for DIS statistics\n");
		goto err;
	}

	if (atomisp_alloc_metadata_output_buf(asd)) {
		dev_err(isp->dev, "Failed to allocate memory for metadata\n");
		goto err;
	}

	return;

err:
	atomisp_css_free_stat_buffers(asd);
}
/*
 * Copy the current 3A grid info into the userspace-facing grid struct.
 * NOTE(review): copies sizeof(struct ia_css_3a_grid_info) into a
 * struct atomisp_grid_info — assumes the latter is at least as large and
 * layout-compatible; confirm against the two struct definitions.
 */
static void atomisp_curr_user_grid_info(struct atomisp_sub_device *asd,
					struct atomisp_grid_info *info)
{
	memcpy(info, &asd->params.curr_grid_info.s3a_grid,
	       sizeof(struct ia_css_3a_grid_info));
}
int atomisp_compare_grid(struct atomisp_sub_device *asd,
struct atomisp_grid_info *atomgrid)
{
struct atomisp_grid_info tmp = {0};
atomisp_curr_user_grid_info(asd, &tmp);
return memcmp(atomgrid, &tmp, sizeof(tmp));
}
/*
 * Function to update Gdc table for gdc.
 *
 * flag == 0: copy the current morph table (per-plane x/y coordinate
 * arrays) out to userspace via the pointers embedded in *config.
 * flag != 0: allocate a fresh morph table sized from *config, copy the
 * coordinate arrays in from userspace and install it; it becomes the
 * active config only while GDC/CAC is enabled.
 */
int atomisp_gdc_cac_table(struct atomisp_sub_device *asd, int flag,
			  struct atomisp_morph_table *config)
{
	int ret;
	int i;
	struct atomisp_device *isp = asd->isp;

	if (flag == 0) {
		/* Get gdc table from current setup */
		struct ia_css_morph_table tab = {0};

		atomisp_css_get_morph_table(asd, &tab);

		config->width = tab.width;
		config->height = tab.height;

		for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
			/*
			 * NOTE(review): copy_to_user returns the number of
			 * bytes NOT copied, not an errno — non-zero simply
			 * means a partial/failed copy.
			 */
			ret = copy_to_user(config->coordinates_x[i],
					   tab.coordinates_x[i], tab.height *
					   tab.width * sizeof(*tab.coordinates_x[i]));
			if (ret) {
				dev_err(isp->dev,
					"Failed to copy to User for x\n");
				return -EFAULT;
			}
			ret = copy_to_user(config->coordinates_y[i],
					   tab.coordinates_y[i], tab.height *
					   tab.width * sizeof(*tab.coordinates_y[i]));
			if (ret) {
				dev_err(isp->dev,
					"Failed to copy to User for y\n");
				return -EFAULT;
			}
		}
	} else {
		struct ia_css_morph_table *tab =
			asd->params.css_param.morph_table;

		/* free first if we have one */
		if (tab) {
			atomisp_css_morph_table_free(tab);
			asd->params.css_param.morph_table = NULL;
		}

		/* allocate new one */
		tab = atomisp_css_morph_table_allocate(config->width,
						       config->height);

		if (!tab) {
			dev_err(isp->dev, "out of memory\n");
			return -EINVAL;
		}

		for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
			/* On any copy failure, free the half-filled table. */
			ret = copy_from_user(tab->coordinates_x[i],
					     config->coordinates_x[i],
					     config->height * config->width *
					     sizeof(*config->coordinates_x[i]));
			if (ret) {
				dev_err(isp->dev,
					"Failed to copy from User for x, ret %d\n",
					ret);
				atomisp_css_morph_table_free(tab);
				return -EFAULT;
			}
			ret = copy_from_user(tab->coordinates_y[i],
					     config->coordinates_y[i],
					     config->height * config->width *
					     sizeof(*config->coordinates_y[i]));
			if (ret) {
				dev_err(isp->dev,
					"Failed to copy from User for y, ret is %d\n",
					ret);
				atomisp_css_morph_table_free(tab);
				return -EFAULT;
			}
		}
		asd->params.css_param.morph_table = tab;
		/* Only applied immediately while GDC/CAC is enabled. */
		if (asd->params.gdc_cac_en)
			asd->params.config.morph_table = tab;
	}

	return 0;
}
int atomisp_macc_table(struct atomisp_sub_device *asd, int flag,
struct atomisp_macc_config *config)
{
struct ia_css_macc_table *macc_table;
switch (config->color_effect) {
case V4L2_COLORFX_NONE:
macc_table = &asd->params.css_param.macc_table;
break;
case V4L2_COLORFX_SKY_BLUE:
macc_table = &blue_macc_table;
break;
case V4L2_COLORFX_GRASS_GREEN:
macc_table = &green_macc_table;
break;
case V4L2_COLORFX_SKIN_WHITEN_LOW:
macc_table = &skin_low_macc_table;
break;
case V4L2_COLORFX_SKIN_WHITEN:
macc_table = &skin_medium_macc_table;
break;
case V4L2_COLORFX_SKIN_WHITEN_HIGH:
macc_table = &skin_high_macc_table;
break;
default:
return -EINVAL;
}
if (flag == 0) {
/* Get macc table from current setup */
memcpy(&config->table, macc_table,
sizeof(struct ia_css_macc_table));
} else {
memcpy(macc_table, &config->table,
sizeof(struct ia_css_macc_table));
if (config->color_effect == asd->params.color_effect)
asd->params.config.macc_table = macc_table;
}
return 0;
}
/*
 * Apply a digital image stabilization (DIS) motion vector.
 * Always returns 0.
 */
int atomisp_set_dis_vector(struct atomisp_sub_device *asd,
			   struct atomisp_dis_vector *vector)
{
	atomisp_css_video_set_dis_vector(asd, vector);

	/* A new vector invalidates previously projected DIS data and
	 * requires the CSS parameters to be re-applied. */
	asd->params.dis_proj_data_valid = false;
	asd->params.css_update_params_needed = true;
	return 0;
}
/*
 * Function to get image stabilization (DIS) statistics from the ISP.
 * Thin wrapper around atomisp_css_get_dis_stat().
 */
int atomisp_get_dis_stat(struct atomisp_sub_device *asd,
			 struct atomisp_dis_statistics *stats)
{
	return atomisp_css_get_dis_stat(asd, stats);
}
/*
 * Function to set the current sensor pixel array size (as described by
 * camera_profiles.xml).
 * Returns 0 on success, -EINVAL when @config is NULL.
 */
int atomisp_set_array_res(struct atomisp_sub_device *asd,
			  struct atomisp_resolution *config)
{
	dev_dbg(asd->isp->dev, ">%s start\n", __func__);
	if (!config) {
		dev_err(asd->isp->dev, "Set sensor array size is not valid\n");
		return -EINVAL;
	}

	asd->sensor_array_res.width = config->width;
	asd->sensor_array_res.height = config->height;
	return 0;
}
/*
 * Function to get DVS2 BQ (binning-quad, i.e. half-pixel-pair)
 * resolution settings for the video pipe.
 *
 * All values are derived from the VIDEO pipe config: GDC output is the
 * pipe output halved, GDC input adds half the DVS envelope, and the
 * envelope itself is reported minus the fixed ISP filter padding when
 * video DIS is enabled.
 *
 * Returns 0 on success, -EAGAIN when the stream does not exist yet,
 * -EINVAL when @bq_res is NULL.
 */
int atomisp_get_dvs2_bq_resolutions(struct atomisp_sub_device *asd,
				    struct atomisp_dvs2_bq_resolutions *bq_res)
{
	struct ia_css_pipe_config *pipe_cfg = NULL;
	struct ia_css_stream *stream =
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream;

	if (!stream) {
		dev_warn(asd->isp->dev, "stream is not created");
		return -EAGAIN;
	}

	pipe_cfg = &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
		   .pipe_configs[IA_CSS_PIPE_ID_VIDEO];

	if (!bq_res)
		return -EINVAL;

	/* the GDC output resolution */
	bq_res->output_bq.width_bq = pipe_cfg->output_info[0].res.width / 2;
	bq_res->output_bq.height_bq = pipe_cfg->output_info[0].res.height / 2;

	bq_res->envelope_bq.width_bq = 0;
	bq_res->envelope_bq.height_bq = 0;
	/* the GDC input resolution */
	bq_res->source_bq.width_bq = bq_res->output_bq.width_bq +
				     pipe_cfg->dvs_envelope.width / 2;
	bq_res->source_bq.height_bq = bq_res->output_bq.height_bq +
				      pipe_cfg->dvs_envelope.height / 2;
	/*
	 * Bad pixels caused by spatial filter processing
	 * ISP filter resolution should be given by CSS/FW, but for now
	 * there is not such API to query, and it is fixed value, so
	 * hardcoded here.
	 */
	bq_res->ispfilter_bq.width_bq = 12 / 2;
	bq_res->ispfilter_bq.height_bq = 12 / 2;
	/* spatial filter shift, always 4 pixels */
	bq_res->gdc_shift_bq.width_bq = 4 / 2;
	bq_res->gdc_shift_bq.height_bq = 4 / 2;

	if (asd->params.video_dis_en) {
		/* Envelope available for DIS = DVS envelope minus the
		 * portion consumed by the ISP filter padding. */
		bq_res->envelope_bq.width_bq = pipe_cfg->dvs_envelope.width / 2 -
					       bq_res->ispfilter_bq.width_bq;
		bq_res->envelope_bq.height_bq = pipe_cfg->dvs_envelope.height / 2 -
						bq_res->ispfilter_bq.height_bq;
	}

	dev_dbg(asd->isp->dev,
		"source_bq.width_bq %d, source_bq.height_bq %d,\nispfilter_bq.width_bq %d, ispfilter_bq.height_bq %d,\ngdc_shift_bq.width_bq %d, gdc_shift_bq.height_bq %d,\nenvelope_bq.width_bq %d, envelope_bq.height_bq %d,\noutput_bq.width_bq %d, output_bq.height_bq %d\n",
		bq_res->source_bq.width_bq, bq_res->source_bq.height_bq,
		bq_res->ispfilter_bq.width_bq, bq_res->ispfilter_bq.height_bq,
		bq_res->gdc_shift_bq.width_bq, bq_res->gdc_shift_bq.height_bq,
		bq_res->envelope_bq.width_bq, bq_res->envelope_bq.height_bq,
		bq_res->output_bq.width_bq, bq_res->output_bq.height_bq);

	return 0;
}
/*
 * Set the DIS (digital image stabilization) coefficients.
 * Thin wrapper around atomisp_css_set_dis_coefs().
 */
int atomisp_set_dis_coefs(struct atomisp_sub_device *asd,
			  struct atomisp_dis_coefficients *coefs)
{
	return atomisp_css_set_dis_coefs(asd, coefs);
}
/*
 * Function to get 3A statistics from the ISP.
 *
 * Only reading is supported (@flag must be 0).  The oldest ready 3A
 * buffer is translated/converted into the per-device user statistics
 * buffer, copied out to config->data, and then recycled onto the free
 * list.
 *
 * Returns 0 on success, -EINVAL on bad arguments or unallocated
 * statistics, -EAGAIN when the grid changed or no statistics are ready
 * yet, -EFAULT when the copy to user space fails.
 */
int atomisp_3a_stat(struct atomisp_sub_device *asd, int flag,
		    struct atomisp_3a_statistics *config)
{
	struct atomisp_device *isp = asd->isp;
	struct atomisp_s3a_buf *s3a_buf;
	unsigned long ret;

	if (flag != 0)
		return -EINVAL;

	/* sanity check to avoid writing into unallocated memory. */
	if (asd->params.s3a_output_bytes == 0)
		return -EINVAL;

	if (atomisp_compare_grid(asd, &config->grid_info) != 0) {
		/* If the grid info in the argument differs from the current
		   grid info, we tell the caller to reset the grid size and
		   try again. */
		return -EAGAIN;
	}

	if (list_empty(&asd->s3a_stats_ready)) {
		dev_err(isp->dev, "3a statistics is not valid.\n");
		return -EAGAIN;
	}

	s3a_buf = list_entry(asd->s3a_stats_ready.next,
			     struct atomisp_s3a_buf, list);
	/* A mapped buffer needs translation; otherwise convert the raw
	 * statistics directly. */
	if (s3a_buf->s3a_map)
		ia_css_translate_3a_statistics(
		    asd->params.s3a_user_stat, s3a_buf->s3a_map);
	else
		ia_css_get_3a_statistics(asd->params.s3a_user_stat,
					 s3a_buf->s3a_data);

	config->exp_id = s3a_buf->s3a_data->exp_id;
	config->isp_config_id = s3a_buf->s3a_data->isp_config_id;

	ret = copy_to_user(config->data, asd->params.s3a_user_stat->data,
			   asd->params.s3a_output_bytes);
	if (ret) {
		dev_err(isp->dev, "copy to user failed: copied %lu bytes\n",
			ret);
		return -EFAULT;
	}

	/* Move the consumed buffer to the free buffer list. */
	list_move_tail(&s3a_buf->list, &asd->s3a_stats);

	dev_dbg(isp->dev, "%s: finish getting exp_id %d 3a stat, isp_config_id %d\n",
		__func__,
		config->exp_id, config->isp_config_id);
	return 0;
}
/*
 * Function to calculate the real zoom region for every pipe.
 *
 * Maps the user-supplied zoom region (expressed in sensor-array
 * coordinates) into effective-resolution coordinates, then clips the
 * region to the aspect ratio of the current pipe output.  Only the
 * PREVIEW and CAPTURE pipes support a crop region.  When dx/dy digital
 * zoom factors are set, or the region spans the full sensor array, no
 * mapping is needed.
 *
 * Returns 0 on success (or no-op), -EINVAL on an unsupported pipe,
 * zero-sized resolutions, or a mapped region exceeding the effective
 * resolution.
 */
int atomisp_calculate_real_zoom_region(struct atomisp_sub_device *asd,
				       struct ia_css_dz_config *dz_config,
				       enum ia_css_pipe_id css_pipe_id)
{
	struct atomisp_stream_env *stream_env =
	    &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
	struct atomisp_resolution eff_res, out_res;
	int w_offset, h_offset;

	memset(&eff_res, 0, sizeof(eff_res));
	memset(&out_res, 0, sizeof(out_res));

	/* Digital zoom via dx/dy is in use: zoom region not applicable. */
	if (dz_config->dx || dz_config->dy)
		return 0;

	if (css_pipe_id != IA_CSS_PIPE_ID_PREVIEW
	    && css_pipe_id != IA_CSS_PIPE_ID_CAPTURE) {
		dev_err(asd->isp->dev, "%s the set pipe no support crop region"
			, __func__);
		return -EINVAL;
	}

	eff_res.width =
	    stream_env->stream_config.input_config.effective_res.width;
	eff_res.height =
	    stream_env->stream_config.input_config.effective_res.height;
	if (eff_res.width == 0 || eff_res.height == 0) {
		dev_err(asd->isp->dev, "%s err effective resolution"
			, __func__);
		return -EINVAL;
	}

	if (dz_config->zoom_region.resolution.width
	    == asd->sensor_array_res.width
	    || dz_config->zoom_region.resolution.height
	    == asd->sensor_array_res.height) {
		/*no need crop region*/
		dz_config->zoom_region.origin.x = 0;
		dz_config->zoom_region.origin.y = 0;
		dz_config->zoom_region.resolution.width = eff_res.width;
		dz_config->zoom_region.resolution.height = eff_res.height;
		return 0;
	}

	/* FIXME:
	 * This is not the correct implementation with Google's definition, due
	 * to firmware limitation.
	 * map real crop region base on above calculating base max crop region.
	 */
	if (!IS_ISP2401) {
		/* ISP2400: simple linear scaling from sensor-array
		 * coordinates to effective-resolution coordinates. */
		dz_config->zoom_region.origin.x = dz_config->zoom_region.origin.x
						  * eff_res.width
						  / asd->sensor_array_res.width;
		dz_config->zoom_region.origin.y = dz_config->zoom_region.origin.y
						  * eff_res.height
						  / asd->sensor_array_res.height;
		dz_config->zoom_region.resolution.width = dz_config->zoom_region.resolution.width
			* eff_res.width
			/ asd->sensor_array_res.width;
		dz_config->zoom_region.resolution.height = dz_config->zoom_region.resolution.height
			* eff_res.height
			/ asd->sensor_array_res.height;
		/*
		 * Set same ratio of crop region resolution and current pipe output
		 * resolution
		 */
		out_res.width = stream_env->pipe_configs[css_pipe_id].output_info[0].res.width;
		out_res.height = stream_env->pipe_configs[css_pipe_id].output_info[0].res.height;
		if (out_res.width == 0 || out_res.height == 0) {
			dev_err(asd->isp->dev, "%s err current pipe output resolution"
				, __func__);
			return -EINVAL;
		}
	} else {
		/* ISP2401: first remove the letterbox/pillarbox offset
		 * introduced by the aspect-ratio difference between the
		 * sensor array and the pipe output, then scale. */
		out_res.width = stream_env->pipe_configs[css_pipe_id].output_info[0].res.width;
		out_res.height = stream_env->pipe_configs[css_pipe_id].output_info[0].res.height;
		if (out_res.width == 0 || out_res.height == 0) {
			dev_err(asd->isp->dev, "%s err current pipe output resolution"
				, __func__);
			return -EINVAL;
		}

		if (asd->sensor_array_res.width * out_res.height
		    < out_res.width * asd->sensor_array_res.height) {
			/* Sensor is narrower than output: vertical crop. */
			h_offset = asd->sensor_array_res.height
				   - asd->sensor_array_res.width
				   * out_res.height / out_res.width;
			h_offset = h_offset / 2;
			if (dz_config->zoom_region.origin.y < h_offset)
				dz_config->zoom_region.origin.y = 0;
			else
				dz_config->zoom_region.origin.y = dz_config->zoom_region.origin.y - h_offset;
			w_offset = 0;
		} else {
			/* Sensor is wider than output: horizontal crop. */
			w_offset = asd->sensor_array_res.width
				   - asd->sensor_array_res.height
				   * out_res.width / out_res.height;
			w_offset = w_offset / 2;
			if (dz_config->zoom_region.origin.x < w_offset)
				dz_config->zoom_region.origin.x = 0;
			else
				dz_config->zoom_region.origin.x = dz_config->zoom_region.origin.x - w_offset;
			h_offset = 0;
		}
		dz_config->zoom_region.origin.x = dz_config->zoom_region.origin.x
						  * eff_res.width
						  / (asd->sensor_array_res.width - 2 * w_offset);
		dz_config->zoom_region.origin.y = dz_config->zoom_region.origin.y
						  * eff_res.height
						  / (asd->sensor_array_res.height - 2 * h_offset);
		dz_config->zoom_region.resolution.width = dz_config->zoom_region.resolution.width
			* eff_res.width
			/ (asd->sensor_array_res.width - 2 * w_offset);
		dz_config->zoom_region.resolution.height = dz_config->zoom_region.resolution.height
			* eff_res.height
			/ (asd->sensor_array_res.height - 2 * h_offset);
	}

	/* Force the region's aspect ratio to match the pipe output by
	 * shrinking whichever dimension is proportionally too large. */
	if (out_res.width * dz_config->zoom_region.resolution.height
	    > dz_config->zoom_region.resolution.width * out_res.height) {
		dz_config->zoom_region.resolution.height =
		    dz_config->zoom_region.resolution.width
		    * out_res.height / out_res.width;
	} else {
		dz_config->zoom_region.resolution.width =
		    dz_config->zoom_region.resolution.height
		    * out_res.width / out_res.height;
	}

	dev_dbg(asd->isp->dev,
		"%s crop region:(%d,%d),(%d,%d) eff_res(%d, %d) array_size(%d,%d) out_res(%d, %d)\n",
		__func__, dz_config->zoom_region.origin.x,
		dz_config->zoom_region.origin.y,
		dz_config->zoom_region.resolution.width,
		dz_config->zoom_region.resolution.height,
		eff_res.width, eff_res.height,
		asd->sensor_array_res.width,
		asd->sensor_array_res.height,
		out_res.width, out_res.height);

	/* Final sanity check: region must fit inside the effective
	 * resolution after all mappings. */
	if ((dz_config->zoom_region.origin.x +
	     dz_config->zoom_region.resolution.width
	     > eff_res.width) ||
	    (dz_config->zoom_region.origin.y +
	     dz_config->zoom_region.resolution.height
	     > eff_res.height))
		return -EINVAL;

	return 0;
}
/*
 * Function to check whether the zoom region is effective, i.e. fits
 * entirely inside the sensor pixel array and is non-empty.
 *
 * NOTE(review): this returns true early when BOTH dx and dy are set,
 * while atomisp_calculate_real_zoom_region() skips the region when
 * EITHER is set -- confirm whether '&&' vs '||' is intentional.
 */
static bool atomisp_check_zoom_region(
    struct atomisp_sub_device *asd,
    struct ia_css_dz_config *dz_config)
{
	struct atomisp_resolution config;
	bool flag = false;
	unsigned int w, h;

	memset(&config, 0, sizeof(struct atomisp_resolution));

	/* dx/dy digital zoom in use: region check not applicable. */
	if (dz_config->dx && dz_config->dy)
		return true;

	config.width = asd->sensor_array_res.width;
	config.height = asd->sensor_array_res.height;

	/* Bottom-right corner of the requested region. */
	w = dz_config->zoom_region.origin.x +
	    dz_config->zoom_region.resolution.width;
	h = dz_config->zoom_region.origin.y +
	    dz_config->zoom_region.resolution.height;

	if ((w <= config.width) && (h <= config.height) && w > 0 && h > 0)
		flag = true;
	else
		/* setting error zoom region */
		dev_err(asd->isp->dev,
			"%s zoom region ERROR:dz_config:(%d,%d),(%d,%d)array_res(%d, %d)\n",
			__func__, dz_config->zoom_region.origin.x,
			dz_config->zoom_region.origin.y,
			dz_config->zoom_region.resolution.width,
			dz_config->zoom_region.resolution.height,
			config.width, config.height);

	return flag;
}
/*
 * Wire every parameter block staged in @css_param (update_flag entry
 * non-NULL) into asd->params.config so the CSS/firmware picks it up on
 * the next parameter update.  Pure pointer plumbing; the data itself was
 * staged earlier by atomisp_cp_general_isp_parameters() and friends.
 */
void atomisp_apply_css_parameters(
    struct atomisp_sub_device *asd,
    struct atomisp_css_params *css_param)
{
	if (css_param->update_flag.wb_config)
		asd->params.config.wb_config = &css_param->wb_config;

	if (css_param->update_flag.ob_config)
		asd->params.config.ob_config = &css_param->ob_config;

	if (css_param->update_flag.dp_config)
		asd->params.config.dp_config = &css_param->dp_config;

	if (css_param->update_flag.nr_config)
		asd->params.config.nr_config = &css_param->nr_config;

	if (css_param->update_flag.ee_config)
		asd->params.config.ee_config = &css_param->ee_config;

	if (css_param->update_flag.tnr_config)
		asd->params.config.tnr_config = &css_param->tnr_config;

	if (css_param->update_flag.a3a_config)
		asd->params.config.s3a_config = &css_param->s3a_config;

	if (css_param->update_flag.ctc_config)
		asd->params.config.ctc_config = &css_param->ctc_config;

	if (css_param->update_flag.cnr_config)
		asd->params.config.cnr_config = &css_param->cnr_config;

	if (css_param->update_flag.ecd_config)
		asd->params.config.ecd_config = &css_param->ecd_config;

	if (css_param->update_flag.ynr_config)
		asd->params.config.ynr_config = &css_param->ynr_config;

	if (css_param->update_flag.fc_config)
		asd->params.config.fc_config = &css_param->fc_config;

	if (css_param->update_flag.macc_config)
		asd->params.config.macc_config = &css_param->macc_config;

	if (css_param->update_flag.aa_config)
		asd->params.config.aa_config = &css_param->aa_config;

	if (css_param->update_flag.anr_config)
		asd->params.config.anr_config = &css_param->anr_config;

	if (css_param->update_flag.xnr_config)
		asd->params.config.xnr_config = &css_param->xnr_config;

	if (css_param->update_flag.yuv2rgb_cc_config)
		asd->params.config.yuv2rgb_cc_config = &css_param->yuv2rgb_cc_config;

	if (css_param->update_flag.rgb2yuv_cc_config)
		asd->params.config.rgb2yuv_cc_config = &css_param->rgb2yuv_cc_config;

	if (css_param->update_flag.macc_table)
		asd->params.config.macc_table = &css_param->macc_table;

	if (css_param->update_flag.xnr_table)
		asd->params.config.xnr_table = &css_param->xnr_table;

	if (css_param->update_flag.r_gamma_table)
		asd->params.config.r_gamma_table = &css_param->r_gamma_table;

	if (css_param->update_flag.g_gamma_table)
		asd->params.config.g_gamma_table = &css_param->g_gamma_table;

	if (css_param->update_flag.b_gamma_table)
		asd->params.config.b_gamma_table = &css_param->b_gamma_table;

	/* anr_thres and the DVS-related items go through helper setters
	 * instead of plain pointer assignment. */
	if (css_param->update_flag.anr_thres)
		atomisp_css_set_anr_thres(asd, &css_param->anr_thres);

	if (css_param->update_flag.shading_table)
		asd->params.config.shading_table = css_param->shading_table;

	if (css_param->update_flag.morph_table && asd->params.gdc_cac_en)
		asd->params.config.morph_table = css_param->morph_table;

	if (css_param->update_flag.dvs2_coefs) {
		struct ia_css_dvs_grid_info *dvs_grid_info =
		    atomisp_css_get_dvs_grid_info(
			&asd->params.curr_grid_info);

		if (dvs_grid_info && dvs_grid_info->enable)
			atomisp_css_set_dvs2_coefs(asd, css_param->dvs2_coeff);
	}

	if (css_param->update_flag.dvs_6axis_config)
		atomisp_css_set_dvs_6axis(asd, css_param->dvs_6axis);

	atomisp_css_set_isp_config_id(asd, css_param->isp_config_id);
	/*
	 * These configurations are only used by ISP1.x, not for ISP2.x,
	 * so do not handle them. see comments of ia_css_isp_config.
	 * 1 cc_config
	 * 2 ce_config
	 * 3 de_config
	 * 4 gc_config
	 * 5 gamma_table
	 * 6 ctc_table
	 * 7 dvs_coefs
	 */
}
/*
 * Copy @n bytes from @from into @to, where @from is either a user-space
 * pointer (@from_user == true) or a kernel pointer.
 *
 * Returns the number of bytes that could NOT be copied (0 on success),
 * mirroring copy_from_user() semantics; kernel-to-kernel copies always
 * succeed.
 */
static unsigned long copy_from_compatible(void *to, const void *from,
					  unsigned long n, bool from_user)
{
	if (from_user)
		return copy_from_user(to, (void __user *)from, n);

	memcpy(to, from, n);
	return 0;
}
/*
 * Stage the general ISP parameter set from @arg into @css_param and set
 * the matching update_flag pointers.
 *
 * Every parameter block follows the same pattern: if the source pointer
 * is present, and either the data comes from user space or that block
 * has not been staged yet (its update_flag entry is still NULL), copy it
 * into the staging area and point the update flag at the staged copy.
 * @from_user selects copy_from_user() vs memcpy() semantics (see
 * copy_from_compatible()).
 *
 * Returns 0 on success, -EINVAL on bad arguments or an invalid zoom
 * region, -EFAULT on a copy failure.
 */
int atomisp_cp_general_isp_parameters(struct atomisp_sub_device *asd,
				      struct atomisp_parameters *arg,
				      struct atomisp_css_params *css_param,
				      bool from_user)
{
	struct atomisp_parameters *cur_config = &css_param->update_flag;

	if (!arg || !asd || !css_param)
		return -EINVAL;

	if (arg->wb_config && (from_user || !cur_config->wb_config)) {
		if (copy_from_compatible(&css_param->wb_config, arg->wb_config,
					 sizeof(struct ia_css_wb_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.wb_config =
		    (struct atomisp_wb_config *)&css_param->wb_config;
	}

	if (arg->ob_config && (from_user || !cur_config->ob_config)) {
		if (copy_from_compatible(&css_param->ob_config, arg->ob_config,
					 sizeof(struct ia_css_ob_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.ob_config =
		    (struct atomisp_ob_config *)&css_param->ob_config;
	}

	if (arg->dp_config && (from_user || !cur_config->dp_config)) {
		if (copy_from_compatible(&css_param->dp_config, arg->dp_config,
					 sizeof(struct ia_css_dp_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.dp_config =
		    (struct atomisp_dp_config *)&css_param->dp_config;
	}

	/* The digital-zoom region is only accepted outside video mode
	 * and must pass the zoom-region sanity check. */
	if (asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
		if (arg->dz_config && (from_user || !cur_config->dz_config)) {
			if (copy_from_compatible(&css_param->dz_config,
						 arg->dz_config,
						 sizeof(struct ia_css_dz_config),
						 from_user))
				return -EFAULT;
			if (!atomisp_check_zoom_region(asd,
						       &css_param->dz_config)) {
				dev_err(asd->isp->dev, "crop region error!");
				return -EINVAL;
			}
			css_param->update_flag.dz_config =
			    (struct atomisp_dz_config *)
			    &css_param->dz_config;
		}
	}

	if (arg->nr_config && (from_user || !cur_config->nr_config)) {
		if (copy_from_compatible(&css_param->nr_config, arg->nr_config,
					 sizeof(struct ia_css_nr_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.nr_config =
		    (struct atomisp_nr_config *)&css_param->nr_config;
	}

	if (arg->ee_config && (from_user || !cur_config->ee_config)) {
		if (copy_from_compatible(&css_param->ee_config, arg->ee_config,
					 sizeof(struct ia_css_ee_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.ee_config =
		    (struct atomisp_ee_config *)&css_param->ee_config;
	}

	if (arg->tnr_config && (from_user || !cur_config->tnr_config)) {
		if (copy_from_compatible(&css_param->tnr_config,
					 arg->tnr_config,
					 sizeof(struct ia_css_tnr_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.tnr_config =
		    (struct atomisp_tnr_config *)
		    &css_param->tnr_config;
	}

	if (arg->a3a_config && (from_user || !cur_config->a3a_config)) {
		if (copy_from_compatible(&css_param->s3a_config,
					 arg->a3a_config,
					 sizeof(struct ia_css_3a_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.a3a_config =
		    (struct atomisp_3a_config *)&css_param->s3a_config;
	}

	if (arg->ctc_config && (from_user || !cur_config->ctc_config)) {
		if (copy_from_compatible(&css_param->ctc_config,
					 arg->ctc_config,
					 sizeof(struct ia_css_ctc_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.ctc_config =
		    (struct atomisp_ctc_config *)
		    &css_param->ctc_config;
	}

	if (arg->cnr_config && (from_user || !cur_config->cnr_config)) {
		if (copy_from_compatible(&css_param->cnr_config,
					 arg->cnr_config,
					 sizeof(struct ia_css_cnr_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.cnr_config =
		    (struct atomisp_cnr_config *)
		    &css_param->cnr_config;
	}

	if (arg->ecd_config && (from_user || !cur_config->ecd_config)) {
		if (copy_from_compatible(&css_param->ecd_config,
					 arg->ecd_config,
					 sizeof(struct ia_css_ecd_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.ecd_config =
		    (struct atomisp_ecd_config *)
		    &css_param->ecd_config;
	}

	if (arg->ynr_config && (from_user || !cur_config->ynr_config)) {
		if (copy_from_compatible(&css_param->ynr_config,
					 arg->ynr_config,
					 sizeof(struct ia_css_ynr_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.ynr_config =
		    (struct atomisp_ynr_config *)
		    &css_param->ynr_config;
	}

	if (arg->fc_config && (from_user || !cur_config->fc_config)) {
		if (copy_from_compatible(&css_param->fc_config,
					 arg->fc_config,
					 sizeof(struct ia_css_fc_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.fc_config =
		    (struct atomisp_fc_config *)&css_param->fc_config;
	}

	if (arg->macc_config && (from_user || !cur_config->macc_config)) {
		if (copy_from_compatible(&css_param->macc_config,
					 arg->macc_config,
					 sizeof(struct ia_css_macc_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.macc_config =
		    (struct atomisp_macc_config *)
		    &css_param->macc_config;
	}

	if (arg->aa_config && (from_user || !cur_config->aa_config)) {
		if (copy_from_compatible(&css_param->aa_config, arg->aa_config,
					 sizeof(struct ia_css_aa_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.aa_config =
		    (struct atomisp_aa_config *)&css_param->aa_config;
	}

	if (arg->anr_config && (from_user || !cur_config->anr_config)) {
		if (copy_from_compatible(&css_param->anr_config,
					 arg->anr_config,
					 sizeof(struct ia_css_anr_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.anr_config =
		    (struct atomisp_anr_config *)
		    &css_param->anr_config;
	}

	if (arg->xnr_config && (from_user || !cur_config->xnr_config)) {
		if (copy_from_compatible(&css_param->xnr_config,
					 arg->xnr_config,
					 sizeof(struct ia_css_xnr_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.xnr_config =
		    (struct atomisp_xnr_config *)
		    &css_param->xnr_config;
	}

	if (arg->yuv2rgb_cc_config &&
	    (from_user || !cur_config->yuv2rgb_cc_config)) {
		if (copy_from_compatible(&css_param->yuv2rgb_cc_config,
					 arg->yuv2rgb_cc_config,
					 sizeof(struct ia_css_cc_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.yuv2rgb_cc_config =
		    (struct atomisp_cc_config *)
		    &css_param->yuv2rgb_cc_config;
	}

	if (arg->rgb2yuv_cc_config &&
	    (from_user || !cur_config->rgb2yuv_cc_config)) {
		if (copy_from_compatible(&css_param->rgb2yuv_cc_config,
					 arg->rgb2yuv_cc_config,
					 sizeof(struct ia_css_cc_config),
					 from_user))
			return -EFAULT;
		css_param->update_flag.rgb2yuv_cc_config =
		    (struct atomisp_cc_config *)
		    &css_param->rgb2yuv_cc_config;
	}

	if (arg->macc_table && (from_user || !cur_config->macc_table)) {
		if (copy_from_compatible(&css_param->macc_table,
					 arg->macc_table,
					 sizeof(struct ia_css_macc_table),
					 from_user))
			return -EFAULT;
		css_param->update_flag.macc_table =
		    (struct atomisp_macc_table *)
		    &css_param->macc_table;
	}

	if (arg->xnr_table && (from_user || !cur_config->xnr_table)) {
		if (copy_from_compatible(&css_param->xnr_table,
					 arg->xnr_table,
					 sizeof(struct ia_css_xnr_table),
					 from_user))
			return -EFAULT;
		css_param->update_flag.xnr_table =
		    (struct atomisp_xnr_table *)&css_param->xnr_table;
	}

	if (arg->r_gamma_table && (from_user || !cur_config->r_gamma_table)) {
		if (copy_from_compatible(&css_param->r_gamma_table,
					 arg->r_gamma_table,
					 sizeof(struct ia_css_rgb_gamma_table),
					 from_user))
			return -EFAULT;
		css_param->update_flag.r_gamma_table =
		    (struct atomisp_rgb_gamma_table *)
		    &css_param->r_gamma_table;
	}

	if (arg->g_gamma_table && (from_user || !cur_config->g_gamma_table)) {
		if (copy_from_compatible(&css_param->g_gamma_table,
					 arg->g_gamma_table,
					 sizeof(struct ia_css_rgb_gamma_table),
					 from_user))
			return -EFAULT;
		css_param->update_flag.g_gamma_table =
		    (struct atomisp_rgb_gamma_table *)
		    &css_param->g_gamma_table;
	}

	if (arg->b_gamma_table && (from_user || !cur_config->b_gamma_table)) {
		if (copy_from_compatible(&css_param->b_gamma_table,
					 arg->b_gamma_table,
					 sizeof(struct ia_css_rgb_gamma_table),
					 from_user))
			return -EFAULT;
		css_param->update_flag.b_gamma_table =
		    (struct atomisp_rgb_gamma_table *)
		    &css_param->b_gamma_table;
	}

	if (arg->anr_thres && (from_user || !cur_config->anr_thres)) {
		if (copy_from_compatible(&css_param->anr_thres, arg->anr_thres,
					 sizeof(struct ia_css_anr_thres),
					 from_user))
			return -EFAULT;
		css_param->update_flag.anr_thres =
		    (struct atomisp_anr_thres *)&css_param->anr_thres;
	}

	if (from_user)
		css_param->isp_config_id = arg->isp_config_id;
	/*
	 * These configurations are only used by ISP1.x, not for ISP2.x,
	 * so do not handle them. see comments of ia_css_isp_config.
	 * 1 cc_config
	 * 2 ce_config
	 * 3 de_config
	 * 4 gc_config
	 * 5 gamma_table
	 * 6 ctc_table
	 * 7 dvs_coefs
	 */
	return 0;
}
/*
 * Stage a lens shading correction (LSC) table from @source_st into
 * @css_param.
 *
 * When the table is disabled, a minimal 1x1 table with enable = 0 is
 * installed instead.  An incoming table identical to the currently
 * staged one is dropped without update.
 *
 * NOTE(review): on the !IS_ISP2401 path @source_st is dereferenced
 * directly (st->enable, st->data[], ...) even when @from_user is true,
 * i.e. when it may be a user-space pointer -- verify callers always pass
 * a kernel copy in that case.
 *
 * Returns 0 on success/no-op, -EINVAL on validation failure, -EFAULT on
 * copy failure, -ENOMEM on allocation failure.
 */
int atomisp_cp_lsc_table(struct atomisp_sub_device *asd,
			 struct atomisp_shading_table *source_st,
			 struct atomisp_css_params *css_param,
			 bool from_user)
{
	unsigned int i;
	unsigned int len_table;
	struct ia_css_shading_table *shading_table;
	struct ia_css_shading_table *old_table;
	struct atomisp_shading_table *st, dest_st;

	if (!source_st)
		return 0;

	if (!css_param)
		return -EINVAL;

	/* Already staged and not a fresh user request: keep it. */
	if (!from_user && css_param->update_flag.shading_table)
		return 0;

	if (IS_ISP2401) {
		/* ISP2401: pull the descriptor into kernel memory before
		 * touching any of its fields. */
		if (copy_from_compatible(&dest_st, source_st,
					 sizeof(struct atomisp_shading_table),
					 from_user)) {
			dev_err(asd->isp->dev, "copy shading table failed!");
			return -EFAULT;
		}
		st = &dest_st;
	} else {
		st = source_st;
	}

	old_table = css_param->shading_table;

	/* user config is to disable the shading table. */
	if (!st->enable) {
		/* Generate a minimum table with enable = 0. */
		shading_table = atomisp_css_shading_table_alloc(1, 1);
		if (!shading_table)
			return -ENOMEM;
		shading_table->enable = 0;
		goto set_lsc;
	}

	/* Setting a new table. Validate first - all tables must be set */
	for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
		if (!st->data[i]) {
			dev_err(asd->isp->dev, "shading table validate failed");
			return -EINVAL;
		}
	}

	/* Shading table size per color */
	if (st->width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
	    st->height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR) {
		dev_err(asd->isp->dev, "shading table w/h validate failed!");
		return -EINVAL;
	}

	shading_table = atomisp_css_shading_table_alloc(st->width, st->height);
	if (!shading_table)
		return -ENOMEM;

	len_table = st->width * st->height * ATOMISP_SC_TYPE_SIZE;
	for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
		if (copy_from_compatible(shading_table->data[i],
					 st->data[i], len_table, from_user)) {
			atomisp_css_shading_table_free(shading_table);
			return -EFAULT;
		}
	}
	shading_table->sensor_width = st->sensor_width;
	shading_table->sensor_height = st->sensor_height;
	shading_table->fraction_bits = st->fraction_bits;
	shading_table->enable = st->enable;

	/* No need to update shading table if it is the same */
	if (old_table &&
	    old_table->sensor_width == shading_table->sensor_width &&
	    old_table->sensor_height == shading_table->sensor_height &&
	    old_table->width == shading_table->width &&
	    old_table->height == shading_table->height &&
	    old_table->fraction_bits == shading_table->fraction_bits &&
	    old_table->enable == shading_table->enable) {
		bool data_is_same = true;

		for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
			if (memcmp(shading_table->data[i], old_table->data[i],
				   len_table) != 0) {
				data_is_same = false;
				break;
			}
		}

		if (data_is_same) {
			atomisp_css_shading_table_free(shading_table);
			return 0;
		}
	}

set_lsc:
	/* set LSC to CSS */
	css_param->shading_table = shading_table;
	css_param->update_flag.shading_table = (struct atomisp_shading_table *)shading_table;
	/* NOTE(review): sc_en looks like an enable flag being assigned a
	 * pointer (non-NULL => enabled) -- confirm its declared type. */
	asd->params.sc_en = shading_table;

	if (old_table)
		atomisp_css_shading_table_free(old_table);

	return 0;
}
/*
 * Stage DVS2 (digital video stabilization) coefficients from @coefs into
 * css_param->dvs2_coeff.
 *
 * The grid in the supplied coefficients must match the current DVS grid;
 * a mismatch returns -EAGAIN so the caller re-queries the grid and
 * retries.  The ISP2400 path reads the coefficient pointers directly
 * from @coefs, while the ISP2401 path first copies the descriptor into a
 * local struct because @coefs itself may reside in user space.
 *
 * Returns 0 on success/no-op, -EAGAIN on grid mismatch, -EINVAL when any
 * coefficient array pointer is NULL, -ENOMEM on allocation failure,
 * -EFAULT on copy failure.
 */
int atomisp_css_cp_dvs2_coefs(struct atomisp_sub_device *asd,
			      struct ia_css_dvs2_coefficients *coefs,
			      struct atomisp_css_params *css_param,
			      bool from_user)
{
	struct ia_css_dvs_grid_info *cur =
	    atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
	int dvs_hor_coef_bytes, dvs_ver_coef_bytes;
	struct ia_css_dvs2_coefficients dvs2_coefs;

	if (!coefs || !cur)
		return 0;

	/* Already staged and not a fresh user request: keep it. */
	if (!from_user && css_param->update_flag.dvs2_coefs)
		return 0;

	if (!IS_ISP2401) {
		if (sizeof(*cur) != sizeof(coefs->grid) ||
		    memcmp(&coefs->grid, cur, sizeof(coefs->grid))) {
			dev_err(asd->isp->dev, "dvs grid mismatch!\n");
			/* If the grid info in the argument differs from the current
			   grid info, we tell the caller to reset the grid size and
			   try again. */
			return -EAGAIN;
		}

		if (!coefs->hor_coefs.odd_real ||
		    !coefs->hor_coefs.odd_imag ||
		    !coefs->hor_coefs.even_real ||
		    !coefs->hor_coefs.even_imag ||
		    !coefs->ver_coefs.odd_real ||
		    !coefs->ver_coefs.odd_imag ||
		    !coefs->ver_coefs.even_real ||
		    !coefs->ver_coefs.even_imag)
			return -EINVAL;

		if (!css_param->dvs2_coeff) {
			/* DIS coefficients. */
			css_param->dvs2_coeff = ia_css_dvs2_coefficients_allocate(cur);
			if (!css_param->dvs2_coeff)
				return -ENOMEM;
		}

		dvs_hor_coef_bytes = asd->params.dvs_hor_coef_bytes;
		dvs_ver_coef_bytes = asd->params.dvs_ver_coef_bytes;
		/* On any partial failure the staged buffer is released so
		 * no half-filled coefficient set is ever published. */
		if (copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_real,
					 coefs->hor_coefs.odd_real, dvs_hor_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_imag,
					 coefs->hor_coefs.odd_imag, dvs_hor_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_real,
					 coefs->hor_coefs.even_real, dvs_hor_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_imag,
					 coefs->hor_coefs.even_imag, dvs_hor_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_real,
					 coefs->ver_coefs.odd_real, dvs_ver_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_imag,
					 coefs->ver_coefs.odd_imag, dvs_ver_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_real,
					 coefs->ver_coefs.even_real, dvs_ver_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_imag,
					 coefs->ver_coefs.even_imag, dvs_ver_coef_bytes, from_user)) {
			ia_css_dvs2_coefficients_free(css_param->dvs2_coeff);
			css_param->dvs2_coeff = NULL;
			return -EFAULT;
		}
	} else {
		/* ISP2401: copy the descriptor before inspecting it. */
		if (copy_from_compatible(&dvs2_coefs, coefs,
					 sizeof(struct ia_css_dvs2_coefficients),
					 from_user)) {
			dev_err(asd->isp->dev, "copy dvs2 coef failed");
			return -EFAULT;
		}

		if (sizeof(*cur) != sizeof(dvs2_coefs.grid) ||
		    memcmp(&dvs2_coefs.grid, cur, sizeof(dvs2_coefs.grid))) {
			dev_err(asd->isp->dev, "dvs grid mismatch!\n");
			/* If the grid info in the argument differs from the current
			   grid info, we tell the caller to reset the grid size and
			   try again. */
			return -EAGAIN;
		}

		if (!dvs2_coefs.hor_coefs.odd_real ||
		    !dvs2_coefs.hor_coefs.odd_imag ||
		    !dvs2_coefs.hor_coefs.even_real ||
		    !dvs2_coefs.hor_coefs.even_imag ||
		    !dvs2_coefs.ver_coefs.odd_real ||
		    !dvs2_coefs.ver_coefs.odd_imag ||
		    !dvs2_coefs.ver_coefs.even_real ||
		    !dvs2_coefs.ver_coefs.even_imag)
			return -EINVAL;

		if (!css_param->dvs2_coeff) {
			/* DIS coefficients. */
			css_param->dvs2_coeff = ia_css_dvs2_coefficients_allocate(cur);
			if (!css_param->dvs2_coeff)
				return -ENOMEM;
		}

		dvs_hor_coef_bytes = asd->params.dvs_hor_coef_bytes;
		dvs_ver_coef_bytes = asd->params.dvs_ver_coef_bytes;
		if (copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_real,
					 dvs2_coefs.hor_coefs.odd_real, dvs_hor_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->hor_coefs.odd_imag,
					 dvs2_coefs.hor_coefs.odd_imag, dvs_hor_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_real,
					 dvs2_coefs.hor_coefs.even_real, dvs_hor_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->hor_coefs.even_imag,
					 dvs2_coefs.hor_coefs.even_imag, dvs_hor_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_real,
					 dvs2_coefs.ver_coefs.odd_real, dvs_ver_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->ver_coefs.odd_imag,
					 dvs2_coefs.ver_coefs.odd_imag, dvs_ver_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_real,
					 dvs2_coefs.ver_coefs.even_real, dvs_ver_coef_bytes, from_user) ||
		    copy_from_compatible(css_param->dvs2_coeff->ver_coefs.even_imag,
					 dvs2_coefs.ver_coefs.even_imag, dvs_ver_coef_bytes, from_user)) {
			ia_css_dvs2_coefficients_free(css_param->dvs2_coeff);
			css_param->dvs2_coeff = NULL;
			return -EFAULT;
		}
	}

	css_param->update_flag.dvs2_coefs =
	    (struct atomisp_dis_coefficients *)css_param->dvs2_coeff;
	return 0;
}
/*
 * Copy a caller-provided DVS 6-axis configuration into the cached CSS
 * parameters, (re)allocating the cached config when the table dimensions
 * changed. @from_user selects copy_from_user semantics in
 * copy_from_compatible().
 *
 * Returns 0 on success (including "nothing to do"), -EINVAL on internal
 * error, -ENOMEM on allocation failure, -EFAULT when a copy fails.
 */
int atomisp_cp_dvs_6axis_config(struct atomisp_sub_device *asd,
				struct atomisp_dvs_6axis_config *source_6axis_config,
				struct atomisp_css_params *css_param,
				bool from_user)
{
	struct ia_css_dvs_6axis_config *dvs_6axis_config;
	struct ia_css_dvs_6axis_config *old_6axis_config;
	struct ia_css_stream *stream =
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream;
	struct ia_css_dvs_grid_info *dvs_grid_info =
	    atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
	int ret = -EFAULT;

	if (!stream) {
		dev_err(asd->isp->dev, "%s: internal error!", __func__);
		return -EINVAL;
	}

	/* Nothing to do when no config was passed or DVS is not active. */
	if (!source_6axis_config || !dvs_grid_info)
		return 0;

	if (!dvs_grid_info->enable)
		return 0;

	/* A pending user-space update wins over a non user-space one. */
	if (!from_user && css_param->update_flag.dvs_6axis_config)
		return 0;

	/* check whether need to reallocate for 6 axis config */
	old_6axis_config = css_param->dvs_6axis;
	dvs_6axis_config = old_6axis_config;

	if (IS_ISP2401) {
		struct ia_css_dvs_6axis_config t_6axis_config;

		/*
		 * On ISP2401 the source may be a user pointer: copy the
		 * header first so the table dimensions can be used below.
		 * NOTE(review): this assumes struct atomisp_dvs_6axis_config
		 * and the ia_css header layout match - confirm.
		 */
		if (copy_from_compatible(&t_6axis_config, source_6axis_config,
					 sizeof(struct atomisp_dvs_6axis_config),
					 from_user)) {
			/* Fixed copy-pasted "copy morph table failed!" text */
			dev_err(asd->isp->dev, "copy 6axis config failed!");
			return -EFAULT;
		}

		if (old_6axis_config &&
		    (old_6axis_config->width_y != t_6axis_config.width_y ||
		     old_6axis_config->height_y != t_6axis_config.height_y ||
		     old_6axis_config->width_uv != t_6axis_config.width_uv ||
		     old_6axis_config->height_uv != t_6axis_config.height_uv)) {
			/* Dimensions changed: drop and reallocate the cache */
			ia_css_dvs2_6axis_config_free(css_param->dvs_6axis);
			css_param->dvs_6axis = NULL;

			dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
			if (!dvs_6axis_config)
				return -ENOMEM;
		} else if (!dvs_6axis_config) {
			dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
			if (!dvs_6axis_config)
				return -ENOMEM;
		}

		dvs_6axis_config->exp_id = t_6axis_config.exp_id;

		if (copy_from_compatible(dvs_6axis_config->xcoords_y,
					 t_6axis_config.xcoords_y,
					 t_6axis_config.width_y *
					 t_6axis_config.height_y *
					 sizeof(*dvs_6axis_config->xcoords_y),
					 from_user))
			goto error;
		if (copy_from_compatible(dvs_6axis_config->ycoords_y,
					 t_6axis_config.ycoords_y,
					 t_6axis_config.width_y *
					 t_6axis_config.height_y *
					 sizeof(*dvs_6axis_config->ycoords_y),
					 from_user))
			goto error;
		if (copy_from_compatible(dvs_6axis_config->xcoords_uv,
					 t_6axis_config.xcoords_uv,
					 t_6axis_config.width_uv *
					 t_6axis_config.height_uv *
					 sizeof(*dvs_6axis_config->xcoords_uv),
					 from_user))
			goto error;
		if (copy_from_compatible(dvs_6axis_config->ycoords_uv,
					 t_6axis_config.ycoords_uv,
					 t_6axis_config.width_uv *
					 t_6axis_config.height_uv *
					 sizeof(*dvs_6axis_config->ycoords_uv),
					 from_user))
			goto error;
	} else {
		if (old_6axis_config &&
		    (old_6axis_config->width_y != source_6axis_config->width_y ||
		     old_6axis_config->height_y != source_6axis_config->height_y ||
		     old_6axis_config->width_uv != source_6axis_config->width_uv ||
		     old_6axis_config->height_uv != source_6axis_config->height_uv)) {
			/* Dimensions changed: drop and reallocate the cache */
			ia_css_dvs2_6axis_config_free(css_param->dvs_6axis);
			css_param->dvs_6axis = NULL;

			dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
			if (!dvs_6axis_config)
				return -ENOMEM;
		} else if (!dvs_6axis_config) {
			dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
			if (!dvs_6axis_config)
				return -ENOMEM;
		}

		dvs_6axis_config->exp_id = source_6axis_config->exp_id;

		if (copy_from_compatible(dvs_6axis_config->xcoords_y,
					 source_6axis_config->xcoords_y,
					 source_6axis_config->width_y *
					 source_6axis_config->height_y *
					 sizeof(*source_6axis_config->xcoords_y),
					 from_user))
			goto error;
		if (copy_from_compatible(dvs_6axis_config->ycoords_y,
					 source_6axis_config->ycoords_y,
					 source_6axis_config->width_y *
					 source_6axis_config->height_y *
					 sizeof(*source_6axis_config->ycoords_y),
					 from_user))
			goto error;
		if (copy_from_compatible(dvs_6axis_config->xcoords_uv,
					 source_6axis_config->xcoords_uv,
					 source_6axis_config->width_uv *
					 source_6axis_config->height_uv *
					 sizeof(*source_6axis_config->xcoords_uv),
					 from_user))
			goto error;
		if (copy_from_compatible(dvs_6axis_config->ycoords_uv,
					 source_6axis_config->ycoords_uv,
					 source_6axis_config->width_uv *
					 source_6axis_config->height_uv *
					 sizeof(*source_6axis_config->ycoords_uv),
					 from_user))
			goto error;
	}

	css_param->dvs_6axis = dvs_6axis_config;
	css_param->update_flag.dvs_6axis_config =
	    (struct atomisp_dvs_6axis_config *)dvs_6axis_config;
	return 0;

error:
	/*
	 * Bug fix: when a copy fails into the still-cached config
	 * (dvs_6axis_config == css_param->dvs_6axis), clear the cached
	 * pointer before freeing, otherwise css_param->dvs_6axis dangles
	 * and atomisp_free_css_parameters() would double free it.
	 */
	if (dvs_6axis_config) {
		if (css_param->dvs_6axis == dvs_6axis_config)
			css_param->dvs_6axis = NULL;
		ia_css_dvs2_6axis_config_free(dvs_6axis_config);
	}
	return ret;
}
/*
 * Copy a GDC morph table from the caller (user space when @from_user is
 * true) into freshly allocated CSS parameter storage. On success the new
 * table is published in css_param->morph_table, the previous table is
 * freed, and update_flag.morph_table is set so CSS picks up the change.
 *
 * Returns 0 on success (or nothing to do), -ENOMEM or -EFAULT on failure.
 */
int atomisp_cp_morph_table(struct atomisp_sub_device *asd,
			   struct atomisp_morph_table *source_morph_table,
			   struct atomisp_css_params *css_param,
			   bool from_user)
{
	int ret = -EFAULT;
	unsigned int i;
	struct ia_css_morph_table *morph_table;
	struct ia_css_morph_table *old_morph_table;

	if (!source_morph_table)
		return 0;

	/* A pending user-space update wins over a non user-space one. */
	if (!from_user && css_param->update_flag.morph_table)
		return 0;

	old_morph_table = css_param->morph_table;

	if (IS_ISP2401) {
		struct ia_css_morph_table mtbl;

		/*
		 * On ISP2401 the source may be a user pointer: copy the
		 * header first so width/height can size the plane copies.
		 */
		if (copy_from_compatible(&mtbl, source_morph_table,
					 sizeof(struct atomisp_morph_table),
					 from_user)) {
			dev_err(asd->isp->dev, "copy morph table failed!");
			return -EFAULT;
		}

		morph_table = atomisp_css_morph_table_allocate(
				  mtbl.width,
				  mtbl.height);
		if (!morph_table)
			return -ENOMEM;

		/* Copy the x/y coordinate planes one by one. */
		for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
			if (copy_from_compatible(morph_table->coordinates_x[i],
						 (__force void *)source_morph_table->coordinates_x[i],
						 mtbl.height * mtbl.width *
						 sizeof(*morph_table->coordinates_x[i]),
						 from_user))
				goto error;

			if (copy_from_compatible(morph_table->coordinates_y[i],
						 (__force void *)source_morph_table->coordinates_y[i],
						 mtbl.height * mtbl.width *
						 sizeof(*morph_table->coordinates_y[i]),
						 from_user))
				goto error;
		}
	} else {
		/* ISP2400: the source header is directly addressable. */
		morph_table = atomisp_css_morph_table_allocate(
				  source_morph_table->width,
				  source_morph_table->height);
		if (!morph_table)
			return -ENOMEM;

		for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
			if (copy_from_compatible(morph_table->coordinates_x[i],
						 (__force void *)source_morph_table->coordinates_x[i],
						 source_morph_table->height * source_morph_table->width *
						 sizeof(*source_morph_table->coordinates_x[i]),
						 from_user))
				goto error;

			if (copy_from_compatible(morph_table->coordinates_y[i],
						 (__force void *)source_morph_table->coordinates_y[i],
						 source_morph_table->height * source_morph_table->width *
						 sizeof(*source_morph_table->coordinates_y[i]),
						 from_user))
				goto error;
		}
	}

	/* Publish the new table, then release the previous one. */
	css_param->morph_table = morph_table;
	if (old_morph_table)
		atomisp_css_morph_table_free(old_morph_table);
	css_param->update_flag.morph_table =
	    (struct atomisp_morph_table *)morph_table;
	return 0;

error:
	if (morph_table)
		atomisp_css_morph_table_free(morph_table);
	return ret;
}
/*
 * Populate @css_param from an in-kernel struct atomisp_parameters by
 * copying each parameter class in turn (general ISP parameters, LSC
 * shading table, morph table, DVS2 coefficients, DVS 6-axis config).
 * Stops at the first failure and returns its error code; 0 on success.
 */
int atomisp_makeup_css_parameters(struct atomisp_sub_device *asd,
				  struct atomisp_parameters *arg,
				  struct atomisp_css_params *css_param)
{
	int ret;

	ret = atomisp_cp_general_isp_parameters(asd, arg, css_param, false);
	if (!ret)
		ret = atomisp_cp_lsc_table(asd, arg->shading_table, css_param,
					   false);
	if (!ret)
		ret = atomisp_cp_morph_table(asd, arg->morph_table, css_param,
					     false);
	if (!ret)
		ret = atomisp_css_cp_dvs2_coefs(asd,
						(struct ia_css_dvs2_coefficients *)arg->dvs2_coefs,
						css_param, false);
	if (!ret)
		ret = atomisp_cp_dvs_6axis_config(asd, arg->dvs_6axis_config,
						  css_param, false);
	return ret;
}
/*
 * Release every dynamically allocated table held by @css_param and reset
 * the pointers, so calling this again (or freeing individual members
 * elsewhere) is safe.
 */
void atomisp_free_css_parameters(struct atomisp_css_params *css_param)
{
	if (css_param->dvs_6axis) {
		ia_css_dvs2_6axis_config_free(css_param->dvs_6axis);
		css_param->dvs_6axis = NULL;
	}
	if (css_param->dvs2_coeff) {
		ia_css_dvs2_coefficients_free(css_param->dvs2_coeff);
		css_param->dvs2_coeff = NULL;
	}
	if (css_param->shading_table) {
		ia_css_shading_table_free(css_param->shading_table);
		css_param->shading_table = NULL;
	}
	if (css_param->morph_table) {
		ia_css_morph_table_free(css_param->morph_table);
		css_param->morph_table = NULL;
	}
}
/*
 * Bind @param (may be NULL for "no per-frame parameters") to @frame's
 * buffer index and move the frame onto the pipe's active queue under
 * the pipe's irq lock.
 */
static void atomisp_move_frame_to_activeq(struct ia_css_frame *frame,
					  struct atomisp_css_params_with_list *param)
{
	struct atomisp_video_pipe *vpipe = vb_to_pipe(&frame->vb.vb2_buf);
	unsigned long flags;

	/* Remember the per-frame parameter set for this buffer index. */
	vpipe->frame_params[frame->vb.vb2_buf.index] = param;

	spin_lock_irqsave(&vpipe->irq_lock, flags);
	list_move_tail(&frame->queue, &vpipe->activeq);
	spin_unlock_irqrestore(&vpipe->irq_lock, flags);
}
/*
 * Check the parameter queue list and the buffer queue list for matching
 * items, then set the parameter to CSS and enqueue the buffer to CSS.
 * If a buffer in the buffer waiting list is not bound to a per-frame
 * parameter, it will be enqueued into CSS as soon as the per-frame
 * setting buffers before it are enqueued.
 */
/*
 * Walk the buffers waiting for per-frame parameters, match each one with
 * its cached parameter set (by isp_config_id), move matched or
 * parameter-less buffers to the active queue and finally kick the CSS
 * buffer enqueue. Must be called with isp->mutex held; a no-op unless
 * the subdevice is streaming.
 */
void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe)
{
	struct atomisp_sub_device *asd = pipe->asd;
	struct ia_css_frame *frame = NULL, *frame_tmp;
	struct atomisp_css_params_with_list *param = NULL, *param_tmp;
	bool need_to_enqueue_buffer = false;
	int i;

	lockdep_assert_held(&asd->isp->mutex);

	/*
	 * CSS/FW requires set parameter and enqueue buffer happen after ISP
	 * is streamon.
	 */
	if (!asd->streaming)
		return;

	/* Nothing to match if either list is empty. */
	if (list_empty(&pipe->per_frame_params) ||
	    list_empty(&pipe->buffers_waiting_for_param))
		return;

	list_for_each_entry_safe(frame, frame_tmp,
				 &pipe->buffers_waiting_for_param, queue) {
		i = frame->vb.vb2_buf.index;
		if (pipe->frame_request_config_id[i]) {
			/* Look up the parameter set bound to this buffer. */
			list_for_each_entry_safe(param, param_tmp,
						 &pipe->per_frame_params, list) {
				if (pipe->frame_request_config_id[i] != param->params.isp_config_id)
					continue;

				list_del(&param->list);

				/*
				 * clear the request config id as the buffer
				 * will be handled and enqueued into CSS soon
				 */
				pipe->frame_request_config_id[i] = 0;
				atomisp_move_frame_to_activeq(frame, param);
				need_to_enqueue_buffer = true;
				break;
			}

			/* If this is the end, stop further loop */
			if (list_entry_is_head(param, &pipe->per_frame_params, list))
				break;
		} else {
			/* No per-frame parameters requested for this buffer */
			atomisp_move_frame_to_activeq(frame, NULL);
			need_to_enqueue_buffer = true;
		}
	}

	if (!need_to_enqueue_buffer)
		return;

	atomisp_qbuffers_to_css(asd);
}
/*
* Function to configure ISP parameters
*/
int atomisp_set_parameters(struct video_device *vdev,
struct atomisp_parameters *arg)
{
struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
struct atomisp_sub_device *asd = pipe->asd;
struct atomisp_css_params_with_list *param = NULL;
struct atomisp_css_params *css_param = &asd->params.css_param;
int ret;
lockdep_assert_held(&asd->isp->mutex);
if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
dev_err(asd->isp->dev, "%s: internal error!\n", __func__);
return -EINVAL;
}
dev_dbg(asd->isp->dev, "set parameter(per_frame_setting %d) isp_config_id %d of %s\n",
arg->per_frame_setting, arg->isp_config_id, vdev->name);
if (arg->per_frame_setting) {
/*
* Per-frame setting enabled, we allocate a new parameter
* buffer to cache the parameters and only when frame buffers
* are ready, the parameters will be set to CSS.
* per-frame setting only works for the main output frame.
*/
param = kvzalloc(sizeof(*param), GFP_KERNEL);
if (!param) {
dev_err(asd->isp->dev, "%s: failed to alloc params buffer\n",
__func__);
return -ENOMEM;
}
css_param = ¶m->params;
}
ret = atomisp_cp_general_isp_parameters(asd, arg, css_param, true);
if (ret)
goto apply_parameter_failed;
ret = atomisp_cp_lsc_table(asd, arg->shading_table, css_param, true);
if (ret)
goto apply_parameter_failed;
ret = atomisp_cp_morph_table(asd, arg->morph_table, css_param, true);
if (ret)
goto apply_parameter_failed;
ret = atomisp_css_cp_dvs2_coefs(asd,
(struct ia_css_dvs2_coefficients *)arg->dvs2_coefs,
css_param, true);
if (ret)
goto apply_parameter_failed;
ret = atomisp_cp_dvs_6axis_config(asd, arg->dvs_6axis_config,
css_param, true);
if (ret)
goto apply_parameter_failed;
if (!arg->per_frame_setting) {
/* indicate to CSS that we have parameters to be updated */
asd->params.css_update_params_needed = true;
} else {
list_add_tail(¶m->list, &pipe->per_frame_params);
atomisp_handle_parameter_and_buffer(pipe);
}
return 0;
apply_parameter_failed:
if (css_param)
atomisp_free_css_parameters(css_param);
kvfree(param);
return ret;
}
/*
* Function to set/get isp parameters to isp
*/
/*
 * Get (flag == 0) or set ISP parameters.
 *
 * Get: fills @config with 3A grid info, metadata resolution/stride and
 * the DVS grid/envelope. Set: caches every parameter class from @config
 * and schedules a CSS parameter update.
 */
int atomisp_param(struct atomisp_sub_device *asd, int flag,
		  struct atomisp_parm *config)
{
	struct ia_css_pipe_config *vp_cfg =
	    &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
	    pipe_configs[IA_CSS_PIPE_ID_VIDEO];

	/* Read parameter for 3A binary info */
	if (flag == 0) {
		struct ia_css_dvs_grid_info *dvs_grid_info =
		    atomisp_css_get_dvs_grid_info(
			&asd->params.curr_grid_info);

		atomisp_curr_user_grid_info(asd, &config->info);

		/* We always return the resolution and stride even if there is
		 * no valid metadata. This allows the caller to get the
		 * information needed to allocate user-space buffers. */
		config->metadata_config.metadata_height = asd->
			stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
			metadata_info.resolution.height;
		config->metadata_config.metadata_stride = asd->
			stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
			metadata_info.stride;

		/* update dvs grid info */
		if (dvs_grid_info)
			memcpy(&config->dvs_grid,
			       dvs_grid_info,
			       sizeof(struct ia_css_dvs_grid_info));

		/* The DVS envelope only applies in video mode. */
		if (asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
			config->dvs_envelop.width = 0;
			config->dvs_envelop.height = 0;
			return 0;
		}

		/* update dvs envelop info */
		config->dvs_envelop.width = vp_cfg->dvs_envelope.width;
		config->dvs_envelop.height = vp_cfg->dvs_envelope.height;
		return 0;
	}

	memcpy(&asd->params.css_param.wb_config, &config->wb_config,
	       sizeof(struct ia_css_wb_config));
	memcpy(&asd->params.css_param.ob_config, &config->ob_config,
	       sizeof(struct ia_css_ob_config));
	memcpy(&asd->params.css_param.dp_config, &config->dp_config,
	       sizeof(struct ia_css_dp_config));
	memcpy(&asd->params.css_param.de_config, &config->de_config,
	       sizeof(struct ia_css_de_config));
	memcpy(&asd->params.css_param.dz_config, &config->dz_config,
	       sizeof(struct ia_css_dz_config));
	memcpy(&asd->params.css_param.ce_config, &config->ce_config,
	       sizeof(struct ia_css_ce_config));
	memcpy(&asd->params.css_param.nr_config, &config->nr_config,
	       sizeof(struct ia_css_nr_config));
	memcpy(&asd->params.css_param.ee_config, &config->ee_config,
	       sizeof(struct ia_css_ee_config));
	memcpy(&asd->params.css_param.tnr_config, &config->tnr_config,
	       sizeof(struct ia_css_tnr_config));

	/* SEPIA and BW keep their preset color conversion matrix. */
	if (asd->params.color_effect != V4L2_COLORFX_SEPIA &&
	    asd->params.color_effect != V4L2_COLORFX_BW) {
		memcpy(&asd->params.css_param.cc_config, &config->cc_config,
		       sizeof(struct ia_css_cc_config));
		asd->params.config.cc_config = &asd->params.css_param.cc_config;
	}

	/*
	 * Bug fix: the NEGATIVE-effect matrix negation used to run BEFORE
	 * the cc_config memcpy above; since NEGATIVE is neither SEPIA nor
	 * BW, the memcpy immediately overwrote the negated values, making
	 * the negation dead code. Apply it after the copy instead.
	 */
	if (asd->params.color_effect == V4L2_COLORFX_NEGATIVE) {
		asd->params.css_param.cc_config.matrix[3] = -config->cc_config.matrix[3];
		asd->params.css_param.cc_config.matrix[4] = -config->cc_config.matrix[4];
		asd->params.css_param.cc_config.matrix[5] = -config->cc_config.matrix[5];
		asd->params.css_param.cc_config.matrix[6] = -config->cc_config.matrix[6];
		asd->params.css_param.cc_config.matrix[7] = -config->cc_config.matrix[7];
		asd->params.css_param.cc_config.matrix[8] = -config->cc_config.matrix[8];
	}

	asd->params.config.wb_config = &asd->params.css_param.wb_config;
	asd->params.config.ob_config = &asd->params.css_param.ob_config;
	asd->params.config.de_config = &asd->params.css_param.de_config;
	asd->params.config.dz_config = &asd->params.css_param.dz_config;
	asd->params.config.ce_config = &asd->params.css_param.ce_config;
	asd->params.config.dp_config = &asd->params.css_param.dp_config;
	asd->params.config.nr_config = &asd->params.css_param.nr_config;
	asd->params.config.ee_config = &asd->params.css_param.ee_config;
	asd->params.config.tnr_config = &asd->params.css_param.tnr_config;
	asd->params.css_update_params_needed = true;

	return 0;
}
/*
* Function to configure color effect of the image
*/
/*
 * Get (flag == 0) or set the color effect.
 *
 * Setting first tries to delegate the effect to the sensor via the
 * V4L2_CID_COLORFX control; only when the sensor rejects it does the
 * ISP emulate the effect through CC / MACC / CTC tables.
 */
int atomisp_color_effect(struct atomisp_sub_device *asd, int flag,
			 __s32 *effect)
{
	struct ia_css_cc_config *cc_config = NULL;
	struct ia_css_macc_table *macc_table = NULL;
	struct ia_css_ctc_table *ctc_table = NULL;
	int ret = 0;
	struct v4l2_control control;
	struct atomisp_device *isp = asd->isp;

	if (flag == 0) {
		*effect = asd->params.color_effect;
		return 0;
	}

	control.id = V4L2_CID_COLORFX;
	control.value = *effect;
	ret =
	    v4l2_s_ctrl(NULL, isp->inputs[asd->input_curr].camera->ctrl_handler,
			&control);
	/*
	 * if set color effect to sensor successfully, return
	 * 0 directly.
	 */
	if (!ret) {
		asd->params.color_effect = (u32)*effect;
		return 0;
	}

	/* Already active in the ISP: nothing to do. */
	if (*effect == asd->params.color_effect)
		return 0;

	/*
	 * isp_subdev->params.macc_en should be set to false.
	 */
	asd->params.macc_en = false;

	/* Pick the CC / MACC / CTC table implementing the effect. */
	switch (*effect) {
	case V4L2_COLORFX_NONE:
		macc_table = &asd->params.css_param.macc_table;
		asd->params.macc_en = true;
		break;
	case V4L2_COLORFX_SEPIA:
		cc_config = &sepia_cc_config;
		break;
	case V4L2_COLORFX_NEGATIVE:
		cc_config = &nega_cc_config;
		break;
	case V4L2_COLORFX_BW:
		cc_config = &mono_cc_config;
		break;
	case V4L2_COLORFX_SKY_BLUE:
		macc_table = &blue_macc_table;
		asd->params.macc_en = true;
		break;
	case V4L2_COLORFX_GRASS_GREEN:
		macc_table = &green_macc_table;
		asd->params.macc_en = true;
		break;
	case V4L2_COLORFX_SKIN_WHITEN_LOW:
		macc_table = &skin_low_macc_table;
		asd->params.macc_en = true;
		break;
	case V4L2_COLORFX_SKIN_WHITEN:
		macc_table = &skin_medium_macc_table;
		asd->params.macc_en = true;
		break;
	case V4L2_COLORFX_SKIN_WHITEN_HIGH:
		macc_table = &skin_high_macc_table;
		asd->params.macc_en = true;
		break;
	case V4L2_COLORFX_VIVID:
		ctc_table = &vivid_ctc_table;
		break;
	default:
		return -EINVAL;
	}
	atomisp_update_capture_mode(asd);

	/* Install whichever table the chosen effect selected. */
	if (cc_config)
		asd->params.config.cc_config = cc_config;
	if (macc_table)
		asd->params.config.macc_table = macc_table;
	if (ctc_table)
		atomisp_css_set_ctc_table(asd, ctc_table);
	asd->params.color_effect = (u32)*effect;
	asd->params.css_update_params_needed = true;
	return 0;
}
/*
* Function to configure bad pixel correction
*/
/*
 * Get (flag == 0) or set the bad pixel correction enable flag.
 * Always succeeds.
 */
int atomisp_bad_pixel(struct atomisp_sub_device *asd, int flag,
		      __s32 *value)
{
	if (flag)
		asd->params.bad_pixel_en = !!*value;
	else
		*value = asd->params.bad_pixel_en;

	return 0;
}
/*
* Function to configure bad pixel correction params
*/
/*
 * Get (flag == 0) or set the bad pixel correction parameters.
 * Returns 0 on success, -EINVAL when no current config is available.
 */
int atomisp_bad_pixel_param(struct atomisp_sub_device *asd, int flag,
			    struct atomisp_dp_config *config)
{
	if (flag == 0) {
		/* Read the bad pixel config from the current setup. */
		return atomisp_css_get_dp_config(asd, config) ? -EINVAL : 0;
	}

	/* Cache the new config and schedule it for the next CSS update. */
	memcpy(&asd->params.css_param.dp_config, config,
	       sizeof(asd->params.css_param.dp_config));
	asd->params.config.dp_config = &asd->params.css_param.dp_config;
	asd->params.css_update_params_needed = true;

	return 0;
}
/*
 * Function to enable/disable video image stabilization
 */
/*
 * Get (flag == 0) or set the video stabilization (DIS) enable flag.
 * Always succeeds.
 */
int atomisp_video_stable(struct atomisp_sub_device *asd, int flag,
			 __s32 *value)
{
	if (flag) {
		asd->params.video_dis_en = !!*value;
		return 0;
	}

	*value = asd->params.video_dis_en;
	return 0;
}
/*
* Function to configure fixed pattern noise
*/
/*
 * Get (flag == 0) or set the fixed pattern noise correction enable flag.
 * Enabling is not implemented yet (needs a black frame from the sensor).
 */
int atomisp_fixed_pattern(struct atomisp_sub_device *asd, int flag,
			  __s32 *value)
{
	if (flag == 0) {
		*value = asd->params.fpn_en;
		return 0;
	}

	if (!*value)
		asd->params.fpn_en = false;

	/* TODO: capture a black frame from the sensor with the shutter off */
	return 0;
}
/*
 * Translate a v4l2 bytesperline value into the padded width (in
 * elements, not bytes) that an ia_css_frame expects for @format.
 */
static unsigned int
atomisp_bytesperline_to_padded_width(unsigned int bytesperline,
				     enum ia_css_frame_format format)
{
	unsigned int bytes_per_element;

	switch (format) {
	case IA_CSS_FRAME_FORMAT_UYVY:
	case IA_CSS_FRAME_FORMAT_YUYV:
	case IA_CSS_FRAME_FORMAT_RAW:
	case IA_CSS_FRAME_FORMAT_RGB565:
		bytes_per_element = 2;
		break;
	case IA_CSS_FRAME_FORMAT_RGBA888:
		bytes_per_element = 4;
		break;
	default:
		/*
		 * All remaining formats (the NV*, YV*, YUV*, planar RGB,
		 * QPLANE6, BINARY_8, ... planar variants) use one byte per
		 * element, i.e. the padded width equals bytesperline.
		 */
		bytes_per_element = 1;
		break;
	}

	return bytesperline / bytes_per_element;
}
/*
 * Convert a v4l2_framebuffer (user-space data at arg->base) into a newly
 * allocated ia_css_frame, bouncing the pixel data through a vmalloc
 * buffer into ISP memory via hmm_store(). On success *result owns the
 * frame; on failure the frame is freed and an error code is returned.
 */
static int
atomisp_v4l2_framebuffer_to_css_frame(const struct v4l2_framebuffer *arg,
				      struct ia_css_frame **result)
{
	struct ia_css_frame *res = NULL;
	unsigned int padded_width;
	enum ia_css_frame_format sh_format;
	char *tmp_buf = NULL;
	int ret = 0;

	sh_format = v4l2_fmt_to_sh_fmt(arg->fmt.pixelformat);
	padded_width = atomisp_bytesperline_to_padded_width(
			   arg->fmt.bytesperline, sh_format);

	/* Note: the padded width on an ia_css_frame is in elements, not in
	   bytes. The RAW frame we use here should always be a 16bit RAW
	   frame. This is why we bytesperline/2 is equal to the padded with */
	if (ia_css_frame_allocate(&res, arg->fmt.width, arg->fmt.height,
				  sh_format, padded_width, 0)) {
		ret = -ENOMEM;
		goto err;
	}

	/* Bounce buffer: copy_from_user cannot target ISP memory directly */
	tmp_buf = vmalloc(arg->fmt.sizeimage);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto err;
	}
	if (copy_from_user(tmp_buf, (void __user __force *)arg->base,
			   arg->fmt.sizeimage)) {
		ret = -EFAULT;
		goto err;
	}

	if (hmm_store(res->data, tmp_buf, arg->fmt.sizeimage)) {
		ret = -EINVAL;
		goto err;
	}

err:
	/* On any failure release the frame; the bounce buffer always goes */
	if (ret && res)
		ia_css_frame_free(res);
	vfree(tmp_buf);
	if (ret == 0)
		*result = res;
	return ret;
}
/*
* Function to configure fixed pattern noise table
*/
/*
 * Upload a raw black reference frame (for fixed pattern noise
 * correction) to CSS. The frame data comes from the user-provided
 * v4l2_framebuffer in @arg.
 *
 * Returns 0 on success, -EINVAL on bad input, -ENOMEM when conversion
 * or sh_css_set_black_frame() fails.
 */
int atomisp_fixed_pattern_table(struct atomisp_sub_device *asd,
				struct v4l2_framebuffer *arg)
{
	struct ia_css_frame *raw_black_frame = NULL;
	int ret;

	if (!arg)
		return -EINVAL;

	ret = atomisp_v4l2_framebuffer_to_css_frame(arg, &raw_black_frame);
	if (ret)
		return ret;

	if (sh_css_set_black_frame(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
				   raw_black_frame) != 0)
		ret = -ENOMEM;

	/*
	 * Bug fix: the frame used to be leaked when
	 * sh_css_set_black_frame() failed, because the early return
	 * skipped ia_css_frame_free(). Free it on all paths.
	 */
	ia_css_frame_free(raw_black_frame);
	return ret;
}
/*
* Function to configure false color correction
*/
/*
 * Get (flag == 0) or set the false color correction enable flag.
 * Always succeeds.
 */
int atomisp_false_color(struct atomisp_sub_device *asd, int flag,
			__s32 *value)
{
	if (flag == 0) {
		/* Report the cached enable state. */
		*value = asd->params.false_color;
		return 0;
	}

	if (!*value) {
		/* Disabled: force pixelnoise to 0 via a de_config override */
		asd->params.css_param.de_config.pixelnoise = 0;
		asd->params.config.de_config = &asd->params.css_param.de_config;
	} else {
		/* Enabled: let CSS fall back to its default demosaic config */
		asd->params.config.de_config = NULL;
	}

	asd->params.css_update_params_needed = true;
	asd->params.false_color = *value;
	return 0;
}
/*
 * Function to configure false color correction params
 */
/*
 * Get (flag == 0) or set the false color (demosaic) parameters.
 * Returns 0 on success, -EINVAL when no current config is available.
 */
int atomisp_false_color_param(struct atomisp_sub_device *asd, int flag,
			      struct atomisp_de_config *config)
{
	if (flag == 0) {
		/* Read the demosaic config from the current setup. */
		return atomisp_css_get_de_config(asd, config) ? -EINVAL : 0;
	}

	/* Cache the new config and schedule it for the next CSS update. */
	memcpy(&asd->params.css_param.de_config, config,
	       sizeof(asd->params.css_param.de_config));
	asd->params.config.de_config = &asd->params.css_param.de_config;
	asd->params.css_update_params_needed = true;

	return 0;
}
/*
* Function to configure white balance params
*/
/*
 * Get (flag == 0) or set the white balance parameters.
 * Returns 0 on success, -EINVAL when no current config is available.
 */
int atomisp_white_balance_param(struct atomisp_sub_device *asd, int flag,
				struct atomisp_wb_config *config)
{
	if (flag == 0) {
		/* Read the white balance config from the current setup. */
		return atomisp_css_get_wb_config(asd, config) ? -EINVAL : 0;
	}

	/* Cache the new config and schedule it for the next CSS update. */
	memcpy(&asd->params.css_param.wb_config, config,
	       sizeof(asd->params.css_param.wb_config));
	asd->params.config.wb_config = &asd->params.css_param.wb_config;
	asd->params.css_update_params_needed = true;

	return 0;
}
/*
 * Get (flag == 0) or set the 3A statistics configuration.
 * Returns 0 on success, -EINVAL when no current config is available.
 */
int atomisp_3a_config_param(struct atomisp_sub_device *asd, int flag,
			    struct atomisp_3a_config *config)
{
	struct atomisp_device *isp = asd->isp;

	dev_dbg(isp->dev, ">%s %d\n", __func__, flag);

	if (flag == 0) {
		/* Read the 3A config from the current setup. */
		if (atomisp_css_get_3a_config(asd, config))
			return -EINVAL;
	} else {
		/* Cache the new 3A config and schedule a CSS update. */
		memcpy(&asd->params.css_param.s3a_config, config,
		       sizeof(asd->params.css_param.s3a_config));
		asd->params.config.s3a_config = &asd->params.css_param.s3a_config;
		asd->params.css_update_params_needed = true;
	}

	dev_dbg(isp->dev, "<%s %d\n", __func__, flag);
	return 0;
}
/*
* Function to setup digital zoom
*/
/*
 * Get (flag == 0) or set the digital zoom level.
 *
 * User space works with an inverted scale: value 0 means no zoom, so
 * the CSS zoom factor is (max_zoom - value), clamped so it stays >= 1.
 */
int atomisp_digital_zoom(struct atomisp_sub_device *asd, int flag,
			 __s32 *value)
{
	struct atomisp_device *isp = asd->isp;
	unsigned int max_zoom = MRFLD_MAX_ZOOM_FACTOR;
	u32 factor;

	if (flag == 0) {
		/* Convert the CSS zoom factor back to the user scale. */
		atomisp_css_get_zoom_factor(asd, &factor);
		*value = max_zoom - factor;
		return 0;
	}

	if (*value < 0)
		return -EINVAL;

	factor = max_zoom - min_t(u32, max_zoom - 1, *value);
	atomisp_css_set_zoom_factor(asd, factor);
	dev_dbg(isp->dev, "%s, zoom: %d\n", __func__, factor);
	asd->params.css_update_params_needed = true;
	return 0;
}
/*
 * Mirror the sensor-reported input stream info (virtual channel id and
 * the per-stream isys format/size entries) into the subdevice's
 * stream_env for @stream_index.
 */
static void __atomisp_update_stream_env(struct atomisp_sub_device *asd,
					u16 stream_index, struct atomisp_input_stream_info *stream_info)
{
	int i;

	/* assign virtual channel id return from sensor driver query */
	asd->stream_env[stream_index].ch_id = stream_info->ch_id;
	asd->stream_env[stream_index].isys_configs = stream_info->isys_configs;
	/* Copy each isys config entry field by field. */
	for (i = 0; i < stream_info->isys_configs; i++) {
		asd->stream_env[stream_index].isys_info[i].input_format =
		    stream_info->isys_info[i].input_format;
		asd->stream_env[stream_index].isys_info[i].width =
		    stream_info->isys_info[i].width;
		asd->stream_env[stream_index].isys_info[i].height =
		    stream_info->isys_info[i].height;
	}
}
/*
 * Initialize @stream_info to its enabled-but-empty default state for
 * @stream_index: channel 0, no isys configs, and all per-slot format
 * and size fields cleared.
 */
static void __atomisp_init_stream_info(u16 stream_index,
				       struct atomisp_input_stream_info *stream_info)
{
	int slot;

	stream_info->enable = 1;
	stream_info->stream = stream_index;
	stream_info->ch_id = 0;
	stream_info->isys_configs = 0;

	for (slot = 0; slot < MAX_STREAMS_PER_CHANNEL; slot++) {
		stream_info->isys_info[slot].input_format = 0;
		stream_info->isys_info[slot].width = 0;
		stream_info->isys_info[slot].height = 0;
	}
}
/*
 * Fill in a v4l2_pix_format for the given visible resolution and bridge
 * format: bytesperline/sizeimage are derived from the CSS-padded width,
 * field defaults to NONE and the colorspace is fixed to REC709.
 */
static void atomisp_fill_pix_format(struct v4l2_pix_format *f,
				    u32 width, u32 height,
				    const struct atomisp_format_bridge *br_fmt)
{
	u32 padded_width, line_bytes;

	f->width = width;
	f->height = height;
	f->pixelformat = br_fmt->pixelformat;

	/* bytesperline is based on the CSS-padded width, not the visible one */
	padded_width = ia_css_frame_pad_width(width, br_fmt->sh_fmt);
	line_bytes = BITS_TO_BYTES(br_fmt->depth * padded_width);

	/* For planar formats bytesperline is the width of a single plane. */
	f->bytesperline = br_fmt->planar ? padded_width : line_bytes;
	f->sizeimage = PAGE_ALIGN(height * line_bytes);

	if (f->field == V4L2_FIELD_ANY)
		f->field = V4L2_FIELD_NONE;

	/*
	 * FIXME: do we need to set this up differently, depending on the
	 * sensor or the pipeline?
	 */
	f->colorspace = V4L2_COLORSPACE_REC709;
	f->ycbcr_enc = V4L2_YCBCR_ENC_709;
	f->xfer_func = V4L2_XFER_FUNC_709;
}
/* Get sensor padding values for the non padded width x height resolution */
void atomisp_get_padding(struct atomisp_device *isp, u32 width, u32 height,
			 u32 *padding_w, u32 *padding_h)
{
	struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
	struct v4l2_rect native_rect = input->native_rect;
	const struct atomisp_in_fmt_conv *fc = NULL;
	u32 min_pad_w = ISP2400_MIN_PAD_W;
	u32 min_pad_h = ISP2400_MIN_PAD_H;
	struct v4l2_mbus_framefmt *sink;

	if (!input->crop_support) {
		/* pad_w / pad_h are file-scope defaults defined elsewhere */
		*padding_w = pad_w;
		*padding_h = pad_h;
		return;
	}

	width = min(width, input->active_rect.width);
	height = min(height, input->active_rect.height);

	/* Sensor binning halves the effective native resolution. */
	if (input->binning_support && width <= (input->active_rect.width / 2) &&
	    height <= (input->active_rect.height / 2)) {
		native_rect.width /= 2;
		native_rect.height /= 2;
	}

	/* Even-sized padding, capped by what the sensor can deliver. */
	*padding_w = min_t(u32, (native_rect.width - width) & ~1, pad_w);
	*padding_h = min_t(u32, (native_rect.height - height) & ~1, pad_h);

	/* The below minimum padding requirements are for BYT / ISP2400 only */
	if (IS_ISP2401)
		return;

	sink = atomisp_subdev_get_ffmt(&isp->asd.subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
				       ATOMISP_SUBDEV_PAD_SINK);
	if (sink)
		fc = atomisp_find_in_fmt_conv(sink->code);
	if (!fc) {
		dev_warn(isp->dev, "%s: Could not get sensor format\n", __func__);
		goto apply_min_padding;
	}

	/*
	 * The ISP only supports GRBG for other bayer-orders additional padding
	 * is used so that the raw sensor data can be cropped to fix the order.
	 */
	if (fc->bayer_order == IA_CSS_BAYER_ORDER_RGGB ||
	    fc->bayer_order == IA_CSS_BAYER_ORDER_GBRG)
		min_pad_w += 2;

	if (fc->bayer_order == IA_CSS_BAYER_ORDER_BGGR ||
	    fc->bayer_order == IA_CSS_BAYER_ORDER_GBRG)
		min_pad_h += 2;

apply_min_padding:
	*padding_w = max_t(u32, *padding_w, min_pad_w);
	*padding_h = max_t(u32, *padding_h, min_pad_h);
}
/*
 * Program a centered sensor crop rectangle matching @format's size via
 * the subdev selection API. @which selects TRY vs ACTIVE state. A no-op
 * when the sensor does not support cropping. Returns 0 or the subdev
 * call's error code.
 */
static int atomisp_set_crop(struct atomisp_device *isp,
			    const struct v4l2_mbus_framefmt *format,
			    int which)
{
	struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
	struct v4l2_subdev_state pad_state = {
		.pads = &input->pad_cfg,
	};
	struct v4l2_subdev_selection sel = {
		.which = which,
		.target = V4L2_SEL_TGT_CROP,
		.r.width = format->width,
		.r.height = format->height,
	};
	int ret;

	if (!input->crop_support)
		return 0;

	/* Cropping is done before binning, when binning double the crop rect */
	if (input->binning_support && sel.r.width <= (input->native_rect.width / 2) &&
	    sel.r.height <= (input->native_rect.height / 2)) {
		sel.r.width *= 2;
		sel.r.height *= 2;
	}

	/* Clamp to avoid top/left calculations overflowing */
	sel.r.width = min(sel.r.width, input->native_rect.width);
	sel.r.height = min(sel.r.height, input->native_rect.height);

	/* Center the crop rectangle on even coordinates. */
	sel.r.left = ((input->native_rect.width - sel.r.width) / 2) & ~1;
	sel.r.top = ((input->native_rect.height - sel.r.height) / 2) & ~1;

	ret = v4l2_subdev_call(input->camera, pad, set_selection, &pad_state, &sel);
	if (ret)
		dev_err(isp->dev, "Error setting crop to %ux%u @%ux%u: %d\n",
			sel.r.width, sel.r.height, sel.r.left, sel.r.top, ret);

	return ret;
}
/* This function looks up the closest available resolution. */
/*
 * Negotiate @f with the current sensor: add the ISP padding, run a TRY
 * crop + set_fmt on the sensor, strip the padding again and clamp the
 * result to the ISP's limits. On success optionally returns the
 * requested bridge format (*fmt_ret) and the bridge format the sensor
 * actually produces (*snr_fmt_ret).
 */
int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
		    const struct atomisp_format_bridge **fmt_ret,
		    const struct atomisp_format_bridge **snr_fmt_ret)
{
	const struct atomisp_format_bridge *fmt, *snr_fmt;
	struct atomisp_sub_device *asd = &isp->asd;
	struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
	struct v4l2_subdev_state pad_state = {
		.pads = &input->pad_cfg,
	};
	struct v4l2_subdev_format format = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	u32 padding_w, padding_h;
	int ret;

	if (!input->camera)
		return -EINVAL;

	fmt = atomisp_get_format_bridge(f->pixelformat);
	/* Currently, raw formats are broken!!! */
	if (!fmt || fmt->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) {
		/* Fall back to YUV420 for unsupported/raw requests. */
		f->pixelformat = V4L2_PIX_FMT_YUV420;

		fmt = atomisp_get_format_bridge(f->pixelformat);
		if (!fmt)
			return -EINVAL;
	}

	/*
	 * atomisp_set_fmt() will set the sensor resolution to the requested
	 * resolution + padding. Add padding here and remove it again after
	 * the set_fmt call, like atomisp_set_fmt_to_snr() does.
	 */
	atomisp_get_padding(isp, f->width, f->height, &padding_w, &padding_h);
	v4l2_fill_mbus_format(&format.format, f, fmt->mbus_code);
	format.format.width += padding_w;
	format.format.height += padding_h;

	dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n",
		format.format.width, format.format.height);

	ret = atomisp_set_crop(isp, &format.format, V4L2_SUBDEV_FORMAT_TRY);
	if (ret)
		return ret;

	ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &format);
	if (ret)
		return ret;

	dev_dbg(isp->dev, "try_mbus_fmt: got %ux%u\n",
		format.format.width, format.format.height);

	/* Map the media bus code the sensor chose back to a bridge format */
	snr_fmt = atomisp_get_format_bridge_from_mbus(format.format.code);
	if (!snr_fmt) {
		dev_err(isp->dev, "unknown sensor format 0x%8.8x\n",
			format.format.code);
		return -EINVAL;
	}

	f->width = format.format.width - padding_w;
	f->height = format.format.height - padding_h;

	/*
	 * If the format is jpeg or custom RAW, then the width and height will
	 * not satisfy the normal atomisp requirements and no need to check
	 * the below conditions. So just assign to what is being returned from
	 * the sensor driver.
	 */
	if (f->pixelformat == V4L2_PIX_FMT_JPEG ||
	    f->pixelformat == V4L2_PIX_FMT_CUSTOM_M10MO_RAW)
		goto out_fill_pix_format;

	/* app vs isp */
	f->width = rounddown(clamp_t(u32, f->width, ATOM_ISP_MIN_WIDTH,
				     ATOM_ISP_MAX_WIDTH), ATOM_ISP_STEP_WIDTH);
	f->height = rounddown(clamp_t(u32, f->height, ATOM_ISP_MIN_HEIGHT,
				      ATOM_ISP_MAX_HEIGHT), ATOM_ISP_STEP_HEIGHT);

out_fill_pix_format:
	atomisp_fill_pix_format(f, f->width, f->height, fmt);

	if (fmt_ret)
		*fmt_ret = fmt;

	if (snr_fmt_ret)
		*snr_fmt_ret = snr_fmt;

	return 0;
}
/*
 * Map an atomisp camera port to the corresponding MIPI CSI port id.
 * Unknown ports are logged and fall back to MIPI_PORT0_ID.
 */
enum mipi_port_id atomisp_port_to_mipi_port(struct atomisp_device *isp,
					    enum atomisp_camera_port port)
{
	if (port == ATOMISP_CAMERA_PORT_PRIMARY)
		return MIPI_PORT0_ID;
	if (port == ATOMISP_CAMERA_PORT_SECONDARY)
		return MIPI_PORT1_ID;
	if (port == ATOMISP_CAMERA_PORT_TERTIARY)
		return MIPI_PORT2_ID;

	dev_err(isp->dev, "unsupported port: %d\n", port);
	return MIPI_PORT0_ID;
}
/*
 * Program the CSS input path (input format, bayer order, CSI-2 port
 * configuration) for one input stream, using the sensor's MIPI info when
 * available or falling back to the subdev sink pad format.
 *
 * Returns 0 on success, -EINVAL when no format-conversion table entry
 * matches the requested format.
 */
static inline int atomisp_set_sensor_mipi_to_isp(
    struct atomisp_sub_device *asd,
    enum atomisp_input_stream_id stream_id,
    struct camera_mipi_info *mipi_info)
{
	struct v4l2_control ctrl;
	struct atomisp_device *isp = asd->isp;
	struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
	const struct atomisp_in_fmt_conv *fc;
	int mipi_freq = 0;
	unsigned int input_format, bayer_order;
	enum atomisp_input_format metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
	u32 mipi_port, metadata_width = 0, metadata_height = 0;

	/* Query the sensor's link frequency; stays 0 if the control fails. */
	ctrl.id = V4L2_CID_LINK_FREQ;
	if (v4l2_g_ctrl(input->camera->ctrl_handler, &ctrl) == 0)
		mipi_freq = ctrl.value;

	if (asd->stream_env[stream_id].isys_configs == 1) {
		/* Single ISYS stream: only the input format is updated. */
		input_format =
		    asd->stream_env[stream_id].isys_info[0].input_format;
		atomisp_css_isys_set_format(asd, stream_id,
					    input_format, IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
	} else if (asd->stream_env[stream_id].isys_configs == 2) {
		/* Dual ISYS streams: update format and size of both. */
		atomisp_css_isys_two_stream_cfg_update_stream1(
		    asd, stream_id,
		    asd->stream_env[stream_id].isys_info[0].input_format,
		    asd->stream_env[stream_id].isys_info[0].width,
		    asd->stream_env[stream_id].isys_info[0].height);

		atomisp_css_isys_two_stream_cfg_update_stream2(
		    asd, stream_id,
		    asd->stream_env[stream_id].isys_info[1].input_format,
		    asd->stream_env[stream_id].isys_info[1].width,
		    asd->stream_env[stream_id].isys_info[1].height);
	}

	/* Compatibility for sensors which provide no media bus code
	 * in s_mbus_framefmt() nor support pad formats. */
	if (mipi_info && mipi_info->input_format != -1) {
		bayer_order = mipi_info->raw_bayer_order;

		/* Input stream config is still needs configured */
		/* TODO: Check if this is necessary */
		fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
			 mipi_info->input_format);
		if (!fc)
			return -EINVAL;
		input_format = fc->atomisp_in_fmt;
		metadata_format = mipi_info->metadata_format;
		metadata_width = mipi_info->metadata_width;
		metadata_height = mipi_info->metadata_height;
	} else {
		/* Fall back to the format configured on the sink pad. */
		struct v4l2_mbus_framefmt *sink;

		sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
					       V4L2_SUBDEV_FORMAT_ACTIVE,
					       ATOMISP_SUBDEV_PAD_SINK);
		fc = atomisp_find_in_fmt_conv(sink->code);
		if (!fc)
			return -EINVAL;
		input_format = fc->atomisp_in_fmt;
		bayer_order = fc->bayer_order;
	}

	atomisp_css_input_set_format(asd, stream_id, input_format);
	atomisp_css_input_set_bayer_order(asd, stream_id, bayer_order);

	/*
	 * NOTE: from here on, input_format is reused to hold the converted
	 * *metadata* format, which is what the port configuration consumes.
	 */
	fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(metadata_format);
	if (!fc)
		return -EINVAL;
	input_format = fc->atomisp_in_fmt;

	mipi_port = atomisp_port_to_mipi_port(isp, input->port);
	/* 0xffff4: magic receiver setting carried over - TODO confirm meaning */
	atomisp_css_input_configure_port(asd, mipi_port,
					 isp->sensor_lanes[mipi_port],
					 0xffff4, mipi_freq,
					 input_format,
					 metadata_width, metadata_height);
	return 0;
}
/*
 * No-op stand-in matching the configure_pp_input callback signature,
 * used by pipes that have no post-processing input stage. Always succeeds.
 */
static int configure_pp_input_nop(struct atomisp_sub_device *asd,
				  unsigned int width, unsigned int height)
{
	return 0;
}
/*
 * No-op stand-in matching the configure_output callback signature,
 * used when the selected pipe needs no output configuration. Always succeeds.
 */
static int configure_output_nop(struct atomisp_sub_device *asd,
				unsigned int width, unsigned int height,
				unsigned int min_width,
				enum ia_css_frame_format sh_fmt)
{
	return 0;
}
/*
 * No-op stand-in matching the get_frame_info callback signature.
 * Leaves *finfo untouched and always succeeds.
 */
static int get_frame_info_nop(struct atomisp_sub_device *asd,
			      struct ia_css_frame_info *finfo)
{
	return 0;
}
/*
 * Resets CSS parameters that depend on input resolution.
 *
 * Update params like CSS RAW binning, 2ppc mode and pp_input
 * which depend on input size, but are not automatically
 * handled in CSS when the input resolution is changed.
 */
static int css_input_resolution_changed(struct atomisp_sub_device *asd,
					struct v4l2_mbus_framefmt *ffmt)
{
	struct atomisp_metadata_buf *md_buf = NULL, *_md_buf;
	unsigned int i;

	dev_dbg(asd->isp->dev, "css_input_resolution_changed to %ux%u\n",
		ffmt->width, ffmt->height);

	/* Two-pixels-per-clock is enabled only on pre-2401 hardware. */
	if (IS_ISP2401)
		atomisp_css_input_set_two_pixels_per_clock(asd, false);
	else
		atomisp_css_input_set_two_pixels_per_clock(asd, true);

	/*
	 * If sensor input changed, which means metadata resolution changed
	 * together. Release all metadata buffers here to let it re-allocated
	 * next time in reqbufs.
	 */
	for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
		list_for_each_entry_safe(md_buf, _md_buf, &asd->metadata[i],
					 list) {
			atomisp_css_free_metadata_buffer(md_buf);
			list_del(&md_buf->list);
			kfree(md_buf);
		}
	}

	return 0;

	/*
	 * TODO: atomisp_css_preview_configure_pp_input() not
	 * reset due to CSS bug tracked as PSI BZ 115124
	 */
}
/*
 * Configure the CSS pipeline for the requested pixel format and retrieve
 * the resulting output frame info.
 *
 * Selects the pipe (copy/video/preview/capture) and the matching
 * configure-output / get-frame-info / configure-pp-input callbacks,
 * programs the sensor MIPI settings and the viewfinder, then applies the
 * output configuration. Returns 0 on success or a negative errno.
 */
static int atomisp_set_fmt_to_isp(struct video_device *vdev,
				  struct ia_css_frame_info *output_info,
				  const struct v4l2_pix_format *pix)
{
	struct camera_mipi_info *mipi_info;
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
	struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
	const struct atomisp_format_bridge *format;
	struct v4l2_rect *isp_sink_crop;
	enum ia_css_pipe_id pipe_id;
	/* Default to no-op callbacks; overridden per run-mode below. */
	int (*configure_output)(struct atomisp_sub_device *asd,
				unsigned int width, unsigned int height,
				unsigned int min_width,
				enum ia_css_frame_format sh_fmt) =
				    configure_output_nop;
	int (*get_frame_info)(struct atomisp_sub_device *asd,
			      struct ia_css_frame_info *finfo) =
				  get_frame_info_nop;
	int (*configure_pp_input)(struct atomisp_sub_device *asd,
				  unsigned int width, unsigned int height) =
				      configure_pp_input_nop;
	const struct atomisp_in_fmt_conv *fc = NULL;
	int ret, i;

	isp_sink_crop = atomisp_subdev_get_rect(
			    &asd->subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
			    ATOMISP_SUBDEV_PAD_SINK, V4L2_SEL_TGT_CROP);

	format = atomisp_get_format_bridge(pix->pixelformat);
	if (!format)
		return -EINVAL;

	if (input->type != TEST_PATTERN) {
		mipi_info = atomisp_to_sensor_mipi_info(input->camera);

		if (atomisp_set_sensor_mipi_to_isp(asd, ATOMISP_INPUT_STREAM_GENERAL,
						   mipi_info))
			return -EINVAL;

		/* Prefer the MIPI info's format, else the sink pad format. */
		if (mipi_info)
			fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt(mipi_info->input_format);

		if (!fc)
			fc = atomisp_find_in_fmt_conv(
				 atomisp_subdev_get_ffmt(&asd->subdev,
							 NULL, V4L2_SUBDEV_FORMAT_ACTIVE,
							 ATOMISP_SUBDEV_PAD_SINK)->code);
		if (!fc)
			return -EINVAL;

		/*
		 * NOTE(review): despite its name, a truthy result from
		 * raw_output_format_match_input() rejects the format here -
		 * confirm against the helper's definition.
		 */
		if (format->sh_fmt == IA_CSS_FRAME_FORMAT_RAW &&
		    raw_output_format_match_input(fc->atomisp_in_fmt,
						  pix->pixelformat))
			return -EINVAL;
	}

	/*
	 * Configure viewfinder also when vfpp is disabled: the
	 * CSS still requires viewfinder configuration.
	 */
	{
		u32 width, height;

		/* Viewfinder is capped at 640x480. */
		if (pix->width < 640 || pix->height < 480) {
			width = pix->width;
			height = pix->height;
		} else {
			width = 640;
			height = 480;
		}

		if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
		    asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
			atomisp_css_video_configure_viewfinder(asd, width, height, 0,
							       IA_CSS_FRAME_FORMAT_NV12);
		} else if (asd->run_mode->val == ATOMISP_RUN_MODE_STILL_CAPTURE ||
			   asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
			atomisp_css_capture_configure_viewfinder(asd, width, height, 0,
								 IA_CSS_FRAME_FORMAT_NV12);
		}
	}

	atomisp_css_input_set_mode(asd, IA_CSS_INPUT_MODE_BUFFERED_SENSOR);

	/* vf_pp stays disabled unless it was explicitly enabled. */
	for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
		asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipe_extra_configs[i].disable_vf_pp = asd->vfpp->val != ATOMISP_VFPP_ENABLE;

	/* ISP2401 new input system need to use copy pipe */
	if (asd->copy_mode) {
		pipe_id = IA_CSS_PIPE_ID_COPY;
		atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL, false);
	} else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
		/* video same in continuouscapture and online modes */
		configure_output = atomisp_css_video_configure_output;
		get_frame_info = atomisp_css_video_get_output_frame_info;
		pipe_id = IA_CSS_PIPE_ID_VIDEO;
	} else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
		configure_output = atomisp_css_video_configure_output;
		get_frame_info = atomisp_css_video_get_output_frame_info;
		pipe_id = IA_CSS_PIPE_ID_VIDEO;
	} else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
		configure_output = atomisp_css_preview_configure_output;
		get_frame_info = atomisp_css_preview_get_output_frame_info;
		configure_pp_input = atomisp_css_preview_configure_pp_input;
		pipe_id = IA_CSS_PIPE_ID_PREVIEW;
	} else {
		/* Still-capture (default) case. */
		if (format->sh_fmt == IA_CSS_FRAME_FORMAT_RAW) {
			/* RAW capture: raw mode with digital zoom disabled. */
			atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_RAW);
			atomisp_css_enable_dz(asd, false);
		} else {
			atomisp_update_capture_mode(asd);
		}

		/* in case of ANR, force capture pipe to offline mode */
		atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL,
						  !asd->params.low_light);

		configure_output = atomisp_css_capture_configure_output;
		get_frame_info = atomisp_css_capture_get_output_frame_info;
		configure_pp_input = atomisp_css_capture_configure_pp_input;
		pipe_id = IA_CSS_PIPE_ID_CAPTURE;

		if (asd->run_mode->val != ATOMISP_RUN_MODE_STILL_CAPTURE) {
			dev_err(isp->dev,
				"Need to set the running mode first\n");
			asd->run_mode->val = ATOMISP_RUN_MODE_STILL_CAPTURE;
		}
	}

	/*
	 * min_width argument: planar formats already express bytesperline
	 * in pixels; packed formats convert bytes to pixels via depth.
	 */
	if (asd->copy_mode)
		ret = atomisp_css_copy_configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL,
							pix->width, pix->height,
							format->planar ? pix->bytesperline :
							pix->bytesperline * 8 / format->depth,
							format->sh_fmt);
	else
		ret = configure_output(asd, pix->width, pix->height,
				       format->planar ? pix->bytesperline :
				       pix->bytesperline * 8 / format->depth,
				       format->sh_fmt);
	if (ret) {
		dev_err(isp->dev, "configure_output %ux%u, format %8.8x\n",
			pix->width, pix->height, format->sh_fmt);
		return -EINVAL;
	}

	ret = configure_pp_input(asd, isp_sink_crop->width, isp_sink_crop->height);
	if (ret) {
		dev_err(isp->dev, "configure_pp_input %ux%u\n",
			isp_sink_crop->width,
			isp_sink_crop->height);
		return -EINVAL;
	}

	if (asd->copy_mode)
		ret = atomisp_css_copy_get_output_frame_info(asd,
							     ATOMISP_INPUT_STREAM_GENERAL,
							     output_info);
	else
		ret = get_frame_info(asd, output_info);
	if (ret) {
		dev_err(isp->dev, "__get_frame_info %ux%u (padded to %u) returned %d\n",
			pix->width, pix->height, pix->bytesperline, ret);
		return ret;
	}

	atomisp_update_grid_info(asd, pipe_id);
	return 0;
}
static void atomisp_get_dis_envelop(struct atomisp_sub_device *asd,
unsigned int width, unsigned int height,
unsigned int *dvs_env_w, unsigned int *dvs_env_h)
{
if (asd->params.video_dis_en &&
asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
/* envelope is 20% of the output resolution */
/*
* dvs envelope cannot be round up.
* it would cause ISP timeout and color switch issue
*/
*dvs_env_w = rounddown(width / 5, ATOM_ISP_STEP_WIDTH);
*dvs_env_h = rounddown(height / 5, ATOM_ISP_STEP_HEIGHT);
}
asd->params.dis_proj_data_valid = false;
asd->params.css_update_params_needed = true;
}
/*
 * Decide whether the ISP can run in copy mode for the requested format.
 * Copy mode is only used with the ISP2401 (new) input system and only
 * when the ISP performs neither format conversion nor scaling.
 */
static void atomisp_check_copy_mode(struct atomisp_sub_device *asd,
				    const struct v4l2_pix_format *f)
{
	struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;

	if (!IS_ISP2401) {
		/* Only used for the new input system */
		asd->copy_mode = false;
		return;
	}

	sink_fmt = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
					   V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK);
	src_fmt = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
					  V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SOURCE);

	/* Same mbus code on both pads and no resize => plain copy. */
	asd->copy_mode = sink_fmt->code == src_fmt->code &&
			 sink_fmt->width == f->width &&
			 sink_fmt->height == f->height;

	dev_dbg(asd->isp->dev, "copy_mode: %d\n", asd->copy_mode);
}
/*
 * Negotiate and apply the sensor format for the requested pixel format,
 * adding sink-pad padding and the DVS envelope to the requested size.
 * Disables video stabilization (DIS) when the sensor cannot deliver the
 * enlarged resolution. Returns 0 on success or a negative errno.
 */
static int atomisp_set_fmt_to_snr(struct video_device *vdev, const struct v4l2_pix_format *f,
				  unsigned int dvs_env_w, unsigned int dvs_env_h)
{
	struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
	struct atomisp_sub_device *asd = pipe->asd;
	struct atomisp_device *isp = asd->isp;
	struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
	const struct atomisp_format_bridge *format;
	struct v4l2_subdev_state pad_state = {
		.pads = &input->pad_cfg,
	};
	struct v4l2_subdev_format vformat = {
		.which = V4L2_SUBDEV_FORMAT_TRY,
	};
	struct v4l2_mbus_framefmt *ffmt = &vformat.format;
	struct v4l2_mbus_framefmt *req_ffmt;
	/* Driver-private per-stream info travels in the reserved bytes. */
	struct atomisp_input_stream_info *stream_info =
	    (struct atomisp_input_stream_info *)ffmt->reserved;
	int ret;

	format = atomisp_get_format_bridge(f->pixelformat);
	if (!format)
		return -EINVAL;

	v4l2_fill_mbus_format(ffmt, f, format->mbus_code);
	/* Ask the sensor for extra pixels: padding plus DVS envelope. */
	ffmt->height += asd->sink_pad_padding_h + dvs_env_h;
	ffmt->width += asd->sink_pad_padding_w + dvs_env_w;

	dev_dbg(isp->dev, "s_mbus_fmt: ask %ux%u (padding %ux%u, dvs %ux%u)\n",
		ffmt->width, ffmt->height, asd->sink_pad_padding_w, asd->sink_pad_padding_h,
		dvs_env_w, dvs_env_h);

	__atomisp_init_stream_info(ATOMISP_INPUT_STREAM_GENERAL, stream_info);

	/*
	 * NOTE(review): req_ffmt aliases ffmt here, so the width/height
	 * comparisons below compare a value with itself and can never be
	 * true; the DIS-disable fallback looks unreachable - verify.
	 */
	req_ffmt = ffmt;

	/* Disable dvs if resolution can't be supported by sensor */
	if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
		ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_TRY);
		if (ret)
			return ret;

		vformat.which = V4L2_SUBDEV_FORMAT_TRY;
		ret = v4l2_subdev_call(input->camera, pad, set_fmt, &pad_state, &vformat);
		if (ret)
			return ret;

		dev_dbg(isp->dev, "video dis: sensor width: %d, height: %d\n",
			ffmt->width, ffmt->height);

		if (ffmt->width < req_ffmt->width ||
		    ffmt->height < req_ffmt->height) {
			/* Drop the DVS envelope and turn stabilization off. */
			req_ffmt->height -= dvs_env_h;
			req_ffmt->width -= dvs_env_w;
			ffmt = req_ffmt;
			dev_warn(isp->dev,
				 "can not enable video dis due to sensor limitation.");
			asd->params.video_dis_en = false;
		}
	}

	ret = atomisp_set_crop(isp, &vformat.format, V4L2_SUBDEV_FORMAT_ACTIVE);
	if (ret)
		return ret;

	/* Commit the negotiated format to the sensor. */
	vformat.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(input->camera, pad, set_fmt, NULL, &vformat);
	if (ret)
		return ret;

	__atomisp_update_stream_env(asd, ATOMISP_INPUT_STREAM_GENERAL, stream_info);

	dev_dbg(isp->dev, "sensor width: %d, height: %d\n",
		ffmt->width, ffmt->height);

	if (ffmt->width < ATOM_ISP_STEP_WIDTH ||
	    ffmt->height < ATOM_ISP_STEP_HEIGHT)
		return -EINVAL;

	if (asd->params.video_dis_en && asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
	    (ffmt->width < req_ffmt->width || ffmt->height < req_ffmt->height)) {
		dev_warn(isp->dev,
			 "can not enable video dis due to sensor limitation.");
		asd->params.video_dis_en = false;
	}

	atomisp_subdev_set_ffmt(&asd->subdev, NULL,
				V4L2_SUBDEV_FORMAT_ACTIVE,
				ATOMISP_SUBDEV_PAD_SINK, ffmt);

	return css_input_resolution_changed(asd, ffmt);
}
/*
 * V4L2 S_FMT handler: validate the requested format, propagate it to the
 * sensor and the ISP, configure the crop/compose rectangles, then fill in
 * the resulting pix format for userspace.
 */
int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f)
{
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
	struct atomisp_sub_device *asd = pipe->asd;
	const struct atomisp_format_bridge *format_bridge;
	const struct atomisp_format_bridge *snr_format_bridge;
	struct ia_css_frame_info output_info;
	unsigned int dvs_env_w = 0, dvs_env_h = 0;
	struct v4l2_mbus_framefmt isp_source_fmt = {0};
	struct v4l2_rect isp_sink_crop;
	int ret;

	ret = atomisp_pipe_check(pipe, true);
	if (ret)
		return ret;

	dev_dbg(isp->dev,
		"setting resolution %ux%u bytesperline %u\n",
		f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.bytesperline);

	/* Ensure that the resolution is equal or below the maximum supported */
	ret = atomisp_try_fmt(isp, &f->fmt.pix, &format_bridge, &snr_format_bridge);
	if (ret)
		return ret;

	pipe->sh_fmt = format_bridge->sh_fmt;
	pipe->pix.pixelformat = format_bridge->pixelformat;

	/* Propagate the sensor's media-bus code to the subdev sink pad. */
	atomisp_subdev_get_ffmt(&asd->subdev, NULL,
				V4L2_SUBDEV_FORMAT_ACTIVE,
				ATOMISP_SUBDEV_PAD_SINK)->code =
				    snr_format_bridge->mbus_code;

	isp_source_fmt.code = format_bridge->mbus_code;
	atomisp_subdev_set_ffmt(&asd->subdev, NULL,
				V4L2_SUBDEV_FORMAT_ACTIVE,
				ATOMISP_SUBDEV_PAD_SOURCE, &isp_source_fmt);

	/* Sink padding is only needed when the ISP converts the format. */
	if (atomisp_subdev_format_conversion(asd)) {
		atomisp_get_padding(isp, f->fmt.pix.width, f->fmt.pix.height,
				    &asd->sink_pad_padding_w, &asd->sink_pad_padding_h);
	} else {
		asd->sink_pad_padding_w = 0;
		asd->sink_pad_padding_h = 0;
	}

	atomisp_get_dis_envelop(asd, f->fmt.pix.width, f->fmt.pix.height,
				&dvs_env_w, &dvs_env_h);

	ret = atomisp_set_fmt_to_snr(vdev, &f->fmt.pix, dvs_env_w, dvs_env_h);
	if (ret) {
		dev_warn(isp->dev,
			 "Set format to sensor failed with %d\n", ret);
		return -EINVAL;
	}

	atomisp_csi_lane_config(isp);
	atomisp_check_copy_mode(asd, &f->fmt.pix);

	isp_sink_crop = *atomisp_subdev_get_rect(&asd->subdev, NULL,
						 V4L2_SUBDEV_FORMAT_ACTIVE,
						 ATOMISP_SUBDEV_PAD_SINK,
						 V4L2_SEL_TGT_CROP);

	/* Try to enable YUV downscaling if ISP input is 10 % (either
	 * width or height) bigger than the desired result. */
	if (!IS_MOFD ||
	    isp_sink_crop.width * 9 / 10 < f->fmt.pix.width ||
	    isp_sink_crop.height * 9 / 10 < f->fmt.pix.height ||
	    (atomisp_subdev_format_conversion(asd) &&
	     (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO ||
	      asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER))) {
		/* No downscaling: compose exactly the requested size. */
		isp_sink_crop.width = f->fmt.pix.width;
		isp_sink_crop.height = f->fmt.pix.height;

		atomisp_subdev_set_selection(&asd->subdev, NULL,
					     V4L2_SUBDEV_FORMAT_ACTIVE,
					     ATOMISP_SUBDEV_PAD_SOURCE, V4L2_SEL_TGT_COMPOSE,
					     0, &isp_sink_crop);
	} else {
		/* Downscale while preserving the requested aspect ratio. */
		struct v4l2_rect main_compose = {0};

		main_compose.width = isp_sink_crop.width;
		main_compose.height =
		    DIV_ROUND_UP(main_compose.width * f->fmt.pix.height,
				 f->fmt.pix.width);
		if (main_compose.height > isp_sink_crop.height) {
			main_compose.height = isp_sink_crop.height;
			main_compose.width =
			    DIV_ROUND_UP(main_compose.height *
					 f->fmt.pix.width,
					 f->fmt.pix.height);
		}

		atomisp_subdev_set_selection(&asd->subdev, NULL,
					     V4L2_SUBDEV_FORMAT_ACTIVE,
					     ATOMISP_SUBDEV_PAD_SOURCE,
					     V4L2_SEL_TGT_COMPOSE, 0,
					     &main_compose);
	}

	ret = atomisp_set_fmt_to_isp(vdev, &output_info, &f->fmt.pix);
	if (ret) {
		dev_warn(isp->dev, "Can't set format on ISP. Error %d\n", ret);
		return -EINVAL;
	}

	atomisp_fill_pix_format(&pipe->pix, f->fmt.pix.width, f->fmt.pix.height, format_bridge);

	f->fmt.pix = pipe->pix;
	/* priv = page-aligned size of a 2-bytes-per-pixel frame. */
	f->fmt.pix.priv = PAGE_ALIGN(pipe->pix.width *
				     pipe->pix.height * 2);

	dev_dbg(isp->dev, "%s: %dx%d, image size: %d, %d bytes per line\n",
		__func__,
		f->fmt.pix.width, f->fmt.pix.height,
		f->fmt.pix.sizeimage, f->fmt.pix.bytesperline);

	return 0;
}
int atomisp_set_shading_table(struct atomisp_sub_device *asd,
struct atomisp_shading_table *user_shading_table)
{
struct ia_css_shading_table *shading_table;
struct ia_css_shading_table *free_table;
unsigned int len_table;
int i;
int ret = 0;
if (!user_shading_table)
return -EINVAL;
if (!user_shading_table->enable) {
asd->params.config.shading_table = NULL;
asd->params.sc_en = false;
return 0;
}
/* If enabling, all tables must be set */
for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
if (!user_shading_table->data[i])
return -EINVAL;
}
/* Shading table size per color */
if (user_shading_table->width > SH_CSS_MAX_SCTBL_WIDTH_PER_COLOR ||
user_shading_table->height > SH_CSS_MAX_SCTBL_HEIGHT_PER_COLOR)
return -EINVAL;
shading_table = atomisp_css_shading_table_alloc(
user_shading_table->width, user_shading_table->height);
if (!shading_table)
return -ENOMEM;
len_table = user_shading_table->width * user_shading_table->height *
ATOMISP_SC_TYPE_SIZE;
for (i = 0; i < ATOMISP_NUM_SC_COLORS; i++) {
ret = copy_from_user(shading_table->data[i],
(void __user *)user_shading_table->data[i],
len_table);
if (ret) {
free_table = shading_table;
ret = -EFAULT;
goto out;
}
}
shading_table->sensor_width = user_shading_table->sensor_width;
shading_table->sensor_height = user_shading_table->sensor_height;
shading_table->fraction_bits = user_shading_table->fraction_bits;
free_table = asd->params.css_param.shading_table;
asd->params.css_param.shading_table = shading_table;
asd->params.config.shading_table = shading_table;
asd->params.sc_en = true;
out:
if (free_table)
atomisp_css_shading_table_free(free_table);
return ret;
}
int atomisp_flash_enable(struct atomisp_sub_device *asd, int num_frames)
{
struct atomisp_device *isp = asd->isp;
if (num_frames < 0) {
dev_dbg(isp->dev, "%s ERROR: num_frames: %d\n", __func__,
num_frames);
return -EINVAL;
}
/* a requested flash is still in progress. */
if (num_frames && asd->params.flash_state != ATOMISP_FLASH_IDLE) {
dev_dbg(isp->dev, "%s flash busy: %d frames left: %d\n",
__func__, asd->params.flash_state,
asd->params.num_flash_frames);
return -EBUSY;
}
asd->params.num_flash_frames = num_frames;
asd->params.flash_state = ATOMISP_FLASH_REQUESTED;
return 0;
}
/*
 * Validate an exposure id: raw buffer locking must be enabled, the
 * stream must be running, and the id must be in 1..ATOMISP_MAX_EXP_ID.
 * Returns 0 when valid, -EINVAL otherwise.
 */
static int __checking_exp_id(struct atomisp_sub_device *asd, int exp_id)
{
	struct atomisp_device *isp = asd->isp;

	if (!asd->enable_raw_buffer_lock->val) {
		dev_warn(isp->dev, "%s Raw Buffer Lock is disable.\n", __func__);
		return -EINVAL;
	}

	if (!asd->streaming) {
		dev_err(isp->dev, "%s streaming %d invalid exp_id %d.\n",
			__func__, exp_id, asd->streaming);
		return -EINVAL;
	}

	if (exp_id <= 0 || exp_id > ATOMISP_MAX_EXP_ID) {
		dev_err(isp->dev, "%s exp_id %d invalid.\n", __func__, exp_id);
		return -EINVAL;
	}

	return 0;
}
/*
 * Reset the raw-buffer lock bitmap and the locked-buffer count.
 * The bitmap spinlock guards against concurrent lock/unlock updates.
 */
void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd)
{
	unsigned long flags;

	spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
	memset(asd->raw_buffer_bitmap, 0, sizeof(asd->raw_buffer_bitmap));
	asd->raw_buffer_locked_count = 0;
	spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);
}
/*
 * Check whether the raw buffer for exp_id is locked.
 *
 * Note the return value is inverted relative to the name: 0 means the
 * buffer IS locked; non-zero (1 or -EINVAL) means it is not locked or
 * exp_id is invalid. Callers treat any non-zero result as an error.
 */
static int __is_raw_buffer_locked(struct atomisp_sub_device *asd, int exp_id)
{
	int *bitmap, bit;
	unsigned long flags;
	int ret;

	if (__checking_exp_id(asd, exp_id))
		return -EINVAL;

	/* 32 exposure ids per bitmap word. */
	bitmap = asd->raw_buffer_bitmap + exp_id / 32;
	bit = exp_id % 32;
	spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
	ret = ((*bitmap) & (1 << bit));
	spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);

	/* Set bit means locked; invert so "locked" yields 0. */
	return !ret;
}
/*
 * Clear the lock bit for exp_id and decrement the locked-buffer count.
 * Returns -EINVAL if the buffer is not currently locked (or exp_id is
 * invalid), 0 on success.
 */
static int __clear_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id)
{
	int *bitmap, bit;
	unsigned long flags;

	if (__is_raw_buffer_locked(asd, exp_id))
		return -EINVAL;

	bitmap = asd->raw_buffer_bitmap + exp_id / 32;
	bit = exp_id % 32;
	spin_lock_irqsave(&asd->raw_buffer_bitmap_lock, flags);
	(*bitmap) &= ~(1 << bit);
	asd->raw_buffer_locked_count--;
	spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags);

	dev_dbg(asd->isp->dev, "%s: exp_id %d, raw_buffer_locked_count %d\n",
		__func__, exp_id, asd->raw_buffer_locked_count);
	return 0;
}
/*
 * Trigger capture of the locked raw buffer identified by *exp_id.
 * Returns -EINVAL if the buffer is not locked, -EIO on CSS failure.
 */
int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id)
{
	struct atomisp_device *isp = asd->isp;
	int id = *exp_id;
	int err;

	lockdep_assert_held(&isp->mutex);

	/* Only a locked raw buffer may be sent for capture. */
	err = __is_raw_buffer_locked(asd, id);
	if (err) {
		dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, id, err);
		return -EINVAL;
	}

	dev_dbg(isp->dev, "%s exp_id %d\n", __func__, id);

	err = atomisp_css_exp_id_capture(asd, id);
	if (err) {
		dev_err(isp->dev, "%s exp_id %d failed.\n", __func__, id);
		return -EIO;
	}

	return 0;
}
/*
 * Unlock the raw buffer identified by *exp_id: clear its bitmap bit and
 * tell the CSS to release it. Returns 0 on success or a negative errno.
 */
int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id)
{
	struct atomisp_device *isp = asd->isp;
	int id = *exp_id;
	int err;

	lockdep_assert_held(&isp->mutex);

	err = __clear_raw_buffer_bitmap(asd, id);
	if (err) {
		dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, id, err);
		return -EINVAL;
	}

	dev_dbg(isp->dev, "%s exp_id %d\n", __func__, id);

	err = atomisp_css_exp_id_unlock(asd, id);
	if (err)
		dev_err(isp->dev, "%s exp_id %d failed, err %d.\n",
			__func__, id, err);

	return err;
}
/*
 * Enable or disable digital zoom on the capture pipe.
 * Any positive *enable value turns it on; 0 turns it off.
 */
int atomisp_enable_dz_capt_pipe(struct atomisp_sub_device *asd,
				unsigned int *enable)
{
	if (!enable)
		return -EINVAL;

	atomisp_en_dz_capt_pipe(asd, *enable > 0);
	return 0;
}
/*
 * Inject a fake V4L2 event for testing: frame sync, frame end, 3A stats
 * ready or metadata ready. Only valid while streaming.
 */
int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event)
{
	int code;

	if (!event || !asd->streaming)
		return -EINVAL;

	lockdep_assert_held(&asd->isp->mutex);

	code = *event;
	dev_dbg(asd->isp->dev, "%s: trying to inject a fake event 0x%x\n",
		__func__, code);

	switch (code) {
	case V4L2_EVENT_FRAME_SYNC:
		atomisp_sof_event(asd);
		break;
	case V4L2_EVENT_FRAME_END:
		atomisp_eof_event(asd, 0);
		break;
	case V4L2_EVENT_ATOMISP_3A_STATS_READY:
		atomisp_3a_stats_ready_event(asd, 0);
		break;
	case V4L2_EVENT_ATOMISP_METADATA_READY:
		atomisp_metadata_ready_event(asd, 0);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
| linux-master | drivers/staging/media/atomisp/pci/atomisp_cmd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "hmm.h"
#include "sh_css_sp.h"
#if !defined(ISP2401)
#include "input_formatter.h"
#endif
#include "dma.h" /* N_DMA_CHANNEL_ID */
#include "ia_css_buffer.h"
#include "ia_css_binary.h"
#include "sh_css_hrt.h"
#include "sh_css_defs.h"
#include "sh_css_internal.h"
#include "ia_css_control.h"
#include "ia_css_debug.h"
#include "ia_css_debug_pipe.h"
#include "ia_css_event_public.h"
#include "ia_css_mmu.h"
#include "ia_css_stream.h"
#include "ia_css_isp_param.h"
#include "sh_css_params.h"
#include "sh_css_legacy.h"
#include "ia_css_frame_comm.h"
#include "ia_css_isys.h"
#include "gdc_device.h" /* HRT_GDC_N */
/*#include "sp.h"*/ /* host2sp_enqueue_frame_data() */
#include "assert_support.h"
#include "sw_event_global.h" /* Event IDs.*/
#include "ia_css_event.h"
#include "mmu_device.h"
#include "ia_css_spctrl.h"
#include "atomisp_internal.h"
#ifndef offsetof
#define offsetof(T, x) ((unsigned int)&(((T *)0)->x))
#endif
#define IA_CSS_INCLUDE_CONFIGURATIONS
#include "ia_css_isp_configs.h"
#define IA_CSS_INCLUDE_STATES
#include "ia_css_isp_states.h"
#include "isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.h"
struct sh_css_sp_group sh_css_sp_group;
struct sh_css_sp_stage sh_css_sp_stage;
struct sh_css_isp_stage sh_css_isp_stage;
static struct sh_css_sp_output sh_css_sp_output;
static struct sh_css_sp_per_frame_data per_frame_data;
/* true if SP supports frame loop and host2sp_commands */
/* For the moment there is only code that sets this bool to true */
/* TODO: add code that sets this bool to false */
static bool sp_running;
static int
set_output_frame_buffer(const struct ia_css_frame *frame,
unsigned int idx);
static void
sh_css_copy_buffer_attr_to_spbuffer(struct ia_css_buffer_sp *dest_buf,
const enum sh_css_queue_id queue_id,
const ia_css_ptr xmem_addr,
const enum ia_css_buffer_type buf_type);
static void
initialize_frame_buffer_attribute(struct ia_css_buffer_sp *buf_attr);
static void
initialize_stage_frames(struct ia_css_frames_sp *frames);
/* This data is stored every frame */
void
store_sp_group_data(void)
{
	/* Flush the SP group parameters to DDR and remember their address. */
	per_frame_data.sp_group_addr = sh_css_store_sp_group_to_ddr();
}
/*
 * Mirror the iterator/cropping fields of the current ISP stage into the
 * SP stage structure that is shared with the SP firmware. Values are
 * narrowed to the SP structure's smaller field widths.
 */
static void
copy_isp_stage_to_sp_stage(void)
{
	/* [WW07.5]type casting will cause potential issues */
	sh_css_sp_stage.num_stripes = (uint8_t)
	    sh_css_isp_stage.binary_info.iterator.num_stripes;
	sh_css_sp_stage.row_stripes_height = (uint16_t)
	    sh_css_isp_stage.binary_info.iterator.row_stripes_height;
	sh_css_sp_stage.row_stripes_overlap_lines = (uint16_t)
	    sh_css_isp_stage.binary_info.iterator.row_stripes_overlap_lines;
	sh_css_sp_stage.top_cropping = (uint16_t)
	    sh_css_isp_stage.binary_info.pipeline.top_cropping;
	/* moved to sh_css_sp_init_stage
	sh_css_sp_stage.enable.vf_output =
	sh_css_isp_stage.binary_info.enable.vf_veceven ||
	sh_css_isp_stage.binary_info.num_output_pins > 1;
	*/
	sh_css_sp_stage.enable.sdis = sh_css_isp_stage.binary_info.enable.dis;
	sh_css_sp_stage.enable.s3a = sh_css_isp_stage.binary_info.enable.s3a;
}
/*
 * Store the ISP and SP stage descriptors for one pipeline stage to DDR
 * so the SP firmware can fetch them. Copy pipes carry no ISP binary, so
 * the ISP stage is skipped for them.
 */
void
store_sp_stage_data(enum ia_css_pipe_id id, unsigned int pipe_num,
		    unsigned int stage)
{
	unsigned int thread_id;

	ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
	copy_isp_stage_to_sp_stage();
	if (id != IA_CSS_PIPE_ID_COPY)
		sh_css_sp_stage.isp_stage_addr =
		    sh_css_store_isp_stage_to_ddr(pipe_num, stage);
	sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] =
	    sh_css_store_sp_stage_to_ddr(pipe_num, stage);

	/* Clear for next frame */
	sh_css_sp_stage.program_input_circuit = false;
}
/*
 * Write the per-frame data block into SP DMEM. The DMEM address of
 * sp_per_frame_data depends on the firmware type; ISP firmware carries
 * no per-frame data, so nothing is stored for it.
 */
static void
store_sp_per_frame_data(const struct ia_css_fw_info *fw)
{
	unsigned int HIVE_ADDR_sp_per_frame_data = 0;

	assert(fw);

	switch (fw->type) {
	case ia_css_sp_firmware:
		HIVE_ADDR_sp_per_frame_data = fw->info.sp.per_frame_data;
		break;
	case ia_css_acc_firmware:
		HIVE_ADDR_sp_per_frame_data = fw->info.acc.per_frame_data;
		break;
	case ia_css_isp_firmware:
		/* ISP firmware: no per-frame data to store. */
		return;
	}

	sp_dmem_store(SP0_ID,
		      (unsigned int)sp_address_of(sp_per_frame_data),
		      &per_frame_data,
		      sizeof(per_frame_data));
}
/*
 * Store stage, group and per-frame data for one pipe in a single call.
 * When no firmware info is supplied, the built-in SP firmware is used.
 */
static void
sh_css_store_sp_per_frame_data(enum ia_css_pipe_id pipe_id,
			       unsigned int pipe_num,
			       const struct ia_css_fw_info *sp_fw)
{
	const struct ia_css_fw_info *fw = sp_fw ? sp_fw : &sh_css_sp_fw;

	store_sp_stage_data(pipe_id, pipe_num, 0);
	store_sp_group_data();
	store_sp_per_frame_data(fw);
}
#if SP_DEBUG != SP_DEBUG_NONE
/*
 * Copy the SP debug state out of SP DMEM into *state, one 32-bit word at
 * a time, starting at the "debug" member of the sh_css_sp_output struct.
 */
void
sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state)
{
	const struct ia_css_fw_info *fw = &sh_css_sp_fw;
	unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
	unsigned int i;
	/* Word offset of the debug member within sh_css_sp_output. */
	unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output,
			      debug) / sizeof(int);

	assert(state);
	(void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */

	for (i = 0; i < sizeof(*state) / sizeof(int); i++)
		((unsigned *)state)[i] = load_sp_array_uint(sp_output, i + offset);
}
#endif
/*
 * Set up the SP to run a binary (compressed) copy stage on the capture
 * pipe: a single stage that copies input data into out_frame.
 */
void
sh_css_sp_start_binary_copy(unsigned int pipe_num,
			    struct ia_css_frame *out_frame,
			    unsigned int two_ppc)
{
	enum ia_css_pipe_id pipe_id;
	unsigned int thread_id;
	struct sh_css_sp_pipeline *pipe;
	u8 stage_num = 0;

	assert(out_frame);
	pipe_id = IA_CSS_PIPE_ID_CAPTURE;
	ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
	pipe = &sh_css_sp_group.pipe[thread_id];

	pipe->copy.bin.bytes_available = out_frame->data_bytes;
	pipe->num_stages = 1;
	pipe->pipe_id = pipe_id;
	pipe->pipe_num = pipe_num;
	pipe->thread_id = thread_id;
	pipe->pipe_config = 0x0; /* No parameters */
	pipe->pipe_qos_config = QOS_INVALID;

	/* Default the port config to host-type input and output. */
	if (pipe->inout_port_config == 0) {
		SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
					    (uint8_t)SH_CSS_PORT_INPUT,
					    (uint8_t)SH_CSS_HOST_TYPE, 1);
		SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
					    (uint8_t)SH_CSS_PORT_OUTPUT,
					    (uint8_t)SH_CSS_HOST_TYPE, 1);
	}
	IA_CSS_LOG("pipe_id %d port_config %08x",
		   pipe->pipe_id, pipe->inout_port_config);

#if !defined(ISP2401)
	sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
#else
	(void)two_ppc;
#endif

	sh_css_sp_stage.num = stage_num;
	sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
	sh_css_sp_stage.func =
	    (unsigned int)IA_CSS_PIPELINE_BIN_COPY;

	set_output_frame_buffer(out_frame, 0);

	/* sp_bin_copy_init on the SP does not deal with dynamica/static yet */
	/* For now always update the dynamic data from out frames. */
	sh_css_sp_per_frame_data(pipe_id, pipe_num, &sh_css_sp_fw);
}
/*
 * Set up the SP to run a raw copy stage: a single stage that copies raw
 * sensor data into out_frame, preserving size/padding/bit depth.
 */
static void
sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame,
			 unsigned int pipe_num,
			 unsigned int two_ppc,
			 unsigned int max_input_width,
			 enum sh_css_pipe_config_override pipe_conf_override,
			 unsigned int if_config_index)
{
	enum ia_css_pipe_id pipe_id;
	unsigned int thread_id;
	u8 stage_num = 0;
	struct sh_css_sp_pipeline *pipe;

	assert(out_frame);

	{
		/*
		 * Clear sh_css_sp_stage for easy debugging.
		 * program_input_circuit must be saved as it is set outside
		 * this function.
		 */
		u8 program_input_circuit;

		program_input_circuit = sh_css_sp_stage.program_input_circuit;
		memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
		sh_css_sp_stage.program_input_circuit = program_input_circuit;
	}

	pipe_id = IA_CSS_PIPE_ID_COPY;
	ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
	pipe = &sh_css_sp_group.pipe[thread_id];

	/* Propagate the raw frame geometry to the SP. */
	pipe->copy.raw.height = out_frame->frame_info.res.height;
	pipe->copy.raw.width = out_frame->frame_info.res.width;
	pipe->copy.raw.padded_width = out_frame->frame_info.padded_width;
	pipe->copy.raw.raw_bit_depth = out_frame->frame_info.raw_bit_depth;
	pipe->copy.raw.max_input_width = max_input_width;
	pipe->num_stages = 1;
	pipe->pipe_id = pipe_id;
	/* TODO: next indicates from which queues parameters need to be
	   sampled, needs checking/improvement */
	if (pipe_conf_override == SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD)
		pipe->pipe_config =
		    (SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id);
	else
		pipe->pipe_config = pipe_conf_override;

	pipe->pipe_qos_config = QOS_INVALID;

	/* Default the port config to host-type input and output. */
	if (pipe->inout_port_config == 0) {
		SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
					    (uint8_t)SH_CSS_PORT_INPUT,
					    (uint8_t)SH_CSS_HOST_TYPE, 1);
		SH_CSS_PIPE_PORT_CONFIG_SET(pipe->inout_port_config,
					    (uint8_t)SH_CSS_PORT_OUTPUT,
					    (uint8_t)SH_CSS_HOST_TYPE, 1);
	}
	IA_CSS_LOG("pipe_id %d port_config %08x",
		   pipe->pipe_id, pipe->inout_port_config);

#if !defined(ISP2401)
	sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
#else
	(void)two_ppc;
#endif

	sh_css_sp_stage.num = stage_num;
	sh_css_sp_stage.xmem_bin_addr = 0x0;
	sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
	sh_css_sp_stage.func = (unsigned int)IA_CSS_PIPELINE_RAW_COPY;
	sh_css_sp_stage.if_config_index = (uint8_t)if_config_index;
	set_output_frame_buffer(out_frame, 0);

	ia_css_debug_pipe_graph_dump_sp_raw_copy(out_frame);
}
/*
 * sh_css_sp_start_isys_copy() - program a single-stage ISYS copy pipeline.
 *
 * Sets up the global sh_css_sp_stage and the per-thread SP pipeline so the
 * SP copies frames from the input system straight into @out_frame without
 * ISP processing. Parameter sampling is disabled (pipe_config = 0) for
 * this pipe. If the pipe carries embedded metadata, the metadata buffer
 * queue is attached as well.
 */
static void
sh_css_sp_start_isys_copy(struct ia_css_frame *out_frame,
unsigned int pipe_num, unsigned int max_input_width,
unsigned int if_config_index)
{
enum ia_css_pipe_id pipe_id;
unsigned int thread_id;
u8 stage_num = 0;
struct sh_css_sp_pipeline *pipe;
enum sh_css_queue_id queue_id;
assert(out_frame);
{
/*
 * Clear sh_css_sp_stage for easy debugging.
 * program_input_circuit must be saved as it is set outside
 * this function.
 */
u8 program_input_circuit;
program_input_circuit = sh_css_sp_stage.program_input_circuit;
memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
sh_css_sp_stage.program_input_circuit = program_input_circuit;
}
pipe_id = IA_CSS_PIPE_ID_COPY;
ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
pipe = &sh_css_sp_group.pipe[thread_id];
/* Raw-copy geometry is taken from the output frame description. */
pipe->copy.raw.height = out_frame->frame_info.res.height;
pipe->copy.raw.width = out_frame->frame_info.res.width;
pipe->copy.raw.padded_width = out_frame->frame_info.padded_width;
pipe->copy.raw.raw_bit_depth = out_frame->frame_info.raw_bit_depth;
pipe->copy.raw.max_input_width = max_input_width;
pipe->num_stages = 1;
pipe->pipe_id = pipe_id;
pipe->pipe_config = 0x0; /* No parameters */
pipe->pipe_qos_config = QOS_INVALID;
initialize_stage_frames(&sh_css_sp_stage.frames);
sh_css_sp_stage.num = stage_num;
/* No ISP binary runs for a copy stage. */
sh_css_sp_stage.xmem_bin_addr = 0x0;
sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
sh_css_sp_stage.func = (unsigned int)IA_CSS_PIPELINE_ISYS_COPY;
sh_css_sp_stage.if_config_index = (uint8_t)if_config_index;
set_output_frame_buffer(out_frame, 0);
if (pipe->metadata.height > 0) {
/* Stream carries embedded metadata: attach its buffer queue. */
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id,
&queue_id);
sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.metadata_buf,
queue_id, mmgr_EXCEPTION,
IA_CSS_BUFFER_TYPE_METADATA);
}
ia_css_debug_pipe_graph_dump_sp_raw_copy(out_frame);
}
/*
 * Return the number of bytes copied by the SP binary-copy function, as
 * published by the SP in its sh_css_sp_output structure in DMEM.
 */
unsigned int
sh_css_sp_get_binary_copy_size(void)
{
const struct ia_css_fw_info *fw = &sh_css_sp_fw;
/* Referenced implicitly by the load_sp_array_uint() macro below. */
unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
/* Word offset of bin_copy_bytes_copied within sh_css_sp_output. */
unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output,
bin_copy_bytes_copied) / sizeof(int);
(void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */
return load_sp_array_uint(sp_output, offset);
}
/*
 * Read the SP software-interrupt value for interrupt index @irq from the
 * sw_interrupt_value array inside sh_css_sp_output in SP DMEM.
 */
unsigned int
sh_css_sp_get_sw_interrupt_value(unsigned int irq)
{
const struct ia_css_fw_info *fw = &sh_css_sp_fw;
/* Referenced implicitly by the load_sp_array_uint() macro below. */
unsigned int HIVE_ADDR_sp_output = fw->info.sp.output;
/* Word offset of the sw_interrupt_value array; @irq indexes into it. */
unsigned int offset = (unsigned int)offsetof(struct sh_css_sp_output,
sw_interrupt_value)
/ sizeof(int);
(void)HIVE_ADDR_sp_output; /* To get rid of warning in CRUN */
return load_sp_array_uint(sp_output, offset + irq);
}
/*
 * Fill an SP-side buffer descriptor. A valid queue id attaches the buffer
 * to a host/SP buffer queue (the SP then resolves the data address from
 * that queue); otherwise the buffer is referenced directly by its xmem
 * address.
 */
static void
sh_css_copy_buffer_attr_to_spbuffer(struct ia_css_buffer_sp *dest_buf,
				    const enum sh_css_queue_id queue_id,
				    const ia_css_ptr xmem_addr,
				    const enum ia_css_buffer_type buf_type)
{
	assert(buf_type < IA_CSS_NUM_BUFFER_TYPE);

	if (queue_id <= SH_CSS_INVALID_QUEUE_ID) {
		/* Static buffer: reference it directly by address. */
		assert(xmem_addr != mmgr_EXCEPTION);
		dest_buf->buf_src.xmem_addr = xmem_addr;
	} else {
		/*
		 * Dynamic buffer: a valid queue id tells the SP-side
		 * init_frame_pointers() to take the data address from the
		 * matching queue. The range check duplicates the assert
		 * only to satisfy static analysis (Klocwork), which assumes
		 * asserts may be compiled out and flags a possible
		 * out-of-bounds queue index; the branch is always taken.
		 */
		assert(queue_id < SH_CSS_MAX_NUM_QUEUES);
		if (queue_id < SH_CSS_MAX_NUM_QUEUES)
			dest_buf->buf_src.queue_id = queue_id;
	}

	dest_buf->buf_type = buf_type;
}
/*
 * Translate a host-side frame descriptor into the SP-side representation:
 * buffer attributes (queue or xmem address), frame info, and the per-plane
 * byte offsets appropriate for the frame's pixel format.
 */
static void
sh_css_copy_frame_to_spframe(struct ia_css_frame_sp *sp_frame_out,
const struct ia_css_frame *frame_in)
{
assert(frame_in);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"sh_css_copy_frame_to_spframe():\n");
sh_css_copy_buffer_attr_to_spbuffer(&sp_frame_out->buf_attr,
frame_in->dynamic_queue_id,
frame_in->data,
frame_in->buf_type);
ia_css_frame_info_to_frame_sp_info(&sp_frame_out->info, &frame_in->frame_info);
/* Copy plane offsets; the set of planes depends on the pixel format. */
switch (frame_in->frame_info.format) {
case IA_CSS_FRAME_FORMAT_RAW_PACKED:
case IA_CSS_FRAME_FORMAT_RAW:
/* Single raw plane. */
sp_frame_out->planes.raw.offset = frame_in->planes.raw.offset;
break;
case IA_CSS_FRAME_FORMAT_RGB565:
case IA_CSS_FRAME_FORMAT_RGBA888:
/* Single interleaved RGB plane. */
sp_frame_out->planes.rgb.offset = frame_in->planes.rgb.offset;
break;
case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
/* Separate R/G/B planes. */
sp_frame_out->planes.planar_rgb.r.offset =
frame_in->planes.planar_rgb.r.offset;
sp_frame_out->planes.planar_rgb.g.offset =
frame_in->planes.planar_rgb.g.offset;
sp_frame_out->planes.planar_rgb.b.offset =
frame_in->planes.planar_rgb.b.offset;
break;
case IA_CSS_FRAME_FORMAT_YUYV:
case IA_CSS_FRAME_FORMAT_UYVY:
case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
case IA_CSS_FRAME_FORMAT_YUV_LINE:
/* Single interleaved YUV plane. */
sp_frame_out->planes.yuyv.offset = frame_in->planes.yuyv.offset;
break;
case IA_CSS_FRAME_FORMAT_NV11:
case IA_CSS_FRAME_FORMAT_NV12:
case IA_CSS_FRAME_FORMAT_NV12_16:
case IA_CSS_FRAME_FORMAT_NV12_TILEY:
case IA_CSS_FRAME_FORMAT_NV21:
case IA_CSS_FRAME_FORMAT_NV16:
case IA_CSS_FRAME_FORMAT_NV61:
/* Semi-planar: Y plane plus interleaved UV plane. */
sp_frame_out->planes.nv.y.offset =
frame_in->planes.nv.y.offset;
sp_frame_out->planes.nv.uv.offset =
frame_in->planes.nv.uv.offset;
break;
case IA_CSS_FRAME_FORMAT_YUV420:
case IA_CSS_FRAME_FORMAT_YUV422:
case IA_CSS_FRAME_FORMAT_YUV444:
case IA_CSS_FRAME_FORMAT_YUV420_16:
case IA_CSS_FRAME_FORMAT_YUV422_16:
case IA_CSS_FRAME_FORMAT_YV12:
case IA_CSS_FRAME_FORMAT_YV16:
/* Fully planar: separate Y/U/V planes. */
sp_frame_out->planes.yuv.y.offset =
frame_in->planes.yuv.y.offset;
sp_frame_out->planes.yuv.u.offset =
frame_in->planes.yuv.u.offset;
sp_frame_out->planes.yuv.v.offset =
frame_in->planes.yuv.v.offset;
break;
case IA_CSS_FRAME_FORMAT_QPLANE6:
/* Six bayer-related planes. */
sp_frame_out->planes.plane6.r.offset =
frame_in->planes.plane6.r.offset;
sp_frame_out->planes.plane6.r_at_b.offset =
frame_in->planes.plane6.r_at_b.offset;
sp_frame_out->planes.plane6.gr.offset =
frame_in->planes.plane6.gr.offset;
sp_frame_out->planes.plane6.gb.offset =
frame_in->planes.plane6.gb.offset;
sp_frame_out->planes.plane6.b.offset =
frame_in->planes.plane6.b.offset;
sp_frame_out->planes.plane6.b_at_r.offset =
frame_in->planes.plane6.b_at_r.offset;
break;
case IA_CSS_FRAME_FORMAT_BINARY_8:
sp_frame_out->planes.binary.data.offset =
frame_in->planes.binary.data.offset;
break;
default:
/* This should not happen, but in case it does,
 * nullify the planes
 */
memset(&sp_frame_out->planes, 0, sizeof(sp_frame_out->planes));
break;
}
}
static int
set_input_frame_buffer(const struct ia_css_frame *frame)
{
if (!frame)
return -EINVAL;
switch (frame->frame_info.format) {
case IA_CSS_FRAME_FORMAT_QPLANE6:
case IA_CSS_FRAME_FORMAT_YUV420_16:
case IA_CSS_FRAME_FORMAT_RAW_PACKED:
case IA_CSS_FRAME_FORMAT_RAW:
case IA_CSS_FRAME_FORMAT_YUV420:
case IA_CSS_FRAME_FORMAT_YUYV:
case IA_CSS_FRAME_FORMAT_YUV_LINE:
case IA_CSS_FRAME_FORMAT_NV12:
case IA_CSS_FRAME_FORMAT_NV12_16:
case IA_CSS_FRAME_FORMAT_NV12_TILEY:
case IA_CSS_FRAME_FORMAT_NV21:
case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10:
break;
default:
return -EINVAL;
}
sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.in, frame);
return 0;
}
static int
set_output_frame_buffer(const struct ia_css_frame *frame,
unsigned int idx)
{
if (!frame)
return -EINVAL;
switch (frame->frame_info.format) {
case IA_CSS_FRAME_FORMAT_YUV420:
case IA_CSS_FRAME_FORMAT_YUV422:
case IA_CSS_FRAME_FORMAT_YUV444:
case IA_CSS_FRAME_FORMAT_YV12:
case IA_CSS_FRAME_FORMAT_YV16:
case IA_CSS_FRAME_FORMAT_YUV420_16:
case IA_CSS_FRAME_FORMAT_YUV422_16:
case IA_CSS_FRAME_FORMAT_NV11:
case IA_CSS_FRAME_FORMAT_NV12:
case IA_CSS_FRAME_FORMAT_NV12_16:
case IA_CSS_FRAME_FORMAT_NV12_TILEY:
case IA_CSS_FRAME_FORMAT_NV16:
case IA_CSS_FRAME_FORMAT_NV21:
case IA_CSS_FRAME_FORMAT_NV61:
case IA_CSS_FRAME_FORMAT_YUYV:
case IA_CSS_FRAME_FORMAT_UYVY:
case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
case IA_CSS_FRAME_FORMAT_YUV_LINE:
case IA_CSS_FRAME_FORMAT_RGB565:
case IA_CSS_FRAME_FORMAT_RGBA888:
case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
case IA_CSS_FRAME_FORMAT_RAW:
case IA_CSS_FRAME_FORMAT_RAW_PACKED:
case IA_CSS_FRAME_FORMAT_QPLANE6:
case IA_CSS_FRAME_FORMAT_BINARY_8:
break;
default:
return -EINVAL;
}
sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.out[idx], frame);
return 0;
}
static int
set_view_finder_buffer(const struct ia_css_frame *frame)
{
if (!frame)
return -EINVAL;
switch (frame->frame_info.format) {
/* the dual output pin */
case IA_CSS_FRAME_FORMAT_NV12:
case IA_CSS_FRAME_FORMAT_NV12_16:
case IA_CSS_FRAME_FORMAT_NV21:
case IA_CSS_FRAME_FORMAT_YUYV:
case IA_CSS_FRAME_FORMAT_UYVY:
case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
case IA_CSS_FRAME_FORMAT_YUV420:
case IA_CSS_FRAME_FORMAT_YV12:
case IA_CSS_FRAME_FORMAT_NV12_TILEY:
/* for vf_veceven */
case IA_CSS_FRAME_FORMAT_YUV_LINE:
break;
default:
return -EINVAL;
}
sh_css_copy_frame_to_spframe(&sh_css_sp_stage.frames.out_vf, frame);
return 0;
}
#if !defined(ISP2401)
/*
 * Store input-formatter configurations for the SP and raise the matching
 * "changed" flags so the SP reloads them.
 *
 * @config_a:        mandatory primary configuration.
 * @config_b:        optional secondary configuration; stored (and flagged)
 *                   only when non-NULL.
 * @if_config_index: slot index, must be < SH_CSS_MAX_IF_CONFIGS.
 */
void sh_css_sp_set_if_configs(
    const input_formatter_cfg_t *config_a,
    const input_formatter_cfg_t *config_b,
    const uint8_t if_config_index
)
{
	assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
	assert(config_a);

	sh_css_sp_group.config.input_formatter.set[if_config_index].config_a =
	    *config_a;
	sh_css_sp_group.config.input_formatter.a_changed = true;

	if (config_b) {
		sh_css_sp_group.config.input_formatter.set[if_config_index].config_b =
		    *config_b;
		sh_css_sp_group.config.input_formatter.b_changed = true;
	}
	/* Redundant trailing "return;" removed. */
}
#endif
#if !defined(ISP2401)
/*
 * Record the input-circuit configuration (format type, channel id, input
 * mode) in the SP group and flag it so the SP programs the circuit on the
 * next stage start.
 */
void
sh_css_sp_program_input_circuit(int fmt_type,
int ch_id,
enum ia_css_input_mode input_mode)
{
sh_css_sp_group.config.input_circuit.no_side_band = false;
sh_css_sp_group.config.input_circuit.fmt_type = fmt_type;
sh_css_sp_group.config.input_circuit.ch_id = ch_id;
sh_css_sp_group.config.input_circuit.input_mode = input_mode;
/*
 * The SP group is only loaded at SP boot time and is read once;
 * change flags such as "input_circuit_cfg_changed" must be reset on
 * the SP side.
 */
sh_css_sp_group.config.input_circuit_cfg_changed = true;
sh_css_sp_stage.program_input_circuit = true;
}
#endif
#if !defined(ISP2401)
/* Store sync-generator timing (frame size and blanking) in the SP group. */
void
sh_css_sp_configure_sync_gen(int width, int height,
int hblank_cycles,
int vblank_cycles)
{
sh_css_sp_group.config.sync_gen.width = width;
sh_css_sp_group.config.sync_gen.height = height;
sh_css_sp_group.config.sync_gen.hblank_cycles = hblank_cycles;
sh_css_sp_group.config.sync_gen.vblank_cycles = vblank_cycles;
}
/* Store test-pattern-generator mask/delta parameters in the SP group. */
void
sh_css_sp_configure_tpg(int x_mask,
int y_mask,
int x_delta,
int y_delta,
int xy_mask)
{
sh_css_sp_group.config.tpg.x_mask = x_mask;
sh_css_sp_group.config.tpg.y_mask = y_mask;
sh_css_sp_group.config.tpg.x_delta = x_delta;
sh_css_sp_group.config.tpg.y_delta = y_delta;
sh_css_sp_group.config.tpg.xy_mask = xy_mask;
}
/* Store the pseudo-random bit sequence generator seed in the SP group. */
void
sh_css_sp_configure_prbs(int seed)
{
sh_css_sp_group.config.prbs.seed = seed;
}
#endif
/*
 * Enable raw-buffer pool locking; calling this function always turns the
 * feature on. @lock_all selects whether all raw buffers are locked.
 */
void
sh_css_sp_configure_enable_raw_pool_locking(bool lock_all)
{
sh_css_sp_group.config.enable_raw_pool_locking = true;
sh_css_sp_group.config.lock_all = lock_all;
}
/* Enable or disable the input-system event queue in the SP config. */
void
sh_css_sp_enable_isys_event_queue(bool enable)
{
sh_css_sp_group.config.enable_isys_event_queue = enable;
}
/* Set the flag that disables the continuous-mode viewfinder output. */
void
sh_css_sp_set_disable_continuous_viewfinder(bool flag)
{
sh_css_sp_group.config.disable_cont_vf = flag;
}
/*
 * Bind all frame buffers supplied in @args (input, viewfinder, and every
 * output port) to the current SP stage. The first failure stops further
 * binding but the loop itself still runs to completion.
 */
static int
sh_css_sp_write_frame_pointers(const struct sh_css_binary_args *args)
{
	int ret = 0;
	int port;

	assert(args);

	if (args->in_frame)
		ret = set_input_frame_buffer(args->in_frame);
	if (!ret && args->out_vf_frame)
		ret = set_view_finder_buffer(args->out_vf_frame);
	for (port = 0; port < IA_CSS_BINARY_MAX_OUTPUT_PORTS; port++) {
		if (!ret && args->out_frame[port])
			ret = set_output_frame_buffer(args->out_frame[port],
						      port);
	}

	/*
	 * The error is not passed back to the upper layer; assert here
	 * because when this path is hit things only keep working by
	 * accident.
	 */
	assert(!ret);

	return ret;
}
/*
 * Initialize pipeline-wide SP group configuration: 2-pixels-per-clock
 * mode, host/ISP sync policy, and (on pre-ISP2401 hardware) the
 * input-formatter stream format for the selected slot.
 */
static void
sh_css_sp_init_group(bool two_ppc,
enum atomisp_input_format input_format,
bool no_isp_sync,
uint8_t if_config_index)
{
#if !defined(ISP2401)
sh_css_sp_group.config.input_formatter.isp_2ppc = two_ppc;
#else
(void)two_ppc;
#endif
sh_css_sp_group.config.no_isp_sync = (uint8_t)no_isp_sync;
/* decide whether the frame is processed online or offline */
if (if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED) return;
#if !defined(ISP2401)
assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
sh_css_sp_group.config.input_formatter.set[if_config_index].stream_format =
input_format;
#else
(void)input_format;
#endif
}
/* Copy the binary descriptor into the global ISP stage structure. */
void
sh_css_stage_write_binary_info(struct ia_css_binary_info *info)
{
assert(info);
sh_css_isp_stage.binary_info = *info;
}
/*
 * Copy the CONFIG and STATE ISP memory-interface parameter classes of
 * @binary from their host-side representation to DDR.
 *
 * Return: 0 on success or the first error from
 * ia_css_isp_param_copy_isp_mem_if_to_ddr().
 */
static int
copy_isp_mem_if_to_ddr(struct ia_css_binary *binary)
{
	int ret;

	ret = ia_css_isp_param_copy_isp_mem_if_to_ddr(&binary->css_params,
						      &binary->mem_params,
						      IA_CSS_PARAM_CLASS_CONFIG);
	if (ret)
		return ret;

	/* STATE class result is returned directly. */
	return ia_css_isp_param_copy_isp_mem_if_to_ddr(&binary->css_params,
						       &binary->mem_params,
						       IA_CSS_PARAM_CLASS_STATE);
}
/*
 * Return true when @stage is executed by the SP itself (raw/binary/ISYS
 * copy) rather than by an ISP binary.
 */
static bool
is_sp_stage(struct ia_css_pipeline_stage *stage)
{
assert(stage);
return stage->sp_func != IA_CSS_PIPELINE_NO_FUNC;
}
/*
 * Push the per-stage configuration derived from @args and @binary into
 * every ISP configuration kernel (fpn, crop, qplane, output pins,
 * iterator, dvs, raw, ref, tnr, bayer I/O). Stops at the first error.
 *
 * NOTE(review): ia_css_output0_configure() is invoked twice with identical
 * arguments (before and after ia_css_copy_output_configure()); the second
 * call looks redundant, but removing it has not been verified against the
 * SP firmware behaviour — confirm before cleaning up.
 */
static int configure_isp_from_args(const struct sh_css_sp_pipeline *pipeline,
const struct ia_css_binary *binary,
const struct sh_css_binary_args *args,
bool two_ppc,
bool deinterleaved)
{
int ret;
ret = ia_css_fpn_configure(binary, &binary->in_frame_info);
if (ret)
return ret;
ret = ia_css_crop_configure(binary, ia_css_frame_get_info(args->delay_frames[0]));
if (ret)
return ret;
ret = ia_css_qplane_configure(pipeline, binary, &binary->in_frame_info);
if (ret)
return ret;
ret = ia_css_output0_configure(binary, ia_css_frame_get_info(args->out_frame[0]));
if (ret)
return ret;
ret = ia_css_output1_configure(binary, ia_css_frame_get_info(args->out_vf_frame));
if (ret)
return ret;
ret = ia_css_copy_output_configure(binary, args->copy_output);
if (ret)
return ret;
/* See NOTE(review) above: second, apparently duplicate, call. */
ret = ia_css_output0_configure(binary, ia_css_frame_get_info(args->out_frame[0]));
if (ret)
return ret;
ret = ia_css_iterator_configure(binary, ia_css_frame_get_info(args->in_frame));
if (ret)
return ret;
ret = ia_css_dvs_configure(binary, ia_css_frame_get_info(args->out_frame[0]));
if (ret)
return ret;
ret = ia_css_output_configure(binary, ia_css_frame_get_info(args->out_frame[0]));
if (ret)
return ret;
ret = ia_css_raw_configure(pipeline, binary, ia_css_frame_get_info(args->in_frame),
&binary->in_frame_info, two_ppc, deinterleaved);
if (ret)
return ret;
/*
 * FIXME: args->delay_frames can be NULL here
 *
 * Somehow, the driver at the Intel Atom Yocto tree doesn't seem to
 * suffer from the same issue.
 *
 * Anyway, the function below should now handle a NULL delay_frames
 * without crashing, but the pipeline should likely be built without
 * adding it at the first place (or there are a hidden bug somewhere)
 */
ret = ia_css_ref_configure(binary, args->delay_frames, pipeline->dvs_frame_delay);
if (ret)
return ret;
ret = ia_css_tnr_configure(binary, args->tnr_frames);
if (ret)
return ret;
return ia_css_bayer_io_config(binary, args);
}
/*
 * Run every registered kernel state initializer for @binary; a binary
 * without state memory offsets has no state to initialize.
 */
static void
initialize_isp_states(const struct ia_css_binary *binary)
{
	unsigned int state_id;

	if (!binary->info->mem_offsets.offsets.state)
		return;

	for (state_id = 0; state_id < IA_CSS_NUM_STATE_IDS; state_id++)
		ia_css_kernel_init_state[state_id](binary);
}
/* Reset one SP buffer descriptor to "no queue, invalid buffer type". */
static void
initialize_frame_buffer_attribute(struct ia_css_buffer_sp *buf_attr)
{
buf_attr->buf_src.queue_id = SH_CSS_INVALID_QUEUE_ID;
buf_attr->buf_type = IA_CSS_BUFFER_TYPE_INVALID;
}
/*
 * Reset every buffer descriptor of an SP stage (input, all output ports,
 * viewfinder, 3A, DVS and metadata) to the invalid/unbound state.
 */
static void
initialize_stage_frames(struct ia_css_frames_sp *frames)
{
	unsigned int port;

	initialize_frame_buffer_attribute(&frames->in.buf_attr);
	for (port = 0; port < IA_CSS_BINARY_MAX_OUTPUT_PORTS; port++)
		initialize_frame_buffer_attribute(&frames->out[port].buf_attr);
	initialize_frame_buffer_attribute(&frames->out_vf.buf_attr);
	initialize_frame_buffer_attribute(&frames->s3a_buf);
	initialize_frame_buffer_attribute(&frames->dvs_buf);
	initialize_frame_buffer_attribute(&frames->metadata_buf);
}
/*
 * sh_css_sp_init_stage() - fill the global sh_css_sp_stage / sh_css_isp_stage
 * structures for one ISP-binary stage of pipeline @pipe_num.
 *
 * Copies binary/blob descriptors, frame infos, frame buffer pointers and
 * per-kernel configuration into the SP-visible stage state. Returns 0 on
 * success or a negative error code.
 */
static int
sh_css_sp_init_stage(struct ia_css_binary *binary,
const char *binary_name,
const struct ia_css_blob_info *blob_info,
const struct sh_css_binary_args *args,
unsigned int pipe_num,
unsigned int stage,
bool xnr,
const struct ia_css_isp_param_css_segments *isp_mem_if,
unsigned int if_config_index,
bool two_ppc)
{
const struct ia_css_binary_xinfo *xinfo;
const struct ia_css_binary_info *info;
int err = 0;
int i;
struct ia_css_pipe *pipe = NULL;
unsigned int thread_id;
enum sh_css_queue_id queue_id;
bool continuous = sh_css_continuous_is_enabled((uint8_t)pipe_num);
assert(binary);
assert(blob_info);
assert(args);
assert(isp_mem_if);
xinfo = binary->info;
info = &xinfo->sp;
{
/*
 * Clear sh_css_sp_stage for easy debugging.
 * program_input_circuit must be saved as it is set outside
 * this function.
 */
u8 program_input_circuit;
program_input_circuit = sh_css_sp_stage.program_input_circuit;
memset(&sh_css_sp_stage, 0, sizeof(sh_css_sp_stage));
sh_css_sp_stage.program_input_circuit = (uint8_t)program_input_circuit;
}
ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
if (!info) {
/* No binary info: mark the SP stage slot as absent. */
sh_css_sp_group.pipe[thread_id].sp_stage_addr[stage] = mmgr_NULL;
return 0;
}
if (IS_ISP2401)
sh_css_sp_stage.deinterleaved = 0;
else
sh_css_sp_stage.deinterleaved = ((stage == 0) && continuous);
initialize_stage_frames(&sh_css_sp_stage.frames);
/*
 * TODO: Make the Host dynamically determine
 * the stage type.
 */
sh_css_sp_stage.stage_type = SH_CSS_ISP_STAGE_TYPE;
sh_css_sp_stage.num = (uint8_t)stage;
sh_css_sp_stage.isp_online = (uint8_t)binary->online;
sh_css_sp_stage.isp_copy_vf = (uint8_t)args->copy_vf;
sh_css_sp_stage.isp_copy_output = (uint8_t)args->copy_output;
sh_css_sp_stage.enable.vf_output = (args->out_vf_frame != NULL);
/* Copy the frame infos first, to be overwritten by the frames,
if these are present.
*/
sh_css_sp_stage.frames.effective_in_res.width = binary->effective_in_frame_res.width;
sh_css_sp_stage.frames.effective_in_res.height = binary->effective_in_frame_res.height;
ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.in.info,
&binary->in_frame_info);
for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.out[i].info,
&binary->out_frame_info[i]);
}
ia_css_frame_info_to_frame_sp_info(&sh_css_sp_stage.frames.internal_frame_info,
&binary->internal_frame_info);
sh_css_sp_stage.dvs_envelope.width = binary->dvs_envelope.width;
sh_css_sp_stage.dvs_envelope.height = binary->dvs_envelope.height;
sh_css_sp_stage.isp_pipe_version = (uint8_t)info->pipeline.isp_pipe_version;
sh_css_sp_stage.isp_deci_log_factor = (uint8_t)binary->deci_factor_log2;
sh_css_sp_stage.isp_vf_downscale_bits = (uint8_t)binary->vf_downscale_log2;
sh_css_sp_stage.if_config_index = (uint8_t)if_config_index;
sh_css_sp_stage.sp_enable_xnr = (uint8_t)xnr;
sh_css_sp_stage.xmem_bin_addr = xinfo->xmem_addr;
sh_css_sp_stage.xmem_map_addr = sh_css_params_ddr_address_map();
sh_css_isp_stage.blob_info = *blob_info;
sh_css_stage_write_binary_info((struct ia_css_binary_info *)info);
/* Make sure binary name is smaller than allowed string size */
assert(strlen(binary_name) < SH_CSS_MAX_BINARY_NAME - 1);
strscpy(sh_css_isp_stage.binary_name, binary_name, SH_CSS_MAX_BINARY_NAME);
sh_css_isp_stage.mem_initializers = *isp_mem_if;
/*
 * Even when a stage does not need uds and does not need params,
 * ia_css_uds_sp_scale_params() seems to be called (needs
 * further investigation). This function can not deal with
 * dx, dy = {0, 0}
 */
err = sh_css_sp_write_frame_pointers(args);
/* TODO: move it to a better place */
if (binary->info->sp.enable.s3a) {
/* Attach the 3A statistics buffer queue. */
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_3A_STATISTICS, thread_id,
&queue_id);
sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.s3a_buf, queue_id,
mmgr_EXCEPTION,
IA_CSS_BUFFER_TYPE_3A_STATISTICS);
}
if (binary->info->sp.enable.dis) {
/* Attach the DIS/DVS statistics buffer queue. */
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_DIS_STATISTICS, thread_id,
&queue_id);
sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.dvs_buf, queue_id,
mmgr_EXCEPTION,
IA_CSS_BUFFER_TYPE_DIS_STATISTICS);
}
/* Metadata queue is attached unconditionally. */
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_METADATA, thread_id, &queue_id);
sh_css_copy_buffer_attr_to_spbuffer(&sh_css_sp_stage.frames.metadata_buf, queue_id, mmgr_EXCEPTION, IA_CSS_BUFFER_TYPE_METADATA);
if (err)
return err;
#ifdef ISP2401
pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
if (!pipe)
return -EINVAL;
if (args->in_frame)
ia_css_get_crop_offsets(pipe, &args->in_frame->frame_info);
else
ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
#else
(void)pipe; /*avoid build warning*/
#endif
err = configure_isp_from_args(&sh_css_sp_group.pipe[thread_id],
binary, args, two_ppc, sh_css_sp_stage.deinterleaved);
if (err)
return err;
initialize_isp_states(binary);
/* we do this only for preview pipe because in fill_binary_info function
* we assign vf_out res to out res, but for ISP internal processing, we need
* the original out res. for video pipe, it has two output pins --- out and
* vf_out, so it can keep these two resolutions already. */
if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW &&
(binary->vf_downscale_log2 > 0)) {
/* TODO: Remove this after preview output decimation is fixed
* by configuring out&vf info fields properly */
sh_css_sp_stage.frames.out[0].info.padded_width
<<= binary->vf_downscale_log2;
sh_css_sp_stage.frames.out[0].info.res.width
<<= binary->vf_downscale_log2;
sh_css_sp_stage.frames.out[0].info.res.height
<<= binary->vf_downscale_log2;
}
err = copy_isp_mem_if_to_ddr(binary);
if (err)
return err;
return 0;
}
/*
 * sp_init_stage() - resolve a pipeline stage into a binary + blob + memory
 * interface and delegate to sh_css_sp_init_stage().
 *
 * Three cases: a regular binary stage, a firmware (extension) stage for
 * which a temporary binary descriptor is synthesized, and an SP-function
 * stage which is rejected here with -EINVAL (it must be handled by
 * sp_init_sp_stage() instead).
 */
static int
sp_init_stage(struct ia_css_pipeline_stage *stage,
unsigned int pipe_num,
bool xnr,
unsigned int if_config_index,
bool two_ppc)
{
struct ia_css_binary *binary;
const struct ia_css_fw_info *firmware;
const struct sh_css_binary_args *args;
unsigned int stage_num;
/*
 * Initialiser required because of the "else" path below.
 * Is this a valid path ?
 */
const char *binary_name = "";
const struct ia_css_binary_xinfo *info = NULL;
/* note: the var below is made static as it is quite large;
if it is not static it ends up on the stack which could
cause issues for drivers
*/
static struct ia_css_binary tmp_binary;
const struct ia_css_blob_info *blob_info = NULL;
struct ia_css_isp_param_css_segments isp_mem_if;
/* LA: should be ia_css_data, should not contain host pointer.
However, CSS/DDR pointer is not available yet.
Hack is to store it in params->ddr_ptrs and then copy it late in the SP just before vmem init.
TODO: Call this after CSS/DDR allocation and store that pointer.
Best is to allocate it at stage creation time together with host pointer.
Remove vmem from params.
*/
struct ia_css_isp_param_css_segments *mem_if = &isp_mem_if;
int err = 0;
assert(stage);
binary = stage->binary;
firmware = stage->firmware;
args = &stage->args;
stage_num = stage->stage_num;
if (binary) {
/* Regular ISP binary stage. */
info = binary->info;
binary_name = (const char *)(info->blob->name);
blob_info = &info->blob->header.blob;
ia_css_init_memory_interface(mem_if, &binary->mem_params, &binary->css_params);
} else if (firmware) {
/* Firmware (extension) stage: synthesize a binary descriptor. */
const struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
if (args->out_frame[0])
out_infos[0] = &args->out_frame[0]->frame_info;
info = &firmware->info.isp;
ia_css_binary_fill_info(info, false, false,
ATOMISP_INPUT_FORMAT_RAW_10,
ia_css_frame_get_info(args->in_frame),
NULL,
out_infos,
ia_css_frame_get_info(args->out_vf_frame),
&tmp_binary,
NULL,
-1, true);
binary = &tmp_binary;
binary->info = info;
binary_name = IA_CSS_EXT_ISP_PROG_NAME(firmware);
blob_info = &firmware->blob;
mem_if = (struct ia_css_isp_param_css_segments *)&firmware->mem_initializers;
} else {
/* SP stage */
assert(stage->sp_func != IA_CSS_PIPELINE_NO_FUNC);
/* binary and blob_info are now NULL.
These will be passed to sh_css_sp_init_stage
and dereferenced there, so passing a NULL
pointer is no good. return an error */
return -EINVAL;
}
err = sh_css_sp_init_stage(binary,
(const char *)binary_name,
blob_info,
args,
pipe_num,
stage_num,
xnr,
mem_if,
if_config_index,
two_ppc);
return err;
}
/*
 * sp_init_sp_stage() - program an SP-executed stage (raw copy, binary
 * copy or ISYS copy) rather than an ISP-binary stage.
 *
 * BIN_COPY and NO_FUNC are not valid here and trip an assert.
 */
static void
sp_init_sp_stage(struct ia_css_pipeline_stage *stage,
		 unsigned int pipe_num,
		 bool two_ppc,
		 enum sh_css_pipe_config_override copy_ovrd,
		 unsigned int if_config_index)
{
	const struct sh_css_binary_args *args;

	/* Validate @stage before computing the address of its members. */
	assert(stage);
	args = &stage->args;

	switch (stage->sp_func) {
	case IA_CSS_PIPELINE_RAW_COPY:
		sh_css_sp_start_raw_copy(args->out_frame[0],
					 pipe_num, two_ppc,
					 stage->max_input_width,
					 copy_ovrd, if_config_index);
		break;
	case IA_CSS_PIPELINE_BIN_COPY:
		assert(false); /* TBI */
		break;
	case IA_CSS_PIPELINE_ISYS_COPY:
		sh_css_sp_start_isys_copy(args->out_frame[0],
					  pipe_num, stage->max_input_width,
					  if_config_index);
		break;
	case IA_CSS_PIPELINE_NO_FUNC:
		assert(false);
		break;
	}
}
/*
 * sh_css_sp_init_pipeline() - translate the host pipeline @me into the
 * SP-side per-thread pipeline state and per-stage data, then store it to
 * SP DMEM via store_sp_stage_data()/store_sp_group_data().
 *
 * Selects the input-formatter configuration slot from the input mode and
 * MIPI port, numbers the stages, initializes the SP group for the first
 * binary, and dispatches each stage either to sp_init_sp_stage() (SP
 * functions) or sp_init_stage() (ISP binaries).
 */
void
sh_css_sp_init_pipeline(struct ia_css_pipeline *me,
enum ia_css_pipe_id id,
u8 pipe_num,
bool xnr,
bool two_ppc,
bool continuous,
bool offline,
unsigned int required_bds_factor,
enum sh_css_pipe_config_override copy_ovrd,
enum ia_css_input_mode input_mode,
const struct ia_css_metadata_config *md_config,
const struct ia_css_metadata_info *md_info,
const enum mipi_port_id port_id)
{
/* Get first stage */
struct ia_css_pipeline_stage *stage = NULL;
struct ia_css_binary *first_binary = NULL;
struct ia_css_pipe *pipe = NULL;
unsigned int num;
enum ia_css_pipe_id pipe_id = id;
unsigned int thread_id;
u8 if_config_index, tmp_if_config_index;
if (!me->stages) {
dev_err(atomisp_dev, "%s called on a pipeline without stages\n",
__func__);
return; /* FIXME should be able to return an error */
}
first_binary = me->stages->binary;
/* Derive the input-formatter configuration slot from the input mode. */
if (input_mode == IA_CSS_INPUT_MODE_SENSOR ||
input_mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
assert(port_id < N_MIPI_PORT_ID);
if (port_id >= N_MIPI_PORT_ID) /* should not happen but KW does not know */
return; /* we should be able to return an error */
if_config_index = (uint8_t)(port_id - MIPI_PORT0_ID);
} else if (input_mode == IA_CSS_INPUT_MODE_MEMORY) {
if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
} else {
if_config_index = 0x0;
}
ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline));
/* Count stages */
for (stage = me->stages, num = 0; stage; stage = stage->next, num++) {
stage->stage_num = num;
ia_css_debug_pipe_graph_dump_stage(stage, id);
}
me->num_stages = num;
if (first_binary) {
/* Init pipeline data */
sh_css_sp_init_group(two_ppc, first_binary->input_format,
offline, if_config_index);
} /* if (first_binary != NULL) */
/* Signal the host immediately after start for SP_ISYS_COPY only */
if (me->num_stages == 1 &&
me->stages->sp_func == IA_CSS_PIPELINE_ISYS_COPY)
sh_css_sp_group.config.no_isp_sync = true;
/* Init stage data */
sh_css_init_host2sp_frame_data();
sh_css_sp_group.pipe[thread_id].num_stages = 0;
sh_css_sp_group.pipe[thread_id].pipe_id = pipe_id;
sh_css_sp_group.pipe[thread_id].thread_id = thread_id;
sh_css_sp_group.pipe[thread_id].pipe_num = pipe_num;
sh_css_sp_group.pipe[thread_id].num_execs = me->num_execs;
sh_css_sp_group.pipe[thread_id].pipe_qos_config = QOS_INVALID;
sh_css_sp_group.pipe[thread_id].required_bds_factor = required_bds_factor;
sh_css_sp_group.pipe[thread_id].input_system_mode
= (uint32_t)input_mode;
sh_css_sp_group.pipe[thread_id].port_id = port_id;
sh_css_sp_group.pipe[thread_id].dvs_frame_delay = (uint32_t)me->dvs_frame_delay;
/* TODO: next indicates from which queues parameters need to be
sampled, needs checking/improvement */
if (ia_css_pipeline_uses_params(me)) {
sh_css_sp_group.pipe[thread_id].pipe_config =
SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << thread_id;
}
/* For continuous use-cases, SP copy is responsible for sampling the
* parameters */
if (continuous)
sh_css_sp_group.pipe[thread_id].pipe_config = 0;
sh_css_sp_group.pipe[thread_id].inout_port_config = me->inout_port_config;
pipe = find_pipe_by_num(pipe_num);
assert(pipe);
if (!pipe) {
return;
}
sh_css_sp_group.pipe[thread_id].scaler_pp_lut = sh_css_pipe_get_pp_gdc_lut(pipe);
if (md_info && md_info->size > 0) {
/* Publish embedded-metadata geometry and MIPI format to the SP. */
sh_css_sp_group.pipe[thread_id].metadata.width = md_info->resolution.width;
sh_css_sp_group.pipe[thread_id].metadata.height = md_info->resolution.height;
sh_css_sp_group.pipe[thread_id].metadata.stride = md_info->stride;
sh_css_sp_group.pipe[thread_id].metadata.size = md_info->size;
ia_css_isys_convert_stream_format_to_mipi_format(
md_config->data_type, MIPI_PREDICTOR_NONE,
&sh_css_sp_group.pipe[thread_id].metadata.format);
}
sh_css_sp_group.pipe[thread_id].output_frame_queue_id = (uint32_t)SH_CSS_INVALID_QUEUE_ID;
if (pipe_id != IA_CSS_PIPE_ID_COPY) {
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, thread_id,
(enum sh_css_queue_id *)(
&sh_css_sp_group.pipe[thread_id].output_frame_queue_id));
}
IA_CSS_LOG("pipe_id %d port_config %08x",
pipe_id, sh_css_sp_group.pipe[thread_id].inout_port_config);
/* Initialize and store every stage, SP-function or ISP-binary. */
for (stage = me->stages, num = 0; stage; stage = stage->next, num++) {
sh_css_sp_group.pipe[thread_id].num_stages++;
if (is_sp_stage(stage)) {
sp_init_sp_stage(stage, pipe_num, two_ppc,
copy_ovrd, if_config_index);
} else {
/* Only stage 0 of a non-continuous pipe uses the IF config. */
if ((stage->stage_num != 0) ||
SH_CSS_PIPE_PORT_CONFIG_IS_CONTINUOUS(me->inout_port_config))
tmp_if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED;
else
tmp_if_config_index = if_config_index;
sp_init_stage(stage, pipe_num,
xnr, tmp_if_config_index, two_ppc);
}
store_sp_stage_data(pipe_id, pipe_num, num);
}
sh_css_sp_group.pipe[thread_id].pipe_config |= (uint32_t)
(me->acquire_isp_each_stage << IA_CSS_ACQUIRE_ISP_POS);
store_sp_group_data();
}
/*
 * Tear down the SP-side pipeline for @pipe_num by zeroing its stage count;
 * the rest of the per-thread state is left intact (see commented memset).
 */
void
sh_css_sp_uninit_pipeline(unsigned int pipe_num)
{
unsigned int thread_id;
ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
/*memset(&sh_css_sp_group.pipe[thread_id], 0, sizeof(struct sh_css_sp_pipeline));*/
sh_css_sp_group.pipe[thread_id].num_stages = 0;
}
/*
 * Post @host2sp_command into the host->SP mailbox word in SP DMEM.
 *
 * Returns true when the previous command had already been consumed by the
 * SP (mailbox read back host2sp_cmd_ready). The new command is stored
 * unconditionally either way; an unconsumed previous command is logged.
 */
bool sh_css_write_host2sp_command(enum host2sp_commands host2sp_command)
{
/* Referenced implicitly by the load/store_sp_array_uint() macros. */
unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
unsigned int offset = (unsigned int)offsetof(struct host_sp_communication,
host2sp_command)
/ sizeof(int);
enum host2sp_commands last_cmd = host2sp_cmd_error;
(void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
/* Previous command must be handled by SP (by design) */
last_cmd = load_sp_array_uint(host_sp_com, offset);
if (last_cmd != host2sp_cmd_ready)
IA_CSS_ERROR("last host command not handled by SP(%d)", last_cmd);
store_sp_array_uint(host_sp_com, offset, host2sp_command);
return (last_cmd == host2sp_cmd_ready);
}
/* Read back the current host->SP mailbox command word from SP DMEM. */
enum host2sp_commands
sh_css_read_host2sp_command(void)
{
/* Referenced implicitly by the load_sp_array_uint() macro below. */
unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
unsigned int offset = (unsigned int)offsetof(struct host_sp_communication, host2sp_command)
/ sizeof(int);
(void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
return (enum host2sp_commands)load_sp_array_uint(host_sp_com, offset);
}
/*
 * Frame data is no longer part of the sp_stage structure but part of a
 * separate structure. The aim is to make the sp_data struct static
 * (it defines a pipeline) and that the dynamic (per frame) data is stored
 * separately.
 *
 * This function must be called first everywhere you start constructing
 * a new pipeline by defining one or more stages with use of variable
 * sh_css_sp_stage. Even the special cases like accelerator and copy_frame
 * have a pipeline of just 1 stage.
 */
void
sh_css_init_host2sp_frame_data(void)
{
/* Clean table */
unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
(void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */
/*
 * rvanimme: don't clean it to save static frame info line ref_in
 * ref_out, and tnr_frames. Once this static data is in a
 * separate data struct, this may be enabled (but still, there is
 * no need for it)
 */
}
/*
 * @brief Update the offline frame information in host_sp_communication.
 * Refer to "sh_css_sp.h" for more details.
 *
 * Stores the frame data address and the metadata address for continuous
 * (offline) frame slot @frame_num into SP DMEM; a NULL frame/metadata
 * clears the respective slot (stores 0).
 */
void
sh_css_update_host2sp_offline_frame(
unsigned int frame_num,
struct ia_css_frame *frame,
struct ia_css_metadata *metadata)
{
unsigned int HIVE_ADDR_host_sp_com;
unsigned int offset;
assert(frame_num < NUM_CONTINUOUS_FRAMES);
/* Write new frame data into SP DMEM */
HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
offset = (unsigned int)offsetof(struct host_sp_communication,
host2sp_offline_frames)
/ sizeof(int);
offset += frame_num;
store_sp_array_uint(host_sp_com, offset, frame ? frame->data : 0);
/* Write metadata buffer into SP DMEM */
offset = (unsigned int)offsetof(struct host_sp_communication,
host2sp_offline_metadata)
/ sizeof(int);
offset += frame_num;
store_sp_array_uint(host_sp_com, offset, metadata ? metadata->address : 0);
}
/*
* @brief Update the mipi frame information in host_sp_communication.
* Refer to "sh_css_sp.h" for more details.
*/
void
sh_css_update_host2sp_mipi_frame(
    unsigned int frame_num,
    struct ia_css_frame *frame)
{
    /* Name required by the store_sp_array_uint() token-pasting macro. */
    unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
    unsigned int offset;

    /* MIPI buffers are dedicated to port, so now there are more of them. */
    assert(frame_num < (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM));

    /* Word offset of the requested slot inside host2sp_mipi_frames. */
    offset = (unsigned int)offsetof(struct host_sp_communication,
                                    host2sp_mipi_frames) / sizeof(int)
             + frame_num;

    /* Write new frame data into SP DMEM; a NULL frame clears the slot. */
    store_sp_array_uint(host_sp_com, offset, frame ? frame->data : 0);
}
/*
* @brief Update the mipi metadata information in host_sp_communication.
* Refer to "sh_css_sp.h" for more details.
*/
void
sh_css_update_host2sp_mipi_metadata(
    unsigned int frame_num,
    struct ia_css_metadata *metadata)
{
    /* Name required by the store_sp_array_uint() token-pasting macro. */
    unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
    unsigned int offset;

    /* MIPI buffers are dedicated to port, so now there are more of them. */
    assert(frame_num < (N_CSI_PORTS * NUM_MIPI_FRAMES_PER_STREAM));

    /* Word offset of the requested slot inside host2sp_mipi_metadata. */
    offset = offsetof(struct host_sp_communication, host2sp_mipi_metadata)
             / sizeof(int)
             + frame_num;

    /* Write the metadata address into SP DMEM; NULL clears the slot. */
    store_sp_array_uint(host_sp_com, offset, metadata ? metadata->address : 0);
}
/* Tell the SP how many MIPI frames have been allocated. */
void
sh_css_update_host2sp_num_mipi_frames(unsigned int num_frames)
{
    /* Name required by the store_sp_array_uint() token-pasting macro. */
    unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
    unsigned int offset =
        (unsigned int)offsetof(struct host_sp_communication,
                               host2sp_num_mipi_frames) / sizeof(int);

    store_sp_array_uint(host_sp_com, offset, num_frames);
}
/*
 * Publish the number of raw frames for continuous capture.  With
 * @set_avail the count goes to the "available" slot (and the delta versus
 * the previous value to the "extra" slot); otherwise it goes to the
 * "target" slot.
 */
void
sh_css_update_host2sp_cont_num_raw_frames(unsigned int num_frames,
        bool set_avail)
{
    const struct ia_css_fw_info *fw = &sh_css_sp_fw;
    /* Name required by the SP DMEM access macros (token pasting). */
    unsigned int HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com;
    unsigned int offset;

    if (set_avail) {
        unsigned int avail_num_frames, extra_num_frames, offset_extra;

        offset = (unsigned int)offsetof(struct host_sp_communication,
                                        host2sp_cont_avail_num_raw_frames)
                 / sizeof(int);
        avail_num_frames = load_sp_array_uint(host_sp_com, offset);
        extra_num_frames = num_frames - avail_num_frames;
        offset_extra = (unsigned int)offsetof(struct host_sp_communication,
                                              host2sp_cont_extra_num_raw_frames)
                       / sizeof(int);
        store_sp_array_uint(host_sp_com, offset_extra, extra_num_frames);
    } else {
        offset = (unsigned int)offsetof(struct host_sp_communication,
                                        host2sp_cont_target_num_raw_frames)
                 / sizeof(int);
    }

    store_sp_array_uint(host_sp_com, offset, num_frames);
}
/* Program the default event IRQ mask for every pipe: report all events,
 * AND-filter none. */
void
sh_css_event_init_irq_mask(void)
{
    int i;
    /* Name required by the sp_address_of() token-pasting macro. */
    unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
    unsigned int offset;
    struct sh_css_event_irq_mask event_irq_mask_init;

    event_irq_mask_init.or_mask = IA_CSS_EVENT_TYPE_ALL;
    event_irq_mask_init.and_mask = IA_CSS_EVENT_TYPE_NONE;
    (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */

    /* DMEM stores must be bus-word sized and aligned. */
    assert(sizeof(event_irq_mask_init) % HRT_BUS_BYTES == 0);
    for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
        offset = (unsigned int)offsetof(struct host_sp_communication,
                                        host2sp_event_irq_mask[i]);
        assert(offset % HRT_BUS_BYTES == 0);
        sp_dmem_store(SP0_ID,
                      (unsigned int)sp_address_of(host_sp_com) + offset,
                      &event_irq_mask_init, sizeof(event_irq_mask_init));
    }
}
/*
 * Set the per-pipe event IRQ or/and masks in SP DMEM.
 * Returns 0 on success, -EINVAL when the pipe number is out of range.
 * Only the low 16 bits of each mask are stored.
 */
int
ia_css_pipe_set_irq_mask(struct ia_css_pipe *pipe,
                         unsigned int or_mask,
                         unsigned int and_mask)
{
    /* Name required by the sp_address_of() token-pasting macro. */
    unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
    unsigned int offset;
    struct sh_css_event_irq_mask event_irq_mask;
    unsigned int pipe_num;

    assert(pipe);
    assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES);
    /* Linux kernel does not have UINT16_MAX
     * Therefore decided to comment out these 2 asserts for Linux
     * Alternatives that were not chosen:
     * - add a conditional #define for UINT16_MAX
     * - compare with (uint16_t)~0 or 0xffff
     * - different assert for Linux and Windows
     */
    (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */

    IA_CSS_LOG("or_mask=%x, and_mask=%x", or_mask, and_mask);
    event_irq_mask.or_mask = (uint16_t)or_mask;
    event_irq_mask.and_mask = (uint16_t)and_mask;

    pipe_num = ia_css_pipe_get_pipe_num(pipe);
    if (pipe_num >= IA_CSS_PIPE_ID_NUM)
        return -EINVAL;
    offset = (unsigned int)offsetof(struct host_sp_communication,
                                    host2sp_event_irq_mask[pipe_num]);
    assert(offset % HRT_BUS_BYTES == 0);
    sp_dmem_store(SP0_ID,
                  (unsigned int)sp_address_of(host_sp_com) + offset,
                  &event_irq_mask, sizeof(event_irq_mask));

    return 0;
}
/*
 * Read the per-pipe event IRQ masks back from SP DMEM.
 * Either output pointer may be NULL if the caller is not interested.
 * Returns 0 on success, -EINVAL when the pipe number is out of range.
 */
int
ia_css_event_get_irq_mask(const struct ia_css_pipe *pipe,
                          unsigned int *or_mask,
                          unsigned int *and_mask)
{
    /* Name required by the sp_address_of() token-pasting macro. */
    unsigned int HIVE_ADDR_host_sp_com = sh_css_sp_fw.info.sp.host_sp_com;
    unsigned int offset;
    struct sh_css_event_irq_mask event_irq_mask;
    unsigned int pipe_num;

    (void)HIVE_ADDR_host_sp_com; /* Suppress warnings in CRUN */

    IA_CSS_ENTER_LEAVE("");

    assert(pipe);
    assert(IA_CSS_PIPE_ID_NUM == NR_OF_PIPELINES);

    pipe_num = ia_css_pipe_get_pipe_num(pipe);
    if (pipe_num >= IA_CSS_PIPE_ID_NUM)
        return -EINVAL;
    offset = (unsigned int)offsetof(struct host_sp_communication,
                                    host2sp_event_irq_mask[pipe_num]);
    assert(offset % HRT_BUS_BYTES == 0);
    sp_dmem_load(SP0_ID,
                 (unsigned int)sp_address_of(host_sp_com) + offset,
                 &event_irq_mask, sizeof(event_irq_mask));

    if (or_mask)
        *or_mask = event_irq_mask.or_mask;

    if (and_mask)
        *and_mask = event_irq_mask.and_mask;

    return 0;
}
/* Record whether the SP is running; other code (e.g. cache invalidation
 * during startup) checks this flag before touching SP DMEM. */
void
sh_css_sp_set_sp_running(bool flag)
{
    sp_running = flag;
}
/* Return the cached SP running state (see sh_css_sp_set_sp_running()). */
bool
sh_css_sp_is_running(void)
{
    return sp_running;
}
/*
 * Start the SP/ISP: push the group and per-frame data into DMEM, mark the
 * software state, invalidate MMU caches and kick the SP control.
 * No-op if the SP is already running.  Statement order is significant,
 * see the inline comments.
 */
void
sh_css_sp_start_isp(void)
{
    const struct ia_css_fw_info *fw;
    /* Name required by the sp_address_of() token-pasting macro. */
    unsigned int HIVE_ADDR_sp_sw_state;

    fw = &sh_css_sp_fw;
    HIVE_ADDR_sp_sw_state = fw->info.sp.sw_state;

    if (sp_running)
        return;

    (void)HIVE_ADDR_sp_sw_state; /* Suppress warnings in CRUN */

    /* no longer here, sp started immediately */
    /*ia_css_debug_pipe_graph_dump_epilogue();*/

    store_sp_group_data();
    store_sp_per_frame_data(fw);

    /* Initial SW state before the SP takes over. */
    sp_dmem_store_uint32(SP0_ID,
                         (unsigned int)sp_address_of(sp_sw_state),
                         (uint32_t)(IA_CSS_SP_SW_TERMINATED));

    /* Note 1: The sp_start_isp function contains a wait till
     * the input network is configured by the SP.
     * Note 2: Not all SP binaries supports host2sp_commands.
     * In case a binary does support it, the host2sp_command
     * will have status cmd_ready after return of the function
     * sh_css_hrt_sp_start_isp. There is no race-condition here
     * because only after the process_frame command has been
     * received, the SP starts configuring the input network.
     */

    /* we need to set sp_running before we call ia_css_mmu_invalidate_cache
     * as ia_css_mmu_invalidate_cache checks on sp_running to
     * avoid that it accesses dmem while the SP is not powered
     */
    sp_running = true;
    ia_css_mmu_invalidate_cache();
    /* Invalidate all MMU caches */
    mmu_invalidate_cache_all();

    ia_css_spctrl_start(SP0_ID);
}
/* Query the SP-side flag that signals the ISP has actually started. */
bool
ia_css_isp_has_started(void)
{
    const struct ia_css_fw_info *fw = &sh_css_sp_fw;
    /* Name required by the load_sp_uint() token-pasting macro. */
    unsigned int HIVE_ADDR_ia_css_ispctrl_sp_isp_started = fw->info.sp.isp_started;

    (void)HIVE_ADDR_ia_css_ispctrl_sp_isp_started; /* Suppress warnings in CRUN */

    return (bool)load_sp_uint(ia_css_ispctrl_sp_isp_started);
}
/*
* @brief Initialize the DMA software-mask in the debug mode.
* Refer to "sh_css_sp.h" for more details.
*/
/*
 * @brief Initialize the DMA software-mask in the debug mode.
 * Refer to "sh_css_sp.h" for more details.
 *
 * Enables both the write (0) and read (1) requests on every DMA channel.
 */
bool
sh_css_sp_init_dma_sw_reg(int dma_id)
{
    int channel, request;

    for (channel = 0; channel < N_DMA_CHANNEL_ID; channel++)
        for (request = 0; request <= 1; request++)
            sh_css_sp_set_dma_sw_reg(dma_id, channel, request, true);

    return true;
}
/*
* @brief Set the DMA software-mask in the debug mode.
* Refer to "sh_css_sp.h" for more details.
*/
/*
 * @brief Set the DMA software-mask in the debug mode.
 * Refer to "sh_css_sp.h" for more details.
 *
 * The register packs one bit per channel, with 8 bits reserved per
 * request type: bit = 8 * request_type + channel_id.
 */
bool
sh_css_sp_set_dma_sw_reg(int dma_id,
                         int channel_id,
                         int request_type,
                         bool enable)
{
    u32 reg;
    u32 bit_offset;

    (void)dma_id;

    assert(channel_id >= 0 && channel_id < N_DMA_CHANNEL_ID);
    assert(request_type >= 0);

    /* locate the target bit for this channel/request pair */
    bit_offset = (8 * request_type) + channel_id;

    /* read-modify-write the software mask */
    reg = sh_css_sp_group.debug.dma_sw_reg;
    reg &= ~(1U << bit_offset);
    if (enable)
        reg |= 1U << bit_offset;
    sh_css_sp_group.debug.dma_sw_reg = reg;

    return true;
}
void
sh_css_sp_reset_global_vars(void)
{
memset(&sh_css_sp_group, 0, sizeof(struct sh_css_sp_group));
memset(&sh_css_sp_stage, 0, sizeof(struct sh_css_sp_stage));
memset(&sh_css_isp_stage, 0, sizeof(struct sh_css_isp_stage));
memset(&sh_css_sp_output, 0, sizeof(struct sh_css_sp_output));
memset(&per_frame_data, 0, sizeof(struct sh_css_sp_per_frame_data));
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_sp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "ia_css_properties.h"
#include <assert_support.h>
#include "ia_css_types.h"
#include "gdc_device.h"
/*
 * Fill @properties with the static capabilities of this ISP variant.
 * @properties must be non-NULL (checked by assert in debug builds).
 */
void
ia_css_get_properties(struct ia_css_properties *properties)
{
    assert(properties);
    /*
     * MW: We don't want to store the coordinates
     * full range in memory: Truncate
     */
    properties->gdc_coord_one = gdc_get_unity(GDC0_ID) / HRT_GDC_COORD_SCALE;

    properties->l1_base_is_index = true;

    properties->vamem_type = IA_CSS_VAMEM_TYPE_2;
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_properties.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for atomisp driver sysfs interface
*
* Copyright (c) 2014 Intel Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include "atomisp_compat.h"
#include "atomisp_internal.h"
#include "atomisp_ioctl.h"
#include "atomisp_drvfs.h"
#include "hmm/hmm.h"
#include "ia_css_debug.h"
/*
* _iunit_debug:
* dbglvl: iunit css driver trace level
* dbgopt: iunit debug option:
* bit 0: binary list
* bit 1: running binary
* bit 2: memory statistic
*/
/* Driver-level debug state backing the sysfs attributes below. */
struct _iunit_debug {
    struct device_driver *drv;   /* driver the sysfs files are attached to */
    struct atomisp_device *isp;  /* back-pointer to the ISP device */
    unsigned int dbglvl;         /* css trace level (valid range 1..9) */
    unsigned int dbgfun;         /* css debug function selector */
    unsigned int dbgopt;         /* dump option bitmask (OPTION_*) */
};
#define OPTION_BIN_LIST BIT(0)  /* dump the loaded binary (blob) list */
#define OPTION_BIN_RUN BIT(1)   /* dump the currently running binary */
#define OPTION_VALID (OPTION_BIN_LIST \
| OPTION_BIN_RUN)
/* Global debug state; defaults to dumping the binary list only. */
static struct _iunit_debug iunit_debug = {
    .dbglvl = 0,
    .dbgopt = OPTION_BIN_LIST,
};
/*
 * Perform the dumps selected by @opt (OPTION_* bitmask).
 * Returns 0 on success, -EINVAL for an empty/invalid mask, -EPERM when a
 * running-binary dump is requested while not streaming, or the error from
 * the blob-list dump.
 */
static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
                                    unsigned int opt)
{
    int ret = 0;

    if (!(opt & OPTION_VALID)) {
        ret = -EINVAL;
        dev_err(isp->dev, "%s dump nothing[ret=%d]\n", __func__, ret);
        return ret;
    }

    if (opt & OPTION_BIN_LIST) {
        ret = atomisp_css_dump_blob_infor(isp);
        if (ret) {
            dev_err(isp->dev, "%s dump blob infor err[ret:%d]\n",
                    __func__, ret);
            return ret;
        }
    }

    if (opt & OPTION_BIN_RUN) {
        if (!isp->asd.streaming) {
            ret = -EPERM;
            dev_err(isp->dev, "%s dump running bin err[ret:%d]\n",
                    __func__, ret);
            return ret;
        }
        atomisp_css_dump_sp_raw_copy_linecount(true);
        atomisp_css_debug_dump_isp_binary();
    }

    return ret;
}
/* sysfs "dbglvl" read: refresh from the global dbg_level and report it. */
static ssize_t iunit_dbglvl_show(struct device_driver *drv, char *buf)
{
    iunit_debug.dbglvl = dbg_level;
    return sysfs_emit(buf, "dtrace level:%u\n", iunit_debug.dbglvl);
}
/*
 * sysfs "dbglvl" write: set the CSS dtrace level (valid range 1..9).
 * Returns -ERANGE on parse failure or out-of-range input.
 *
 * Fix: parse into a local first.  The old code let kstrtouint() write
 * straight into iunit_debug.dbglvl, so a rejected value (e.g. 0 or 42)
 * was still left stored in the debug state.
 */
static ssize_t iunit_dbglvl_store(struct device_driver *drv, const char *buf,
                                  size_t size)
{
    unsigned int dbglvl;

    if (kstrtouint(buf, 10, &dbglvl) || dbglvl < 1 || dbglvl > 9)
        return -ERANGE;

    iunit_debug.dbglvl = dbglvl;
    ia_css_debug_set_dtrace_level(iunit_debug.dbglvl);

    return size;
}
/* sysfs "dbgfun" read: refresh from the CSS layer and report it. */
static ssize_t iunit_dbgfun_show(struct device_driver *drv, char *buf)
{
    iunit_debug.dbgfun = atomisp_get_css_dbgfunc();
    return sysfs_emit(buf, "dbgfun opt:%u\n", iunit_debug.dbgfun);
}
/*
 * sysfs "dbgfun" write: parse the selector, apply it via the CSS layer,
 * and cache it only when the CSS layer accepted it.
 */
static ssize_t iunit_dbgfun_store(struct device_driver *drv, const char *buf,
                                  size_t size)
{
    unsigned int opt;
    int ret;

    ret = kstrtouint(buf, 10, &opt);
    if (ret)
        return ret;

    ret = atomisp_set_css_dbgfunc(iunit_debug.isp, opt);
    if (ret)
        return ret;

    iunit_debug.dbgfun = opt;
    return size;
}
/* sysfs "dbgopt" read: report the current dump-option bitmask. */
static ssize_t iunit_dbgopt_show(struct device_driver *drv, char *buf)
{
    return sysfs_emit(buf, "option:0x%x\n", iunit_debug.dbgopt);
}
/*
 * sysfs "dbgopt" write: parse the option mask, run the requested dumps,
 * and remember the mask.
 *
 * Fix: validate/dump first and commit afterwards.  The old code stored
 * the mask into iunit_debug.dbgopt before iunit_dump_dbgopt() checked
 * it, so an invalid mask persisted even when the write failed.
 */
static ssize_t iunit_dbgopt_store(struct device_driver *drv, const char *buf,
                                  size_t size)
{
    unsigned int opt;
    int ret;

    ret = kstrtouint(buf, 10, &opt);
    if (ret)
        return ret;

    ret = iunit_dump_dbgopt(iunit_debug.isp, opt);
    if (ret)
        return ret;

    iunit_debug.dbgopt = opt;
    return size;
}
/* Debug attributes exposed on the driver's sysfs directory. */
static const struct driver_attribute iunit_drvfs_attrs[] = {
    __ATTR(dbglvl, 0644, iunit_dbglvl_show, iunit_dbglvl_store),
    __ATTR(dbgfun, 0644, iunit_dbgfun_show, iunit_dbgfun_store),
    __ATTR(dbgopt, 0644, iunit_dbgopt_show, iunit_dbgopt_store),
};
/*
 * Create all debug sysfs files; attempts every attribute and returns 0,
 * or the first error encountered.
 *
 * Fix: the old code OR-ed the return values together (`ret |= ...`),
 * which mashes negative errno values into a meaningless number.  Keep
 * the first real error code instead.
 */
static int iunit_drvfs_create_files(struct device_driver *drv)
{
    int i, err, ret = 0;

    for (i = 0; i < ARRAY_SIZE(iunit_drvfs_attrs); i++) {
        err = driver_create_file(drv, &iunit_drvfs_attrs[i]);
        if (err && !ret)
            ret = err;    /* remember first failure, still try the rest */
    }

    return ret;
}
static void iunit_drvfs_remove_files(struct device_driver *drv)
{
int i;
for (i = 0; i < ARRAY_SIZE(iunit_drvfs_attrs); i++)
driver_remove_file(drv, &iunit_drvfs_attrs[i]);
}
/*
 * Create the driver debug sysfs files.  On failure the files created so
 * far are removed again (driver_remove_file tolerates missing files).
 */
int atomisp_drvfs_init(struct atomisp_device *isp)
{
    struct device_driver *drv = isp->dev->driver;
    int ret;

    iunit_debug.isp = isp;
    iunit_debug.drv = drv;

    ret = iunit_drvfs_create_files(iunit_debug.drv);
    if (ret) {
        dev_err(isp->dev, "drvfs_create_files error: %d\n", ret);
        iunit_drvfs_remove_files(iunit_debug.drv);
    }

    return ret;
}
/* Tear down the driver debug sysfs files created by atomisp_drvfs_init(). */
void atomisp_drvfs_exit(void)
{
    iunit_drvfs_remove_files(iunit_debug.drv);
}
| linux-master | drivers/staging/media/atomisp/pci/atomisp_drvfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Medifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2010 Intel Corporation. All Rights Reserved.
*
* Copyright (c) 2010 Silicon Hive www.siliconhive.com.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#include <linux/delay.h>
#include <linux/pci.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include "atomisp_cmd.h"
#include "atomisp_common.h"
#include "atomisp_fops.h"
#include "atomisp_internal.h"
#include "atomisp_ioctl.h"
#include "atomisp-regs.h"
#include "atomisp_compat.h"
#include "sh_css_hrt.h"
#include "gp_device.h"
#include "device_access.h"
#include "irq.h"
/* Identification strings reported via VIDIOC_QUERYCAP; lengths are bounded
 * by the struct v4l2_capability field sizes. */
static const char *DRIVER = "atomisp"; /* max size 15 */
static const char *CARD = "ATOM ISP"; /* max size 31 */
/*
 * FIXME: ISP should not know beforehand all CIDs supported by sensor.
 * Instead, it needs to propagate unknown CIDs to the sensor.
 */
/* Table of V4L2 controls the ISP answers VIDIOC_QUERYCTRL for; includes
 * both standard CIDs and atomisp-private ones. */
static struct v4l2_queryctrl ci_v4l2_controls[] = {
    {
        .id = V4L2_CID_AUTO_WHITE_BALANCE,
        .type = V4L2_CTRL_TYPE_BOOLEAN,
        .name = "Automatic White Balance",
        .minimum = 0,
        .maximum = 1,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_RED_BALANCE,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Red Balance",
        .minimum = 0x00,
        .maximum = 0xff,
        .step = 1,
        .default_value = 0x00,
    },
    {
        .id = V4L2_CID_BLUE_BALANCE,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Blue Balance",
        .minimum = 0x00,
        .maximum = 0xff,
        .step = 1,
        .default_value = 0x00,
    },
    {
        .id = V4L2_CID_GAMMA,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Gamma",
        .minimum = 0x00,
        .maximum = 0xff,
        .step = 1,
        .default_value = 0x00,
    },
    {
        .id = V4L2_CID_POWER_LINE_FREQUENCY,
        .type = V4L2_CTRL_TYPE_MENU,
        .name = "Light frequency filter",
        .minimum = 1,
        .maximum = 2,
        .step = 1,
        .default_value = 1,
    },
    {
        .id = V4L2_CID_COLORFX,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Image Color Effect",
        .minimum = 0,
        .maximum = 9,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_COLORFX_CBCR,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Image Color Effect CbCr",
        .minimum = 0,
        .maximum = 0xffff,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Bad Pixel Correction",
        .minimum = 0,
        .maximum = 1,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "GDC/CAC",
        .minimum = 0,
        .maximum = 1,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_ATOMISP_VIDEO_STABLIZATION,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Video Stablization",
        .minimum = 0,
        .maximum = 1,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_ATOMISP_FIXED_PATTERN_NR,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Fixed Pattern Noise Reduction",
        .minimum = 0,
        .maximum = 1,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "False Color Correction",
        .minimum = 0,
        .maximum = 1,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_REQUEST_FLASH,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Request flash frames",
        .minimum = 0,
        .maximum = 10,
        .step = 1,
        .default_value = 1,
    },
    {
        .id = V4L2_CID_ATOMISP_LOW_LIGHT,
        .type = V4L2_CTRL_TYPE_BOOLEAN,
        .name = "Low light mode",
        .minimum = 0,
        .maximum = 1,
        .step = 1,
        .default_value = 1,
    },
    {
        .id = V4L2_CID_2A_STATUS,
        .type = V4L2_CTRL_TYPE_BITMASK,
        .name = "AE and AWB status",
        .minimum = 0,
        .maximum = V4L2_2A_STATUS_AE_READY | V4L2_2A_STATUS_AWB_READY,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_EXPOSURE,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "exposure",
        .minimum = -4,
        .maximum = 4,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_EXPOSURE_ZONE_NUM,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "one-time exposure zone number",
        .minimum = 0x0,
        .maximum = 0xffff,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Exposure auto priority",
        .minimum = V4L2_EXPOSURE_AUTO,
        .maximum = V4L2_EXPOSURE_APERTURE_PRIORITY,
        .step = 1,
        .default_value = V4L2_EXPOSURE_AUTO,
    },
    {
        .id = V4L2_CID_SCENE_MODE,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "scene mode",
        .minimum = 0,
        .maximum = 13,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_ISO_SENSITIVITY,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "iso",
        .minimum = -4,
        .maximum = 4,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_ISO_SENSITIVITY_AUTO,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "iso mode",
        .minimum = V4L2_ISO_SENSITIVITY_MANUAL,
        .maximum = V4L2_ISO_SENSITIVITY_AUTO,
        .step = 1,
        .default_value = V4L2_ISO_SENSITIVITY_AUTO,
    },
    {
        .id = V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "white balance",
        .minimum = 0,
        .maximum = 9,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_EXPOSURE_METERING,
        .type = V4L2_CTRL_TYPE_MENU,
        .name = "metering",
        .minimum = 0,
        .maximum = 3,
        .step = 1,
        .default_value = 1,
    },
    {
        .id = V4L2_CID_3A_LOCK,
        .type = V4L2_CTRL_TYPE_BITMASK,
        .name = "3a lock",
        .minimum = 0,
        .maximum = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE
        | V4L2_LOCK_FOCUS,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_TEST_PATTERN,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Test Pattern",
        .minimum = 0,
        .maximum = 0xffff,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_TEST_PATTERN_COLOR_R,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Test Pattern Solid Color R",
        .minimum = INT_MIN,
        .maximum = INT_MAX,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_TEST_PATTERN_COLOR_GR,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Test Pattern Solid Color GR",
        .minimum = INT_MIN,
        .maximum = INT_MAX,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_TEST_PATTERN_COLOR_GB,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Test Pattern Solid Color GB",
        .minimum = INT_MIN,
        .maximum = INT_MAX,
        .step = 1,
        .default_value = 0,
    },
    {
        .id = V4L2_CID_TEST_PATTERN_COLOR_B,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Test Pattern Solid Color B",
        .minimum = INT_MIN,
        .maximum = INT_MAX,
        .step = 1,
        .default_value = 0,
    },
};

/* Number of entries in ci_v4l2_controls[]. */
static const u32 ctrls_num = ARRAY_SIZE(ci_v4l2_controls);
/*
* supported V4L2 fmts and resolutions
*/
/* Map of V4L2 pixel formats to media-bus codes and CSS frame formats;
 * searched linearly by the atomisp_get_format_bridge*() helpers. */
const struct atomisp_format_bridge atomisp_output_fmts[] = {
    {
        .pixelformat = V4L2_PIX_FMT_YUV420,
        .depth = 12,
        .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV420,
        .sh_fmt = IA_CSS_FRAME_FORMAT_YUV420,
        .description = "YUV420, planar",
        .planar = true
    }, {
        .pixelformat = V4L2_PIX_FMT_YVU420,
        .depth = 12,
        .mbus_code = V4L2_MBUS_FMT_CUSTOM_YVU420,
        .sh_fmt = IA_CSS_FRAME_FORMAT_YV12,
        .description = "YVU420, planar",
        .planar = true
    }, {
        .pixelformat = V4L2_PIX_FMT_YUV422P,
        .depth = 16,
        .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV422P,
        .sh_fmt = IA_CSS_FRAME_FORMAT_YUV422,
        .description = "YUV422, planar",
        .planar = true
    }, {
        .pixelformat = V4L2_PIX_FMT_YUV444,
        .depth = 24,
        .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUV444,
        .sh_fmt = IA_CSS_FRAME_FORMAT_YUV444,
        .description = "YUV444"
    }, {
        .pixelformat = V4L2_PIX_FMT_NV12,
        .depth = 12,
        .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV12,
        .sh_fmt = IA_CSS_FRAME_FORMAT_NV12,
        .description = "NV12, Y-plane, CbCr interleaved",
        .planar = true
    }, {
        .pixelformat = V4L2_PIX_FMT_NV21,
        .depth = 12,
        .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV21,
        .sh_fmt = IA_CSS_FRAME_FORMAT_NV21,
        .description = "NV21, Y-plane, CbCr interleaved",
        .planar = true
    }, {
        .pixelformat = V4L2_PIX_FMT_NV16,
        .depth = 16,
        .mbus_code = V4L2_MBUS_FMT_CUSTOM_NV16,
        .sh_fmt = IA_CSS_FRAME_FORMAT_NV16,
        .description = "NV16, Y-plane, CbCr interleaved",
        .planar = true
    }, {
        .pixelformat = V4L2_PIX_FMT_YUYV,
        .depth = 16,
        .mbus_code = V4L2_MBUS_FMT_CUSTOM_YUYV,
        .sh_fmt = IA_CSS_FRAME_FORMAT_YUYV,
        .description = "YUYV, interleaved"
    }, {
        .pixelformat = V4L2_PIX_FMT_UYVY,
        .depth = 16,
        .mbus_code = MEDIA_BUS_FMT_UYVY8_1X16,
        .sh_fmt = IA_CSS_FRAME_FORMAT_UYVY,
        .description = "UYVY, interleaved"
    }, { /* This one is for parallel sensors! DO NOT USE! */
        .pixelformat = V4L2_PIX_FMT_UYVY,
        .depth = 16,
        .mbus_code = MEDIA_BUS_FMT_UYVY8_2X8,
        .sh_fmt = IA_CSS_FRAME_FORMAT_UYVY,
        .description = "UYVY, interleaved"
    }, {
        .pixelformat = V4L2_PIX_FMT_SBGGR16,
        .depth = 16,
        .mbus_code = V4L2_MBUS_FMT_CUSTOM_SBGGR16,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 16"
    }, {
        .pixelformat = V4L2_PIX_FMT_SBGGR8,
        .depth = 8,
        .mbus_code = MEDIA_BUS_FMT_SBGGR8_1X8,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 8"
    }, {
        .pixelformat = V4L2_PIX_FMT_SGBRG8,
        .depth = 8,
        .mbus_code = MEDIA_BUS_FMT_SGBRG8_1X8,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 8"
    }, {
        .pixelformat = V4L2_PIX_FMT_SGRBG8,
        .depth = 8,
        .mbus_code = MEDIA_BUS_FMT_SGRBG8_1X8,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 8"
    }, {
        .pixelformat = V4L2_PIX_FMT_SRGGB8,
        .depth = 8,
        .mbus_code = MEDIA_BUS_FMT_SRGGB8_1X8,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 8"
    }, {
        .pixelformat = V4L2_PIX_FMT_SBGGR10,
        .depth = 16,
        .mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 10"
    }, {
        .pixelformat = V4L2_PIX_FMT_SGBRG10,
        .depth = 16,
        .mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 10"
    }, {
        .pixelformat = V4L2_PIX_FMT_SGRBG10,
        .depth = 16,
        .mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 10"
    }, {
        .pixelformat = V4L2_PIX_FMT_SRGGB10,
        .depth = 16,
        .mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 10"
    }, {
        .pixelformat = V4L2_PIX_FMT_SBGGR12,
        .depth = 16,
        .mbus_code = MEDIA_BUS_FMT_SBGGR12_1X12,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 12"
    }, {
        .pixelformat = V4L2_PIX_FMT_SGBRG12,
        .depth = 16,
        .mbus_code = MEDIA_BUS_FMT_SGBRG12_1X12,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 12"
    }, {
        .pixelformat = V4L2_PIX_FMT_SGRBG12,
        .depth = 16,
        .mbus_code = MEDIA_BUS_FMT_SGRBG12_1X12,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 12"
    }, {
        .pixelformat = V4L2_PIX_FMT_SRGGB12,
        .depth = 16,
        .mbus_code = MEDIA_BUS_FMT_SRGGB12_1X12,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RAW,
        .description = "Bayer 12"
    }, {
        .pixelformat = V4L2_PIX_FMT_RGB32,
        .depth = 32,
        .mbus_code = V4L2_MBUS_FMT_CUSTOM_RGB32,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RGBA888,
        .description = "32 RGB 8-8-8-8"
    }, {
        .pixelformat = V4L2_PIX_FMT_RGB565,
        .depth = 16,
        .mbus_code = MEDIA_BUS_FMT_BGR565_2X8_LE,
        .sh_fmt = IA_CSS_FRAME_FORMAT_RGB565,
        .description = "16 RGB 5-6-5"
#if 0
    }, {
        .pixelformat = V4L2_PIX_FMT_JPEG,
        .depth = 8,
        .mbus_code = MEDIA_BUS_FMT_JPEG_1X8,
        .sh_fmt = IA_CSS_FRAME_FORMAT_BINARY_8,
        .description = "JPEG"
    }, {
        /* This is a custom format being used by M10MO to send the RAW data */
        .pixelformat = V4L2_PIX_FMT_CUSTOM_M10MO_RAW,
        .depth = 8,
        .mbus_code = V4L2_MBUS_FMT_CUSTOM_M10MO_RAW,
        .sh_fmt = IA_CSS_FRAME_FORMAT_BINARY_8,
        .description = "Custom RAW for M10MO"
#endif
    },
};
const struct atomisp_format_bridge *
atomisp_get_format_bridge(unsigned int pixelformat)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
if (atomisp_output_fmts[i].pixelformat == pixelformat)
return &atomisp_output_fmts[i];
}
return NULL;
}
/* Look up the format-bridge entry for a media-bus code; NULL if unknown. */
const struct atomisp_format_bridge *
atomisp_get_format_bridge_from_mbus(u32 mbus_code)
{
    const struct atomisp_format_bridge *fmt = atomisp_output_fmts;
    const struct atomisp_format_bridge *end =
        atomisp_output_fmts + ARRAY_SIZE(atomisp_output_fmts);

    for (; fmt < end; fmt++) {
        if (fmt->mbus_code == mbus_code)
            return fmt;
    }

    return NULL;
}
/*
 * Common precondition check for video-pipe ioctls.
 * Returns -EIO after a fatal ISP error, -EBUSY when @settings_change is
 * set while the vb2 queue is busy (format/input must not change
 * mid-stream), 0 otherwise.  Caller must hold isp->mutex.
 */
int atomisp_pipe_check(struct atomisp_video_pipe *pipe, bool settings_change)
{
    lockdep_assert_held(&pipe->isp->mutex);

    if (pipe->isp->isp_fatal_error)
        return -EIO;

    if (settings_change && vb2_is_busy(&pipe->vb_queue)) {
        dev_err(pipe->isp->dev, "Set fmt/input IOCTL while streaming\n");
        return -EBUSY;
    }

    return 0;
}
/*
* v4l2 ioctls
* return ISP capabilities
*/
/* VIDIOC_QUERYCAP: fill in driver/card/bus identification strings. */
static int atomisp_querycap(struct file *file, void *fh,
                            struct v4l2_capability *cap)
{
    struct atomisp_device *isp = video_get_drvdata(video_devdata(file));

    strscpy(cap->driver, DRIVER, sizeof(cap->driver));
    strscpy(cap->card, CARD, sizeof(cap->card));
    snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
             dev_name(isp->dev));

    return 0;
}
/*
* enum input are used to check primary/secondary camera
*/
/*
 * VIDIOC_ENUMINPUT: describe one sensor input.
 * Also appends the focus-motor name ("sensor+motor") so 3A userspace can
 * discover the actuator; input->reserved[0/1] carry the atomisp-private
 * sensor type and CSI port.
 */
static int atomisp_enum_input(struct file *file, void *fh,
                              struct v4l2_input *input)
{
    struct video_device *vdev = video_devdata(file);
    struct atomisp_device *isp = video_get_drvdata(vdev);
    int index = input->index;
    struct v4l2_subdev *motor;

    if (index >= isp->input_cnt)
        return -EINVAL;

    if (!isp->inputs[index].camera)
        return -EINVAL;

    /* Clear everything, then rebuild; index is restored below. */
    memset(input, 0, sizeof(struct v4l2_input));
    strscpy(input->name, isp->inputs[index].camera->name,
            sizeof(input->name));

    /*
     * HACK: append actuator's name to sensor's
     * As currently userspace can't talk directly to subdev nodes, this
     * ioctl is the only way to enum inputs + possible external actuators
     * for 3A tuning purpose.
     */
    if (!IS_ISP2401)
        motor = isp->inputs[index].motor;
    else
        motor = isp->motor;

    if (motor && strlen(motor->name) > 0) {
        const int cur_len = strlen(input->name);
        const int max_size = sizeof(input->name) - cur_len - 1;

        /* Need room for at least '+' and one character of the name. */
        if (max_size > 1) {
            input->name[cur_len] = '+';
            strscpy(&input->name[cur_len + 1],
                    motor->name, max_size);
        }
    }

    input->type = V4L2_INPUT_TYPE_CAMERA;
    input->index = index;
    input->reserved[0] = isp->inputs[index].type;
    input->reserved[1] = isp->inputs[index].port;

    return 0;
}
/*
* get input are used to get current primary/secondary camera
*/
/* VIDIOC_G_INPUT: report the currently selected sensor input. */
static int atomisp_g_input(struct file *file, void *fh, unsigned int *input)
{
    struct video_device *vdev = video_devdata(file);
    struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;

    *input = asd->input_curr;
    return 0;
}
/* VIDIOC_S_FMT (capture): delegate to the common set-format helper. */
static int atomisp_s_fmt_cap(struct file *file, void *fh,
                             struct v4l2_format *f)
{
    struct video_device *vdev = video_devdata(file);

    return atomisp_set_fmt(vdev, f);
}
/*
* set input are used to set current primary/secondary camera
*/
/*
 * VIDIOC_S_INPUT: switch the active sensor.
 * Powers down the previously owned sensor, powers up and selects the new
 * one (and its focus motor), and records the ownership on the sub-device.
 * Rejected with -EBUSY while streaming.
 */
static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
{
    struct video_device *vdev = video_devdata(file);
    struct atomisp_device *isp = video_get_drvdata(vdev);
    struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
    struct atomisp_sub_device *asd = pipe->asd;
    struct v4l2_subdev *camera = NULL;
    struct v4l2_subdev *motor;
    int ret;

    ret = atomisp_pipe_check(pipe, true);
    if (ret)
        return ret;

    if (input >= ATOM_ISP_MAX_INPUTS || input >= isp->input_cnt) {
        dev_dbg(isp->dev, "input_cnt: %d\n", isp->input_cnt);
        return -EINVAL;
    }

    camera = isp->inputs[input].camera;
    if (!camera) {
        dev_err(isp->dev, "%s, no camera\n", __func__);
        return -EINVAL;
    }

    /* power off the current owned sensor, as it is not used this time */
    if (isp->inputs[asd->input_curr].asd == asd &&
        asd->input_curr != input) {
        ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
                               core, s_power, 0);
        if (ret && ret != -ENOIOCTLCMD)
            dev_warn(isp->dev,
                     "Failed to power-off sensor\n");
        /* clear the asd field to show this camera is not used */
        isp->inputs[asd->input_curr].asd = NULL;
    }

    /* power on the new sensor */
    ret = v4l2_subdev_call(isp->inputs[input].camera, core, s_power, 1);
    if (ret && ret != -ENOIOCTLCMD) {
        dev_err(isp->dev, "Failed to power-on sensor\n");
        return ret;
    }

    /*
     * Some sensor driver resets the run mode during power-on, thus force
     * update the run mode to sensor after power-on.
     */
    atomisp_update_run_mode(asd);

    /* select operating sensor */
    ret = v4l2_subdev_call(isp->inputs[input].camera, video, s_routing,
                           0, 0, 0);
    if (ret && (ret != -ENOIOCTLCMD)) {
        dev_err(isp->dev, "Failed to select sensor\n");
        return ret;
    }

    /* ISP2400 has a per-input motor; ISP2401 a single global one that
     * needs explicit power-up here. */
    if (!IS_ISP2401) {
        motor = isp->inputs[input].motor;
    } else {
        motor = isp->motor;
        if (motor)
            ret = v4l2_subdev_call(motor, core, s_power, 1);
    }

    if (motor)
        ret = v4l2_subdev_call(motor, core, init, 1);

    asd->input_curr = input;
    /* mark this camera is used by the current stream */
    isp->inputs[input].asd = asd;

    return 0;
}
/*
* With crop any framesize <= sensor-size can be made, give
* userspace a list of sizes to choice from.
*/
/*
 * Enumerate the candidate crop sizes that fit inside @native (including
 * ISP padding) and are not smaller than 2/3 of @active in both
 * dimensions.  @valid_sizes counts matches across calls so a second pass
 * (e.g. for binned mode) continues the enumeration.
 * Returns 0 when fsize->index was filled, -EINVAL when exhausted.
 */
static int atomisp_enum_framesizes_crop_inner(struct atomisp_device *isp,
                                              struct v4l2_frmsizeenum *fsize,
                                              const struct v4l2_rect *active,
                                              const struct v4l2_rect *native,
                                              int *valid_sizes)
{
    static const struct v4l2_frmsize_discrete frame_sizes[] = {
        { 1600, 1200 },
        { 1600, 1080 },
        { 1600,  900 },
        { 1440, 1080 },
        { 1280,  960 },
        { 1280,  720 },
        {  800,  600 },
        {  640,  480 },
    };
    u32 padding_w, padding_h;
    int i;

    for (i = 0; i < ARRAY_SIZE(frame_sizes); i++) {
        atomisp_get_padding(isp, frame_sizes[i].width, frame_sizes[i].height,
                            &padding_w, &padding_h);

        /* Must fit in the sensor's native array including padding. */
        if ((frame_sizes[i].width + padding_w) > native->width ||
            (frame_sizes[i].height + padding_h) > native->height)
            continue;

        /*
         * Skip sizes where width and height are less then 2/3th of the
         * sensor size to avoid sizes with a too small field of view.
         */
        if (frame_sizes[i].width < (active->width * 2 / 3) &&
            frame_sizes[i].height < (active->height * 2 / 3))
            continue;

        if (*valid_sizes == fsize->index) {
            fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
            fsize->discrete = frame_sizes[i];
            return 0;
        }

        (*valid_sizes)++;
    }

    return -EINVAL;
}
static int atomisp_enum_framesizes_crop(struct atomisp_device *isp,
struct v4l2_frmsizeenum *fsize)
{
struct atomisp_input_subdev *input = &isp->inputs[isp->asd.input_curr];
struct v4l2_rect active = input->active_rect;
struct v4l2_rect native = input->native_rect;
int ret, valid_sizes = 0;
ret = atomisp_enum_framesizes_crop_inner(isp, fsize, &active, &native, &valid_sizes);
if (ret == 0)
return 0;
if (!input->binning_support)
return -EINVAL;
active.width /= 2;
active.height /= 2;
native.width /= 2;
native.height /= 2;
return atomisp_enum_framesizes_crop_inner(isp, fsize, &active, &native, &valid_sizes);
}
static int atomisp_enum_framesizes(struct file *file, void *priv,
struct v4l2_frmsizeenum *fsize)
{
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
struct atomisp_input_subdev *input = &isp->inputs[asd->input_curr];
struct v4l2_subdev_frame_size_enum fse = {
.index = fsize->index,
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
.code = input->code,
};
int ret;
if (input->crop_support)
return atomisp_enum_framesizes_crop(isp, fsize);
ret = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
if (ret)
return ret;
fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
fsize->discrete.width = fse.max_width - pad_w;
fsize->discrete.height = fse.max_height - pad_h;
return 0;
}
/*
 * VIDIOC_ENUM_FRAMEINTERVALS handler: forward the query to the current
 * sensor subdev and translate its answer into a discrete interval.
 */
static int atomisp_enum_frameintervals(struct file *file, void *priv,
				       struct v4l2_frmivalenum *fival)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
	struct v4l2_subdev_frame_interval_enum fie = {
		.code = atomisp_in_fmt_conv[0].code,
		.index = fival->index,
		.width = fival->width,
		.height = fival->height,
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	int err;

	err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
			       pad, enum_frame_interval, NULL, &fie);
	if (err)
		return err;

	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = fie.interval;

	return 0;
}
/*
 * VIDIOC_ENUM_FMT handler: report the f->index-th atomisp output format,
 * skipping raw formats which the capture pipeline cannot produce.
 */
static int atomisp_enum_fmt_cap(struct file *file, void *fh,
				struct v4l2_fmtdesc *f)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
	struct v4l2_subdev_mbus_code_enum code = {
		.which = V4L2_SUBDEV_FORMAT_ACTIVE,
	};
	struct v4l2_subdev *camera = isp->inputs[asd->input_curr].camera;
	unsigned int i, matched = 0;
	int ret;

	if (!camera) {
		dev_err(isp->dev, "%s(): camera is NULL, device is %s\n",
			__func__, vdev->name);
		return -EINVAL;
	}

	ret = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code);
	if (ret == -ENOIOCTLCMD)
		dev_warn(isp->dev,
			 "enum_mbus_code pad op not supported by %s. Please fix your sensor driver!\n",
			 camera->name);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(atomisp_output_fmts); i++) {
		const struct atomisp_format_bridge *fmt = &atomisp_output_fmts[i];

		/*
		 * Raw sensor formats are not valid capture outputs, skip them.
		 *
		 * FIXME: fix the pipeline to allow sensor format too.
		 */
		if (fmt->sh_fmt == IA_CSS_FRAME_FORMAT_RAW)
			continue;

		/* Count matches until we reach the requested index. */
		if (matched++ < f->index)
			continue;

		strscpy(f->description, fmt->description,
			sizeof(f->description));
		f->pixelformat = fmt->pixelformat;
		return 0;
	}

	return -EINVAL;
}
/* This function looks up the closest available resolution. */
static int atomisp_try_fmt_cap(struct file *file, void *fh,
			       struct v4l2_format *f)
{
	struct atomisp_device *isp = video_get_drvdata(video_devdata(file));

	/* Pure query: delegate to the core try_fmt helper, no state changed. */
	return atomisp_try_fmt(isp, &f->fmt.pix, NULL, NULL);
}
static int atomisp_g_fmt_cap(struct file *file, void *fh,
			     struct v4l2_format *f)
{
	struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(video_devdata(file));

	f->fmt.pix = pipe->pix;

	/* If s_fmt was issued before, just return whatever was previously set */
	if (f->fmt.pix.sizeimage)
		return 0;

	/*
	 * No format negotiated yet: ask try_fmt for the largest supported
	 * YUV420 size by requesting an intentionally oversized resolution.
	 */
	f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
	f->fmt.pix.width = 10000;
	f->fmt.pix.height = 10000;

	return atomisp_try_fmt_cap(file, fh, f);
}
/*
 * atomisp_alloc_css_stat_bufs - pre-allocate statistics buffers for a stream
 * @asd:       sub-device the buffers belong to
 * @stream_id: CSS stream the buffers will be queued to
 *
 * Allocates 3A, DIS and per-type metadata statistics buffers and adds them
 * to the respective free lists. Each category is skipped when its list
 * already holds buffers or when the matching feature is disabled in the
 * current grid info.
 *
 * Returns 0 on success, -ENOMEM on failure; on failure all buffers on the
 * statistics lists (including pre-existing ones) are freed.
 */
int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
				uint16_t stream_id)
{
	struct atomisp_device *isp = asd->isp;
	struct atomisp_s3a_buf *s3a_buf = NULL, *_s3a_buf;
	struct atomisp_dis_buf *dis_buf = NULL, *_dis_buf;
	struct atomisp_metadata_buf *md_buf = NULL, *_md_buf;
	int count;
	struct ia_css_dvs_grid_info *dvs_grid_info =
	    atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
	unsigned int i;

	/* 3A statistics buffers, only when the 3A grid is enabled */
	if (list_empty(&asd->s3a_stats) &&
	    asd->params.curr_grid_info.s3a_grid.enable) {
		count = ATOMISP_CSS_Q_DEPTH +
			ATOMISP_S3A_BUF_QUEUE_DEPTH_FOR_HAL;
		dev_dbg(isp->dev, "allocating %d 3a buffers\n", count);
		while (count--) {
			s3a_buf = kzalloc(sizeof(struct atomisp_s3a_buf), GFP_KERNEL);
			if (!s3a_buf)
				goto error;
			if (atomisp_css_allocate_stat_buffers(
				asd, stream_id, s3a_buf, NULL, NULL)) {
				kfree(s3a_buf);
				goto error;
			}
			list_add_tail(&s3a_buf->list, &asd->s3a_stats);
		}
	}

	/* DIS statistics buffers, only when the DVS grid is enabled */
	if (list_empty(&asd->dis_stats) && dvs_grid_info &&
	    dvs_grid_info->enable) {
		count = ATOMISP_CSS_Q_DEPTH + 1;
		dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
		while (count--) {
			dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
			if (!dis_buf)
				goto error;
			if (atomisp_css_allocate_stat_buffers(
				asd, stream_id, NULL, dis_buf, NULL)) {
				kfree(dis_buf);
				goto error;
			}
			list_add_tail(&dis_buf->list, &asd->dis_stats);
		}
	}

	/* Metadata buffers, one batch per metadata type */
	for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
		if (list_empty(&asd->metadata[i]) &&
		    list_empty(&asd->metadata_ready[i]) &&
		    list_empty(&asd->metadata_in_css[i])) {
			count = ATOMISP_CSS_Q_DEPTH +
				ATOMISP_METADATA_QUEUE_DEPTH_FOR_HAL;
			dev_dbg(isp->dev, "allocating %d metadata buffers for type %d\n",
				count, i);
			while (count--) {
				md_buf = kzalloc(sizeof(struct atomisp_metadata_buf),
						 GFP_KERNEL);
				if (!md_buf)
					goto error;
				if (atomisp_css_allocate_stat_buffers(
					asd, stream_id, NULL, NULL, md_buf)) {
					kfree(md_buf);
					goto error;
				}
				list_add_tail(&md_buf->list, &asd->metadata[i]);
			}
		}
	}
	return 0;

error:
	/* Unwind: drain every statistics list, not just the new buffers */
	dev_err(isp->dev, "failed to allocate statistics buffers\n");

	list_for_each_entry_safe(dis_buf, _dis_buf, &asd->dis_stats, list) {
		atomisp_css_free_dis_buffer(dis_buf);
		list_del(&dis_buf->list);
		kfree(dis_buf);
	}

	list_for_each_entry_safe(s3a_buf, _s3a_buf, &asd->s3a_stats, list) {
		atomisp_css_free_3a_buffer(s3a_buf);
		list_del(&s3a_buf->list);
		kfree(s3a_buf);
	}

	for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
		list_for_each_entry_safe(md_buf, _md_buf, &asd->metadata[i],
					 list) {
			atomisp_css_free_metadata_buffer(md_buf);
			list_del(&md_buf->list);
			kfree(md_buf);
		}
	}
	return -ENOMEM;
}
/*
* FIXME the abuse of buf->reserved2 in the qbuf and dqbuf wrappers comes from
* the original atomisp buffer handling and should be replaced with proper V4L2
* per frame parameters use.
*
* Once this is fixed these wrappers can be removed, replacing them with direct
* calls to vb2_ioctl_[d]qbuf().
*/
/*
 * Wrapper around vb2_ioctl_qbuf(): capture an optional per-frame parameter
 * id that userspace smuggles in through buf->reserved2 before handing the
 * buffer to vb2 (see the FIXME comment about the reserved2 abuse above).
 */
static int atomisp_qbuf_wrapper(struct file *file, void *fh, struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);

	if (buf->index >= vdev->queue->num_buffers)
		return -EINVAL;

	if (buf->reserved2 & ATOMISP_BUFFER_HAS_PER_FRAME_SETTING) {
		/* this buffer will have a per-frame parameter */
		pipe->frame_request_config_id[buf->index] = buf->reserved2 &
			~ATOMISP_BUFFER_HAS_PER_FRAME_SETTING;
		dev_dbg(isp->dev,
			"This buffer requires per_frame setting which has isp_config_id %d\n",
			pipe->frame_request_config_id[buf->index]);
	} else {
		pipe->frame_request_config_id[buf->index] = 0;
	}

	return vb2_ioctl_qbuf(file, fh, buf);
}
/*
 * Wrapper around vb2_ioctl_dqbuf(): after a successful dequeue, pack the
 * frame status (low 16 bits) and the exposure id (bits 31:16) into
 * buf->reserved, and return the applied isp_config_id in buf->reserved2
 * (see the FIXME comment about the reserved2 abuse above).
 */
static int atomisp_dqbuf_wrapper(struct file *file, void *fh, struct v4l2_buffer *buf)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
	struct atomisp_sub_device *asd = pipe->asd;
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct ia_css_frame *frame;
	struct vb2_buffer *vb;
	int ret;

	ret = vb2_ioctl_dqbuf(file, fh, buf);
	if (ret)
		return ret;

	vb = pipe->vb_queue.bufs[buf->index];
	frame = vb_to_frame(vb);

	buf->reserved = asd->frame_status[buf->index];

	/*
	 * Hack:
	 * Currently frame_status in the enum type which takes no more lower
	 * 8 bit.
	 * use bit[31:16] for exp_id as it is only in the range of 1~255
	 */
	buf->reserved &= 0x0000ffff;
	if (!(buf->flags & V4L2_BUF_FLAG_ERROR))
		/* Fix: exp_id must be shifted into bits 31:16 (the dev_dbg
		 * below and the comment above read it from there). */
		buf->reserved |= (frame->exp_id << 16);
	buf->reserved2 = pipe->frame_config_id[buf->index];

	dev_dbg(isp->dev,
		"dqbuf buffer %d (%s) with exp_id %d, isp_config_id %d\n",
		buf->index, vdev->name, buf->reserved >> 16, buf->reserved2);
	return 0;
}
/* Input system HW workaround */
/* Input system address translation corrupts burst during */
/* invalidate. SW workaround for this is to set burst length */
/* manually to 128 in case of 13MPx snapshot and to 1 otherwise. */
static void atomisp_dma_burst_len_cfg(struct atomisp_sub_device *asd)
{
struct v4l2_mbus_framefmt *sink;
sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
V4L2_SUBDEV_FORMAT_ACTIVE,
ATOMISP_SUBDEV_PAD_SINK);
if (sink->width * sink->height >= 4096 * 3072)
atomisp_css2_hw_store_32(DMA_BURST_SIZE_REG, 0x7F);
else
atomisp_css2_hw_store_32(DMA_BURST_SIZE_REG, 0x00);
}
/*
 * vb2 start_streaming operation: configure the ISP, start the CSS
 * pipeline, enable the CSI interface and finally start the sensor stream.
 *
 * Returns 0 on success or a negative errno; when CSS start fails the
 * already-queued buffers are handed back to vb2 in the QUEUED state.
 */
int atomisp_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct atomisp_video_pipe *pipe = vq_to_pipe(vq);
	struct atomisp_sub_device *asd = pipe->asd;
	struct atomisp_device *isp = asd->isp;
	struct pci_dev *pdev = to_pci_dev(isp->dev);
	unsigned long irqflags;
	int ret;

	dev_dbg(isp->dev, "Start stream\n");

	mutex_lock(&isp->mutex);

	ret = atomisp_pipe_check(pipe, false);
	if (ret)
		goto out_unlock;

	/* Input system HW workaround */
	atomisp_dma_burst_len_cfg(asd);

	/* Invalidate caches. FIXME: should flush only necessary buffers */
	wbinvd();

	/* Push parameter changes accumulated while the stream was stopped */
	if (asd->params.css_update_params_needed) {
		atomisp_apply_css_parameters(asd, &asd->params.css_param);
		if (asd->params.css_param.update_flag.dz_config)
			asd->params.config.dz_config = &asd->params.css_param.dz_config;
		atomisp_css_update_isp_params(asd);
		asd->params.css_update_params_needed = false;
		memset(&asd->params.css_param.update_flag, 0,
		       sizeof(struct atomisp_parameters));
	}
	asd->params.dvs_6axis = NULL;

	ret = atomisp_css_start(asd);
	if (ret) {
		/* CSS never took the buffers; give them back as QUEUED */
		atomisp_flush_video_pipe(pipe, VB2_BUF_STATE_QUEUED, true);
		goto out_unlock;
	}

	spin_lock_irqsave(&isp->lock, irqflags);
	asd->streaming = true;
	spin_unlock_irqrestore(&isp->lock, irqflags);
	/* -1 so the first SOF/sequence event counts from 0 */
	atomic_set(&asd->sof_count, -1);
	atomic_set(&asd->sequence, -1);
	atomic_set(&asd->sequence_temp, -1);

	asd->params.dis_proj_data_valid = false;
	asd->latest_preview_exp_id = 0;
	asd->postview_exp_id = 1;
	asd->preview_exp_id = 1;

	/* handle per_frame_setting parameter and buffers */
	atomisp_handle_parameter_and_buffer(pipe);

	atomisp_qbuffers_to_css(asd);

	if (isp->flash) {
		asd->params.num_flash_frames = 0;
		asd->params.flash_state = ATOMISP_FLASH_IDLE;
		atomisp_setup_flash(asd);
	}

	atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF,
			       atomisp_css_valid_sof(isp));
	atomisp_csi2_configure(asd);

	if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, false) < 0)
		dev_dbg(isp->dev, "DFS auto mode failed!\n");

	/* Enable the CSI interface on ANN B0/K0 */
	if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
					    ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
		pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL,
				      isp->saved_regs.csi_control | MRFLD_PCI_CSI_CONTROL_CSI_READY);
	}

	/* stream on the sensor */
	ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
			       video, s_stream, 1);
	if (ret) {
		dev_err(isp->dev, "Starting sensor stream failed: %d\n", ret);
		spin_lock_irqsave(&isp->lock, irqflags);
		asd->streaming = false;
		spin_unlock_irqrestore(&isp->lock, irqflags);
		ret = -EINVAL;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&isp->mutex);
	return ret;
}
/*
 * vb2 stop_streaming operation: wait for CSS to return the buffers it
 * owns, stop the sensor and the CSS pipeline, then reset the ISP (HW
 * workaround) and recreate the CSS streams for the next stream-on.
 */
void atomisp_stop_streaming(struct vb2_queue *vq)
{
	struct atomisp_video_pipe *pipe = vq_to_pipe(vq);
	struct atomisp_sub_device *asd = pipe->asd;
	struct atomisp_device *isp = asd->isp;
	struct pci_dev *pdev = to_pci_dev(isp->dev);
	unsigned long flags;
	int ret;

	dev_dbg(isp->dev, "Stop stream\n");

	mutex_lock(&isp->mutex);
	/*
	 * There is no guarantee that the buffers queued to / owned by the ISP
	 * will properly be returned to the queue when stopping. Set a flag to
	 * avoid new buffers getting queued and then wait for all the current
	 * buffers to finish.
	 */
	pipe->stopping = true;
	mutex_unlock(&isp->mutex);
	/* wait max 1 second */
	ret = wait_event_timeout(pipe->vb_queue.done_wq,
				 atomisp_buffers_in_css(pipe) == 0, HZ);
	mutex_lock(&isp->mutex);
	pipe->stopping = false;
	if (ret == 0)
		dev_warn(isp->dev, "Warning timeout waiting for CSS to return buffers\n");

	spin_lock_irqsave(&isp->lock, flags);
	asd->streaming = false;
	spin_unlock_irqrestore(&isp->lock, flags);

	atomisp_clear_css_buffer_counters(asd);
	atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false);

	atomisp_css_stop(asd, false);

	/* Any buffers CSS did not return are flagged as errored */
	atomisp_flush_video_pipe(pipe, VB2_BUF_STATE_ERROR, true);

	atomisp_subdev_cleanup_pending_events(asd);

	ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
			       video, s_stream, 0);
	if (ret)
		dev_warn(isp->dev, "Stopping sensor stream failed: %d\n", ret);

	if (isp->flash) {
		asd->params.num_flash_frames = 0;
		asd->params.flash_state = ATOMISP_FLASH_IDLE;
	}

	/* Disable the CSI interface on ANN B0/K0 */
	if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
					    ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
		pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL,
				      isp->saved_regs.csi_control & ~MRFLD_PCI_CSI_CONTROL_CSI_READY);
	}

	if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, false))
		dev_warn(isp->dev, "DFS failed.\n");

	/*
	 * ISP work around, need to reset ISP to allow next stream on to work.
	 * Streams have already been destroyed by atomisp_css_stop().
	 * Disable PUNIT/ISP acknowlede/handshake - SRSE=3 and then reset.
	 */
	pci_write_config_dword(pdev, PCI_I_CONTROL,
			       isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
	atomisp_reset(isp);

	/* Streams were destroyed by atomisp_css_stop(), recreate them. */
	ret = atomisp_create_pipes_stream(&isp->asd);
	if (ret)
		dev_warn(isp->dev, "Recreating streams failed: %d\n", ret);

	mutex_unlock(&isp->mutex);
}
/*
* To get the current value of a control.
* applications initialize the id field of a struct v4l2_control and
* call this ioctl with a pointer to this structure
*/
/*
 * VIDIOC_G_CTRL handler: sensor-owned controls are forwarded to the
 * current sensor's control handler, ISP-owned controls are read through
 * the atomisp helpers. Controls not in ci_v4l2_controls[] are rejected.
 */
static int atomisp_g_ctrl(struct file *file, void *fh,
			  struct v4l2_control *control)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
	struct atomisp_device *isp = video_get_drvdata(vdev);
	int i, ret = -EINVAL;

	/* Only controls listed in the driver's table are supported */
	for (i = 0; i < ctrls_num; i++) {
		if (ci_v4l2_controls[i].id == control->id) {
			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	switch (control->id) {
	/* Sensor-side controls: forward to the sensor's ctrl handler */
	case V4L2_CID_IRIS_ABSOLUTE:
	case V4L2_CID_EXPOSURE_ABSOLUTE:
	case V4L2_CID_2A_STATUS:
	case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
	case V4L2_CID_EXPOSURE:
	case V4L2_CID_EXPOSURE_AUTO:
	case V4L2_CID_SCENE_MODE:
	case V4L2_CID_ISO_SENSITIVITY:
	case V4L2_CID_ISO_SENSITIVITY_AUTO:
	case V4L2_CID_CONTRAST:
	case V4L2_CID_SATURATION:
	case V4L2_CID_SHARPNESS:
	case V4L2_CID_3A_LOCK:
	case V4L2_CID_EXPOSURE_ZONE_NUM:
	case V4L2_CID_TEST_PATTERN:
	case V4L2_CID_TEST_PATTERN_COLOR_R:
	case V4L2_CID_TEST_PATTERN_COLOR_GR:
	case V4L2_CID_TEST_PATTERN_COLOR_GB:
	case V4L2_CID_TEST_PATTERN_COLOR_B:
		return v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->
				   ctrl_handler, control);
	/* ISP-side controls: handled by the atomisp core */
	case V4L2_CID_COLORFX:
		ret = atomisp_color_effect(asd, 0, &control->value);
		break;
	case V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION:
		ret = atomisp_bad_pixel(asd, 0, &control->value);
		break;
	case V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC:
		ret = atomisp_gdc_cac(asd, 0, &control->value);
		break;
	case V4L2_CID_ATOMISP_VIDEO_STABLIZATION:
		ret = atomisp_video_stable(asd, 0, &control->value);
		break;
	case V4L2_CID_ATOMISP_FIXED_PATTERN_NR:
		ret = atomisp_fixed_pattern(asd, 0, &control->value);
		break;
	case V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION:
		ret = atomisp_false_color(asd, 0, &control->value);
		break;
	case V4L2_CID_ATOMISP_LOW_LIGHT:
		ret = atomisp_low_light(asd, 0, &control->value);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/*
* To change the value of a control.
* applications initialize the id and value fields of a struct v4l2_control
* and call this ioctl.
*/
/*
 * VIDIOC_S_CTRL handler: counterpart of atomisp_g_ctrl() — sensor-owned
 * controls go to the sensor's control handler, ISP-owned controls are set
 * through the atomisp helpers. Unknown controls are rejected.
 */
static int atomisp_s_ctrl(struct file *file, void *fh,
			  struct v4l2_control *control)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
	struct atomisp_device *isp = video_get_drvdata(vdev);
	int i, ret = -EINVAL;

	/* Only controls listed in the driver's table are supported */
	for (i = 0; i < ctrls_num; i++) {
		if (ci_v4l2_controls[i].id == control->id) {
			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	switch (control->id) {
	/* Sensor-side controls: forward to the sensor's ctrl handler */
	case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE:
	case V4L2_CID_EXPOSURE:
	case V4L2_CID_EXPOSURE_AUTO:
	case V4L2_CID_EXPOSURE_AUTO_PRIORITY:
	case V4L2_CID_SCENE_MODE:
	case V4L2_CID_ISO_SENSITIVITY:
	case V4L2_CID_ISO_SENSITIVITY_AUTO:
	case V4L2_CID_POWER_LINE_FREQUENCY:
	case V4L2_CID_EXPOSURE_METERING:
	case V4L2_CID_CONTRAST:
	case V4L2_CID_SATURATION:
	case V4L2_CID_SHARPNESS:
	case V4L2_CID_3A_LOCK:
	case V4L2_CID_COLORFX_CBCR:
	case V4L2_CID_TEST_PATTERN:
	case V4L2_CID_TEST_PATTERN_COLOR_R:
	case V4L2_CID_TEST_PATTERN_COLOR_GR:
	case V4L2_CID_TEST_PATTERN_COLOR_GB:
	case V4L2_CID_TEST_PATTERN_COLOR_B:
		return v4l2_s_ctrl(NULL,
				   isp->inputs[asd->input_curr].camera->
				   ctrl_handler, control);
	/* ISP-side controls: handled by the atomisp core */
	case V4L2_CID_COLORFX:
		ret = atomisp_color_effect(asd, 1, &control->value);
		break;
	case V4L2_CID_ATOMISP_BAD_PIXEL_DETECTION:
		ret = atomisp_bad_pixel(asd, 1, &control->value);
		break;
	case V4L2_CID_ATOMISP_POSTPROCESS_GDC_CAC:
		ret = atomisp_gdc_cac(asd, 1, &control->value);
		break;
	case V4L2_CID_ATOMISP_VIDEO_STABLIZATION:
		ret = atomisp_video_stable(asd, 1, &control->value);
		break;
	case V4L2_CID_ATOMISP_FIXED_PATTERN_NR:
		ret = atomisp_fixed_pattern(asd, 1, &control->value);
		break;
	case V4L2_CID_ATOMISP_FALSE_COLOR_CORRECTION:
		ret = atomisp_false_color(asd, 1, &control->value);
		break;
	case V4L2_CID_REQUEST_FLASH:
		ret = atomisp_flash_enable(asd, control->value);
		break;
	case V4L2_CID_ATOMISP_LOW_LIGHT:
		ret = atomisp_low_light(asd, 1, &control->value);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
/*
* To query the attributes of a control.
* applications set the id field of a struct v4l2_queryctrl and call the
* this ioctl with a pointer to this structure. The driver fills
* the rest of the structure.
*/
/*
 * VIDIOC_QUERYCTRL handler: focus controls are forwarded to the motor
 * (ISP2401) or the sensor; other controls are answered from the driver's
 * ci_v4l2_controls[] table. V4L2_CTRL_FLAG_NEXT_CTRL enumeration is not
 * supported for table-based controls.
 */
static int atomisp_queryctl(struct file *file, void *fh,
			    struct v4l2_queryctrl *qc)
{
	int i, ret = -EINVAL;
	struct video_device *vdev = video_devdata(file);
	struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
	struct atomisp_device *isp = video_get_drvdata(vdev);

	switch (qc->id) {
	case V4L2_CID_FOCUS_ABSOLUTE:
	case V4L2_CID_FOCUS_RELATIVE:
	case V4L2_CID_FOCUS_STATUS:
		if (!IS_ISP2401) {
			return v4l2_queryctrl(isp->inputs[asd->input_curr].camera->
					      ctrl_handler, qc);
		}
		/* ISP2401 */
		if (isp->motor)
			return v4l2_queryctrl(isp->motor->ctrl_handler, qc);
		else
			return v4l2_queryctrl(isp->inputs[asd->input_curr].
					      camera->ctrl_handler, qc);
	}

	/* NEXT_CTRL-style enumeration is not supported by the table lookup */
	if (qc->id & V4L2_CTRL_FLAG_NEXT_CTRL)
		return ret;

	for (i = 0; i < ctrls_num; i++) {
		if (ci_v4l2_controls[i].id == qc->id) {
			memcpy(qc, &ci_v4l2_controls[i],
			       sizeof(struct v4l2_queryctrl));
			qc->reserved[0] = 0;
			ret = 0;
			break;
		}
	}
	if (ret != 0)
		qc->flags = V4L2_CTRL_FLAG_DISABLED;

	return ret;
}
/*
 * Read multiple controls, routing each id to its owner: the sensor
 * (exposure & test pattern), the focus motor, the flash subdev, or the
 * ISP (zoom, skip-frames). Stops at the first failure and records its
 * index in c->error_idx.
 */
static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh,
				      struct v4l2_ext_controls *c)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct v4l2_subdev *motor;
	struct v4l2_control ctrl;
	int i;
	int ret = 0;

	/* On ISP2400 each input has its own motor, ISP2401 has one global */
	if (!IS_ISP2401)
		motor = isp->inputs[asd->input_curr].motor;
	else
		motor = isp->motor;

	for (i = 0; i < c->count; i++) {
		ctrl.id = c->controls[i].id;
		ctrl.value = c->controls[i].value;
		switch (ctrl.id) {
		case V4L2_CID_EXPOSURE_ABSOLUTE:
		case V4L2_CID_EXPOSURE_AUTO:
		case V4L2_CID_IRIS_ABSOLUTE:
		case V4L2_CID_3A_LOCK:
		case V4L2_CID_TEST_PATTERN:
		case V4L2_CID_TEST_PATTERN_COLOR_R:
		case V4L2_CID_TEST_PATTERN_COLOR_GR:
		case V4L2_CID_TEST_PATTERN_COLOR_GB:
		case V4L2_CID_TEST_PATTERN_COLOR_B:
			/*
			 * Exposure related control will be handled by sensor
			 * driver
			 */
			ret =
			    v4l2_g_ctrl(isp->inputs[asd->input_curr].camera->
					ctrl_handler, &ctrl);
			break;
		case V4L2_CID_FOCUS_ABSOLUTE:
		case V4L2_CID_FOCUS_RELATIVE:
		case V4L2_CID_FOCUS_STATUS:
		case V4L2_CID_FOCUS_AUTO:
			if (motor)
				ret = v4l2_g_ctrl(motor->ctrl_handler, &ctrl);
			break;
		case V4L2_CID_FLASH_STATUS:
		case V4L2_CID_FLASH_INTENSITY:
		case V4L2_CID_FLASH_TORCH_INTENSITY:
		case V4L2_CID_FLASH_INDICATOR_INTENSITY:
		case V4L2_CID_FLASH_TIMEOUT:
		case V4L2_CID_FLASH_STROBE:
		case V4L2_CID_FLASH_MODE:
		case V4L2_CID_FLASH_STATUS_REGISTER:
			if (isp->flash)
				ret =
				    v4l2_g_ctrl(isp->flash->ctrl_handler,
						&ctrl);
			break;
		case V4L2_CID_ZOOM_ABSOLUTE:
			ret = atomisp_digital_zoom(asd, 0, &ctrl.value);
			break;
		case V4L2_CID_G_SKIP_FRAMES:
			ret = v4l2_subdev_call(
				  isp->inputs[asd->input_curr].camera,
				  sensor, g_skip_frames, (u32 *)&ctrl.value);
			break;
		default:
			ret = -EINVAL;
		}

		if (ret) {
			c->error_idx = i;
			break;
		}
		c->controls[i].value = ctrl.value;
	}
	return ret;
}
/* This ioctl allows the application to get multiple controls by class */
static int atomisp_g_ext_ctrls(struct file *file, void *fh,
struct v4l2_ext_controls *c)
{
struct v4l2_control ctrl;
int i, ret = 0;
/*
* input_lock is not need for the Camera related IOCTLs
* The input_lock downgrade the FPS of 3A
*/
ret = atomisp_camera_g_ext_ctrls(file, fh, c);
if (ret != -EINVAL)
return ret;
for (i = 0; i < c->count; i++) {
ctrl.id = c->controls[i].id;
ctrl.value = c->controls[i].value;
ret = atomisp_g_ctrl(file, fh, &ctrl);
c->controls[i].value = ctrl.value;
if (ret) {
c->error_idx = i;
break;
}
}
return ret;
}
/*
 * Set multiple controls, routing each id to its owner: the sensor, the
 * focus motor, the flash subdev, the ISP digital zoom, or (default) the
 * sub-device's own control handler. Stops at the first failure and
 * records its index in c->error_idx.
 */
static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh,
				      struct v4l2_ext_controls *c)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct v4l2_subdev *motor;
	struct v4l2_control ctrl;
	int i;
	int ret = 0;

	/* On ISP2400 each input has its own motor, ISP2401 has one global */
	if (!IS_ISP2401)
		motor = isp->inputs[asd->input_curr].motor;
	else
		motor = isp->motor;

	for (i = 0; i < c->count; i++) {
		struct v4l2_ctrl *ctr;

		ctrl.id = c->controls[i].id;
		ctrl.value = c->controls[i].value;
		switch (ctrl.id) {
		case V4L2_CID_EXPOSURE_ABSOLUTE:
		case V4L2_CID_EXPOSURE_AUTO:
		case V4L2_CID_EXPOSURE_METERING:
		case V4L2_CID_IRIS_ABSOLUTE:
		case V4L2_CID_VCM_TIMING:
		case V4L2_CID_VCM_SLEW:
		case V4L2_CID_3A_LOCK:
		case V4L2_CID_TEST_PATTERN:
		case V4L2_CID_TEST_PATTERN_COLOR_R:
		case V4L2_CID_TEST_PATTERN_COLOR_GR:
		case V4L2_CID_TEST_PATTERN_COLOR_GB:
		case V4L2_CID_TEST_PATTERN_COLOR_B:
			ret = v4l2_s_ctrl(NULL,
					  isp->inputs[asd->input_curr].camera->
					  ctrl_handler, &ctrl);
			break;
		case V4L2_CID_FOCUS_ABSOLUTE:
		case V4L2_CID_FOCUS_RELATIVE:
		case V4L2_CID_FOCUS_STATUS:
		case V4L2_CID_FOCUS_AUTO:
			/* Prefer the dedicated motor, fall back to the sensor */
			if (motor)
				ret = v4l2_s_ctrl(NULL, motor->ctrl_handler,
						  &ctrl);
			else
				ret = v4l2_s_ctrl(NULL,
						  isp->inputs[asd->input_curr].
						  camera->ctrl_handler, &ctrl);
			break;
		case V4L2_CID_FLASH_STATUS:
		case V4L2_CID_FLASH_INTENSITY:
		case V4L2_CID_FLASH_TORCH_INTENSITY:
		case V4L2_CID_FLASH_INDICATOR_INTENSITY:
		case V4L2_CID_FLASH_TIMEOUT:
		case V4L2_CID_FLASH_STROBE:
		case V4L2_CID_FLASH_MODE:
		case V4L2_CID_FLASH_STATUS_REGISTER:
			if (isp->flash) {
				ret =
				    v4l2_s_ctrl(NULL, isp->flash->ctrl_handler,
						&ctrl);
				/*
				 * When flash mode is changed we need to reset
				 * flash state
				 */
				if (ctrl.id == V4L2_CID_FLASH_MODE) {
					asd->params.flash_state =
					    ATOMISP_FLASH_IDLE;
					asd->params.num_flash_frames = 0;
				}
			}
			break;
		case V4L2_CID_ZOOM_ABSOLUTE:
			ret = atomisp_digital_zoom(asd, 1, &ctrl.value);
			break;
		default:
			ctr = v4l2_ctrl_find(&asd->ctrl_handler, ctrl.id);
			if (ctr)
				ret = v4l2_ctrl_s_ctrl(ctr, ctrl.value);
			else
				ret = -EINVAL;
		}

		if (ret) {
			c->error_idx = i;
			break;
		}
		c->controls[i].value = ctrl.value;
	}
	return ret;
}
/* This ioctl allows the application to set multiple controls by class */
static int atomisp_s_ext_ctrls(struct file *file, void *fh,
struct v4l2_ext_controls *c)
{
struct v4l2_control ctrl;
int i, ret = 0;
/*
* input_lock is not need for the Camera related IOCTLs
* The input_lock downgrade the FPS of 3A
*/
ret = atomisp_camera_s_ext_ctrls(file, fh, c);
if (ret != -EINVAL)
return ret;
for (i = 0; i < c->count; i++) {
ctrl.id = c->controls[i].id;
ctrl.value = c->controls[i].value;
ret = atomisp_s_ctrl(file, fh, &ctrl);
c->controls[i].value = ctrl.value;
if (ret) {
c->error_idx = i;
break;
}
}
return ret;
}
/*
* vidioc_g/s_param are used to switch isp running mode
*/
static int atomisp_g_parm(struct file *file, void *fh,
			  struct v4l2_streamparm *parm)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;

	if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		dev_err(isp->dev, "unsupported v4l2 buf type\n");
		return -EINVAL;
	}

	/* Report the current ISP run mode as the capture mode. */
	parm->parm.capture.capturemode = asd->run_mode->val;

	return 0;
}
/*
 * VIDIOC_S_PARM handler: capturemode selects the ISP run mode; the
 * special CI_MODE_NONE instead forwards the requested frame interval to
 * the sensor and flags high-speed mode for rates above 30 fps.
 */
static int atomisp_s_parm(struct file *file, void *fh,
			  struct v4l2_streamparm *parm)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
	int mode;
	int rval;
	int fps;

	if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		dev_err(isp->dev, "unsupported v4l2 buf type\n");
		return -EINVAL;
	}

	asd->high_speed_mode = false;
	switch (parm->parm.capture.capturemode) {
	case CI_MODE_NONE: {
		/* No mode change: only set the sensor frame interval */
		struct v4l2_subdev_frame_interval fi = {0};

		fi.interval = parm->parm.capture.timeperframe;

		rval = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
					video, s_frame_interval, &fi);
		if (!rval)
			parm->parm.capture.timeperframe = fi.interval;

		/* > 30 fps is treated as high-speed capture */
		if (fi.interval.numerator != 0) {
			fps = fi.interval.denominator / fi.interval.numerator;
			if (fps > 30)
				asd->high_speed_mode = true;
		}

		return rval == -ENOIOCTLCMD ? 0 : rval;
	}
	case CI_MODE_VIDEO:
		mode = ATOMISP_RUN_MODE_VIDEO;
		break;
	case CI_MODE_STILL_CAPTURE:
		mode = ATOMISP_RUN_MODE_STILL_CAPTURE;
		break;
	case CI_MODE_PREVIEW:
		mode = ATOMISP_RUN_MODE_PREVIEW;
		break;
	default:
		return -EINVAL;
	}

	rval = v4l2_ctrl_s_ctrl(asd->run_mode, mode);

	return rval == -ENOIOCTLCMD ? 0 : rval;
}
/*
 * Handler for the driver-private ATOMISP_IOC_* ioctls. Each command maps
 * onto one atomisp helper (the 0/1 second argument selects get vs. set);
 * a few sensor-specific commands are forwarded to the sensor subdev.
 */
static long atomisp_vidioc_default(struct file *file, void *fh,
				   bool valid_prio, unsigned int cmd, void *arg)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd;
	int err;

	switch (cmd) {
	case ATOMISP_IOC_S_SENSOR_RUNMODE:
		/* Only implemented for ISP2401 */
		if (IS_ISP2401)
			err = atomisp_set_sensor_runmode(asd, arg);
		else
			err = -EINVAL;
		break;

	case ATOMISP_IOC_G_XNR:
		err = atomisp_xnr(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_XNR:
		err = atomisp_xnr(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_NR:
		err = atomisp_nr(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_NR:
		err = atomisp_nr(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_TNR:
		err = atomisp_tnr(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_TNR:
		err = atomisp_tnr(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_BLACK_LEVEL_COMP:
		err = atomisp_black_level(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_BLACK_LEVEL_COMP:
		err = atomisp_black_level(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_EE:
		err = atomisp_ee(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_EE:
		err = atomisp_ee(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_DIS_STAT:
		err = atomisp_get_dis_stat(asd, arg);
		break;

	case ATOMISP_IOC_G_DVS2_BQ_RESOLUTIONS:
		err = atomisp_get_dvs2_bq_resolutions(asd, arg);
		break;

	case ATOMISP_IOC_S_DIS_COEFS:
		/* New coefficients take effect on the next stream start */
		err = atomisp_css_cp_dvs2_coefs(asd, arg,
						&asd->params.css_param, true);
		if (!err && arg)
			asd->params.css_update_params_needed = true;
		break;

	case ATOMISP_IOC_S_DIS_VECTOR:
		err = atomisp_cp_dvs_6axis_config(asd, arg,
						  &asd->params.css_param, true);
		if (!err && arg)
			asd->params.css_update_params_needed = true;
		break;

	case ATOMISP_IOC_G_ISP_PARM:
		err = atomisp_param(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_ISP_PARM:
		err = atomisp_param(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_3A_STAT:
		err = atomisp_3a_stat(asd, 0, arg);
		break;

	case ATOMISP_IOC_G_ISP_GAMMA:
		err = atomisp_gamma(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_ISP_GAMMA:
		err = atomisp_gamma(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_ISP_GDC_TAB:
		err = atomisp_gdc_cac_table(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_ISP_GDC_TAB:
		err = atomisp_gdc_cac_table(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_ISP_MACC:
		err = atomisp_macc_table(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_ISP_MACC:
		err = atomisp_macc_table(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_ISP_BAD_PIXEL_DETECTION:
		err = atomisp_bad_pixel_param(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_ISP_BAD_PIXEL_DETECTION:
		err = atomisp_bad_pixel_param(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_ISP_FALSE_COLOR_CORRECTION:
		err = atomisp_false_color_param(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_ISP_FALSE_COLOR_CORRECTION:
		err = atomisp_false_color_param(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_ISP_CTC:
		err = atomisp_ctc(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_ISP_CTC:
		err = atomisp_ctc(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_ISP_WHITE_BALANCE:
		err = atomisp_white_balance_param(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_ISP_WHITE_BALANCE:
		err = atomisp_white_balance_param(asd, 1, arg);
		break;

	case ATOMISP_IOC_G_3A_CONFIG:
		err = atomisp_3a_config_param(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_3A_CONFIG:
		err = atomisp_3a_config_param(asd, 1, arg);
		break;

	case ATOMISP_IOC_S_ISP_FPN_TABLE:
		err = atomisp_fixed_pattern_table(asd, arg);
		break;

	case ATOMISP_IOC_S_EXPOSURE:
		/* Forwarded to the sensor driver */
		err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
				       core, ioctl, cmd, arg);
		break;

	case ATOMISP_IOC_S_ISP_SHD_TAB:
		err = atomisp_set_shading_table(asd, arg);
		break;

	case ATOMISP_IOC_G_ISP_GAMMA_CORRECTION:
		err = atomisp_gamma_correction(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_ISP_GAMMA_CORRECTION:
		err = atomisp_gamma_correction(asd, 1, arg);
		break;

	case ATOMISP_IOC_S_PARAMETERS:
		err = atomisp_set_parameters(vdev, arg);
		break;

	case ATOMISP_IOC_EXT_ISP_CTRL:
		/* Forwarded to the sensor driver */
		err = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
				       core, ioctl, cmd, arg);
		break;

	case ATOMISP_IOC_EXP_ID_UNLOCK:
		err = atomisp_exp_id_unlock(asd, arg);
		break;

	case ATOMISP_IOC_EXP_ID_CAPTURE:
		err = atomisp_exp_id_capture(asd, arg);
		break;

	case ATOMISP_IOC_S_ENABLE_DZ_CAPT_PIPE:
		err = atomisp_enable_dz_capt_pipe(asd, arg);
		break;

	case ATOMISP_IOC_G_FORMATS_CONFIG:
		err = atomisp_formats(asd, 0, arg);
		break;

	case ATOMISP_IOC_S_FORMATS_CONFIG:
		err = atomisp_formats(asd, 1, arg);
		break;

	case ATOMISP_IOC_INJECT_A_FAKE_EVENT:
		err = atomisp_inject_a_fake_event(asd, arg);
		break;

	case ATOMISP_IOC_S_ARRAY_RESOLUTION:
		err = atomisp_set_array_res(asd, arg);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
/*
 * V4L2 ioctl operations for the atomisp video nodes. Buffer handling is
 * mostly delegated to vb2, except for qbuf/dqbuf which go through the
 * reserved2 wrappers above.
 */
const struct v4l2_ioctl_ops atomisp_ioctl_ops = {
	.vidioc_querycap = atomisp_querycap,
	.vidioc_enum_input = atomisp_enum_input,
	.vidioc_g_input = atomisp_g_input,
	.vidioc_s_input = atomisp_s_input,
	.vidioc_queryctrl = atomisp_queryctl,
	.vidioc_s_ctrl = atomisp_s_ctrl,
	.vidioc_g_ctrl = atomisp_g_ctrl,
	.vidioc_s_ext_ctrls = atomisp_s_ext_ctrls,
	.vidioc_g_ext_ctrls = atomisp_g_ext_ctrls,
	.vidioc_enum_framesizes = atomisp_enum_framesizes,
	.vidioc_enum_frameintervals = atomisp_enum_frameintervals,
	.vidioc_enum_fmt_vid_cap = atomisp_enum_fmt_cap,
	.vidioc_try_fmt_vid_cap = atomisp_try_fmt_cap,
	.vidioc_g_fmt_vid_cap = atomisp_g_fmt_cap,
	.vidioc_s_fmt_vid_cap = atomisp_s_fmt_cap,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = atomisp_qbuf_wrapper,
	.vidioc_dqbuf = atomisp_dqbuf_wrapper,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_default = atomisp_vidioc_default,
	.vidioc_s_parm = atomisp_s_parm,
	.vidioc_g_parm = atomisp_g_parm,
};
| linux-master | drivers/staging/media/atomisp/pci/atomisp_ioctl.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <media/v4l2-subdev.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include "../../include/linux/atomisp_platform.h"
#include "../../include/linux/atomisp_gmin_platform.h"
#define MAX_SUBDEVS 8
enum clock_rate {
VLV2_CLK_XTAL_25_0MHz = 0,
VLV2_CLK_PLL_19P2MHZ = 1
};
#define CLK_RATE_19_2MHZ 19200000
#define CLK_RATE_25_0MHZ 25000000
/* Valid clock number range from 0 to 5 */
#define MAX_CLK_COUNT 5
/* X-Powers AXP288 register set */
#define ALDO1_SEL_REG 0x28
#define ALDO1_CTRL3_REG 0x13
#define ALDO1_2P8V 0x16
#define ALDO1_CTRL3_SHIFT 0x05
#define ELDO_CTRL_REG 0x12
#define ELDO1_SEL_REG 0x19
#define ELDO1_1P6V 0x12
#define ELDO1_CTRL_SHIFT 0x00
#define ELDO2_SEL_REG 0x1a
#define ELDO2_1P8V 0x16
#define ELDO2_CTRL_SHIFT 0x01
/* TI SND9039 PMIC register set */
#define LDO9_REG 0x49
#define LDO10_REG 0x4a
#define LDO11_REG 0x4b
#define LDO_2P8V_ON 0x2f /* 0x2e selects 2.85V ... */
#define LDO_2P8V_OFF 0x2e /* ... bottom bit is "enabled" */
#define LDO_1P8V_ON 0x59 /* 0x58 selects 1.80V ... */
#define LDO_1P8V_OFF 0x58 /* ... bottom bit is "enabled" */
/* CRYSTAL COVE PMIC register set */
#define CRYSTAL_BYT_1P8V_REG 0x5d
#define CRYSTAL_BYT_2P8V_REG 0x66
#define CRYSTAL_CHT_1P8V_REG 0x57
#define CRYSTAL_CHT_2P8V_REG 0x5d
#define CRYSTAL_ON 0x63
#define CRYSTAL_OFF 0x62
/*
 * Per-sensor bookkeeping for the gmin platform glue: clock source,
 * reset/powerdown GPIOs, power rails (regulator- or PMIC-backed),
 * CSI bus configuration, and cached on/off state for each resource.
 * One entry per registered sensor subdev (see gmin_subdevs[] below).
 */
struct gmin_subdev {
	struct v4l2_subdev *subdev;
	enum clock_rate clock_src;
	struct clk *pmc_clk;
	struct gpio_desc *gpio0;	/* sensor reset / power GPIO, via ACPI */
	struct gpio_desc *gpio1;	/* second sensor GPIO, via ACPI */
	struct regulator *v1p8_reg;
	struct regulator *v2p8_reg;
	struct regulator *v1p2_reg;
	struct regulator *v2p8_vcm_reg;
	enum atomisp_camera_port csi_port;
	unsigned int csi_lanes;
	enum atomisp_input_format csi_fmt;
	enum atomisp_bayer_order csi_bayer;
	/* cached states so redundant on/off requests become no-ops */
	bool clock_on;
	bool v1p8_on;
	bool v2p8_on;
	bool v1p2_on;
	bool v2p8_vcm_on;
	/* legacy GPIO numbers for externally-regulated rails; -1 if unused */
	int v1p8_gpio;
	int v2p8_gpio;
	u8 pwm_i2c_addr;	/* I2C address of the detected PMIC */
	/* For PMIC AXP */
	int eldo1_sel_reg, eldo1_1p6v, eldo1_ctrl_shift;
	int eldo2_sel_reg, eldo2_1p8v, eldo2_ctrl_shift;
};
static struct gmin_subdev gmin_subdevs[MAX_SUBDEVS];
/* ACPI HIDs for the PMICs that could be used by this driver */
#define PMIC_ACPI_AXP "INT33F4" /* XPower AXP288 PMIC */
#define PMIC_ACPI_TI "INT33F5" /* Dollar Cove TI PMIC */
#define PMIC_ACPI_CRYSTALCOVE "INT33FD" /* Crystal Cove PMIC */
#define PMIC_PLATFORM_TI "intel_soc_pmic_chtdc_ti"
static enum {
PMIC_UNSET = 0,
PMIC_REGULATOR,
PMIC_AXP,
PMIC_TI,
PMIC_CRYSTALCOVE
} pmic_id;
static const char *pmic_name[] = {
[PMIC_UNSET] = "ACPI device PM",
[PMIC_REGULATOR] = "regulator driver",
[PMIC_AXP] = "XPower AXP288 PMIC",
[PMIC_TI] = "Dollar Cove TI PMIC",
[PMIC_CRYSTALCOVE] = "Crystal Cove PMIC",
};
static DEFINE_MUTEX(gmin_regulator_mutex);
static int gmin_v1p8_enable_count;
static int gmin_v2p8_enable_count;
/* The atomisp uses type==0 for the end-of-list marker, so leave space. */
static struct intel_v4l2_subdev_table pdata_subdevs[MAX_SUBDEVS + 1];
static const struct atomisp_platform_data pdata = {
.subdevs = pdata_subdevs,
};
static LIST_HEAD(vcm_devices);
static DEFINE_MUTEX(vcm_lock);
static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev);
/*
 * Return the (static, module-lifetime) platform data holding the table
 * of registered sensor subdevs. Never NULL.
 */
const struct atomisp_platform_data *atomisp_get_platform_data(void)
{
	return &pdata;
}
EXPORT_SYMBOL_GPL(atomisp_get_platform_data);
/*
 * Register a sensor's v4l2_subdev in the pdata.subdevs[] table, taking
 * its CSI port/lane configuration from the matching gmin_subdev entry
 * (which must already exist; see gmin_camera_platform_data()).
 *
 * Returns 0 on success, -ENOMEM if the table is full, -ENODEV if no
 * gmin_subdev entry was set up for this subdev.
 */
int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
				struct camera_sensor_platform_data *plat_data,
				enum intel_v4l2_subdev_type type)
{
	int i;
	struct gmin_subdev *gs;
	struct i2c_client *client = v4l2_get_subdevdata(subdev);
	struct acpi_device *adev = ACPI_COMPANION(&client->dev);

	dev_info(&client->dev, "register atomisp i2c module type %d\n", type);

	/* The windows driver model (and thus most BIOSes by default)
	 * uses ACPI runtime power management for camera devices, but
	 * we don't. Disable it, or else the rails will be needlessly
	 * tickled during suspend/resume. This has caused power and
	 * performance issues on multiple devices.
	 */

	/*
	 * Turn off the device before disabling ACPI power resources
	 * (the sensor driver has already probed it at this point).
	 * This avoids leaking the reference count of the (possibly shared)
	 * ACPI power resources which were enabled/referenced before probe().
	 */
	acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
	adev->power.flags.power_resources = 0;

	/* Find the first free slot; type == 0 marks an unused entry. */
	for (i = 0; i < MAX_SUBDEVS; i++)
		if (!pdata.subdevs[i].type)
			break;

	/*
	 * Table full. Note: the previous check tested
	 * pdata.subdevs[i].type with i == MAX_SUBDEVS, i.e. the
	 * always-zero end-of-list sentinel, so a full table was never
	 * detected and the sentinel entry would have been overwritten.
	 */
	if (i >= MAX_SUBDEVS)
		return -ENOMEM;

	/* Note subtlety of initialization order: at the point where
	 * this registration API gets called, the platform data
	 * callbacks have probably already been invoked, so the
	 * gmin_subdev struct is already initialized for us.
	 */
	gs = find_gmin_subdev(subdev);
	if (!gs)
		return -ENODEV;

	pdata.subdevs[i].type = type;
	pdata.subdevs[i].port = gs->csi_port;
	pdata.subdevs[i].lanes = gs->csi_lanes;
	pdata.subdevs[i].subdev = subdev;
	return 0;
}
EXPORT_SYMBOL_GPL(atomisp_register_i2c_module);
/*
 * Tear down all bookkeeping for a subdev: compact it out of the
 * pdata.subdevs[] list and release the GPIOs/regulators held by its
 * gmin_subdevs[] entry. Safe to call with sd == NULL. Always returns 0.
 */
int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd)
{
	int i, j;

	if (!sd)
		return 0;

	for (i = 0; i < MAX_SUBDEVS; i++) {
		if (pdata.subdevs[i].subdev == sd) {
			/*
			 * Shift the tail down by one; j runs up to
			 * MAX_SUBDEVS inclusive, which is valid because
			 * pdata_subdevs has MAX_SUBDEVS + 1 entries, and
			 * this keeps the zeroed end-of-list sentinel intact.
			 */
			for (j = i + 1; j <= MAX_SUBDEVS; j++)
				pdata.subdevs[j - 1] = pdata.subdevs[j];
		}
		/* gmin_subdevs[] is indexed independently of pdata.subdevs[] */
		if (gmin_subdevs[i].subdev == sd) {
			if (gmin_subdevs[i].gpio0)
				gpiod_put(gmin_subdevs[i].gpio0);
			gmin_subdevs[i].gpio0 = NULL;
			if (gmin_subdevs[i].gpio1)
				gpiod_put(gmin_subdevs[i].gpio1);
			gmin_subdevs[i].gpio1 = NULL;
			if (pmic_id == PMIC_REGULATOR) {
				regulator_put(gmin_subdevs[i].v1p8_reg);
				regulator_put(gmin_subdevs[i].v2p8_reg);
				regulator_put(gmin_subdevs[i].v1p2_reg);
				regulator_put(gmin_subdevs[i].v2p8_vcm_reg);
			}
			gmin_subdevs[i].subdev = NULL;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(atomisp_gmin_remove_subdev);
struct gmin_cfg_var {
const char *name, *val;
};
static struct gmin_cfg_var ffrd8_vars[] = {
{ "INTCF1B:00_ImxId", "0x134" },
{ "INTCF1B:00_CsiPort", "1" },
{ "INTCF1B:00_CsiLanes", "4" },
{ "INTCF1B:00_CamClk", "0" },
{},
};
/* Cribbed from MCG defaults in the mt9m114 driver, not actually verified
* vs. T100 hardware
*/
static struct gmin_cfg_var t100_vars[] = {
{ "INT33F0:00_CsiPort", "0" },
{ "INT33F0:00_CsiLanes", "1" },
{ "INT33F0:00_CamClk", "1" },
{},
};
static struct gmin_cfg_var mrd7_vars[] = {
{"INT33F8:00_CamType", "1"},
{"INT33F8:00_CsiPort", "1"},
{"INT33F8:00_CsiLanes", "2"},
{"INT33F8:00_CsiFmt", "13"},
{"INT33F8:00_CsiBayer", "0"},
{"INT33F8:00_CamClk", "0"},
{"INT33F9:00_CamType", "1"},
{"INT33F9:00_CsiPort", "0"},
{"INT33F9:00_CsiLanes", "1"},
{"INT33F9:00_CsiFmt", "13"},
{"INT33F9:00_CsiBayer", "0"},
{"INT33F9:00_CamClk", "1"},
{},
};
static struct gmin_cfg_var ecs7_vars[] = {
{"INT33BE:00_CsiPort", "1"},
{"INT33BE:00_CsiLanes", "2"},
{"INT33BE:00_CsiFmt", "13"},
{"INT33BE:00_CsiBayer", "2"},
{"INT33BE:00_CamClk", "0"},
{"INT33F0:00_CsiPort", "0"},
{"INT33F0:00_CsiLanes", "1"},
{"INT33F0:00_CsiFmt", "13"},
{"INT33F0:00_CsiBayer", "0"},
{"INT33F0:00_CamClk", "1"},
{"gmin_V2P8GPIO", "402"},
{},
};
static struct gmin_cfg_var i8880_vars[] = {
{"XXOV2680:00_CsiPort", "1"},
{"XXOV2680:00_CsiLanes", "1"},
{"XXOV2680:00_CamClk", "0"},
{"XXGC0310:00_CsiPort", "0"},
{"XXGC0310:00_CsiLanes", "1"},
{"XXGC0310:00_CamClk", "1"},
{},
};
/*
* Surface 3 does not describe CsiPort/CsiLanes in both DSDT and EFI.
*/
static struct gmin_cfg_var surface3_vars[] = {
{"APTA0330:00_CsiPort", "0"},
{"APTA0330:00_CsiLanes", "2"},
{"OVTI8835:00_CsiPort", "1"},
{"OVTI8835:00_CsiLanes", "4"},
{},
};
static struct gmin_cfg_var lenovo_ideapad_miix_310_vars[] = {
/* _DSM contains the wrong CsiPort! */
{ "OVTI2680:01_CsiPort", "0" },
{}
};
static const struct dmi_system_id gmin_vars[] = {
/*
* These DMI IDs were present when the atomisp driver was merged into
* drivers/staging and it is unclear if they are really necessary.
*/
{
.ident = "BYT-T FFD8",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
},
.driver_data = ffrd8_vars,
},
{
.ident = "T100TA",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "T100TA"),
},
.driver_data = t100_vars,
},
{
.ident = "MRD7",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "TABLET"),
DMI_MATCH(DMI_BOARD_VERSION, "MRD 7"),
},
.driver_data = mrd7_vars,
},
{
.ident = "ST70408",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "ST70408"),
},
.driver_data = ecs7_vars,
},
{
.ident = "VTA0803",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VTA0803"),
},
.driver_data = i8880_vars,
},
/* Later added DMI ids, these are confirmed to really be necessary! */
{
.ident = "Surface 3",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "Surface 3"),
},
.driver_data = surface3_vars,
},
{
.ident = "Lenovo Ideapad Miix 310",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10"),
},
.driver_data = lenovo_ideapad_miix_310_vars,
},
{}
};
#define GMIN_CFG_VAR_EFI_GUID EFI_GUID(0xecb54cd9, 0xe5ae, 0x4fdc, \
0xa9, 0x71, 0xe8, 0x77, \
0x75, 0x60, 0x68, 0xf7)
static const guid_t atomisp_dsm_guid = GUID_INIT(0xdc2f6c4f, 0x045b, 0x4f1d,
0x97, 0xb9, 0x88, 0x2a,
0x68, 0x60, 0xa4, 0xbe);
#define CFG_VAR_NAME_MAX 64
#define GMIN_PMC_CLK_NAME 14 /* "pmc_plt_clk_[0..5]" */
static char gmin_pmc_clk_name[GMIN_PMC_CLK_NAME];
/*
 * Look up an I2C client by ACPI HID. On success *client is set and also
 * returned; on failure (no ACPI device, no bound bus device, or the bus
 * device is not an I2C client) returns NULL.
 */
static struct i2c_client *gmin_i2c_dev_exists(struct device *dev, char *name,
					      struct i2c_client **client)
{
	struct acpi_device *adev;
	struct device *d;

	adev = acpi_dev_get_first_match_dev(name, NULL, -1);
	if (!adev)
		return NULL;

	d = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
	acpi_dev_put(adev);
	if (!d)
		return NULL;

	*client = i2c_verify_client(d);
	put_device(d);
	/*
	 * i2c_verify_client() returns NULL if d is not an i2c_client;
	 * the dev_dbg() below would then dereference a NULL pointer.
	 */
	if (!*client)
		return NULL;

	dev_dbg(dev, "found '%s' at address 0x%02x, adapter %d\n",
		(*client)->name, (*client)->addr, (*client)->adapter->nr);
	return *client;
}
/*
 * Masked register write to the PMIC, routed through the intel_soc_pmic
 * MIPI PMIC sequence helper. Returns 0 on success or a negative errno.
 */
static int gmin_i2c_write(struct device *dev, u16 i2c_addr, u8 reg,
			  u32 value, u32 mask)
{
	int err;

	/*
	 * FIXME: Right now, the intel_pmic driver just write values
	 * directly at the regmap, instead of properly implementing
	 * i2c_transfer() mechanism. Let's use the same interface here,
	 * as otherwise we may face issues.
	 */
	dev_dbg(dev,
		"I2C write, addr: 0x%02x, reg: 0x%02x, value: 0x%02x, mask: 0x%02x\n",
		i2c_addr, reg, value, mask);

	err = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg, value, mask);
	if (err == -EOPNOTSUPP)
		dev_err(dev,
			"ACPI didn't mapped the OpRegion needed to access I2C address 0x%02x.\n"
			"Need to compile the kernel using CONFIG_*_PMIC_OPREGION settings\n",
			i2c_addr);

	return err;
}
/*
 * Walk the device's ACPI _PR0 (power resources for D0) package looking
 * for a resource named "CLKn" and return n, or -1 if none is found.
 * The result identifies which pmc_plt_clk feeds the sensor.
 */
static int atomisp_get_acpi_power(struct device *dev)
{
	char name[5];	/* one 4-char ACPI name segment + NUL */
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer b_name = { sizeof(name), name };
	union acpi_object *package, *element;
	acpi_handle handle = ACPI_HANDLE(dev);
	acpi_handle rhandle;
	acpi_status status;
	int clock_num = -1;
	int i;

	status = acpi_evaluate_object(handle, "_PR0", NULL, &buffer);
	if (!ACPI_SUCCESS(status))
		return -1;

	package = buffer.pointer;

	if (!buffer.length || !package
	    || package->type != ACPI_TYPE_PACKAGE
	    || !package->package.count)
		goto fail;

	for (i = 0; i < package->package.count; i++) {
		element = &package->package.elements[i];

		/* _PR0 entries are references to power-resource objects */
		if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
			continue;

		rhandle = element->reference.handle;
		if (!rhandle)
			goto fail;

		acpi_get_name(rhandle, ACPI_SINGLE_NAME, &b_name);

		dev_dbg(dev, "Found PM resource '%s'\n", name);
		if (strlen(name) == 4 && !strncmp(name, "CLK", 3)) {
			/*
			 * NOTE(review): only '0'..'4' accepted here while
			 * MAX_CLK_COUNT is 5 ("clock number range 0 to 5")
			 * — confirm whether CLK5 should also match.
			 */
			if (name[3] >= '0' && name[3] <= '4')
				clock_num = name[3] - '0';
#if 0
			/*
			 * We could abort here, but let's parse all resources,
			 * as this is helpful for debugging purposes
			 */
			if (clock_num >= 0)
				break;
#endif
		}
	}

fail:
	ACPI_FREE(buffer.pointer);

	return clock_num;
}
/*
 * Detect which PMIC is present (probing TI, then AXP, then Crystal Cove,
 * falling back to the generic regulator framework) and return its I2C
 * address, or 0 when no PMIC I2C device was found. Detection runs once;
 * later calls return the cached result.
 */
static u8 gmin_get_pmic_id_and_addr(struct device *dev)
{
	static u8 cached_addr;
	struct i2c_client *power = NULL;

	if (pmic_id)
		return cached_addr;

	/* Probe order matters: first match wins. */
	if (gmin_i2c_dev_exists(dev, PMIC_ACPI_TI, &power))
		pmic_id = PMIC_TI;
	else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_AXP, &power))
		pmic_id = PMIC_AXP;
	else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_CRYSTALCOVE, &power))
		pmic_id = PMIC_CRYSTALCOVE;
	else
		pmic_id = PMIC_REGULATOR;

	cached_addr = power ? power->addr : 0;
	return cached_addr;
}
/*
 * Run PMIC detection for this subdev's I2C device, log the result, and
 * return the PMIC's I2C address (0 if power is managed another way).
 */
static int gmin_detect_pmic(struct v4l2_subdev *subdev)
{
	struct i2c_client *client = v4l2_get_subdevdata(subdev);
	struct device *dev = &client->dev;
	u8 addr = gmin_get_pmic_id_and_addr(dev);

	dev_info(dev, "gmin: power management provided via %s (i2c addr 0x%02x)\n",
		 pmic_name[pmic_id], addr);
	return addr;
}
/*
 * Populate a gmin_subdev entry for gs->subdev: resolve CSI port/lanes,
 * ACPI GPIOs, external-rail GPIO overrides, the pmc_plt_clk feeding the
 * sensor, and PMIC-specific regulator handles. Configuration values come
 * from ACPI (_PR0, GPIOs), EFI variables, and DMI quirks, in that spirit
 * of fallback. Returns 0 on success or a negative errno.
 */
static int gmin_subdev_add(struct gmin_subdev *gs)
{
	struct i2c_client *client = v4l2_get_subdevdata(gs->subdev);
	struct device *dev = &client->dev;
	struct acpi_device *adev = ACPI_COMPANION(dev);
	int ret, default_val, clock_num = -1;

	dev_info(dev, "%s: ACPI path is %pfw\n", __func__, dev_fwnode(dev));

	/*WA:CHT requires XTAL clock as PLL is not stable.*/
	gs->clock_src = gmin_get_var_int(dev, false, "ClkSrc",
					 VLV2_CLK_PLL_19P2MHZ);

	/*
	 * Get ACPI _PR0 derived clock here already because it is used
	 * to determine the csi_port default.
	 */
	if (acpi_device_power_manageable(adev))
		clock_num = atomisp_get_acpi_power(dev);

	/* Compare clock to CsiPort 1 pmc-clock used in the CHT/BYT reference designs */
	if (IS_ISP2401)
		default_val = clock_num == 4 ? 1 : 0;
	else
		default_val = clock_num == 0 ? 1 : 0;

	gs->csi_port = gmin_get_var_int(dev, false, "CsiPort", default_val);
	gs->csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1);

	/* GPIOs 0/1 come from the sensor's ACPI entry; absence is not fatal */
	gs->gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
	if (IS_ERR(gs->gpio0))
		gs->gpio0 = NULL;
	else
		dev_info(dev, "will handle gpio0 via ACPI\n");

	gs->gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
	if (IS_ERR(gs->gpio1))
		gs->gpio1 = NULL;
	else
		dev_info(dev, "will handle gpio1 via ACPI\n");

	/*
	 * Those are used only when there is an external regulator apart
	 * from the PMIC that would be providing power supply, like on the
	 * two cases below:
	 *
	 * The ECS E7 board drives camera 2.8v from an external regulator
	 * instead of the PMIC. There's a gmin_CamV2P8 config variable
	 * that specifies the GPIO to handle this particular case,
	 * but this needs a broader architecture for handling camera power.
	 *
	 * The CHT RVP board drives camera 1.8v from an* external regulator
	 * instead of the PMIC just like ECS E7 board.
	 */
	gs->v1p8_gpio = gmin_get_var_int(dev, true, "V1P8GPIO", -1);
	gs->v2p8_gpio = gmin_get_var_int(dev, true, "V2P8GPIO", -1);

	/*
	 * FIXME:
	 *
	 * The ACPI handling code checks for the _PR? tables in order to
	 * know what is required to switch the device from power state
	 * D0 (_PR0) up to D3COLD (_PR3).
	 *
	 * The adev->flags.power_manageable is set to true if the device
	 * has a _PR0 table, which can be checked by calling
	 * acpi_device_power_manageable(adev).
	 *
	 * However, this only says that the device can be set to power off
	 * mode.
	 *
	 * At least on the DSDT tables we've seen so far, there's no _PR3,
	 * nor _PS3 (which would have a somewhat similar effect).
	 * So, using ACPI for power management won't work, except if adding
	 * an ACPI override logic somewhere.
	 *
	 * So, at least for the existing devices we know, the check below
	 * will always be false.
	 */
	if (acpi_device_can_wakeup(adev) &&
	    acpi_device_can_poweroff(adev)) {
		dev_info(dev,
			 "gmin: power management provided via device PM\n");
		return 0;
	}

	/*
	 * The code below is here due to backward compatibility with devices
	 * whose ACPI BIOS may not contain everything that would be needed
	 * in order to set clocks and do power management.
	 */

	/*
	 * According with :
	 * https://github.com/projectceladon/hardware-intel-kernelflinger/blob/master/doc/fastboot.md
	 *
	 * The "CamClk" EFI var is set via fastboot on some Android devices,
	 * and seems to contain the number of the clock used to feed the
	 * sensor.
	 *
	 * On systems with a proper ACPI table, this is given via the _PR0
	 * power resource table. The logic below should first check if there
	 * is a power resource already, falling back to the EFI vars detection
	 * otherwise.
	 */

	/* If getting the clock from _PR0 above failed, fall-back to EFI and/or DMI match */
	if (clock_num < 0)
		clock_num = gmin_get_var_int(dev, false, "CamClk", 0);

	if (clock_num < 0 || clock_num > MAX_CLK_COUNT) {
		dev_err(dev, "Invalid clock number\n");
		return -EINVAL;
	}

	snprintf(gmin_pmc_clk_name, sizeof(gmin_pmc_clk_name),
		 "%s_%d", "pmc_plt_clk", clock_num);

	gs->pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name);
	if (IS_ERR(gs->pmc_clk)) {
		ret = PTR_ERR(gs->pmc_clk);
		dev_err(dev, "Failed to get clk from %s: %d\n", gmin_pmc_clk_name, ret);
		return ret;
	}
	dev_info(dev, "Will use CLK%d (%s)\n", clock_num, gmin_pmc_clk_name);

	/*
	 * The firmware might enable the clock at
	 * boot (this information may or may not
	 * be reflected in the enable clock register).
	 * To change the rate we must disable the clock
	 * first to cover these cases. Due to common
	 * clock framework restrictions that do not allow
	 * to disable a clock that has not been enabled,
	 * we need to enable the clock first.
	 */
	ret = clk_prepare_enable(gs->pmc_clk);
	if (!ret)
		clk_disable_unprepare(gs->pmc_clk);

	switch (pmic_id) {
	case PMIC_REGULATOR:
		gs->v1p8_reg = regulator_get(dev, "V1P8SX");
		gs->v2p8_reg = regulator_get(dev, "V2P8SX");
		gs->v1p2_reg = regulator_get(dev, "V1P2A");
		gs->v2p8_vcm_reg = regulator_get(dev, "VPROG4B");

		/* Note: ideally we would initialize v[12]p8_on to the
		 * output of regulator_is_enabled(), but sadly that
		 * API is broken with the current drivers, returning
		 * "1" for a regulator that will then emit a
		 * "unbalanced disable" WARNing if we try to disable
		 * it.
		 */
		break;
	case PMIC_AXP:
		/* Rail registers/shifts are overridable via EFI variables */
		gs->eldo1_1p6v = gmin_get_var_int(dev, false,
						  "eldo1_1p8v",
						  ELDO1_1P6V);
		gs->eldo1_sel_reg = gmin_get_var_int(dev, false,
						     "eldo1_sel_reg",
						     ELDO1_SEL_REG);
		gs->eldo1_ctrl_shift = gmin_get_var_int(dev, false,
							"eldo1_ctrl_shift",
							ELDO1_CTRL_SHIFT);
		gs->eldo2_1p8v = gmin_get_var_int(dev, false,
						  "eldo2_1p8v",
						  ELDO2_1P8V);
		gs->eldo2_sel_reg = gmin_get_var_int(dev, false,
						     "eldo2_sel_reg",
						     ELDO2_SEL_REG);
		gs->eldo2_ctrl_shift = gmin_get_var_int(dev, false,
							"eldo2_ctrl_shift",
							ELDO2_CTRL_SHIFT);
		break;
	default:
		break;
	}

	return 0;
}
static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev)
{
int i;
for (i = 0; i < MAX_SUBDEVS; i++)
if (gmin_subdevs[i].subdev == subdev)
return &gmin_subdevs[i];
return NULL;
}
static struct gmin_subdev *find_free_gmin_subdev_slot(void)
{
unsigned int i;
for (i = 0; i < MAX_SUBDEVS; i++)
if (gmin_subdevs[i].subdev == NULL)
return &gmin_subdevs[i];
return NULL;
}
/*
 * Program one AXP288 rail: write the voltage selection register, then
 * set or clear the rail's enable bit in the control register.
 */
static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs,
			     int sel_reg, u8 setting,
			     int ctrl_reg, int shift, bool on)
{
	int err;

	/* Voltage select first, so the rail comes up at the right level. */
	err = gmin_i2c_write(dev, gs->pwm_i2c_addr, sel_reg, setting, 0xff);
	if (err)
		return err;

	/* Then toggle only this rail's enable bit. */
	return gmin_i2c_write(dev, gs->pwm_i2c_addr, ctrl_reg,
			      on ? 1 << shift : 0, 1 << shift);
}
/*
* Some boards contain a hw-bug where turning eldo2 back on after having turned
* it off causes the CPLM3218 ambient-light-sensor on the image-sensor's I2C bus
* to crash, hanging the bus. Do not turn eldo2 off on these systems.
*/
static const struct dmi_system_id axp_leave_eldo2_on_ids[] = {
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"),
DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab duo W1 10.1 (VT4)"),
},
},
{ }
};
/*
 * Bring up both AXP288 1.8v rails in sequence: eldo2 first, then after a
 * short settle delay eldo1. Returns 0 or the first failing write's errno.
 */
static int axp_v1p8_on(struct device *dev, struct gmin_subdev *gs)
{
	int ret;

	ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v,
				ELDO_CTRL_REG, gs->eldo2_ctrl_shift, true);
	if (ret)
		return ret;

	/*
	 * This sleep comes out of the gc2235 driver, which is the
	 * only one I currently see that wants to set both 1.8v rails.
	 */
	usleep_range(110, 150);

	ret = axp_regulator_set(dev, gs, gs->eldo1_sel_reg, gs->eldo1_1p6v,
				ELDO_CTRL_REG, gs->eldo1_ctrl_shift, true);
	return ret;
}
/*
 * Power down the AXP288 1.8v rails: eldo1 always, eldo2 only on boards
 * where turning it back on is safe (see axp_leave_eldo2_on_ids quirk).
 */
static int axp_v1p8_off(struct device *dev, struct gmin_subdev *gs)
{
	int ret;

	ret = axp_regulator_set(dev, gs, gs->eldo1_sel_reg, gs->eldo1_1p6v,
				ELDO_CTRL_REG, gs->eldo1_ctrl_shift, false);
	if (ret)
		return ret;

	/* DMI-quirked boards keep eldo2 on to avoid hanging the I2C bus */
	if (dmi_check_system(axp_leave_eldo2_on_ids))
		return 0;

	ret = axp_regulator_set(dev, gs, gs->eldo2_sel_reg, gs->eldo2_1p8v,
				ELDO_CTRL_REG, gs->eldo2_ctrl_shift, false);
	return ret;
}
/* Drive the sensor's gpio0 line; -EINVAL for an unknown subdev. */
static int gmin_gpio0_ctrl(struct v4l2_subdev *subdev, int on)
{
	struct gmin_subdev *gs = find_gmin_subdev(subdev);

	if (!gs)
		return -EINVAL;

	gpiod_set_value(gs->gpio0, on);
	return 0;
}
/* Drive the sensor's gpio1 line; -EINVAL for an unknown subdev. */
static int gmin_gpio1_ctrl(struct v4l2_subdev *subdev, int on)
{
	struct gmin_subdev *gs = find_gmin_subdev(subdev);

	if (!gs)
		return -EINVAL;

	gpiod_set_value(gs->gpio1, on);
	return 0;
}
/*
 * Switch the 1.2v rail. Only the regulator-framework backend is
 * implemented; other PMICs return -EINVAL.
 */
static int gmin_v1p2_ctrl(struct v4l2_subdev *subdev, int on)
{
	struct gmin_subdev *gs = find_gmin_subdev(subdev);

	/* Unknown subdev or already in the requested state: nothing to do */
	if (!gs || gs->v1p2_on == on)
		return 0;

	gs->v1p2_on = on;

	/* use regulator for PMIC */
	if (!gs->v1p2_reg)
		return -EINVAL;	/* TODO:v1p2 may need to extend to other PMICs */

	return on ? regulator_enable(gs->v1p2_reg)
		  : regulator_disable(gs->v1p2_reg);
}
/*
 * Switch the shared 1.8v rail. The rail may be backed by a dedicated
 * GPIO, a regulator, or PMIC register writes (AXP/TI/Crystal Cove).
 * A global enable count under gmin_regulator_mutex keeps the rail up
 * while any sensor still needs it.
 */
static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
{
	struct gmin_subdev *gs = find_gmin_subdev(subdev);
	int ret;
	int value;
	int reg;

	/* No-op for unknown subdevs or when already in the requested state */
	if (!gs || gs->v1p8_on == on)
		return 0;

	/* Lazily claim the external-regulator GPIO on first use */
	if (gs->v1p8_gpio >= 0) {
		pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n",
			gs->v1p8_gpio);
		ret = gpio_request(gs->v1p8_gpio, "camera_v1p8_en");
		if (!ret)
			ret = gpio_direction_output(gs->v1p8_gpio, 0);
		if (ret)
			pr_err("V1P8 GPIO initialization failed\n");
	}

	gs->v1p8_on = on;

	ret = 0;
	mutex_lock(&gmin_regulator_mutex);
	/* Only the first enable / last disable touches the hardware */
	if (on) {
		gmin_v1p8_enable_count++;
		if (gmin_v1p8_enable_count > 1)
			goto out; /* Already on */
	} else {
		gmin_v1p8_enable_count--;
		if (gmin_v1p8_enable_count > 0)
			goto out; /* Still needed */
	}

	if (gs->v1p8_gpio >= 0)
		gpio_set_value(gs->v1p8_gpio, on);

	if (gs->v1p8_reg) {
		regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000);
		if (on)
			ret = regulator_enable(gs->v1p8_reg);
		else
			ret = regulator_disable(gs->v1p8_reg);

		goto out;
	}

	/* No regulator handle: drive the rail through the detected PMIC */
	switch (pmic_id) {
	case PMIC_AXP:
		if (on)
			ret = axp_v1p8_on(subdev->dev, gs);
		else
			ret = axp_v1p8_off(subdev->dev, gs);
		break;
	case PMIC_TI:
		value = on ? LDO_1P8V_ON : LDO_1P8V_OFF;

		ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
				     LDO10_REG, value, 0xff);
		break;
	case PMIC_CRYSTALCOVE:
		if (IS_ISP2401)
			reg = CRYSTAL_CHT_1P8V_REG;
		else
			reg = CRYSTAL_BYT_1P8V_REG;

		value = on ? CRYSTAL_ON : CRYSTAL_OFF;

		ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
				     reg, value, 0xff);
		break;
	default:
		dev_err(subdev->dev, "Couldn't set power mode for v1p8\n");
		ret = -EINVAL;
	}

out:
	mutex_unlock(&gmin_regulator_mutex);
	return ret;
}
/*
 * Switch the shared 2.8v rail; same structure as gmin_v1p8_ctrl()
 * (GPIO / regulator / PMIC backends, refcounted under
 * gmin_regulator_mutex).
 *
 * NOTE(review): unlike v1p8, the GPIO request here happens before the
 * "already in requested state" check, so it also runs on redundant
 * calls — confirm whether this ordering difference is intentional.
 */
static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
{
	struct gmin_subdev *gs = find_gmin_subdev(subdev);
	int ret;
	int value;
	int reg;

	if (WARN_ON(!gs))
		return -ENODEV;

	/* Lazily claim the external-regulator GPIO */
	if (gs->v2p8_gpio >= 0) {
		pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n",
			gs->v2p8_gpio);
		ret = gpio_request(gs->v2p8_gpio, "camera_v2p8");
		if (!ret)
			ret = gpio_direction_output(gs->v2p8_gpio, 0);
		if (ret)
			pr_err("V2P8 GPIO initialization failed\n");
	}

	if (gs->v2p8_on == on)
		return 0;
	gs->v2p8_on = on;

	ret = 0;
	mutex_lock(&gmin_regulator_mutex);
	/* Only the first enable / last disable touches the hardware */
	if (on) {
		gmin_v2p8_enable_count++;
		if (gmin_v2p8_enable_count > 1)
			goto out; /* Already on */
	} else {
		gmin_v2p8_enable_count--;
		if (gmin_v2p8_enable_count > 0)
			goto out; /* Still needed */
	}

	if (gs->v2p8_gpio >= 0)
		gpio_set_value(gs->v2p8_gpio, on);

	if (gs->v2p8_reg) {
		regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000);
		if (on)
			ret = regulator_enable(gs->v2p8_reg);
		else
			ret = regulator_disable(gs->v2p8_reg);

		goto out;
	}

	/* No regulator handle: drive the rail through the detected PMIC */
	switch (pmic_id) {
	case PMIC_AXP:
		ret = axp_regulator_set(subdev->dev, gs, ALDO1_SEL_REG,
					ALDO1_2P8V, ALDO1_CTRL3_REG,
					ALDO1_CTRL3_SHIFT, on);
		break;
	case PMIC_TI:
		value = on ? LDO_2P8V_ON : LDO_2P8V_OFF;

		ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
				     LDO9_REG, value, 0xff);
		break;
	case PMIC_CRYSTALCOVE:
		if (IS_ISP2401)
			reg = CRYSTAL_CHT_2P8V_REG;
		else
			reg = CRYSTAL_BYT_2P8V_REG;

		value = on ? CRYSTAL_ON : CRYSTAL_OFF;

		ret = gmin_i2c_write(subdev->dev, gs->pwm_i2c_addr,
				     reg, value, 0xff);
		break;
	default:
		dev_err(subdev->dev, "Couldn't set power mode for v2p8\n");
		ret = -EINVAL;
	}

out:
	mutex_unlock(&gmin_regulator_mutex);
	return ret;
}
static int gmin_acpi_pm_ctrl(struct v4l2_subdev *subdev, int on)
{
int ret = 0;
struct gmin_subdev *gs = find_gmin_subdev(subdev);
struct i2c_client *client = v4l2_get_subdevdata(subdev);
struct acpi_device *adev = ACPI_COMPANION(&client->dev);
/* Use the ACPI power management to control it */
on = !!on;
if (gs->clock_on == on)
return 0;
dev_dbg(subdev->dev, "Setting power state to %s\n",
on ? "on" : "off");
if (on)
ret = acpi_device_set_power(adev,
ACPI_STATE_D0);
else
ret = acpi_device_set_power(adev,
ACPI_STATE_D3_COLD);
if (!ret)
gs->clock_on = on;
else
dev_err(subdev->dev, "Couldn't set power state to %s\n",
on ? "on" : "off");
return ret;
}
static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
{
int ret = 0;
struct gmin_subdev *gs = find_gmin_subdev(subdev);
struct i2c_client *client = v4l2_get_subdevdata(subdev);
if (gs->clock_on == !!on)
return 0;
if (on) {
ret = clk_set_rate(gs->pmc_clk,
gs->clock_src ? CLK_RATE_19_2MHZ : CLK_RATE_25_0MHZ);
if (ret)
dev_err(&client->dev, "unable to set PMC rate %d\n",
gs->clock_src);
ret = clk_prepare_enable(gs->pmc_clk);
if (ret == 0)
gs->clock_on = true;
} else {
clk_disable_unprepare(gs->pmc_clk);
gs->clock_on = false;
}
return ret;
}
/*
 * Allocate and fill a camera_mipi_info describing the sensor's CSI bus
 * and attach it to the subdev as hostdata. Returns 0 or -ENOMEM.
 */
static int camera_sensor_csi_alloc(struct v4l2_subdev *sd, u32 port, u32 lanes,
				   u32 format, u32 bayer_order)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct camera_mipi_info *csi = kzalloc(sizeof(*csi), GFP_KERNEL);

	if (!csi)
		return -ENOMEM;

	csi->port = port;
	csi->num_lanes = lanes;
	csi->input_format = format;
	csi->raw_bayer_order = bayer_order;
	csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED;
	csi->metadata_effective_width = NULL;
	v4l2_set_subdev_hostdata(sd, csi);

	dev_info(&client->dev,
		 "camera pdata: port: %d lanes: %d order: %8.8x\n",
		 port, lanes, bayer_order);

	return 0;
}
static void camera_sensor_csi_free(struct v4l2_subdev *sd)
{
struct camera_mipi_info *csi;
csi = v4l2_get_subdev_hostdata(sd);
kfree(csi);
}
/*
 * csi_cfg platform callback: allocate (flag != 0) or free (flag == 0)
 * the subdev's CSI bus description using the stored gmin configuration.
 */
static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag)
{
	struct gmin_subdev *gs = find_gmin_subdev(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (!client || !gs)
		return -ENODEV;

	if (!flag) {
		camera_sensor_csi_free(sd);
		return 0;
	}

	return camera_sensor_csi_alloc(sd, gs->csi_port, gs->csi_lanes,
				       gs->csi_fmt, gs->csi_bayer);
}
/*
 * Register a sensor subdev that does not use the gmin power-management
 * glue. The CSI port defaults from the ACPI _PR0 clock mapping (when
 * available) and may be overridden by EFI/DMI config vars; lane count
 * likewise. Returns 0, -ENOMEM when the table is full, or the
 * camera_sensor_csi_alloc() error.
 */
int atomisp_register_sensor_no_gmin(struct v4l2_subdev *subdev, u32 lanes,
				    enum atomisp_input_format format,
				    enum atomisp_bayer_order bayer_order)
{
	struct i2c_client *client = v4l2_get_subdevdata(subdev);
	struct acpi_device *adev = ACPI_COMPANION(&client->dev);
	int i, ret, clock_num, port = 0;

	if (adev) {
		/* Get ACPI _PR0 derived clock to determine the csi_port default */
		if (acpi_device_power_manageable(adev)) {
			clock_num = atomisp_get_acpi_power(&client->dev);

			/* Compare clock to CsiPort 1 pmc-clock used in the CHT/BYT reference designs */
			if (IS_ISP2401)
				port = clock_num == 4 ? 1 : 0;
			else
				port = clock_num == 0 ? 1 : 0;
		}

		port = gmin_get_var_int(&client->dev, false, "CsiPort", port);
		lanes = gmin_get_var_int(&client->dev, false, "CsiLanes", lanes);
	}

	/* First free slot; type == 0 marks an unused entry */
	for (i = 0; i < MAX_SUBDEVS; i++)
		if (!pdata.subdevs[i].type)
			break;

	if (i >= MAX_SUBDEVS) {
		dev_err(&client->dev, "Error too many subdevs already registered\n");
		return -ENOMEM;
	}

	ret = camera_sensor_csi_alloc(subdev, port, lanes, format, bayer_order);
	if (ret)
		return ret;

	pdata.subdevs[i].type = RAW_CAMERA;
	pdata.subdevs[i].port = port;
	pdata.subdevs[i].lanes = lanes;
	pdata.subdevs[i].subdev = subdev;
	return 0;
}
EXPORT_SYMBOL_GPL(atomisp_register_sensor_no_gmin);
/*
 * Remove a sensor registered with atomisp_register_sensor_no_gmin():
 * free its CSI description and clear its pdata.subdevs[] slot.
 */
void atomisp_unregister_subdev(struct v4l2_subdev *subdev)
{
	int idx;

	for (idx = 0; idx < MAX_SUBDEVS; idx++) {
		if (pdata.subdevs[idx].subdev != subdev)
			continue;

		camera_sensor_csi_free(subdev);
		pdata.subdevs[idx].subdev = NULL;
		pdata.subdevs[idx].type = 0;
		pdata.subdevs[idx].port = 0;
		return;	/* a subdev occupies at most one slot */
	}
}
EXPORT_SYMBOL_GPL(atomisp_unregister_subdev);
/*
 * Find the registered VCM (focus motor) controller whose module name
 * matches camera_module; NULL if none is registered or args are invalid.
 */
static struct camera_vcm_control *gmin_get_vcm_ctrl(struct v4l2_subdev *subdev,
						    char *camera_module)
{
	struct i2c_client *client = v4l2_get_subdevdata(subdev);
	struct gmin_subdev *gs = find_gmin_subdev(subdev);
	struct camera_vcm_control *vcm, *found = NULL;

	if (!client || !gs || !camera_module)
		return NULL;

	mutex_lock(&vcm_lock);
	list_for_each_entry(vcm, &vcm_devices, list) {
		if (!strcmp(camera_module, vcm->camera_module)) {
			found = vcm;
			break;
		}
	}
	mutex_unlock(&vcm_lock);

	return found;
}
static struct camera_sensor_platform_data pmic_gmin_plat = {
.gpio0_ctrl = gmin_gpio0_ctrl,
.gpio1_ctrl = gmin_gpio1_ctrl,
.v1p8_ctrl = gmin_v1p8_ctrl,
.v2p8_ctrl = gmin_v2p8_ctrl,
.v1p2_ctrl = gmin_v1p2_ctrl,
.flisclk_ctrl = gmin_flisclk_ctrl,
.csi_cfg = gmin_csi_cfg,
.get_vcm_ctrl = gmin_get_vcm_ctrl,
};
static struct camera_sensor_platform_data acpi_gmin_plat = {
.gpio0_ctrl = gmin_gpio0_ctrl,
.gpio1_ctrl = gmin_gpio1_ctrl,
.v1p8_ctrl = gmin_acpi_pm_ctrl,
.v2p8_ctrl = gmin_acpi_pm_ctrl,
.v1p2_ctrl = gmin_acpi_pm_ctrl,
.flisclk_ctrl = gmin_acpi_pm_ctrl,
.csi_cfg = gmin_csi_cfg,
.get_vcm_ctrl = gmin_get_vcm_ctrl,
};
struct camera_sensor_platform_data *
gmin_camera_platform_data(struct v4l2_subdev *subdev,
enum atomisp_input_format csi_format,
enum atomisp_bayer_order csi_bayer)
{
u8 pmic_i2c_addr = gmin_detect_pmic(subdev);
struct gmin_subdev *gs;
gs = find_free_gmin_subdev_slot();
gs->subdev = subdev;
gs->csi_fmt = csi_format;
gs->csi_bayer = csi_bayer;
gs->pwm_i2c_addr = pmic_i2c_addr;
gmin_subdev_add(gs);
if (gs->pmc_clk)
return &pmic_gmin_plat;
else
return &acpi_gmin_plat;
}
EXPORT_SYMBOL_GPL(gmin_camera_platform_data);
/*
 * Add a VCM (focus motor) controller to the global list consulted by
 * gmin_get_vcm_ctrl(). Returns 0, or -EINVAL for a NULL argument.
 */
int atomisp_gmin_register_vcm_control(struct camera_vcm_control *vcmCtrl)
{
	if (!vcmCtrl)
		return -EINVAL;

	mutex_lock(&vcm_lock);
	list_add_tail(&vcmCtrl->list, &vcm_devices);
	mutex_unlock(&vcm_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(atomisp_gmin_register_vcm_control);
/*
 * Look up var8 in a NULL-terminated DMI quirk table. On a match, copy
 * the value into out and store its length in *out_len. Returns 0 on
 * success, -ENOSPC if the value doesn't fit, -EINVAL if not found.
 */
static int gmin_get_hardcoded_var(struct device *dev,
				  struct gmin_cfg_var *varlist,
				  const char *var8, char *out, size_t *out_len)
{
	struct gmin_cfg_var *entry;

	for (entry = varlist; entry->name; entry++) {
		size_t len;

		if (strcmp(var8, entry->name) != 0)
			continue;

		dev_info(dev, "Found DMI entry for '%s'\n", var8);

		len = strlen(entry->val);
		if (len > *out_len - 1)
			return -ENOSPC;

		strscpy(out, entry->val, *out_len);
		*out_len = len;
		return 0;
	}

	return -EINVAL;
}
/*
 * Look up a config variable in the sensor's ACPI _DSM package, which is
 * laid out as alternating name/value string pairs. On success the value
 * string is copied into out and *out_len set to its length.
 * Returns 0 on success, -EINVAL otherwise.
 */
static int gmin_get_config_dsm_var(struct device *dev,
				   const char *var,
				   char *out, size_t *out_len)
{
	acpi_handle handle = ACPI_HANDLE(dev);
	union acpi_object *obj, *cur = NULL;
	int i;

	/*
	 * The data reported by "CamClk" seems to be either 0 or 1 at the
	 * _DSM table.
	 *
	 * At the ACPI tables we looked so far, this is not related to the
	 * actual clock source for the sensor, which is given by the
	 * _PR0 ACPI table. So, ignore it, as otherwise this will be
	 * set to a wrong value.
	 */
	if (!strcmp(var, "CamClk"))
		return -EINVAL;

	/* Return on unexpected object type */
	obj = acpi_evaluate_dsm_typed(handle, &atomisp_dsm_guid, 0, 0, NULL,
				      ACPI_TYPE_PACKAGE);
	if (!obj) {
		dev_info_once(dev, "Didn't find ACPI _DSM table.\n");
		return -EINVAL;
	}

#if 0 /* Just for debugging purposes */
	for (i = 0; i < obj->package.count; i++) {
		union acpi_object *cur = &obj->package.elements[i];

		if (cur->type == ACPI_TYPE_INTEGER)
			dev_info(dev, "object #%d, type %d, value: %lld\n",
				 i, cur->type, cur->integer.value);
		else if (cur->type == ACPI_TYPE_STRING)
			dev_info(dev, "object #%d, type %d, string: %s\n",
				 i, cur->type, cur->string.pointer);
		else
			dev_info(dev, "object #%d, type %d\n",
				 i, cur->type);
	}
#endif

	/* Seek for the desired var: names at even indices, values follow */
	for (i = 0; i < obj->package.count - 1; i += 2) {
		if (obj->package.elements[i].type == ACPI_TYPE_STRING &&
		    !strcmp(obj->package.elements[i].string.pointer, var)) {
			/* Next element should be the required value */
			cur = &obj->package.elements[i + 1];
			break;
		}
	}

	if (!cur) {
		dev_info(dev, "didn't found _DSM entry for '%s'\n", var);
		ACPI_FREE(obj);
		return -EINVAL;
	}

	/*
	 * While it could be possible to have an ACPI_TYPE_INTEGER,
	 * and read the value from cur->integer.value, the table
	 * seen so far uses the string type. So, produce a warning
	 * if it founds something different than string, letting it
	 * to fall back to the old code.
	 */
	if (cur && cur->type != ACPI_TYPE_STRING) {
		dev_info(dev, "found non-string _DSM entry for '%s'\n", var);
		ACPI_FREE(obj);
		return -EINVAL;
	}

	dev_info(dev, "found _DSM entry for '%s': %s\n", var,
		 cur->string.pointer);
	strscpy(out, cur->string.pointer, *out_len);
	*out_len = strlen(out);

	ACPI_FREE(obj);
	return 0;
}
/* Retrieves a device-specific configuration variable. The dev
 * argument should be a device with an ACPI companion, as all
 * configuration is based on firmware ID.
 *
 * Lookup order: DMI quirk table, then (for sensors) the ACPI _DSM
 * table, then the EFI variable store. Returns 0 with out/*out_len
 * filled on success, a negative errno otherwise.
 */
static int gmin_get_config_var(struct device *maindev,
			       bool is_gmin,
			       const char *var,
			       char *out, size_t *out_len)
{
	struct acpi_device *adev = ACPI_COMPANION(maindev);
	efi_char16_t var16[CFG_VAR_NAME_MAX];
	const struct dmi_system_id *id;
	char var8[CFG_VAR_NAME_MAX];
	efi_status_t status;
	int i, ret;

	if (!is_gmin && adev)
		ret = snprintf(var8, sizeof(var8), "%s_%s", acpi_dev_name(adev), var);
	else
		ret = snprintf(var8, sizeof(var8), "gmin_%s", var);

	if (ret < 0 || ret >= sizeof(var8) - 1)
		return -EINVAL;

	/* DMI based quirks override both the _DSM table and EFI variables */
	id = dmi_first_match(gmin_vars);
	if (id) {
		ret = gmin_get_hardcoded_var(maindev, id->driver_data, var8,
					     out, out_len);
		if (!ret)
			return 0;
	}

	/* For sensors, try first to use the _DSM table */
	if (!is_gmin) {
		ret = gmin_get_config_dsm_var(maindev, var, out, out_len);
		if (!ret)
			return 0;
	}

	/* Our variable names are ASCII by construction, but EFI names
	 * are wide chars. Convert and zero-pad.
	 */
	memset(var16, 0, sizeof(var16));
	for (i = 0; i < sizeof(var8) && var8[i]; i++)
		var16[i] = var8[i];

	status = EFI_UNSUPPORTED;
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
		status = efi.get_variable(var16, &GMIN_CFG_VAR_EFI_GUID, NULL,
					  (unsigned long *)out_len, out);
	if (status == EFI_SUCCESS) {
		dev_info(maindev, "found EFI entry for '%s'\n", var8);
		/*
		 * Must return 0 here: the old "return ret" leaked the
		 * positive snprintf() length (or a prior lookup error),
		 * so callers checking for 0 discarded the EFI value.
		 */
		return 0;
	}

	if (is_gmin)
		dev_info(maindev, "Failed to find EFI gmin variable %s\n", var8);
	else
		dev_info(maindev, "Failed to find EFI variable %s\n", var8);

	return -ENOENT;
}
/*
 * Look up configuration variable @var for @dev and parse it as an
 * integer. Falls back to @def when the variable is missing or does
 * not parse as a number.
 */
int gmin_get_var_int(struct device *dev, bool is_gmin, const char *var, int def)
{
	char buf[CFG_VAR_NAME_MAX + 1];
	size_t buf_len = CFG_VAR_NAME_MAX;
	long parsed;
	int status;

	status = gmin_get_config_var(dev, is_gmin, var, buf, &buf_len);
	if (status) {
		dev_info(dev, "%s: using default (%d)\n", var, def);
		return def;
	}

	buf[buf_len] = 0;
	status = kstrtol(buf, 0, &parsed);

	return status ? def : parsed;
}
EXPORT_SYMBOL_GPL(gmin_get_var_int);
/* PCI quirk: The BYT ISP advertises PCI runtime PM but it doesn't
 * work. Disable so the kernel framework doesn't hang the device
 * trying. The driver itself does direct calls to the PUNIT to manage
 * ISP power.
 */
static void isp_pm_cap_fixup(struct pci_dev *pdev)
{
	dev_info(&pdev->dev, "Disabling PCI power management on camera ISP\n");
	/* Clearing pm_cap makes the PCI core treat the device as not PM-capable */
	pdev->pm_cap = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0f38, isp_pm_cap_fixup);
MODULE_DESCRIPTION("Ancillary routines for binding ACPI devices");
MODULE_LICENSE("GPL");
| linux-master | drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
/* Generated code: do not edit or commit. */
#include "ia_css_pipeline.h"
#include "ia_css_isp_states.h"
#include "ia_css_debug.h"
#include "assert_support.h"
/* Code generated by genparam/genstate.c:gen_init_function() */

/* Zero-fill the AA state section of this binary's VMEM state params. */
static void
ia_css_initialize_aa_state(
    const struct ia_css_binary *binary)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_aa_state() enter:\n");

	{
		/* location of the aa state within the VMEM state class */
		unsigned int size = binary->info->mem_offsets.offsets.state->vmem.aa.size;
		unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.aa.offset;

		if (size)
			memset(&binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
			       0, size);
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_aa_state() leave:\n");
}
/* Code generated by genparam/genstate.c:gen_init_function() */

/* Initialize the CNR state section of this binary's VMEM state params. */
static void
ia_css_initialize_cnr_state(
    const struct ia_css_binary *binary)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_cnr_state() enter:\n");

	{
		unsigned int size = binary->info->mem_offsets.offsets.state->vmem.cnr.size;
		unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.cnr.offset;

		if (size) {
			ia_css_init_cnr_state(
			    &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
			    size);
		}
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_cnr_state() leave:\n");
}
/* Code generated by genparam/genstate.c:gen_init_function() */

/* Initialize the CNR2 state section of this binary's VMEM state params. */
static void
ia_css_initialize_cnr2_state(
    const struct ia_css_binary *binary)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_cnr2_state() enter:\n");

	{
		unsigned int size = binary->info->mem_offsets.offsets.state->vmem.cnr2.size;
		unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.cnr2.offset;

		if (size) {
			ia_css_init_cnr2_state(
			    &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
			    size);
		}
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_cnr2_state() leave:\n");
}
/* Code generated by genparam/genstate.c:gen_init_function() */

/* Initialize the DP state section of this binary's VMEM state params. */
static void
ia_css_initialize_dp_state(
    const struct ia_css_binary *binary)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_dp_state() enter:\n");

	{
		unsigned int size = binary->info->mem_offsets.offsets.state->vmem.dp.size;
		unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.dp.offset;

		if (size) {
			ia_css_init_dp_state(
			    &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
			    size);
		}
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_dp_state() leave:\n");
}
/* Code generated by genparam/genstate.c:gen_init_function() */

/* Initialize the DE state section of this binary's VMEM state params. */
static void
ia_css_initialize_de_state(
    const struct ia_css_binary *binary)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_de_state() enter:\n");

	{
		unsigned int size = binary->info->mem_offsets.offsets.state->vmem.de.size;
		unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.de.offset;

		if (size) {
			ia_css_init_de_state(
			    &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
			    size);
		}
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_de_state() leave:\n");
}
/* Code generated by genparam/genstate.c:gen_init_function() */

/* Initialize the TNR state; note this one lives in DMEM, not VMEM. */
static void
ia_css_initialize_tnr_state(
    const struct ia_css_binary *binary)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_tnr_state() enter:\n");

	{
		unsigned int size = binary->info->mem_offsets.offsets.state->dmem.tnr.size;
		unsigned int offset = binary->info->mem_offsets.offsets.state->dmem.tnr.offset;

		if (size) {
			ia_css_init_tnr_state((struct sh_css_isp_tnr_dmem_state *)
					      &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
					      size);
		}
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_tnr_state() leave:\n");
}
/* Code generated by genparam/genstate.c:gen_init_function() */

/* Initialize the REF state; note this one lives in DMEM, not VMEM. */
static void
ia_css_initialize_ref_state(
    const struct ia_css_binary *binary)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_ref_state() enter:\n");

	{
		unsigned int size = binary->info->mem_offsets.offsets.state->dmem.ref.size;
		unsigned int offset = binary->info->mem_offsets.offsets.state->dmem.ref.offset;

		if (size) {
			ia_css_init_ref_state((struct sh_css_isp_ref_dmem_state *)
					      &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_DMEM].address[offset],
					      size);
		}
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_ref_state() leave:\n");
}
/* Code generated by genparam/genstate.c:gen_init_function() */

/* Initialize the YNR state section of this binary's VMEM state params. */
static void
ia_css_initialize_ynr_state(
    const struct ia_css_binary *binary)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_ynr_state() enter:\n");

	{
		unsigned int size = binary->info->mem_offsets.offsets.state->vmem.ynr.size;
		unsigned int offset = binary->info->mem_offsets.offsets.state->vmem.ynr.offset;

		if (size) {
			ia_css_init_ynr_state(
			    &binary->mem_params.params[IA_CSS_PARAM_CLASS_STATE][IA_CSS_ISP_VMEM].address[offset],
			    size);
		}
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_initialize_ynr_state() leave:\n");
}
/* Code generated by genparam/genstate.c:gen_state_init_table() */

/*
 * Dispatch table of per-kernel state initializers, indexed by state ID.
 * Generated code: the entry order must stay in sync with the state ID
 * enumeration — do not reorder by hand.
 */
void (*ia_css_kernel_init_state[IA_CSS_NUM_STATE_IDS])(
    const struct ia_css_binary *binary) = {
	ia_css_initialize_aa_state,
	ia_css_initialize_cnr_state,
	ia_css_initialize_cnr2_state,
	ia_css_initialize_dp_state,
	ia_css_initialize_de_state,
	ia_css_initialize_tnr_state,
	ia_css_initialize_ref_state,
	ia_css_initialize_ynr_state,
};
| linux-master | drivers/staging/media/atomisp/pci/ia_css_isp_states.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "sh_css_stream_format.h"
#include <ia_css_stream_format.h>
/*
 * Translate an Atom ISP input (stream) format into the number of bits
 * used per subpixel for that format. Formats without a defined
 * subpixel width map to 0.
 */
unsigned int sh_css_stream_format_2_bits_per_subpixel(
    enum atomisp_input_format format)
{
	switch (format) {
	case ATOMISP_INPUT_FORMAT_RGB_444:
		return 4;
	case ATOMISP_INPUT_FORMAT_RGB_555:
		return 5;
	case ATOMISP_INPUT_FORMAT_RGB_565:
	case ATOMISP_INPUT_FORMAT_RGB_666:
	case ATOMISP_INPUT_FORMAT_RAW_6:
		return 6;
	case ATOMISP_INPUT_FORMAT_RAW_7:
		return 7;
	case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
	case ATOMISP_INPUT_FORMAT_YUV420_8:
	case ATOMISP_INPUT_FORMAT_YUV422_8:
	case ATOMISP_INPUT_FORMAT_RGB_888:
	case ATOMISP_INPUT_FORMAT_RAW_8:
	case ATOMISP_INPUT_FORMAT_BINARY_8:
	case ATOMISP_INPUT_FORMAT_USER_DEF1:
	case ATOMISP_INPUT_FORMAT_USER_DEF2:
	case ATOMISP_INPUT_FORMAT_USER_DEF3:
	case ATOMISP_INPUT_FORMAT_USER_DEF4:
	case ATOMISP_INPUT_FORMAT_USER_DEF5:
	case ATOMISP_INPUT_FORMAT_USER_DEF6:
	case ATOMISP_INPUT_FORMAT_USER_DEF7:
	case ATOMISP_INPUT_FORMAT_USER_DEF8:
		return 8;
	case ATOMISP_INPUT_FORMAT_YUV420_10:
	case ATOMISP_INPUT_FORMAT_YUV422_10:
	case ATOMISP_INPUT_FORMAT_RAW_10:
		return 10;
	case ATOMISP_INPUT_FORMAT_RAW_12:
		return 12;
	case ATOMISP_INPUT_FORMAT_RAW_14:
		return 14;
	case ATOMISP_INPUT_FORMAT_RAW_16:
	case ATOMISP_INPUT_FORMAT_YUV420_16:
	case ATOMISP_INPUT_FORMAT_YUV422_16:
		return 16;
	default:
		return 0;
	}
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_stream_format.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/string.h> /* for memcpy() */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "hmm.h"
#include <math_support.h>
#include "platform_support.h"
#include "sh_css_firmware.h"
#include "sh_css_defs.h"
#include "ia_css_debug.h"
#include "sh_css_internal.h"
#include "ia_css_isp_param.h"
#include "assert_support.h"
#include "isp.h" /* PMEM_WIDTH_LOG2 */
#include "ia_css_isp_params.h"
#include "ia_css_isp_configs.h"
#include "ia_css_isp_states.h"
/* two-level expansion so macro arguments get stringified after expansion */
#define _STR(x) #x
#define STR(x) _STR(x)

/* Layout of the start of a firmware image: file header followed by the
 * header of the first binary.
 */
struct firmware_header {
	struct sh_css_fw_bi_file_h file_header;
	struct ia_css_fw_info binary_header;
};

/* Per-binary bookkeeping so allocations can be freed on unload */
struct fw_param {
	const char *name;	/* kstrdup()ed binary name (ISP/SP blobs) */
	const void *buffer;	/* blob code or parameter-offsets buffer */
};

static struct firmware_header *firmware_header;

/*
 * The string STR is a place holder
 * which will be replaced with the actual RELEASE_VERSION
 * during package generation. Please do not modify
 */
static const char *release_version_2401 = STR(irci_stable_candrpv_0415_20150521_0458);
static const char *release_version_2400 = STR(irci_stable_candrpv_0415_20150423_1753);

#define MAX_FW_REL_VER_NAME 300
static char FW_rel_ver_name[MAX_FW_REL_VER_NAME] = "---";

struct ia_css_fw_info sh_css_sp_fw;
struct ia_css_blob_descr *sh_css_blob_info; /* Only ISP blob info (no SP) */
unsigned int sh_css_num_binaries; /* This includes 1 SP binary */

static struct fw_param *fw_minibuffer;
/* Return the version string recorded when the firmware was loaded. */
char *sh_css_get_fw_version(void)
{
	return FW_rel_ver_name;
}
/*
* Split the loaded firmware into blobs
*/
/* Setup sp/sp1 binary */
/*
 * Copy one firmware blob out of the loaded image into its own
 * vmalloc()ed buffer and fill in @sh_css_fw accordingly. The buffer is
 * recorded in fw_minibuffer[binary_id] so sh_css_unload_firmware()
 * can release it later.
 */
static int
setup_binary(struct ia_css_fw_info *fw, const char *fw_data,
	     struct ia_css_fw_info *sh_css_fw, unsigned int binary_id)
{
	const char *src;
	void *code;

	if (!fw || !fw_data)
		return -EINVAL;

	src = fw_data + fw->blob.offset;

	*sh_css_fw = *fw;

	code = vmalloc(fw->blob.size);
	if (!code)
		return -ENOMEM;
	memcpy(code, src, fw->blob.size);

	sh_css_fw->blob.code = code;
	sh_css_fw->blob.data = (char *)code + fw->blob.data_source;
	fw_minibuffer[binary_id].buffer = code;

	return 0;
}
/*
 * Fill in the blob descriptor @bd for the firmware binary header @bi
 * inside the firmware image @fw. For ISP/SP binaries the name string
 * is duplicated; for ISP binaries the param/config/state memory-offset
 * structs are copied out of the image. Both allocations are recorded
 * in fw_minibuffer[index] for release by sh_css_unload_firmware().
 *
 * Returns 0 on success, -EINVAL on a malformed header, -ENOMEM on
 * allocation failure.
 */
int
sh_css_load_blob_info(const char *fw, const struct ia_css_fw_info *bi,
		      struct ia_css_blob_descr *bd,
		      unsigned int index)
{
	const char *name;
	const unsigned char *blob;

	if ((!fw) || (!bd))
		return -EINVAL;

	/* Special case: only one binary in fw */
	if (!bi)
		bi = (const struct ia_css_fw_info *)fw;

	name = fw + bi->blob.prog_name_offset;
	blob = (const unsigned char *)fw + bi->blob.offset;

	/* sanity check */
	if (bi->blob.size !=
	    bi->blob.text_size + bi->blob.icache_size +
	    bi->blob.data_size + bi->blob.padding_size) {
		/* sanity check, note the padding bytes added for section to DDR alignment */
		return -EINVAL;
	}

	/* the blob must start on an ISP PMEM word boundary */
	if ((bi->blob.offset % (1UL << (ISP_PMEM_WIDTH_LOG2 - 3))) != 0)
		return -EINVAL;

	bd->blob = blob;
	bd->header = *bi;

	if (bi->type == ia_css_isp_firmware || bi->type == ia_css_sp_firmware) {
		char *namebuffer;

		/* duplicate the name so it outlives the firmware image */
		namebuffer = kstrdup(name, GFP_KERNEL);
		if (!namebuffer)
			return -ENOMEM;
		bd->name = fw_minibuffer[index].name = namebuffer;
	} else {
		bd->name = name;
	}

	if (bi->type == ia_css_isp_firmware) {
		size_t paramstruct_size = sizeof(struct ia_css_memory_offsets);
		size_t configstruct_size = sizeof(struct ia_css_config_memory_offsets);
		size_t statestruct_size = sizeof(struct ia_css_state_memory_offsets);

		/* one buffer holds all three offset structs, back to back */
		char *parambuf = kmalloc(paramstruct_size + configstruct_size +
					 statestruct_size,
					 GFP_KERNEL);
		if (!parambuf)
			return -ENOMEM;

		bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = NULL;
		bd->mem_offsets.array[IA_CSS_PARAM_CLASS_CONFIG].ptr = NULL;
		bd->mem_offsets.array[IA_CSS_PARAM_CLASS_STATE].ptr = NULL;

		fw_minibuffer[index].buffer = parambuf;

		/* copy ia_css_memory_offsets */
		memcpy(parambuf, (void *)(fw +
					  bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_PARAM]),
		       paramstruct_size);
		bd->mem_offsets.array[IA_CSS_PARAM_CLASS_PARAM].ptr = parambuf;

		/* copy ia_css_config_memory_offsets */
		memcpy(parambuf + paramstruct_size,
		       (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_CONFIG]),
		       configstruct_size);
		bd->mem_offsets.array[IA_CSS_PARAM_CLASS_CONFIG].ptr = parambuf +
		paramstruct_size;

		/* copy ia_css_state_memory_offsets */
		memcpy(parambuf + paramstruct_size + configstruct_size,
		       (void *)(fw + bi->blob.memory_offsets.offsets[IA_CSS_PARAM_CLASS_STATE]),
		       statestruct_size);
		bd->mem_offsets.array[IA_CSS_PARAM_CLASS_STATE].ptr = parambuf +
		paramstruct_size + configstruct_size;
	}
	return 0;
}
/*
 * Compare the firmware image's version string with the release version
 * this driver expects. A mismatch is only logged; the function always
 * returns false ("no error") so any firmware version is accepted.
 * Side effect: caches the image pointer in the file-scope
 * firmware_header.
 */
bool
sh_css_check_firmware_version(struct device *dev, const char *fw_data)
{
	const char *release_version;
	struct sh_css_fw_bi_file_h *file_header;

	if (IS_ISP2401)
		release_version = release_version_2401;
	else
		release_version = release_version_2400;

	firmware_header = (struct firmware_header *)fw_data;
	file_header = &firmware_header->file_header;

	if (strcmp(file_header->version, release_version) != 0) {
		dev_err(dev, "Firmware version may not be compatible with this driver\n");
		dev_err(dev, "Expecting version '%s', but firmware is '%s'.\n",
			release_version, file_header->version);
	}

	/* For now, let's just accept a wrong version, even if wrong */
	return false;
}
/* Human-readable names of the firmware binary types, for log messages */
static const char * const fw_type_name[] = {
	[ia_css_sp_firmware] = "SP",
	[ia_css_isp_firmware] = "ISP",
	[ia_css_bootloader_firmware] = "BootLoader",
	[ia_css_acc_firmware] = "accel",
};

/* Human-readable names of the ISP acceleration types, for log messages */
static const char * const fw_acc_type_name[] = {
	[IA_CSS_ACC_NONE] = "Normal",
	[IA_CSS_ACC_OUTPUT] = "Accel for output",
	[IA_CSS_ACC_VIEWFINDER] = "Accel for viewfinder",
	[IA_CSS_ACC_STANDALONE] = "Stand-alone accel",
};
/*
 * Parse and load the CSS firmware image: validate the file header,
 * record the version string, copy out the SP binary and build blob
 * descriptors for all ISP binaries.
 *
 * Returns 0 on success, a negative errno on malformed firmware or
 * allocation failure. NOTE(review): on the error paths inside the
 * loop, buffers allocated so far are not freed here — they appear to
 * rely on sh_css_unload_firmware() being called; confirm with callers.
 */
int
sh_css_load_firmware(struct device *dev, const char *fw_data,
		     unsigned int fw_size)
{
	unsigned int i;
	const char *release_version;
	struct ia_css_fw_info *binaries;
	struct sh_css_fw_bi_file_h *file_header;
	int ret;

	/* some sanity checks */
	if (!fw_data || fw_size < sizeof(struct sh_css_fw_bi_file_h))
		return -EINVAL;

	firmware_header = (struct firmware_header *)fw_data;
	file_header = &firmware_header->file_header;

	/* the on-disk header must match the struct layout we expect */
	if (file_header->h_size != sizeof(struct sh_css_fw_bi_file_h))
		return -EINVAL;

	binaries = &firmware_header->binary_header;
	strscpy(FW_rel_ver_name, file_header->version,
		min(sizeof(FW_rel_ver_name), sizeof(file_header->version)));
	if (IS_ISP2401)
		release_version = release_version_2401;
	else
		release_version = release_version_2400;

	/* note: currently always returns false ("accept any version") */
	ret = sh_css_check_firmware_version(dev, fw_data);
	if (ret) {
		IA_CSS_ERROR("CSS code version (%s) and firmware version (%s) mismatch!",
			     file_header->version, release_version);
		return -EINVAL;
	} else {
		IA_CSS_LOG("successfully load firmware version %s", release_version);
	}

	sh_css_num_binaries = file_header->binary_nr;
	/* Only allocate memory for ISP blob info */
	if (sh_css_num_binaries > NUM_OF_SPS) {
		sh_css_blob_info = kmalloc(
		    (sh_css_num_binaries - NUM_OF_SPS) *
		    sizeof(*sh_css_blob_info), GFP_KERNEL);
		if (!sh_css_blob_info)
			return -ENOMEM;
	} else {
		sh_css_blob_info = NULL;
	}

	fw_minibuffer = kcalloc(sh_css_num_binaries, sizeof(struct fw_param),
				GFP_KERNEL);
	if (!fw_minibuffer)
		return -ENOMEM;

	for (i = 0; i < sh_css_num_binaries; i++) {
		struct ia_css_fw_info *bi = &binaries[i];
		/*
		 * note: the var below is made static as it is quite large;
		 * if it is not static it ends up on the stack which could
		 * cause issues for drivers
		 */
		static struct ia_css_blob_descr bd;
		int err;

		err = sh_css_load_blob_info(fw_data, bi, &bd, i);
		if (err)
			return -EINVAL;

		/* reject blobs that extend past the end of the image */
		if (bi->blob.offset + bi->blob.size > fw_size)
			return -EINVAL;

		switch (bd.header.type) {
		case ia_css_isp_firmware:
			if (bd.header.info.isp.type > IA_CSS_ACC_STANDALONE) {
				dev_err(dev, "binary #%2d: invalid SP type\n",
					i);
				return -EINVAL;
			}

			dev_dbg(dev,
				"binary #%-2d type %s (%s), binary id is %2d: %s\n",
				i,
				fw_type_name[bd.header.type],
				fw_acc_type_name[bd.header.info.isp.type],
				bd.header.info.isp.sp.id,
				bd.name);
			break;
		case ia_css_sp_firmware:
		case ia_css_bootloader_firmware:
		case ia_css_acc_firmware:
			dev_dbg(dev,
				"binary #%-2d type %s: %s\n",
				i, fw_type_name[bd.header.type],
				bd.name);
			break;
		default:
			if (bd.header.info.isp.type > IA_CSS_ACC_STANDALONE) {
				dev_err(dev,
					"binary #%2d: invalid firmware type\n",
					i);
				return -EINVAL;
			}
			break;
		}

		if (bi->type == ia_css_sp_firmware) {
			/* the SP binary must sit at its fixed index */
			if (i != SP_FIRMWARE)
				return -EINVAL;
			err = setup_binary(bi, fw_data, &sh_css_sp_fw, i);
			if (err)
				return err;
		} else {
			/*
			 * All subsequent binaries
			 * (including bootloaders) (i>NUM_OF_SPS)
			 * are ISP firmware
			 */
			if (i < NUM_OF_SPS)
				return -EINVAL;

			if (bi->type != ia_css_isp_firmware)
				return -EINVAL;
			if (!sh_css_blob_info) /* cannot happen but KW does not see this */
				return -EINVAL;
			sh_css_blob_info[i - NUM_OF_SPS] = bd;
		}
	}

	return 0;
}
/*
 * Undo sh_css_load_firmware(): free every per-binary allocation made
 * while loading (names and blob/parameter buffers), the blob info
 * array, and reset the global firmware state.
 */
void sh_css_unload_firmware(void)
{
	/* release firmware minibuffer */
	if (fw_minibuffer) {
		unsigned int i = 0;

		for (i = 0; i < sh_css_num_binaries; i++) {
			kfree(fw_minibuffer[i].name);
			/* buffer was vmalloc()ed or kmalloc()ed; kvfree handles both */
			kvfree(fw_minibuffer[i].buffer);
		}
		kfree(fw_minibuffer);
		fw_minibuffer = NULL;
	}

	memset(&sh_css_sp_fw, 0, sizeof(sh_css_sp_fw));
	kfree(sh_css_blob_info);
	sh_css_blob_info = NULL;
	sh_css_num_binaries = 0;
}
/*
 * Copy @blob into freshly allocated ISP (hmm) memory and return its
 * ISP-side address, or 0 when the allocation fails.
 */
ia_css_ptr
sh_css_load_blob(const unsigned char *blob, unsigned int size)
{
	ia_css_ptr dst;

	assert(blob);

	/*
	 * hmm_alloc() returns memory aligned to a DDR word boundary,
	 * which is required for the CSS DMA to read the instructions.
	 */
	dst = hmm_alloc(size);
	if (dst)
		hmm_store(dst, blob, size);

	return dst;
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_firmware.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/math.h>
#include <linux/slab.h>
#include <math_support.h>
#include "sh_css_param_shading.h"
#include "ia_css_shading.h"
#include "assert_support.h"
#include "sh_css_defs.h"
#include "sh_css_internal.h"
#include "ia_css_debug.h"
#include "ia_css_pipe_binarydesc.h"
#include "sh_css_hrt.h"
#include "platform_support.h"
/* Bilinear interpolation on shading tables:
* For each target point T, we calculate the 4 surrounding source points:
* ul (upper left), ur (upper right), ll (lower left) and lr (lower right).
* We then calculate the distances from the T to the source points: x0, x1,
* y0 and y1.
* We then calculate the value of T:
* dx0*dy0*Slr + dx0*dy1*Sur + dx1*dy0*Sll + dx1*dy1*Sul.
* We choose a grid size of 1x1 which means:
* dx1 = 1-dx0
* dy1 = 1-dy0
*
* Sul dx0 dx1 Sur
* .<----->|<------------->.
* ^
* dy0|
* v T
* - .
* ^
* |
* dy1|
* v
* . .
* Sll Slr
*
* Padding:
* The area that the ISP operates on can include padding both on the left
 * and the right. We need to pad the shading table such that the shading
 * values end up on the correct pixel values. This means we must pad the
* shading table to match the ISP padding.
* We can have 5 cases:
* 1. All 4 points fall in the left padding.
* 2. The left 2 points fall in the left padding.
* 3. All 4 points fall in the cropped (target) region.
* 4. The right 2 points fall in the right padding.
* 5. All 4 points fall in the right padding.
* Cases 1 and 5 are easy to handle: we simply use the
* value 1 in the shading table.
* Cases 2 and 4 require interpolation that takes into
* account how far into the padding area the pixels
* fall. We extrapolate the shading table into the
* padded area and then interpolate.
*/
/*
 * Crop the relevant region out of @in_table and bilinearly interpolate
 * it into @out_table for one color plane, extrapolating into the
 * left/right/top padding areas as described in the comment block above.
 * All arithmetic is integer; distances are weighted and divided by the
 * cell spans (divx * divy) at the end.
 */
static void
crop_and_interpolate(unsigned int cropped_width,
		     unsigned int cropped_height,
		     unsigned int left_padding,
		     int right_padding,
		     int top_padding,
		     const struct ia_css_shading_table *in_table,
		     struct ia_css_shading_table *out_table,
		     enum ia_css_sc_color color)
{
	unsigned int i, j,
		 sensor_width,
		 sensor_height,
		 table_width,
		 table_height,
		 table_cell_h,
		 out_cell_size,
		 in_cell_size,
		 out_start_row,
		 padded_width;
	int out_start_col, /* can be negative to indicate padded space */
	    table_cell_w;
	unsigned short *in_ptr,
		 *out_ptr;

	assert(in_table);
	assert(out_table);

	sensor_width = in_table->sensor_width;
	sensor_height = in_table->sensor_height;
	table_width = in_table->width;
	table_height = in_table->height;
	in_ptr = in_table->data[color];
	out_ptr = out_table->data[color];

	padded_width = cropped_width + left_padding + right_padding;
	/* grid cell sizes (in pixels) of the output and input tables */
	out_cell_size = CEIL_DIV(padded_width, out_table->width - 1);
	in_cell_size = CEIL_DIV(sensor_width, table_width - 1);

	/* top-left of the output table in sensor coordinates; the crop is centered */
	out_start_col = ((int)sensor_width - (int)cropped_width) / 2 - left_padding;
	out_start_row = ((int)sensor_height - (int)cropped_height) / 2 - top_padding;
	table_cell_w = (int)((table_width - 1) * in_cell_size);
	table_cell_h = (table_height - 1) * in_cell_size;

	for (i = 0; i < out_table->height; i++) {
		int ty, src_y0, src_y1;
		unsigned int sy0, sy1, dy0, dy1, divy;

		/*
		 * calculate target point and make sure it falls within
		 * the table
		 */
		ty = out_start_row + i * out_cell_size;

		/* calculate closest source points in shading table and
		   make sure they fall within the table */
		src_y0 = ty / (int)in_cell_size;
		if (in_cell_size < out_cell_size)
			src_y1 = (ty + out_cell_size) / in_cell_size;
		else
			src_y1 = src_y0 + 1;
		src_y0 = clamp(src_y0, 0, (int)table_height - 1);
		src_y1 = clamp(src_y1, 0, (int)table_height - 1);
		ty = min(clamp(ty, 0, (int)sensor_height - 1),
			 (int)table_cell_h);

		/* calculate closest source points for distance computation */
		sy0 = min(src_y0 * in_cell_size, sensor_height - 1);
		sy1 = min(src_y1 * in_cell_size, sensor_height - 1);
		/* calculate distance between source and target pixels */
		dy0 = ty - sy0;
		dy1 = sy1 - ty;
		divy = sy1 - sy0;
		/* degenerate span: fall back to the single closest row */
		if (divy == 0) {
			dy0 = 1;
			divy = 1;
		}

		for (j = 0; j < out_table->width; j++, out_ptr++) {
			int tx, src_x0, src_x1;
			unsigned int sx0, sx1, dx0, dx1, divx;
			unsigned short s_ul, s_ur, s_ll, s_lr;

			/* calculate target point */
			tx = out_start_col + j * out_cell_size;
			/* calculate closest source points. */
			src_x0 = tx / (int)in_cell_size;
			if (in_cell_size < out_cell_size) {
				src_x1 = (tx + out_cell_size) /
					 (int)in_cell_size;
			} else {
				src_x1 = src_x0 + 1;
			}
			/* if src points fall in padding, select closest ones.*/
			src_x0 = clamp(src_x0, 0, (int)table_width - 1);
			src_x1 = clamp(src_x1, 0, (int)table_width - 1);
			tx = min(clamp(tx, 0, (int)sensor_width - 1),
				 (int)table_cell_w);
			/*
			 * calculate closest source points for distance
			 * computation
			 */
			sx0 = min(src_x0 * in_cell_size, sensor_width - 1);
			sx1 = min(src_x1 * in_cell_size, sensor_width - 1);
			/*
			 * calculate distances between source and target
			 * pixels
			 */
			dx0 = tx - sx0;
			dx1 = sx1 - tx;
			divx = sx1 - sx0;
			/* if we're at the edge, we just use the closest
			 * point still in the grid. We make up for the divider
			 * in this case by setting the distance to
			 * out_cell_size, since it's actually 0.
			 */
			if (divx == 0) {
				dx0 = 1;
				divx = 1;
			}

			/* get source pixel values */
			s_ul = in_ptr[(table_width * src_y0) + src_x0];
			s_ur = in_ptr[(table_width * src_y0) + src_x1];
			s_ll = in_ptr[(table_width * src_y1) + src_x0];
			s_lr = in_ptr[(table_width * src_y1) + src_x1];

			/* bilinear blend of the four neighbours */
			*out_ptr = (unsigned short)((dx0 * dy0 * s_lr + dx0 * dy1 * s_ur + dx1 * dy0 *
						     s_ll + dx1 * dy1 * s_ul) /
						    (divx * divy));
		}
	}
}
/*
 * Generate an identity shading table (every gain 1, fraction_bits 0)
 * of the requested dimensions. On allocation failure *target_table is
 * set to NULL.
 */
void
sh_css_params_shading_id_table_generate(
    struct ia_css_shading_table **target_table,
    unsigned int table_width,
    unsigned int table_height)
{
	struct ia_css_shading_table *table;
	unsigned int color, cell;

	assert(target_table);

	table = ia_css_shading_table_alloc(table_width, table_height);
	if (!table) {
		*target_table = NULL;
		return;
	}

	/* gain of 1 everywhere; with fraction_bits == 0 the shift is zero */
	for (color = 0; color < IA_CSS_SC_NUM_COLORS; color++)
		for (cell = 0; cell < table_height * table_width; cell++)
			table->data[color][cell] = 1;

	table->fraction_bits = 0;
	*target_table = table;
}
/*
 * Build the shading table actually used by the ISP from a caller
 * supplied table: account for sensor binning, bayer-downscale (bds)
 * scaling and ISP padding, then crop and interpolate each color plane
 * to the binary's legacy table resolution. With no input table an
 * identity table is generated instead. On allocation failure
 * *target_table is set to NULL.
 */
void
prepare_shading_table(const struct ia_css_shading_table *in_table,
		      unsigned int sensor_binning,
		      struct ia_css_shading_table **target_table,
		      const struct ia_css_binary *binary,
		      unsigned int bds_factor)
{
	unsigned int input_width, input_height, table_width, table_height, i;
	unsigned int left_padding, top_padding, left_cropping;
	struct ia_css_shading_table *result;
	struct u32_fract bds;
	int right_padding;

	assert(target_table);
	assert(binary);

	if (!in_table) {
		sh_css_params_shading_id_table_generate(target_table,
							binary->sctbl_width_per_color,
							binary->sctbl_height);
		return;
	}

	/*
	 * We use the ISP input resolution for the shading table because
	 * shading correction is performed in the bayer domain (before bayer
	 * down scaling).
	 */
	input_height = binary->in_frame_info.res.height;
	input_width = binary->in_frame_info.res.width;
	left_padding = binary->left_padding;
	left_cropping = (binary->info->sp.pipeline.left_cropping == 0) ?
			binary->dvs_envelope.width : 2 * ISP_VEC_NELEMS;

	sh_css_bds_factor_get_fract(bds_factor, &bds);

	/* scale the paddings with the bayer-downscale fraction */
	left_padding = (left_padding + binary->info->sp.pipeline.left_cropping) *
		       bds.numerator / bds.denominator -
		       binary->info->sp.pipeline.left_cropping;
	right_padding = (binary->internal_frame_info.res.width -
			 binary->effective_in_frame_res.width * bds.denominator /
			 bds.numerator - left_cropping) * bds.numerator / bds.denominator;
	top_padding = binary->info->sp.pipeline.top_cropping * bds.numerator /
		      bds.denominator -
		      binary->info->sp.pipeline.top_cropping;

	/*
	 * We take into account the binning done by the sensor. We do this
	 * by cropping the non-binned part of the shading table and then
	 * increasing the size of a grid cell with this same binning factor.
	 */
	input_width <<= sensor_binning;
	input_height <<= sensor_binning;
	/*
	 * We also scale the padding by the same binning factor. This will
	 * make it much easier later on to calculate the padding of the
	 * shading table.
	 */
	left_padding <<= sensor_binning;
	right_padding <<= sensor_binning;
	top_padding <<= sensor_binning;

	/*
	 * during simulation, the used resolution can exceed the sensor
	 * resolution, so we clip it.
	 */
	input_width = min(input_width, in_table->sensor_width);
	input_height = min(input_height, in_table->sensor_height);

	/* This prepare_shading_table() function is called only in legacy API (not in new API).
	   Then, the legacy shading table width and height should be used. */
	table_width = binary->sctbl_width_per_color;
	table_height = binary->sctbl_height;

	result = ia_css_shading_table_alloc(table_width, table_height);
	if (!result) {
		*target_table = NULL;
		return;
	}

	result->sensor_width = in_table->sensor_width;
	result->sensor_height = in_table->sensor_height;
	result->fraction_bits = in_table->fraction_bits;

	/*
	 * now we crop the original shading table and then interpolate to the
	 * requested resolution and decimation factor.
	 */
	for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++) {
		crop_and_interpolate(input_width, input_height,
				     left_padding, right_padding, top_padding,
				     in_table,
				     result, i);
	}
	*target_table = result;
}
/*
 * Allocate a shading table of @width x @height cells with one data
 * plane per color. Returns NULL on allocation failure (any partially
 * allocated planes are rolled back).
 */
struct ia_css_shading_table *
ia_css_shading_table_alloc(
    unsigned int width,
    unsigned int height)
{
	struct ia_css_shading_table *table;
	unsigned int color;

	IA_CSS_ENTER("");

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return NULL;

	table->width = width;
	table->height = height;
	table->sensor_width = 0;
	table->sensor_height = 0;
	table->fraction_bits = 0;

	for (color = 0; color < IA_CSS_SC_NUM_COLORS; color++) {
		table->data[color] =
		    kvmalloc(width * height * sizeof(*table->data[0]),
			     GFP_KERNEL);
		if (!table->data[color]) {
			/* roll back the planes allocated so far */
			while (color-- > 0) {
				kvfree(table->data[color]);
				table->data[color] = NULL;
			}
			kfree(table);
			return NULL;
		}
	}

	IA_CSS_LEAVE("");
	return table;
}
/*
 * Free a shading table previously allocated with
 * ia_css_shading_table_alloc(). A NULL @table is a no-op.
 */
void
ia_css_shading_table_free(struct ia_css_shading_table *table)
{
	unsigned int i;

	if (!table)
		return;

	/*
	 * We only output logging when the table is not NULL, otherwise
	 * logs will give the impression that a table was freed.
	 */
	IA_CSS_ENTER("");

	/*
	 * kvfree(NULL) is a no-op, so the per-plane NULL check the old
	 * code did was redundant; likewise nulling data[i] right before
	 * freeing the containing struct was a dead store.
	 */
	for (i = 0; i < IA_CSS_SC_NUM_COLORS; i++)
		kvfree(table->data[i]);
	kfree(table);

	IA_CSS_LEAVE("");
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_param_shading.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Clovertrail PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2013 Intel Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#include <media/videobuf-vmalloc.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"
#include "hmm/hmm_bo.h"
#include "hmm/hmm.h"
#include "atomisp_compat.h"
#include "atomisp_internal.h"
#include "atomisp_cmd.h"
#include "atomisp-regs.h"
#include "atomisp_fops.h"
#include "atomisp_ioctl.h"
#include "ia_css_debug.h"
#include "ia_css_isp_param.h"
#include "sh_css_hrt.h"
#include "ia_css_isys.h"
#include <linux/io.h>
#include <linux/pm_runtime.h>
/* Assume max number of ACC stages */
#define MAX_ACC_STAGES 20
/* Ideally, this should come from CSS headers */
#define NO_LINK -1
/*
* to serialize MMIO access , this is due to ISP2400 silicon issue Sighting
* #4684168, if concurrency access happened, system may hard hang.
*/
static DEFINE_SPINLOCK(mmio_lock);
/* Which CSS frame info a caller is asking about (viewfinder, output, raw). */
enum frame_info_type {
	ATOMISP_CSS_VF_FRAME,
	ATOMISP_CSS_SECOND_VF_FRAME,
	ATOMISP_CSS_OUTPUT_FRAME,
	ATOMISP_CSS_SECOND_OUTPUT_FRAME,
	ATOMISP_CSS_RAW_FRAME,
};

/* A bayer downscale ratio expressed as numerator/denominator. */
struct bayer_ds_factor {
	unsigned int numerator;
	unsigned int denominator;
};
/* 8-bit MMIO write, serialized by mmio_lock (ISP2400 sighting #4684168). */
static void atomisp_css2_hw_store_8(hrt_address addr, uint8_t data)
{
	struct atomisp_device *adev = dev_get_drvdata(atomisp_dev);
	void __iomem *reg = adev->base + (addr & 0x003FFFFF);
	unsigned long irqflags;

	spin_lock_irqsave(&mmio_lock, irqflags);
	writeb(data, reg);
	spin_unlock_irqrestore(&mmio_lock, irqflags);
}
/* 16-bit MMIO write, serialized by mmio_lock (ISP2400 sighting #4684168). */
static void atomisp_css2_hw_store_16(hrt_address addr, uint16_t data)
{
	struct atomisp_device *adev = dev_get_drvdata(atomisp_dev);
	void __iomem *reg = adev->base + (addr & 0x003FFFFF);
	unsigned long irqflags;

	spin_lock_irqsave(&mmio_lock, irqflags);
	writew(data, reg);
	spin_unlock_irqrestore(&mmio_lock, irqflags);
}
/* 32-bit MMIO write, serialized by mmio_lock (ISP2400 sighting #4684168). */
void atomisp_css2_hw_store_32(hrt_address addr, uint32_t data)
{
	struct atomisp_device *adev = dev_get_drvdata(atomisp_dev);
	void __iomem *reg = adev->base + (addr & 0x003FFFFF);
	unsigned long irqflags;

	spin_lock_irqsave(&mmio_lock, irqflags);
	writel(data, reg);
	spin_unlock_irqrestore(&mmio_lock, irqflags);
}
/* 8-bit MMIO read, serialized by mmio_lock (ISP2400 sighting #4684168). */
static uint8_t atomisp_css2_hw_load_8(hrt_address addr)
{
	struct atomisp_device *adev = dev_get_drvdata(atomisp_dev);
	void __iomem *reg = adev->base + (addr & 0x003FFFFF);
	unsigned long irqflags;
	u8 val;

	spin_lock_irqsave(&mmio_lock, irqflags);
	val = readb(reg);
	spin_unlock_irqrestore(&mmio_lock, irqflags);

	return val;
}
/* 16-bit MMIO read, serialized by mmio_lock (ISP2400 sighting #4684168). */
static uint16_t atomisp_css2_hw_load_16(hrt_address addr)
{
	struct atomisp_device *adev = dev_get_drvdata(atomisp_dev);
	void __iomem *reg = adev->base + (addr & 0x003FFFFF);
	unsigned long irqflags;
	u16 val;

	spin_lock_irqsave(&mmio_lock, irqflags);
	val = readw(reg);
	spin_unlock_irqrestore(&mmio_lock, irqflags);

	return val;
}
/* 32-bit MMIO read, serialized by mmio_lock (ISP2400 sighting #4684168). */
static uint32_t atomisp_css2_hw_load_32(hrt_address addr)
{
	struct atomisp_device *adev = dev_get_drvdata(atomisp_dev);
	void __iomem *reg = adev->base + (addr & 0x003FFFFF);
	unsigned long irqflags;
	u32 val;

	spin_lock_irqsave(&mmio_lock, irqflags);
	val = readl(reg);
	spin_unlock_irqrestore(&mmio_lock, irqflags);

	return val;
}
/*
 * Byte-wise MMIO block write of n bytes from 'from' to 'addr', done under
 * mmio_lock so the whole transfer is one serialized MMIO transaction.
 * Note: arithmetic on 'const void *' (from++) is a GCC extension that
 * advances by one byte, matching the per-byte writeb().
 */
static void atomisp_css2_hw_store(hrt_address addr, const void *from, uint32_t n)
{
	struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
	unsigned long flags;
	unsigned int i;

	addr &= 0x003FFFFF;
	spin_lock_irqsave(&mmio_lock, flags);
	for (i = 0; i < n; i++, from++)
		writeb(*(s8 *)from, isp->base + addr + i);
	spin_unlock_irqrestore(&mmio_lock, flags);
}
/*
 * Byte-wise MMIO block read of n bytes from 'addr' into 'to', done under
 * mmio_lock so the whole transfer is one serialized MMIO transaction.
 * Note: arithmetic on 'void *' (to++) is a GCC extension that advances
 * by one byte, matching the per-byte readb().
 */
static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n)
{
	struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
	unsigned long flags;
	unsigned int i;

	addr &= 0x003FFFFF;
	spin_lock_irqsave(&mmio_lock, flags);
	for (i = 0; i < n; i++, to++)
		*(s8 *)to = readb(isp->base + addr + i);
	spin_unlock_irqrestore(&mmio_lock, flags);
}
/*
 * vprintk adapter matching the CSS print-environment callback signature;
 * the CSS layer expects an int return, which is always 0 here.
 */
static int __printf(1, 0) atomisp_vprintk(const char *fmt, va_list args)
{
	vprintk(fmt, args);
	return 0;
}
/* Convenience wrapper: read a 32-bit MMIO word into *data. */
void atomisp_load_uint32(hrt_address addr, uint32_t *data)
{
	*data = atomisp_css2_hw_load_32(addr);
}
/*
 * Fetch the ISP MMU page-directory base address via the sh_mmu_mrfld
 * driver hook. Returns 0 on success, -EINVAL if the hook is missing.
 */
static int hmm_get_mmu_base_addr(struct device *dev, unsigned int *mmu_base_addr)
{
	if (!sh_mmu_mrfld.get_pd_base) {
		dev_err(dev, "get mmu base address failed.\n");
		return -EINVAL;
	}

	*mmu_base_addr = sh_mmu_mrfld.get_pd_base(&bo_device.mmu,
			 bo_device.mmu.base_address);
	return 0;
}
/*
 * Dump one pipe's config and extra-config to the dynamic-debug log.
 * Pure diagnostics: reads only, no side effects beyond dev_dbg().
 * Does nothing if the pipe at pipe_id has not been created.
 */
static void __dump_pipe_config(struct atomisp_sub_device *asd,
			       struct atomisp_stream_env *stream_env,
			       unsigned int pipe_id)
{
	struct atomisp_device *isp = asd->isp;

	if (stream_env->pipes[pipe_id]) {
		struct ia_css_pipe_config *p_config;
		struct ia_css_pipe_extra_config *pe_config;

		p_config = &stream_env->pipe_configs[pipe_id];
		pe_config = &stream_env->pipe_extra_configs[pipe_id];
		dev_dbg(isp->dev, "dumping pipe[%d] config:\n", pipe_id);
		dev_dbg(isp->dev,
			"pipe_config.pipe_mode:%d.\n", p_config->mode);
		dev_dbg(isp->dev,
			"pipe_config.output_info[0] w=%d, h=%d.\n",
			p_config->output_info[0].res.width,
			p_config->output_info[0].res.height);
		dev_dbg(isp->dev,
			"pipe_config.vf_pp_in_res w=%d, h=%d.\n",
			p_config->vf_pp_in_res.width,
			p_config->vf_pp_in_res.height);
		dev_dbg(isp->dev,
			"pipe_config.capt_pp_in_res w=%d, h=%d.\n",
			p_config->capt_pp_in_res.width,
			p_config->capt_pp_in_res.height);
		dev_dbg(isp->dev,
			"pipe_config.output.padded w=%d.\n",
			p_config->output_info[0].padded_width);
		dev_dbg(isp->dev,
			"pipe_config.vf_output_info[0] w=%d, h=%d.\n",
			p_config->vf_output_info[0].res.width,
			p_config->vf_output_info[0].res.height);
		dev_dbg(isp->dev,
			"pipe_config.bayer_ds_out_res w=%d, h=%d.\n",
			p_config->bayer_ds_out_res.width,
			p_config->bayer_ds_out_res.height);
		dev_dbg(isp->dev,
			"pipe_config.envelope w=%d, h=%d.\n",
			p_config->dvs_envelope.width,
			p_config->dvs_envelope.height);
		dev_dbg(isp->dev,
			"pipe_config.dvs_frame_delay=%d.\n",
			p_config->dvs_frame_delay);
		dev_dbg(isp->dev,
			"pipe_config.isp_pipe_version:%d.\n",
			p_config->isp_pipe_version);
		dev_dbg(isp->dev,
			"pipe_config.default_capture_config.capture_mode=%d.\n",
			p_config->default_capture_config.mode);
		dev_dbg(isp->dev,
			"pipe_config.enable_dz=%d.\n",
			p_config->enable_dz);
		dev_dbg(isp->dev,
			"pipe_config.default_capture_config.enable_xnr=%d.\n",
			p_config->default_capture_config.enable_xnr);
		dev_dbg(isp->dev,
			"dumping pipe[%d] extra config:\n", pipe_id);
		dev_dbg(isp->dev,
			"pipe_extra_config.enable_raw_binning:%d.\n",
			pe_config->enable_raw_binning);
		dev_dbg(isp->dev,
			"pipe_extra_config.enable_yuv_ds:%d.\n",
			pe_config->enable_yuv_ds);
		dev_dbg(isp->dev,
			"pipe_extra_config.enable_high_speed:%d.\n",
			pe_config->enable_high_speed);
		dev_dbg(isp->dev,
			"pipe_extra_config.enable_dvs_6axis:%d.\n",
			pe_config->enable_dvs_6axis);
		dev_dbg(isp->dev,
			"pipe_extra_config.enable_reduced_pipe:%d.\n",
			pe_config->enable_reduced_pipe);
		dev_dbg(isp->dev,
			"pipe_(extra_)config.enable_dz:%d.\n",
			p_config->enable_dz);
		dev_dbg(isp->dev,
			"pipe_extra_config.disable_vf_pp:%d.\n",
			pe_config->disable_vf_pp);
	}
}
/*
 * Dump a stream's full configuration (per-pipe configs, input source,
 * isys configs, buffer/metadata setup) to the dynamic-debug log.
 * Pure diagnostics; returns early if no pipe in the stream exists.
 */
static void __dump_stream_config(struct atomisp_sub_device *asd,
				 struct atomisp_stream_env *stream_env)
{
	struct atomisp_device *isp = asd->isp;
	struct ia_css_stream_config *s_config;
	int j;
	bool valid_stream = false;

	/* Dump each created pipe; skip everything if none exist. */
	for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
		if (stream_env->pipes[j]) {
			__dump_pipe_config(asd, stream_env, j);
			valid_stream = true;
		}
	}
	if (!valid_stream)
		return;
	s_config = &stream_env->stream_config;
	dev_dbg(isp->dev, "stream_config.mode=%d.\n", s_config->mode);

	/* Source-specific fields: MIPI port, TPG, or PRBS generator. */
	if (s_config->mode == IA_CSS_INPUT_MODE_SENSOR ||
	    s_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
		dev_dbg(isp->dev, "stream_config.source.port.port=%d.\n",
			s_config->source.port.port);
		dev_dbg(isp->dev, "stream_config.source.port.num_lanes=%d.\n",
			s_config->source.port.num_lanes);
		dev_dbg(isp->dev, "stream_config.source.port.timeout=%d.\n",
			s_config->source.port.timeout);
		dev_dbg(isp->dev, "stream_config.source.port.rxcount=0x%x.\n",
			s_config->source.port.rxcount);
		dev_dbg(isp->dev, "stream_config.source.port.compression.type=%d.\n",
			s_config->source.port.compression.type);
		dev_dbg(isp->dev,
			"stream_config.source.port.compression.compressed_bits_per_pixel=%d.\n",
			s_config->source.port.compression.
			compressed_bits_per_pixel);
		dev_dbg(isp->dev,
			"stream_config.source.port.compression.uncompressed_bits_per_pixel=%d.\n",
			s_config->source.port.compression.
			uncompressed_bits_per_pixel);
	} else if (s_config->mode == IA_CSS_INPUT_MODE_TPG) {
		dev_dbg(isp->dev, "stream_config.source.tpg.id=%d.\n",
			s_config->source.tpg.id);
		dev_dbg(isp->dev, "stream_config.source.tpg.mode=%d.\n",
			s_config->source.tpg.mode);
		dev_dbg(isp->dev, "stream_config.source.tpg.x_mask=%d.\n",
			s_config->source.tpg.x_mask);
		dev_dbg(isp->dev, "stream_config.source.tpg.x_delta=%d.\n",
			s_config->source.tpg.x_delta);
		dev_dbg(isp->dev, "stream_config.source.tpg.y_mask=%d.\n",
			s_config->source.tpg.y_mask);
		dev_dbg(isp->dev, "stream_config.source.tpg.y_delta=%d.\n",
			s_config->source.tpg.y_delta);
		dev_dbg(isp->dev, "stream_config.source.tpg.xy_mask=%d.\n",
			s_config->source.tpg.xy_mask);
	} else if (s_config->mode == IA_CSS_INPUT_MODE_PRBS) {
		dev_dbg(isp->dev, "stream_config.source.prbs.id=%d.\n",
			s_config->source.prbs.id);
		dev_dbg(isp->dev, "stream_config.source.prbs.h_blank=%d.\n",
			s_config->source.prbs.h_blank);
		dev_dbg(isp->dev, "stream_config.source.prbs.v_blank=%d.\n",
			s_config->source.prbs.v_blank);
		dev_dbg(isp->dev, "stream_config.source.prbs.seed=%d.\n",
			s_config->source.prbs.seed);
		dev_dbg(isp->dev, "stream_config.source.prbs.seed1=%d.\n",
			s_config->source.prbs.seed1);
	}

	for (j = 0; j < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; j++) {
		dev_dbg(isp->dev, "stream_configisys_config[%d].input_res w=%d, h=%d.\n",
			j,
			s_config->isys_config[j].input_res.width,
			s_config->isys_config[j].input_res.height);
		dev_dbg(isp->dev, "stream_configisys_config[%d].linked_isys_stream_id=%d\n",
			j,
			s_config->isys_config[j].linked_isys_stream_id);
		dev_dbg(isp->dev, "stream_configisys_config[%d].format=%d\n",
			j,
			s_config->isys_config[j].format);
		dev_dbg(isp->dev, "stream_configisys_config[%d].valid=%d.\n",
			j,
			s_config->isys_config[j].valid);
	}
	dev_dbg(isp->dev, "stream_config.input_config.input_res w=%d, h=%d.\n",
		s_config->input_config.input_res.width,
		s_config->input_config.input_res.height);
	dev_dbg(isp->dev, "stream_config.input_config.effective_res w=%d, h=%d.\n",
		s_config->input_config.effective_res.width,
		s_config->input_config.effective_res.height);
	dev_dbg(isp->dev, "stream_config.input_config.format=%d\n",
		s_config->input_config.format);
	dev_dbg(isp->dev, "stream_config.input_config.bayer_order=%d.\n",
		s_config->input_config.bayer_order);
	dev_dbg(isp->dev, "stream_config.pixels_per_clock=%d.\n",
		s_config->pixels_per_clock);
	dev_dbg(isp->dev, "stream_config.online=%d.\n", s_config->online);
	dev_dbg(isp->dev, "stream_config.continuous=%d.\n",
		s_config->continuous);
	dev_dbg(isp->dev, "stream_config.disable_cont_viewfinder=%d.\n",
		s_config->disable_cont_viewfinder);
	dev_dbg(isp->dev, "stream_config.channel_id=%d.\n",
		s_config->channel_id);
	dev_dbg(isp->dev, "stream_config.init_num_cont_raw_buf=%d.\n",
		s_config->init_num_cont_raw_buf);
	dev_dbg(isp->dev, "stream_config.target_num_cont_raw_buf=%d.\n",
		s_config->target_num_cont_raw_buf);
	dev_dbg(isp->dev, "stream_config.left_padding=%d.\n",
		s_config->left_padding);
	dev_dbg(isp->dev, "stream_config.sensor_binning_factor=%d.\n",
		s_config->sensor_binning_factor);
	dev_dbg(isp->dev, "stream_config.pixels_per_clock=%d.\n",
		s_config->pixels_per_clock);
	dev_dbg(isp->dev, "stream_config.pack_raw_pixels=%d.\n",
		s_config->pack_raw_pixels);
	dev_dbg(isp->dev, "stream_config.flash_gpio_pin=%d.\n",
		s_config->flash_gpio_pin);
	dev_dbg(isp->dev, "stream_config.mipi_buffer_config.size_mem_words=%d.\n",
		s_config->mipi_buffer_config.size_mem_words);
	dev_dbg(isp->dev, "stream_config.mipi_buffer_config.contiguous=%d.\n",
		s_config->mipi_buffer_config.contiguous);
	dev_dbg(isp->dev, "stream_config.metadata_config.data_type=%d.\n",
		s_config->metadata_config.data_type);
	dev_dbg(isp->dev, "stream_config.metadata_config.resolution w=%d, h=%d.\n",
		s_config->metadata_config.resolution.width,
		s_config->metadata_config.resolution.height);
}
/*
 * Stop (if running) and destroy one CSS stream.
 * State walks CSS_STREAM_STARTED -> STOPPED -> UNINIT; statement order
 * matters, as the stop must fully complete before destroy is legal.
 * Returns 0 on success (or if there is no stream), -EINVAL on failure.
 */
static int __destroy_stream(struct atomisp_sub_device *asd,
			    struct atomisp_stream_env *stream_env)
{
	struct atomisp_device *isp = asd->isp;
	unsigned long timeout;

	if (!stream_env->stream)
		return 0;

	if (stream_env->stream_state == CSS_STREAM_STARTED
	    && ia_css_stream_stop(stream_env->stream) != 0) {
		dev_err(isp->dev, "stop stream failed.\n");
		return -EINVAL;
	}

	if (stream_env->stream_state == CSS_STREAM_STARTED) {
		/* Poll up to 40ms for the stream to report stopped. */
		timeout = jiffies + msecs_to_jiffies(40);
		while (1) {
			if (ia_css_stream_has_stopped(stream_env->stream))
				break;

			if (time_after(jiffies, timeout)) {
				dev_warn(isp->dev, "stop stream timeout.\n");
				break;
			}
			usleep_range(100, 200);
		}
	}

	stream_env->stream_state = CSS_STREAM_STOPPED;

	if (ia_css_stream_destroy(stream_env->stream)) {
		dev_err(isp->dev, "destroy stream failed.\n");
		return -EINVAL;
	}

	stream_env->stream_state = CSS_STREAM_UNINIT;
	stream_env->stream = NULL;

	return 0;
}
/*
 * Destroy every input stream of the sub-device, stopping at the first
 * failure (remaining streams are left intact in that case).
 */
static int __destroy_streams(struct atomisp_sub_device *asd)
{
	int ret, i;

	for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
		ret = __destroy_stream(asd, &asd->stream_env[i]);
		if (ret)
			return ret;
	}
	asd->stream_prepared = false;
	return 0;
}
/*
 * Create a CSS stream from all pipes already created in this stream_env.
 * A stream with zero pipes is silently skipped (returns 0).
 * On success the stream enters CSS_STREAM_CREATED; if fetching stream
 * info fails the stream is destroyed again and -EINVAL returned.
 */
static int __create_stream(struct atomisp_sub_device *asd,
			   struct atomisp_stream_env *stream_env)
{
	int pipe_index = 0, i;
	struct ia_css_pipe *multi_pipes[IA_CSS_PIPE_ID_NUM];

	/* Collect the created pipes into a dense array for CSS. */
	for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
		if (stream_env->pipes[i])
			multi_pipes[pipe_index++] = stream_env->pipes[i];
	}
	if (pipe_index == 0)
		return 0;

	stream_env->stream_config.target_num_cont_raw_buf =
	    asd->continuous_raw_buffer_size->val;
	stream_env->stream_config.channel_id = stream_env->ch_id;
	stream_env->stream_config.ia_css_enable_raw_buffer_locking =
	    asd->enable_raw_buffer_lock->val;

	__dump_stream_config(asd, stream_env);
	if (ia_css_stream_create(&stream_env->stream_config,
				 pipe_index, multi_pipes, &stream_env->stream) != 0)
		return -EINVAL;
	if (ia_css_stream_get_info(stream_env->stream,
				   &stream_env->stream_info) != 0) {
		ia_css_stream_destroy(stream_env->stream);
		stream_env->stream = NULL;
		return -EINVAL;
	}

	stream_env->stream_state = CSS_STREAM_CREATED;
	return 0;
}
/*
 * Create all input streams; on failure, tear down the streams created
 * so far (in reverse order) and return the error.
 */
static int __create_streams(struct atomisp_sub_device *asd)
{
	int ret, i;

	for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
		ret = __create_stream(asd, &asd->stream_env[i]);
		if (ret)
			goto rollback;
	}
	asd->stream_prepared = true;
	return 0;
rollback:
	for (i--; i >= 0; i--)
		__destroy_stream(asd, &asd->stream_env[i]);
	return ret;
}
/*
 * Destroy every pipe of one stream_env. Continues past individual
 * failures (pointers are cleared regardless) and returns -EINVAL if
 * any destroy failed, 0 otherwise.
 */
static int __destroy_stream_pipes(struct atomisp_sub_device *asd,
				  struct atomisp_stream_env *stream_env)
{
	struct atomisp_device *isp = asd->isp;
	int ret = 0;
	int i;

	for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
		if (!stream_env->pipes[i])
			continue;
		if (ia_css_pipe_destroy(stream_env->pipes[i])
		    != 0) {
			dev_err(isp->dev,
				"destroy pipe[%d]failed.cannot recover.\n", i);
			ret = -EINVAL;
		}
		stream_env->pipes[i] = NULL;
		stream_env->update_pipe[i] = false;
	}
	return ret;
}
/*
 * Destroy the pipes of every stream_env whose stream is already gone.
 * A stream_env that still owns a live stream is skipped with an error
 * message (its pipes cannot be destroyed while the stream exists).
 */
static int __destroy_pipes(struct atomisp_sub_device *asd)
{
	struct atomisp_device *isp = asd->isp;
	int i;
	int ret = 0;

	for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
		if (asd->stream_env[i].stream) {
			dev_err(isp->dev,
				"cannot destroy css pipes for stream[%d].\n",
				i);
			continue;
		}

		ret = __destroy_stream_pipes(asd, &asd->stream_env[i]);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * Tear down all CSS streams, then all CSS pipes, of the sub-device.
 * Failures are only warned about; teardown proceeds regardless.
 */
void atomisp_destroy_pipes_stream(struct atomisp_sub_device *asd)
{
	int err;

	err = __destroy_streams(asd);
	if (err)
		dev_warn(asd->isp->dev, "destroy stream failed.\n");

	err = __destroy_pipes(asd);
	if (err)
		dev_warn(asd->isp->dev, "destroy pipe failed.\n");
}
/*
 * Apply driver-side defaults plus per-pipe-type tweaks on top of the
 * pipe config (ISP 2.2 / Baytrail specifics). Modifies
 * stream_env->pipe_configs / pipe_extra_configs in place.
 */
static void __apply_additional_pipe_config(
    struct atomisp_sub_device *asd,
    struct atomisp_stream_env *stream_env,
    enum ia_css_pipe_id pipe_id)
{
	struct atomisp_device *isp = asd->isp;

	if (pipe_id < 0 || pipe_id >= IA_CSS_PIPE_ID_NUM) {
		dev_err(isp->dev,
			"wrong pipe_id for additional pipe config.\n");
		return;
	}

	/* apply default pipe config */
	stream_env->pipe_configs[pipe_id].isp_pipe_version = 2;
	stream_env->pipe_configs[pipe_id].enable_dz =
	    asd->disable_dz->val ? false : true;
	/* apply isp 2.2 specific config for baytrail*/
	switch (pipe_id) {
	case IA_CSS_PIPE_ID_CAPTURE:
		/* enable capture pp/dz manually or digital zoom would
		 * fail*/
		if (stream_env->pipe_configs[pipe_id].
		    default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW)
			stream_env->pipe_configs[pipe_id].enable_dz = false;
		break;
	case IA_CSS_PIPE_ID_VIDEO:
		/* enable reduced pipe to have binary
		 * video_dz_2_min selected*/
		stream_env->pipe_extra_configs[pipe_id]
		.enable_reduced_pipe = true;
		stream_env->pipe_configs[pipe_id]
		.enable_dz = false;

		/* Digital image stabilization needs the 6-axis DVS and a
		 * frame delay to work with. */
		if (asd->params.video_dis_en) {
			stream_env->pipe_extra_configs[pipe_id]
			.enable_dvs_6axis = true;
			stream_env->pipe_configs[pipe_id]
			.dvs_frame_delay =
			    ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
		}
		break;
	case IA_CSS_PIPE_ID_PREVIEW:
		break;
	case IA_CSS_PIPE_ID_YUVPP:
	case IA_CSS_PIPE_ID_COPY:
		stream_env->pipe_configs[pipe_id].enable_dz = false;
		break;
	default:
		break;
	}
}
/*
 * Decide whether a pipe type should exist for the sub-device's current
 * vfpp setting, copy mode and run mode. YUVPP is always valid; the
 * vfpp overrides (scaler/low-latency disable) take priority over
 * run_mode.
 */
static bool is_pipe_valid_to_current_run_mode(struct atomisp_sub_device *asd,
	enum ia_css_pipe_id pipe_id)
{
	if (pipe_id == IA_CSS_PIPE_ID_YUVPP)
		return true;

	if (asd->vfpp) {
		if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
			if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
				return true;
			else
				return false;
		} else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
			if (pipe_id == IA_CSS_PIPE_ID_CAPTURE)
				return true;
			else
				return false;
		}
	}

	if (!asd->run_mode)
		return false;

	if (asd->copy_mode && pipe_id == IA_CSS_PIPE_ID_COPY)
		return true;

	switch (asd->run_mode->val) {
	case ATOMISP_RUN_MODE_STILL_CAPTURE:
		if (pipe_id == IA_CSS_PIPE_ID_CAPTURE)
			return true;

		return false;
	case ATOMISP_RUN_MODE_PREVIEW:
		if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
			return true;

		return false;
	case ATOMISP_RUN_MODE_VIDEO:
		if (pipe_id == IA_CSS_PIPE_ID_VIDEO || pipe_id == IA_CSS_PIPE_ID_YUVPP)
			return true;

		return false;
	}

	return false;
}
/*
 * Create one CSS pipe if it is configured (non-zero output width) and
 * valid for the current run mode. Uses the cheaper ia_css_pipe_create()
 * when the extra config equals the CSS defaults, otherwise the
 * _extra variant. Returns 0 on success or when skipped.
 */
static int __create_pipe(struct atomisp_sub_device *asd,
			 struct atomisp_stream_env *stream_env,
			 enum ia_css_pipe_id pipe_id)
{
	struct atomisp_device *isp = asd->isp;
	struct ia_css_pipe_extra_config extra_config;
	int ret;

	if (pipe_id >= IA_CSS_PIPE_ID_NUM)
		return -EINVAL;

	if (!stream_env->pipe_configs[pipe_id].output_info[0].res.width)
		return 0;

	if (!is_pipe_valid_to_current_run_mode(asd, pipe_id))
		return 0;

	ia_css_pipe_extra_config_defaults(&extra_config);

	__apply_additional_pipe_config(asd, stream_env, pipe_id);
	if (!memcmp(&extra_config,
		    &stream_env->pipe_extra_configs[pipe_id],
		    sizeof(extra_config)))
		ret = ia_css_pipe_create(
			  &stream_env->pipe_configs[pipe_id],
			  &stream_env->pipes[pipe_id]);
	else
		ret = ia_css_pipe_create_extra(
			  &stream_env->pipe_configs[pipe_id],
			  &stream_env->pipe_extra_configs[pipe_id],
			  &stream_env->pipes[pipe_id]);
	if (ret)
		dev_err(isp->dev, "create pipe[%d] error.\n", pipe_id);
	return ret;
}
/*
 * Create every pipe of every stream_env. On failure, destroy all pipes
 * created so far: the first rollback iteration resumes from the failed
 * (i, j) position, then j is reset to IA_CSS_PIPE_ID_NUM so earlier
 * stream_envs are unwound completely.
 */
static int __create_pipes(struct atomisp_sub_device *asd)
{
	int ret;
	int i, j;

	for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
		for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
			ret = __create_pipe(asd, &asd->stream_env[i], j);
			if (ret)
				break;
		}
		if (j < IA_CSS_PIPE_ID_NUM)
			goto pipe_err;
	}
	return 0;
pipe_err:
	for (; i >= 0; i--) {
		for (j--; j >= 0; j--) {
			if (asd->stream_env[i].pipes[j]) {
				ia_css_pipe_destroy(asd->stream_env[i].pipes[j]);
				asd->stream_env[i].pipes[j] = NULL;
			}
		}
		j = IA_CSS_PIPE_ID_NUM;
	}
	return -EINVAL;
}
/*
 * Build the full CSS topology: pipes first, then streams on top.
 * If stream creation fails, the freshly created pipes are destroyed
 * again so the sub-device is left in a clean state.
 */
int atomisp_create_pipes_stream(struct atomisp_sub_device *asd)
{
	int ret;

	ret = __create_pipes(asd);
	if (ret) {
		dev_err(asd->isp->dev, "create pipe failed %d.\n", ret);
		return ret;
	}

	ret = __create_streams(asd);
	if (ret) {
		dev_warn(asd->isp->dev, "create stream failed %d.\n", ret);
		__destroy_pipes(asd);
		return ret;
	}

	return 0;
}
/* Rebuild streams and pipes from scratch to pick up new configuration. */
int atomisp_css_update_stream(struct atomisp_sub_device *asd)
{
	atomisp_destroy_pipes_stream(asd);
	return atomisp_create_pipes_stream(asd);
}
/*
 * Initialize the CSS layer: fetch the ISP MMU base address and call
 * ia_css_init() with pulse-type IRQs. Marks the device initialized and
 * enables the isys event queue on success.
 * Returns 0 on success, a negative errno otherwise.
 */
int atomisp_css_init(struct atomisp_device *isp)
{
	unsigned int mmu_base_addr;
	int ret;

	ret = hmm_get_mmu_base_addr(isp->dev, &mmu_base_addr);
	if (ret)
		return ret;

	/* Init ISP */
	/* A single 'ret' suffices here; the original separate 'err' local
	 * was redundant. */
	ret = ia_css_init(isp->dev, &isp->css_env.isp_css_env, NULL,
			  (uint32_t)mmu_base_addr, IA_CSS_IRQ_TYPE_PULSE);
	if (ret) {
		dev_err(isp->dev, "css init failed --- bad firmware?\n");
		return -EINVAL;
	}
	ia_css_enable_isys_event_queue(true);

	isp->css_initialized = true;
	dev_dbg(isp->dev, "sh_css_init success\n");

	return 0;
}
/*
 * Select the CSS debug print callback: 0 disables it, 1 routes it to
 * atomisp_vprintk(). Any other value is rejected with -EINVAL.
 */
static inline int __set_css_print_env(struct atomisp_device *isp, int opt)
{
	switch (opt) {
	case 0:
		isp->css_env.isp_css_env.print_env.debug_print = NULL;
		return 0;
	case 1:
		isp->css_env.isp_css_env.print_env.debug_print = atomisp_vprintk;
		return 0;
	default:
		return -EINVAL;
	}
}
/*
 * Wire up the CSS environment (firmware blob, MMIO access callbacks,
 * print callbacks) and load the ISP firmware into ISP memory.
 * 'dbg_func' is a module-level debug option defined elsewhere in this
 * file. Returns 0 on success, -EINVAL on firmware load failure.
 */
int atomisp_css_load_firmware(struct atomisp_device *isp)
{
	int err;

	/* set css env */
	isp->css_env.isp_css_fw.data = (void *)isp->firmware->data;
	isp->css_env.isp_css_fw.bytes = isp->firmware->size;

	isp->css_env.isp_css_env.hw_access_env.store_8 =
	    atomisp_css2_hw_store_8;
	isp->css_env.isp_css_env.hw_access_env.store_16 =
	    atomisp_css2_hw_store_16;
	isp->css_env.isp_css_env.hw_access_env.store_32 =
	    atomisp_css2_hw_store_32;

	isp->css_env.isp_css_env.hw_access_env.load_8 = atomisp_css2_hw_load_8;
	isp->css_env.isp_css_env.hw_access_env.load_16 =
	    atomisp_css2_hw_load_16;
	isp->css_env.isp_css_env.hw_access_env.load_32 =
	    atomisp_css2_hw_load_32;

	isp->css_env.isp_css_env.hw_access_env.load = atomisp_css2_hw_load;
	isp->css_env.isp_css_env.hw_access_env.store = atomisp_css2_hw_store;

	__set_css_print_env(isp, dbg_func);

	isp->css_env.isp_css_env.print_env.error_print = atomisp_vprintk;

	/* load isp fw into ISP memory */
	err = ia_css_load_firmware(isp->dev, &isp->css_env.isp_css_env,
				   &isp->css_env.isp_css_fw);
	if (err) {
		dev_err(isp->dev, "css load fw failed.\n");
		return -EINVAL;
	}

	return 0;
}
/* Tear down the CSS layer and clear the initialized flag. */
void atomisp_css_uninit(struct atomisp_device *isp)
{
	isp->css_initialized = false;
	ia_css_uninit();
}
/*
 * Translate the pending CSS interrupt into an info bitmask via
 * ia_css_irq_translate(). Returns 0 on success, -EINVAL on failure.
 */
int atomisp_css_irq_translate(struct atomisp_device *isp,
			      unsigned int *infos)
{
	int err;

	err = ia_css_irq_translate(infos);
	if (err) {
		dev_warn(isp->dev,
			 "%s:failed to translate irq (err = %d,infos = %d)\n",
			 __func__, err, *infos);
		return -EINVAL;
	}

	return 0;
}
/*
 * Read the CSI-2 receiver IRQ info for a MIPI port.
 * On ISP2401 the isys rx helpers do not exist, so 0 is reported.
 */
void atomisp_css_rx_get_irq_info(enum mipi_port_id port,
				 unsigned int *infos)
{
#ifndef ISP2401
	ia_css_isys_rx_get_irq_info(port, infos);
#else
	*infos = 0;
#endif
}
/*
 * Clear the CSI-2 receiver IRQ info bits for a MIPI port.
 * No-op on ISP2401, which lacks the isys rx helpers.
 */
void atomisp_css_rx_clear_irq_info(enum mipi_port_id port,
				   unsigned int infos)
{
#ifndef ISP2401
	ia_css_isys_rx_clear_irq_info(port, infos);
#endif
}
/*
 * Enable or disable a CSS interrupt source.
 * Returns 0 on success, -EINVAL if CSS rejects the irq info value.
 */
int atomisp_css_irq_enable(struct atomisp_device *isp,
			   enum ia_css_irq_info info, bool enable)
{
	dev_dbg(isp->dev, "%s: css irq info 0x%08x: %s (%d).\n",
		__func__, info,
		enable ? "enable" : "disable", enable);
	if (ia_css_irq_enable(info, enable)) {
		dev_warn(isp->dev, "%s:Invalid irq info: 0x%08x when %s.\n",
			 __func__, info,
			 enable ? "enabling" : "disabling");
		return -EINVAL;
	}

	return 0;
}
/*
 * Reset every stream_env of the sub-device to CSS defaults: NULL
 * pipes/streams and default pipe/stream configurations.
 *
 * NOTE(review): this loop bounds the pipe arrays with
 * IA_CSS_PIPE_MODE_NUM while the rest of this file uses
 * IA_CSS_PIPE_ID_NUM — confirm the two constants are equal.
 */
void atomisp_css_init_struct(struct atomisp_sub_device *asd)
{
	int i, j;

	for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
		asd->stream_env[i].stream = NULL;
		for (j = 0; j < IA_CSS_PIPE_MODE_NUM; j++) {
			asd->stream_env[i].pipes[j] = NULL;
			asd->stream_env[i].update_pipe[j] = false;
			ia_css_pipe_config_defaults(
			    &asd->stream_env[i].pipe_configs[j]);
			ia_css_pipe_extra_config_defaults(
			    &asd->stream_env[i].pipe_extra_configs[j]);
		}
		ia_css_stream_config_defaults(&asd->stream_env[i].stream_config);
	}
}
int atomisp_q_video_buffer_to_css(struct atomisp_sub_device *asd,
struct ia_css_frame *frame,
enum atomisp_input_stream_id stream_id,
enum ia_css_buffer_type css_buf_type,
enum ia_css_pipe_id css_pipe_id)
{
struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
struct ia_css_buffer css_buf = {0};
int err;
css_buf.type = css_buf_type;
css_buf.data.frame = frame;
err = ia_css_pipe_enqueue_buffer(
stream_env->pipes[css_pipe_id], &css_buf);
if (err)
return -EINVAL;
return 0;
}
int atomisp_q_metadata_buffer_to_css(struct atomisp_sub_device *asd,
struct atomisp_metadata_buf *metadata_buf,
enum atomisp_input_stream_id stream_id,
enum ia_css_pipe_id css_pipe_id)
{
struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
struct ia_css_buffer buffer = {0};
struct atomisp_device *isp = asd->isp;
buffer.type = IA_CSS_BUFFER_TYPE_METADATA;
buffer.data.metadata = metadata_buf->metadata;
if (ia_css_pipe_enqueue_buffer(stream_env->pipes[css_pipe_id],
&buffer)) {
dev_err(isp->dev, "failed to q meta data buffer\n");
return -EINVAL;
}
return 0;
}
int atomisp_q_s3a_buffer_to_css(struct atomisp_sub_device *asd,
struct atomisp_s3a_buf *s3a_buf,
enum atomisp_input_stream_id stream_id,
enum ia_css_pipe_id css_pipe_id)
{
struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
struct ia_css_buffer buffer = {0};
struct atomisp_device *isp = asd->isp;
buffer.type = IA_CSS_BUFFER_TYPE_3A_STATISTICS;
buffer.data.stats_3a = s3a_buf->s3a_data;
if (ia_css_pipe_enqueue_buffer(
stream_env->pipes[css_pipe_id],
&buffer)) {
dev_dbg(isp->dev, "failed to q s3a stat buffer\n");
return -EINVAL;
}
return 0;
}
int atomisp_q_dis_buffer_to_css(struct atomisp_sub_device *asd,
struct atomisp_dis_buf *dis_buf,
enum atomisp_input_stream_id stream_id,
enum ia_css_pipe_id css_pipe_id)
{
struct atomisp_stream_env *stream_env = &asd->stream_env[stream_id];
struct ia_css_buffer buffer = {0};
struct atomisp_device *isp = asd->isp;
buffer.type = IA_CSS_BUFFER_TYPE_DIS_STATISTICS;
buffer.data.stats_dvs = dis_buf->dis_data;
if (ia_css_pipe_enqueue_buffer(
stream_env->pipes[css_pipe_id],
&buffer)) {
dev_dbg(isp->dev, "failed to q dvs stat buffer\n");
return -EINVAL;
}
return 0;
}
/*
 * Start the SP and then every created stream of the sub-device.
 * On any failure after the SP is running, the pipes/streams must be
 * destroyed before ia_css_stop_sp() (CSS 2.0 API limitation) and are
 * rebuilt afterwards so the device returns to its pre-call state.
 */
int atomisp_css_start(struct atomisp_sub_device *asd)
{
	struct atomisp_device *isp = asd->isp;
	bool sp_is_started = false;
	int ret = 0, i = 0;

	if (!sh_css_hrt_system_is_idle())
		dev_err(isp->dev, "CSS HW not idle before starting SP\n");
	if (ia_css_start_sp()) {
		dev_err(isp->dev, "start sp error.\n");
		ret = -EINVAL;
		goto start_err;
	}

	sp_is_started = true;

	for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
		if (asd->stream_env[i].stream) {
			if (ia_css_stream_start(asd->stream_env[i]
						.stream) != 0) {
				dev_err(isp->dev, "stream[%d] start error.\n", i);
				ret = -EINVAL;
				goto start_err;
			} else {
				asd->stream_env[i].stream_state = CSS_STREAM_STARTED;
				dev_dbg(isp->dev, "stream[%d] started.\n", i);
			}
		}
	}

	return 0;

start_err:
	/*
	 * CSS 2.0 API limitation: ia_css_stop_sp() can only be called after
	 * destroying all pipes.
	 */
	if (sp_is_started) {
		atomisp_destroy_pipes_stream(asd);
		ia_css_stop_sp();
		atomisp_create_pipes_stream(asd);
	}

	return ret;
}
/*
 * Push the accumulated ISP parameter set to the general stream and
 * clear the staging config afterwards. Refused in copy mode (see
 * FIXME below).
 */
void atomisp_css_update_isp_params(struct atomisp_sub_device *asd)
{
	/*
	 * FIXME!
	 * for ISP2401 new input system, this api is under development.
	 * Calling it would cause kernel panic.
	 *
	 * VIED BZ: 1458
	 *
	 * Check if it is Cherry Trail and also new input system
	 */
	if (asd->copy_mode) {
		dev_warn(asd->isp->dev,
			 "%s: ia_css_stream_set_isp_config() not supported in copy mode!.\n",
			 __func__);
		return;
	}

	ia_css_stream_set_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &asd->params.config);
	memset(&asd->params.config, 0, sizeof(asd->params.config));
}
/*
 * Like atomisp_css_update_isp_params() but targeted at a single pipe;
 * falls back to the stream-wide variant when pipe is NULL. The staged
 * config is cleared even if the CSS call fails (only a warning is
 * logged).
 */
void atomisp_css_update_isp_params_on_pipe(struct atomisp_sub_device *asd,
	struct ia_css_pipe *pipe)
{
	int ret;

	if (!pipe) {
		atomisp_css_update_isp_params(asd);
		return;
	}

	dev_dbg(asd->isp->dev,
		"%s: apply parameter for ia_css_frame %p with isp_config_id %d on pipe %p.\n",
		__func__, asd->params.config.output_frame,
		asd->params.config.isp_config_id, pipe);

	ret = ia_css_stream_set_isp_config_on_pipe(
		  asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
		  &asd->params.config, pipe);
	if (ret)
		dev_warn(asd->isp->dev, "%s: ia_css_stream_set_isp_config_on_pipe failed %d\n",
			 __func__, ret);
	memset(&asd->params.config, 0, sizeof(asd->params.config));
}
/*
 * Enqueue a wrapped CSS buffer on the selected stream/pipe.
 * (buf_type is carried in the css_buffer itself and unused here.)
 * Returns 0 on success, -EINVAL on CSS failure.
 */
int atomisp_css_queue_buffer(struct atomisp_sub_device *asd,
			     enum atomisp_input_stream_id stream_id,
			     enum ia_css_pipe_id pipe_id,
			     enum ia_css_buffer_type buf_type,
			     struct atomisp_css_buffer *isp_css_buffer)
{
	struct ia_css_pipe *pipe = asd->stream_env[stream_id].pipes[pipe_id];

	if (ia_css_pipe_enqueue_buffer(pipe, &isp_css_buffer->css_buffer) != 0)
		return -EINVAL;

	return 0;
}
/*
 * Dequeue a completed CSS buffer from the selected stream/pipe into
 * isp_css_buffer. Returns 0 on success, -EINVAL on CSS failure.
 */
int atomisp_css_dequeue_buffer(struct atomisp_sub_device *asd,
			       enum atomisp_input_stream_id stream_id,
			       enum ia_css_pipe_id pipe_id,
			       enum ia_css_buffer_type buf_type,
			       struct atomisp_css_buffer *isp_css_buffer)
{
	struct atomisp_device *isp = asd->isp;
	int err;

	err = ia_css_pipe_dequeue_buffer(
		  asd->stream_env[stream_id].pipes[pipe_id],
		  &isp_css_buffer->css_buffer);
	if (err) {
		dev_err(isp->dev,
			"ia_css_pipe_dequeue_buffer failed: 0x%x\n", err);
		return -EINVAL;
	}

	return 0;
}
/*
 * Allocate the requested statistics buffers (3A, DVS, metadata) for a
 * stream. Each of s3a_buf/dis_buf/md_buf may be NULL to skip that kind;
 * allocation is also skipped when the corresponding grid/metadata is
 * disabled. Returns 0 on success, -EINVAL on allocation failure.
 *
 * NOTE(review): the error paths free earlier s3a_data/dis_data but not
 * the matching s3a_map/dvs_map nor the hmm_vmap() mappings, and the
 * hmm_vmap()/map_allocate() results are never NULL-checked — looks like
 * a leak/robustness gap; confirm against the hmm/ia_css APIs.
 */
int atomisp_css_allocate_stat_buffers(struct atomisp_sub_device *asd,
				      u16 stream_id,
				      struct atomisp_s3a_buf *s3a_buf,
				      struct atomisp_dis_buf *dis_buf,
				      struct atomisp_metadata_buf *md_buf)
{
	struct atomisp_device *isp = asd->isp;
	struct ia_css_dvs_grid_info *dvs_grid_info =
	    atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);

	if (s3a_buf && asd->params.curr_grid_info.s3a_grid.enable) {
		void *s3a_ptr;

		s3a_buf->s3a_data = ia_css_isp_3a_statistics_allocate(
				        &asd->params.curr_grid_info.s3a_grid);
		if (!s3a_buf->s3a_data) {
			dev_err(isp->dev, "3a buf allocation failed.\n");
			return -EINVAL;
		}

		s3a_ptr = hmm_vmap(s3a_buf->s3a_data->data_ptr, true);
		s3a_buf->s3a_map = ia_css_isp_3a_statistics_map_allocate(
				       s3a_buf->s3a_data, s3a_ptr);
	}

	if (dis_buf && dvs_grid_info && dvs_grid_info->enable) {
		void *dvs_ptr;

		dis_buf->dis_data = ia_css_isp_dvs2_statistics_allocate(
				        dvs_grid_info);
		if (!dis_buf->dis_data) {
			dev_err(isp->dev, "dvs buf allocation failed.\n");
			if (s3a_buf)
				ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
			return -EINVAL;
		}

		dvs_ptr = hmm_vmap(dis_buf->dis_data->data_ptr, true);
		dis_buf->dvs_map = ia_css_isp_dvs_statistics_map_allocate(
				       dis_buf->dis_data, dvs_ptr);
	}

	if (asd->stream_env[stream_id].stream_info.
	    metadata_info.size && md_buf) {
		md_buf->metadata = ia_css_metadata_allocate(
				       &asd->stream_env[stream_id].stream_info.metadata_info);
		if (!md_buf->metadata) {
			if (s3a_buf)
				ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
			if (dis_buf)
				ia_css_isp_dvs2_statistics_free(dis_buf->dis_data);
			dev_err(isp->dev, "metadata buf allocation failed.\n");
			return -EINVAL;
		}
		md_buf->md_vptr = hmm_vmap(md_buf->metadata->address, false);
	}

	return 0;
}
/* Unmap and free one 3A statistics buffer (data, map and vmapping). */
void atomisp_css_free_3a_buffer(struct atomisp_s3a_buf *s3a_buf)
{
	if (s3a_buf->s3a_data)
		hmm_vunmap(s3a_buf->s3a_data->data_ptr);

	ia_css_isp_3a_statistics_map_free(s3a_buf->s3a_map);
	s3a_buf->s3a_map = NULL;
	ia_css_isp_3a_statistics_free(s3a_buf->s3a_data);
}
/* Unmap and free one DVS statistics buffer (data, map and vmapping). */
void atomisp_css_free_dis_buffer(struct atomisp_dis_buf *dis_buf)
{
	if (dis_buf->dis_data)
		hmm_vunmap(dis_buf->dis_data->data_ptr);

	ia_css_isp_dvs_statistics_map_free(dis_buf->dvs_map);
	dis_buf->dvs_map = NULL;
	ia_css_isp_dvs2_statistics_free(dis_buf->dis_data);
}
/* Unmap (if mapped) and free one metadata buffer. */
void atomisp_css_free_metadata_buffer(struct atomisp_metadata_buf *metadata_buf)
{
	if (metadata_buf->md_vptr) {
		hmm_vunmap(metadata_buf->metadata->address);
		metadata_buf->md_vptr = NULL;
	}
	ia_css_metadata_free(metadata_buf->metadata);
}
/*
 * Release every statistics resource of the sub-device: DVS coefficients
 * and stats, 3A user stats, the 6-axis DVS config, and all queued
 * 3A/DIS/metadata buffers on the free/in-css/ready lists.
 */
void atomisp_css_free_stat_buffers(struct atomisp_sub_device *asd)
{
	struct atomisp_s3a_buf *s3a_buf, *_s3a_buf;
	struct atomisp_dis_buf *dis_buf, *_dis_buf;
	struct atomisp_metadata_buf *md_buf, *_md_buf;
	struct ia_css_dvs_grid_info *dvs_grid_info =
	    atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
	unsigned int i;

	/* 3A statistics use vmalloc, DIS use kmalloc */
	if (dvs_grid_info && dvs_grid_info->enable) {
		ia_css_dvs2_coefficients_free(asd->params.css_param.dvs2_coeff);
		ia_css_dvs2_statistics_free(asd->params.dvs_stat);
		asd->params.css_param.dvs2_coeff = NULL;
		asd->params.dvs_stat = NULL;
		asd->params.dvs_hor_proj_bytes = 0;
		asd->params.dvs_ver_proj_bytes = 0;
		asd->params.dvs_hor_coef_bytes = 0;
		asd->params.dvs_ver_coef_bytes = 0;
		asd->params.dis_proj_data_valid = false;
		list_for_each_entry_safe(dis_buf, _dis_buf,
					 &asd->dis_stats, list) {
			atomisp_css_free_dis_buffer(dis_buf);
			list_del(&dis_buf->list);
			kfree(dis_buf);
		}
		list_for_each_entry_safe(dis_buf, _dis_buf,
					 &asd->dis_stats_in_css, list) {
			atomisp_css_free_dis_buffer(dis_buf);
			list_del(&dis_buf->list);
			kfree(dis_buf);
		}
	}

	if (asd->params.curr_grid_info.s3a_grid.enable) {
		ia_css_3a_statistics_free(asd->params.s3a_user_stat);
		asd->params.s3a_user_stat = NULL;
		asd->params.s3a_output_bytes = 0;
		list_for_each_entry_safe(s3a_buf, _s3a_buf,
					 &asd->s3a_stats, list) {
			atomisp_css_free_3a_buffer(s3a_buf);
			list_del(&s3a_buf->list);
			kfree(s3a_buf);
		}
		list_for_each_entry_safe(s3a_buf, _s3a_buf,
					 &asd->s3a_stats_in_css, list) {
			atomisp_css_free_3a_buffer(s3a_buf);
			list_del(&s3a_buf->list);
			kfree(s3a_buf);
		}
		list_for_each_entry_safe(s3a_buf, _s3a_buf,
					 &asd->s3a_stats_ready, list) {
			atomisp_css_free_3a_buffer(s3a_buf);
			list_del(&s3a_buf->list);
			kfree(s3a_buf);
		}
	}

	if (asd->params.css_param.dvs_6axis) {
		ia_css_dvs2_6axis_config_free(asd->params.css_param.dvs_6axis);
		asd->params.css_param.dvs_6axis = NULL;
	}

	for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
		list_for_each_entry_safe(md_buf, _md_buf,
					 &asd->metadata[i], list) {
			atomisp_css_free_metadata_buffer(md_buf);
			list_del(&md_buf->list);
			kfree(md_buf);
		}
		list_for_each_entry_safe(md_buf, _md_buf,
					 &asd->metadata_in_css[i], list) {
			atomisp_css_free_metadata_buffer(md_buf);
			list_del(&md_buf->list);
			kfree(md_buf);
		}
		list_for_each_entry_safe(md_buf, _md_buf,
					 &asd->metadata_ready[i], list) {
			atomisp_css_free_metadata_buffer(md_buf);
			list_del(&md_buf->list);
			kfree(md_buf);
		}
	}
	asd->params.metadata_width_size = 0;
	atomisp_free_metadata_output_buf(asd);
}
/*
 * Fetch the current grid info for @pipe_id from CSS and cache it in
 * asd->params.curr_grid_info.
 *
 * Returns 0 when the grid info (or metadata width) changed and the caller
 * must (re)allocate statistics buffers; returns -EINVAL both on a real
 * ia_css_pipe_get_info() failure and when nothing changed (callers treat
 * the latter as "no reallocation needed").
 */
int atomisp_css_get_grid_info(struct atomisp_sub_device *asd,
enum ia_css_pipe_id pipe_id)
{
struct ia_css_pipe_info p_info;
struct ia_css_grid_info old_info;
struct atomisp_device *isp = asd->isp;
int md_width = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].
stream_config.metadata_config.resolution.width;
memset(&p_info, 0, sizeof(struct ia_css_pipe_info));
memset(&old_info, 0, sizeof(struct ia_css_grid_info));
if (ia_css_pipe_get_info(
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id],
&p_info) != 0) {
dev_err(isp->dev, "ia_css_pipe_get_info failed\n");
return -EINVAL;
}
/* Keep a copy of the old grid so we can detect changes below. */
memcpy(&old_info, &asd->params.curr_grid_info,
sizeof(struct ia_css_grid_info));
memcpy(&asd->params.curr_grid_info, &p_info.grid_info,
sizeof(struct ia_css_grid_info));
/*
 * Record which css pipe enables s3a_grid.
 * Currently would have one css pipe that need it
 */
if (asd->params.curr_grid_info.s3a_grid.enable) {
if (asd->params.s3a_enabled_pipe != IA_CSS_PIPE_ID_NUM)
dev_dbg(isp->dev, "css pipe %d enabled s3a grid replaced by: %d.\n",
asd->params.s3a_enabled_pipe, pipe_id);
asd->params.s3a_enabled_pipe = pipe_id;
}
/* If the grid info has not changed and the buffers for 3A and
 * DIS statistics buffers are allocated or buffer size would be zero
 * then no need to do anything. */
if (((!memcmp(&old_info, &asd->params.curr_grid_info, sizeof(old_info))
&& asd->params.s3a_user_stat && asd->params.dvs_stat)
|| asd->params.curr_grid_info.s3a_grid.width == 0
|| asd->params.curr_grid_info.s3a_grid.height == 0)
&& asd->params.metadata_width_size == md_width) {
dev_dbg(isp->dev,
"grid info change escape. memcmp=%d, s3a_user_stat=%d,dvs_stat=%d, s3a.width=%d, s3a.height=%d, metadata width =%d\n",
!memcmp(&old_info, &asd->params.curr_grid_info,
sizeof(old_info)),
!!asd->params.s3a_user_stat, !!asd->params.dvs_stat,
asd->params.curr_grid_info.s3a_grid.width,
asd->params.curr_grid_info.s3a_grid.height,
asd->params.metadata_width_size);
return -EINVAL;
}
asd->params.metadata_width_size = md_width;
return 0;
}
/*
 * Allocate the user-side 3A statistics buffer sized from the current
 * s3a grid. A zero-sized grid is not an error: 3A is simply disabled.
 * Returns 0 on success or when disabled, -ENOMEM on allocation failure.
 */
int atomisp_alloc_3a_output_buf(struct atomisp_sub_device *asd)
{
if (!asd->params.curr_grid_info.s3a_grid.width ||
!asd->params.curr_grid_info.s3a_grid.height)
return 0;
asd->params.s3a_user_stat = ia_css_3a_statistics_allocate(
&asd->params.curr_grid_info.s3a_grid);
if (!asd->params.s3a_user_stat)
return -ENOMEM;
/* 3A statistics. These can be big, so we use vmalloc. */
asd->params.s3a_output_bytes =
asd->params.curr_grid_info.s3a_grid.width *
asd->params.curr_grid_info.s3a_grid.height *
sizeof(*asd->params.s3a_user_stat->data);
return 0;
}
/*
 * Allocate DVS2 coefficient and statistics buffers from the current DVS
 * grid, and record the per-buffer byte sizes used when copying data
 * to/from user space. A missing or disabled grid is not an error.
 * Returns 0 on success or when DVS is disabled, -ENOMEM on failure.
 */
int atomisp_alloc_dis_coef_buf(struct atomisp_sub_device *asd)
{
struct ia_css_dvs_grid_info *dvs_grid =
atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
if (!dvs_grid)
return 0;
if (!dvs_grid->enable) {
dev_dbg(asd->isp->dev, "%s: dvs_grid not enabled.\n", __func__);
return 0;
}
/* DIS coefficients. */
asd->params.css_param.dvs2_coeff = ia_css_dvs2_coefficients_allocate(
dvs_grid);
if (!asd->params.css_param.dvs2_coeff)
return -ENOMEM;
asd->params.dvs_hor_coef_bytes = dvs_grid->num_hor_coefs *
sizeof(*asd->params.css_param.dvs2_coeff->hor_coefs.odd_real);
asd->params.dvs_ver_coef_bytes = dvs_grid->num_ver_coefs *
sizeof(*asd->params.css_param.dvs2_coeff->ver_coefs.odd_real);
/* DIS projections. */
/* Projection data becomes valid only after the first DIS frame. */
asd->params.dis_proj_data_valid = false;
asd->params.dvs_stat = ia_css_dvs2_statistics_allocate(dvs_grid);
if (!asd->params.dvs_stat)
return -ENOMEM;
asd->params.dvs_hor_proj_bytes =
dvs_grid->aligned_height * dvs_grid->aligned_width *
sizeof(*asd->params.dvs_stat->hor_prod.odd_real);
asd->params.dvs_ver_proj_bytes =
dvs_grid->aligned_height * dvs_grid->aligned_width *
sizeof(*asd->params.dvs_stat->ver_prod.odd_real);
return 0;
}
/*
 * Allocate one CPU-side metadata buffer per metadata type; these buffers
 * are used to hand metadata to user space. On any allocation failure all
 * previously allocated buffers are released and -ENOMEM is returned.
 */
int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd)
{
	/* All buffers share the size reported by the generic stream. */
	unsigned int size = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
			    .stream_info.metadata_info.size;
	int i;

	for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
		asd->params.metadata_user[i] = kvmalloc(size, GFP_KERNEL);
		if (!asd->params.metadata_user[i])
			goto err_free;
	}
	return 0;

err_free:
	/* Unwind the buffers allocated so far. */
	while (--i >= 0) {
		kvfree(asd->params.metadata_user[i]);
		asd->params.metadata_user[i] = NULL;
	}
	return -ENOMEM;
}
/*
 * Release all CPU-side metadata buffers allocated by
 * atomisp_alloc_metadata_output_buf(). Pointers are reset to NULL so a
 * double call (or a partial allocation) is harmless.
 */
void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd)
{
	unsigned int i;

	for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
		/* kvfree(NULL) is a no-op, so no NULL check is needed. */
		kvfree(asd->params.metadata_user[i]);
		asd->params.metadata_user[i] = NULL;
	}
}
/*
 * Translate the pipe handle reported in a CSS event into the pipe ID
 * stored in @current_event->pipe, working around the firmware reporting
 * the capture pipe for copy-mode streams.
 */
void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd,
				      struct atomisp_css_event *current_event)
{
	/*
	 * FIXME!
	 * Pipe ID reported in CSS event is not correct for new system's
	 * copy pipe.
	 * VIED BZ: 1463
	 */
	/*
	 * Fix: "&current_event" was mis-encoded as the HTML entity
	 * sequence "¤t_event" and did not compile.
	 */
	ia_css_temp_pipe_to_pipe_id(current_event->event.pipe,
				    &current_event->pipe);
	if (asd && asd->copy_mode &&
	    current_event->pipe == IA_CSS_PIPE_ID_CAPTURE)
		current_event->pipe = IA_CSS_PIPE_ID_COPY;
}
/*
 * Set the input resolution for one input-system (isys) sub-stream of
 * @stream_id. Returns -EINVAL when @isys_stream is out of range.
 */
int atomisp_css_isys_set_resolution(struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
struct v4l2_mbus_framefmt *ffmt,
int isys_stream)
{
struct ia_css_stream_config *s_config =
&asd->stream_env[stream_id].stream_config;
if (isys_stream >= IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH)
return -EINVAL;
s_config->isys_config[isys_stream].input_res.width = ffmt->width;
s_config->isys_config[isys_stream].input_res.height = ffmt->height;
return 0;
}
/*
 * Set the overall CSS input resolution for @stream_id from a media-bus
 * frame format. Always returns 0.
 */
int atomisp_css_input_set_resolution(struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
struct v4l2_mbus_framefmt *ffmt)
{
struct ia_css_stream_config *s_config =
&asd->stream_env[stream_id].stream_config;
s_config->input_config.input_res.width = ffmt->width;
s_config->input_config.input_res.height = ffmt->height;
return 0;
}
/* Record the sensor binning factor in the stream configuration. */
void atomisp_css_input_set_binning_factor(struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
unsigned int bin_factor)
{
asd->stream_env[stream_id]
.stream_config.sensor_binning_factor = bin_factor;
}
/* Record the sensor's Bayer pattern order in the stream configuration. */
void atomisp_css_input_set_bayer_order(struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
enum ia_css_bayer_order bayer_order)
{
struct ia_css_stream_config *s_config =
&asd->stream_env[stream_id].stream_config;
s_config->input_config.bayer_order = bayer_order;
}
/*
 * Link one isys sub-stream to another (@link is the linked stream's id,
 * or NO_LINK). No bounds check on @isys_stream here — callers pass a
 * valid index.
 */
void atomisp_css_isys_set_link(struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
int link,
int isys_stream)
{
struct ia_css_stream_config *s_config =
&asd->stream_env[stream_id].stream_config;
s_config->isys_config[isys_stream].linked_isys_stream_id = link;
}
/* Mark one isys sub-stream configuration as valid/invalid. */
void atomisp_css_isys_set_valid(struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
bool valid,
int isys_stream)
{
struct ia_css_stream_config *s_config =
&asd->stream_env[stream_id].stream_config;
s_config->isys_config[isys_stream].valid = valid;
}
/* Set the input data format for one isys sub-stream. */
void atomisp_css_isys_set_format(struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
enum atomisp_input_format format,
int isys_stream)
{
struct ia_css_stream_config *s_config =
&asd->stream_env[stream_id].stream_config;
s_config->isys_config[isys_stream].format = format;
}
/* Set the overall CSS input data format for @stream_id. */
void atomisp_css_input_set_format(struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
enum atomisp_input_format format)
{
struct ia_css_stream_config *s_config =
&asd->stream_env[stream_id].stream_config;
s_config->input_config.format = format;
}
/*
 * Initialize the default (single) isys sub-stream for @stream_id:
 * invalidate all isys slots, then program the default slot with the
 * given resolution, the stream's input format, no link, and mark it
 * valid. Always returns 0.
 */
int atomisp_css_set_default_isys_config(struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
struct v4l2_mbus_framefmt *ffmt)
{
int i;
struct ia_css_stream_config *s_config =
&asd->stream_env[stream_id].stream_config;
/*
 * Set all isys configs to not valid.
 * Currently we support only one stream per channel
 */
for (i = IA_CSS_STREAM_ISYS_STREAM_0;
i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++)
s_config->isys_config[i].valid = false;
atomisp_css_isys_set_resolution(asd, stream_id, ffmt,
IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
atomisp_css_isys_set_format(asd, stream_id,
s_config->input_config.format,
IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
atomisp_css_isys_set_link(asd, stream_id, NO_LINK,
IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
atomisp_css_isys_set_valid(asd, stream_id, true,
IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX);
return 0;
}
/*
 * Two-isys-stream mode: (re)program isys stream 0 (the primary stream)
 * with the given format and resolution and mark it valid.
 */
void atomisp_css_isys_two_stream_cfg_update_stream1(
struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
enum atomisp_input_format input_format,
unsigned int width, unsigned int height)
{
struct ia_css_stream_config *s_config =
&asd->stream_env[stream_id].stream_config;
s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.width =
width;
s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].input_res.height =
height;
s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].format =
input_format;
s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_0].valid = true;
}
/*
 * Two-isys-stream mode: (re)program isys stream 1 (the secondary stream,
 * e.g. embedded metadata), link it to stream 0 and mark it valid.
 */
void atomisp_css_isys_two_stream_cfg_update_stream2(
struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
enum atomisp_input_format input_format,
unsigned int width, unsigned int height)
{
struct ia_css_stream_config *s_config =
&asd->stream_env[stream_id].stream_config;
s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.width =
width;
s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].input_res.height =
height;
s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].linked_isys_stream_id
= IA_CSS_STREAM_ISYS_STREAM_0;
s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].format =
input_format;
s_config->isys_config[IA_CSS_STREAM_ISYS_STREAM_1].valid = true;
}
/*
 * Set the effective (post-crop) input resolution for @stream_id.
 * Always returns 0.
 */
int atomisp_css_input_set_effective_resolution(
struct atomisp_sub_device *asd,
enum atomisp_input_stream_id stream_id,
unsigned int width, unsigned int height)
{
struct ia_css_stream_config *s_config =
&asd->stream_env[stream_id].stream_config;
s_config->input_config.effective_res.width = width;
s_config->input_config.effective_res.height = height;
return 0;
}
/* Set the DVS (digital video stabilization) envelope on the video pipe. */
void atomisp_css_video_set_dis_envelope(struct atomisp_sub_device *asd,
unsigned int dvs_w, unsigned int dvs_h)
{
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.pipe_configs[IA_CSS_PIPE_ID_VIDEO].dvs_envelope.width = dvs_w;
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.pipe_configs[IA_CSS_PIPE_ID_VIDEO].dvs_envelope.height = dvs_h;
}
/*
 * Switch the general stream between 1 and 2 pixels per clock. When the
 * value actually changes, every pipe is flagged for reconfiguration.
 */
void atomisp_css_input_set_two_pixels_per_clock(
struct atomisp_sub_device *asd,
bool two_ppc)
{
int i;
/* No-op if the setting is unchanged — avoids needless pipe updates. */
if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.stream_config.pixels_per_clock == (two_ppc ? 2 : 1))
return;
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.stream_config.pixels_per_clock = (two_ppc ? 2 : 1);
for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.update_pipe[i] = true;
}
/* Enable/disable digital zoom on every pipe of the general stream. */
void atomisp_css_enable_dz(struct atomisp_sub_device *asd, bool enable)
{
int i;
for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.pipe_configs[i].enable_dz = enable;
}
/*
 * Set the capture pipe's capture mode, flagging the pipe for update
 * only when the mode actually changes.
 */
void atomisp_css_capture_set_mode(struct atomisp_sub_device *asd,
enum ia_css_capture_mode mode)
{
struct atomisp_stream_env *stream_env =
&asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
if (stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE]
.default_capture_config.mode == mode)
return;
stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
default_capture_config.mode = mode;
stream_env->update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;
}
/*
 * Set the CSS input mode on all streams. Test-pattern inputs override
 * the mode with a fixed checkerboard TPG configuration. For buffered
 * sensor mode, a MIPI receive buffer size is computed (or a safe
 * pre-defined fallback chosen) for every stream with a known input
 * resolution.
 */
void atomisp_css_input_set_mode(struct atomisp_sub_device *asd,
enum ia_css_input_mode mode)
{
int i;
struct atomisp_device *isp = asd->isp;
unsigned int size_mem_words;
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++)
asd->stream_env[i].stream_config.mode = mode;
/* Test-pattern generator: force TPG mode with fixed parameters. */
if (isp->inputs[asd->input_curr].type == TEST_PATTERN) {
struct ia_css_stream_config *s_config =
&asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_config;
s_config->mode = IA_CSS_INPUT_MODE_TPG;
s_config->source.tpg.mode = IA_CSS_TPG_MODE_CHECKERBOARD;
s_config->source.tpg.x_mask = (1 << 4) - 1;
s_config->source.tpg.x_delta = -2;
s_config->source.tpg.y_mask = (1 << 4) - 1;
s_config->source.tpg.y_delta = 3;
s_config->source.tpg.xy_mask = (1 << 8) - 1;
return;
}
/* MIPI buffer sizing below only applies to buffered-sensor mode. */
if (mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
return;
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
/*
 * TODO: sensor needs to export the embedded_data_size_words
 * information to atomisp for each setting.
 * Here using a large safe value.
 */
struct ia_css_stream_config *s_config =
&asd->stream_env[i].stream_config;
if (s_config->input_config.input_res.width == 0)
continue;
if (ia_css_mipi_frame_calculate_size(
s_config->input_config.input_res.width,
s_config->input_config.input_res.height,
s_config->input_config.format,
true,
0x13000,
&size_mem_words) != 0) {
/* Fall back to a platform-dependent pre-defined size. */
if (IS_MRFD)
size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_2;
else
size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_1;
dev_warn(asd->isp->dev,
"ia_css_mipi_frame_calculate_size failed,applying pre-defined MIPI buffer size %u.\n",
size_mem_words);
}
s_config->mipi_buffer_config.size_mem_words = size_mem_words;
s_config->mipi_buffer_config.nof_mipi_buffers = 2;
}
}
/*
 * Enable/disable online mode for @stream_index; flags only the capture
 * pipe for reconfiguration, and only on an actual change.
 */
void atomisp_css_capture_enable_online(struct atomisp_sub_device *asd,
unsigned short stream_index, bool enable)
{
struct atomisp_stream_env *stream_env =
&asd->stream_env[stream_index];
if (stream_env->stream_config.online == !!enable)
return;
stream_env->stream_config.online = !!enable;
stream_env->update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;
}
/*
 * Enable/disable online mode for @stream_index; unlike the capture
 * variant, flags ALL pipes for reconfiguration on a change.
 */
void atomisp_css_preview_enable_online(struct atomisp_sub_device *asd,
unsigned short stream_index, bool enable)
{
struct atomisp_stream_env *stream_env =
&asd->stream_env[stream_index];
int i;
if (stream_env->stream_config.online != !!enable) {
stream_env->stream_config.online = !!enable;
for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++)
stream_env->update_pipe[i] = true;
}
}
/*
 * Program the MIPI CSI-2 receiver port parameters (port, lanes, timeout,
 * rxcount) and the embedded metadata layout into every stream's config.
 * rxcount is only overridden when a bus frequency is known.
 * Always returns 0.
 */
int atomisp_css_input_configure_port(
struct atomisp_sub_device *asd,
enum mipi_port_id port,
unsigned int num_lanes,
unsigned int timeout,
unsigned int mipi_freq,
enum atomisp_input_format metadata_format,
unsigned int metadata_width,
unsigned int metadata_height)
{
int i;
struct atomisp_stream_env *stream_env;
/*
 * Calculate rx_count as follows:
 * Input: mipi_freq : CSI-2 bus frequency in Hz
 * UI = 1 / (2 * mipi_freq) : period of one bit on the bus
 * min = 85e-9 + 6 * UI : Limits for rx_count in seconds
 * max = 145e-9 + 10 * UI
 * rxcount0 = min / (4 / mipi_freq) : convert seconds to byte clocks
 * rxcount = rxcount0 - 2 : adjust for better results
 * The formula below is simplified version of the above with
 * 10-bit fixed points for improved accuracy.
 */
/* Same byte value replicated into all four rxcount register lanes. */
const unsigned int rxcount =
min(((mipi_freq / 46000) - 1280) >> 10, 0xffU) * 0x01010101U;
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
stream_env = &asd->stream_env[i];
stream_env->stream_config.source.port.port = port;
stream_env->stream_config.source.port.num_lanes = num_lanes;
stream_env->stream_config.source.port.timeout = timeout;
if (mipi_freq)
stream_env->stream_config.source.port.rxcount = rxcount;
stream_env->stream_config.
metadata_config.data_type = metadata_format;
stream_env->stream_config.
metadata_config.resolution.width = metadata_width;
stream_env->stream_config.
metadata_config.resolution.height = metadata_height;
}
return 0;
}
/*
 * Stop CSS processing: destroy pipes/streams, stop the SP, and recycle
 * all in-flight statistics/metadata buffers back onto their free lists.
 * When not part of a reset, all pipe/stream configs are also restored to
 * their defaults and cached ISP parameters are cleared.
 */
void atomisp_css_stop(struct atomisp_sub_device *asd, bool in_reset)
{
unsigned long irqflags;
unsigned int i;
/*
 * CSS 2.0 API limitation: ia_css_stop_sp() can only be called after
 * destroying all pipes.
 */
atomisp_destroy_pipes_stream(asd);
atomisp_init_raw_buffer_bitmap(asd);
ia_css_stop_sp();
if (!in_reset) {
struct atomisp_stream_env *stream_env;
int i, j;
/* Restore every pipe and stream config to library defaults. */
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
stream_env = &asd->stream_env[i];
for (j = 0; j < IA_CSS_PIPE_ID_NUM; j++) {
ia_css_pipe_config_defaults(
&stream_env->pipe_configs[j]);
ia_css_pipe_extra_config_defaults(
&stream_env->pipe_extra_configs[j]);
}
ia_css_stream_config_defaults(
&stream_env->stream_config);
}
memset(&asd->params.config, 0, sizeof(asd->params.config));
asd->params.css_update_params_needed = false;
}
/* move stats buffers to free queue list */
list_splice_init(&asd->s3a_stats_in_css, &asd->s3a_stats);
list_splice_init(&asd->s3a_stats_ready, &asd->s3a_stats);
/* DIS lists are also touched from IRQ context — take the lock. */
spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
list_splice_init(&asd->dis_stats_in_css, &asd->dis_stats);
asd->params.dis_proj_data_valid = false;
spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
list_splice_init(&asd->metadata_in_css[i], &asd->metadata[i]);
list_splice_init(&asd->metadata_ready[i], &asd->metadata[i]);
}
atomisp_flush_params_queue(&asd->video_out);
atomisp_free_css_parameters(&asd->params.css_param);
memset(&asd->params.css_param, 0, sizeof(asd->params.css_param));
}
/*
 * Configure the number of raw frames used in continuous-capture mode:
 * pick the initial raw-buffer count based on whether raw-buffer locking
 * is enabled, add extra frames for DVS delay, and set the target count.
 *
 * NOTE(review): when raw-buffer lock, video run mode and DVS are all
 * enabled, ATOMISP_CSS2_NUM_DVS_FRAME_DELAY appears to be added twice
 * (once in the if-branch, once unconditionally below) — confirm whether
 * that is intended before changing it.
 */
void atomisp_css_continuous_set_num_raw_frames(
struct atomisp_sub_device *asd,
int num_frames)
{
if (asd->enable_raw_buffer_lock->val) {
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.stream_config.init_num_cont_raw_buf =
ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES_LOCK_EN;
if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO &&
asd->params.video_dis_en)
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.stream_config.init_num_cont_raw_buf +=
ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
} else {
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.stream_config.init_num_cont_raw_buf =
ATOMISP_CSS2_NUM_OFFLINE_INIT_CONTINUOUS_FRAMES;
}
if (asd->params.video_dis_en)
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.stream_config.init_num_cont_raw_buf +=
ATOMISP_CSS2_NUM_DVS_FRAME_DELAY;
asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
.stream_config.target_num_cont_raw_buf = num_frames;
}
/*
 * Map an atomisp pipe ID to the CSS pipe mode. The copy pipe maps to
 * YUVPP mode when the sensor delivers legacy YUV420 (the only mode that
 * supports that format); unknown IDs fall back to preview with a WARN.
 */
static enum ia_css_pipe_mode __pipe_id_to_pipe_mode(
struct atomisp_sub_device *asd,
enum ia_css_pipe_id pipe_id)
{
struct atomisp_device *isp = asd->isp;
struct camera_mipi_info *mipi_info = atomisp_to_sensor_mipi_info(
isp->inputs[asd->input_curr].camera);
switch (pipe_id) {
case IA_CSS_PIPE_ID_COPY:
/* Currently only YUVPP mode supports YUV420_Legacy format.
 * Revert this when other pipe modes can support
 * YUV420_Legacy format.
 */
if (mipi_info && mipi_info->input_format ==
ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY)
return IA_CSS_PIPE_MODE_YUVPP;
return IA_CSS_PIPE_MODE_COPY;
case IA_CSS_PIPE_ID_PREVIEW:
return IA_CSS_PIPE_MODE_PREVIEW;
case IA_CSS_PIPE_ID_CAPTURE:
return IA_CSS_PIPE_MODE_CAPTURE;
case IA_CSS_PIPE_ID_VIDEO:
return IA_CSS_PIPE_MODE_VIDEO;
case IA_CSS_PIPE_ID_YUVPP:
return IA_CSS_PIPE_MODE_YUVPP;
default:
WARN_ON(1);
return IA_CSS_PIPE_MODE_PREVIEW;
}
}
/*
 * Configure the main output frame (output_info[0]) of @pipe_id:
 * resolution, format and padded width. Also grows the stream's
 * effective resolution when the requested output exceeds it (an
 * ISP binary 2.2 requirement).
 */
static void __configure_output(struct atomisp_sub_device *asd,
unsigned int stream_index,
unsigned int width, unsigned int height,
unsigned int min_width,
enum ia_css_frame_format format,
enum ia_css_pipe_id pipe_id)
{
struct atomisp_device *isp = asd->isp;
struct atomisp_stream_env *stream_env =
&asd->stream_env[stream_index];
struct ia_css_stream_config *s_config = &stream_env->stream_config;
stream_env->pipe_configs[pipe_id].mode =
__pipe_id_to_pipe_mode(asd, pipe_id);
stream_env->update_pipe[pipe_id] = true;
stream_env->pipe_configs[pipe_id].output_info[0].res.width = width;
stream_env->pipe_configs[pipe_id].output_info[0].res.height = height;
stream_env->pipe_configs[pipe_id].output_info[0].format = format;
stream_env->pipe_configs[pipe_id].output_info[0].padded_width = min_width;
/* isp binary 2.2 specific setting*/
if (width > s_config->input_config.effective_res.width ||
height > s_config->input_config.effective_res.height) {
s_config->input_config.effective_res.width = width;
s_config->input_config.effective_res.height = height;
}
dev_dbg(isp->dev, "configuring pipe[%d] output info w=%d.h=%d.f=%d.\n",
pipe_id, width, height, format);
}
/*
 * For CSS2.1, capture pipe uses capture_pp_in_res to configure yuv
 * downscaling input resolution.
 */
static void __configure_capture_pp_input(struct atomisp_sub_device *asd,
unsigned int width, unsigned int height,
enum ia_css_pipe_id pipe_id)
{
struct atomisp_device *isp = asd->isp;
struct atomisp_stream_env *stream_env =
&asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
struct ia_css_stream_config *stream_config = &stream_env->stream_config;
struct ia_css_pipe_config *pipe_configs =
&stream_env->pipe_configs[pipe_id];
struct ia_css_pipe_extra_config *pipe_extra_configs =
&stream_env->pipe_extra_configs[pipe_id];
unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
if (width == 0 && height == 0)
return;
/* Skip if the requested input is within 10% of the output size. */
if (width * 9 / 10 < pipe_configs->output_info[0].res.width ||
height * 9 / 10 < pipe_configs->output_info[0].res.height)
return;
/* here just copy the calculation in css */
hor_ds_factor = CEIL_DIV(width >> 1,
pipe_configs->output_info[0].res.width);
ver_ds_factor = CEIL_DIV(height >> 1,
pipe_configs->output_info[0].res.height);
/* Pre-ISP2401 and CHT firmware require equal h/v downscale factors. */
if ((asd->isp->media_dev.hw_revision <
(ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT) ||
IS_CHT) && hor_ds_factor != ver_ds_factor) {
dev_warn(asd->isp->dev,
"Cropping for capture due to FW limitation");
return;
}
pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
stream_env->update_pipe[pipe_id] = true;
pipe_extra_configs->enable_yuv_ds = true;
pipe_configs->capt_pp_in_res.width =
stream_config->input_config.effective_res.width;
pipe_configs->capt_pp_in_res.height =
stream_config->input_config.effective_res.height;
dev_dbg(isp->dev, "configuring pipe[%d]capture pp input w=%d.h=%d.\n",
pipe_id, width, height);
}
/*
 * For CSS2.1, preview pipe could support bayer downscaling, yuv decimation and
 * yuv downscaling, which needs addtional configurations.
 */
static void __configure_preview_pp_input(struct atomisp_sub_device *asd,
unsigned int width, unsigned int height,
enum ia_css_pipe_id pipe_id)
{
struct atomisp_device *isp = asd->isp;
int out_width, out_height, yuv_ds_in_width, yuv_ds_in_height;
struct atomisp_stream_env *stream_env =
&asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
struct ia_css_stream_config *stream_config = &stream_env->stream_config;
struct ia_css_pipe_config *pipe_configs =
&stream_env->pipe_configs[pipe_id];
struct ia_css_pipe_extra_config *pipe_extra_configs =
&stream_env->pipe_extra_configs[pipe_id];
struct ia_css_resolution *bayer_ds_out_res =
&pipe_configs->bayer_ds_out_res;
struct ia_css_resolution *vf_pp_in_res =
&pipe_configs->vf_pp_in_res;
struct ia_css_resolution *effective_res =
&stream_config->input_config.effective_res;
/* Supported bayer downscale ratios: 2/1, 3/2 and 5/4. */
static const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} };
/*
 * BZ201033: YUV decimation factor of 4 causes couple of rightmost
 * columns to be shaded. Remove this factor to work around the CSS bug.
 * const unsigned int yuv_dec_fct[] = {4, 2};
 */
static const unsigned int yuv_dec_fct[] = { 2 };
unsigned int i;
if (width == 0 && height == 0)
return;
pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
stream_env->update_pipe[pipe_id] = true;
out_width = pipe_configs->output_info[0].res.width;
out_height = pipe_configs->output_info[0].res.height;
/*
 * The ISP could do bayer downscaling, yuv decimation and yuv
 * downscaling:
 * 1: Bayer Downscaling: between effective resolution and
 * bayer_ds_res_out;
 * 2: YUV Decimation: between bayer_ds_res_out and vf_pp_in_res;
 * 3: YUV Downscaling: between vf_pp_in_res and final vf output
 *
 * Rule for Bayer Downscaling: support factor 2, 1.5 and 1.25
 * Rule for YUV Decimation: support factor 2, 4
 * Rule for YUV Downscaling: arbitrary value below 2
 *
 * General rule of factor distribution among these stages:
 * 1: try to do Bayer downscaling first if not in online mode.
 * 2: try to do maximum of 2 for YUV downscaling
 * 3: the remainling for YUV decimation
 *
 * Note:
 * Do not configure bayer_ds_out_res if:
 * online == 1 or continuous == 0 or raw_binning = 0
 */
if (stream_config->online || !stream_config->continuous ||
!pipe_extra_configs->enable_raw_binning) {
bayer_ds_out_res->width = 0;
bayer_ds_out_res->height = 0;
} else {
bayer_ds_out_res->width = effective_res->width;
bayer_ds_out_res->height = effective_res->height;
/* Pick the largest supported ratio that still covers the output. */
for (i = 0; i < ARRAY_SIZE(bds_fct); i++) {
if (effective_res->width >= out_width *
bds_fct[i].numerator / bds_fct[i].denominator &&
effective_res->height >= out_height *
bds_fct[i].numerator / bds_fct[i].denominator) {
bayer_ds_out_res->width =
effective_res->width *
bds_fct[i].denominator /
bds_fct[i].numerator;
bayer_ds_out_res->height =
effective_res->height *
bds_fct[i].denominator /
bds_fct[i].numerator;
break;
}
}
}
/*
 * calculate YUV Decimation, YUV downscaling facor:
 * YUV Downscaling factor must not exceed 2.
 * YUV Decimation factor could be 2, 4.
 */
/* first decide the yuv_ds input resolution */
if (bayer_ds_out_res->width == 0) {
yuv_ds_in_width = effective_res->width;
yuv_ds_in_height = effective_res->height;
} else {
yuv_ds_in_width = bayer_ds_out_res->width;
yuv_ds_in_height = bayer_ds_out_res->height;
}
vf_pp_in_res->width = yuv_ds_in_width;
vf_pp_in_res->height = yuv_ds_in_height;
/* find out the yuv decimation factor */
for (i = 0; i < ARRAY_SIZE(yuv_dec_fct); i++) {
if (yuv_ds_in_width >= out_width * yuv_dec_fct[i] &&
yuv_ds_in_height >= out_height * yuv_dec_fct[i]) {
vf_pp_in_res->width = yuv_ds_in_width / yuv_dec_fct[i];
vf_pp_in_res->height = yuv_ds_in_height / yuv_dec_fct[i];
break;
}
}
/* No downscaling left to do — disable yuv_ds and clear vf_pp input. */
if (vf_pp_in_res->width == out_width &&
vf_pp_in_res->height == out_height) {
pipe_extra_configs->enable_yuv_ds = false;
vf_pp_in_res->width = 0;
vf_pp_in_res->height = 0;
} else {
pipe_extra_configs->enable_yuv_ds = true;
}
dev_dbg(isp->dev, "configuring pipe[%d]preview pp input w=%d.h=%d.\n",
pipe_id, width, height);
}
/*
 * For CSS2.1, offline video pipe could support bayer decimation, and
 * yuv downscaling, which needs addtional configurations.
 *
 * Picks a bayer downscale ratio so the BDS output still covers the
 * destination (plus a 10% DVS margin when stabilization is on), and
 * sets the fixed 12x12 DVS envelope required by the firmware.
 */
static void __configure_video_pp_input(struct atomisp_sub_device *asd,
				       unsigned int width, unsigned int height,
				       enum ia_css_pipe_id pipe_id)
{
	struct atomisp_device *isp = asd->isp;
	int out_width, out_height;
	struct atomisp_stream_env *stream_env =
	    &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
	struct ia_css_stream_config *stream_config = &stream_env->stream_config;
	struct ia_css_pipe_config *pipe_configs =
	    &stream_env->pipe_configs[pipe_id];
	struct ia_css_pipe_extra_config *pipe_extra_configs =
	    &stream_env->pipe_extra_configs[pipe_id];
	struct ia_css_resolution *bayer_ds_out_res =
	    &pipe_configs->bayer_ds_out_res;
	struct ia_css_resolution *effective_res =
	    &stream_config->input_config.effective_res;
	/* Supported bayer downscale ratios for the video pipe. */
	static const struct bayer_ds_factor bds_factors[] = {
		{8, 1}, {6, 1}, {4, 1}, {3, 1}, {2, 1}, {3, 2}
	};
	unsigned int i;

	if (width == 0 && height == 0)
		return;

	pipe_configs->mode = __pipe_id_to_pipe_mode(asd, pipe_id);
	stream_env->update_pipe[pipe_id] = true;
	pipe_extra_configs->enable_yuv_ds = false;

	/*
	 * If DVS is enabled, video binary will take care the dvs envelope
	 * and usually the bayer_ds_out_res should be larger than 120% of
	 * destination resolution, the extra 20% will be cropped as DVS
	 * envelope. But, if the bayer_ds_out_res is less than 120% of the
	 * destination. The ISP can still work, but DVS quality is not good.
	 */
	/* taking at least 10% as envelope */
	if (asd->params.video_dis_en) {
		out_width = pipe_configs->output_info[0].res.width * 110 / 100;
		out_height = pipe_configs->output_info[0].res.height * 110 / 100;
	} else {
		out_width = pipe_configs->output_info[0].res.width;
		out_height = pipe_configs->output_info[0].res.height;
	}

	/*
	 * calculate bayer decimate factor:
	 * 1: only 1.5, 2, 4 and 8 get supported
	 * 2: Do not configure bayer_ds_out_res if:
	 *    online == 1 or continuous == 0 or raw_binning = 0
	 */
	if (stream_config->online || !stream_config->continuous) {
		bayer_ds_out_res->width = 0;
		bayer_ds_out_res->height = 0;
		goto done;
	}

	pipe_extra_configs->enable_raw_binning = true;
	bayer_ds_out_res->width = effective_res->width;
	bayer_ds_out_res->height = effective_res->height;

	/*
	 * Pick the largest supported ratio that still covers the output.
	 * Use ARRAY_SIZE() as in __configure_preview_pp_input() instead of
	 * the hand-rolled sizeof division.
	 */
	for (i = 0; i < ARRAY_SIZE(bds_factors); i++) {
		if (effective_res->width >= out_width *
		    bds_factors[i].numerator / bds_factors[i].denominator &&
		    effective_res->height >= out_height *
		    bds_factors[i].numerator / bds_factors[i].denominator) {
			bayer_ds_out_res->width = effective_res->width *
			    bds_factors[i].denominator /
			    bds_factors[i].numerator;
			bayer_ds_out_res->height = effective_res->height *
			    bds_factors[i].denominator /
			    bds_factors[i].numerator;
			break;
		}
	}

	/*
	 * DVS is cropped from BDS output, so we do not really need to set the
	 * envelope to 20% of output resolution here. always set it to 12x12
	 * per firmware requirement.
	 */
	pipe_configs->dvs_envelope.width = 12;
	pipe_configs->dvs_envelope.height = 12;

done:
	if (pipe_id == IA_CSS_PIPE_ID_YUVPP)
		stream_config->left_padding = -1;
	else
		stream_config->left_padding = 12;
	dev_dbg(isp->dev, "configuring pipe[%d]video pp input w=%d.h=%d.\n",
		pipe_id, width, height);
}
/*
 * Configure the viewfinder output frame (vf_output_info[0]) of
 * @pipe_id on the general stream: resolution, format, padded width.
 */
static void __configure_vf_output(struct atomisp_sub_device *asd,
unsigned int width, unsigned int height,
unsigned int min_width,
enum ia_css_frame_format format,
enum ia_css_pipe_id pipe_id)
{
struct atomisp_device *isp = asd->isp;
struct atomisp_stream_env *stream_env =
&asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
stream_env->pipe_configs[pipe_id].mode =
__pipe_id_to_pipe_mode(asd, pipe_id);
stream_env->update_pipe[pipe_id] = true;
stream_env->pipe_configs[pipe_id].vf_output_info[0].res.width = width;
stream_env->pipe_configs[pipe_id].vf_output_info[0].res.height = height;
stream_env->pipe_configs[pipe_id].vf_output_info[0].format = format;
stream_env->pipe_configs[pipe_id].vf_output_info[0].padded_width =
min_width;
dev_dbg(isp->dev,
"configuring pipe[%d] vf output info w=%d.h=%d.f=%d.\n",
pipe_id, width, height, format);
}
/*
 * Rebuild the CSS stream and fetch frame info of the requested @type
 * from @pipe_id into @info. On a pipe-info failure the pipes/stream are
 * destroyed and -EINVAL is returned; stream-update failures return the
 * underlying error code.
 */
static int __get_frame_info(struct atomisp_sub_device *asd,
unsigned int stream_index,
struct ia_css_frame_info *info,
enum frame_info_type type,
enum ia_css_pipe_id pipe_id)
{
struct atomisp_device *isp = asd->isp;
int ret;
struct ia_css_pipe_info p_info;
/* FIXME! No need to destroy/recreate all streams */
ret = atomisp_css_update_stream(asd);
if (ret)
return ret;
ret = ia_css_pipe_get_info(asd->stream_env[stream_index].pipes[pipe_id],
&p_info);
if (ret) {
dev_err(isp->dev, "can't get info from pipe\n");
goto get_info_err;
}
switch (type) {
case ATOMISP_CSS_VF_FRAME:
*info = p_info.vf_output_info[0];
dev_dbg(isp->dev, "getting vf frame info.\n");
break;
case ATOMISP_CSS_SECOND_VF_FRAME:
*info = p_info.vf_output_info[1];
dev_dbg(isp->dev, "getting second vf frame info.\n");
break;
case ATOMISP_CSS_OUTPUT_FRAME:
*info = p_info.output_info[0];
dev_dbg(isp->dev, "getting main frame info.\n");
break;
case ATOMISP_CSS_SECOND_OUTPUT_FRAME:
*info = p_info.output_info[1];
dev_dbg(isp->dev, "getting second main frame info.\n");
break;
default:
/* Unknown types are treated as raw frame requests. */
case ATOMISP_CSS_RAW_FRAME:
*info = p_info.raw_output_info;
dev_dbg(isp->dev, "getting raw frame info.\n");
break;
}
dev_dbg(isp->dev, "get frame info: w=%d, h=%d, num_invalid_frames %d.\n",
info->res.width, info->res.height, p_info.num_invalid_frames);
return 0;
get_info_err:
atomisp_destroy_pipes_stream(asd);
return -EINVAL;
}
/*
 * Map the sub-device's current run mode to a CSS pipe index; copy mode
 * always wins, and unknown run modes fall back to preview with a warn.
 */
static unsigned int atomisp_get_pipe_index(struct atomisp_sub_device *asd)
{
if (asd->copy_mode)
return IA_CSS_PIPE_ID_COPY;
switch (asd->run_mode->val) {
case ATOMISP_RUN_MODE_VIDEO:
return IA_CSS_PIPE_ID_VIDEO;
case ATOMISP_RUN_MODE_STILL_CAPTURE:
return IA_CSS_PIPE_ID_CAPTURE;
case ATOMISP_RUN_MODE_PREVIEW:
return IA_CSS_PIPE_ID_PREVIEW;
}
dev_warn(asd->isp->dev, "cannot determine pipe-index return default preview pipe\n");
return IA_CSS_PIPE_ID_PREVIEW;
}
/*
 * Fetch the main output frame info (output_info[0]) of the pipe that
 * matches the current run mode. Returns 0 on success, -EINVAL when the
 * pipe info query fails.
 */
int atomisp_get_css_frame_info(struct atomisp_sub_device *asd,
			       struct ia_css_frame_info *frame_info)
{
	struct atomisp_device *isp = asd->isp;
	struct ia_css_pipe_info info;
	int pipe_index = atomisp_get_pipe_index(asd);
	/* The YUVPP pipe lives on the video stream; everything else on
	 * the general stream. */
	int stream_index = (pipe_index == IA_CSS_PIPE_ID_YUVPP) ?
			   ATOMISP_INPUT_STREAM_VIDEO :
			   ATOMISP_INPUT_STREAM_GENERAL;
	struct ia_css_pipe *pipe =
	    asd->stream_env[stream_index].pipes[pipe_index];

	if (ia_css_pipe_get_info(pipe, &info) != 0) {
		dev_dbg(isp->dev, "ia_css_pipe_get_info FAILED");
		return -EINVAL;
	}

	*frame_info = info.output_info[0];
	return 0;
}
/*
 * Configure the copy pipe's output: force RAW capture mode, then apply
 * the common output configuration. Always returns 0.
 */
int atomisp_css_copy_configure_output(struct atomisp_sub_device *asd,
unsigned int stream_index,
unsigned int width, unsigned int height,
unsigned int padded_width,
enum ia_css_frame_format format)
{
asd->stream_env[stream_index].pipe_configs[IA_CSS_PIPE_ID_COPY].
default_capture_config.mode =
IA_CSS_CAPTURE_MODE_RAW;
__configure_output(asd, stream_index, width, height, padded_width,
format, IA_CSS_PIPE_ID_COPY);
return 0;
}
/* Set the main output geometry/format for the preview pipe. */
int atomisp_css_preview_configure_output(struct atomisp_sub_device *asd,
					 unsigned int width, unsigned int height,
					 unsigned int min_width,
					 enum ia_css_frame_format format)
{
	__configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
			   min_width, format, IA_CSS_PIPE_ID_PREVIEW);
	return 0;
}

/* Set the main output geometry/format for the still-capture pipe. */
int atomisp_css_capture_configure_output(struct atomisp_sub_device *asd,
					 unsigned int width, unsigned int height,
					 unsigned int min_width,
					 enum ia_css_frame_format format)
{
	__configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
			   min_width, format, IA_CSS_PIPE_ID_CAPTURE);
	return 0;
}

/* Set the main output geometry/format for the video pipe. */
int atomisp_css_video_configure_output(struct atomisp_sub_device *asd,
				       unsigned int width, unsigned int height,
				       unsigned int min_width,
				       enum ia_css_frame_format format)
{
	__configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, width, height,
			   min_width, format, IA_CSS_PIPE_ID_VIDEO);
	return 0;
}

/* Set the viewfinder output geometry/format for the video pipe. */
int atomisp_css_video_configure_viewfinder(
    struct atomisp_sub_device *asd,
    unsigned int width, unsigned int height,
    unsigned int min_width,
    enum ia_css_frame_format format)
{
	__configure_vf_output(asd, width, height, min_width, format,
			      IA_CSS_PIPE_ID_VIDEO);
	return 0;
}

/* Set the viewfinder output geometry/format for the still-capture pipe. */
int atomisp_css_capture_configure_viewfinder(
    struct atomisp_sub_device *asd,
    unsigned int width, unsigned int height,
    unsigned int min_width,
    enum ia_css_frame_format format)
{
	__configure_vf_output(asd, width, height, min_width, format, IA_CSS_PIPE_ID_CAPTURE);
	return 0;
}
/*
 * The following helpers are thin wrappers around __get_frame_info(),
 * selecting the frame type (viewfinder vs. main output) and the CSS pipe
 * to query.
 */

/* Viewfinder frame info of the video pipe. */
int atomisp_css_video_get_viewfinder_frame_info(
    struct atomisp_sub_device *asd,
    struct ia_css_frame_info *info)
{
	return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
				ATOMISP_CSS_VF_FRAME, IA_CSS_PIPE_ID_VIDEO);
}

/* Viewfinder frame info of the still-capture pipe. */
int atomisp_css_capture_get_viewfinder_frame_info(
    struct atomisp_sub_device *asd,
    struct ia_css_frame_info *info)
{
	return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
				ATOMISP_CSS_VF_FRAME, IA_CSS_PIPE_ID_CAPTURE);
}

/* Output frame info of the copy pipe on a caller-chosen stream. */
int atomisp_css_copy_get_output_frame_info(
    struct atomisp_sub_device *asd,
    unsigned int stream_index,
    struct ia_css_frame_info *info)
{
	return __get_frame_info(asd, stream_index, info,
				ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_COPY);
}

/* Output frame info of the preview pipe. */
int atomisp_css_preview_get_output_frame_info(
    struct atomisp_sub_device *asd,
    struct ia_css_frame_info *info)
{
	return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
				ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_PREVIEW);
}

/* Output frame info of the still-capture pipe. */
int atomisp_css_capture_get_output_frame_info(
    struct atomisp_sub_device *asd,
    struct ia_css_frame_info *info)
{
	return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
				ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_CAPTURE);
}

/* Output frame info of the video pipe. */
int atomisp_css_video_get_output_frame_info(
    struct atomisp_sub_device *asd,
    struct ia_css_frame_info *info)
{
	return __get_frame_info(asd, ATOMISP_INPUT_STREAM_GENERAL, info,
				ATOMISP_CSS_OUTPUT_FRAME, IA_CSS_PIPE_ID_VIDEO);
}
/*
 * Set the preview pipe's post-processing input resolution. If the new
 * width exceeds the capture pipe's current pp input width, widen the
 * capture pipe too so the two stay consistent.
 */
int atomisp_css_preview_configure_pp_input(
    struct atomisp_sub_device *asd,
    unsigned int width, unsigned int height)
{
	struct atomisp_stream_env *stream_env =
	    &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
	__configure_preview_pp_input(asd, width, height, IA_CSS_PIPE_ID_PREVIEW);

	if (width > stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
	    capt_pp_in_res.width)
		__configure_capture_pp_input(asd, width, height, IA_CSS_PIPE_ID_CAPTURE);
	return 0;
}

/* Set the still-capture pipe's post-processing input resolution. */
int atomisp_css_capture_configure_pp_input(
    struct atomisp_sub_device *asd,
    unsigned int width, unsigned int height)
{
	__configure_capture_pp_input(asd, width, height, IA_CSS_PIPE_ID_CAPTURE);
	return 0;
}

/*
 * Set the video pipe's post-processing input resolution, widening the
 * capture pipe's pp input when needed (same rule as for preview).
 */
int atomisp_css_video_configure_pp_input(
    struct atomisp_sub_device *asd,
    unsigned int width, unsigned int height)
{
	struct atomisp_stream_env *stream_env =
	    &asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL];
	__configure_video_pp_input(asd, width, height, IA_CSS_PIPE_ID_VIDEO);

	if (width > stream_env->pipe_configs[IA_CSS_PIPE_ID_CAPTURE].
	    capt_pp_in_res.width)
		__configure_capture_pp_input(asd, width, height, IA_CSS_PIPE_ID_CAPTURE);
	return 0;
}
/*
 * Program an offline (ZSL-style) capture request on the general stream:
 * @num_captures frames, skipping @skip, starting at raw-buffer @offset.
 * Returns 0 on success, -EINVAL when the CSS call fails.
 */
int atomisp_css_offline_capture_configure(struct atomisp_sub_device *asd,
					  int num_captures, unsigned int skip, int offset)
{
	struct ia_css_stream *stream =
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream;

	dev_dbg(asd->isp->dev, "%s num_capture:%d skip:%d offset:%d\n",
		__func__, num_captures, skip, offset);

	if (ia_css_stream_capture(stream, num_captures, skip, offset))
		return -EINVAL;

	return 0;
}
/*
 * Request capture of the raw frame identified by @exp_id. A full capture
 * command queue is reported as -EBUSY so the caller can retry; any other
 * CSS failure becomes -EIO.
 */
int atomisp_css_exp_id_capture(struct atomisp_sub_device *asd, int exp_id)
{
	int err = ia_css_stream_capture_frame(
		      asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
		      exp_id);

	if (err == -ENOBUFS)
		return -EBUSY;	/* capture cmd queue is full */

	return err ? -EIO : 0;
}
/*
 * Unlock the raw frame identified by @exp_id. A full command queue is
 * reported as -EAGAIN; any other CSS failure becomes -EIO.
 */
int atomisp_css_exp_id_unlock(struct atomisp_sub_device *asd, int exp_id)
{
	int err = ia_css_unlock_raw_frame(
		      asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
		      exp_id);

	if (err == -ENOBUFS)
		return -EAGAIN;

	return err ? -EIO : 0;
}
/*
 * Enable/disable extra noise reduction (XNR) on the capture pipe,
 * remember the setting and mark the pipe config as needing an update.
 */
int atomisp_css_capture_enable_xnr(struct atomisp_sub_device *asd,
				   bool enable)
{
	asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
	.pipe_configs[IA_CSS_PIPE_ID_CAPTURE]
	.default_capture_config.enable_xnr = enable;
	asd->params.capture_config.enable_xnr = enable;
	asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]
	.update_pipe[IA_CSS_PIPE_ID_CAPTURE] = true;

	return 0;
}
/*
 * Stash the CTC (chroma tone control) table pointer for the next
 * parameter update — unless every entry is zero, in which case the table
 * is rejected (workaround for invalid all-zero tables from user space).
 */
void atomisp_css_set_ctc_table(struct atomisp_sub_device *asd,
			       struct ia_css_ctc_table *ctc_table)
{
	u16 *data;
	int i, len;
	bool non_zero = false;

	/* Pick the VAMEM bank matching the table's layout */
	if (ctc_table->vamem_type == IA_CSS_VAMEM_TYPE_2) {
		data = ctc_table->data.vamem_2;
		len = IA_CSS_VAMEM_2_CTC_TABLE_SIZE;
	} else {
		data = ctc_table->data.vamem_1;
		len = IA_CSS_VAMEM_1_CTC_TABLE_SIZE;
	}

	for (i = 0; i < len && !non_zero; i++)
		non_zero = data[i] != 0;

	if (non_zero)
		asd->params.config.ctc_table = ctc_table;
	else
		dev_warn(asd->isp->dev, "Bypass the invalid ctc_table.\n");
}
/* Stash the ANR threshold pointer for the next parameter update. */
void atomisp_css_set_anr_thres(struct atomisp_sub_device *asd,
			       struct ia_css_anr_thres *anr_thres)
{
	asd->params.config.anr_thres = anr_thres;
}

/* Stash the DVS 6-axis config pointer for the next parameter update. */
void atomisp_css_set_dvs_6axis(struct atomisp_sub_device *asd,
			       struct ia_css_dvs_6axis_config *dvs_6axis)
{
	asd->params.config.dvs_6axis_config = dvs_6axis;
}

/*
 * Store a digital-image-stabilization motion vector: lazily point the
 * config at the css_param copy, clear it, then fill in x/y.
 */
void atomisp_css_video_set_dis_vector(struct atomisp_sub_device *asd,
				      struct atomisp_dis_vector *vector)
{
	if (!asd->params.config.motion_vector)
		asd->params.config.motion_vector = &asd->params.css_param.motion_vector;

	memset(asd->params.config.motion_vector,
	       0, sizeof(struct ia_css_vector));
	asd->params.css_param.motion_vector.x = vector->x;
	asd->params.css_param.motion_vector.y = vector->y;
}
/*
 * Compare a user-supplied DVS grid descriptor with the current CSS grid.
 *
 * Returns 0 when they match, non-zero (memcmp result) when they differ,
 * or -EINVAL when no enabled grid is available or the two struct layouts
 * are not size-compatible.
 */
static int atomisp_compare_dvs_grid(struct atomisp_sub_device *asd,
				    struct atomisp_dvs_grid_info *atomgrid)
{
	struct ia_css_dvs_grid_info *cur =
	    atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);

	if (!cur) {
		dev_err(asd->isp->dev, "dvs grid not available!\n");
		return -EINVAL;
	}

	/* the memcmp() below is only meaningful for equally-sized structs */
	if (sizeof(*cur) != sizeof(*atomgrid)) {
		dev_err(asd->isp->dev, "dvs grid mismatch!\n");
		return -EINVAL;
	}

	if (!cur->enable) {
		dev_err(asd->isp->dev, "dvs not enabled!\n");
		return -EINVAL;
	}

	return memcmp(atomgrid, cur, sizeof(*cur));
}
/* Stash the DVS2 coefficient pointer for the next parameter update. */
void atomisp_css_set_dvs2_coefs(struct atomisp_sub_device *asd,
				struct ia_css_dvs2_coefficients *coefs)
{
	asd->params.config.dvs2_coefs = coefs;
}

/*
 * Copy user-space DVS2 (DIS) coefficient arrays into the kernel buffers.
 *
 * Returns -EAGAIN when the caller's grid no longer matches the current
 * grid (caller should re-query the grid and retry), -EINVAL when any of
 * the eight source or destination arrays is missing, -EFAULT on a failed
 * copy_from_user(), 0 on success.
 */
int atomisp_css_set_dis_coefs(struct atomisp_sub_device *asd,
			      struct atomisp_dis_coefficients *coefs)
{
	if (atomisp_compare_dvs_grid(asd, &coefs->grid_info) != 0)
		/* If the grid info in the argument differs from the current
		   grid info, we tell the caller to reset the grid size and
		   try again. */
		return -EAGAIN;

	/* All eight user arrays and all eight kernel buffers must exist */
	if (!coefs->hor_coefs.odd_real ||
	    !coefs->hor_coefs.odd_imag ||
	    !coefs->hor_coefs.even_real ||
	    !coefs->hor_coefs.even_imag ||
	    !coefs->ver_coefs.odd_real ||
	    !coefs->ver_coefs.odd_imag ||
	    !coefs->ver_coefs.even_real ||
	    !coefs->ver_coefs.even_imag ||
	    !asd->params.css_param.dvs2_coeff->hor_coefs.odd_real ||
	    !asd->params.css_param.dvs2_coeff->hor_coefs.odd_imag ||
	    !asd->params.css_param.dvs2_coeff->hor_coefs.even_real ||
	    !asd->params.css_param.dvs2_coeff->hor_coefs.even_imag ||
	    !asd->params.css_param.dvs2_coeff->ver_coefs.odd_real ||
	    !asd->params.css_param.dvs2_coeff->ver_coefs.odd_imag ||
	    !asd->params.css_param.dvs2_coeff->ver_coefs.even_real ||
	    !asd->params.css_param.dvs2_coeff->ver_coefs.even_imag)
		return -EINVAL;

	if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.odd_real,
			   coefs->hor_coefs.odd_real, asd->params.dvs_hor_coef_bytes))
		return -EFAULT;
	if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.odd_imag,
			   coefs->hor_coefs.odd_imag, asd->params.dvs_hor_coef_bytes))
		return -EFAULT;
	if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.even_real,
			   coefs->hor_coefs.even_real, asd->params.dvs_hor_coef_bytes))
		return -EFAULT;
	if (copy_from_user(asd->params.css_param.dvs2_coeff->hor_coefs.even_imag,
			   coefs->hor_coefs.even_imag, asd->params.dvs_hor_coef_bytes))
		return -EFAULT;
	if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.odd_real,
			   coefs->ver_coefs.odd_real, asd->params.dvs_ver_coef_bytes))
		return -EFAULT;
	if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.odd_imag,
			   coefs->ver_coefs.odd_imag, asd->params.dvs_ver_coef_bytes))
		return -EFAULT;
	if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.even_real,
			   coefs->ver_coefs.even_real, asd->params.dvs_ver_coef_bytes))
		return -EFAULT;
	if (copy_from_user(asd->params.css_param.dvs2_coeff->ver_coefs.even_imag,
			   coefs->ver_coefs.even_imag, asd->params.dvs_ver_coef_bytes))
		return -EFAULT;

	/* Flag the new coefficients for the next parameter flush */
	asd->params.css_param.update_flag.dvs2_coefs =
	    (struct atomisp_dis_coefficients *)
	    asd->params.css_param.dvs2_coeff;
	/* FIXME! */
	/* asd->params.dis_proj_data_valid = false; */
	asd->params.css_update_params_needed = true;

	return 0;
}
/*
 * Apply a digital-zoom factor (same value for dx and dy). A request that
 * matches the current zoom is skipped to avoid a needless parameter
 * update.
 */
void atomisp_css_set_zoom_factor(struct atomisp_sub_device *asd,
				 unsigned int zoom)
{
	struct atomisp_device *isp = asd->isp;

	if (zoom == asd->params.css_param.dz_config.dx &&
	    zoom == asd->params.css_param.dz_config.dy) {
		dev_dbg(isp->dev, "same zoom scale. skipped.\n");
		return;
	}

	memset(&asd->params.css_param.dz_config, 0,
	       sizeof(struct ia_css_dz_config));
	asd->params.css_param.dz_config.dx = zoom;
	asd->params.css_param.dz_config.dy = zoom;

	asd->params.css_param.update_flag.dz_config =
	    (struct atomisp_dz_config *)&asd->params.css_param.dz_config;
	asd->params.css_update_params_needed = true;
}

/* Stash the output-formats config pointer for the next parameter update. */
void atomisp_css_set_formats_config(struct atomisp_sub_device *asd,
				    struct ia_css_formats_config *formats_config)
{
	asd->params.config.formats_config = formats_config;
}
/*
 * The atomisp_css_get_*_config() helpers below all follow the same
 * pattern: bail out with -EINVAL when the general input stream is gone
 * (i.e. after streamoff), zero a local CSS config struct, point the
 * ia_css_isp_config query at it, fetch the current ISP configuration and
 * copy the result back to the caller's struct.
 */

/* Get the current white-balance config. */
int atomisp_css_get_wb_config(struct atomisp_sub_device *asd,
			      struct atomisp_wb_config *config)
{
	struct ia_css_wb_config wb_config;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}
	memset(&wb_config, 0, sizeof(struct ia_css_wb_config));
	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.wb_config = &wb_config;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	memcpy(config, &wb_config, sizeof(*config));

	return 0;
}

/* Get the current optical-black config. */
int atomisp_css_get_ob_config(struct atomisp_sub_device *asd,
			      struct atomisp_ob_config *config)
{
	struct ia_css_ob_config ob_config;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}
	memset(&ob_config, 0, sizeof(struct ia_css_ob_config));
	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.ob_config = &ob_config;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	memcpy(config, &ob_config, sizeof(*config));

	return 0;
}

/* Get the current defect-pixel config. */
int atomisp_css_get_dp_config(struct atomisp_sub_device *asd,
			      struct atomisp_dp_config *config)
{
	struct ia_css_dp_config dp_config;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}
	memset(&dp_config, 0, sizeof(struct ia_css_dp_config));
	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.dp_config = &dp_config;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	memcpy(config, &dp_config, sizeof(*config));

	return 0;
}

/* Get the current demosaic config. */
int atomisp_css_get_de_config(struct atomisp_sub_device *asd,
			      struct atomisp_de_config *config)
{
	struct ia_css_de_config de_config;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}
	memset(&de_config, 0, sizeof(struct ia_css_de_config));
	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.de_config = &de_config;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	memcpy(config, &de_config, sizeof(*config));

	return 0;
}

/* Get the current noise-reduction config. */
int atomisp_css_get_nr_config(struct atomisp_sub_device *asd,
			      struct atomisp_nr_config *config)
{
	struct ia_css_nr_config nr_config;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}
	memset(&nr_config, 0, sizeof(struct ia_css_nr_config));
	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.nr_config = &nr_config;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	memcpy(config, &nr_config, sizeof(*config));

	return 0;
}

/* Get the current edge-enhancement config. */
int atomisp_css_get_ee_config(struct atomisp_sub_device *asd,
			      struct atomisp_ee_config *config)
{
	struct ia_css_ee_config ee_config;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}
	memset(&ee_config, 0, sizeof(struct ia_css_ee_config));
	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.ee_config = &ee_config;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	memcpy(config, &ee_config, sizeof(*config));

	return 0;
}

/* Get the current temporal-noise-reduction config. */
int atomisp_css_get_tnr_config(struct atomisp_sub_device *asd,
			       struct atomisp_tnr_config *config)
{
	struct ia_css_tnr_config tnr_config;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}
	memset(&tnr_config, 0, sizeof(struct ia_css_tnr_config));
	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.tnr_config = &tnr_config;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	memcpy(config, &tnr_config, sizeof(*config));

	return 0;
}
/*
 * Get the current CTC table. Unlike the small *_config getters above,
 * the table is fetched through a vzalloc()'d scratch buffer (presumably
 * too large for the stack — TODO confirm) and copied to the caller.
 */
int atomisp_css_get_ctc_table(struct atomisp_sub_device *asd,
			      struct atomisp_ctc_table *config)
{
	struct ia_css_ctc_table *tab;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}

	tab = vzalloc(sizeof(struct ia_css_ctc_table));
	if (!tab)
		return -ENOMEM;

	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.ctc_table = tab;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	memcpy(config, tab, sizeof(*tab));
	vfree(tab);

	return 0;
}

/* Get the current gamma table; same vzalloc scratch-buffer scheme. */
int atomisp_css_get_gamma_table(struct atomisp_sub_device *asd,
				struct atomisp_gamma_table *config)
{
	struct ia_css_gamma_table *tab;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}

	tab = vzalloc(sizeof(struct ia_css_gamma_table));
	if (!tab)
		return -ENOMEM;

	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.gamma_table = tab;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	memcpy(config, tab, sizeof(*tab));
	vfree(tab);

	return 0;
}
/* Get the current gamma-correction config (same pattern as the getters above). */
int atomisp_css_get_gc_config(struct atomisp_sub_device *asd,
			      struct atomisp_gc_config *config)
{
	struct ia_css_gc_config gc_config;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}
	memset(&gc_config, 0, sizeof(struct ia_css_gc_config));
	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.gc_config = &gc_config;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	/* Get gamma correction params from current setup */
	memcpy(config, &gc_config, sizeof(*config));

	return 0;
}

/* Get the current 3A (AE/AWB/AF statistics) config. */
int atomisp_css_get_3a_config(struct atomisp_sub_device *asd,
			      struct atomisp_3a_config *config)
{
	struct ia_css_3a_config s3a_config;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}
	memset(&s3a_config, 0, sizeof(struct ia_css_3a_config));
	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.s3a_config = &s3a_config;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	/* Get white balance from current setup */
	memcpy(config, &s3a_config, sizeof(*config));

	return 0;
}

/* Get the current output-formats config. */
int atomisp_css_get_formats_config(struct atomisp_sub_device *asd,
				   struct atomisp_formats_config *config)
{
	struct ia_css_formats_config formats_config;
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}
	memset(&formats_config, 0, sizeof(formats_config));
	memset(&isp_config, 0, sizeof(isp_config));
	isp_config.formats_config = &formats_config;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	/* Get narrow gamma from current setup */
	memcpy(config, &formats_config, sizeof(*config));

	return 0;
}
/*
 * Get the current digital-zoom factor. Only dx is reported; dx and dy
 * are always set to the same value by atomisp_css_set_zoom_factor().
 */
int atomisp_css_get_zoom_factor(struct atomisp_sub_device *asd,
				unsigned int *zoom)
{
	struct ia_css_dz_config dz_config;	/** Digital Zoom */
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev, "%s called after streamoff, skipping.\n",
			__func__);
		return -EINVAL;
	}
	memset(&dz_config, 0, sizeof(struct ia_css_dz_config));
	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.dz_config = &dz_config;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
	*zoom = dz_config.dx;

	return 0;
}
/*
 * Function to set/get image stabilization statistics
 */

/*
 * Fetch the most recent DVS2 (DIS) statistics and copy them to the
 * user-space buffers in @stats.
 *
 * Must be called with isp->mutex held; the internal dis_stats list is
 * additionally protected by dis_stats_lock. Returns -EINVAL when kernel
 * buffers are missing or the device is not streaming, -EAGAIN when the
 * grid changed or no valid statistics are queued yet, -EFAULT on a
 * failed copy_to_user(), 0 on success.
 */
int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd,
			     struct atomisp_dis_statistics *stats)
{
	struct atomisp_device *isp = asd->isp;
	struct atomisp_dis_buf *dis_buf;
	unsigned long flags;

	lockdep_assert_held(&isp->mutex);

	/* All eight destination projection buffers must exist */
	if (!asd->params.dvs_stat->hor_prod.odd_real ||
	    !asd->params.dvs_stat->hor_prod.odd_imag ||
	    !asd->params.dvs_stat->hor_prod.even_real ||
	    !asd->params.dvs_stat->hor_prod.even_imag ||
	    !asd->params.dvs_stat->ver_prod.odd_real ||
	    !asd->params.dvs_stat->ver_prod.odd_imag ||
	    !asd->params.dvs_stat->ver_prod.even_real ||
	    !asd->params.dvs_stat->ver_prod.even_imag)
		return -EINVAL;

	/* isp needs to be streaming to get DIS statistics */
	if (!asd->streaming)
		return -EINVAL;

	if (atomisp_compare_dvs_grid(asd, &stats->dvs2_stat.grid_info) != 0)
		/* If the grid info in the argument differs from the current
		   grid info, we tell the caller to reset the grid size and
		   try again. */
		return -EAGAIN;

	/* Borrow the oldest queued statistics buffer under the spinlock */
	spin_lock_irqsave(&asd->dis_stats_lock, flags);
	if (!asd->params.dis_proj_data_valid || list_empty(&asd->dis_stats)) {
		spin_unlock_irqrestore(&asd->dis_stats_lock, flags);
		dev_err(isp->dev, "dis statistics is not valid.\n");
		return -EAGAIN;
	}

	dis_buf = list_entry(asd->dis_stats.next,
			     struct atomisp_dis_buf, list);
	list_del_init(&dis_buf->list);
	spin_unlock_irqrestore(&asd->dis_stats_lock, flags);

	/* Decode the buffer outside the spinlock (may be expensive) */
	if (dis_buf->dvs_map)
		ia_css_translate_dvs2_statistics(
		    asd->params.dvs_stat, dis_buf->dvs_map);
	else
		ia_css_get_dvs2_statistics(asd->params.dvs_stat,
					   dis_buf->dis_data);
	stats->exp_id = dis_buf->dis_data->exp_id;

	/* Return the buffer to the tail of the queue */
	spin_lock_irqsave(&asd->dis_stats_lock, flags);
	list_add_tail(&dis_buf->list, &asd->dis_stats);
	spin_unlock_irqrestore(&asd->dis_stats_lock, flags);

	if (copy_to_user(stats->dvs2_stat.ver_prod.odd_real,
			 asd->params.dvs_stat->ver_prod.odd_real,
			 asd->params.dvs_ver_proj_bytes))
		return -EFAULT;
	if (copy_to_user(stats->dvs2_stat.ver_prod.odd_imag,
			 asd->params.dvs_stat->ver_prod.odd_imag,
			 asd->params.dvs_ver_proj_bytes))
		return -EFAULT;
	if (copy_to_user(stats->dvs2_stat.ver_prod.even_real,
			 asd->params.dvs_stat->ver_prod.even_real,
			 asd->params.dvs_ver_proj_bytes))
		return -EFAULT;
	if (copy_to_user(stats->dvs2_stat.ver_prod.even_imag,
			 asd->params.dvs_stat->ver_prod.even_imag,
			 asd->params.dvs_ver_proj_bytes))
		return -EFAULT;
	if (copy_to_user(stats->dvs2_stat.hor_prod.odd_real,
			 asd->params.dvs_stat->hor_prod.odd_real,
			 asd->params.dvs_hor_proj_bytes))
		return -EFAULT;
	if (copy_to_user(stats->dvs2_stat.hor_prod.odd_imag,
			 asd->params.dvs_stat->hor_prod.odd_imag,
			 asd->params.dvs_hor_proj_bytes))
		return -EFAULT;
	if (copy_to_user(stats->dvs2_stat.hor_prod.even_real,
			 asd->params.dvs_stat->hor_prod.even_real,
			 asd->params.dvs_hor_proj_bytes))
		return -EFAULT;
	if (copy_to_user(stats->dvs2_stat.hor_prod.even_imag,
			 asd->params.dvs_stat->hor_prod.even_imag,
			 asd->params.dvs_hor_proj_bytes))
		return -EFAULT;

	return 0;
}
/* Allocate a CSS shading table of the given dimensions (may return NULL). */
struct ia_css_shading_table *atomisp_css_shading_table_alloc(
    unsigned int width, unsigned int height)
{
	return ia_css_shading_table_alloc(width, height);
}

/* Stash the shading-table pointer for the next parameter update. */
void atomisp_css_set_shading_table(struct atomisp_sub_device *asd,
				   struct ia_css_shading_table *table)
{
	asd->params.config.shading_table = table;
}

/* Free a table obtained from atomisp_css_shading_table_alloc(). */
void atomisp_css_shading_table_free(struct ia_css_shading_table *table)
{
	ia_css_shading_table_free(table);
}

/* Allocate a CSS morph (GDC) table of the given dimensions. */
struct ia_css_morph_table *atomisp_css_morph_table_allocate(
    unsigned int width, unsigned int height)
{
	return ia_css_morph_table_allocate(width, height);
}

/* Stash the morph-table pointer for the next parameter update. */
void atomisp_css_set_morph_table(struct atomisp_sub_device *asd,
				 struct ia_css_morph_table *table)
{
	asd->params.config.morph_table = table;
}
/*
 * Fill @table with the current morph table from the ISP config. Silently
 * returns (leaving @table zeroed) when called after streamoff.
 */
void atomisp_css_get_morph_table(struct atomisp_sub_device *asd,
				 struct ia_css_morph_table *table)
{
	struct ia_css_isp_config isp_config;
	struct atomisp_device *isp = asd->isp;

	if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream) {
		dev_err(isp->dev,
			"%s called after streamoff, skipping.\n", __func__);
		return;
	}
	memset(table, 0, sizeof(struct ia_css_morph_table));
	memset(&isp_config, 0, sizeof(struct ia_css_isp_config));
	isp_config.morph_table = table;
	ia_css_stream_get_isp_config(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    &isp_config);
}

/* Free a table obtained from atomisp_css_morph_table_allocate(). */
void atomisp_css_morph_table_free(struct ia_css_morph_table *table)
{
	ia_css_morph_table_free(table);
}
/*
 * Look up which input stream owns @css_pipe. On a match, store the stream
 * id in *@stream_id and return true; return false when not streaming or
 * the pipe belongs to no known stream.
 */
static bool atomisp_css_isr_get_stream_id(struct ia_css_pipe *css_pipe,
					  struct atomisp_device *isp,
					  enum atomisp_input_stream_id *stream_id)
{
	int s, p;

	if (!isp->asd.streaming)
		return false;

	for (s = 0; s < ATOMISP_INPUT_STREAM_NUM; s++) {
		struct atomisp_stream_env *env = &isp->asd.stream_env[s];

		for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++) {
			if (!env->pipes[p] || env->pipes[p] != css_pipe)
				continue;
			*stream_id = s;
			return true;
		}
	}

	return false;
}
/*
 * Drain the CSS PSYS event queue and dispatch each event: a firmware
 * assert schedules the recovery worker and aborts, firmware warnings are
 * logged, and the various *_DONE events complete the matching buffers via
 * atomisp_buf_done(). Must be called with isp->mutex held.
 */
int atomisp_css_isr_thread(struct atomisp_device *isp)
{
	enum atomisp_input_stream_id stream_id = 0;
	struct atomisp_css_event current_event;

	lockdep_assert_held(&isp->mutex);

	while (!ia_css_dequeue_psys_event(&current_event.event)) {
		if (current_event.event.type ==
		    IA_CSS_EVENT_TYPE_FW_ASSERT) {
			/*
			 * Received FW assertion signal,
			 * trigger WDT to recover
			 */
			dev_err(isp->dev,
				"%s: ISP reports FW_ASSERT event! fw_assert_module_id %d fw_assert_line_no %d\n",
				__func__,
				current_event.event.fw_assert_module_id,
				current_event.event.fw_assert_line_no);

			queue_work(system_long_wq, &isp->assert_recovery_work);
			return -EINVAL;
		} else if (current_event.event.type == IA_CSS_EVENT_TYPE_FW_WARNING) {
			dev_warn(isp->dev, "%s: ISP reports warning, code is %d, exp_id %d\n",
				 __func__, current_event.event.fw_warning,
				 current_event.event.exp_id);
			continue;
		}

		/* Events from pipes we do not know about are dropped */
		if (!atomisp_css_isr_get_stream_id(current_event.event.pipe, isp, &stream_id)) {
			if (current_event.event.type == IA_CSS_EVENT_TYPE_TIMER)
				dev_dbg(isp->dev,
					"event: Timer event.");
			else
				dev_warn(isp->dev, "%s:no subdev.event:%d",
					 __func__,
					 current_event.event.type);
			continue;
		}

		atomisp_css_temp_pipe_to_pipe_id(&isp->asd, &current_event);
		switch (current_event.event.type) {
		case IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE:
			dev_dbg(isp->dev, "event: Output frame done");
			atomisp_buf_done(&isp->asd, 0, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME,
					 current_event.pipe, true, stream_id);
			break;
		case IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE:
			dev_dbg(isp->dev, "event: Second output frame done");
			atomisp_buf_done(&isp->asd, 0, IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME,
					 current_event.pipe, true, stream_id);
			break;
		case IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE:
			dev_dbg(isp->dev, "event: 3A stats frame done");
			atomisp_buf_done(&isp->asd, 0,
					 IA_CSS_BUFFER_TYPE_3A_STATISTICS,
					 current_event.pipe,
					 false, stream_id);
			break;
		case IA_CSS_EVENT_TYPE_METADATA_DONE:
			dev_dbg(isp->dev, "event: metadata frame done");
			atomisp_buf_done(&isp->asd, 0,
					 IA_CSS_BUFFER_TYPE_METADATA,
					 current_event.pipe,
					 false, stream_id);
			break;
		case IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE:
			dev_dbg(isp->dev, "event: VF output frame done");
			atomisp_buf_done(&isp->asd, 0,
					 IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME,
					 current_event.pipe, true, stream_id);
			break;
		case IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE:
			dev_dbg(isp->dev, "event: second VF output frame done");
			atomisp_buf_done(&isp->asd, 0,
					 IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
					 current_event.pipe, true, stream_id);
			break;
		case IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE:
			dev_dbg(isp->dev, "event: dis stats frame done");
			atomisp_buf_done(&isp->asd, 0,
					 IA_CSS_BUFFER_TYPE_DIS_STATISTICS,
					 current_event.pipe,
					 false, stream_id);
			break;
		case IA_CSS_EVENT_TYPE_PIPELINE_DONE:
			dev_dbg(isp->dev, "event: pipeline done");
			break;
		case IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE:
			dev_warn(isp->dev, "unexpected event: acc stage done");
			break;
		default:
			dev_dbg(isp->dev, "unhandled css stored event: 0x%x\n",
				current_event.event.type);
			break;
		}
	}

	return 0;
}
bool atomisp_css_valid_sof(struct atomisp_device *isp)
{
unsigned int i;
/* Loop for each css vc stream */
for (i = 0; i < ATOMISP_INPUT_STREAM_NUM; i++) {
if (!isp->asd.stream_env[i].stream)
continue;
dev_dbg(isp->dev, "stream #%d: mode: %d\n",
i, isp->asd.stream_env[i].stream_config.mode);
if (isp->asd.stream_env[i].stream_config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
return false;
}
return true;
}
/* Dump the loaded ISP binaries via the CSS debug facility. */
int atomisp_css_debug_dump_isp_binary(void)
{
	ia_css_debug_dump_isp_binary();
	return 0;
}

/* Dump the SP raw-copy line count (optionally the reduced pipe's). */
int atomisp_css_dump_sp_raw_copy_linecount(bool reduced)
{
	sh_css_dump_sp_raw_copy_linecount(reduced);
	return 0;
}
/* Human-readable names for the firmware-blob types (debug dump below). */
static const char * const fw_type_name[] = {
	[ia_css_sp_firmware]		= "SP",
	[ia_css_isp_firmware]		= "ISP",
	[ia_css_bootloader_firmware]	= "BootLoader",
	[ia_css_acc_firmware]		= "accel",
};

/* Human-readable names for the ISP acceleration-stage types. */
static const char * const fw_acc_type_name[] = {
	[IA_CSS_ACC_NONE]		= "Normal",
	[IA_CSS_ACC_OUTPUT]		= "Accel stage on output",
	[IA_CSS_ACC_VIEWFINDER]		= "Accel stage on viewfinder",
	[IA_CSS_ACC_STANDALONE]		= "Stand-alone acceleration",
};
/*
 * Log one debug line per loaded firmware blob (type, id, name). Returns
 * -EPERM when no firmware info is available yet.
 */
int atomisp_css_dump_blob_infor(struct atomisp_device *isp)
{
	struct ia_css_blob_descr *bd = sh_css_blob_info;
	unsigned int i, nm = sh_css_num_binaries;

	if (nm == 0 || !bd)
		return -EPERM;

	/*
	 * sh_css_load_firmware() discards the initial "SPS" binaries,
	 * hence the NUM_OF_SPS offset in the reported index.
	 */
	for (i = 0; i < nm - NUM_OF_SPS; i++) {
		if (bd[i].header.type == ia_css_isp_firmware)
			dev_dbg(isp->dev, "Num%2d type %s (%s), binary id is %2d, name is %s\n",
				i + NUM_OF_SPS,
				fw_type_name[bd[i].header.type],
				fw_acc_type_name[bd[i].header.info.isp.type],
				bd[i].header.info.isp.sp.id,
				bd[i].name);
		else
			dev_dbg(isp->dev, "Num%2d type %s, name is %s\n",
				i + NUM_OF_SPS, fw_type_name[bd[i].header.type],
				bd[i].name);
	}

	return 0;
}
/* Tag the next parameter set with a caller-chosen config id. */
void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd,
				   uint32_t isp_config_id)
{
	asd->params.config.isp_config_id = isp_config_id;
}

/*
 * Record the output frame the ISP config applies to (presumably for
 * per-frame parameter application — TODO confirm against CSS docs).
 */
void atomisp_css_set_isp_config_applied_frame(struct atomisp_sub_device *asd,
	struct ia_css_frame *output_frame)
{
	asd->params.config.output_frame = output_frame;
}

/* Return the currently selected CSS debug function (dbg_func). */
int atomisp_get_css_dbgfunc(void)
{
	return dbg_func;
}
/*
 * Select the CSS debug print environment; @opt is only remembered as the
 * new dbg_func when __set_css_print_env() accepted it.
 */
int atomisp_set_css_dbgfunc(struct atomisp_device *isp, int opt)
{
	int ret;

	ret = __set_css_print_env(isp, opt);
	if (ret == 0)
		dbg_func = opt;

	return ret;
}

/* Enable/disable digital zoom on the capture pipe of the general stream. */
void atomisp_en_dz_capt_pipe(struct atomisp_sub_device *asd, bool enable)
{
	ia_css_en_dz_capt_pipe(
	    asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream,
	    enable);
}
/*
 * Return the DVS grid info embedded in @grid_info, or NULL. The exact
 * location depends on whether the CSS headers expose the extended
 * dvs_stat grid layout.
 */
struct ia_css_dvs_grid_info *atomisp_css_get_dvs_grid_info(
    struct ia_css_grid_info *grid_info)
{
	if (!grid_info)
		return NULL;

#ifdef IA_CSS_DVS_STAT_GRID_INFO_SUPPORTED
	return &grid_info->dvs_grid.dvs_grid_info;
#else
	return &grid_info->dvs_grid;
#endif
}
| linux-master | drivers/staging/media/atomisp/pci/atomisp_compat_css20.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Medifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2010 Intel Corporation. All Rights Reserved.
*
* Copyright (c) 2010 Silicon Hive www.siliconhive.com.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#include <media/v4l2-event.h>
#include <media/v4l2-mediabus.h>
#include "atomisp_internal.h"
#include "atomisp_tpg.h"
/* No-op: the test-pattern generator needs no explicit stream start/stop. */
static int tpg_s_stream(struct v4l2_subdev *sd, int enable)
{
	return 0;
}

/* Stub get_fmt: reports success without filling in a format. */
static int tpg_get_fmt(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *sd_state,
		       struct v4l2_subdev_format *format)
{
	/*to fake*/
	return 0;
}
/*
 * Set the TPG pad format. The media-bus code is forced to the only format
 * the TPG produces; TRY formats are stored in the subdev state, ACTIVE
 * requests are accepted as-is.
 */
static int tpg_set_fmt(struct v4l2_subdev *sd,
		       struct v4l2_subdev_state *sd_state,
		       struct v4l2_subdev_format *format)
{
	struct v4l2_mbus_framefmt *fmt = &format->format;

	if (format->pad)
		return -EINVAL;

	/* only raw8 grbg is supported by TPG */
	fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8;

	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		sd_state->pads->try_fmt = *fmt;

	return 0;
}
/* Stub: nothing to log for the TPG. */
static int tpg_log_status(struct v4l2_subdev *sd)
{
	/*to fake*/
	return 0;
}

/* No-op: the TPG has no power state to manage. */
static int tpg_s_power(struct v4l2_subdev *sd, int on)
{
	return 0;
}

/* Stub: media-bus code enumeration is not implemented. */
static int tpg_enum_mbus_code(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_mbus_code_enum *code)
{
	/*to fake*/
	return 0;
}

/* Stub: frame-size enumeration is not implemented. */
static int tpg_enum_frame_size(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_frame_size_enum *fse)
{
	/*to fake*/
	return 0;
}

/* Stub: frame-interval enumeration is not implemented. */
static int tpg_enum_frame_ival(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_frame_interval_enum *fie)
{
	/*to fake*/
	return 0;
}
/* v4l2 subdev op tables wiring the (mostly stub) TPG callbacks. */
static const struct v4l2_subdev_video_ops tpg_video_ops = {
	.s_stream = tpg_s_stream,
};

static const struct v4l2_subdev_core_ops tpg_core_ops = {
	.log_status = tpg_log_status,
	.s_power = tpg_s_power,
};

static const struct v4l2_subdev_pad_ops tpg_pad_ops = {
	.enum_mbus_code = tpg_enum_mbus_code,
	.enum_frame_size = tpg_enum_frame_size,
	.enum_frame_interval = tpg_enum_frame_ival,
	.get_fmt = tpg_get_fmt,
	.set_fmt = tpg_set_fmt,
};

static const struct v4l2_subdev_ops tpg_ops = {
	.core = &tpg_core_ops,
	.video = &tpg_video_ops,
	.pad = &tpg_pad_ops,
};
/* Tear down the TPG media entity and unregister its v4l2 subdev. */
void atomisp_tpg_unregister_entities(struct atomisp_tpg_device *tpg)
{
	media_entity_cleanup(&tpg->sd.entity);
	v4l2_device_unregister_subdev(&tpg->sd);
}
/*
 * atomisp_tpg_register_entities - register the TPG subdev with @vdev.
 *
 * Returns 0 on success; on failure the entity is cleaned up and the
 * error code from v4l2_device_register_subdev() is returned.
 */
int atomisp_tpg_register_entities(struct atomisp_tpg_device *tpg,
				  struct v4l2_device *vdev)
{
	int ret;

	/* Register the subdev and video nodes. */
	ret = v4l2_device_register_subdev(vdev, &tpg->sd);
	if (ret < 0) {
		atomisp_tpg_unregister_entities(tpg);
		return ret;
	}

	return 0;
}
/* Nothing to release for the TPG; kept for symmetry with atomisp_tpg_init(). */
void atomisp_tpg_cleanup(struct atomisp_device *isp)
{
}

/*
 * atomisp_tpg_init - set up the test pattern generator subdev.
 *
 * Initializes the v4l2 subdev, its single sink pad and the media
 * entity. Returns 0 on success or a negative error code.
 */
int atomisp_tpg_init(struct atomisp_device *isp)
{
	struct atomisp_tpg_device *tpg = &isp->tpg;
	struct v4l2_subdev *sd = &tpg->sd;
	struct media_pad *pads = tpg->pads;
	struct media_entity *me = &sd->entity;
	int ret;

	tpg->isp = isp;
	v4l2_subdev_init(sd, &tpg_ops);
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	strscpy(sd->name, "tpg_subdev", sizeof(sd->name));
	v4l2_set_subdevdata(sd, tpg);

	/* The TPG has a single sink pad. */
	pads[0].flags = MEDIA_PAD_FL_SINK;
	me->function = MEDIA_ENT_F_PROC_VIDEO_ISP;

	ret = media_entity_pads_init(me, 1, pads);
	if (ret < 0)
		goto fail;
	return 0;
fail:
	atomisp_tpg_cleanup(isp);
	return ret;
}
| linux-master | drivers/staging/media/atomisp/pci/atomisp_tpg.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "../../include/linux/atomisp.h"
#include "../../include/linux/atomisp_platform.h"
#include "ia_css_version.h"
#include "ia_css_version_data.h"
#include "ia_css_err.h"
#include "sh_css_firmware.h"
/*
 * ia_css_get_version - build the CSS version string into @version.
 *
 * Concatenates the CSS base version (ISP2400 or ISP2401 flavor) with
 * the firmware version. Returns -EINVAL when @max_size cannot hold the
 * full string including the terminating NUL, 0 otherwise.
 */
int
ia_css_get_version(char *version, int max_size)
{
	const char *fw_version = sh_css_get_fw_version();
	const char *base_version;

	base_version = IS_ISP2401 ? ISP2401_CSS_VERSION_STRING :
				    ISP2400_CSS_VERSION_STRING;

	/* Need room for base + "FW:" + fw + "; " + NUL (5 extra + NUL). */
	if (max_size <= (int)strlen(base_version) + (int)strlen(fw_version) + 5)
		return -EINVAL;

	strscpy(version, base_version, max_size);
	strcat(version, "FW:");
	strcat(version, fw_version);
	strcat(version, "; ");

	return 0;
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_version.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "ia_css_device_access.h"
#include <type_support.h> /* for uint*, size_t */
#include <system_local.h> /* for hrt_address */
#include <ia_css_env.h> /* for ia_css_hw_access_env */
#include <assert_support.h> /* for assert */
/* Environment-provided hardware access callbacks, set once at init. */
static struct ia_css_hw_access_env my_env;

/* Store a copy of the caller-provided hardware access environment. */
void
ia_css_device_access_init(const struct ia_css_hw_access_env *env)
{
	assert(env);

	my_env = *env;
}

/* 8-bit device register load via the registered environment callback. */
uint8_t
ia_css_device_load_uint8(const hrt_address addr)
{
	return my_env.load_8(addr);
}

/* 16-bit device register load via the registered environment callback. */
uint16_t
ia_css_device_load_uint16(const hrt_address addr)
{
	return my_env.load_16(addr);
}

/* 32-bit device register load via the registered environment callback. */
uint32_t
ia_css_device_load_uint32(const hrt_address addr)
{
	return my_env.load_32(addr);
}

/* 64-bit loads are not supported: always asserts and returns 0. */
uint64_t
ia_css_device_load_uint64(const hrt_address addr)
{
	assert(0);

	(void)addr;
	return 0;
}

/* 8-bit device register store via the registered environment callback. */
void
ia_css_device_store_uint8(const hrt_address addr, const uint8_t data)
{
	my_env.store_8(addr, data);
}

/* 16-bit device register store via the registered environment callback. */
void
ia_css_device_store_uint16(const hrt_address addr, const uint16_t data)
{
	my_env.store_16(addr, data);
}

/* 32-bit device register store via the registered environment callback. */
void
ia_css_device_store_uint32(const hrt_address addr, const uint32_t data)
{
	my_env.store_32(addr, data);
}

/* 64-bit stores are not supported: always asserts. */
void
ia_css_device_store_uint64(const hrt_address addr, const uint64_t data)
{
	assert(0);

	(void)addr;
	(void)data;
}

/* Bulk load of @size bytes from device memory at @addr into @data. */
void
ia_css_device_load(const hrt_address addr, void *data, const size_t size)
{
	my_env.load(addr, data, (uint32_t)size);
}

/* Bulk store of @size bytes from @data to device memory at @addr. */
void
ia_css_device_store(const hrt_address addr, const void *data, const size_t size)
{
	my_env.store(addr, data, (uint32_t)size);
}
| linux-master | drivers/staging/media/atomisp/pci/ia_css_device_access.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Medifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2010 Intel Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/v4l2-event.h>
#include <media/v4l2-mediabus.h>
#include <media/videobuf2-vmalloc.h>
#include "atomisp_cmd.h"
#include "atomisp_common.h"
#include "atomisp_compat.h"
#include "atomisp_fops.h"
#include "atomisp_internal.h"
/*
 * Mapping from media bus codes to atomisp input formats:
 * { mbus code, bpp, depth, atomisp input format, bayer order }.
 * The list is terminated by an all-zero sentinel entry.
 */
const struct atomisp_in_fmt_conv atomisp_in_fmt_conv[] = {
	{ MEDIA_BUS_FMT_SBGGR8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, IA_CSS_BAYER_ORDER_BGGR },
	{ MEDIA_BUS_FMT_SGBRG8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, IA_CSS_BAYER_ORDER_GBRG },
	{ MEDIA_BUS_FMT_SGRBG8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, IA_CSS_BAYER_ORDER_GRBG },
	{ MEDIA_BUS_FMT_SRGGB8_1X8, 8, 8, ATOMISP_INPUT_FORMAT_RAW_8, IA_CSS_BAYER_ORDER_RGGB },
	{ MEDIA_BUS_FMT_SBGGR10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, IA_CSS_BAYER_ORDER_BGGR },
	{ MEDIA_BUS_FMT_SGBRG10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, IA_CSS_BAYER_ORDER_GBRG },
	{ MEDIA_BUS_FMT_SGRBG10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, IA_CSS_BAYER_ORDER_GRBG },
	{ MEDIA_BUS_FMT_SRGGB10_1X10, 10, 10, ATOMISP_INPUT_FORMAT_RAW_10, IA_CSS_BAYER_ORDER_RGGB },
	{ MEDIA_BUS_FMT_SBGGR12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, IA_CSS_BAYER_ORDER_BGGR },
	{ MEDIA_BUS_FMT_SGBRG12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, IA_CSS_BAYER_ORDER_GBRG },
	{ MEDIA_BUS_FMT_SGRBG12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, IA_CSS_BAYER_ORDER_GRBG },
	{ MEDIA_BUS_FMT_SRGGB12_1X12, 12, 12, ATOMISP_INPUT_FORMAT_RAW_12, IA_CSS_BAYER_ORDER_RGGB },
	{ MEDIA_BUS_FMT_UYVY8_1X16, 8, 8, ATOMISP_INPUT_FORMAT_YUV422_8, 0 },
	{ MEDIA_BUS_FMT_YUYV8_1X16, 8, 8, ATOMISP_INPUT_FORMAT_YUV422_8, 0 },
#if 0 // disabled due to clang warnings
	{ MEDIA_BUS_FMT_JPEG_1X8, 8, 8, IA_CSS_FRAME_FORMAT_BINARY_8, 0 },
	{ V4L2_MBUS_FMT_CUSTOM_NV12, 12, 12, IA_CSS_FRAME_FORMAT_NV12, 0 },
	{ V4L2_MBUS_FMT_CUSTOM_NV21, 12, 12, IA_CSS_FRAME_FORMAT_NV21, 0 },
#endif
	{ V4L2_MBUS_FMT_CUSTOM_YUV420, 12, 12, ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY, 0 },
#if 0
	{ V4L2_MBUS_FMT_CUSTOM_M10MO_RAW, 8, 8, IA_CSS_FRAME_FORMAT_BINARY_8, 0 },
#endif
	/* no valid V4L2 MBUS code for metadata format, so leave it 0. */
	{ 0, 0, 0, ATOMISP_INPUT_FORMAT_EMBEDDED, 0 },
	{}
};

/* Pairs of uncompressed bus codes and their DPCM-compressed variants. */
static const struct {
	u32 code;
	u32 compressed;
} compressed_codes[] = {
	{ MEDIA_BUS_FMT_SBGGR10_1X10, MEDIA_BUS_FMT_SBGGR10_DPCM8_1X8 },
	{ MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SGBRG10_DPCM8_1X8 },
	{ MEDIA_BUS_FMT_SGRBG10_1X10, MEDIA_BUS_FMT_SGRBG10_DPCM8_1X8 },
	{ MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SRGGB10_DPCM8_1X8 },
};
/*
 * atomisp_subdev_uncompressed_code - map a DPCM-compressed media bus
 * code back to its uncompressed variant. Codes that are not in the
 * compression table are returned unchanged.
 */
u32 atomisp_subdev_uncompressed_code(u32 code)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(compressed_codes); idx++) {
		if (compressed_codes[idx].compressed == code)
			return compressed_codes[idx].code;
	}

	return code;
}
/*
 * atomisp_subdev_is_compressed - check whether @code denotes a
 * compressed format, i.e. its bits-per-pixel differ from its depth in
 * the conversion table. Unknown codes report false.
 */
bool atomisp_subdev_is_compressed(u32 code)
{
	const struct atomisp_in_fmt_conv *fc;

	/* Walk the table, excluding the terminating sentinel entry. */
	for (fc = atomisp_in_fmt_conv;
	     fc < atomisp_in_fmt_conv + ARRAY_SIZE(atomisp_in_fmt_conv) - 1;
	     fc++) {
		if (fc->code == code)
			return fc->bpp != fc->depth;
	}

	return false;
}
/*
 * atomisp_find_in_fmt_conv - look up the conversion table entry for a
 * media bus @code. Returns NULL when the code is unknown.
 */
const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv(u32 code)
{
	unsigned int idx;

	/* Skip the terminating sentinel entry. */
	for (idx = 0; idx < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; idx++) {
		if (atomisp_in_fmt_conv[idx].code == code)
			return &atomisp_in_fmt_conv[idx];
	}

	return NULL;
}
/*
 * atomisp_find_in_fmt_conv_by_atomisp_in_fmt - look up the conversion
 * table entry matching an atomisp input format. Returns NULL when no
 * entry matches.
 */
const struct atomisp_in_fmt_conv *atomisp_find_in_fmt_conv_by_atomisp_in_fmt(
    enum atomisp_input_format atomisp_in_fmt)
{
	unsigned int idx;

	/* Skip the terminating sentinel entry. */
	for (idx = 0; idx < ARRAY_SIZE(atomisp_in_fmt_conv) - 1; idx++) {
		if (atomisp_in_fmt_conv[idx].atomisp_in_fmt == atomisp_in_fmt)
			return &atomisp_in_fmt_conv[idx];
	}

	return NULL;
}
/*
 * Report whether the subdev converts raw sensor data to a non-raw
 * format, i.e. the active sink format is raw while the source is not.
 */
bool atomisp_subdev_format_conversion(struct atomisp_sub_device *asd)
{
	struct v4l2_mbus_framefmt *sink, *src;

	sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
				       V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK);
	src = atomisp_subdev_get_ffmt(&asd->subdev, NULL,
				      V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SOURCE);

	return atomisp_is_mbuscode_raw(sink->code)
	       && !atomisp_is_mbuscode_raw(src->code);
}
/*
 * V4L2 subdev operations
 */

/*
 * isp_subdev_ioctl - CCDC module private ioctl's
 * @sd: ISP V4L2 subdevice
 * @cmd: ioctl command
 * @arg: ioctl argument
 *
 * No private ioctls are implemented; always succeeds.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static long isp_subdev_ioctl(struct v4l2_subdev *sd,
			     unsigned int cmd, void *arg)
{
	return 0;
}

/*
 * isp_subdev_set_power - Power on/off the CCDC module
 * @sd: ISP V4L2 subdevice
 * @on: power on/off
 *
 * No-op: power handling is done elsewhere.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int isp_subdev_set_power(struct v4l2_subdev *sd, int on)
{
	return 0;
}
/*
 * isp_subdev_subscribe_event - subscribe a file handle to ISP events.
 *
 * Only the frame-sync/frame-end and atomisp-specific event types are
 * accepted; frame sync additionally requires a valid SOF source.
 */
static int isp_subdev_subscribe_event(struct v4l2_subdev *sd,
				      struct v4l2_fh *fh,
				      struct v4l2_event_subscription *sub)
{
	struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
	struct atomisp_device *isp = isp_sd->isp;

	switch (sub->type) {
	case V4L2_EVENT_FRAME_SYNC:
		/* Frame sync needs a valid start-of-frame source. */
		if (!atomisp_css_valid_sof(isp))
			return -EINVAL;
		break;
	case V4L2_EVENT_FRAME_END:
	case V4L2_EVENT_ATOMISP_3A_STATS_READY:
	case V4L2_EVENT_ATOMISP_METADATA_READY:
	case V4L2_EVENT_ATOMISP_PAUSE_BUFFER:
	case V4L2_EVENT_ATOMISP_CSS_RESET:
	case V4L2_EVENT_ATOMISP_ACC_COMPLETE:
		break;
	default:
		return -EINVAL;
	}

	/* Allow up to 16 queued events per file handle. */
	return v4l2_event_subscribe(fh, sub, 16, NULL);
}
/* Unsubscribe a file handle from previously subscribed events. */
static int isp_subdev_unsubscribe_event(struct v4l2_subdev *sd,
					struct v4l2_fh *fh,
					struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}

/*
 * isp_subdev_enum_mbus_code - Handle pixel format enumeration
 * @sd: pointer to v4l2 subdev structure
 * @fh : V4L2 subdev file handle
 * @code: pointer to v4l2_subdev_pad_mbus_code_enum structure
 * return -EINVAL or zero on success
 */
static int isp_subdev_enum_mbus_code(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *sd_state,
				     struct v4l2_subdev_mbus_code_enum *code)
{
	/* The last table entry is a sentinel; exclude it. */
	if (code->index >= ARRAY_SIZE(atomisp_in_fmt_conv) - 1)
		return -EINVAL;

	code->code = atomisp_in_fmt_conv[code->index].code;

	return 0;
}
/*
 * isp_subdev_validate_rect - validate a pad/selection-target pair.
 *
 * The sink pad only supports the crop target; every other pad only
 * supports compose. Returns 0 when valid, -EINVAL otherwise.
 */
static int isp_subdev_validate_rect(struct v4l2_subdev *sd, uint32_t pad,
				    uint32_t target)
{
	if (pad == ATOMISP_SUBDEV_PAD_SINK)
		return target == V4L2_SEL_TGT_CROP ? 0 : -EINVAL;

	return target == V4L2_SEL_TGT_COMPOSE ? 0 : -EINVAL;
}
/*
 * Return the crop or compose rectangle for @pad: the TRY rectangle from
 * the subdev state when @which is TRY, otherwise the ACTIVE rectangle
 * stored in the atomisp subdev. Returns NULL for unknown targets.
 */
struct v4l2_rect *atomisp_subdev_get_rect(struct v4l2_subdev *sd,
	struct v4l2_subdev_state *sd_state,
	u32 which, uint32_t pad,
	uint32_t target)
{
	struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);

	if (which == V4L2_SUBDEV_FORMAT_TRY) {
		switch (target) {
		case V4L2_SEL_TGT_CROP:
			return v4l2_subdev_get_try_crop(sd, sd_state, pad);
		case V4L2_SEL_TGT_COMPOSE:
			return v4l2_subdev_get_try_compose(sd, sd_state, pad);
		}
	}

	switch (target) {
	case V4L2_SEL_TGT_CROP:
		return &isp_sd->fmt[pad].crop;
	case V4L2_SEL_TGT_COMPOSE:
		return &isp_sd->fmt[pad].compose;
	}

	return NULL;
}

/*
 * Return the frame format for @pad: the TRY format from the subdev
 * state when @which is TRY, otherwise the ACTIVE format.
 */
struct v4l2_mbus_framefmt
*atomisp_subdev_get_ffmt(struct v4l2_subdev *sd,
			 struct v4l2_subdev_state *sd_state, uint32_t which,
			 uint32_t pad)
{
	struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);

	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(sd, sd_state, pad);

	return &isp_sd->fmt[pad].fmt;
}
/*
 * Collect the format, crop and compose pointers for every pad into the
 * caller-provided arrays.
 */
static void isp_get_fmt_rect(struct v4l2_subdev *sd,
			     struct v4l2_subdev_state *sd_state,
			     uint32_t which,
			     struct v4l2_mbus_framefmt **ffmt,
			     struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
			     struct v4l2_rect *comp[ATOMISP_SUBDEV_PADS_NUM])
{
	unsigned int i;

	for (i = 0; i < ATOMISP_SUBDEV_PADS_NUM; i++) {
		ffmt[i] = atomisp_subdev_get_ffmt(sd, sd_state, which, i);
		crop[i] = atomisp_subdev_get_rect(sd, sd_state, which, i,
						  V4L2_SEL_TGT_CROP);
		comp[i] = atomisp_subdev_get_rect(sd, sd_state, which, i,
						  V4L2_SEL_TGT_COMPOSE);
	}
}

/*
 * Propagate a format change on @pad to the corresponding selection
 * rectangle. Currently only the sink pad propagates: its crop rectangle
 * is re-derived from the pad's format dimensions. A no-op when
 * V4L2_SEL_FLAG_KEEP_CONFIG is set.
 */
static void isp_subdev_propagate(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 u32 which, uint32_t pad, uint32_t target,
				 uint32_t flags)
{
	struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM];
	struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
		       *comp[ATOMISP_SUBDEV_PADS_NUM];

	if (flags & V4L2_SEL_FLAG_KEEP_CONFIG)
		return;

	isp_get_fmt_rect(sd, sd_state, which, ffmt, crop, comp);

	switch (pad) {
	case ATOMISP_SUBDEV_PAD_SINK: {
		struct v4l2_rect r = {0};

		/* Only crop target supported on sink pad. */
		r.width = ffmt[pad]->width;
		r.height = ffmt[pad]->height;

		atomisp_subdev_set_selection(sd, sd_state, which, pad,
					     target, flags, &r);
		break;
	}
	}
}
/*
 * isp_subdev_get_selection - report the current crop/compose rectangle
 * for the requested pad and target. Returns -EINVAL for an invalid
 * pad/target combination or when no rectangle is available.
 */
static int isp_subdev_get_selection(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *sd_state,
				    struct v4l2_subdev_selection *sel)
{
	struct v4l2_rect *r;
	int ret;

	ret = isp_subdev_validate_rect(sd, sel->pad, sel->target);
	if (ret)
		return ret;

	r = atomisp_subdev_get_rect(sd, sd_state, sel->which, sel->pad,
				    sel->target);
	if (!r)
		return -EINVAL;

	sel->r = *r;
	return 0;
}
/* Human-readable name for a pad index, for debug logging. */
static const char *atomisp_pad_str(unsigned int pad)
{
	static const char *const pad_names[] = {
		"ATOMISP_SUBDEV_PAD_SINK",
		"ATOMISP_SUBDEV_PAD_SOURCE",
	};

	return pad < ARRAY_SIZE(pad_names) ? pad_names[pad]
					   : "ATOMISP_INVALID_PAD";
}
/*
 * atomisp_subdev_set_selection - apply a crop (sink) or compose
 * (source) rectangle.
 *
 * Sink crop is clamped to the pad format (minus conversion padding and,
 * in video mode with DIS, a 20%% DVS slack), then propagated to the
 * source compose rectangle and to the CSS effective input resolution.
 * Source compose sets the output size and decides whether YUV
 * downscaling is needed. On return @r holds the rectangle actually
 * applied. Returns 0 on success or -EINVAL for invalid pad/target.
 */
int atomisp_subdev_set_selection(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 u32 which, uint32_t pad, uint32_t target,
				 u32 flags, struct v4l2_rect *r)
{
	struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
	struct atomisp_device *isp = isp_sd->isp;
	struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM];
	struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM],
		       *comp[ATOMISP_SUBDEV_PADS_NUM];

	if ((pad == ATOMISP_SUBDEV_PAD_SINK && target != V4L2_SEL_TGT_CROP) ||
	    (pad == ATOMISP_SUBDEV_PAD_SOURCE && target != V4L2_SEL_TGT_COMPOSE))
		return -EINVAL;

	isp_get_fmt_rect(sd, sd_state, which, ffmt, crop, comp);

	dev_dbg(isp->dev,
		"sel: pad %s tgt %s l %d t %d w %d h %d which %s f 0x%8.8x\n",
		atomisp_pad_str(pad), target == V4L2_SEL_TGT_CROP
		? "V4L2_SEL_TGT_CROP" : "V4L2_SEL_TGT_COMPOSE",
		r->left, r->top, r->width, r->height,
		which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
		: "V4L2_SUBDEV_FORMAT_ACTIVE", flags);

	/* Hardware requires width/height aligned to the ISP step size. */
	r->width = rounddown(r->width, ATOM_ISP_STEP_WIDTH);
	r->height = rounddown(r->height, ATOM_ISP_STEP_HEIGHT);

	if (pad == ATOMISP_SUBDEV_PAD_SINK) {
		/* Only crop target supported on sink pad. */
		unsigned int dvs_w, dvs_h;

		crop[pad]->width = ffmt[pad]->width;
		crop[pad]->height = ffmt[pad]->height;

		/* Format conversion implies extra padding in the sink
		 * format; exclude it from the croppable area. */
		if (atomisp_subdev_format_conversion(isp_sd)
		    && crop[pad]->width && crop[pad]->height) {
			crop[pad]->width -= isp_sd->sink_pad_padding_w;
			crop[pad]->height -= isp_sd->sink_pad_padding_h;
		}

		if (isp_sd->params.video_dis_en &&
		    isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
			/* This resolution contains 20 % of DVS slack
			 * (of the desired captured image before
			 * scaling, or 1 / 6 of what we get from the
			 * sensor) in both width and height. Remove
			 * it. */
			crop[pad]->width = roundup(crop[pad]->width * 5 / 6,
						   ATOM_ISP_STEP_WIDTH);
			crop[pad]->height = roundup(crop[pad]->height * 5 / 6,
						    ATOM_ISP_STEP_HEIGHT);
		}

		crop[pad]->width = min(crop[pad]->width, r->width);
		crop[pad]->height = min(crop[pad]->height, r->height);

		/* Propagate the new crop to the source compose rect. */
		if (!(flags & V4L2_SEL_FLAG_KEEP_CONFIG)) {
			struct v4l2_rect tmp = *crop[pad];

			atomisp_subdev_set_selection(sd, sd_state, which,
						     ATOMISP_SUBDEV_PAD_SOURCE,
						     V4L2_SEL_TGT_COMPOSE, flags, &tmp);
		}

		if (which == V4L2_SUBDEV_FORMAT_TRY)
			goto get_rect;

		if (isp_sd->params.video_dis_en &&
		    isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
			dvs_w = rounddown(crop[pad]->width / 5,
					  ATOM_ISP_STEP_WIDTH);
			dvs_h = rounddown(crop[pad]->height / 5,
					  ATOM_ISP_STEP_HEIGHT);
		} else if (!isp_sd->params.video_dis_en &&
			   isp_sd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
			/*
			 * For CSS2.0, digital zoom needs to set dvs envelope to 12
			 * when dvs is disabled.
			 */
			dvs_w = dvs_h = 12;
		} else {
			dvs_w = dvs_h = 0;
		}
		atomisp_css_video_set_dis_envelope(isp_sd, dvs_w, dvs_h);
		atomisp_css_input_set_effective_resolution(isp_sd,
			ATOMISP_INPUT_STREAM_GENERAL,
			crop[pad]->width,
			crop[pad]->height);
	} else if (isp_sd->run_mode->val != ATOMISP_RUN_MODE_PREVIEW) {
		/* Only compose target is supported on source pads. */
		if (isp_sd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
			/* Scaling is disabled in this mode */
			r->width = crop[ATOMISP_SUBDEV_PAD_SINK]->width;
			r->height = crop[ATOMISP_SUBDEV_PAD_SINK]->height;
		}

		/* Downscaling needed only when compose differs from crop. */
		if (crop[ATOMISP_SUBDEV_PAD_SINK]->width == r->width
		    && crop[ATOMISP_SUBDEV_PAD_SINK]->height == r->height)
			isp_sd->params.yuv_ds_en = false;
		else
			isp_sd->params.yuv_ds_en = true;

		comp[pad]->width = r->width;
		comp[pad]->height = r->height;

		if (r->width == 0 || r->height == 0 ||
		    crop[ATOMISP_SUBDEV_PAD_SINK]->width == 0 ||
		    crop[ATOMISP_SUBDEV_PAD_SINK]->height == 0)
			goto get_rect;
		/*
		 * do cropping on sensor input if ratio of required resolution
		 * is different with sensor output resolution ratio:
		 *
		 * ratio = width / height
		 *
		 * if ratio_output < ratio_sensor:
		 *	effect_width = sensor_height * out_width / out_height;
		 *	effect_height = sensor_height;
		 * else
		 *	effect_width = sensor_width;
		 *	effect_height = sensor_width * out_height / out_width;
		 *
		 */
		if (r->width * crop[ATOMISP_SUBDEV_PAD_SINK]->height <
		    crop[ATOMISP_SUBDEV_PAD_SINK]->width * r->height)
			atomisp_css_input_set_effective_resolution(isp_sd,
				ATOMISP_INPUT_STREAM_GENERAL,
				rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]->
					  height * r->width / r->height,
					  ATOM_ISP_STEP_WIDTH),
				crop[ATOMISP_SUBDEV_PAD_SINK]->height);
		else
			atomisp_css_input_set_effective_resolution(isp_sd,
				ATOMISP_INPUT_STREAM_GENERAL,
				crop[ATOMISP_SUBDEV_PAD_SINK]->width,
				rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]->
					  width * r->height / r->width,
					  ATOM_ISP_STEP_WIDTH));
	} else {
		comp[pad]->width = r->width;
		comp[pad]->height = r->height;
	}

get_rect:
	/* Set format dimensions on non-sink pads as well. */
	if (pad != ATOMISP_SUBDEV_PAD_SINK) {
		ffmt[pad]->width = comp[pad]->width;
		ffmt[pad]->height = comp[pad]->height;
	}

	if (!atomisp_subdev_get_rect(sd, sd_state, which, pad, target))
		return -EINVAL;
	*r = *atomisp_subdev_get_rect(sd, sd_state, which, pad, target);

	dev_dbg(isp->dev, "sel actual: l %d t %d w %d h %d\n",
		r->left, r->top, r->width, r->height);

	return 0;
}
/*
 * isp_subdev_set_selection - V4L2 pad-op wrapper: validate the
 * pad/target combination, then delegate to the common selection helper.
 */
static int isp_subdev_set_selection(struct v4l2_subdev *sd,
				    struct v4l2_subdev_state *sd_state,
				    struct v4l2_subdev_selection *sel)
{
	int ret = isp_subdev_validate_rect(sd, sel->pad, sel->target);

	if (ret)
		return ret;

	return atomisp_subdev_set_selection(sd, sd_state, sel->which,
					    sel->pad, sel->target, sel->flags,
					    &sel->r);
}
/*
 * atomisp_subdev_set_ffmt - apply a frame format to @pad.
 *
 * On the sink pad the media bus code is validated against the
 * conversion table (falling back to the table's first entry when
 * unknown), the crop rectangle is re-propagated, and for ACTIVE
 * formats the CSS input configuration is updated. On the source pad
 * only the code is stored; dimensions are managed via the compose
 * rectangle.
 */
void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
			     struct v4l2_subdev_state *sd_state,
			     uint32_t which,
			     u32 pad, struct v4l2_mbus_framefmt *ffmt)
{
	struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd);
	struct atomisp_device *isp = isp_sd->isp;
	struct v4l2_mbus_framefmt *__ffmt =
	    atomisp_subdev_get_ffmt(sd, sd_state, which, pad);

	dev_dbg(isp->dev, "ffmt: pad %s w %d h %d code 0x%8.8x which %s\n",
		atomisp_pad_str(pad), ffmt->width, ffmt->height, ffmt->code,
		which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY"
		: "V4L2_SUBDEV_FORMAT_ACTIVE");

	switch (pad) {
	case ATOMISP_SUBDEV_PAD_SINK: {
		const struct atomisp_in_fmt_conv *fc =
		    atomisp_find_in_fmt_conv(ffmt->code);

		/* Unknown code: fall back to the first table entry. */
		if (!fc) {
			fc = atomisp_in_fmt_conv;
			ffmt->code = fc->code;
			dev_dbg(isp->dev, "using 0x%8.8x instead\n",
				ffmt->code);
		}

		*__ffmt = *ffmt;

		isp_subdev_propagate(sd, sd_state, which, pad,
				     V4L2_SEL_TGT_CROP, 0);

		if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
			atomisp_css_input_set_resolution(isp_sd,
							 ATOMISP_INPUT_STREAM_GENERAL, ffmt);
			atomisp_css_input_set_binning_factor(isp_sd,
							     ATOMISP_INPUT_STREAM_GENERAL,
							     0);
			atomisp_css_input_set_bayer_order(isp_sd, ATOMISP_INPUT_STREAM_GENERAL,
							  fc->bayer_order);
			atomisp_css_input_set_format(isp_sd, ATOMISP_INPUT_STREAM_GENERAL,
						     fc->atomisp_in_fmt);
			atomisp_css_set_default_isys_config(isp_sd, ATOMISP_INPUT_STREAM_GENERAL,
							    ffmt);
		}

		break;
	}
	case ATOMISP_SUBDEV_PAD_SOURCE:
		__ffmt->code = ffmt->code;
		break;
	}
}
/*
 * isp_subdev_get_format - Retrieve the video format on a pad
 * @sd : ISP V4L2 subdevice
 * @fh : V4L2 subdev file handle
 * @pad: Pad number
 * @fmt: Format
 *
 * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
 * to the format type.
 */
static int isp_subdev_get_format(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_format *fmt)
{
	fmt->format = *atomisp_subdev_get_ffmt(sd, sd_state, fmt->which,
					       fmt->pad);

	return 0;
}

/*
 * isp_subdev_set_format - Set the video format on a pad
 * @sd : ISP subdev V4L2 subdevice
 * @fh : V4L2 subdev file handle
 * @pad: Pad number
 * @fmt: Format
 *
 * Return 0 on success or -EINVAL if the pad is invalid or doesn't correspond
 * to the format type.
 */
static int isp_subdev_set_format(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_format *fmt)
{
	atomisp_subdev_set_ffmt(sd, sd_state, fmt->which, fmt->pad,
				&fmt->format);

	return 0;
}
/* V4L2 subdev core operations */
static const struct v4l2_subdev_core_ops isp_subdev_v4l2_core_ops = {
	.ioctl = isp_subdev_ioctl, .s_power = isp_subdev_set_power,
	.subscribe_event = isp_subdev_subscribe_event,
	.unsubscribe_event = isp_subdev_unsubscribe_event,
};

/* V4L2 subdev pad operations */
static const struct v4l2_subdev_pad_ops isp_subdev_v4l2_pad_ops = {
	.enum_mbus_code = isp_subdev_enum_mbus_code,
	.get_fmt = isp_subdev_get_format,
	.set_fmt = isp_subdev_set_format,
	.get_selection = isp_subdev_get_selection,
	.set_selection = isp_subdev_set_selection,
	.link_validate = v4l2_subdev_link_validate_default,
};

/* V4L2 subdev operations */
static const struct v4l2_subdev_ops isp_subdev_v4l2_ops = {
	.core = &isp_subdev_v4l2_core_ops,
	.pad = &isp_subdev_v4l2_pad_ops,
};
/* Initialize the subdev's statistics/metadata lists and locks. */
static void isp_subdev_init_params(struct atomisp_sub_device *asd)
{
	unsigned int i;

	/* parameters initialization */
	INIT_LIST_HEAD(&asd->s3a_stats);
	INIT_LIST_HEAD(&asd->s3a_stats_in_css);
	INIT_LIST_HEAD(&asd->s3a_stats_ready);
	INIT_LIST_HEAD(&asd->dis_stats);
	INIT_LIST_HEAD(&asd->dis_stats_in_css);
	spin_lock_init(&asd->dis_stats_lock);
	for (i = 0; i < ATOMISP_METADATA_TYPE_NUM; i++) {
		INIT_LIST_HEAD(&asd->metadata[i]);
		INIT_LIST_HEAD(&asd->metadata_in_css[i]);
		INIT_LIST_HEAD(&asd->metadata_ready[i]);
	}
}
/* media operations */
static const struct media_entity_operations isp_subdev_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
	/* .set_power = v4l2_subdev_set_power, */
};
/*
 * Forward the ISP run mode to the current sensor's V4L2_CID_RUN_MODE
 * control, if the sensor exposes one. Caller must hold the control
 * handler lock.
 */
static int __atomisp_update_run_mode(struct atomisp_sub_device *asd)
{
	struct atomisp_device *isp = asd->isp;
	struct v4l2_ctrl *sensor_ctrl;

	sensor_ctrl = v4l2_ctrl_find(
			  isp->inputs[asd->input_curr].camera->ctrl_handler,
			  V4L2_CID_RUN_MODE);
	if (!sensor_ctrl)
		return 0;

	return v4l2_ctrl_s_ctrl(sensor_ctrl, asd->run_mode->val);
}

/* Locked wrapper around __atomisp_update_run_mode(). */
int atomisp_update_run_mode(struct atomisp_sub_device *asd)
{
	int ret;

	mutex_lock(asd->ctrl_handler.lock);
	ret = __atomisp_update_run_mode(asd);
	mutex_unlock(asd->ctrl_handler.lock);

	return ret;
}

/* Control-set handler: only the run-mode control needs propagation. */
static int s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct atomisp_sub_device *asd = container_of(
		ctrl->handler, struct atomisp_sub_device, ctrl_handler);

	if (ctrl->id == V4L2_CID_RUN_MODE)
		return __atomisp_update_run_mode(asd);

	return 0;
}
/* Control ops shared by the custom atomisp controls below. */
static const struct v4l2_ctrl_ops ctrl_ops = {
	.s_ctrl = &s_ctrl,
};

/* Menu entries for the run-mode control, indexed by run-mode value. */
static const char *const ctrl_run_mode_menu[] = {
	[ATOMISP_RUN_MODE_VIDEO]	= "Video",
	[ATOMISP_RUN_MODE_STILL_CAPTURE]	= "Still capture",
	[ATOMISP_RUN_MODE_PREVIEW]	= "Preview",
};

/* Custom control: selects the ISP run mode (video/still/preview). */
static const struct v4l2_ctrl_config ctrl_run_mode = {
	.ops = &ctrl_ops,
	.id = V4L2_CID_RUN_MODE,
	.name = "Atomisp run mode",
	.type = V4L2_CTRL_TYPE_MENU,
	.min = ATOMISP_RUN_MODE_MIN,
	.def = ATOMISP_RUN_MODE_PREVIEW,
	.max = ATOMISP_RUN_MODE_MAX,
	.qmenu = ctrl_run_mode_menu,
};

static const char *const ctrl_vfpp_mode_menu[] = {
	"Enable",			/* vfpp always enabled */
	"Disable to scaler mode",	/* CSS into video mode and disable */
	"Disable to low latency mode",	/* CSS into still mode and disable */
};

/* Custom control: viewfinder postprocessing mode. */
static const struct v4l2_ctrl_config ctrl_vfpp = {
	.id = V4L2_CID_VFPP,
	.name = "Atomisp vf postprocess",
	.type = V4L2_CTRL_TYPE_MENU,
	.min = 0,
	.def = 0,
	.max = 2,
	.qmenu = ctrl_vfpp_mode_menu,
};

/*
 * Control for continuous mode raw buffer size
 *
 * The size of the RAW ringbuffer sets limit on how much
 * back in time application can go when requesting capture
 * frames to be rendered, and how many frames can be rendered
 * in a burst at full sensor rate.
 *
 * Note: this setting has a big impact on memory consumption of
 * the CSS subsystem.
 */
static const struct v4l2_ctrl_config ctrl_continuous_raw_buffer_size = {
	.ops = &ctrl_ops,
	.id = V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE,
	.type = V4L2_CTRL_TYPE_INTEGER,
	.name = "Continuous raw ringbuffer size",
	.min = 1,
	.max = 100, /* depends on CSS version, runtime checked */
	.step = 1,
	.def = 3,
};

/*
 * Control for enabling continuous viewfinder
 *
 * When enabled, and ISP is in continuous mode (see ctrl_continuous_mode ),
 * preview pipeline continues concurrently with capture
 * processing. When disabled, and continuous mode is used,
 * preview is paused while captures are processed, but
 * full pipeline restart is not needed.
 *
 * By setting this to disabled, capture processing is
 * essentially given priority over preview, and the effective
 * capture output rate may be higher than with continuous
 * viewfinder enabled.
 */
static const struct v4l2_ctrl_config ctrl_continuous_viewfinder = {
	.id = V4L2_CID_ATOMISP_CONTINUOUS_VIEWFINDER,
	.type = V4L2_CTRL_TYPE_BOOLEAN,
	.name = "Continuous viewfinder",
	.min = 0,
	.max = 1,
	.step = 1,
	.def = 0,
};

/*
 * Control for enabling Lock&Unlock Raw Buffer mechanism
 *
 * When enabled, Raw Buffer can be locked and unlocked.
 * Application can hold the exp_id of Raw Buffer
 * and unlock it when no longer needed.
 * Note: Make sure set this configuration before creating stream.
 */
static const struct v4l2_ctrl_config ctrl_enable_raw_buffer_lock = {
	.id = V4L2_CID_ENABLE_RAW_BUFFER_LOCK,
	.type = V4L2_CTRL_TYPE_BOOLEAN,
	.name = "Lock Unlock Raw Buffer",
	.min = 0,
	.max = 1,
	.step = 1,
	.def = 0,
};

/*
 * Control to disable digital zoom of the whole stream
 *
 * When it is true, pipe configuration enable_dz will be set to false.
 * This can help get a better performance by disabling pp binary.
 *
 * Note: Make sure set this configuration before creating stream.
 */
static const struct v4l2_ctrl_config ctrl_disable_dz = {
	.id = V4L2_CID_DISABLE_DZ,
	.type = V4L2_CTRL_TYPE_BOOLEAN,
	.name = "Disable digital zoom",
	.min = 0,
	.max = 1,
	.step = 1,
	.def = 0,
};
/*
 * Initialize a video pipe: locks, the videobuf2 queue (vmalloc-backed,
 * MMAP/USERPTR) and the buffer bookkeeping lists.
 * Returns 0 on success or the vb2_queue_init() error.
 */
static int atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
				    struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
{
	int ret;

	pipe->type = buf_type;
	pipe->asd = asd;
	pipe->isp = asd->isp;
	spin_lock_init(&pipe->irq_lock);
	mutex_init(&pipe->vb_queue_mutex);

	/* Init videobuf2 queue structure */
	pipe->vb_queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	pipe->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR;
	pipe->vb_queue.buf_struct_size = sizeof(struct ia_css_frame);
	pipe->vb_queue.ops = &atomisp_vb2_ops;
	pipe->vb_queue.mem_ops = &vb2_vmalloc_memops;
	pipe->vb_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	ret = vb2_queue_init(&pipe->vb_queue);
	if (ret)
		return ret;

	pipe->vdev.queue = &pipe->vb_queue;
	pipe->vdev.queue->lock = &pipe->vb_queue_mutex;

	INIT_LIST_HEAD(&pipe->buffers_in_css);
	INIT_LIST_HEAD(&pipe->activeq);
	INIT_LIST_HEAD(&pipe->buffers_waiting_for_param);
	INIT_LIST_HEAD(&pipe->per_frame_params);

	return 0;
}
/*
 * isp_subdev_init_entities - Initialize V4L2 subdev and media entity
 * @asd: ISP CCDC module
 *
 * Sets up the subdev, its sink/source media pads, the video output
 * pipe and all custom atomisp V4L2 controls.
 *
 * Return 0 on success and a negative error code on failure.
 */
static int isp_subdev_init_entities(struct atomisp_sub_device *asd)
{
	struct v4l2_subdev *sd = &asd->subdev;
	struct media_pad *pads = asd->pads;
	struct media_entity *me = &sd->entity;
	int ret;

	v4l2_subdev_init(sd, &isp_subdev_v4l2_ops);
	/* Bounded copy into the fixed-size sd->name buffer (was an
	 * unbounded sprintf); consistent with atomisp_tpg_init(). */
	strscpy(sd->name, "ATOMISP_SUBDEV", sizeof(sd->name));
	v4l2_set_subdevdata(sd, asd);
	sd->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
	sd->devnode = &asd->video_out.vdev;

	pads[ATOMISP_SUBDEV_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	pads[ATOMISP_SUBDEV_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;

	asd->fmt[ATOMISP_SUBDEV_PAD_SINK].fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;
	asd->fmt[ATOMISP_SUBDEV_PAD_SOURCE].fmt.code = MEDIA_BUS_FMT_SBGGR10_1X10;

	me->ops = &isp_subdev_media_ops;
	me->function = MEDIA_ENT_F_PROC_VIDEO_ISP;
	ret = media_entity_pads_init(me, ATOMISP_SUBDEV_PADS_NUM, pads);
	if (ret < 0)
		return ret;

	ret = atomisp_init_subdev_pipe(asd, &asd->video_out, V4L2_BUF_TYPE_VIDEO_CAPTURE);
	if (ret)
		return ret;

	ret = atomisp_video_init(&asd->video_out);
	if (ret < 0)
		return ret;

	ret = v4l2_ctrl_handler_init(&asd->ctrl_handler, 1);
	if (ret)
		return ret;

	asd->run_mode = v4l2_ctrl_new_custom(&asd->ctrl_handler,
					     &ctrl_run_mode, NULL);
	asd->vfpp = v4l2_ctrl_new_custom(&asd->ctrl_handler,
					 &ctrl_vfpp, NULL);
	asd->continuous_viewfinder = v4l2_ctrl_new_custom(&asd->ctrl_handler,
				     &ctrl_continuous_viewfinder,
				     NULL);
	asd->continuous_raw_buffer_size =
	    v4l2_ctrl_new_custom(&asd->ctrl_handler,
				 &ctrl_continuous_raw_buffer_size,
				 NULL);

	asd->enable_raw_buffer_lock =
	    v4l2_ctrl_new_custom(&asd->ctrl_handler,
				 &ctrl_enable_raw_buffer_lock,
				 NULL);
	asd->disable_dz =
	    v4l2_ctrl_new_custom(&asd->ctrl_handler,
				 &ctrl_disable_dz,
				 NULL);

	/* Make controls visible on subdev as well. */
	asd->subdev.ctrl_handler = &asd->ctrl_handler;
	spin_lock_init(&asd->raw_buffer_bitmap_lock);
	return asd->ctrl_handler.error;
}
/* Free the control handler and tear down the media entity. */
static void atomisp_subdev_cleanup_entities(struct atomisp_sub_device *asd)
{
	v4l2_ctrl_handler_free(&asd->ctrl_handler);

	media_entity_cleanup(&asd->subdev.entity);
}

/* Drain all pending events from every file handle on the subdev node. */
void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd)
{
	struct v4l2_fh *fh, *fh_tmp;
	struct v4l2_event event;
	unsigned int i, pending_event;

	list_for_each_entry_safe(fh, fh_tmp,
				 &asd->subdev.devnode->fh_list, list) {
		pending_event = v4l2_event_pending(fh);
		for (i = 0; i < pending_event; i++)
			v4l2_event_dequeue(fh, &event, 1);
	}
}

/* Tear down entities, the subdev registration and the video node. */
void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd)
{
	atomisp_subdev_cleanup_entities(asd);
	v4l2_device_unregister_subdev(&asd->subdev);
	atomisp_video_unregister(&asd->video_out);
}

/* Register the ISP subdev with the given v4l2 device. */
int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd,
				   struct v4l2_device *vdev)
{
	return v4l2_device_register_subdev(vdev, &asd->subdev);
}
/*
 * atomisp_subdev_init - ISP Subdevice initialization.
 * @dev: Device pointer specific to the ATOM ISP.
 *
 * TODO: Get the initialisation values from platform data.
 *
 * Return 0 on success or a negative error code otherwise.
 */
int atomisp_subdev_init(struct atomisp_device *isp)
{
	int ret;

	isp->asd.isp = isp;
	isp_subdev_init_params(&isp->asd);
	ret = isp_subdev_init_entities(&isp->asd);
	/* On failure, undo any partial entity setup. */
	if (ret < 0)
		atomisp_subdev_cleanup_entities(&isp->asd);

	return ret;
}
| linux-master | drivers/staging/media/atomisp/pci/atomisp_subdev.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Medifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2010 Intel Corporation. All Rights Reserved.
*
* Copyright (c) 2010 Silicon Hive www.siliconhive.com.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-vmalloc.h>
#include "atomisp_cmd.h"
#include "atomisp_common.h"
#include "atomisp_fops.h"
#include "atomisp_internal.h"
#include "atomisp_ioctl.h"
#include "atomisp_compat.h"
#include "atomisp_subdev.h"
#include "atomisp_v4l2.h"
#include "atomisp-regs.h"
#include "hmm/hmm.h"
#include "ia_css_frame.h"
#include "type_support.h"
#include "device_access/device_access.h"
/*
 * Videobuf2 ops
 */

/*
 * vb2 .queue_setup: report the number of planes and the plane size for
 * the pipe's current format.
 *
 * Runs with the main isp->mutex held so that the CSS frame info and the
 * pipe format cannot change underneath us.
 */
static int atomisp_queue_setup(struct vb2_queue *vq,
			       unsigned int *nbuffers, unsigned int *nplanes,
			       unsigned int sizes[], struct device *alloc_devs[])
{
	struct atomisp_video_pipe *pipe = container_of(vq, struct atomisp_video_pipe, vb_queue);
	int ret;

	mutex_lock(&pipe->asd->isp->mutex); /* for get_css_frame_info() / set_fmt() */

	/*
	 * When VIDIOC_S_FMT has not been called before VIDIOC_REQBUFS, then
	 * this will fail. Call atomisp_set_fmt() ourselves and try again.
	 */
	ret = atomisp_get_css_frame_info(pipe->asd, &pipe->frame_info);
	if (ret) {
		/*
		 * Oversized 10000x10000 request; atomisp_set_fmt() is
		 * expected to clamp this to a supported resolution —
		 * TODO confirm against atomisp_set_fmt().
		 */
		struct v4l2_format f = {
			.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420,
			.fmt.pix.width = 10000,
			.fmt.pix.height = 10000,
		};

		ret = atomisp_set_fmt(&pipe->vdev, &f);
		if (ret)
			goto out;

		ret = atomisp_get_css_frame_info(pipe->asd, &pipe->frame_info);
		if (ret)
			goto out;
	}

	atomisp_alloc_css_stat_bufs(pipe->asd, ATOMISP_INPUT_STREAM_GENERAL);

	/* Single-planar buffers sized from the negotiated pixel format. */
	*nplanes = 1;
	sizes[0] = PAGE_ALIGN(pipe->pix.sizeimage);

out:
	mutex_unlock(&pipe->asd->isp->mutex);
	return ret;
}
/*
 * vb2 .buf_init: bind the vb2 buffer's plane memory to a CSS frame.
 *
 * Initializes the embedded ia_css_frame from the pipe's current frame
 * info and maps the vmalloc'ed plane into the ISP MMU via hmm so that
 * the ISP can write directly into it.
 */
static int atomisp_buf_init(struct vb2_buffer *vb)
{
	struct atomisp_video_pipe *pipe = vb_to_pipe(vb);
	struct ia_css_frame *frame = vb_to_frame(vb);
	int ret;

	ret = ia_css_frame_init_from_info(frame, &pipe->frame_info);
	if (ret)
		return ret;

	/* The CSS frame must fit inside the vb2 plane allocation. */
	if (frame->data_bytes > vb2_plane_size(vb, 0)) {
		dev_err(pipe->asd->isp->dev, "Internal error frame.data_bytes(%u) > vb.length(%lu)\n",
			frame->data_bytes, vb2_plane_size(vb, 0));
		return -EIO;
	}

	/* Map the plane's vmalloc memory for ISP access. */
	frame->data = hmm_create_from_vmalloc_buf(vb2_plane_size(vb, 0),
						  vb2_plane_vaddr(vb, 0));
	if (frame->data == mmgr_NULL)
		return -ENOMEM;

	return 0;
}
/*
 * Queue one metadata buffer to the CSS for @stream_id / @css_pipe_id.
 *
 * A buffer is taken from the free list first; when that is empty a
 * not-yet-consumed buffer from the ready list is recycled instead.
 *
 * Returns 0 on success or when the CSS queue is already full,
 * -EINVAL when no buffer is available or the CSS enqueue fails.
 */
static int atomisp_q_one_metadata_buffer(struct atomisp_sub_device *asd,
					 enum atomisp_input_stream_id stream_id,
					 enum ia_css_pipe_id css_pipe_id)
{
	struct atomisp_metadata_buf *metadata_buf;
	enum atomisp_metadata_type md_type = ATOMISP_MAIN_METADATA;
	struct list_head *metadata_list;

	if (asd->metadata_bufs_in_css[stream_id][css_pipe_id] >=
	    ATOMISP_CSS_Q_DEPTH)
		return 0; /* we have reached CSS queue depth */

	if (!list_empty(&asd->metadata[md_type])) {
		metadata_list = &asd->metadata[md_type];
	} else if (!list_empty(&asd->metadata_ready[md_type])) {
		/* Recycle an unread ready buffer rather than stalling. */
		metadata_list = &asd->metadata_ready[md_type];
	} else {
		dev_warn(asd->isp->dev, "%s: No metadata buffers available for type %d!\n",
			 __func__, md_type);
		return -EINVAL;
	}

	metadata_buf = list_entry(metadata_list->next,
				  struct atomisp_metadata_buf, list);
	list_del_init(&metadata_buf->list);

	if (atomisp_q_metadata_buffer_to_css(asd, metadata_buf,
					     stream_id, css_pipe_id)) {
		/* Enqueue failed: put the buffer back where it came from. */
		list_add(&metadata_buf->list, metadata_list);
		return -EINVAL;
	} else {
		list_add_tail(&metadata_buf->list,
			      &asd->metadata_in_css[md_type]);
	}
	asd->metadata_bufs_in_css[stream_id][css_pipe_id]++;

	return 0;
}
/*
 * Queue one 3A statistics buffer to the CSS for @stream_id / @css_pipe_id.
 *
 * Prefers a free buffer; when none is left a ready (unread) buffer is
 * recycled, which drops that statistic — a debug message records the
 * dropped exposure id.
 *
 * Returns 0 on success or when the CSS queue is already full,
 * -EINVAL when no buffer is available or the CSS enqueue fails.
 */
static int atomisp_q_one_s3a_buffer(struct atomisp_sub_device *asd,
				    enum atomisp_input_stream_id stream_id,
				    enum ia_css_pipe_id css_pipe_id)
{
	struct atomisp_s3a_buf *s3a_buf;
	struct list_head *s3a_list;
	unsigned int exp_id;

	if (asd->s3a_bufs_in_css[css_pipe_id] >= ATOMISP_CSS_Q_DEPTH)
		return 0; /* we have reached CSS queue depth */

	if (!list_empty(&asd->s3a_stats)) {
		s3a_list = &asd->s3a_stats;
	} else if (!list_empty(&asd->s3a_stats_ready)) {
		s3a_list = &asd->s3a_stats_ready;
	} else {
		dev_warn(asd->isp->dev, "%s: No s3a buffers available!\n",
			 __func__);
		return -EINVAL;
	}

	s3a_buf = list_entry(s3a_list->next, struct atomisp_s3a_buf, list);
	list_del_init(&s3a_buf->list);
	exp_id = s3a_buf->s3a_data->exp_id;

	/* Make CPU-side writes visible before handing the buffer to the ISP. */
	hmm_flush_vmap(s3a_buf->s3a_data->data_ptr);
	if (atomisp_q_s3a_buffer_to_css(asd, s3a_buf,
					stream_id, css_pipe_id)) {
		/* got from head, so return back to the head */
		list_add(&s3a_buf->list, s3a_list);
		return -EINVAL;
	} else {
		list_add_tail(&s3a_buf->list, &asd->s3a_stats_in_css);
		if (s3a_list == &asd->s3a_stats_ready)
			dev_dbg(asd->isp->dev, "drop one s3a stat with exp_id %d\n", exp_id);
	}

	asd->s3a_bufs_in_css[css_pipe_id]++;
	return 0;
}
/*
 * Queue one DIS (digital image stabilization) statistics buffer to the
 * CSS for @stream_id / @css_pipe_id.
 *
 * Unlike the 3A path, the free list is protected by dis_stats_lock and
 * the buffer is taken from the tail; on enqueue failure it is returned
 * to the tail.
 *
 * Returns 0 on success or when the CSS queue is already full,
 * -EINVAL when no buffer is available or the CSS enqueue fails.
 */
static int atomisp_q_one_dis_buffer(struct atomisp_sub_device *asd,
				    enum atomisp_input_stream_id stream_id,
				    enum ia_css_pipe_id css_pipe_id)
{
	struct atomisp_dis_buf *dis_buf;
	unsigned long irqflags;

	if (asd->dis_bufs_in_css >= ATOMISP_CSS_Q_DEPTH)
		return 0; /* we have reached CSS queue depth */

	spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
	if (list_empty(&asd->dis_stats)) {
		spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
		dev_warn(asd->isp->dev, "%s: No dis buffers available!\n",
			 __func__);
		return -EINVAL;
	}

	dis_buf = list_entry(asd->dis_stats.prev,
			     struct atomisp_dis_buf, list);
	list_del_init(&dis_buf->list);
	spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);

	/* Make CPU-side writes visible before handing the buffer to the ISP. */
	hmm_flush_vmap(dis_buf->dis_data->data_ptr);
	if (atomisp_q_dis_buffer_to_css(asd, dis_buf,
					stream_id, css_pipe_id)) {
		spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
		/* got from tail, so return back to the tail */
		list_add_tail(&dis_buf->list, &asd->dis_stats);
		spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
		return -EINVAL;
	} else {
		spin_lock_irqsave(&asd->dis_stats_lock, irqflags);
		list_add_tail(&dis_buf->list, &asd->dis_stats_in_css);
		spin_unlock_irqrestore(&asd->dis_stats_lock, irqflags);
	}

	asd->dis_bufs_in_css++;
	return 0;
}
/*
 * Move up to ATOMISP_CSS_Q_DEPTH video buffers from the pipe's active
 * queue into the CSS, applying any pending per-frame parameters first.
 * For each successfully queued output frame, matching 3A / metadata /
 * DIS statistics buffers are queued as well.
 *
 * Must be called with isp->mutex held (asserted below).
 * Returns 0 on success, -EINVAL on any failure.
 */
static int atomisp_q_video_buffers_to_css(struct atomisp_sub_device *asd,
					  struct atomisp_video_pipe *pipe,
					  enum atomisp_input_stream_id stream_id,
					  enum ia_css_buffer_type css_buf_type,
					  enum ia_css_pipe_id css_pipe_id)
{
	struct atomisp_css_params_with_list *param;
	struct ia_css_dvs_grid_info *dvs_grid =
	    atomisp_css_get_dvs_grid_info(&asd->params.curr_grid_info);
	unsigned long irqflags;
	int space, err = 0;

	lockdep_assert_held(&asd->isp->mutex);

	if (WARN_ON(css_pipe_id >= IA_CSS_PIPE_ID_NUM))
		return -EINVAL;

	if (pipe->stopping)
		return -EINVAL;

	/* How many more buffers the CSS queue can still take. */
	space = ATOMISP_CSS_Q_DEPTH - atomisp_buffers_in_css(pipe);
	while (space--) {
		struct ia_css_frame *frame;

		/* Claim the next active buffer under the irq lock. */
		spin_lock_irqsave(&pipe->irq_lock, irqflags);
		frame = list_first_entry_or_null(&pipe->activeq, struct ia_css_frame, queue);
		if (frame)
			list_move_tail(&frame->queue, &pipe->buffers_in_css);
		spin_unlock_irqrestore(&pipe->irq_lock, irqflags);

		if (!frame)
			return -EINVAL;

		/*
		 * If there is a per_frame setting to apply on the buffer,
		 * do it before buffer en-queueing.
		 */
		param = pipe->frame_params[frame->vb.vb2_buf.index];
		if (param) {
			atomisp_makeup_css_parameters(asd,
						      &asd->params.css_param.update_flag,
						      &param->params);
			atomisp_apply_css_parameters(asd, &param->params);

			/* Recompute the zoom region for non-video modes. */
			if (param->params.update_flag.dz_config &&
			    asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO) {
				err = atomisp_calculate_real_zoom_region(asd,
					&param->params.dz_config, css_pipe_id);
				if (!err)
					asd->params.config.dz_config = &param->params.dz_config;
			}
			atomisp_css_set_isp_config_applied_frame(asd, frame);
			atomisp_css_update_isp_params_on_pipe(asd,
							      asd->stream_env[stream_id].pipes[css_pipe_id]);
			asd->params.dvs_6axis = (struct ia_css_dvs_6axis_config *)
						param->params.dvs_6axis;

			/*
			 * WORKAROUND:
			 * The camera HAL (halv3) cannot apply a zoom region
			 * both per-frame and globally at the same time; it
			 * only sets it per-frame. So when a per-frame setting
			 * contains a zoom region, copy it into the global
			 * setting as well.
			 */
			if (param->params.update_flag.dz_config &&
			    asd->run_mode->val != ATOMISP_RUN_MODE_VIDEO
			    && !err) {
				memcpy(&asd->params.css_param.dz_config,
				       &param->params.dz_config,
				       sizeof(struct ia_css_dz_config));
				asd->params.css_param.update_flag.dz_config =
				    (struct atomisp_dz_config *)
				    &asd->params.css_param.dz_config;
				asd->params.css_update_params_needed = true;
			}
			pipe->frame_params[frame->vb.vb2_buf.index] = NULL;
		}
		/* Enqueue buffer */
		err = atomisp_q_video_buffer_to_css(asd, frame, stream_id,
						    css_buf_type, css_pipe_id);
		if (err) {
			/* Give the buffer back to the active queue. */
			spin_lock_irqsave(&pipe->irq_lock, irqflags);
			list_move_tail(&frame->queue, &pipe->activeq);
			spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
			dev_err(asd->isp->dev, "%s, css q fails: %d\n",
				__func__, err);
			return -EINVAL;
		}

		/* enqueue 3A/DIS/metadata buffers */
		if (asd->params.curr_grid_info.s3a_grid.enable &&
		    css_pipe_id == asd->params.s3a_enabled_pipe &&
		    css_buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
			atomisp_q_one_s3a_buffer(asd, stream_id,
						 css_pipe_id);

		if (asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream_info.
		    metadata_info.size &&
		    css_buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
			atomisp_q_one_metadata_buffer(asd, stream_id,
						      css_pipe_id);

		if (dvs_grid && dvs_grid->enable &&
		    css_pipe_id == IA_CSS_PIPE_ID_VIDEO &&
		    css_buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME)
			atomisp_q_one_dis_buffer(asd, stream_id,
						 css_pipe_id);
	}

	return 0;
}
/* queue all available buffers to css */
int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd)
{
enum ia_css_pipe_id pipe_id;
if (asd->copy_mode) {
pipe_id = IA_CSS_PIPE_ID_COPY;
} else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) {
pipe_id = IA_CSS_PIPE_ID_VIDEO;
} else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) {
pipe_id = IA_CSS_PIPE_ID_CAPTURE;
} else if (asd->run_mode->val == ATOMISP_RUN_MODE_VIDEO) {
pipe_id = IA_CSS_PIPE_ID_VIDEO;
} else if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW) {
pipe_id = IA_CSS_PIPE_ID_PREVIEW;
} else {
/* ATOMISP_RUN_MODE_STILL_CAPTURE */
pipe_id = IA_CSS_PIPE_ID_CAPTURE;
}
atomisp_q_video_buffers_to_css(asd, &asd->video_out,
ATOMISP_INPUT_STREAM_GENERAL,
IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, pipe_id);
return 0;
}
/*
 * vb2 .buf_queue: hand a filled-in buffer to the driver.
 *
 * Buffers with per-frame parameters (or queued behind such buffers) go
 * to the waiting list to preserve FIFO order; everything else goes
 * straight to the active queue and, when streaming, on to the CSS.
 */
static void atomisp_buf_queue(struct vb2_buffer *vb)
{
	struct atomisp_video_pipe *pipe = vb_to_pipe(vb);
	struct ia_css_frame *frame = vb_to_frame(vb);
	struct atomisp_sub_device *asd = pipe->asd;
	unsigned long irqflags;
	int ret;

	mutex_lock(&asd->isp->mutex);

	ret = atomisp_pipe_check(pipe, false);
	if (ret || pipe->stopping) {
		/* Pipe unusable: complete the buffer with an error. */
		spin_lock_irqsave(&pipe->irq_lock, irqflags);
		atomisp_buffer_done(frame, VB2_BUF_STATE_ERROR);
		spin_unlock_irqrestore(&pipe->irq_lock, irqflags);
		goto out_unlock;
	}

	/* FIXME this ugliness comes from the original atomisp buffer handling */
	if (!(vb->skip_cache_sync_on_finish && vb->skip_cache_sync_on_prepare))
		wbinvd();

	pipe->frame_params[vb->index] = NULL;

	spin_lock_irqsave(&pipe->irq_lock, irqflags);
	/*
	 * when a frame buffer meets following conditions, it should be put into
	 * the waiting list:
	 * 1. It is not a main output frame, and it has a per-frame parameter
	 * to go with it.
	 * 2. It is not a main output frame, and the waiting buffer list is not
	 * empty, to keep the FIFO sequence of frame buffer processing, it
	 * is put to waiting list until previous per-frame parameter buffers
	 * get enqueued.
	 */
	if (pipe->frame_request_config_id[vb->index] ||
	    !list_empty(&pipe->buffers_waiting_for_param))
		list_add_tail(&frame->queue, &pipe->buffers_waiting_for_param);
	else
		list_add_tail(&frame->queue, &pipe->activeq);
	spin_unlock_irqrestore(&pipe->irq_lock, irqflags);

	/* TODO: do this better, not best way to queue to css */
	if (asd->streaming) {
		if (!list_empty(&pipe->buffers_waiting_for_param))
			atomisp_handle_parameter_and_buffer(pipe);
		else
			atomisp_qbuffers_to_css(asd);
	}

out_unlock:
	mutex_unlock(&asd->isp->mutex);
}
/*
 * vb2 .buf_cleanup: drop the per-frame parameter state for this buffer
 * slot and release its hmm mapping.
 */
static void atomisp_buf_cleanup(struct vb2_buffer *vb)
{
	struct atomisp_video_pipe *pipe = vb_to_pipe(vb);
	struct ia_css_frame *frame = vb_to_frame(vb);
	int slot = frame->vb.vb2_buf.index;

	pipe->frame_params[slot] = NULL;
	pipe->frame_request_config_id[slot] = 0;

	hmm_free(frame->data);
}
/* videobuf2 queue operations for the atomisp video pipes */
const struct vb2_ops atomisp_vb2_ops = {
	.queue_setup = atomisp_queue_setup,
	.buf_init = atomisp_buf_init,
	.buf_cleanup = atomisp_buf_cleanup,
	.buf_queue = atomisp_buf_queue,
	.start_streaming = atomisp_start_streaming,
	.stop_streaming = atomisp_stop_streaming,
};
/* Reset device-wide runtime state when the first node is opened. */
static void atomisp_dev_init_struct(struct atomisp_device *isp)
{
	unsigned int idx;

	isp->isp_fatal_error = false;

	/* No video node has claimed any sensor input yet. */
	for (idx = 0; idx < isp->input_cnt; idx++)
		isp->inputs[idx].asd = NULL;

	/*
	 * For Merrifield, frequency is scalable.
	 * After boot-up, the default frequency is 200MHz.
	 */
	isp->running_freq = ISP_FREQ_200MHZ;
}
/* Reset per-subdev runtime state and ISP parameters to their defaults. */
static void atomisp_subdev_init_struct(struct atomisp_sub_device *asd)
{
	v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_STILL_CAPTURE);
	memset(&asd->params.css_param, 0, sizeof(asd->params.css_param));

	/* Default ISP processing parameters. */
	asd->params.color_effect = V4L2_COLORFX_NONE;
	asd->params.bad_pixel_en = true;
	asd->params.gdc_cac_en = false;
	asd->params.video_dis_en = false;
	asd->params.sc_en = false;
	asd->params.fpn_en = false;
	asd->params.xnr_en = false;
	asd->params.false_color = 0;
	asd->params.yuv_ds_en = 0;
	/* s3a grid not enabled for any pipe */
	asd->params.s3a_enabled_pipe = IA_CSS_PIPE_ID_NUM;

	/* Stream / sensor bookkeeping. */
	asd->copy_mode = false;
	asd->stream_prepared = false;
	asd->high_speed_mode = false;
	asd->sensor_array_res.width = 0;
	asd->sensor_array_res.height = 0;

	atomisp_css_init_struct(asd);
}
/*
 * file operation functions
 */

/*
 * .open handler for the atomisp video device nodes.
 *
 * Enforces single-open per node, powers on the ISP via runtime PM,
 * powers on the flash subdev (if present) and resets device and subdev
 * runtime state. All error paths undo what was done so far.
 */
static int atomisp_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
	struct atomisp_sub_device *asd = pipe->asd;
	int ret;

	dev_dbg(isp->dev, "open device %s\n", vdev->name);

	ret = v4l2_fh_open(file);
	if (ret)
		return ret;

	mutex_lock(&isp->mutex);

	if (!isp->input_cnt) {
		dev_err(isp->dev, "no camera attached\n");
		ret = -EINVAL;
		goto error;
	}

	/*
	 * atomisp does not allow multiple open
	 */
	if (pipe->users) {
		dev_dbg(isp->dev, "video node already opened\n");
		mutex_unlock(&isp->mutex);
		return -EBUSY;
	}

	/* runtime power management, turn on ISP */
	ret = pm_runtime_resume_and_get(vdev->v4l2_dev->dev);
	if (ret < 0) {
		dev_err(isp->dev, "Failed to power on device\n");
		goto error;
	}

	atomisp_dev_init_struct(isp);

	/* -ENODEV / -ENOIOCTLCMD just mean there is no flash subdev. */
	ret = v4l2_subdev_call(isp->flash, core, s_power, 1);
	if (ret < 0 && ret != -ENODEV && ret != -ENOIOCTLCMD) {
		dev_err(isp->dev, "Failed to power-on flash\n");
		goto css_error;
	}

	atomisp_subdev_init_struct(asd);
	/* Ensure that a mode is set */
	v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_PREVIEW);

	pipe->users++;
	mutex_unlock(&isp->mutex);

	return 0;

css_error:
	pm_runtime_put(vdev->v4l2_dev->dev);
error:
	mutex_unlock(&isp->mutex);
	v4l2_fh_release(file);
	return ret;
}
/*
 * .release handler for the atomisp video device nodes.
 *
 * Tears down vb2 state, clears the sink pad format, frees statistics
 * and internal buffers, powers down the sensor, flash and ISP, and
 * clears the compose selection on the source pad.
 */
static int atomisp_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct atomisp_device *isp = video_get_drvdata(vdev);
	struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
	struct atomisp_sub_device *asd = pipe->asd;
	struct v4l2_subdev_fh fh;
	struct v4l2_rect clear_compose = {0};
	int ret;

	v4l2_fh_init(&fh.vfh, vdev);

	dev_dbg(isp->dev, "release device %s\n", vdev->name);

	/* Note file must not be used after this! */
	vb2_fop_release(file);

	mutex_lock(&isp->mutex);

	pipe->users--;

	/*
	 * A little trick here:
	 * file injection input resolution is recorded in the sink pad,
	 * therefore can not be cleared when releaseing one device node.
	 * The sink pad setting can only be cleared when all device nodes
	 * get released.
	 */
	{
		struct v4l2_mbus_framefmt isp_sink_fmt = { 0 };

		atomisp_subdev_set_ffmt(&asd->subdev, fh.state,
					V4L2_SUBDEV_FORMAT_ACTIVE,
					ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt);
	}

	atomisp_css_free_stat_buffers(asd);
	atomisp_free_internal_buffers(asd);

	if (isp->inputs[asd->input_curr].asd == asd) {
		/* Power off the sensor this node was using. */
		ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera,
				       core, s_power, 0);
		if (ret && ret != -ENOIOCTLCMD)
			dev_warn(isp->dev, "Failed to power-off sensor\n");

		/* clear the asd field to show this camera is not used */
		isp->inputs[asd->input_curr].asd = NULL;
	}

	atomisp_destroy_pipes_stream(asd);

	ret = v4l2_subdev_call(isp->flash, core, s_power, 0);
	if (ret < 0 && ret != -ENODEV && ret != -ENOIOCTLCMD)
		dev_warn(isp->dev, "Failed to power-off flash\n");

	if (pm_runtime_put_sync(vdev->v4l2_dev->dev) < 0)
		dev_err(isp->dev, "Failed to power off device\n");

	atomisp_subdev_set_selection(&asd->subdev, fh.state,
				     V4L2_SUBDEV_FORMAT_ACTIVE,
				     ATOMISP_SUBDEV_PAD_SOURCE,
				     V4L2_SEL_TGT_COMPOSE, 0,
				     &clear_compose);
	mutex_unlock(&isp->mutex);
	return 0;
}
/* file operations for the atomisp video device nodes */
const struct v4l2_file_operations atomisp_fops = {
	.owner = THIS_MODULE,
	.open = atomisp_open,
	.release = atomisp_release,
	.mmap = vb2_fop_mmap,
	.poll = vb2_fop_poll,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	/*
	 * this was removed because of bugs, the interface
	 * needs to be made safe for compat tasks instead.
	.compat_ioctl32 = atomisp_compat_ioctl32,
	 */
#endif
};
| linux-master | drivers/staging/media/atomisp/pci/atomisp_fops.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Medifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2010 Intel Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#include <media/v4l2-event.h>
#include <media/v4l2-mediabus.h>
#include "atomisp_cmd.h"
#include "atomisp_internal.h"
#include "atomisp-regs.h"
/*
 * Return the mbus format for @pad: the TRY format from the subdev state
 * for TRY requests, otherwise the active format stored in @csi2.
 */
static struct
v4l2_mbus_framefmt *__csi2_get_format(struct atomisp_mipi_csi2_device *csi2,
				      struct v4l2_subdev_state *sd_state,
				      enum v4l2_subdev_format_whence which,
				      unsigned int pad)
{
	return (which == V4L2_SUBDEV_FORMAT_TRY) ?
	       v4l2_subdev_get_try_format(&csi2->subdev, sd_state, pad) :
	       &csi2->formats[pad];
}
/*
 * csi2_enum_mbus_code - Handle pixel format enumeration
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @code : pointer to v4l2_subdev_pad_mbus_code_enum structure
 *
 * Walks the zero-terminated atomisp_in_fmt_conv table until the
 * requested index is reached.
 * return -EINVAL or zero on success
 */
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_mbus_code_enum *code)
{
	const struct atomisp_in_fmt_conv *ic;
	unsigned int idx;

	for (idx = 0, ic = atomisp_in_fmt_conv; ic->code; idx++, ic++) {
		if (idx == code->index) {
			code->code = ic->code;
			return 0;
		}
	}

	return -EINVAL;
}
/*
 * csi2_get_format - Handle get format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt: pointer to v4l2 subdev format structure
 *
 * Copies the stored (TRY or active) format for the requested pad.
 * return -EINVAL or zero on success
 */
static int csi2_get_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);

	fmt->format = *__csi2_get_format(csi2, sd_state, fmt->which, fmt->pad);

	return 0;
}
/*
 * atomisp_csi2_set_ffmt - Set the frame format on a CSI2 pad.
 *
 * For the sink pad the requested media-bus code is matched against the
 * supported input-format table (falling back to the table's first
 * entry) and width/height are clamped to the ISP limits; the result is
 * then propagated to the source pad via a recursive call. For the
 * source pad the format is copied straight from the sink pad — the
 * receiver itself performs no format conversion.
 */
int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
			  struct v4l2_subdev_state *sd_state,
			  unsigned int which, uint16_t pad,
			  struct v4l2_mbus_framefmt *ffmt)
{
	struct atomisp_mipi_csi2_device *csi2 = v4l2_get_subdevdata(sd);
	struct v4l2_mbus_framefmt *actual_ffmt = __csi2_get_format(csi2,
								   sd_state,
								   which, pad);

	if (pad == CSI2_PAD_SINK) {
		const struct atomisp_in_fmt_conv *ic;
		struct v4l2_mbus_framefmt tmp_ffmt;

		ic = atomisp_find_in_fmt_conv(ffmt->code);
		if (ic)
			actual_ffmt->code = ic->code;
		else
			actual_ffmt->code = atomisp_in_fmt_conv[0].code;

		actual_ffmt->width = clamp_t(u32, ffmt->width,
					     ATOM_ISP_MIN_WIDTH,
					     ATOM_ISP_MAX_WIDTH);
		actual_ffmt->height = clamp_t(u32, ffmt->height,
					      ATOM_ISP_MIN_HEIGHT,
					      ATOM_ISP_MAX_HEIGHT);

		/* Report the accepted format and mirror it to the source pad. */
		tmp_ffmt = *ffmt = *actual_ffmt;

		return atomisp_csi2_set_ffmt(sd, sd_state, which,
					     CSI2_PAD_SOURCE,
					     &tmp_ffmt);
	}

	/* FIXME: DPCM decompression */
	*actual_ffmt = *ffmt = *__csi2_get_format(csi2, sd_state, which,
						  CSI2_PAD_SINK);

	return 0;
}
/*
 * csi2_set_format - Handle set format by pads subdev method
 * @sd : pointer to v4l2 subdev structure
 * @sd_state: V4L2 subdev state
 * @fmt: pointer to v4l2 subdev format structure
 *
 * Thin wrapper delegating to atomisp_csi2_set_ffmt().
 * return -EINVAL or zero on success
 */
static int csi2_set_format(struct v4l2_subdev *sd,
			   struct v4l2_subdev_state *sd_state,
			   struct v4l2_subdev_format *fmt)
{
	int ret;

	ret = atomisp_csi2_set_ffmt(sd, sd_state, fmt->which, fmt->pad,
				    &fmt->format);
	return ret;
}
/*
 * csi2_set_stream - Enable/Disable streaming on the CSI2 module
 * @sd: ISP CSI2 V4L2 subdevice
 * @enable: Enable/disable stream (1/0)
 *
 * No per-stream programming is needed here; the receiver timing
 * registers are presumably set up elsewhere (see
 * atomisp_csi2_configure()) — confirm when changing this.
 *
 * Return 0 on success or a negative error code otherwise.
 */
static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
{
	return 0;
}
/* subdev core operations — none implemented for the CSI2 receiver */
static const struct v4l2_subdev_core_ops csi2_core_ops = {
};

/* subdev video operations */
static const struct v4l2_subdev_video_ops csi2_video_ops = {
	.s_stream = csi2_set_stream,
};

/* subdev pad operations */
static const struct v4l2_subdev_pad_ops csi2_pad_ops = {
	.enum_mbus_code = csi2_enum_mbus_code,
	.get_fmt = csi2_get_format,
	.set_fmt = csi2_set_format,
	.link_validate = v4l2_subdev_link_validate_default,
};

/* subdev operations */
static const struct v4l2_subdev_ops csi2_ops = {
	.core = &csi2_core_ops,
	.video = &csi2_video_ops,
	.pad = &csi2_pad_ops,
};

/* media operations */
static const struct media_entity_operations csi2_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
};
/*
 * mipi_csi2_init_entities - Initialize one CSI2 port's subdev and media
 * entity.
 * @csi2: Pointer to the per-port CSI2 structure.
 * @port: port number, used only in the subdev name.
 *
 * return -ENOMEM or zero on success
 */
static int mipi_csi2_init_entities(struct atomisp_mipi_csi2_device *csi2,
				   int port)
{
	struct v4l2_subdev *sd = &csi2->subdev;
	struct media_entity *me = &sd->entity;
	struct media_pad *pads = csi2->pads;
	int err;

	v4l2_subdev_init(sd, &csi2_ops);
	snprintf(sd->name, sizeof(sd->name), "ATOM ISP CSI2-port%d", port);
	v4l2_set_subdevdata(sd, csi2);
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;

	pads[CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	pads[CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;

	me->ops = &csi2_media_ops;
	me->function = MEDIA_ENT_F_VID_IF_BRIDGE;
	err = media_entity_pads_init(me, CSI2_PADS_NUM, pads);
	if (err < 0)
		return err;

	/* Default both pads to the first supported input format. */
	csi2->formats[CSI2_PAD_SINK].code = atomisp_in_fmt_conv[0].code;
	csi2->formats[CSI2_PAD_SOURCE].code = atomisp_in_fmt_conv[0].code;

	return 0;
}
void
atomisp_mipi_csi2_unregister_entities(struct atomisp_mipi_csi2_device *csi2)
{
media_entity_cleanup(&csi2->subdev.entity);
v4l2_device_unregister_subdev(&csi2->subdev);
}
/*
 * Register one CSI2 port's subdev with @vdev; on failure the entity is
 * unregistered/cleaned up again.
 */
int atomisp_mipi_csi2_register_entities(struct atomisp_mipi_csi2_device *csi2,
					struct v4l2_device *vdev)
{
	int ret;

	/* Register the subdev and video nodes. */
	ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
	if (ret < 0) {
		atomisp_mipi_csi2_unregister_entities(csi2);
		return ret;
	}

	return 0;
}
/* Limit numeric range into 31 bits */
static const int LIMIT_SHIFT = 6;

/*
 * Compute a CSI2 receiver delay-counter register value for the given
 * MIPI bus frequency:
 *
 *   value = accinv * (coeffs[0] + coeffs[1] * 500000000 / mipi_freq)
 *
 * Both the constant and the frequency are pre-shifted by LIMIT_SHIFT to
 * keep the intermediate product within 31 bits. When @mipi_freq is too
 * small to survive the shift, the fallback @def is returned.
 */
static int
atomisp_csi2_configure_calc(const short int coeffs[2], int mipi_freq, int def)
{
	/* Delay counter accuracy, 1/0.0625 for ANN/CHT, 1/0.125 for BXT */
	static const int accinv = 16; /* 1 / COUNT_ACC */
	int freq_scaled = mipi_freq >> LIMIT_SHIFT;
	int val;

	if (freq_scaled <= 0)
		return def;

	val = accinv * coeffs[1] * (500000000 >> LIMIT_SHIFT);
	val /= freq_scaled;
	val += accinv * coeffs[0];

	return val;
}
/*
 * Program the ISP2401 CSI2+ receiver termination-enable and settle
 * delay counters for the current sensor's MIPI link frequency.
 */
static void atomisp_csi2_configure_isp2401(struct atomisp_sub_device *asd)
{
	/*
	 * The ISP2401 new input system CSI2+ receiver has several
	 * parameters affecting the receiver timings. These depend
	 * on the MIPI bus frequency F in Hz (sensor transmitter rate)
	 * as follows:
	 *	register value = (A/1e9 + B * UI) / COUNT_ACC
	 * where
	 *	UI = 1 / (2 * F) in seconds
	 *	COUNT_ACC = counter accuracy in seconds
	 *	For ANN and CHV, COUNT_ACC = 0.0625 ns
	 *	For BXT, COUNT_ACC = 0.125 ns
	 * A and B are coefficients from the table below,
	 * depending whether the register minimum or maximum value is
	 * calculated.
	 *				       Minimum     Maximum
	 * Clock lane			       A     B     A     B
	 * reg_rx_csi_dly_cnt_termen_clane     0     0    38     0
	 * reg_rx_csi_dly_cnt_settle_clane    95    -8   300   -16
	 * Data lanes
	 * reg_rx_csi_dly_cnt_termen_dlane0    0     0    35     4
	 * reg_rx_csi_dly_cnt_settle_dlane0   85    -2   145    -6
	 * reg_rx_csi_dly_cnt_termen_dlane1    0     0    35     4
	 * reg_rx_csi_dly_cnt_settle_dlane1   85    -2   145    -6
	 * reg_rx_csi_dly_cnt_termen_dlane2    0     0    35     4
	 * reg_rx_csi_dly_cnt_settle_dlane2   85    -2   145    -6
	 * reg_rx_csi_dly_cnt_termen_dlane3    0     0    35     4
	 * reg_rx_csi_dly_cnt_settle_dlane3   85    -2   145    -6
	 *
	 * We use the minimum values in the calculations below.
	 */
	static const short int coeff_clk_termen[] = { 0, 0 };
	static const short int coeff_clk_settle[] = { 95, -8 };
	static const short int coeff_dat_termen[] = { 0, 0 };
	static const short int coeff_dat_settle[] = { 85, -2 };
	static const int TERMEN_DEFAULT = 0 * 0;
	static const int SETTLE_DEFAULT = 0x480;
	static const hrt_address csi2_port_base[] = {
		[ATOMISP_CAMERA_PORT_PRIMARY] = CSI2_PORT_A_BASE,
		[ATOMISP_CAMERA_PORT_SECONDARY] = CSI2_PORT_B_BASE,
		[ATOMISP_CAMERA_PORT_TERTIARY] = CSI2_PORT_C_BASE,
	};
	/* Number of lanes on each port, excluding clock lane */
	static const unsigned char csi2_port_lanes[] = {
		[ATOMISP_CAMERA_PORT_PRIMARY] = 4,
		[ATOMISP_CAMERA_PORT_SECONDARY] = 2,
		[ATOMISP_CAMERA_PORT_TERTIARY] = 2,
	};
	static const hrt_address csi2_lane_base[] = {
		CSI2_LANE_CL_BASE,	/* clock lane first */
		CSI2_LANE_D0_BASE,
		CSI2_LANE_D1_BASE,
		CSI2_LANE_D2_BASE,
		CSI2_LANE_D3_BASE,
	};
	int clk_termen;
	int clk_settle;
	int dat_termen;
	int dat_settle;
	struct v4l2_control ctrl;
	struct atomisp_device *isp = asd->isp;
	int mipi_freq = 0;
	enum atomisp_camera_port port;
	int n;

	port = isp->inputs[asd->input_curr].port;

	/* Query the sensor's current link frequency; 0 if unavailable. */
	ctrl.id = V4L2_CID_LINK_FREQ;
	if (v4l2_g_ctrl
	    (isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl) == 0)
		mipi_freq = ctrl.value;

	clk_termen = atomisp_csi2_configure_calc(coeff_clk_termen, mipi_freq,
						 TERMEN_DEFAULT);
	clk_settle = atomisp_csi2_configure_calc(coeff_clk_settle, mipi_freq,
						 SETTLE_DEFAULT);
	dat_termen = atomisp_csi2_configure_calc(coeff_dat_termen, mipi_freq,
						 TERMEN_DEFAULT);
	dat_settle = atomisp_csi2_configure_calc(coeff_dat_settle, mipi_freq,
						 SETTLE_DEFAULT);

	/* Lane 0 is the clock lane; the rest are data lanes. */
	for (n = 0; n < csi2_port_lanes[port] + 1; n++) {
		hrt_address base = csi2_port_base[port] + csi2_lane_base[n];

		atomisp_css2_hw_store_32(base + CSI2_REG_RX_CSI_DLY_CNT_TERMEN,
					 n == 0 ? clk_termen : dat_termen);
		atomisp_css2_hw_store_32(base + CSI2_REG_RX_CSI_DLY_CNT_SETTLE,
					 n == 0 ? clk_settle : dat_settle);
	}
}
/* Configure the CSI2 receiver; only ISP2401 needs programming. */
void atomisp_csi2_configure(struct atomisp_sub_device *asd)
{
	if (!IS_HWREVISION(asd->isp, ATOMISP_HW_REVISION_ISP2401))
		return;

	atomisp_csi2_configure_isp2401(asd);
}
/*
 * atomisp_mipi_csi2_cleanup - Routine for module driver cleanup
 *
 * Currently a no-op; kept so atomisp_mipi_csi2_init() has a matching
 * cleanup entry point for its error path.
 */
void atomisp_mipi_csi2_cleanup(struct atomisp_device *isp)
{
}
/*
 * Initialize the CSI2 bridge and the per-port receiver entities.
 * Returns 0 on success or a negative error code.
 */
int atomisp_mipi_csi2_init(struct atomisp_device *isp)
{
	unsigned int i;
	int ret;

	ret = atomisp_csi2_bridge_init(isp);
	if (ret < 0)
		return ret;

	for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
		struct atomisp_mipi_csi2_device *csi2_port = &isp->csi2_port[i];

		csi2_port->isp = isp;
		ret = mipi_csi2_init_entities(csi2_port, i);
		if (ret < 0) {
			atomisp_mipi_csi2_cleanup(isp);
			return ret;
		}
	}

	return 0;
}
| linux-master | drivers/staging/media/atomisp/pci/atomisp_csi2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
/*! \file */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "hmm.h"
#include "atomisp_internal.h"
#include "ia_css.h"
#include "sh_css_hrt.h" /* only for file 2 MIPI */
#include "ia_css_buffer.h"
#include "ia_css_binary.h"
#include "sh_css_internal.h"
#include "sh_css_mipi.h"
#include "sh_css_sp.h" /* sh_css_sp_group */
#include "ia_css_isys.h"
#include "ia_css_frame.h"
#include "sh_css_defs.h"
#include "sh_css_firmware.h"
#include "sh_css_params.h"
#include "sh_css_params_internal.h"
#include "sh_css_param_shading.h"
#include "ia_css_refcount.h"
#include "ia_css_rmgr.h"
#include "ia_css_debug.h"
#include "ia_css_debug_pipe.h"
#include "ia_css_device_access.h"
#include "device_access.h"
#include "sh_css_legacy.h"
#include "ia_css_pipeline.h"
#include "ia_css_stream.h"
#include "sh_css_stream_format.h"
#include "ia_css_pipe.h"
#include "ia_css_util.h"
#include "ia_css_pipe_util.h"
#include "ia_css_pipe_binarydesc.h"
#include "ia_css_pipe_stagedesc.h"
#include "tag.h"
#include "assert_support.h"
#include "math_support.h"
#include "sw_event_global.h" /* Event IDs.*/
#if !defined(ISP2401)
#include "ia_css_ifmtr.h"
#endif
#include "input_system.h"
#include "mmu_device.h" /* mmu_set_page_table_base_index(), ... */
#include "ia_css_mmu_private.h" /* sh_css_mmu_set_page_table_base_index() */
#include "gdc_device.h" /* HRT_GDC_N */
#include "dma.h" /* dma_set_max_burst_size() */
#include "irq.h" /* virq */
#include "sp.h" /* cnd_sp_irq_enable() */
#include "isp.h" /* cnd_isp_irq_enable, ISP_VEC_NELEMS */
#include "gp_device.h" /* gp_device_reg_store() */
#define __INLINE_GPIO__
#include "gpio.h"
#include "timed_ctrl.h"
#include "ia_css_inputfifo.h"
#define WITH_PC_MONITORING 0
#define SH_CSS_VIDEO_BUFFER_ALIGNMENT 0
#include "ia_css_spctrl.h"
#include "ia_css_version_data.h"
#include "sh_css_struct.h"
#include "ia_css_bufq.h"
#include "ia_css_timer.h" /* clock_value_t */
#include "isp/modes/interface/input_buf.isp.h"
/* Name of the sp program: should not be built-in */
#define SP_PROG_NAME "sp"
/* Size of Refcount List */
#define REFCOUNT_SIZE 1000
/*
* for JPEG, we don't know the length of the image upfront,
* but since we support sensor up to 16MP, we take this as
* upper limit.
*/
#define JPEG_BYTES (16 * 1024 * 1024)
struct sh_css my_css;
int __printf(1, 0) (*sh_css_printf)(const char *fmt, va_list args) = NULL;
/*
* modes of work: stream_create and stream_destroy will update the save/restore
* data only when in working mode, not suspend/resume
*/
/* Current save/restore mode of the CSS layer; see my_css_save.mode. */
enum ia_sh_css_modes {
	sh_css_mode_none = 0,	/* not yet initialized */
	sh_css_mode_working,	/* normal operation: seeds are updated */
	sh_css_mode_suspend,	/* seeds frozen while suspended */
	sh_css_mode_resume	/* restoring streams from the seeds */
};
/**
 * struct sh_css_stream_seed - a stream seed, to save and restore the
 * stream data.
 *
 * @orig_stream:    pointer to restore the original handle
 * @stream:         handle, used as ID too.
 * @stream_config:  stream config struct
 * @num_pipes:      number of pipes
 * @pipes:          pipe handles
 * @orig_pipes:     pointer to restore original handle
 * @pipe_config:    pipe config structs
 *
 * the stream seed contains all the data required to "grow" the seed again
 * after it was closed.
 */
struct sh_css_stream_seed {
	struct ia_css_stream		**orig_stream;
	struct ia_css_stream		*stream;
	struct ia_css_stream_config	stream_config;
	int				num_pipes;
	struct ia_css_pipe		*pipes[IA_CSS_PIPE_ID_NUM];
	struct ia_css_pipe		**orig_pipes[IA_CSS_PIPE_ID_NUM];
	struct ia_css_pipe_config	pipe_config[IA_CSS_PIPE_ID_NUM];
};
/* Maximum number of streams whose seeds can be saved concurrently. */
#define MAX_ACTIVE_STREAMS	5

/*
 * A global struct for save/restore to hold all the data that should
 * sustain power-down: MMU base, IRQ type, env for routines, binary loaded FW
 * and the stream seeds.
 */
struct sh_css_save {
	enum ia_sh_css_modes	mode;
	u32		       mmu_base;		/* the last mmu_base */
	enum ia_css_irq_type   irq_type;
	struct sh_css_stream_seed stream_seeds[MAX_ACTIVE_STREAMS];
	struct ia_css_fw       *loaded_fw;		/* fw struct previously loaded */
	struct ia_css_env      driver_env;		/* driver-supplied env copy */
};

/* Tracks whether my_css_save below holds valid data yet. */
static bool my_css_save_initialized;	/* if my_css_save was initialized */
static struct sh_css_save my_css_save;
/*
* pqiao NOTICE: this is for css internal buffer recycling when stopping
* pipeline,
* this array is temporary and will be replaced by resource manager
*/
/* Taking the biggest Size for number of Elements */
#define MAX_HMM_BUFFER_NUM \
(SH_CSS_MAX_NUM_QUEUES * (IA_CSS_NUM_ELEMS_SP2HOST_BUFFER_QUEUE + 2))
/* Bookkeeping entry for one buffer tracked by the hmm buffer-record table. */
struct sh_css_hmm_buffer_record {
	bool in_use;				/* slot currently occupied */
	enum ia_css_buffer_type type;		/* buffer type of the entry */
	struct ia_css_rmgr_vbuf_handle *h_vbuf;	/* vbuf handle for the buffer */
	hrt_address kernel_ptr;			/* host-side pointer associated with it */
};
static struct sh_css_hmm_buffer_record hmm_buffer_record[MAX_HMM_BUFFER_NUM];
#define GPIO_FLASH_PIN_MASK BIT(HIVE_GPIO_STROBE_TRIGGER_PIN)
static bool fw_explicitly_loaded;
/*
* Local prototypes
*/
static int
allocate_delay_frames(struct ia_css_pipe *pipe);
static int
sh_css_pipe_start(struct ia_css_stream *stream);
/*
* @brief Check if all "ia_css_pipe" instances in the target
* "ia_css_stream" instance have stopped.
*
* @param[in] stream Point to the target "ia_css_stream" instance.
*
* @return
* - true, if all "ia_css_pipe" instances in the target "ia_css_stream"
* instance have been stopped.
* - false, otherwise.
*/
/* ISP 2401 */
static int
ia_css_pipe_check_format(struct ia_css_pipe *pipe,
enum ia_css_frame_format format);
/* ISP 2401 */
static void
ia_css_reset_defaults(struct sh_css *css);
static void
sh_css_init_host_sp_control_vars(void);
static int
set_num_primary_stages(unsigned int *num, enum ia_css_pipe_version version);
static bool
need_capture_pp(const struct ia_css_pipe *pipe);
static bool
need_yuv_scaler_stage(const struct ia_css_pipe *pipe);
static int ia_css_pipe_create_cas_scaler_desc_single_output(
struct ia_css_frame_info *cas_scaler_in_info,
struct ia_css_frame_info *cas_scaler_out_info,
struct ia_css_frame_info *cas_scaler_vf_info,
struct ia_css_cas_binary_descr *descr);
static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr
*descr);
static bool
need_downscaling(const struct ia_css_resolution in_res,
const struct ia_css_resolution out_res);
static bool need_capt_ldc(const struct ia_css_pipe *pipe);
static int
sh_css_pipe_load_binaries(struct ia_css_pipe *pipe);
static
int sh_css_pipe_get_viewfinder_frame_info(
struct ia_css_pipe *pipe,
struct ia_css_frame_info *info,
unsigned int idx);
static int
sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
struct ia_css_frame_info *info,
unsigned int idx);
static int
capture_start(struct ia_css_pipe *pipe);
static int
video_start(struct ia_css_pipe *pipe);
static int
preview_start(struct ia_css_pipe *pipe);
static int
yuvpp_start(struct ia_css_pipe *pipe);
static bool copy_on_sp(struct ia_css_pipe *pipe);
static int
init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
struct ia_css_frame *vf_frame, unsigned int idx);
static int
init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
struct ia_css_frame *frame, enum ia_css_frame_format format);
static int
init_out_frameinfo_defaults(struct ia_css_pipe *pipe,
struct ia_css_frame *out_frame, unsigned int idx);
static int
alloc_continuous_frames(struct ia_css_pipe *pipe, bool init_time);
static void
pipe_global_init(void);
static int
pipe_generate_pipe_num(const struct ia_css_pipe *pipe,
unsigned int *pipe_number);
static void
pipe_release_pipe_num(unsigned int pipe_num);
static int
create_host_pipeline_structure(struct ia_css_stream *stream);
static int
create_host_pipeline(struct ia_css_stream *stream);
static int
create_host_preview_pipeline(struct ia_css_pipe *pipe);
static int
create_host_video_pipeline(struct ia_css_pipe *pipe);
static int
create_host_copy_pipeline(struct ia_css_pipe *pipe,
unsigned int max_input_width,
struct ia_css_frame *out_frame);
static int
create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe);
static int
create_host_capture_pipeline(struct ia_css_pipe *pipe);
static int
create_host_yuvpp_pipeline(struct ia_css_pipe *pipe);
static unsigned int
sh_css_get_sw_interrupt_value(unsigned int irq);
static struct ia_css_binary *ia_css_pipe_get_shading_correction_binary(
const struct ia_css_pipe *pipe);
static struct ia_css_binary *
ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe);
static struct ia_css_binary *
ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe);
static void
sh_css_hmm_buffer_record_init(void);
static void
sh_css_hmm_buffer_record_uninit(void);
static void
sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record);
static struct sh_css_hmm_buffer_record
*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
enum ia_css_buffer_type type,
hrt_address kernel_ptr);
static struct sh_css_hmm_buffer_record
*sh_css_hmm_buffer_record_validate(ia_css_ptr ddr_buffer_addr,
enum ia_css_buffer_type type);
#ifdef ISP2401
static unsigned int get_crop_lines_for_bayer_order(const struct
ia_css_stream_config *config);
static unsigned int get_crop_columns_for_bayer_order(const struct
ia_css_stream_config *config);
static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
unsigned int *extra_row, unsigned int *extra_column);
#endif
/* Free the shading table attached to @pipe (if any) and clear the field. */
static void
sh_css_pipe_free_shading_table(struct ia_css_pipe *pipe)
{
	if (!pipe) {
		IA_CSS_ERROR("NULL input parameter");
		return;
	}

	if (pipe->shading_table) {
		ia_css_shading_table_free(pipe->shading_table);
		pipe->shading_table = NULL;
	}
}
/* Output formats the copy path accepts for YUV420-type stream input
 * (consumed by verify_copy_out_frame_format() below). */
static enum ia_css_frame_format yuv420_copy_formats[] = {
	IA_CSS_FRAME_FORMAT_NV12,
	IA_CSS_FRAME_FORMAT_NV21,
	IA_CSS_FRAME_FORMAT_YV12,
	IA_CSS_FRAME_FORMAT_YUV420,
	IA_CSS_FRAME_FORMAT_YUV420_16,
	IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8,
	IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8
};
/* Output formats the copy path accepts for YUV422 stream input
 * (consumed by verify_copy_out_frame_format() below). */
static enum ia_css_frame_format yuv422_copy_formats[] = {
	IA_CSS_FRAME_FORMAT_NV12,
	IA_CSS_FRAME_FORMAT_NV16,
	IA_CSS_FRAME_FORMAT_NV21,
	IA_CSS_FRAME_FORMAT_NV61,
	IA_CSS_FRAME_FORMAT_YV12,
	IA_CSS_FRAME_FORMAT_YV16,
	IA_CSS_FRAME_FORMAT_YUV420,
	IA_CSS_FRAME_FORMAT_YUV420_16,
	IA_CSS_FRAME_FORMAT_YUV422,
	IA_CSS_FRAME_FORMAT_YUV422_16,
	IA_CSS_FRAME_FORMAT_UYVY,
	IA_CSS_FRAME_FORMAT_YUYV
};
/*
 * Verify whether the selected output format can be produced
 * by the copy binary given the stream format.
 */
/*
 * Check that the pipe's first output format is one the copy binary can
 * generate from the stream's input format; returns -EINVAL when it cannot.
 */
static int
verify_copy_out_frame_format(struct ia_css_pipe *pipe)
{
	enum ia_css_frame_format out_fmt = pipe->output_info[0].format;
	unsigned int i;
	bool found = false;

	assert(pipe);
	assert(pipe->stream);

	switch (pipe->stream->config.input_config.format) {
	case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
	case ATOMISP_INPUT_FORMAT_YUV420_8:
		for (i = 0; i < ARRAY_SIZE(yuv420_copy_formats); i++) {
			if (out_fmt == yuv420_copy_formats[i]) {
				found = true;
				break;
			}
		}
		break;
	case ATOMISP_INPUT_FORMAT_YUV420_10:
	case ATOMISP_INPUT_FORMAT_YUV420_16:
		found = out_fmt == IA_CSS_FRAME_FORMAT_YUV420_16;
		break;
	case ATOMISP_INPUT_FORMAT_YUV422_8:
		for (i = 0; i < ARRAY_SIZE(yuv422_copy_formats); i++) {
			if (out_fmt == yuv422_copy_formats[i]) {
				found = true;
				break;
			}
		}
		break;
	case ATOMISP_INPUT_FORMAT_YUV422_10:
	case ATOMISP_INPUT_FORMAT_YUV422_16:
		found = out_fmt == IA_CSS_FRAME_FORMAT_YUV422_16 ||
			out_fmt == IA_CSS_FRAME_FORMAT_YUV420_16;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_444:
	case ATOMISP_INPUT_FORMAT_RGB_555:
	case ATOMISP_INPUT_FORMAT_RGB_565:
		found = out_fmt == IA_CSS_FRAME_FORMAT_RGBA888 ||
			out_fmt == IA_CSS_FRAME_FORMAT_RGB565;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_666:
	case ATOMISP_INPUT_FORMAT_RGB_888:
		found = out_fmt == IA_CSS_FRAME_FORMAT_RGBA888 ||
			out_fmt == IA_CSS_FRAME_FORMAT_YUV420;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_6:
	case ATOMISP_INPUT_FORMAT_RAW_7:
	case ATOMISP_INPUT_FORMAT_RAW_8:
	case ATOMISP_INPUT_FORMAT_RAW_10:
	case ATOMISP_INPUT_FORMAT_RAW_12:
	case ATOMISP_INPUT_FORMAT_RAW_14:
	case ATOMISP_INPUT_FORMAT_RAW_16:
		found = out_fmt == IA_CSS_FRAME_FORMAT_RAW ||
			out_fmt == IA_CSS_FRAME_FORMAT_RAW_PACKED;
		break;
	case ATOMISP_INPUT_FORMAT_BINARY_8:
		found = out_fmt == IA_CSS_FRAME_FORMAT_BINARY_8;
		break;
	default:
		break;
	}

	return found ? 0 : -EINVAL;
}
/* Bits per pixel of the stream's input format; 0 when @stream is NULL. */
unsigned int
ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream)
{
	if (!stream)
		return 0;

	return ia_css_util_input_format_bpp(stream->config.input_config.format,
					    stream->config.pixels_per_clock == 2);
}
/* TODO: move define to proper file in tools */
#define GP_ISEL_TPG_MODE 0x90058
#if !defined(ISP2401)
/*
 * ISP2400 variant: program the input circuit, the input formatter and,
 * for TPG/PRBS sources, the sync generator for @stream.
 * Returns 0 on success or a negative error code.
 */
static int
sh_css_config_input_network(struct ia_css_stream *stream)
{
	unsigned int fmt_type;
	struct ia_css_pipe *pipe = stream->last_pipe;
	struct ia_css_binary *binary = NULL;
	int err = 0;

	assert(stream);
	assert(pipe);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "sh_css_config_input_network() enter:\n");

	/* First-stage binary (if any) decides whether the ifmtr is needed */
	if (pipe->pipeline.stages)
		binary = pipe->pipeline.stages->binary;

	err = ia_css_isys_convert_stream_format_to_mipi_format(
		  stream->config.input_config.format,
		  stream->csi_rx_config.comp,
		  &fmt_type);
	if (err)
		return err;
	sh_css_sp_program_input_circuit(fmt_type,
					stream->config.channel_id,
					stream->config.mode);

	/* Online/continuous capture and copy pipes go through the input formatter */
	if ((binary && (binary->online || stream->config.continuous)) ||
	    pipe->config.mode == IA_CSS_PIPE_MODE_COPY) {
		err = ia_css_ifmtr_configure(&stream->config,
					     binary);
		if (err)
			return err;
	}

	if (stream->config.mode == IA_CSS_INPUT_MODE_TPG ||
	    stream->config.mode == IA_CSS_INPUT_MODE_PRBS) {
		unsigned int hblank_cycles = 100,
		vblank_lines = 6,
		width,
		height,
		vblank_cycles;

		/* two pixels per clock halves the generated line width */
		width = (stream->config.input_config.input_res.width) / (1 +
			(stream->config.pixels_per_clock == 2));
		height = stream->config.input_config.input_res.height;
		vblank_cycles = vblank_lines * (width + hblank_cycles);
		sh_css_sp_configure_sync_gen(width, height, hblank_cycles,
					     vblank_cycles);
		if (pipe->stream->config.mode == IA_CSS_INPUT_MODE_TPG)
			ia_css_device_store_uint32(GP_ISEL_TPG_MODE, 0);
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "sh_css_config_input_network() leave:\n");
	return 0;
}
#elif defined(ISP2401)
/*
 * Return the maximum number of sub-pixels per line the CSI-2 protocol
 * carries for @format, given a line of @pixels_per_line whole pixels.
 * Returns 0 for unsupported formats.
 */
static unsigned int csi2_protocol_calculate_max_subpixels_per_line(
    enum atomisp_input_format format,
    unsigned int pixels_per_line)
{
	switch (format) {
	case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
		/*
		 * Lines alternate UYY0 / VYY0; the trailing 0 is inserted
		 * by the input system (mipi backend), so every line holds
		 * two sub-pixels per pixel.
		 */
		return pixels_per_line * 2;
	case ATOMISP_INPUT_FORMAT_YUV420_8:
	case ATOMISP_INPUT_FORMAT_YUV420_10:
	case ATOMISP_INPUT_FORMAT_YUV420_16:
		/*
		 * Lines alternate YYYY / UYVY; the odd (chroma) line is
		 * twice as wide as the even line, so size for the odd
		 * line: two sub-pixels per pixel.
		 */
		return pixels_per_line * 2;
	case ATOMISP_INPUT_FORMAT_YUV422_8:
	case ATOMISP_INPUT_FORMAT_YUV422_10:
	case ATOMISP_INPUT_FORMAT_YUV422_16:
		/* Every line is UYVY: two sub-pixels per pixel. */
		return pixels_per_line * 2;
	case ATOMISP_INPUT_FORMAT_RGB_444:
	case ATOMISP_INPUT_FORMAT_RGB_555:
	case ATOMISP_INPUT_FORMAT_RGB_565:
	case ATOMISP_INPUT_FORMAT_RGB_666:
	case ATOMISP_INPUT_FORMAT_RGB_888:
		/* Every line is ABGR: four sub-pixels per pixel. */
		return pixels_per_line * 4;
	case ATOMISP_INPUT_FORMAT_RAW_6:
	case ATOMISP_INPUT_FORMAT_RAW_7:
	case ATOMISP_INPUT_FORMAT_RAW_8:
	case ATOMISP_INPUT_FORMAT_RAW_10:
	case ATOMISP_INPUT_FORMAT_RAW_12:
	case ATOMISP_INPUT_FORMAT_RAW_14:
	case ATOMISP_INPUT_FORMAT_RAW_16:
	case ATOMISP_INPUT_FORMAT_BINARY_8:
	case ATOMISP_INPUT_FORMAT_USER_DEF1:
	case ATOMISP_INPUT_FORMAT_USER_DEF2:
	case ATOMISP_INPUT_FORMAT_USER_DEF3:
	case ATOMISP_INPUT_FORMAT_USER_DEF4:
	case ATOMISP_INPUT_FORMAT_USER_DEF5:
	case ATOMISP_INPUT_FORMAT_USER_DEF6:
	case ATOMISP_INPUT_FORMAT_USER_DEF7:
	case ATOMISP_INPUT_FORMAT_USER_DEF8:
		/* RAW and user-defined: one sub-pixel per pixel. */
		return pixels_per_line;
	default:
		return 0;
	}
}
/*
 * Map the stream source (TPG/PRBS generator id or MIPI port) onto an
 * input-system port id.  Returns false for unsupported input modes;
 * an unknown generator/port id leaves input_port_id untouched (as in
 * the original implementation).
 */
static bool sh_css_translate_stream_cfg_to_input_system_input_port_id(
    struct ia_css_stream_config *stream_cfg,
    ia_css_isys_descr_t *isys_stream_descr)
{
	switch (stream_cfg->mode) {
	case IA_CSS_INPUT_MODE_TPG:
		switch (stream_cfg->source.tpg.id) {
		case IA_CSS_TPG_ID0:
			isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT0_ID;
			break;
		case IA_CSS_TPG_ID1:
			isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT1_ID;
			break;
		case IA_CSS_TPG_ID2:
			isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT2_ID;
			break;
		default:
			break;
		}
		return true;
	case IA_CSS_INPUT_MODE_PRBS:
		switch (stream_cfg->source.prbs.id) {
		case IA_CSS_PRBS_ID0:
			isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT0_ID;
			break;
		case IA_CSS_PRBS_ID1:
			isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT1_ID;
			break;
		case IA_CSS_PRBS_ID2:
			isys_stream_descr->input_port_id = INPUT_SYSTEM_PIXELGEN_PORT2_ID;
			break;
		default:
			break;
		}
		return true;
	case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
		switch (stream_cfg->source.port.port) {
		case MIPI_PORT0_ID:
			isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT0_ID;
			break;
		case MIPI_PORT1_ID:
			isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT1_ID;
			break;
		case MIPI_PORT2_ID:
			isys_stream_descr->input_port_id = INPUT_SYSTEM_CSI_PORT2_ID;
			break;
		default:
			break;
		}
		return true;
	default:
		return false;
	}
}
/*
 * Translate the stream input mode to the input-system source type.
 * Returns false for modes the input system cannot serve.
 */
static bool sh_css_translate_stream_cfg_to_input_system_input_port_type(
    struct ia_css_stream_config *stream_cfg,
    ia_css_isys_descr_t *isys_stream_descr)
{
	switch (stream_cfg->mode) {
	case IA_CSS_INPUT_MODE_TPG:
		isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_TPG;
		return true;
	case IA_CSS_INPUT_MODE_PRBS:
		isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_PRBS;
		return true;
	case IA_CSS_INPUT_MODE_SENSOR:
	case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
		isys_stream_descr->mode = INPUT_SYSTEM_SOURCE_TYPE_SENSOR;
		return true;
	default:
		return false;
	}
}
/*
 * Fill the source-specific attributes of the isys stream descriptor:
 * TPG pattern/mask/sync-gen config, PRBS seeds, or CSI-2 port and
 * metadata setup.  Returns false when the mode or a format conversion
 * is not supported.
 */
static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
    struct ia_css_stream_config *stream_cfg,
    ia_css_isys_descr_t *isys_stream_descr,
    int isys_stream_idx)
{
	bool rc;

	rc = true;
	switch (stream_cfg->mode) {
	case IA_CSS_INPUT_MODE_TPG:
		if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_RAMP)
			isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_RAMP;
		else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_CHECKERBOARD)
			isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_CHBO;
		else if (stream_cfg->source.tpg.mode == IA_CSS_TPG_MODE_MONO)
			isys_stream_descr->tpg_port_attr.mode = PIXELGEN_TPG_MODE_MONO;
		else
			rc = false;
		/*
		 * TODO
		 * - Make "color_cfg" as part of "ia_css_tpg_config".
		 */
		/* fixed test colors (see TODO above) */
		isys_stream_descr->tpg_port_attr.color_cfg.R1 = 51;
		isys_stream_descr->tpg_port_attr.color_cfg.G1 = 102;
		isys_stream_descr->tpg_port_attr.color_cfg.B1 = 255;
		isys_stream_descr->tpg_port_attr.color_cfg.R2 = 0;
		isys_stream_descr->tpg_port_attr.color_cfg.G2 = 100;
		isys_stream_descr->tpg_port_attr.color_cfg.B2 = 160;
		/* pattern mask and delta taken from the user TPG config */
		isys_stream_descr->tpg_port_attr.mask_cfg.h_mask =
		    stream_cfg->source.tpg.x_mask;
		isys_stream_descr->tpg_port_attr.mask_cfg.v_mask =
		    stream_cfg->source.tpg.y_mask;
		isys_stream_descr->tpg_port_attr.mask_cfg.hv_mask =
		    stream_cfg->source.tpg.xy_mask;
		isys_stream_descr->tpg_port_attr.delta_cfg.h_delta =
		    stream_cfg->source.tpg.x_delta;
		isys_stream_descr->tpg_port_attr.delta_cfg.v_delta =
		    stream_cfg->source.tpg.y_delta;
		/*
		 * TODO
		 * - Make "sync_gen_cfg" as part of "ia_css_tpg_config".
		 */
		isys_stream_descr->tpg_port_attr.sync_gen_cfg.hblank_cycles = 100;
		isys_stream_descr->tpg_port_attr.sync_gen_cfg.vblank_cycles = 100;
		isys_stream_descr->tpg_port_attr.sync_gen_cfg.pixels_per_clock =
		    stream_cfg->pixels_per_clock;
		/* ~0: generate frames indefinitely */
		isys_stream_descr->tpg_port_attr.sync_gen_cfg.nr_of_frames = (uint32_t)~(0x0);
		isys_stream_descr->tpg_port_attr.sync_gen_cfg.pixels_per_line =
		    stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.width;
		isys_stream_descr->tpg_port_attr.sync_gen_cfg.lines_per_frame =
		    stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.height;
		break;
	case IA_CSS_INPUT_MODE_PRBS:
		isys_stream_descr->prbs_port_attr.seed0 = stream_cfg->source.prbs.seed;
		isys_stream_descr->prbs_port_attr.seed1 = stream_cfg->source.prbs.seed1;
		/*
		 * TODO
		 * - Make "sync_gen_cfg" as part of "ia_css_prbs_config".
		 */
		isys_stream_descr->prbs_port_attr.sync_gen_cfg.hblank_cycles = 100;
		isys_stream_descr->prbs_port_attr.sync_gen_cfg.vblank_cycles = 100;
		isys_stream_descr->prbs_port_attr.sync_gen_cfg.pixels_per_clock =
		    stream_cfg->pixels_per_clock;
		/* ~0: generate frames indefinitely */
		isys_stream_descr->prbs_port_attr.sync_gen_cfg.nr_of_frames = (uint32_t)~(0x0);
		isys_stream_descr->prbs_port_attr.sync_gen_cfg.pixels_per_line =
		    stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.width;
		isys_stream_descr->prbs_port_attr.sync_gen_cfg.lines_per_frame =
		    stream_cfg->isys_config[IA_CSS_STREAM_DEFAULT_ISYS_STREAM_IDX].input_res.height;
		break;
	case IA_CSS_INPUT_MODE_BUFFERED_SENSOR: {
		int err;
		unsigned int fmt_type;

		err = ia_css_isys_convert_stream_format_to_mipi_format(
			  stream_cfg->isys_config[isys_stream_idx].format,
			  MIPI_PREDICTOR_NONE,
			  &fmt_type);
		if (err)
			rc = false;
		/* CSI-2 receiver configuration */
		isys_stream_descr->csi_port_attr.active_lanes =
		    stream_cfg->source.port.num_lanes;
		isys_stream_descr->csi_port_attr.fmt_type = fmt_type;
		isys_stream_descr->csi_port_attr.ch_id = stream_cfg->channel_id;
#ifdef ISP2401
		isys_stream_descr->online = stream_cfg->online;
#endif
		/* NOTE(review): error codes are OR-combined here; any
		 * non-zero result is treated as failure below. */
		err |= ia_css_isys_convert_compressed_format(
			   &stream_cfg->source.port.compression,
			   isys_stream_descr);
		if (err)
			rc = false;
		/* metadata */
		isys_stream_descr->metadata.enable = false;
		if (stream_cfg->metadata_config.resolution.height > 0) {
			err = ia_css_isys_convert_stream_format_to_mipi_format(
				  stream_cfg->metadata_config.data_type,
				  MIPI_PREDICTOR_NONE,
				  &fmt_type);
			if (err)
				rc = false;
			isys_stream_descr->metadata.fmt_type = fmt_type;
			isys_stream_descr->metadata.bits_per_pixel =
			    ia_css_util_input_format_bpp(stream_cfg->metadata_config.data_type, true);
			isys_stream_descr->metadata.pixels_per_line =
			    stream_cfg->metadata_config.resolution.width;
			isys_stream_descr->metadata.lines_per_frame =
			    stream_cfg->metadata_config.resolution.height;
#ifdef ISP2401
			/*
			 * For new input system, number of str2mmio requests must be even.
			 * So we round up number of metadata lines to be even.
			 */
			if (isys_stream_descr->metadata.lines_per_frame > 0)
				isys_stream_descr->metadata.lines_per_frame +=
				    (isys_stream_descr->metadata.lines_per_frame & 1);
#endif
			isys_stream_descr->metadata.align_req_in_bytes =
			    ia_css_csi2_calculate_input_system_alignment(
				stream_cfg->metadata_config.data_type);
			isys_stream_descr->metadata.enable = true;
		}
		break;
	}
	default:
		rc = false;
		break;
	}
	return rc;
}
/*
 * Derive the input-port resolution block of the isys descriptor:
 * bits per sub-pixel, maximum sub-pixels per line, lines per frame and
 * the input-system alignment requirement.  Returns false when the
 * format or resolution cannot be expressed.
 */
static bool sh_css_translate_stream_cfg_to_input_system_input_port_resolution(
    struct ia_css_stream_config *stream_cfg,
    ia_css_isys_descr_t *isys_stream_descr,
    int isys_stream_idx)
{
	unsigned int bits_per_subpixel;
	unsigned int max_subpixels_per_line;
	unsigned int lines_per_frame;
	unsigned int align_req_in_bytes;
	enum atomisp_input_format fmt_type;

	fmt_type = stream_cfg->isys_config[isys_stream_idx].format;
	/*
	 * Compressed CSI-2 sensor input: size the port for the
	 * uncompressed RAW format (only 10- and 12-bit are supported).
	 */
	if ((stream_cfg->mode == IA_CSS_INPUT_MODE_SENSOR ||
	     stream_cfg->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) &&
	    stream_cfg->source.port.compression.type != IA_CSS_CSI2_COMPRESSION_TYPE_NONE) {
		if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel ==
		    UNCOMPRESSED_BITS_PER_PIXEL_10)
			fmt_type = ATOMISP_INPUT_FORMAT_RAW_10;
		else if (stream_cfg->source.port.compression.uncompressed_bits_per_pixel ==
			 UNCOMPRESSED_BITS_PER_PIXEL_12)
			fmt_type = ATOMISP_INPUT_FORMAT_RAW_12;
		else
			return false;
	}

	bits_per_subpixel =
	    sh_css_stream_format_2_bits_per_subpixel(fmt_type);
	if (bits_per_subpixel == 0)
		return false;

	max_subpixels_per_line =
	    csi2_protocol_calculate_max_subpixels_per_line(fmt_type,
		    stream_cfg->isys_config[isys_stream_idx].input_res.width);
	if (max_subpixels_per_line == 0)
		return false;

	lines_per_frame = stream_cfg->isys_config[isys_stream_idx].input_res.height;
	if (lines_per_frame == 0)
		return false;

	align_req_in_bytes = ia_css_csi2_calculate_input_system_alignment(fmt_type);

	/* HW needs subpixel info for their settings */
	isys_stream_descr->input_port_resolution.bits_per_pixel = bits_per_subpixel;
	isys_stream_descr->input_port_resolution.pixels_per_line =
	    max_subpixels_per_line;
	isys_stream_descr->input_port_resolution.lines_per_frame = lines_per_frame;
	isys_stream_descr->input_port_resolution.align_req_in_bytes =
	    align_req_in_bytes;

	return true;
}
/*
 * Build the complete isys (2401) stream descriptor from the stream
 * config.  All four translation steps run unconditionally so the
 * descriptor is filled in as far as possible; the result is the AND of
 * their success codes.  @early_polling is currently unused.
 */
static bool sh_css_translate_stream_cfg_to_isys_stream_descr(
    struct ia_css_stream_config *stream_cfg,
    bool early_polling,
    ia_css_isys_descr_t *isys_stream_descr,
    int isys_stream_idx)
{
	bool ok_port, ok_type, ok_attr, ok_res;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "sh_css_translate_stream_cfg_to_isys_stream_descr() enter:\n");

	ok_port = sh_css_translate_stream_cfg_to_input_system_input_port_id(stream_cfg,
		  isys_stream_descr);
	ok_type = sh_css_translate_stream_cfg_to_input_system_input_port_type(stream_cfg,
		  isys_stream_descr);
	ok_attr = sh_css_translate_stream_cfg_to_input_system_input_port_attr(stream_cfg,
		  isys_stream_descr, isys_stream_idx);
	ok_res = sh_css_translate_stream_cfg_to_input_system_input_port_resolution(
		     stream_cfg, isys_stream_descr, isys_stream_idx);

	isys_stream_descr->raw_packed = stream_cfg->pack_raw_pixels;
	isys_stream_descr->linked_isys_stream_id = (int8_t)
		stream_cfg->isys_config[isys_stream_idx].linked_isys_stream_id;

	if (IS_ISP2401)
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
				    "sh_css_translate_stream_cfg_to_isys_stream_descr() leave:\n");

	return ok_port && ok_type && ok_attr && ok_res;
}
/*
 * Copy the first-stage binary's input constraints (left padding and
 * maximum ISP input width) into the isys output-port attributes.
 * Returns false when no binary is available.
 */
static bool sh_css_translate_binary_info_to_input_system_output_port_attr(
    struct ia_css_binary *binary,
    ia_css_isys_descr_t *isys_stream_descr)
{
	if (!binary)
		return false;

	isys_stream_descr->output_port_attr.max_isp_input_width =
	    binary->info->sp.input.max_width;
	isys_stream_descr->output_port_attr.left_padding = binary->left_padding;

	return true;
}
/*
 * ISP2401 variant: configure the virtual input system for @stream.
 * Finds the pipe that consumes the input, translates the stream config
 * into isys stream descriptors, and creates + calculates one virtual
 * isys stream per valid isys_config entry.  Returns 0 or -EINVAL.
 */
static int
sh_css_config_input_network(struct ia_css_stream *stream)
{
	bool rc;
	ia_css_isys_descr_t isys_stream_descr;
	unsigned int sp_thread_id;
	struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;
	struct ia_css_pipe *pipe = NULL;
	struct ia_css_binary *binary = NULL;
	int i;
	u32 isys_stream_id;
	bool early_polling = false;

	assert(stream);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "sh_css_config_input_network() enter 0x%p:\n", stream);

	/*
	 * Continuous capture: preview/video input is consumed via their
	 * copy pipe; capture/yuvpp use the last pipe directly.
	 */
	if (stream->config.continuous) {
		if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_CAPTURE)
			pipe = stream->last_pipe;
		else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_YUVPP)
			pipe = stream->last_pipe;
		else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
			pipe = stream->last_pipe->pipe_settings.preview.copy_pipe;
		else if (stream->last_pipe->config.mode == IA_CSS_PIPE_MODE_VIDEO)
			pipe = stream->last_pipe->pipe_settings.video.copy_pipe;
	} else {
		pipe = stream->last_pipe;
	}

	if (!pipe)
		return -EINVAL;

	/* first-stage binary, if the pipeline has one */
	if (pipe->pipeline.stages)
		if (pipe->pipeline.stages->binary)
			binary = pipe->pipeline.stages->binary;

	if (binary) {
		/*
		 * this was being done in ifmtr in 2400.
		 * online and cont bypass the init_in_frameinfo_memory_defaults
		 * so need to do it here
		 */
		ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
	}

	/* get the SP thread id */
	rc = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &sp_thread_id);
	if (!rc)
		return -EINVAL;
	/* get the target input terminal */
	sp_pipeline_input_terminal = &sh_css_sp_group.pipe_io[sp_thread_id].input;

	/* one virtual isys stream per valid isys_config entry */
	for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) {
		/* initialization */
		memset((void *)(&isys_stream_descr), 0, sizeof(ia_css_isys_descr_t));
		sp_pipeline_input_terminal->context.virtual_input_system_stream[i].valid = 0;
		sp_pipeline_input_terminal->ctrl.virtual_input_system_stream_cfg[i].valid = 0;

		if (!stream->config.isys_config[i].valid)
			continue;

		/* translate the stream configuration to the Input System (2401) configuration */
		rc = sh_css_translate_stream_cfg_to_isys_stream_descr(
			 &stream->config,
			 early_polling,
			 &(isys_stream_descr), i);

		if (stream->config.online) {
			rc &= sh_css_translate_binary_info_to_input_system_output_port_attr(
				  binary,
				  &(isys_stream_descr));
		}

		if (!rc)
			return -EINVAL;

		isys_stream_id = ia_css_isys_generate_stream_id(sp_thread_id, i);

		/* create the virtual Input System (2401) */
		rc = ia_css_isys_stream_create(
			 &(isys_stream_descr),
			 &sp_pipeline_input_terminal->context.virtual_input_system_stream[i],
			 isys_stream_id);
		if (!rc)
			return -EINVAL;

		/* calculate the configuration of the virtual Input System (2401) */
		rc = ia_css_isys_stream_calculate_cfg(
			 &sp_pipeline_input_terminal->context.virtual_input_system_stream[i],
			 &(isys_stream_descr),
			 &sp_pipeline_input_terminal->ctrl.virtual_input_system_stream_cfg[i]);
		if (!rc) {
			/* roll back the stream created just above */
			ia_css_isys_stream_destroy(
			    &sp_pipeline_input_terminal->context.virtual_input_system_stream[i]);
			return -EINVAL;
		}
	}

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "sh_css_config_input_network() leave:\n");
	return 0;
}
/* Last pipe of @stream, or NULL when @stream is NULL. */
static inline struct ia_css_pipe *stream_get_last_pipe(
    struct ia_css_stream *stream)
{
	return stream ? stream->last_pipe : NULL;
}
/*
 * Copy pipe of a continuous stream (preview/video only); NULL for
 * non-continuous streams or other pipe modes.
 */
static inline struct ia_css_pipe *stream_get_copy_pipe(
    struct ia_css_stream *stream)
{
	struct ia_css_pipe *last_pipe = stream_get_last_pipe(stream);

	if (!stream || !last_pipe || !stream->config.continuous)
		return NULL;

	if (last_pipe->mode == IA_CSS_PIPE_ID_PREVIEW)
		return last_pipe->pipe_settings.preview.copy_pipe;
	if (last_pipe->mode == IA_CSS_PIPE_ID_VIDEO)
		return last_pipe->pipe_settings.video.copy_pipe;

	return NULL;
}
/* Pipe that consumes the stream's input: copy pipe when continuous,
 * otherwise the last pipe. */
static inline struct ia_css_pipe *stream_get_target_pipe(
    struct ia_css_stream *stream)
{
	if (stream->config.continuous)
		return stream_get_copy_pipe(stream);

	return stream_get_last_pipe(stream);
}
/*
 * Apply @func ((un)register with the CSI receiver) to every valid
 * virtual isys stream of @stream.  Only meaningful for buffered-sensor
 * input.  Returns 0 when all applications succeeded, -EINVAL otherwise.
 */
static int stream_csi_rx_helper(
    struct ia_css_stream *stream,
    int (*func)(enum mipi_port_id, uint32_t))
{
	int retval = -EINVAL;
	u32 sp_thread_id, stream_id;
	bool rc;
	struct ia_css_pipe *target_pipe = NULL;

	if ((!stream) || (stream->config.mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR))
		goto exit;

	target_pipe = stream_get_target_pipe(stream);

	if (!target_pipe)
		goto exit;

	rc = ia_css_pipeline_get_sp_thread_id(
		 ia_css_pipe_get_pipe_num(target_pipe),
		 &sp_thread_id);

	if (!rc)
		goto exit;

	/* (un)register all valid "virtual isys streams" within the ia_css_stream */
	stream_id = 0;
	do {
		if (stream->config.isys_config[stream_id].valid) {
			u32 isys_stream_id = ia_css_isys_generate_stream_id(sp_thread_id, stream_id);

			retval = func(stream->config.source.port.port, isys_stream_id);
		}
		stream_id++;
		/*
		 * NOTE(review): the loop continues only while retval == 0 and
		 * retval starts at -EINVAL, so if isys_config[0] is not valid
		 * the loop exits after the first iteration without visiting
		 * later entries — confirm this is the intended behavior.
		 */
	} while ((retval == 0) &&
		 (stream_id < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH));

exit:
	return retval;
}
/* Register every valid isys stream of @stream with the CSI receiver. */
static inline int stream_register_with_csi_rx(
    struct ia_css_stream *stream)
{
	return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_register_stream);
}
/* Unregister every valid isys stream of @stream from the CSI receiver. */
static inline int stream_unregister_with_csi_rx(
    struct ia_css_stream *stream)
{
	return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_unregister_stream);
}
#endif
/*
 * Start metrics collection for @binary and, on ISP2400, reprogram the
 * CSI receiver if the stream requested reconfiguration.
 */
static void
start_binary(struct ia_css_pipe *pipe,
	     struct ia_css_binary *binary)
{
	assert(pipe);
	/* Acceleration uses firmware, the binary thus can be NULL */
	if (binary)
		sh_css_metrics_start_binary(&binary->metrics);

#if !defined(ISP2401)
	/* 2400 only: the CSI receiver is (re)configured from the host */
	if (pipe->stream->reconfigure_css_rx) {
		ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
					 pipe->stream->config.mode);
		pipe->stream->reconfigure_css_rx = false;
	}
#endif
}
/* start the copy function on the SP */
static int
start_copy_on_sp(struct ia_css_pipe *pipe,
		 struct ia_css_frame *out_frame)
{
	(void)out_frame;	/* historic no-op cast; out_frame is used below */

	if ((!pipe) || (!pipe->stream))
		return -EINVAL;

#if !defined(ISP2401)
	/* 2400: quiesce the CSI receiver before reconfiguring it */
	if (pipe->stream->reconfigure_css_rx)
		ia_css_isys_rx_disable();
#endif

	/* the SP binary copy path only supports BINARY_8 input */
	if (pipe->stream->config.input_config.format != ATOMISP_INPUT_FORMAT_BINARY_8)
		return -EINVAL;
	sh_css_sp_start_binary_copy(ia_css_pipe_get_pipe_num(pipe), out_frame, pipe->stream->config.pixels_per_clock == 2);

#if !defined(ISP2401)
	/* 2400 only: reprogram the CSI receiver after the copy started */
	if (pipe->stream->reconfigure_css_rx) {
		ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
					 pipe->stream->config.mode);
		pipe->stream->reconfigure_css_rx = false;
	}
#endif

	return 0;
}
/* Reset all binary arguments to their defaults (NULL frames, copy_output on). */
void sh_css_binary_args_reset(struct sh_css_binary_args *args)
{
	unsigned int idx;

	args->in_frame = NULL;
	args->out_vf_frame = NULL;
	args->copy_vf = false;
	args->copy_output = true;
	args->vf_downscale_log2 = 0;

	for (idx = 0; idx < NUM_VIDEO_TNR_FRAMES; idx++)
		args->tnr_frames[idx] = NULL;
	for (idx = 0; idx < MAX_NUM_VIDEO_DELAY_FRAMES; idx++)
		args->delay_frames[idx] = NULL;
	for (idx = 0; idx < IA_CSS_BINARY_MAX_OUTPUT_PORTS; idx++)
		args->out_frame[idx] = NULL;
}
/*
 * Kick off pipe @me on the SP: program the SP pipeline with the pipe's
 * configuration and, for non-copy pipes, make the first stage current
 * and start its binary.
 */
static void start_pipe(
    struct ia_css_pipe *me,
    enum sh_css_pipe_config_override copy_ovrd,
    enum ia_css_input_mode input_mode)
{
	IA_CSS_ENTER_PRIVATE("me = %p, copy_ovrd = %d, input_mode = %d",
			     me, copy_ovrd, input_mode);

	assert(me); /* all callers are in this file and call with non null argument */

	sh_css_sp_init_pipeline(&me->pipeline,
				me->mode,
				(uint8_t)ia_css_pipe_get_pipe_num(me),
				me->config.default_capture_config.enable_xnr != 0,
				me->stream->config.pixels_per_clock == 2,
				me->stream->config.continuous,
				false,
				me->required_bds_factor,
				copy_ovrd,
				input_mode,
				&me->stream->config.metadata_config,
				&me->stream->info.metadata_info
				/* memory input has no MIPI port */
				, (input_mode == IA_CSS_INPUT_MODE_MEMORY) ?
				(enum mipi_port_id)0 :
				me->stream->config.source.port.port);

	if (me->config.mode != IA_CSS_PIPE_MODE_COPY) {
		struct ia_css_pipeline_stage *stage;

		/* make the first stage current and start its binary */
		stage = me->pipeline.stages;
		if (stage) {
			me->pipeline.current_stage = stage;
			start_binary(me, stage->binary);
		}
	}
	IA_CSS_LEAVE_PRIVATE("void");
}
/* Drop the cached shading table of every pipe in @stream. */
void
sh_css_invalidate_shading_tables(struct ia_css_stream *stream)
{
	int pipe_idx;

	assert(stream);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "sh_css_invalidate_shading_tables() enter:\n");

	for (pipe_idx = 0; pipe_idx < stream->num_pipes; pipe_idx++) {
		assert(stream->pipes[pipe_idx]);
		sh_css_pipe_free_shading_table(stream->pipes[pipe_idx]);
	}

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "sh_css_invalidate_shading_tables() leave: return_void\n");
}
/*
 * Enable the IRQ channels the host uses: SP-idle, the two SW channels
 * carrying ISYS/PSYS events, and (2400 only) all CSI receiver
 * interrupts.  @irq_type selects level vs pulse triggering on IRQ0.
 */
static void
enable_interrupts(enum ia_css_irq_type irq_type)
{
#ifndef ISP2401
	enum mipi_port_id port;
#endif
	bool enable_pulse = irq_type != IA_CSS_IRQ_TYPE_EDGE;

	IA_CSS_ENTER_PRIVATE("");
	/* Enable IRQ on the SP which signals that SP goes to idle
	 * (aka ready state) */
	cnd_sp_irq_enable(SP0_ID, true);
	/* Set the IRQ device 0 to either level or pulse */
	irq_enable_pulse(IRQ0_ID, enable_pulse);

	cnd_virq_enable_channel(virq_sp, true);

	/* Enable SW interrupt 0, this is used to signal ISYS events */
	cnd_virq_enable_channel(
	    (enum virq_id)(IRQ_SW_CHANNEL0_ID + IRQ_SW_CHANNEL_OFFSET),
	    true);
	/* Enable SW interrupt 1, this is used to signal PSYS events */
	cnd_virq_enable_channel(
	    (enum virq_id)(IRQ_SW_CHANNEL1_ID + IRQ_SW_CHANNEL_OFFSET),
	    true);

#ifndef ISP2401
	/* 2400 only: CSI receiver interrupts for every MIPI port */
	for (port = 0; port < N_MIPI_PORT_ID; port++)
		ia_css_isys_rx_enable_all_interrupts(port);
#endif

	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Populate the SP-control configuration from the firmware blob info.
 *
 * @fw:         firmware descriptor providing blob layout and SP info
 * @program:    name of the SP program (pointer stored as-is, not copied)
 * @spctrl_cfg: output configuration; only valid when true is returned
 *
 * Returns false when @fw or @spctrl_cfg is NULL.
 *
 * Fix: dropped the dead store "spctrl_cfg->sp_entry = 0;" — the field
 * was unconditionally overwritten at the end of the function.
 */
static bool sh_css_setup_spctrl_config(const struct ia_css_fw_info *fw,
				       const char *program,
				       ia_css_spctrl_cfg *spctrl_cfg)
{
	if ((!fw) || (!spctrl_cfg))
		return false;

	spctrl_cfg->program_name = (char *)(program);

	/* DDR/DMEM layout of the firmware data sections */
	spctrl_cfg->ddr_data_offset = fw->blob.data_source;
	spctrl_cfg->dmem_data_addr = fw->blob.data_target;
	spctrl_cfg->dmem_bss_addr = fw->blob.bss_target;
	spctrl_cfg->data_size = fw->blob.data_size;
	spctrl_cfg->bss_size = fw->blob.bss_size;

	spctrl_cfg->spctrl_config_dmem_addr = fw->info.sp.init_dmem_data;
	spctrl_cfg->spctrl_state_dmem_addr = fw->info.sp.sw_state;

	/* code section and SP entry point */
	spctrl_cfg->code_size = fw->blob.size;
	spctrl_cfg->code = fw->blob.code;
	spctrl_cfg->sp_entry = fw->info.sp.sp_entry; /* entry function ptr on SP */

	return true;
}
void
ia_css_unload_firmware(void)
{
if (sh_css_num_binaries) {
/* we have already loaded before so get rid of the old stuff */
ia_css_binary_uninit();
sh_css_unload_firmware();
}
fw_explicitly_loaded = false;
}
static void
ia_css_reset_defaults(struct sh_css *css)
{
struct sh_css default_css;
/* Reset everything to zero */
memset(&default_css, 0, sizeof(default_css));
/* Initialize the non zero values */
default_css.check_system_idle = true;
default_css.num_cont_raw_frames = NUM_CONTINUOUS_FRAMES;
/*
* All should be 0: but memset does it already.
* default_css.num_mipi_frames[N_CSI_PORTS] = 0;
*/
default_css.irq_type = IA_CSS_IRQ_TYPE_EDGE;
/* Set the defaults to the output */
*css = default_css;
}
/*
 * Load the CSS firmware image @fw and initialize binary info tables.
 * On success fw_explicitly_loaded is set so ia_css_init() may be called
 * with a NULL firmware pointer later. Returns 0 or a negative errno.
 */
int
ia_css_load_firmware(struct device *dev, const struct ia_css_env *env,
		     const struct ia_css_fw *fw)
{
	int err;

	if (!env || !fw)
		return -EINVAL;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() enter\n");

	/* make sure we initialize my_css */
	if (my_css.flush != env->cpu_mem_env.flush) {
		ia_css_reset_defaults(&my_css);
		my_css.flush = env->cpu_mem_env.flush;
	}

	ia_css_unload_firmware(); /* in case we are called twice */

	err = sh_css_load_firmware(dev, fw->data, fw->bytes);
	if (err)
		goto out;

	err = ia_css_binary_init_infos();
	if (!err)
		fw_explicitly_loaded = true;
out:
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_load_firmware() leave\n");
	return err;
}
/*
 * Bring up the CSS subsystem.
 *
 * Verifies host/firmware struct-layout compatibility, programs the MMU
 * page table base, initializes pipeline/queue/rmgr/refcount/params
 * bookkeeping, loads firmware (either @fw or one loaded earlier via
 * ia_css_load_firmware()), uploads the SP control program and configures
 * IRQs, GPIO flash pin and DMA burst size.
 *
 * @dev:         device used for firmware loading
 * @env:         host environment callbacks (memory flush, HW access, print)
 * @fw:          firmware image; may be NULL only if firmware was loaded
 *               earlier (fw_explicitly_loaded)
 * @mmu_l1_base: base index of the ISP MMU L1 page table
 * @irq_type:    edge vs. level/pulse IRQ configuration
 *
 * Returns 0 on success or a negative errno value.
 */
int
ia_css_init(struct device *dev, const struct ia_css_env *env,
	    const struct ia_css_fw *fw,
	    u32 mmu_l1_base,
	    enum ia_css_irq_type irq_type)
{
	int err;
	ia_css_spctrl_cfg spctrl_cfg;

	void (*flush_func)(struct ia_css_acc_fw *fw);
	hrt_data select, enable;

	/*
	 * The C99 standard does not specify the exact object representation of structs;
	 * the representation is compiler dependent.
	 *
	 * The structs that are communicated between host and SP/ISP should have the
	 * exact same object representation. The compiler that is used to compile the
	 * firmware is hivecc.
	 *
	 * To check if a different compiler, used to compile a host application, uses
	 * another object representation, macros are defined specifying the size of
	 * the structs as expected by the firmware.
	 *
	 * A host application shall verify that a sizeof( ) of the struct is equal to
	 * the SIZE_OF_XXX macro of the corresponding struct. If they are not
	 * equal, functionality will break.
	 */
	/* Check struct sh_css_ddr_address_map */
	COMPILATION_ERROR_IF(sizeof(struct sh_css_ddr_address_map) != SIZE_OF_SH_CSS_DDR_ADDRESS_MAP_STRUCT);
	/* Check struct host_sp_queues */
	COMPILATION_ERROR_IF(sizeof(struct host_sp_queues) != SIZE_OF_HOST_SP_QUEUES_STRUCT);
	COMPILATION_ERROR_IF(sizeof(struct ia_css_circbuf_desc_s) != SIZE_OF_IA_CSS_CIRCBUF_DESC_S_STRUCT);
	COMPILATION_ERROR_IF(sizeof(struct ia_css_circbuf_elem_s) != SIZE_OF_IA_CSS_CIRCBUF_ELEM_S_STRUCT);
	/* Check struct host_sp_communication */
	COMPILATION_ERROR_IF(sizeof(struct host_sp_communication) != SIZE_OF_HOST_SP_COMMUNICATION_STRUCT);
	COMPILATION_ERROR_IF(sizeof(struct sh_css_event_irq_mask) != SIZE_OF_SH_CSS_EVENT_IRQ_MASK_STRUCT);
	/* Check struct sh_css_hmm_buffer */
	COMPILATION_ERROR_IF(sizeof(struct sh_css_hmm_buffer) != SIZE_OF_SH_CSS_HMM_BUFFER_STRUCT);
	COMPILATION_ERROR_IF(sizeof(struct ia_css_isp_3a_statistics) != SIZE_OF_IA_CSS_ISP_3A_STATISTICS_STRUCT);
	COMPILATION_ERROR_IF(sizeof(struct ia_css_isp_dvs_statistics) != SIZE_OF_IA_CSS_ISP_DVS_STATISTICS_STRUCT);
	COMPILATION_ERROR_IF(sizeof(struct ia_css_metadata) != SIZE_OF_IA_CSS_METADATA_STRUCT);
	/* Check struct ia_css_init_dmem_cfg */
	COMPILATION_ERROR_IF(sizeof(struct ia_css_sp_init_dmem_cfg) != SIZE_OF_IA_CSS_SP_INIT_DMEM_CFG_STRUCT);

	/* Firmware must be given here or have been loaded explicitly before */
	if (!fw && !fw_explicitly_loaded)
		return -EINVAL;
	if (!env)
		return -EINVAL;

	sh_css_printf = env->print_env.debug_print;

	IA_CSS_ENTER("void");

	flush_func = env->cpu_mem_env.flush;

	pipe_global_init();
	ia_css_pipeline_init();
	ia_css_queue_map_init();

	ia_css_device_access_init(&env->hw_access_env);

	/*
	 * Compute the GPIO select/enable values for the flash pin now;
	 * they are written back after interrupts are set up below.
	 */
	select = gpio_reg_load(GPIO0_ID, _gpio_block_reg_do_select)
	& (~GPIO_FLASH_PIN_MASK);
	enable = gpio_reg_load(GPIO0_ID, _gpio_block_reg_do_e)
	| GPIO_FLASH_PIN_MASK;
	sh_css_mmu_set_page_table_base_index(mmu_l1_base);
	my_css_save.mmu_base = mmu_l1_base;

	ia_css_reset_defaults(&my_css);

	my_css_save.driver_env = *env;
	my_css.flush = flush_func;

	err = ia_css_rmgr_init();
	if (err) {
		IA_CSS_LEAVE_ERR(err);
		return err;
	}

	IA_CSS_LOG("init: %d", my_css_save_initialized);

	/* One-time initialization of the save area (survives re-init) */
	if (!my_css_save_initialized) {
		my_css_save_initialized = true;
		my_css_save.mode = sh_css_mode_working;
		memset(my_css_save.stream_seeds, 0,
		       sizeof(struct sh_css_stream_seed) * MAX_ACTIVE_STREAMS);
		IA_CSS_LOG("init: %d mode=%d", my_css_save_initialized, my_css_save.mode);
	}

	mipi_init();

	/*
	 * In case this has been programmed already, update internal
	 * data structure ...
	 * DEPRECATED
	 */
	if (!IS_ISP2401)
		my_css.page_table_base_index = mmu_get_page_table_base_index(MMU0_ID);

	my_css.irq_type = irq_type;

	my_css_save.irq_type = irq_type;

	enable_interrupts(my_css.irq_type);

	/* configure GPIO to output mode */
	gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_select, select);
	gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_e, enable);
	gpio_reg_store(GPIO0_ID, _gpio_block_reg_do_0, 0);

	err = ia_css_refcount_init(REFCOUNT_SIZE);
	if (err) {
		IA_CSS_LEAVE_ERR(err);
		return err;
	}
	err = sh_css_params_init();
	if (err) {
		IA_CSS_LEAVE_ERR(err);
		return err;
	}
	if (fw) {
		ia_css_unload_firmware(); /* in case we already had firmware loaded */
		err = sh_css_load_firmware(dev, fw->data, fw->bytes);
		if (err) {
			IA_CSS_LEAVE_ERR(err);
			return err;
		}
		err = ia_css_binary_init_infos();
		if (err) {
			IA_CSS_LEAVE_ERR(err);
			return err;
		}
		/* fw came through this call, not ia_css_load_firmware() */
		fw_explicitly_loaded = false;

		my_css_save.loaded_fw = (struct ia_css_fw *)fw;
	}
	if (!sh_css_setup_spctrl_config(&sh_css_sp_fw, SP_PROG_NAME, &spctrl_cfg))
		return -EINVAL;

	err = ia_css_spctrl_load_fw(SP0_ID, &spctrl_cfg);
	if (err) {
		IA_CSS_LEAVE_ERR(err);
		return err;
	}

	if (!sh_css_hrt_system_is_idle()) {
		IA_CSS_LEAVE_ERR(-EBUSY);
		return -EBUSY;
	}
	/*
	 * can be called here, queuing works, but:
	 * - when sp is started later, it will wipe queued items
	 * so for now we leave it for later and make sure
	 * updates are not called to frequently.
	 * sh_css_init_buffer_queues();
	 */

	if (IS_ISP2401)
		gp_device_reg_store(GP_DEVICE0_ID, _REG_GP_SWITCH_ISYS2401_ADDR, 1);

	if (!IS_ISP2401)
		dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
				       ISP2400_DMA_MAX_BURST_LENGTH);
	else
		dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
				       ISP2401_DMA_MAX_BURST_LENGTH);

	if (ia_css_isys_init() != INPUT_SYSTEM_ERR_NO_ERROR)
		err = -EINVAL;

	sh_css_params_map_and_store_default_gdc_lut();

	IA_CSS_LEAVE_ERR(err);
	return err;
}
/*
 * Enable or disable the ISYS event queue on the SP. Refuses with -EBUSY
 * while the SP is running; returns 0 otherwise.
 */
int
ia_css_enable_isys_event_queue(bool enable)
{
	bool sp_running = sh_css_sp_is_running();

	if (sp_running)
		return -EBUSY;

	sh_css_sp_enable_isys_event_queue(enable);
	return 0;
}
/*
* Mapping sp threads. Currently, this is done when a stream is created and
* pipelines are ready to be converted to sp pipelines. Be careful if you are
* doing it from stream_create since we could run out of sp threads due to
* allocation on inactive pipelines.
*/
static int
map_sp_threads(struct ia_css_stream *stream, bool map)
{
	struct ia_css_pipe *aux_copy_pipe = NULL;
	struct ia_css_pipe *aux_capture_pipe = NULL;
	struct ia_css_pipe *lead_pipe;
	enum ia_css_pipe_id pipe_id;
	int err = 0;

	IA_CSS_ENTER_PRIVATE("stream = %p, map = %s",
			     stream, map ? "true" : "false");

	if (!stream) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	lead_pipe = stream->last_pipe;
	pipe_id = lead_pipe->mode;

	/* Map the main (last) pipe first */
	ia_css_pipeline_map(lead_pipe->pipe_num, map);

	/* Preview and video pipes may own auxiliary copy/capture pipes */
	if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
		aux_copy_pipe = lead_pipe->pipe_settings.preview.copy_pipe;
		aux_capture_pipe = lead_pipe->pipe_settings.preview.capture_pipe;
	} else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) {
		aux_copy_pipe = lead_pipe->pipe_settings.video.copy_pipe;
		aux_capture_pipe = lead_pipe->pipe_settings.video.capture_pipe;
	}

	if (aux_capture_pipe)
		ia_css_pipeline_map(aux_capture_pipe->pipe_num, map);

	/* Firmware expects copy pipe to be the last pipe mapped. (if needed) */
	if (aux_copy_pipe)
		ia_css_pipeline_map(aux_copy_pipe->pipe_num, map);

	/* DH regular multi pipe - not continuous mode: map the next pipes too */
	if (!stream->config.continuous) {
		int i;

		for (i = 1; i < stream->num_pipes; i++)
			ia_css_pipeline_map(stream->pipes[i]->pipe_num, map);
	}

	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/*
* creates a host pipeline skeleton for all pipes in a stream. Called during
* stream_create.
*/
/*
 * Create the host pipeline skeleton for the main pipe of @stream and for
 * its auxiliary copy/capture pipes, plus (in non-continuous mode) the
 * remaining pipes of the stream. Called during stream_create.
 *
 * Fixes: repaired HTML-entity corruption — `©_pipe->pipeline` was a
 * garbled `&copy_pipe->pipeline`; also replaced the Yoda-style
 * `0 == err` comparison with the kernel-idiomatic `!err`.
 *
 * Returns 0 on success or a negative errno value.
 */
static int
create_host_pipeline_structure(struct ia_css_stream *stream)
{
	struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL;
	enum ia_css_pipe_id pipe_id;
	struct ia_css_pipe *main_pipe = NULL;
	int err = 0;
	unsigned int copy_pipe_delay = 0,
	capture_pipe_delay = 0;

	IA_CSS_ENTER_PRIVATE("stream = %p", stream);

	if (!stream) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	main_pipe = stream->last_pipe;
	if (!main_pipe) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	pipe_id = main_pipe->mode;

	switch (pipe_id) {
	case IA_CSS_PIPE_ID_PREVIEW:
		copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
		copy_pipe_delay = main_pipe->dvs_frame_delay;
		capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
		capture_pipe_delay = IA_CSS_FRAME_DELAY_0;
		err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode,
					     main_pipe->pipe_num, main_pipe->dvs_frame_delay);
		break;

	case IA_CSS_PIPE_ID_VIDEO:
		copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
		copy_pipe_delay = main_pipe->dvs_frame_delay;
		capture_pipe = main_pipe->pipe_settings.video.capture_pipe;
		capture_pipe_delay = IA_CSS_FRAME_DELAY_0;
		err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode,
					     main_pipe->pipe_num, main_pipe->dvs_frame_delay);
		break;

	case IA_CSS_PIPE_ID_CAPTURE:
		capture_pipe = main_pipe;
		capture_pipe_delay = main_pipe->dvs_frame_delay;
		break;

	case IA_CSS_PIPE_ID_YUVPP:
		err = ia_css_pipeline_create(&main_pipe->pipeline, main_pipe->mode,
					     main_pipe->pipe_num, main_pipe->dvs_frame_delay);
		break;

	default:
		err = -EINVAL;
	}

	if (!err && copy_pipe)
		err = ia_css_pipeline_create(&copy_pipe->pipeline,
					     copy_pipe->mode,
					     copy_pipe->pipe_num,
					     copy_pipe_delay);

	if (!err && capture_pipe)
		err = ia_css_pipeline_create(&capture_pipe->pipeline,
					     capture_pipe->mode,
					     capture_pipe->pipe_num,
					     capture_pipe_delay);

	/* DH regular multi pipe - not continuous mode: create the next pipelines too */
	if (!stream->config.continuous) {
		int i;

		for (i = 1; i < stream->num_pipes && !err; i++) {
			main_pipe = stream->pipes[i];
			err = ia_css_pipeline_create(&main_pipe->pipeline,
						     main_pipe->mode,
						     main_pipe->pipe_num,
						     main_pipe->dvs_frame_delay);
		}
	}

	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/*
* creates a host pipeline for all pipes in a stream. Called during
* stream_start.
*/
/*
 * Build the actual host pipelines for all pipes of @stream (continuous
 * frames, MIPI frames, then per-mode pipeline construction). Called
 * during stream_start. Returns 0 or a negative errno.
 */
static int
create_host_pipeline(struct ia_css_stream *stream)
{
	struct ia_css_pipe *copy_pipe = NULL, *capture_pipe = NULL;
	enum ia_css_pipe_id pipe_id;
	struct ia_css_pipe *main_pipe = NULL;
	int err = 0;
	unsigned int max_input_width = 0;

	IA_CSS_ENTER_PRIVATE("stream = %p", stream);
	if (!stream) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	main_pipe = stream->last_pipe;
	pipe_id = main_pipe->mode;

	/*
	 * No continuous frame allocation for capture pipe. It uses the
	 * "main" pipe's frames.
	 */
	if ((pipe_id == IA_CSS_PIPE_ID_PREVIEW) ||
	    (pipe_id == IA_CSS_PIPE_ID_VIDEO)) {
		/*
		 * About
		 * pipe_id == IA_CSS_PIPE_ID_PREVIEW &&
		 * stream->config.mode != IA_CSS_INPUT_MODE_MEMORY:
		 *
		 * The original condition pipe_id == IA_CSS_PIPE_ID_PREVIEW is
		 * too strong. E.g. in SkyCam (with memory based input frames)
		 * there is no continuous mode and thus no need for allocated
		 * continuous frames.
		 * This is not only for SkyCam but for all preview cases that
		 * use DDR based input frames. For this reason the
		 * stream->config.mode != IA_CSS_INPUT_MODE_MEMORY has beed
		 * added.
		 */
		if (stream->config.continuous ||
		    (pipe_id == IA_CSS_PIPE_ID_PREVIEW &&
		     stream->config.mode != IA_CSS_INPUT_MODE_MEMORY)) {
			err = alloc_continuous_frames(main_pipe, true);
			if (err)
				goto ERR;
		}
	}

	/* old isys: need to allocate_mipi_frames() even in IA_CSS_PIPE_MODE_COPY */
	if (!IS_ISP2401 || main_pipe->config.mode != IA_CSS_PIPE_MODE_COPY) {
		err = allocate_mipi_frames(main_pipe, &stream->info);
		if (err)
			goto ERR;
	}

	/* Build the main pipeline; remember aux pipes for the steps below */
	switch (pipe_id) {
	case IA_CSS_PIPE_ID_PREVIEW:
		copy_pipe = main_pipe->pipe_settings.preview.copy_pipe;
		capture_pipe = main_pipe->pipe_settings.preview.capture_pipe;
		max_input_width =
		    main_pipe->pipe_settings.preview.preview_binary.info->sp.input.max_width;

		err = create_host_preview_pipeline(main_pipe);
		if (err)
			goto ERR;

		break;

	case IA_CSS_PIPE_ID_VIDEO:
		copy_pipe = main_pipe->pipe_settings.video.copy_pipe;
		capture_pipe = main_pipe->pipe_settings.video.capture_pipe;
		max_input_width =
		    main_pipe->pipe_settings.video.video_binary.info->sp.input.max_width;

		err = create_host_video_pipeline(main_pipe);
		if (err)
			goto ERR;

		break;

	case IA_CSS_PIPE_ID_CAPTURE:
		capture_pipe = main_pipe;

		break;

	case IA_CSS_PIPE_ID_YUVPP:
		err = create_host_yuvpp_pipeline(main_pipe);
		if (err)
			goto ERR;

		break;

	default:
		err = -EINVAL;
	}
	if (err)
		goto ERR;

	/* Auxiliary pipes are created after the main one */
	if (copy_pipe) {
		err = create_host_copy_pipeline(copy_pipe, max_input_width,
						main_pipe->continuous_frames[0]);
		if (err)
			goto ERR;
	}

	if (capture_pipe) {
		err = create_host_capture_pipeline(capture_pipe);
		if (err)
			goto ERR;
	}

	/* DH regular multi pipe - not continuous mode: create the next pipelines too */
	if (!stream->config.continuous) {
		int i;

		for (i = 1; i < stream->num_pipes && 0 == err; i++) {
			switch (stream->pipes[i]->mode) {
			case IA_CSS_PIPE_ID_PREVIEW:
				err = create_host_preview_pipeline(stream->pipes[i]);
				break;
			case IA_CSS_PIPE_ID_VIDEO:
				err = create_host_video_pipeline(stream->pipes[i]);
				break;
			case IA_CSS_PIPE_ID_CAPTURE:
				err = create_host_capture_pipeline(stream->pipes[i]);
				break;
			case IA_CSS_PIPE_ID_YUVPP:
				err = create_host_yuvpp_pipeline(stream->pipes[i]);
				break;
			default:
				err = -EINVAL;
			}
			if (err)
				goto ERR;
		}
	}

ERR:
	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/* Template objects used by init_pipe_defaults() to reset a new pipe and
 * its per-mode settings to known-good defaults. */
static const struct ia_css_pipe default_pipe = IA_CSS_DEFAULT_PIPE;
static const struct ia_css_preview_settings preview = IA_CSS_DEFAULT_PREVIEW_SETTINGS;
static const struct ia_css_capture_settings capture = IA_CSS_DEFAULT_CAPTURE_SETTINGS;
static const struct ia_css_video_settings video = IA_CSS_DEFAULT_VIDEO_SETTINGS;
static const struct ia_css_yuvpp_settings yuvpp = IA_CSS_DEFAULT_YUVPP_SETTINGS;
/*
 * Reset @pipe to the default template and select its internal pipe id
 * (and per-mode settings) from the requested @mode. A capture pipe that
 * is a @copy_pipe gets the COPY id instead. Returns 0 or -EINVAL.
 */
static int
init_pipe_defaults(enum ia_css_pipe_mode mode,
		   struct ia_css_pipe *pipe,
		   bool copy_pipe)
{
	if (!pipe) {
		IA_CSS_ERROR("NULL pipe parameter");
		return -EINVAL;
	}

	/* Start from the pre-defined default pipe template */
	*pipe = default_pipe;

	/* TODO: JB should not be needed, but temporary backward reference */
	switch (mode) {
	case IA_CSS_PIPE_MODE_PREVIEW:
		pipe->mode = IA_CSS_PIPE_ID_PREVIEW;
		pipe->pipe_settings.preview = preview;
		break;
	case IA_CSS_PIPE_MODE_CAPTURE:
		pipe->mode = copy_pipe ? IA_CSS_PIPE_ID_COPY
				       : IA_CSS_PIPE_ID_CAPTURE;
		pipe->pipe_settings.capture = capture;
		break;
	case IA_CSS_PIPE_MODE_VIDEO:
		pipe->mode = IA_CSS_PIPE_ID_VIDEO;
		pipe->pipe_settings.video = video;
		break;
	case IA_CSS_PIPE_MODE_COPY:
		pipe->mode = IA_CSS_PIPE_ID_CAPTURE;
		break;
	case IA_CSS_PIPE_MODE_YUVPP:
		pipe->mode = IA_CSS_PIPE_ID_YUVPP;
		pipe->pipe_settings.yuvpp = yuvpp;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* Reset the global pipe registry: no pipes allocated, all slots empty. */
static void
pipe_global_init(void)
{
	unsigned int slot;

	my_css.pipe_counter = 0;
	for (slot = 0; slot < IA_CSS_PIPELINE_NUM_MAX; slot++)
		my_css.all_pipes[slot] = NULL;
}
/*
 * Reserve the first free slot in my_css.all_pipes for @pipe and report
 * its index via @pipe_number. Returns 0, -EINVAL on a NULL pipe, or
 * -ENOSPC when all slots are taken.
 */
static int
pipe_generate_pipe_num(const struct ia_css_pipe *pipe,
		       unsigned int *pipe_number)
{
	u8 slot;

	if (!pipe) {
		IA_CSS_ERROR("NULL pipe parameter");
		return -EINVAL;
	}

	/* Assign a new pipe_num .... search for empty place */
	for (slot = 0; slot < IA_CSS_PIPELINE_NUM_MAX; slot++) {
		if (my_css.all_pipes[slot])
			continue;
		/* position is reserved */
		my_css.all_pipes[slot] = (struct ia_css_pipe *)pipe;
		break;
	}

	if (slot == IA_CSS_PIPELINE_NUM_MAX) {
		/* Max number of pipes already allocated */
		IA_CSS_ERROR("Max number of pipes already created");
		return -ENOSPC;
	}

	my_css.pipe_counter++;

	IA_CSS_LOG("pipe_num (%d)", slot);

	*pipe_number = slot;
	return 0;
}
/* Return slot @pipe_num to the global pipe registry (inverse of
 * pipe_generate_pipe_num()). */
static void
pipe_release_pipe_num(unsigned int pipe_num)
{
	my_css.all_pipes[pipe_num] = NULL;
	my_css.pipe_counter--;
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "pipe_release_pipe_num (%d)\n", pipe_num);
}
/*
 * Allocate a new pipe, initialize it for @mode (with @copy_pipe flagging
 * an internal copy pipe) and register it in the global pipe table.
 * On success *@pipe receives the new pipe; on failure it is untouched.
 */
static int
create_pipe(enum ia_css_pipe_mode mode,
	    struct ia_css_pipe **pipe,
	    bool copy_pipe)
{
	struct ia_css_pipe *new_pipe;
	int err;

	if (!pipe) {
		IA_CSS_ERROR("NULL pipe parameter");
		return -EINVAL;
	}

	new_pipe = kmalloc(sizeof(*new_pipe), GFP_KERNEL);
	if (!new_pipe)
		return -ENOMEM;

	err = init_pipe_defaults(mode, new_pipe, copy_pipe);
	if (err)
		goto free_pipe;

	err = pipe_generate_pipe_num(new_pipe, &new_pipe->pipe_num);
	if (err)
		goto free_pipe;

	*pipe = new_pipe;
	return 0;

free_pipe:
	kfree(new_pipe);
	return err;
}
/* Look up a registered pipe by its pipe number; NULL when not found. */
struct ia_css_pipe *
find_pipe_by_num(uint32_t pipe_num)
{
	unsigned int slot;

	for (slot = 0; slot < IA_CSS_PIPELINE_NUM_MAX; slot++) {
		struct ia_css_pipe *candidate = my_css.all_pipes[slot];

		if (candidate &&
		    ia_css_pipe_get_pipe_num(candidate) == pipe_num)
			return candidate;
	}
	return NULL;
}
/*
 * Destroy @pipe: free its mode-specific frame/metadata buffers (and, for
 * preview/video, the internal copy pipe), release the scaler LUT and
 * shading table, tear down the pipeline and unregister the pipe number,
 * then free the pipe itself.
 *
 * Fails with -EINVAL on a NULL pipe or if the pipe is still attached to
 * a stream (ia_css_stream_destroy() must run first).
 */
int
ia_css_pipe_destroy(struct ia_css_pipe *pipe)
{
	int err = 0;

	IA_CSS_ENTER("pipe = %p", pipe);

	if (!pipe) {
		IA_CSS_LEAVE_ERR(-EINVAL);
		return -EINVAL;
	}

	if (pipe->stream) {
		IA_CSS_LOG("ia_css_stream_destroy not called!");
		IA_CSS_LEAVE_ERR(-EINVAL);
		return -EINVAL;
	}

	switch (pipe->config.mode) {
	case IA_CSS_PIPE_MODE_PREVIEW:
		/*
		 * need to take into account that this function is also called
		 * on the internal copy pipe
		 */
		if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW) {
			ia_css_frame_free_multiple(NUM_CONTINUOUS_FRAMES,
						   pipe->continuous_frames);
			ia_css_metadata_free_multiple(NUM_CONTINUOUS_FRAMES,
						      pipe->cont_md_buffers);
			if (pipe->pipe_settings.preview.copy_pipe) {
				err = ia_css_pipe_destroy(pipe->pipe_settings.preview.copy_pipe);
				ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
						    "ia_css_pipe_destroy(): destroyed internal copy pipe err=%d\n",
						    err);
			}
		}
		break;
	case IA_CSS_PIPE_MODE_VIDEO:
		if (pipe->mode == IA_CSS_PIPE_ID_VIDEO) {
			ia_css_frame_free_multiple(NUM_CONTINUOUS_FRAMES,
						   pipe->continuous_frames);
			ia_css_metadata_free_multiple(NUM_CONTINUOUS_FRAMES,
						      pipe->cont_md_buffers);
			if (pipe->pipe_settings.video.copy_pipe) {
				err = ia_css_pipe_destroy(pipe->pipe_settings.video.copy_pipe);
				ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
						    "ia_css_pipe_destroy(): destroyed internal copy pipe err=%d\n",
						    err);
			}
		}
		/* Video also owns TNR and delay frames */
		ia_css_frame_free_multiple(NUM_VIDEO_TNR_FRAMES,
					   pipe->pipe_settings.video.tnr_frames);
		ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES,
					   pipe->pipe_settings.video.delay_frames);
		break;
	case IA_CSS_PIPE_MODE_CAPTURE:
		ia_css_frame_free_multiple(MAX_NUM_VIDEO_DELAY_FRAMES,
					   pipe->pipe_settings.capture.delay_frames);
		break;
	case IA_CSS_PIPE_MODE_COPY:
		break;
	case IA_CSS_PIPE_MODE_YUVPP:
		break;
	}

	if (pipe->scaler_pp_lut != mmgr_NULL) {
		hmm_free(pipe->scaler_pp_lut);
		pipe->scaler_pp_lut = mmgr_NULL;
	}

	my_css.active_pipes[ia_css_pipe_get_pipe_num(pipe)] = NULL;
	sh_css_pipe_free_shading_table(pipe);

	ia_css_pipeline_destroy(&pipe->pipeline);
	pipe_release_pipe_num(ia_css_pipe_get_pipe_num(pipe));

	kfree(pipe);
	IA_CSS_LEAVE("err = %d", err);
	return err;
}
/*
 * Tear down the CSS subsystem: release parameters, refcounts and
 * resource manager state, unload firmware (unless explicitly loaded by
 * the caller), stop the SP and free remaining MIPI frames.
 * Counterpart of ia_css_init().
 */
void
ia_css_uninit(void)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() enter: void\n");

	sh_css_params_free_default_gdc_lut();

	/* TODO: JB: implement decent check and handling of freeing mipi frames */
	if (!mipi_is_free())
		dev_warn(atomisp_dev, "mipi frames are not freed.\n");

	/* cleanup generic data */
	sh_css_params_uninit();
	ia_css_refcount_uninit();

	ia_css_rmgr_uninit();

#if !defined(ISP2401)
	/* needed for reprogramming the inputformatter after power cycle of css */
	ifmtr_set_if_blocking_mode_reset = true;
#endif

	/* Explicitly loaded firmware stays owned by the caller */
	if (!fw_explicitly_loaded)
		ia_css_unload_firmware();

	ia_css_spctrl_unload_fw(SP0_ID);
	sh_css_sp_set_sp_running(false);
	/* check and free any remaining mipi frames */
	free_mipi_frames(NULL);

	sh_css_sp_reset_global_vars();

	ia_css_isys_uninit();

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_uninit() leave: return_void\n");
}
/*
 * Drain all pending virtual IRQ channels and translate them into a
 * bitmask of IA_CSS_IRQ_INFO_* flags, written to *@irq_infos (may be
 * NULL, in which case the infos are discarded).
 *
 * Returns 0 on success, -EINVAL when the IRQ controller reports an
 * error while reading a channel id.
 */
int ia_css_irq_translate(
    unsigned int *irq_infos)
{
	enum virq_id    irq;
	enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_more_irqs;
	unsigned int infos = 0;

	/* irq_infos can be NULL, but that would make the function useless */
	/* assert(irq_infos != NULL); */
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_irq_translate() enter: irq_infos=%p\n", irq_infos);

	/* Keep reading channel ids until the controller says "no more" */
	while (status == hrt_isp_css_irq_status_more_irqs) {
		status = virq_get_channel_id(&irq);
		if (status == hrt_isp_css_irq_status_error)
			return -EINVAL;

		switch (irq) {
		case virq_sp:
			/*
			 * When SP goes to idle, info is available in the
			 * event queue.
			 */
			infos |= IA_CSS_IRQ_INFO_EVENTS_READY;
			break;
		case virq_isp:
			break;
		case virq_isys_sof:
			infos |= IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF;
			break;
		case virq_isys_eof:
			infos |= IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF;
			break;
		case virq_isys_csi:
			infos |= IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR;
			break;
		case virq_ifmt0_id:
			/* input formatter exists only on the old input system */
			if (!IS_ISP2401)
				infos |= IA_CSS_IRQ_INFO_IF_ERROR;
			break;
		case virq_dma:
			infos |= IA_CSS_IRQ_INFO_DMA_ERROR;
			break;
		case virq_sw_pin_0:
			infos |= sh_css_get_sw_interrupt_value(0);
			break;
		case virq_sw_pin_1:
			infos |= sh_css_get_sw_interrupt_value(1);
			/* pqiao TODO: also assumption here */
			break;
		default:
			break;
		}
	}

	if (irq_infos)
		*irq_infos = infos;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_irq_translate() leave: irq_infos=%u\n",
			    infos);

	return 0;
}
/*
 * Enable or disable the virtual IRQ channel corresponding to @info.
 * On ISP2401 the receiver/input-system/IF IRQs do not exist and are
 * silently ignored (returns 0). Unknown infos yield -EINVAL.
 */
int ia_css_irq_enable(
    enum ia_css_irq_info info,
    bool enable)
{
	enum virq_id irq = N_virq_id;
	bool not_on_2401 =
	    info == IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF ||
	    info == IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF ||
	    info == IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR ||
	    info == IA_CSS_IRQ_INFO_IF_ERROR;

	IA_CSS_ENTER("info=%d, enable=%d", info, enable);

	if (IS_ISP2401 && not_on_2401)
		/* Just ignore those unused IRQs without printing errors */
		return 0;

	switch (info) {
	case IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF:
		irq = virq_isys_sof;
		break;
	case IA_CSS_IRQ_INFO_CSS_RECEIVER_EOF:
		irq = virq_isys_eof;
		break;
	case IA_CSS_IRQ_INFO_INPUT_SYSTEM_ERROR:
		irq = virq_isys_csi;
		break;
	case IA_CSS_IRQ_INFO_IF_ERROR:
		irq = virq_ifmt0_id;
		break;
	case IA_CSS_IRQ_INFO_DMA_ERROR:
		irq = virq_dma;
		break;
	case IA_CSS_IRQ_INFO_SW_0:
		irq = virq_sw_pin_0;
		break;
	case IA_CSS_IRQ_INFO_SW_1:
		irq = virq_sw_pin_1;
		break;
	default:
		IA_CSS_LEAVE_ERR(-EINVAL);
		return -EINVAL;
	}

	cnd_virq_enable_channel(irq, enable);

	IA_CSS_LEAVE_ERR(0);
	return 0;
}
/* Fetch the SW interrupt payload for channel @irq from the SP. */
static unsigned int
sh_css_get_sw_interrupt_value(unsigned int irq)
{
	unsigned int value;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "sh_css_get_sw_interrupt_value() enter: irq=%d\n", irq);
	value = sh_css_sp_get_sw_interrupt_value(irq);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "sh_css_get_sw_interrupt_value() leave: irq_value=%d\n", value);
	return value;
}
/*
* configure and load the copy binary, the next binary is used to
* determine whether the copy binary needs to do left padding.
*/
/*
 * Configure and load the copy binary for @pipe. When @next_binary is
 * given, the copy output and left padding are matched to its input;
 * otherwise the pipe's own output info is used.
 *
 * Fix: repaired HTML-entity corruption — every `©_…` token was a
 * garbled `&copy_…` address-of expression (`&copy_vf_info`,
 * `&copy_descr`, `&copy_in_info`, `&copy_out_info`).
 *
 * Returns 0 on success or the error from ia_css_binary_find().
 */
static int load_copy_binary(
    struct ia_css_pipe *pipe,
    struct ia_css_binary *copy_binary,
    struct ia_css_binary *next_binary)
{
	struct ia_css_frame_info copy_out_info, copy_in_info, copy_vf_info;
	unsigned int left_padding;
	int err;
	struct ia_css_binary_descr copy_descr;

	/* next_binary can be NULL */
	assert(pipe);
	assert(copy_binary);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "load_copy_binary() enter:\n");

	if (next_binary) {
		/* Copy feeds the next binary: mirror its input frame */
		copy_out_info = next_binary->in_frame_info;
		left_padding = next_binary->left_padding;
	} else {
		copy_out_info = pipe->output_info[0];
		copy_vf_info = pipe->vf_output_info[0];
		ia_css_frame_info_set_format(&copy_vf_info, IA_CSS_FRAME_FORMAT_YUV_LINE);
		left_padding = 0;
	}

	ia_css_pipe_get_copy_binarydesc(pipe, &copy_descr,
					&copy_in_info, &copy_out_info,
					(next_binary) ? NULL : NULL/*TODO: &copy_vf_info*/);
	err = ia_css_binary_find(&copy_descr, copy_binary);
	if (err)
		return err;

	copy_binary->left_padding = left_padding;
	return 0;
}
/*
 * (Re)allocate the continuous raw frames and metadata buffers of @pipe.
 *
 * @init_time selects how many frames are wanted (init_num_cont_raw_buf
 * at stream creation, target_num_cont_raw_buf later) and from which
 * index re-allocation starts. Frames beyond the wanted count are freed.
 *
 * Only valid for preview/video pipes; returns 0 or a negative errno.
 */
static int
alloc_continuous_frames(struct ia_css_pipe *pipe, bool init_time)
{
	int err = 0;
	struct ia_css_frame_info ref_info;
	enum ia_css_pipe_id pipe_id;
	bool continuous;
	unsigned int i, idx;
	unsigned int num_frames;

	IA_CSS_ENTER_PRIVATE("pipe = %p, init_time = %d", pipe, init_time);

	if ((!pipe) || (!pipe->stream)) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	pipe_id = pipe->mode;
	continuous = pipe->stream->config.continuous;

	if (continuous) {
		if (init_time) {
			num_frames = pipe->stream->config.init_num_cont_raw_buf;
			/* the initial allocator becomes the stream's continuous pipe */
			pipe->stream->continuous_pipe = pipe;
		} else {
			num_frames = pipe->stream->config.target_num_cont_raw_buf;
		}
	} else {
		num_frames = NUM_ONLINE_INIT_CONTINUOUS_FRAMES;
	}

	/* Frame geometry is taken from the pipe's main binary input */
	if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
		ref_info = pipe->pipe_settings.preview.preview_binary.in_frame_info;
	} else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) {
		ref_info = pipe->pipe_settings.video.video_binary.in_frame_info;
	} else {
		/* should not happen */
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	if (IS_ISP2401) {
		/* For CSI2+, the continuous frame will hold the full input frame */
		ref_info.res.width = pipe->stream->config.input_config.input_res.width;
		ref_info.res.height = pipe->stream->config.input_config.input_res.height;

		/* Ensure padded width is aligned for 2401 */
		ref_info.padded_width = CEIL_MUL(ref_info.res.width, 2 * ISP_VEC_NELEMS);
	}

	if (pipe->stream->config.pack_raw_pixels) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
				    "alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW_PACKED\n");
		ref_info.format = IA_CSS_FRAME_FORMAT_RAW_PACKED;
	} else
	{
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
				    "alloc_continuous_frames() IA_CSS_FRAME_FORMAT_RAW\n");
		ref_info.format = IA_CSS_FRAME_FORMAT_RAW;
	}

	/* Write format back to binary */
	if (pipe_id == IA_CSS_PIPE_ID_PREVIEW) {
		pipe->pipe_settings.preview.preview_binary.in_frame_info.format =
		    ref_info.format;
	} else if (pipe_id == IA_CSS_PIPE_ID_VIDEO) {
		pipe->pipe_settings.video.video_binary.in_frame_info.format = ref_info.format;
	} else {
		/* should not happen */
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	/* At init time start from 0; later only top up beyond the initial set */
	if (init_time)
		idx = 0;
	else
		idx = pipe->stream->config.init_num_cont_raw_buf;

	for (i = idx; i < NUM_CONTINUOUS_FRAMES; i++) {
		/* free previous frame */
		if (pipe->continuous_frames[i]) {
			ia_css_frame_free(pipe->continuous_frames[i]);
			pipe->continuous_frames[i] = NULL;
		}
		/* free previous metadata buffer */
		ia_css_metadata_free(pipe->cont_md_buffers[i]);
		pipe->cont_md_buffers[i] = NULL;

		/* check if new frame needed */
		if (i < num_frames) {
			/* allocate new frame */
			err = ia_css_frame_allocate_from_info(
				  &pipe->continuous_frames[i],
				  &ref_info);
			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
			/* allocate metadata buffer */
			pipe->cont_md_buffers[i] = ia_css_metadata_allocate(
						       &pipe->stream->info.metadata_info);
		}
	}
	IA_CSS_LEAVE_ERR_PRIVATE(0);
	return 0;
}
/*
 * Top up the remaining continuous frames of @stream's continuous pipe
 * (post-init allocation). Returns -EINVAL on a NULL stream.
 */
int
ia_css_alloc_continuous_frame_remain(struct ia_css_stream *stream)
{
	return stream ? alloc_continuous_frames(stream->continuous_pipe, false)
		      : -EINVAL;
}
/*
 * Select and load the binaries of a preview pipe: the preview binary
 * itself, optionally a vf_pp (viewfinder post-processing) binary and
 * optionally an ISP copy binary. Idempotent: returns immediately when
 * the preview binary is already loaded.
 *
 * Returns 0 on success or a negative errno.
 */
static int
load_preview_binaries(struct ia_css_pipe *pipe)
{
	struct ia_css_frame_info prev_in_info,
		prev_bds_out_info,
		prev_out_info,
		prev_vf_info;
	struct ia_css_binary_descr preview_descr;
	bool online;
	int err = 0;
	bool need_vf_pp = false;
	bool need_isp_copy_binary = false;
	bool sensor = false;
	bool continuous;

	/* preview only have 1 output pin now */
	struct ia_css_frame_info *pipe_out_info = &pipe->output_info[0];
	struct ia_css_preview_settings *mycs = &pipe->pipe_settings.preview;

	IA_CSS_ENTER_PRIVATE("");
	assert(pipe);
	assert(pipe->stream);
	assert(pipe->mode == IA_CSS_PIPE_ID_PREVIEW);

	online = pipe->stream->config.online;
	sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
	continuous = pipe->stream->config.continuous;

	/* Already loaded: nothing to do */
	if (mycs->preview_binary.info)
		return 0;

	err = ia_css_util_check_input(&pipe->stream->config, false, false);
	if (err)
		return err;

	err = ia_css_frame_check_info(pipe_out_info);
	if (err)
		return err;

	/*
	 * Note: the current selection of vf_pp binary and
	 * parameterization of the preview binary contains a few pieces
	 * of hardcoded knowledge. This needs to be cleaned up such that
	 * the binary selection becomes more generic.
	 * The vf_pp binary is needed if one or more of the following features
	 * are required:
	 * 1. YUV downscaling.
	 * 2. Digital zoom.
	 * 3. An output format that is not supported by the preview binary.
	 *    In practice this means something other than yuv_line or nv12.
	 * The decision if the vf_pp binary is needed for YUV downscaling is
	 * made after the preview binary selection, since some preview binaries
	 * can perform the requested YUV downscaling.
	 */
	need_vf_pp = pipe->config.enable_dz;
	need_vf_pp |= pipe_out_info->format != IA_CSS_FRAME_FORMAT_YUV_LINE &&
	!(pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12 ||
	  pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12_16 ||
	  pipe_out_info->format == IA_CSS_FRAME_FORMAT_NV12_TILEY);

	/* Preview step 1 */
	if (pipe->vf_yuv_ds_input_info.res.width)
		prev_vf_info = pipe->vf_yuv_ds_input_info;
	else
		prev_vf_info = *pipe_out_info;
	/*
	 * If vf_pp is needed, then preview must output yuv_line.
	 * The exception is when vf_pp is manually disabled, that is only
	 * used in combination with a pipeline extension that requires
	 * yuv_line as input.
	 */
	if (need_vf_pp)
		ia_css_frame_info_set_format(&prev_vf_info,
					     IA_CSS_FRAME_FORMAT_YUV_LINE);

	err = ia_css_pipe_get_preview_binarydesc(
	    pipe,
	    &preview_descr,
	    &prev_in_info,
	    &prev_bds_out_info,
	    &prev_out_info,
	    &prev_vf_info);
	if (err)
		return err;

	err = ia_css_binary_find(&preview_descr, &mycs->preview_binary);
	if (err)
		return err;

	/* The vf_pp binary is needed when (further) YUV downscaling is required */
	need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.width != pipe_out_info->res.width;
	need_vf_pp |= mycs->preview_binary.out_frame_info[0].res.height != pipe_out_info->res.height;

	/*
	 * When vf_pp is needed, then the output format of the selected
	 * preview binary must be yuv_line. If this is not the case,
	 * then the preview binary selection is done again.
	 */
	if (need_vf_pp &&
	    (mycs->preview_binary.out_frame_info[0].format != IA_CSS_FRAME_FORMAT_YUV_LINE)) {
		/* Preview step 2 */
		if (pipe->vf_yuv_ds_input_info.res.width)
			prev_vf_info = pipe->vf_yuv_ds_input_info;
		else
			prev_vf_info = *pipe_out_info;

		ia_css_frame_info_set_format(&prev_vf_info,
					     IA_CSS_FRAME_FORMAT_YUV_LINE);

		err = ia_css_pipe_get_preview_binarydesc(
		    pipe,
		    &preview_descr,
		    &prev_in_info,
		    &prev_bds_out_info,
		    &prev_out_info,
		    &prev_vf_info);
		if (err)
			return err;

		err = ia_css_binary_find(&preview_descr,
					 &mycs->preview_binary);
		if (err)
			return err;
	}

	if (need_vf_pp) {
		struct ia_css_binary_descr vf_pp_descr;

		/* Viewfinder post-processing */
		ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
						&mycs->preview_binary.out_frame_info[0],
						pipe_out_info);
		err = ia_css_binary_find(&vf_pp_descr,
					 &mycs->vf_pp_binary);
		if (err)
			return err;
	}

	if (IS_ISP2401) {
		/*
		 * When the input system is 2401, only the Direct Sensor Mode
		 * Offline Preview uses the ISP copy binary.
		 */
		need_isp_copy_binary = !online && sensor;
	} else {
		/*
		 * About pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY:
		 * This is typical the case with SkyCam (which has no input system) but it also
		 * applies to all cases where the driver chooses for memory based input frames.
		 * In these cases, a copy binary (which typical copies sensor data to DDR) does
		 * not have much use.
		 */
		need_isp_copy_binary = !online && !continuous;
	}

	/* Copy */
	if (need_isp_copy_binary) {
		err = load_copy_binary(pipe,
				       &mycs->copy_binary,
				       &mycs->preview_binary);
		if (err)
			return err;
	}

	/* Shading table will be regenerated for the new binaries */
	if (pipe->shading_table) {
		ia_css_shading_table_free(pipe->shading_table);
		pipe->shading_table = NULL;
	}

	return 0;
}
/*
 * Release the ISP parameter memory owned by @binary.  Thin wrapper kept
 * for naming symmetry with the load_*_binaries()/unload_*_binaries()
 * helpers in this file.
 */
static void
ia_css_binary_unload(struct ia_css_binary *binary)
{
    ia_css_binary_destroy_isp_parameters(binary);
}
/*
 * unload_preview_binaries() - release every binary loaded for a preview
 * pipe (copy, preview and vf_pp).
 *
 * Returns 0 on success, -EINVAL when @pipe is NULL or is not a preview
 * pipe.
 */
static int
unload_preview_binaries(struct ia_css_pipe *pipe)
{
    IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);

    if (!pipe || pipe->mode != IA_CSS_PIPE_ID_PREVIEW) {
        IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
        return -EINVAL;
    }

    /* Unloading a binary that was never loaded is harmless here. */
    ia_css_binary_unload(&pipe->pipe_settings.preview.copy_binary);
    ia_css_binary_unload(&pipe->pipe_settings.preview.preview_binary);
    ia_css_binary_unload(&pipe->pipe_settings.preview.vf_pp_binary);

    IA_CSS_LEAVE_ERR_PRIVATE(0);
    return 0;
}
/*
 * Walk the firmware chain starting at @fw (may be NULL) and return the
 * last entry whose 'output' capability is set, or NULL when none is.
 */
static const struct ia_css_fw_info *last_output_firmware(
    const struct ia_css_fw_info *fw)
{
    const struct ia_css_fw_info *winner = NULL;

    IA_CSS_ENTER_LEAVE_PRIVATE("");

    while (fw) {
        if (fw->info.isp.sp.enable.output)
            winner = fw;
        fw = fw->next;
    }
    return winner;
}
/*
 * add_firmwares() - append a chain of extension-firmware stages to
 * pipeline @me, running after the stage of @binary (@binary_mode).
 *
 * @last_fw must be the last firmware in the chain that produces an
 * output frame (see last_output_firmware()); only that stage gets
 * @out_frame wired to its first output port.  Each stage with 'output'
 * enabled feeds its out_frame[0] to the following firmware as input.
 * @my_stage / @vf_stage, when non-NULL, receive the first created stage
 * resp. the first created stage with vf_veceven enabled.
 *
 * Returns 0 on success or the first stage-creation error.
 */
static int add_firmwares(
    struct ia_css_pipeline *me,
    struct ia_css_binary *binary,
    const struct ia_css_fw_info *fw,
    const struct ia_css_fw_info *last_fw,
    unsigned int binary_mode,
    struct ia_css_frame *in_frame,
    struct ia_css_frame *out_frame,
    struct ia_css_frame *vf_frame,
    struct ia_css_pipeline_stage **my_stage,
    struct ia_css_pipeline_stage **vf_stage)
{
    int err = 0;
    struct ia_css_pipeline_stage *extra_stage = NULL;
    struct ia_css_pipeline_stage_desc stage_desc;

    /* all args can be NULL ??? */
    ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
                        "add_firmwares() enter:\n");

    for (; fw; fw = fw->next) {
        struct ia_css_frame *out[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
        struct ia_css_frame *in = NULL;
        struct ia_css_frame *vf = NULL;

        /* Only the last output-producing firmware writes the final frame. */
        if ((fw == last_fw) && (fw->info.isp.sp.enable.out_frame != 0))
            out[0] = out_frame;
        if (fw->info.isp.sp.enable.in_frame != 0)
            in = in_frame;
        /*
         * NOTE(review): this gates the viewfinder frame on the
         * 'out_frame' capability rather than a vf-specific flag; kept
         * as-is — confirm against the firmware interface definition.
         */
        if (fw->info.isp.sp.enable.out_frame != 0)
            vf = vf_frame;
        ia_css_pipe_get_firmwares_stage_desc(&stage_desc, binary,
                                             out, in, vf, fw, binary_mode);
        err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
                                                   &extra_stage);
        if (err)
            return err;
        /* Chain: this stage's output becomes the next firmware's input. */
        if (fw->info.isp.sp.enable.output != 0)
            in_frame = extra_stage->args.out_frame[0];
        if (my_stage && !*my_stage && extra_stage)
            *my_stage = extra_stage;
        if (vf_stage && !*vf_stage && extra_stage &&
            fw->info.isp.sp.enable.vf_veceven)
            *vf_stage = extra_stage;
    }
    return err;
}
/*
 * add_vf_pp_stage() - append the viewfinder post-processing stage (plus
 * any vf-stage extension firmware) to the pipe's pipeline.
 *
 * When extension firmware runs after the binary, the binary itself gets
 * no output frame; the last firmware delivers @out_frame instead.
 *
 * Returns 0 on success or a negative errno value.
 */
static int add_vf_pp_stage(
    struct ia_css_pipe *pipe,
    struct ia_css_frame *in_frame,
    struct ia_css_frame *out_frame,
    struct ia_css_binary *vf_pp_binary,
    struct ia_css_pipeline_stage **vf_pp_stage)
{
    struct ia_css_pipeline *me;
    const struct ia_css_fw_info *last_fw;
    struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
    struct ia_css_pipeline_stage_desc stage_desc;
    int err;

    /* out_frame can be NULL ??? */
    if (!pipe || !in_frame || !vf_pp_binary || !vf_pp_stage)
        return -EINVAL;

    ia_css_pipe_util_create_output_frames(out_frames);
    me = &pipe->pipeline;

    ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
                        "add_vf_pp_stage() enter:\n");

    *vf_pp_stage = NULL;
    last_fw = last_output_firmware(pipe->vf_stage);

    if (!pipe->extra_config.disable_vf_pp) {
        /*
         * With trailing output firmware the binary writes no frame of
         * its own (NULL); otherwise it writes @out_frame directly.
         */
        ia_css_pipe_util_set_output_frames(out_frames, 0,
                                           last_fw ? NULL : out_frame);
        ia_css_pipe_get_generic_stage_desc(&stage_desc, vf_pp_binary,
                                           out_frames, in_frame, NULL);
        err = ia_css_pipeline_create_and_add_stage(me, &stage_desc, vf_pp_stage);
        if (err)
            return err;
        /* The vf_pp output feeds the firmware chain below. */
        in_frame = (*vf_pp_stage)->args.out_frame[0];
    }
    return add_firmwares(me, vf_pp_binary, pipe->vf_stage, last_fw,
                         IA_CSS_BINARY_MODE_VF_PP,
                         in_frame, out_frame, NULL,
                         vf_pp_stage, NULL);
}
/*
 * add_yuv_scaler_stage() - append one YUV scaler stage (plus any
 * output-stage extension firmware) to pipeline @me.
 *
 * Output port 0 carries @out_frame, port 1 @internal_out_frame (the
 * frame chained into the next scaler); both are suppressed when
 * trailing output firmware delivers the final frame instead.  The
 * scaler never produces a viewfinder frame.
 *
 * Returns 0 on success or a negative errno value.
 */
static int add_yuv_scaler_stage(
    struct ia_css_pipe *pipe,
    struct ia_css_pipeline *me,
    struct ia_css_frame *in_frame,
    struct ia_css_frame *out_frame,
    struct ia_css_frame *internal_out_frame,
    struct ia_css_binary *yuv_scaler_binary,
    struct ia_css_pipeline_stage **pre_vf_pp_stage)
{
    const struct ia_css_fw_info *last_fw;
    struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
    struct ia_css_pipeline_stage_desc stage_desc;
    int err;

    /* out_frame can be NULL ??? */
    assert(in_frame);
    assert(pipe);
    assert(me);
    assert(yuv_scaler_binary);
    assert(pre_vf_pp_stage);

    ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
                        "add_yuv_scaler_stage() enter:\n");

    *pre_vf_pp_stage = NULL;
    ia_css_pipe_util_create_output_frames(out_frames);

    last_fw = last_output_firmware(pipe->output_stage);

    if (last_fw) {
        ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
    } else {
        ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
        ia_css_pipe_util_set_output_frames(out_frames, 1, internal_out_frame);
    }
    ia_css_pipe_get_generic_stage_desc(&stage_desc,
                                       yuv_scaler_binary, out_frames, in_frame, NULL);

    err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
                                               pre_vf_pp_stage);
    if (err)
        return err;

    in_frame = (*pre_vf_pp_stage)->args.out_frame[0];

    err = add_firmwares(me, yuv_scaler_binary, pipe->output_stage, last_fw,
                        IA_CSS_BINARY_MODE_CAPTURE_PP,
                        in_frame, out_frame, NULL,
                        NULL, pre_vf_pp_stage);
    /* If a firmware produce vf_pp output, we set that as vf_pp input */
    (*pre_vf_pp_stage)->args.vf_downscale_log2 =
        yuv_scaler_binary->vf_downscale_log2;

    ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
                        "add_yuv_scaler_stage() leave:\n");
    return err;
}
/*
 * add_capture_pp_stage() - append the capture post-processing stage
 * (plus any output-stage extension firmware) to pipeline @me.
 *
 * A viewfinder frame is allocated here for the capture_pp binary; once
 * the stage is created, the stage arguments reference it.
 *
 * Fix: @vf_frame used to be leaked when stage creation failed after the
 * allocation succeeded; it is now freed on that error path.
 *
 * Returns 0 on success or a negative errno value.
 */
static int add_capture_pp_stage(
    struct ia_css_pipe *pipe,
    struct ia_css_pipeline *me,
    struct ia_css_frame *in_frame,
    struct ia_css_frame *out_frame,
    struct ia_css_binary *capture_pp_binary,
    struct ia_css_pipeline_stage **capture_pp_stage)
{
    const struct ia_css_fw_info *last_fw = NULL;
    int err = 0;
    struct ia_css_frame *vf_frame = NULL;
    struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
    struct ia_css_pipeline_stage_desc stage_desc;

    /* out_frame can be NULL ??? */
    assert(in_frame);
    assert(pipe);
    assert(me);
    assert(capture_pp_binary);
    assert(capture_pp_stage);
    ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
                        "add_capture_pp_stage() enter:\n");

    *capture_pp_stage = NULL;
    ia_css_pipe_util_create_output_frames(out_frames);

    last_fw = last_output_firmware(pipe->output_stage);
    err = ia_css_frame_allocate_from_info(&vf_frame,
                                          &capture_pp_binary->vf_frame_info);
    if (err)
        return err;
    if (last_fw) {
        /* Trailing firmware delivers the final frame. */
        ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
        ia_css_pipe_get_generic_stage_desc(&stage_desc,
                                           capture_pp_binary, out_frames, NULL, vf_frame);
    } else {
        ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
        ia_css_pipe_get_generic_stage_desc(&stage_desc,
                                           capture_pp_binary, out_frames, NULL, vf_frame);
    }
    err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
                                               capture_pp_stage);
    if (err) {
        /* No stage owns vf_frame yet — free it to avoid a leak. */
        ia_css_frame_free(vf_frame);
        return err;
    }
    err = add_firmwares(me, capture_pp_binary, pipe->output_stage, last_fw,
                        IA_CSS_BINARY_MODE_CAPTURE_PP,
                        in_frame, out_frame, vf_frame,
                        NULL, capture_pp_stage);
    /* If a firmware produce vf_pp output, we set that as vf_pp input */
    if (*capture_pp_stage) {
        (*capture_pp_stage)->args.vf_downscale_log2 =
            capture_pp_binary->vf_downscale_log2;
    }
    return err;
}
/*
 * Initialize the host<->SP buffer queues and signal the SP (service
 * processor) that they are ready for use.
 */
static void sh_css_setup_queues(void)
{
    const struct ia_css_fw_info *fw;
    /*
     * NOTE(review): the unusual local name appears load-bearing —
     * sp_address_of(host_sp_queues_initialized) presumably expands to
     * this HIVE_ADDR_* identifier; confirm in the SP macro headers
     * before renaming.
     */
    unsigned int HIVE_ADDR_host_sp_queues_initialized;

    sh_css_hmm_buffer_record_init();

    sh_css_event_init_irq_mask();

    fw = &sh_css_sp_fw;
    HIVE_ADDR_host_sp_queues_initialized =
        fw->info.sp.host_sp_queues_initialized;

    ia_css_bufq_init();

    /* set "host_sp_queues_initialized" to "true" */
    sp_dmem_store_uint32(SP0_ID,
                         (unsigned int)sp_address_of(host_sp_queues_initialized),
                         (uint32_t)(1));
    ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "sh_css_setup_queues() leave:\n");
}
static int
init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
struct ia_css_frame *vf_frame, unsigned int idx)
{
int err = 0;
unsigned int thread_id;
enum sh_css_queue_id queue_id;
assert(vf_frame);
sh_css_pipe_get_viewfinder_frame_info(pipe, &vf_frame->frame_info, idx);
vf_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, thread_id, &queue_id);
vf_frame->dynamic_queue_id = queue_id;
vf_frame->buf_type = IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx;
err = ia_css_frame_init_planes(vf_frame);
return err;
}
#ifdef ISP2401
/*
 * Number of lines (0 or 1) to skip so the cropped input starts on a
 * GRBG-compatible row for the stream's bayer order.
 */
static unsigned int
get_crop_lines_for_bayer_order(const struct ia_css_stream_config *config)
{
    assert(config);

    switch (config->input_config.bayer_order) {
    case IA_CSS_BAYER_ORDER_BGGR:
    case IA_CSS_BAYER_ORDER_GBRG:
        return 1;
    default:
        return 0;
    }
}
/*
 * Number of columns (0 or 1) to skip so the cropped input starts on a
 * GRBG-compatible column for the stream's bayer order.
 */
static unsigned int
get_crop_columns_for_bayer_order(const struct ia_css_stream_config *config)
{
    assert(config);

    switch (config->input_config.bayer_order) {
    case IA_CSS_BAYER_ORDER_RGGB:
    case IA_CSS_BAYER_ORDER_GBRG:
        return 1;
    default:
        return 0;
    }
}
/*
 * This function is to get the sum of all extra pixels in addition to the
 * effective input; it includes the DVS envelope and filter run-in
 * (left/top cropping).  @extra_row and @extra_column receive the totals.
 */
static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
                                 unsigned int *extra_row, unsigned int *extra_column)
{
    enum ia_css_pipe_id pipe_id = pipe->mode;
    unsigned int left_cropping = 0, top_cropping = 0;
    unsigned int i;
    struct ia_css_resolution dvs_env = pipe->config.dvs_envelope;

    /*
     * The dvs envelope info may not be correctly sent down via pipe config
     * The check is made and the correct value is populated in the binary info
     * Use this value when computing crop, else excess lines may get trimmed
     */
    switch (pipe_id) {
    case IA_CSS_PIPE_ID_PREVIEW:
        /* Binary info is only valid once the binary has been loaded. */
        if (pipe->pipe_settings.preview.preview_binary.info) {
            left_cropping =
                pipe->pipe_settings.preview.preview_binary.info->sp.pipeline.left_cropping;
            top_cropping =
                pipe->pipe_settings.preview.preview_binary.info->sp.pipeline.top_cropping;
        }
        dvs_env = pipe->pipe_settings.preview.preview_binary.dvs_envelope;
        break;
    case IA_CSS_PIPE_ID_VIDEO:
        if (pipe->pipe_settings.video.video_binary.info) {
            left_cropping =
                pipe->pipe_settings.video.video_binary.info->sp.pipeline.left_cropping;
            top_cropping =
                pipe->pipe_settings.video.video_binary.info->sp.pipeline.top_cropping;
        }
        dvs_env = pipe->pipe_settings.video.video_binary.dvs_envelope;
        break;
    case IA_CSS_PIPE_ID_CAPTURE:
        /* Capture accumulates cropping/envelope over all primary stages. */
        for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
            if (pipe->pipe_settings.capture.primary_binary[i].info) {
                left_cropping +=
                    pipe->pipe_settings.capture.primary_binary[i].info->sp.pipeline.left_cropping;
                top_cropping +=
                    pipe->pipe_settings.capture.primary_binary[i].info->sp.pipeline.top_cropping;
            }
            dvs_env.width +=
                pipe->pipe_settings.capture.primary_binary[i].dvs_envelope.width;
            dvs_env.height +=
                pipe->pipe_settings.capture.primary_binary[i].dvs_envelope.height;
        }
        break;
    default:
        /* Other pipe types contribute no cropping; keep config envelope. */
        break;
    }

    *extra_row = top_cropping + dvs_env.height;
    *extra_column = left_cropping + dvs_env.width;
}
/*
 * ia_css_get_crop_offsets() - compute the start line/column at which the
 * sensor input must be cropped so the effective resolution (plus DVS
 * envelope and filter run-in) is centered, and store them in
 * @in_frame->crop_info.  Offsets are forced even, then adjusted by 0/1
 * to normalize the bayer order to GRBG.
 */
void
ia_css_get_crop_offsets(
    struct ia_css_pipe *pipe,
    struct ia_css_frame_info *in_frame)
{
    unsigned int row = 0;
    unsigned int column = 0;
    struct ia_css_resolution *input_res;
    struct ia_css_resolution *effective_res;
    unsigned int extra_row = 0, extra_col = 0;
    unsigned int min_reqd_height, min_reqd_width;

    assert(pipe);
    assert(pipe->stream);
    assert(in_frame);

    IA_CSS_ENTER_PRIVATE("pipe = %p effective_wd = %u effective_ht = %u",
                         pipe, pipe->config.input_effective_res.width,
                         pipe->config.input_effective_res.height);

    input_res = &pipe->stream->config.input_config.input_res;
    /*
     * NOTE(review): this function sits inside an "#ifdef ISP2401" region,
     * so the "#ifndef ISP2401" branch below appears dead — confirm the
     * surrounding preprocessor context before removing it.
     */
#ifndef ISP2401
    effective_res = &pipe->stream->config.input_config.effective_res;
#else
    effective_res = &pipe->config.input_effective_res;
#endif

    get_pipe_extra_pixel(pipe, &extra_row, &extra_col);

    in_frame->raw_bayer_order = pipe->stream->config.input_config.bayer_order;

    min_reqd_height = effective_res->height + extra_row;
    min_reqd_width = effective_res->width + extra_col;

    /* Center the required window; keep offsets even for bayer pairing. */
    if (input_res->height > min_reqd_height) {
        row = (input_res->height - min_reqd_height) / 2;
        row &= ~0x1;
    }
    if (input_res->width > min_reqd_width) {
        column = (input_res->width - min_reqd_width) / 2;
        column &= ~0x1;
    }

    /*
     * TODO:
     * 1. Require the special support for RAW10 packed mode.
     * 2. Require the special support for the online use cases.
     */

    /*
     * ISP expects GRBG bayer order, we skip one line and/or one row
     * to correct in case the input bayer order is different.
     */
    column += get_crop_columns_for_bayer_order(&pipe->stream->config);
    row += get_crop_lines_for_bayer_order(&pipe->stream->config);

    in_frame->crop_info.start_column = column;
    in_frame->crop_info.start_line = row;

    IA_CSS_LEAVE_PRIVATE("void start_col: %u start_row: %u", column, row);

    return;
}
#endif
/*
 * Fill in default frame info for a memory-based input frame: format
 * (RAW is promoted to RAW_PACKED on ISP2401 when the stream packs raw
 * pixels), full sensor input resolution, bit depth, queue binding and
 * (ISP2401) crop offsets; then initialize the plane layout.
 *
 * Returns the result of ia_css_frame_init_planes().
 */
static int
init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
                                  struct ia_css_frame *frame, enum ia_css_frame_format format)
{
    struct ia_css_frame *in_frame;
    int err = 0;
    unsigned int thread_id;
    enum sh_css_queue_id queue_id;

    assert(frame);
    in_frame = frame;

    in_frame->frame_info.format = format;

    if (IS_ISP2401 && format == IA_CSS_FRAME_FORMAT_RAW) {
        in_frame->frame_info.format = (pipe->stream->config.pack_raw_pixels) ?
                                      IA_CSS_FRAME_FORMAT_RAW_PACKED : IA_CSS_FRAME_FORMAT_RAW;
    }

    in_frame->frame_info.res.width = pipe->stream->config.input_config.input_res.width;
    in_frame->frame_info.res.height = pipe->stream->config.input_config.input_res.height;
    in_frame->frame_info.raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
    /* Recompute padded width/stride for the chosen format. */
    ia_css_frame_info_set_width(&in_frame->frame_info,
                                pipe->stream->config.input_config.input_res.width, 0);
    in_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;

    /* Bind the frame to the per-pipe SP input queue. */
    ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
    ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_INPUT_FRAME, thread_id, &queue_id);
    in_frame->dynamic_queue_id = queue_id;
    in_frame->buf_type = IA_CSS_BUFFER_TYPE_INPUT_FRAME;
#ifdef ISP2401
    ia_css_get_crop_offsets(pipe, &in_frame->frame_info);
#endif
    err = ia_css_frame_init_planes(in_frame);

    ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s() bayer_order = %d\n",
                        __func__, in_frame->frame_info.raw_bayer_order);

    return err;
}
static int
init_out_frameinfo_defaults(struct ia_css_pipe *pipe,
struct ia_css_frame *out_frame, unsigned int idx)
{
int err = 0;
unsigned int thread_id;
enum sh_css_queue_id queue_id;
assert(out_frame);
sh_css_pipe_get_output_frame_info(pipe, &out_frame->frame_info, idx);
out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, thread_id, &queue_id);
out_frame->dynamic_queue_id = queue_id;
out_frame->buf_type = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx;
err = ia_css_frame_init_planes(out_frame);
return err;
}
/*
 * Create stages for video pipe: optional ISP copy stage, the main video
 * stage, optional vf_pp stage and optional YUV scaler stages.
 *
 * Fix: the "&copy_stage" argument to ia_css_pipeline_create_and_add_stage()
 * had been corrupted into the HTML entity mojibake "©_stage" by a bad
 * text conversion; restored so the code compiles again.
 *
 * Returns 0 on success or a negative errno value.
 */
static int create_host_video_pipeline(struct ia_css_pipe *pipe)
{
    struct ia_css_pipeline_stage_desc stage_desc;
    struct ia_css_binary *copy_binary, *video_binary,
           *yuv_scaler_binary, *vf_pp_binary;
    struct ia_css_pipeline_stage *copy_stage = NULL;
    struct ia_css_pipeline_stage *video_stage = NULL;
    struct ia_css_pipeline_stage *yuv_scaler_stage = NULL;
    struct ia_css_pipeline_stage *vf_pp_stage = NULL;
    struct ia_css_pipeline *me;
    struct ia_css_frame *in_frame = NULL;
    struct ia_css_frame *out_frame;
    struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
    struct ia_css_frame *vf_frame = NULL;
    int err = 0;
    bool need_copy = false;
    bool need_vf_pp = false;
    bool need_yuv_pp = false;
    bool need_in_frameinfo_memory = false;
    unsigned int i, num_yuv_scaler;
    bool *is_output_stage = NULL;

    IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
    if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
        IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
        return -EINVAL;
    }
    ia_css_pipe_util_create_output_frames(out_frames);
    out_frame = &pipe->out_frame_struct;

    /* pipeline already created as part of create_host_pipeline_structure */
    me = &pipe->pipeline;
    ia_css_pipeline_clean(me);
    me->dvs_frame_delay = pipe->dvs_frame_delay;

    if (IS_ISP2401) {
        /*
         * When the input system is 2401, always enable 'in_frameinfo_memory'
         * except for the following: online or continuous
         */
        need_in_frameinfo_memory = !(pipe->stream->config.online ||
                                     pipe->stream->config.continuous);
    } else {
        /* Construct in_frame info (only in case we have dynamic input */
        need_in_frameinfo_memory = pipe->stream->config.mode ==
                                   IA_CSS_INPUT_MODE_MEMORY;
    }

    /* Construct in_frame info (only in case we have dynamic input */
    if (need_in_frameinfo_memory) {
        in_frame = &pipe->in_frame_struct;
        err = init_in_frameinfo_memory_defaults(pipe, in_frame,
                                                IA_CSS_FRAME_FORMAT_RAW);
        if (err)
            goto ERR;
    }

    out_frame->data = 0;
    err = init_out_frameinfo_defaults(pipe, out_frame, 0);
    if (err)
        goto ERR;

    if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
        vf_frame = &pipe->vf_frame_struct;
        vf_frame->data = 0;
        err = init_vf_frameinfo_defaults(pipe, vf_frame, 0);
        if (err)
            goto ERR;
    }

    copy_binary = &pipe->pipe_settings.video.copy_binary;
    video_binary = &pipe->pipe_settings.video.video_binary;
    vf_pp_binary = &pipe->pipe_settings.video.vf_pp_binary;

    yuv_scaler_binary = pipe->pipe_settings.video.yuv_scaler_binary;
    num_yuv_scaler = pipe->pipe_settings.video.num_yuv_scaler;
    is_output_stage = pipe->pipe_settings.video.is_output_stage;

    need_copy = (copy_binary && copy_binary->info);
    need_vf_pp = (vf_pp_binary && vf_pp_binary->info);
    need_yuv_pp = (yuv_scaler_binary && yuv_scaler_binary->info);

    if (need_copy) {
        ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
        ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
                                           out_frames, NULL, NULL);
        err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
                                                   &copy_stage);
        if (err)
            goto ERR;
        in_frame = me->stages->args.out_frame[0];
    } else if (pipe->stream->config.continuous) {
        if (IS_ISP2401)
            /*
             * When continuous is enabled, configure in_frame with the
             * last pipe, which is the copy pipe.
             */
            in_frame = pipe->stream->last_pipe->continuous_frames[0];
        else
            in_frame = pipe->continuous_frames[0];
    }

    ia_css_pipe_util_set_output_frames(out_frames, 0,
                                       need_yuv_pp ? NULL : out_frame);

    /*
     * when the video binary supports a second output pin,
     * it can directly produce the vf_frame.
     */
    if (need_vf_pp) {
        ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary,
                                           out_frames, in_frame, NULL);
    } else {
        ia_css_pipe_get_generic_stage_desc(&stage_desc, video_binary,
                                           out_frames, in_frame, vf_frame);
    }
    err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
                                               &video_stage);
    if (err)
        goto ERR;

    /* If we use copy iso video, the input must be yuv iso raw */
    if (video_stage) {
        video_stage->args.copy_vf =
            video_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY;
        video_stage->args.copy_output = video_stage->args.copy_vf;
    }

    /*
     * when the video binary supports only 1 output pin, vf_pp is needed to
     * produce the vf_frame.
     */
    if (need_vf_pp && video_stage) {
        in_frame = video_stage->args.out_vf_frame;
        err = add_vf_pp_stage(pipe, in_frame, vf_frame, vf_pp_binary,
                              &vf_pp_stage);
        if (err)
            goto ERR;
    }
    if (video_stage) {
        int frm;

        for (frm = 0; frm < NUM_VIDEO_TNR_FRAMES; frm++) {
            video_stage->args.tnr_frames[frm] =
                pipe->pipe_settings.video.tnr_frames[frm];
        }
        for (frm = 0; frm < MAX_NUM_VIDEO_DELAY_FRAMES; frm++) {
            video_stage->args.delay_frames[frm] =
                pipe->pipe_settings.video.delay_frames[frm];
        }
    }

    if (need_yuv_pp && video_stage) {
        struct ia_css_frame *tmp_in_frame = video_stage->args.out_frame[0];
        struct ia_css_frame *tmp_out_frame = NULL;

        for (i = 0; i < num_yuv_scaler; i++) {
            tmp_out_frame = is_output_stage[i] ? out_frame : NULL;

            err = add_yuv_scaler_stage(pipe, me, tmp_in_frame,
                                       tmp_out_frame, NULL,
                                       &yuv_scaler_binary[i],
                                       &yuv_scaler_stage);
            /* Same as the ERR path: trace-leave with err and return. */
            if (err)
                goto ERR;
            /* we use output port 1 as internal output port */
            if (yuv_scaler_stage)
                tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
        }
    }

    pipe->pipeline.acquire_isp_each_stage = false;
    ia_css_pipeline_finalize_stages(&pipe->pipeline,
                                    pipe->stream->config.continuous);

ERR:
    IA_CSS_LEAVE_ERR_PRIVATE(err);
    return err;
}
/*
 * Create stages for preview: optional ISP copy stage, the preview stage
 * and an optional vf_pp stage.
 *
 * Fix: the "&copy_stage" argument to ia_css_pipeline_create_and_add_stage()
 * had been corrupted into the HTML entity mojibake "©_stage" by a bad
 * text conversion; restored so the code compiles again.
 *
 * Returns 0 on success or a negative errno value.
 */
static int
create_host_preview_pipeline(struct ia_css_pipe *pipe)
{
    struct ia_css_pipeline_stage *copy_stage = NULL;
    struct ia_css_pipeline_stage *preview_stage = NULL;
    struct ia_css_pipeline_stage *vf_pp_stage = NULL;
    struct ia_css_pipeline_stage_desc stage_desc;
    struct ia_css_pipeline *me = NULL;
    struct ia_css_binary *copy_binary, *preview_binary, *vf_pp_binary = NULL;
    struct ia_css_frame *in_frame = NULL;
    int err = 0;
    struct ia_css_frame *out_frame;
    struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
    bool need_in_frameinfo_memory = false;
    bool sensor = false;
    bool buffered_sensor = false;
    bool online = false;
    bool continuous = false;

    IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
    if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
        IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
        return -EINVAL;
    }

    ia_css_pipe_util_create_output_frames(out_frames);
    /* pipeline already created as part of create_host_pipeline_structure */
    me = &pipe->pipeline;
    ia_css_pipeline_clean(me);

    if (IS_ISP2401) {
        /*
         * When the input system is 2401, always enable 'in_frameinfo_memory'
         * except for the following:
         * - Direct Sensor Mode Online Preview
         * - Buffered Sensor Mode Online Preview
         * - Direct Sensor Mode Continuous Preview
         * - Buffered Sensor Mode Continuous Preview
         */
        sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
        buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
        online = pipe->stream->config.online;
        continuous = pipe->stream->config.continuous;
        need_in_frameinfo_memory =
            !((sensor && (online || continuous)) || (buffered_sensor &&
                                                     (online || continuous)));
    } else {
        /* Construct in_frame info (only in case we have dynamic input */
        need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
    }
    if (need_in_frameinfo_memory) {
        err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame,
                                                IA_CSS_FRAME_FORMAT_RAW);
        if (err)
            goto ERR;

        in_frame = &me->in_frame;
    } else {
        in_frame = NULL;
    }

    err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0);
    if (err)
        goto ERR;
    out_frame = &me->out_frame[0];

    copy_binary = &pipe->pipe_settings.preview.copy_binary;
    preview_binary = &pipe->pipe_settings.preview.preview_binary;
    if (pipe->pipe_settings.preview.vf_pp_binary.info)
        vf_pp_binary = &pipe->pipe_settings.preview.vf_pp_binary;

    if (pipe->pipe_settings.preview.copy_binary.info) {
        ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
        ia_css_pipe_get_generic_stage_desc(&stage_desc, copy_binary,
                                           out_frames, NULL, NULL);
        err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
                                                   &copy_stage);
        if (err)
            goto ERR;
        in_frame = me->stages->args.out_frame[0];
    } else if (pipe->stream->config.continuous) {
        if (IS_ISP2401) {
            /*
             * When continuous is enabled, configure in_frame with the
             * last pipe, which is the copy pipe.
             */
            if (continuous || !online)
                in_frame = pipe->stream->last_pipe->continuous_frames[0];
        } else {
            in_frame = pipe->continuous_frames[0];
        }
    }

    if (vf_pp_binary) {
        /* vf_pp delivers the final frame; preview binary writes none. */
        ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
        ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary,
                                           out_frames, in_frame, NULL);
    } else {
        ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
        ia_css_pipe_get_generic_stage_desc(&stage_desc, preview_binary,
                                           out_frames, in_frame, NULL);
    }
    err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
                                               &preview_stage);
    if (err)
        goto ERR;

    /* If we use copy iso preview, the input must be yuv iso raw */
    preview_stage->args.copy_vf =
        preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY;
    preview_stage->args.copy_output = !preview_stage->args.copy_vf;
    if (preview_stage->args.copy_vf && !preview_stage->args.out_vf_frame) {
        /* in case of copy, use the vf frame as output frame */
        preview_stage->args.out_vf_frame =
            preview_stage->args.out_frame[0];
    }
    if (vf_pp_binary) {
        if (preview_binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY)
            in_frame = preview_stage->args.out_vf_frame;
        else
            in_frame = preview_stage->args.out_frame[0];
        err = add_vf_pp_stage(pipe, in_frame, out_frame, vf_pp_binary,
                              &vf_pp_stage);
        if (err)
            goto ERR;
    }

    pipe->pipeline.acquire_isp_each_stage = false;
    ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);

ERR:
    IA_CSS_LEAVE_ERR_PRIVATE(err);
    return err;
}
/*
 * For a continuous stream, tell the SP how many raw buffers exist and
 * hand over all SP-internal continuous frames (with their metadata
 * buffers).  No-op for non-continuous streams.
 */
static void send_raw_frames(struct ia_css_pipe *pipe)
{
    unsigned int i;

    if (!pipe->stream->config.continuous)
        return;

    sh_css_update_host2sp_cont_num_raw_frames
    (pipe->stream->config.init_num_cont_raw_buf, true);
    sh_css_update_host2sp_cont_num_raw_frames
    (pipe->stream->config.target_num_cont_raw_buf, false);

    /* Hand-over all the SP-internal buffers */
    for (i = 0; i < pipe->stream->config.init_num_cont_raw_buf; i++)
        sh_css_update_host2sp_offline_frame(i,
                                            pipe->continuous_frames[i],
                                            pipe->cont_md_buffers[i]);
}
/*
 * preview_start() - start a preview pipe: send MIPI/raw frames to the
 * SP, then construct and load the companion copy and capture pipes (for
 * continuous / continuous-capture streams) before starting the preview
 * pipeline itself.
 *
 * Fix: the "&copy_pipe->pipeline" argument to sh_css_sp_init_pipeline()
 * had been corrupted into the HTML entity mojibake "©_pipe->pipeline"
 * by a bad text conversion; restored so the code compiles again.
 *
 * Returns 0 on success or a negative errno value.
 */
static int
preview_start(struct ia_css_pipe *pipe)
{
    int err = 0;
    struct ia_css_pipe *copy_pipe, *capture_pipe;
    enum sh_css_pipe_config_override copy_ovrd;
    enum ia_css_input_mode preview_pipe_input_mode;
    unsigned int thread_id;

    IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
    if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_PREVIEW)) {
        IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
        return -EINVAL;
    }

    preview_pipe_input_mode = pipe->stream->config.mode;

    copy_pipe = pipe->pipe_settings.preview.copy_pipe;
    capture_pipe = pipe->pipe_settings.preview.capture_pipe;

    sh_css_metrics_start_frame();

    /* multi stream video needs mipi buffers */
    err = send_mipi_frames(pipe);
    if (err) {
        IA_CSS_LEAVE_ERR_PRIVATE(err);
        return err;
    }
    send_raw_frames(pipe);

    /* The override mask holds one bit per SP thread to (re)start. */
    ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
    copy_ovrd = 1 << thread_id;

    if (pipe->stream->cont_capt) {
        ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
                                         &thread_id);
        copy_ovrd |= 1 << thread_id;
    }

    /* Construct and load the copy pipe */
    if (pipe->stream->config.continuous) {
        sh_css_sp_init_pipeline(&copy_pipe->pipeline,
                                IA_CSS_PIPE_ID_COPY,
                                (uint8_t)ia_css_pipe_get_pipe_num(copy_pipe),
                                false,
                                pipe->stream->config.pixels_per_clock == 2, false,
                                false, pipe->required_bds_factor,
                                copy_ovrd,
                                pipe->stream->config.mode,
                                &pipe->stream->config.metadata_config,
                                &pipe->stream->info.metadata_info,
                                pipe->stream->config.source.port.port);

        /*
         * make the preview pipe start with mem mode input, copy handles
         * the actual mode
         */
        preview_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY;
    }

    /* Construct and load the capture pipe */
    if (pipe->stream->cont_capt) {
        sh_css_sp_init_pipeline(&capture_pipe->pipeline,
                                IA_CSS_PIPE_ID_CAPTURE,
                                (uint8_t)ia_css_pipe_get_pipe_num(capture_pipe),
                                capture_pipe->config.default_capture_config.enable_xnr != 0,
                                capture_pipe->stream->config.pixels_per_clock == 2,
                                true, /* continuous */
                                false, /* offline */
                                capture_pipe->required_bds_factor,
                                0,
                                IA_CSS_INPUT_MODE_MEMORY,
                                &pipe->stream->config.metadata_config,
                                &pipe->stream->info.metadata_info,
                                (enum mipi_port_id)0);
    }

    start_pipe(pipe, copy_ovrd, preview_pipe_input_mode);

    IA_CSS_LEAVE_ERR_PRIVATE(err);
    return err;
}
/*
 * ia_css_pipe_enqueue_buffer() - hand a host buffer to the SP.
 *
 * Validates the buffer type / pipe, wraps the buffer description in a
 * DDR-resident struct sh_css_hmm_buffer (via an rmgr-acquired vbuf),
 * enqueues the vbuf address on the matching per-thread SP queue and
 * signals the SP with a BUFFER_ENQUEUED software event.  On any failure
 * after acquisition the vbuf is released again.
 *
 * Returns 0 on success, -EINVAL on invalid arguments/queues, -EBUSY
 * when the SP is not running.
 */
int
ia_css_pipe_enqueue_buffer(struct ia_css_pipe *pipe,
                           const struct ia_css_buffer *buffer)
{
    int return_err = 0;
    unsigned int thread_id;
    enum sh_css_queue_id queue_id;
    struct ia_css_pipeline *pipeline;
    struct ia_css_pipeline_stage *stage;
    struct ia_css_rmgr_vbuf_handle p_vbuf;
    struct ia_css_rmgr_vbuf_handle *h_vbuf;
    struct sh_css_hmm_buffer ddr_buffer;
    enum ia_css_buffer_type buf_type;
    enum ia_css_pipe_id pipe_id;
    bool ret_err;

    IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer);

    if ((!pipe) || (!buffer)) {
        IA_CSS_LEAVE_ERR(-EINVAL);
        return -EINVAL;
    }

    buf_type = buffer->type;

    pipe_id = pipe->mode;

    IA_CSS_LOG("pipe_id=%d, buf_type=%d", pipe_id, buf_type);

    assert(pipe_id < IA_CSS_PIPE_ID_NUM);
    assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
    if (buf_type == IA_CSS_BUFFER_TYPE_INVALID ||
        buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE ||
        pipe_id >= IA_CSS_PIPE_ID_NUM) {
        IA_CSS_LEAVE_ERR(-EINVAL);
        return -EINVAL;
    }

    /* Map (pipe, buffer type) to the SP thread and its internal queue. */
    ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
    if (!ret_err) {
        IA_CSS_LEAVE_ERR(-EINVAL);
        return -EINVAL;
    }

    ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id);
    if (!ret_err) {
        IA_CSS_LEAVE_ERR(-EINVAL);
        return -EINVAL;
    }

    if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES)) {
        IA_CSS_LEAVE_ERR(-EINVAL);
        return -EINVAL;
    }

    if (!sh_css_sp_is_running()) {
        IA_CSS_LOG("SP is not running!");
        IA_CSS_LEAVE_ERR(-EBUSY);
        /* SP is not running. The queues are not valid */
        return -EBUSY;
    }

    pipeline = &pipe->pipeline;

    assert(pipeline || pipe_id == IA_CSS_PIPE_ID_COPY);

    assert(sizeof(NULL) <= sizeof(ddr_buffer.kernel_ptr));
    ddr_buffer.kernel_ptr = HOST_ADDRESS(NULL);
    ddr_buffer.cookie_ptr = buffer->driver_cookie;
    ddr_buffer.timing_data = buffer->timing_data;

    /* Fill the type-specific payload and remember the host pointer. */
    if (buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS) {
        if (!buffer->data.stats_3a) {
            IA_CSS_LEAVE_ERR(-EINVAL);
            return -EINVAL;
        }
        ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_3a);
        ddr_buffer.payload.s3a = *buffer->data.stats_3a;
    } else if (buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS) {
        if (!buffer->data.stats_dvs) {
            IA_CSS_LEAVE_ERR(-EINVAL);
            return -EINVAL;
        }
        ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.stats_dvs);
        ddr_buffer.payload.dis = *buffer->data.stats_dvs;
    } else if (buf_type == IA_CSS_BUFFER_TYPE_METADATA) {
        if (!buffer->data.metadata) {
            IA_CSS_LEAVE_ERR(-EINVAL);
            return -EINVAL;
        }
        ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.metadata);
        ddr_buffer.payload.metadata = *buffer->data.metadata;
    } else if (buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME ||
               buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME ||
               buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME ||
               buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME ||
               buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME) {
        if (!buffer->data.frame) {
            IA_CSS_LEAVE_ERR(-EINVAL);
            return -EINVAL;
        }
        ddr_buffer.kernel_ptr = HOST_ADDRESS(buffer->data.frame);
        ddr_buffer.payload.frame.frame_data = buffer->data.frame->data;
        ddr_buffer.payload.frame.flashed = 0;

        ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
                            "ia_css_pipe_enqueue_buffer() buf_type=%d, data(DDR address)=0x%x\n",
                            buf_type, buffer->data.frame->data);
    }

    /* start of test for using rmgr for acq/rel memory */
    p_vbuf.vptr = 0;
    p_vbuf.count = 0;
    p_vbuf.size = sizeof(struct sh_css_hmm_buffer);
    h_vbuf = &p_vbuf;
    /* TODO: change next to correct pool for optimization */
    ia_css_rmgr_acq_vbuf(hmm_buffer_pool, &h_vbuf);
    if ((!h_vbuf) || (h_vbuf->vptr == 0x0)) {
        IA_CSS_LEAVE_ERR(-EINVAL);
        return -EINVAL;
    }

    /* Copy the descriptor into ISP-visible (DDR) memory. */
    hmm_store(h_vbuf->vptr,
              (void *)(&ddr_buffer),
              sizeof(struct sh_css_hmm_buffer));
    if (buf_type == IA_CSS_BUFFER_TYPE_3A_STATISTICS ||
        buf_type == IA_CSS_BUFFER_TYPE_DIS_STATISTICS ||
        buf_type == IA_CSS_BUFFER_TYPE_LACE_STATISTICS) {
        if (!pipeline) {
            ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf);
            IA_CSS_LOG("pipeline is empty!");
            IA_CSS_LEAVE_ERR(-EINVAL);
            return -EINVAL;
        }

        for (stage = pipeline->stages; stage; stage = stage->next) {
            /*
             * The SP will read the params after it got
             * empty 3a and dis
             */
            /*
             * NOTE(review): the same vbuf address is enqueued once per
             * qualifying stage; kept as-is — confirm the SP consumes
             * one entry per stage.
             */
            if (stage->binary && stage->binary->info &&
                (stage->binary->info->sp.enable.s3a ||
                 stage->binary->info->sp.enable.dis)) {
                /* there is a stage that needs it */
                return_err = ia_css_bufq_enqueue_buffer(thread_id,
                                                        queue_id,
                                                        (uint32_t)h_vbuf->vptr);
            }
        }
    } else if (buf_type == IA_CSS_BUFFER_TYPE_INPUT_FRAME ||
               buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME ||
               buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME ||
               buf_type == IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME ||
               buf_type == IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME ||
               buf_type == IA_CSS_BUFFER_TYPE_METADATA) {
        return_err = ia_css_bufq_enqueue_buffer(thread_id,
                                                queue_id,
                                                (uint32_t)h_vbuf->vptr);
        if (!return_err &&
            buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
            IA_CSS_LOG("pfp: enqueued OF %d to q %d thread %d",
                       ddr_buffer.payload.frame.frame_data,
                       queue_id, thread_id);
        }
    }

    /* Track the in-flight buffer so dequeue can validate/reclaim it. */
    if (!return_err) {
        if (sh_css_hmm_buffer_record_acquire(
                h_vbuf, buf_type,
                HOST_ADDRESS(ddr_buffer.kernel_ptr))) {
            IA_CSS_LOG("send vbuf=%p", h_vbuf);
        } else {
            return_err = -EINVAL;
            IA_CSS_ERROR("hmm_buffer_record[]: no available slots\n");
        }
    }

    /*
     * Tell the SP which queues are not empty,
     * by sending the software event.
     */
    if (!return_err) {
        if (!sh_css_sp_is_running()) {
            /* SP is not running. The queues are not valid */
            IA_CSS_LOG("SP is not running!");
            IA_CSS_LEAVE_ERR(-EBUSY);
            return -EBUSY;
        }
        return_err = ia_css_bufq_enqueue_psys_event(
                         IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED,
                         (uint8_t)thread_id,
                         queue_id,
                         0);
    } else {
        /* Enqueue failed: give the vbuf back to the pool. */
        ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &h_vbuf);
        IA_CSS_ERROR("buffer not enqueued");
    }

    IA_CSS_LEAVE("return value = %d", return_err);

    return return_err;
}
/*
* TODO: Free up the hmm memory space.
*/
/*
 * ia_css_pipe_dequeue_buffer() - pop a completed buffer from an SP queue.
 * @pipe:   pipe whose SP thread/queue is consulted (must be non-NULL).
 * @buffer: output descriptor; its ->type selects the queue, and on success
 *          its data/exp_id/cookie/timing fields are filled from the
 *          sh_css_hmm_buffer the SP wrote back to DDR.
 *
 * Returns 0 on success, -EINVAL on bad arguments or a corrupt/unknown
 * buffer record, -EBUSY when the SP is not running (queues invalid).
 */
int
ia_css_pipe_dequeue_buffer(struct ia_css_pipe *pipe,
			   struct ia_css_buffer *buffer)
{
	int return_err;
	enum sh_css_queue_id queue_id;
	ia_css_ptr ddr_buffer_addr = (ia_css_ptr)0;
	struct sh_css_hmm_buffer ddr_buffer;
	enum ia_css_buffer_type buf_type;
	enum ia_css_pipe_id pipe_id;
	unsigned int thread_id;
	hrt_address kernel_ptr = 0;
	bool ret_err;

	IA_CSS_ENTER("pipe=%p, buffer=%p", pipe, buffer);

	if ((!pipe) || (!buffer)) {
		IA_CSS_LEAVE_ERR(-EINVAL);
		return -EINVAL;
	}

	pipe_id = pipe->mode;
	buf_type = buffer->type;

	IA_CSS_LOG("pipe_id=%d, buf_type=%d", pipe_id, buf_type);

	ddr_buffer.kernel_ptr = 0;

	/* Map the pipe to its SP thread, then the buffer type to a queue. */
	ret_err = ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
	if (!ret_err) {
		IA_CSS_LEAVE_ERR(-EINVAL);
		return -EINVAL;
	}

	ret_err = ia_css_query_internal_queue_id(buf_type, thread_id, &queue_id);
	if (!ret_err) {
		IA_CSS_LEAVE_ERR(-EINVAL);
		return -EINVAL;
	}

	if ((queue_id <= SH_CSS_INVALID_QUEUE_ID) || (queue_id >= SH_CSS_MAX_NUM_QUEUES)) {
		IA_CSS_LEAVE_ERR(-EINVAL);
		return -EINVAL;
	}

	if (!sh_css_sp_is_running()) {
		IA_CSS_LOG("SP is not running!");
		IA_CSS_LEAVE_ERR(-EBUSY);
		/* SP is not running. The queues are not valid */
		return -EBUSY;
	}

	return_err = ia_css_bufq_dequeue_buffer(queue_id,
						(uint32_t *)&ddr_buffer_addr);

	if (!return_err) {
		struct ia_css_frame *frame;
		struct sh_css_hmm_buffer_record *hmm_buffer_record = NULL;

		IA_CSS_LOG("receive vbuf=%x", (int)ddr_buffer_addr);

		/* Validate the ddr_buffer_addr and buf_type */
		hmm_buffer_record = sh_css_hmm_buffer_record_validate(
		    ddr_buffer_addr, buf_type);
		if (hmm_buffer_record) {
			/*
			 * valid hmm_buffer_record found. Save the kernel_ptr
			 * for validation after performing hmm_load. The
			 * vbuf handle and buffer_record can be released.
			 */
			kernel_ptr = hmm_buffer_record->kernel_ptr;
			ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &hmm_buffer_record->h_vbuf);
			sh_css_hmm_buffer_record_reset(hmm_buffer_record);
		} else {
			IA_CSS_ERROR("hmm_buffer_record not found (0x%x) buf_type(%d)",
				     ddr_buffer_addr, buf_type);
			IA_CSS_LEAVE_ERR(-EINVAL);
			return -EINVAL;
		}

		/* Pull the SP-written descriptor from ISP DDR into host memory. */
		hmm_load(ddr_buffer_addr,
			 &ddr_buffer,
			 sizeof(struct sh_css_hmm_buffer));

		/*
		 * if the kernel_ptr is 0 or an invalid, return an error.
		 * do not access the buffer via the kernal_ptr.
		 */
		if ((ddr_buffer.kernel_ptr == 0) ||
		    (kernel_ptr != HOST_ADDRESS(ddr_buffer.kernel_ptr))) {
			IA_CSS_ERROR("kernel_ptr invalid");
			IA_CSS_ERROR("expected: (0x%llx)", (u64)kernel_ptr);
			IA_CSS_ERROR("actual: (0x%llx)", (u64)HOST_ADDRESS(ddr_buffer.kernel_ptr));
			IA_CSS_ERROR("buf_type: %d\n", buf_type);
			IA_CSS_LEAVE_ERR(-EINVAL);
			return -EINVAL;
		}

		if (ddr_buffer.kernel_ptr != 0) {
			/*
			 * buffer->exp_id : all instances to be removed later
			 * once the driver change is completed. See patch #5758
			 * for reference
			 */
			buffer->exp_id = 0;
			buffer->driver_cookie = ddr_buffer.cookie_ptr;
			buffer->timing_data = ddr_buffer.timing_data;

			if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME ||
			    buf_type == IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME) {
				buffer->isys_eof_clock_tick.ticks = ddr_buffer.isys_eof_clock_tick;
			}

			/* Decode the payload union according to the buffer type. */
			switch (buf_type) {
			case IA_CSS_BUFFER_TYPE_INPUT_FRAME:
			case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME:
			case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME:
				if (pipe && pipe->stop_requested) {
					if (!IS_ISP2401) {
						/*
						 * free mipi frames only for old input
						 * system for 2401 it is done in
						 * ia_css_stream_destroy call
						 */
						return_err = free_mipi_frames(pipe);
						if (return_err) {
							IA_CSS_LOG("free_mipi_frames() failed");
							IA_CSS_LEAVE_ERR(return_err);
							return return_err;
						}
					}
					pipe->stop_requested = false;
				}
				fallthrough;
			case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME:
			case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME:
				frame = (struct ia_css_frame *)HOST_ADDRESS(ddr_buffer.kernel_ptr);
				buffer->data.frame = frame;
				buffer->exp_id = ddr_buffer.payload.frame.exp_id;
				frame->exp_id = ddr_buffer.payload.frame.exp_id;
				frame->isp_config_id = ddr_buffer.payload.frame.isp_parameters_id;
				if (ddr_buffer.payload.frame.flashed == 1)
					frame->flash_state =
					    IA_CSS_FRAME_FLASH_STATE_PARTIAL;
				if (ddr_buffer.payload.frame.flashed == 2)
					frame->flash_state =
					    IA_CSS_FRAME_FLASH_STATE_FULL;
				/*
				 * The first dvs_frame_delay frames after start are
				 * invalid; flag them and count down.
				 */
				frame->valid = pipe->num_invalid_frames == 0;
				if (!frame->valid)
					pipe->num_invalid_frames--;

				if (frame->frame_info.format == IA_CSS_FRAME_FORMAT_BINARY_8) {
					if (IS_ISP2401)
						frame->planes.binary.size = frame->data_bytes;
					else
						frame->planes.binary.size =
						    sh_css_sp_get_binary_copy_size();
				}
				if (buf_type == IA_CSS_BUFFER_TYPE_OUTPUT_FRAME) {
					IA_CSS_LOG("pfp: dequeued OF %d with config id %d thread %d",
						   frame->data, frame->isp_config_id, thread_id);
				}
				ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
						    "ia_css_pipe_dequeue_buffer() buf_type=%d, data(DDR address)=0x%x\n",
						    buf_type, buffer->data.frame->data);
				break;
			case IA_CSS_BUFFER_TYPE_3A_STATISTICS:
				buffer->data.stats_3a =
				    (struct ia_css_isp_3a_statistics *)HOST_ADDRESS(ddr_buffer.kernel_ptr);
				buffer->exp_id = ddr_buffer.payload.s3a.exp_id;
				buffer->data.stats_3a->exp_id = ddr_buffer.payload.s3a.exp_id;
				buffer->data.stats_3a->isp_config_id = ddr_buffer.payload.s3a.isp_config_id;
				break;
			case IA_CSS_BUFFER_TYPE_DIS_STATISTICS:
				buffer->data.stats_dvs =
				    (struct ia_css_isp_dvs_statistics *)
				    HOST_ADDRESS(ddr_buffer.kernel_ptr);
				buffer->exp_id = ddr_buffer.payload.dis.exp_id;
				buffer->data.stats_dvs->exp_id = ddr_buffer.payload.dis.exp_id;
				break;
			case IA_CSS_BUFFER_TYPE_LACE_STATISTICS:
				break;
			case IA_CSS_BUFFER_TYPE_METADATA:
				buffer->data.metadata =
				    (struct ia_css_metadata *)HOST_ADDRESS(ddr_buffer.kernel_ptr);
				buffer->exp_id = ddr_buffer.payload.metadata.exp_id;
				buffer->data.metadata->exp_id = ddr_buffer.payload.metadata.exp_id;
				break;
			default:
				return_err = -EINVAL;
				break;
			}
		}
	}

	/*
	 * Tell the SP which queues are not full,
	 * by sending the software event.
	 */
	if (!return_err) {
		if (!sh_css_sp_is_running()) {
			IA_CSS_LOG("SP is not running!");
			IA_CSS_LEAVE_ERR(-EBUSY);
			/* SP is not running. The queues are not valid */
			return -EBUSY;
		}
		ia_css_bufq_enqueue_psys_event(
		    IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED,
		    0,
		    queue_id,
		    0);
	}
	IA_CSS_LEAVE("buffer=%p", buffer);
	return return_err;
}
/*
 * This cannot move to the event module because it uses ia_css_event_type,
 * which is declared in ia_css.h.
 * TODO: modify and move it if possible.
 *
 * !!!IMPORTANT!!! KEEP THE FOLLOWING IN SYNC:
 * 1) "enum ia_css_event_type"                             (ia_css_event_public.h)
 * 2) "enum sh_css_sp_event_type"                          (sh_css_internal.h)
 * 3) "enum ia_css_event_type event_id_2_event_mask"       (event_handler.sp.c)
 * 4) "enum ia_css_event_type convert_event_sp_to_host_domain" (sh_css.c)
 */
/* Indexed by the SP event code (payload byte 0); order must match the lists above. */
static enum ia_css_event_type convert_event_sp_to_host_domain[] = {
	IA_CSS_EVENT_TYPE_OUTPUT_FRAME_DONE,		/* Output frame ready. */
	IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE,	/* Second output frame ready. */
	IA_CSS_EVENT_TYPE_VF_OUTPUT_FRAME_DONE,		/* Viewfinder Output frame ready. */
	IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE,	/* Second viewfinder Output frame ready. */
	IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE,		/* Indication that 3A statistics are available. */
	IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE,		/* Indication that DIS statistics are available. */
	IA_CSS_EVENT_TYPE_PIPELINE_DONE,		/* Pipeline Done event, sent after last pipeline stage. */
	IA_CSS_EVENT_TYPE_FRAME_TAGGED,			/* Frame tagged. */
	IA_CSS_EVENT_TYPE_INPUT_FRAME_DONE,		/* Input frame ready. */
	IA_CSS_EVENT_TYPE_METADATA_DONE,		/* Metadata ready. */
	IA_CSS_EVENT_TYPE_LACE_STATISTICS_DONE,		/* Indication that LACE statistics are available. */
	IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE,		/* Extension stage executed. */
	IA_CSS_EVENT_TYPE_TIMER,			/* Timing measurement data. */
	IA_CSS_EVENT_TYPE_PORT_EOF,			/* End Of Frame event, sent when in buffered sensor mode. */
	IA_CSS_EVENT_TYPE_FW_WARNING,			/* Performance warning encountered by FW */
	IA_CSS_EVENT_TYPE_FW_ASSERT,			/* Assertion hit by FW */
	0,						/* error if sp passes SH_CSS_SP_EVENT_NR_OF_TYPES as a valid event. */
};
/*
 * ia_css_dequeue_psys_event() - pop and decode one event from the PSYS queue.
 * @event: output; filled with the decoded event type and its payload fields.
 *
 * Returns 0 on success, -EINVAL on bad/unknown input, -EBUSY when the SP is
 * not running or the event's pipe no longer exists, or the bufq error code.
 *
 * Fix vs. original: payload[0] comes from SP firmware and is a full byte
 * (0..255), but convert_event_sp_to_host_domain[] only has
 * SH_CSS_SP_EVENT_NR_OF_TYPES + 1 entries. Both lookups are now
 * bounds-checked so a bogus event code cannot cause an out-of-bounds read.
 */
int
ia_css_dequeue_psys_event(struct ia_css_event *event)
{
	enum ia_css_pipe_id pipe_id = 0;
	u8 payload[4] = {0, 0, 0, 0};
	int ret_err;

	/*
	 * TODO:
	 * a) use generic decoding function , same as the one used by sp.
	 * b) group decode and dequeue into eventQueue module
	 *
	 * We skip the IA_CSS_ENTER logging call
	 * to avoid flooding the logs when the host application
	 * uses polling.
	 */
	if (!event)
		return -EINVAL;

	/* SP is not running. The queues are not valid */
	if (!sh_css_sp_is_running())
		return -EBUSY;

	/* dequeue the event (if any) from the psys event queue */
	ret_err = ia_css_bufq_dequeue_psys_event(payload);
	if (ret_err)
		return ret_err;

	IA_CSS_LOG("event dequeued from psys event queue");

	/* Tell the SP that we dequeued an event from the event queue. */
	ia_css_bufq_enqueue_psys_event(
	    IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, 0, 0, 0);

	/*
	 * Events are decoded into 4 bytes of payload, the first byte
	 * contains the sp event type. This is converted to a host enum.
	 * TODO: can this enum conversion be eliminated
	 *
	 * The event code is untrusted firmware input: reject codes outside
	 * the conversion table instead of reading past its end.
	 */
	if (payload[0] >= ARRAY_SIZE(convert_event_sp_to_host_domain)) {
		IA_CSS_ERROR("Invalid SP event code: %u", payload[0]);
		return -EINVAL;
	}
	event->type = convert_event_sp_to_host_domain[payload[0]];
	/* Some sane default values since not all events use all fields. */
	event->pipe = NULL;
	event->port = MIPI_PORT0_ID;
	event->exp_id = 0;
	event->fw_warning = IA_CSS_FW_WARNING_NONE;
	event->fw_handle = 0;
	event->timer_data = 0;
	event->timer_code = 0;
	event->timer_subcode = 0;

	if (event->type == IA_CSS_EVENT_TYPE_TIMER) {
		/*
		 * timer event ??? get the 2nd event and decode the data
		 * into the event struct
		 */
		u32 tmp_data;
		/* 1st event: LSB 16-bit timer data and code */
		event->timer_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8));
		event->timer_code = payload[2];
		payload[0] = payload[1] = payload[2] = payload[3] = 0;
		ret_err = ia_css_bufq_dequeue_psys_event(payload);
		if (ret_err) {
			/* no 2nd event ??? an error */
			/*
			 * Putting IA_CSS_ERROR is resulting in failures in
			 * Merrifield smoke testing
			 */
			IA_CSS_WARNING("Timer: Error de-queuing the 2nd TIMER event!!!\n");
			return ret_err;
		}
		ia_css_bufq_enqueue_psys_event(
		    IA_CSS_PSYS_SW_EVENT_EVENT_DEQUEUED, 0, 0, 0);
		/* Bounds-check the 2nd event code as well (untrusted input). */
		if (payload[0] >= ARRAY_SIZE(convert_event_sp_to_host_domain)) {
			IA_CSS_ERROR("Invalid SP event code: %u", payload[0]);
			return -EINVAL;
		}
		event->type = convert_event_sp_to_host_domain[payload[0]];
		/* It's a timer */
		if (event->type == IA_CSS_EVENT_TYPE_TIMER) {
			/* 2nd event data: MSB 16-bit timer and subcode */
			tmp_data = ((payload[1] & 0xFF) | ((payload[3] & 0xFF) << 8));
			event->timer_data |= (tmp_data << 16);
			event->timer_subcode = payload[2];
		} else {
			/*
			 * It's a non timer event. So clear first half of the
			 * timer event data.
			 * If the second part of the TIMER event is not
			 * received, we discard the first half of the timer
			 * data and process the non timer event without
			 * affecting the flow. So the non timer event falls
			 * through the code.
			 */
			event->timer_data = 0;
			event->timer_code = 0;
			event->timer_subcode = 0;
			IA_CSS_ERROR("Missing 2nd timer event. Timer event discarded");
		}
	}
	if (event->type == IA_CSS_EVENT_TYPE_PORT_EOF) {
		event->port = (enum mipi_port_id)payload[1];
		event->exp_id = payload[3];
	} else if (event->type == IA_CSS_EVENT_TYPE_FW_WARNING) {
		event->fw_warning = (enum ia_css_fw_warning)payload[1];
		/* exp_id is only available in these warning types */
		if (event->fw_warning == IA_CSS_FW_WARNING_EXP_ID_LOCKED ||
		    event->fw_warning == IA_CSS_FW_WARNING_TAG_EXP_ID_FAILED)
			event->exp_id = payload[3];
	} else if (event->type == IA_CSS_EVENT_TYPE_FW_ASSERT) {
		event->fw_assert_module_id = payload[1]; /* module */
		event->fw_assert_line_no = (payload[2] << 8) + payload[3];
		/* payload[2] is line_no>>8, payload[3] is line_no&0xff */
	} else if (event->type != IA_CSS_EVENT_TYPE_TIMER) {
		/*
		 * pipe related events.
		 * payload[1] contains the pipe_num,
		 * payload[2] contains the pipe_id. These are different.
		 */
		event->pipe = find_pipe_by_num(payload[1]);
		pipe_id = (enum ia_css_pipe_id)payload[2];
		/* Check to see if pipe still exists */
		if (!event->pipe)
			return -EBUSY;

		if (event->type == IA_CSS_EVENT_TYPE_FRAME_TAGGED) {
			/* find the capture pipe that goes with this */
			int i, n;

			n = event->pipe->stream->num_pipes;
			for (i = 0; i < n; i++) {
				struct ia_css_pipe *p =
				    event->pipe->stream->pipes[i];

				if (p->config.mode == IA_CSS_PIPE_MODE_CAPTURE) {
					event->pipe = p;
					break;
				}
			}
			event->exp_id = payload[3];
		}
		if (event->type == IA_CSS_EVENT_TYPE_ACC_STAGE_COMPLETE) {
			/* payload[3] contains the acc fw handle. */
			u32 stage_num = (uint32_t)payload[3];

			ret_err = ia_css_pipeline_get_fw_from_stage(
				      &event->pipe->pipeline,
				      stage_num,
				      &event->fw_handle);
			if (ret_err) {
				IA_CSS_ERROR("Invalid stage num received for ACC event. stage_num:%u",
					     stage_num);
				return ret_err;
			}
		}
	}

	if (event->pipe)
		IA_CSS_LEAVE("event_id=%d, pipe_id=%d", event->type, pipe_id);
	else
		IA_CSS_LEAVE("event_id=%d", event->type);

	return 0;
}
/*
 * ia_css_dequeue_isys_event() - pop one event from the ISYS event queue.
 * @event: output; always reported as an EOF event for a CSI port.
 *
 * Returns 0 on success, -EINVAL for a NULL @event, -EBUSY when the SP is
 * not running, or the bufq dequeue error code.
 */
int
ia_css_dequeue_isys_event(struct ia_css_event *event)
{
	u8 payload[4] = {0, 0, 0, 0};
	int ret = 0;

	/*
	 * No IA_CSS_ENTER here: host applications poll this call and the
	 * trace would flood the logs.
	 */
	if (!event)
		return -EINVAL;

	/* Without a running SP the queues are not valid. */
	if (!sh_css_sp_is_running())
		return -EBUSY;

	ret = ia_css_bufq_dequeue_isys_event(payload);
	if (ret)
		return ret;

	IA_CSS_LOG("event dequeued from isys event queue");

	/* Acknowledge the dequeue towards the SP. */
	ia_css_bufq_enqueue_isys_event(IA_CSS_ISYS_SW_EVENT_EVENT_DEQUEUED);

	/* Decode the payload: ISYS only ever reports port EOF events. */
	event->type = IA_CSS_EVENT_TYPE_PORT_EOF;
	event->pipe = NULL;	/* EOF belongs to a CSI port, not a pipe */
	event->port = payload[1];
	event->exp_id = payload[3];

	IA_CSS_LEAVE_ERR(ret);
	return ret;
}
/*
 * sh_css_pipe_start() - start a stream's pipes and kick their SP threads.
 *
 * Starts the last pipe of @stream (and, for non-continuous multi-pipe
 * streams, every further pipe), forces an ISP parameter update unless the
 * SP copy binary is used, then sends START_STREAM software events to every
 * involved SP thread, including the copy/capture helper pipes in
 * continuous-capture mode.
 *
 * Returns 0 on success or a negative errno; -EBUSY when the SP is not
 * running when the start events must be sent.
 */
static int
sh_css_pipe_start(struct ia_css_stream *stream)
{
	int err = 0;
	struct ia_css_pipe *pipe;
	enum ia_css_pipe_id pipe_id;
	unsigned int thread_id;

	IA_CSS_ENTER_PRIVATE("stream = %p", stream);

	if (!stream) {
		IA_CSS_LEAVE_ERR(-EINVAL);
		return -EINVAL;
	}
	pipe = stream->last_pipe;
	if (!pipe) {
		IA_CSS_LEAVE_ERR(-EINVAL);
		return -EINVAL;
	}

	pipe_id = pipe->mode;

	/* Starting an already-started stream is a no-op, not an error. */
	if (stream->started) {
		IA_CSS_WARNING("Cannot start stream that is already started");
		IA_CSS_LEAVE_ERR(err);
		return err;
	}

	pipe->stop_requested = false;

	switch (pipe_id) {
	case IA_CSS_PIPE_ID_PREVIEW:
		err = preview_start(pipe);
		break;
	case IA_CSS_PIPE_ID_VIDEO:
		err = video_start(pipe);
		break;
	case IA_CSS_PIPE_ID_CAPTURE:
		err = capture_start(pipe);
		break;
	case IA_CSS_PIPE_ID_YUVPP:
		err = yuvpp_start(pipe);
		break;
	default:
		err = -EINVAL;
	}
	/* DH regular multi pipe - not continuous mode: start the next pipes too */
	if (!stream->config.continuous) {
		int i;

		/* Stop at the first pipe that fails to start. */
		for (i = 1; i < stream->num_pipes && 0 == err ; i++) {
			switch (stream->pipes[i]->mode) {
			case IA_CSS_PIPE_ID_PREVIEW:
				stream->pipes[i]->stop_requested = false;
				err = preview_start(stream->pipes[i]);
				break;
			case IA_CSS_PIPE_ID_VIDEO:
				stream->pipes[i]->stop_requested = false;
				err = video_start(stream->pipes[i]);
				break;
			case IA_CSS_PIPE_ID_CAPTURE:
				stream->pipes[i]->stop_requested = false;
				err = capture_start(stream->pipes[i]);
				break;
			case IA_CSS_PIPE_ID_YUVPP:
				stream->pipes[i]->stop_requested = false;
				err = yuvpp_start(stream->pipes[i]);
				break;
			default:
				err = -EINVAL;
			}
		}
	}
	if (err) {
		IA_CSS_LEAVE_ERR_PRIVATE(err);
		return err;
	}

	/*
	 * Force ISP parameter calculation after a mode change
	 * Acceleration API examples pass NULL for stream but they
	 * don't use ISP parameters anyway. So this should be okay.
	 * The SP binary (jpeg) copy does not use any parameters.
	 */
	if (!copy_on_sp(pipe)) {
		sh_css_invalidate_params(stream);
		err = sh_css_param_update_isp_params(pipe,
						     stream->isp_params_configs, true, NULL);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}

	ia_css_debug_pipe_graph_dump_epilogue();

	ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);

	if (!sh_css_sp_is_running()) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EBUSY);
		/* SP is not running. The queues are not valid */
		return -EBUSY;
	}
	ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_START_STREAM,
				       (uint8_t)thread_id, 0, 0);

	/* DH regular multi pipe - not continuous mode: enqueue event to the next pipes too */
	if (!stream->config.continuous) {
		int i;

		for (i = 1; i < stream->num_pipes; i++) {
			ia_css_pipeline_get_sp_thread_id(
			    ia_css_pipe_get_pipe_num(stream->pipes[i]),
			    &thread_id);
			ia_css_bufq_enqueue_psys_event(
			    IA_CSS_PSYS_SW_EVENT_START_STREAM,
			    (uint8_t)thread_id, 0, 0);
		}
	}

	/* in case of continuous capture mode, we also start capture thread and copy thread*/
	if (pipe->stream->config.continuous) {
		struct ia_css_pipe *copy_pipe = NULL;

		if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
			copy_pipe = pipe->pipe_settings.preview.copy_pipe;
		else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
			copy_pipe = pipe->pipe_settings.video.copy_pipe;

		if (!copy_pipe) {
			IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
			return -EINVAL;
		}
		ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(copy_pipe),
						 &thread_id);
		/* by the time we reach here q is initialized and handle is available.*/
		ia_css_bufq_enqueue_psys_event(
		    IA_CSS_PSYS_SW_EVENT_START_STREAM,
		    (uint8_t)thread_id, 0, 0);
	}
	if (pipe->stream->cont_capt) {
		struct ia_css_pipe *capture_pipe = NULL;

		if (pipe_id == IA_CSS_PIPE_ID_PREVIEW)
			capture_pipe = pipe->pipe_settings.preview.capture_pipe;
		else if (pipe_id == IA_CSS_PIPE_ID_VIDEO)
			capture_pipe = pipe->pipe_settings.video.capture_pipe;

		if (!capture_pipe) {
			IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
			return -EINVAL;
		}
		ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
						 &thread_id);
		/* by the time we reach here q is initialized and handle is available.*/
		ia_css_bufq_enqueue_psys_event(
		    IA_CSS_PSYS_SW_EVENT_START_STREAM,
		    (uint8_t)thread_id, 0, 0);
	}
	stream->started = true;

	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/* ISP2400 */
/*
 * Record the continuous-capture preferences. Only stop_copy_preview is
 * latched; the cont_capt flag itself is no longer stored here.
 */
void
sh_css_enable_cont_capt(bool enable, bool stop_copy_preview)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "sh_css_enable_cont_capt() enter: enable=%d\n", enable);
	//my_css.cont_capt = enable;
	my_css.stop_copy_preview = stop_copy_preview;
}
/*
 * sh_css_continuous_is_enabled() - report whether a pipe's stream runs in
 * continuous mode.
 * @pipe_num: pipe number as registered with find_pipe_by_num().
 *
 * Returns false for an unknown pipe. Fix vs. original: also guard against
 * a pipe whose ->stream pointer is NULL (e.g. pipe not yet attached to a
 * stream), which previously dereferenced NULL.
 */
bool
sh_css_continuous_is_enabled(uint8_t pipe_num)
{
	struct ia_css_pipe *pipe;
	bool continuous;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "sh_css_continuous_is_enabled() enter: pipe_num=%d\n", pipe_num);

	pipe = find_pipe_by_num(pipe_num);
	continuous = pipe && pipe->stream && pipe->stream->config.continuous;
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "sh_css_continuous_is_enabled() leave: enable=%d\n",
			    continuous);
	return continuous;
}
/* ISP2400 */
/*
 * Report the maximum supported raw-buffer depth. The stream argument is
 * unused; the limit is the global NUM_CONTINUOUS_FRAMES.
 */
int
ia_css_stream_get_max_buffer_depth(struct ia_css_stream *stream,
				   int *buffer_depth)
{
	if (!buffer_depth)
		return -EINVAL;
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_max_buffer_depth() enter: void\n");
	(void)stream;
	*buffer_depth = NUM_CONTINUOUS_FRAMES;
	return 0;
}
/*
 * ia_css_stream_set_buffer_depth() - set the continuous raw-buffer count.
 * @stream:       stream to configure (must be non-NULL).
 * @buffer_depth: requested depth, 1..NUM_CONTINUOUS_FRAMES inclusive.
 *
 * Returns 0 on success, -EINVAL for a NULL stream or out-of-range depth.
 * Fix vs. original: the function dereferenced @stream without a NULL check
 * (while a stale "(void)stream;" claimed it was unused); validate it first.
 */
int
ia_css_stream_set_buffer_depth(struct ia_css_stream *stream, int buffer_depth)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_set_buffer_depth() enter: num_frames=%d\n", buffer_depth);
	if (!stream)
		return -EINVAL;
	if (buffer_depth > NUM_CONTINUOUS_FRAMES || buffer_depth < 1)
		return -EINVAL;
	/* ok, value allowed */
	stream->config.target_num_cont_raw_buf = buffer_depth;
	/* TODO: check what to regarding initialization */
	return 0;
}
/* ISP2401 */
/*
 * ia_css_stream_get_buffer_depth() - read back the configured raw-buffer
 * count into *@buffer_depth.
 *
 * Returns 0 on success, -EINVAL for NULL arguments. Fix vs. original: the
 * function dereferenced @stream without a NULL check (while a stale
 * "(void)stream;" claimed it was unused); validate it first.
 */
int
ia_css_stream_get_buffer_depth(struct ia_css_stream *stream,
			       int *buffer_depth)
{
	if (!stream || !buffer_depth)
		return -EINVAL;
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_buffer_depth() enter: void\n");
	*buffer_depth = stream->config.target_num_cont_raw_buf;
	return 0;
}
#if !defined(ISP2401)
/* Return the registered MIPI size-check value for @port at entry @idx. */
unsigned int
sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx)
{
	unsigned int size;

	OP___assert(port < N_CSI_PORTS);
	OP___assert(idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT);

	size = my_css.mipi_sizes_for_check[port][idx];
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "sh_css_get_mipi_sizes_for_check(port %d, idx %d): %d\n",
			    port, idx, size);
	return size;
}
#endif
static int sh_css_pipe_configure_output(
struct ia_css_pipe *pipe,
unsigned int width,
unsigned int height,
unsigned int padded_width,
enum ia_css_frame_format format,
unsigned int idx)
{
int err = 0;
IA_CSS_ENTER_PRIVATE("pipe = %p, width = %d, height = %d, padded width = %d, format = %d, idx = %d",
pipe, width, height, padded_width, format, idx);
if (!pipe) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
err = ia_css_util_check_res(width, height);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
if (pipe->output_info[idx].res.width != width ||
pipe->output_info[idx].res.height != height ||
pipe->output_info[idx].format != format) {
ia_css_frame_info_init(
&pipe->output_info[idx],
width,
height,
format,
padded_width);
}
IA_CSS_LEAVE_ERR_PRIVATE(0);
return 0;
}
/*
 * Fill @shading_info from the pipe's shading-correction binary, if any.
 * A pipe without such a binary is not an error: the info is zeroed and 0
 * is returned.
 */
static int
sh_css_pipe_get_shading_info(struct ia_css_pipe *pipe,
			     struct ia_css_shading_info *shading_info,
			     struct ia_css_pipe_config *pipe_config)
{
	struct ia_css_binary *binary;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "sh_css_pipe_get_shading_info() enter:\n");

	binary = ia_css_pipe_get_shading_correction_binary(pipe);
	if (!binary) {
		/* No shading-correction binary: report zeroed info, not an error. */
		memset(shading_info, 0, sizeof(*shading_info));
		return 0;
	}

	/*
	 * Other correction types can be dispatched here when they are added
	 * in the future; today only TYPE_1 exists.
	 */
	return ia_css_binary_get_shading_info(binary,
					      IA_CSS_SHADING_CORRECTION_TYPE_1,
					      pipe->required_bds_factor,
					      (const struct ia_css_stream_config *)&pipe->stream->config,
					      shading_info, pipe_config);
}
/*
 * Fill @info with the 3A and DVS grid layouts of @pipe's binaries; grids
 * without a corresponding binary are zeroed. Returns 0 or the error from
 * ia_css_binary_3a_grid_info().
 */
static int
sh_css_pipe_get_grid_info(struct ia_css_pipe *pipe,
			  struct ia_css_grid_info *info)
{
	int err = 0;
	struct ia_css_binary *binary = NULL;

	assert(pipe);
	assert(info);
	IA_CSS_ENTER_PRIVATE("");

	binary = ia_css_pipe_get_s3a_binary(pipe);

	if (binary) {
		err = ia_css_binary_3a_grid_info(binary, info, pipe);
		if (err)
			goto err;
	} else {
		memset(&info->s3a_grid, 0, sizeof(info->s3a_grid));
	}

	/* binary is reused below for the SDIS lookup. */
	binary = ia_css_pipe_get_sdis_binary(pipe);

	if (binary) {
		ia_css_binary_dvs_grid_info(binary, info, pipe);
		ia_css_binary_dvs_stat_grid_info(binary, info, pipe);
	} else {
		memset(&info->dvs_grid, 0, sizeof(info->dvs_grid));
		memset(&info->dvs_grid.dvs_stat_grid_info, 0,
		       sizeof(info->dvs_grid.dvs_stat_grid_info));
	}

	if (binary) {
		/* copy pipe does not have ISP binary*/
		info->isp_in_width = binary->internal_frame_info.res.width;
		info->isp_in_height = binary->internal_frame_info.res.height;
	}

	info->vamem_type = IA_CSS_VAMEM_TYPE_2;

/* The success path falls through this label with err == 0. */
err:
	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/* ISP2401 */
/*
* @brief Check if a format is supported by the pipe.
*
*/
static int
ia_css_pipe_check_format(struct ia_css_pipe *pipe,
			 enum ia_css_frame_format format)
{
	/* Scan the video binary's output-format table for @format. */
	const enum ia_css_frame_format *supported_formats;
	int number_of_formats;
	int found = 0;
	int i;

	IA_CSS_ENTER_PRIVATE("");

	if (!pipe || !pipe->pipe_settings.video.video_binary.info) {
		IA_CSS_ERROR("Pipe or binary info is not set");
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	supported_formats = pipe->pipe_settings.video.video_binary.info->output_formats;
	number_of_formats = sizeof(pipe->pipe_settings.video.video_binary.info->output_formats) / sizeof(enum ia_css_frame_format);

	for (i = 0; i < number_of_formats; i++) {
		if (supported_formats[i] == format) {
			found = 1;
			break;
		}
	}

	if (!found) {
		IA_CSS_ERROR("Requested format is not supported by binary");
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	IA_CSS_LEAVE_ERR_PRIVATE(0);
	return 0;
}
/*
 * load_video_binaries() - select and load every ISP binary a video pipe
 * needs: optional cascaded YUV scalers, the main video binary, an optional
 * copy binary (offline, non-continuous, pre-ISP2401 only), an optional
 * vf_pp binary, and the delay/TNR reference frames.
 *
 * Idempotent: returns 0 immediately if the video binary is already loaded.
 * Returns 0 on success or a negative errno.
 */
static int load_video_binaries(struct ia_css_pipe *pipe)
{
	struct ia_css_frame_info video_in_info, tnr_info,
		       *video_vf_info, video_bds_out_info, *pipe_out_info, *pipe_vf_out_info;
	bool online;
	int err = 0;
	bool continuous = pipe->stream->config.continuous;
	unsigned int i;
	unsigned int num_output_pins;
	struct ia_css_frame_info video_bin_out_info;
	bool need_scaler = false;
	bool vf_res_different_than_output = false;
	bool need_vf_pp = false;
	int vf_ds_log2;
	struct ia_css_video_settings *mycs = &pipe->pipe_settings.video;

	IA_CSS_ENTER_PRIVATE("");
	assert(pipe);
	assert(pipe->mode == IA_CSS_PIPE_ID_VIDEO);

	/*
	 * we only test the video_binary because offline video doesn't need a
	 * vf_pp binary and online does not (always use) the copy_binary.
	 * All are always reset at the same time anyway.
	 */
	if (mycs->video_binary.info)
		return 0;

	online = pipe->stream->config.online;
	pipe_out_info = &pipe->output_info[0];
	pipe_vf_out_info = &pipe->vf_output_info[0];

	assert(pipe_out_info);

	/*
	 * There is no explicit input format requirement for raw or yuv
	 * What matters is that there is a binary that supports the stream format.
	 * This is checked in the binary_find(), so no need to check it here
	 */
	err = ia_css_util_check_input(&pipe->stream->config, false, false);
	if (err)
		return err;
	/* cannot have online video and input_mode memory */
	if (online && pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY)
		return -EINVAL;
	if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
		err = ia_css_util_check_vf_out_info(pipe_out_info,
						    pipe_vf_out_info);
		if (err)
			return err;
	} else {
		err = ia_css_frame_check_info(pipe_out_info);
		if (err)
			return err;
	}

	/* The binary's own output may differ from the pipe output when a
	 * YUV downscaler input override is configured. */
	if (pipe->out_yuv_ds_input_info.res.width)
		video_bin_out_info = pipe->out_yuv_ds_input_info;
	else
		video_bin_out_info = *pipe_out_info;

	/* Video */
	if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
		video_vf_info = pipe_vf_out_info;
		vf_res_different_than_output = (video_vf_info->res.width !=
						video_bin_out_info.res.width) ||
					       (video_vf_info->res.height != video_bin_out_info.res.height);
	} else {
		video_vf_info = NULL;
	}

	need_scaler = need_downscaling(video_bin_out_info.res, pipe_out_info->res);

	/* we build up the pipeline starting at the end */
	/* YUV post-processing if needed */
	if (need_scaler) {
		struct ia_css_cas_binary_descr cas_scaler_descr = { };

		/* NV12 is the common format that is supported by both */
		/* yuv_scaler and the video_xx_isp2_min binaries. */
		video_bin_out_info.format = IA_CSS_FRAME_FORMAT_NV12;

		err = ia_css_pipe_create_cas_scaler_desc_single_output(
			  &video_bin_out_info,
			  pipe_out_info,
			  NULL,
			  &cas_scaler_descr);
		if (err)
			return err;
		mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
		mycs->yuv_scaler_binary = kcalloc(cas_scaler_descr.num_stage,
						  sizeof(struct ia_css_binary),
						  GFP_KERNEL);
		if (!mycs->yuv_scaler_binary) {
			err = -ENOMEM;
			return err;
		}
		mycs->is_output_stage = kcalloc(cas_scaler_descr.num_stage,
						sizeof(bool), GFP_KERNEL);
		if (!mycs->is_output_stage) {
			/*
			 * NOTE(review): yuv_scaler_binary is left allocated on
			 * this path and on the binary_find failure below; it
			 * appears to be reclaimed by unload_video_binaries() -
			 * confirm callers always unload on error.
			 */
			err = -ENOMEM;
			return err;
		}
		for (i = 0; i < cas_scaler_descr.num_stage; i++) {
			struct ia_css_binary_descr yuv_scaler_descr;

			mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
			ia_css_pipe_get_yuvscaler_binarydesc(pipe,
							     &yuv_scaler_descr, &cas_scaler_descr.in_info[i],
							     &cas_scaler_descr.out_info[i],
							     &cas_scaler_descr.internal_out_info[i],
							     &cas_scaler_descr.vf_info[i]);
			err = ia_css_binary_find(&yuv_scaler_descr,
						 &mycs->yuv_scaler_binary[i]);
			if (err) {
				kfree(mycs->is_output_stage);
				mycs->is_output_stage = NULL;
				return err;
			}
		}
		ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
	}

	{
		struct ia_css_binary_descr video_descr;
		enum ia_css_frame_format vf_info_format;

		err = ia_css_pipe_get_video_binarydesc(pipe,
						       &video_descr, &video_in_info, &video_bds_out_info, &video_bin_out_info,
						       video_vf_info,
						       pipe->stream->config.left_padding);
		if (err)
			return err;

		/*
		 * In the case where video_vf_info is not NULL, this allows
		 * us to find a potential video library with desired vf format.
		 * If success, no vf_pp binary is needed.
		 * If failed, we will look up video binary with YUV_LINE vf format
		 */
		err = ia_css_binary_find(&video_descr,
					 &mycs->video_binary);

		if (err) {
			/* This will do another video binary lookup later for YUV_LINE format*/
			if (video_vf_info)
				need_vf_pp = true;
			else
				return err;
		} else if (video_vf_info) {
			/*
			 * The first video binary lookup is successful, but we
			 * may still need vf_pp binary based on additional check
			 */
			num_output_pins = mycs->video_binary.info->num_output_pins;
			vf_ds_log2 = mycs->video_binary.vf_downscale_log2;

			/*
			 * If the binary has dual output pins, we need vf_pp
			 * if the resolution is different.
			 */
			need_vf_pp |= ((num_output_pins == 2) && vf_res_different_than_output);

			/*
			 * If the binary has single output pin, we need vf_pp
			 * if additional scaling is needed for vf
			 */
			need_vf_pp |= ((num_output_pins == 1) &&
				       ((video_vf_info->res.width << vf_ds_log2 != pipe_out_info->res.width) ||
					(video_vf_info->res.height << vf_ds_log2 != pipe_out_info->res.height)));
		}

		if (need_vf_pp) {
			/* save the current vf_info format for restoration later */
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "load_video_binaries() need_vf_pp; find video binary with YUV_LINE again\n");

			vf_info_format = video_vf_info->format;

			if (!pipe->config.enable_vfpp_bci)
				ia_css_frame_info_set_format(video_vf_info,
							     IA_CSS_FRAME_FORMAT_YUV_LINE);

			ia_css_binary_destroy_isp_parameters(&mycs->video_binary);

			err = ia_css_binary_find(&video_descr,
						 &mycs->video_binary);

			/* restore original vf_info format */
			ia_css_frame_info_set_format(video_vf_info,
						     vf_info_format);
			if (err)
				return err;
		}
	}

	/*
	 * If a video binary does not use a ref_frame, we set the frame delay
	 * to 0. This is the case for the 1-stage low-power video binary.
	 */
	if (!mycs->video_binary.info->sp.enable.ref_frame)
		pipe->dvs_frame_delay = 0;

	/*
	 * The delay latency determines the number of invalid frames after
	 * a stream is started.
	 */
	pipe->num_invalid_frames = pipe->dvs_frame_delay;
	pipe->info.num_invalid_frames = pipe->num_invalid_frames;

	/*
	 * Viewfinder frames also decrement num_invalid_frames. If the pipe
	 * outputs a viewfinder output, then we need double the number of
	 * invalid frames
	 */
	if (video_vf_info)
		pipe->num_invalid_frames *= 2;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "load_video_binaries() num_invalid_frames=%d dvs_frame_delay=%d\n",
			    pipe->num_invalid_frames, pipe->dvs_frame_delay);

	/* pqiao TODO: temp hack for PO, should be removed after offline YUVPP is enabled */
	if (!IS_ISP2401) {
		/* Copy */
		if (!online && !continuous) {
			/*
			 * TODO: what exactly needs doing, prepend the copy binary to
			 * video base this only on !online?
			 */
			err = load_copy_binary(pipe,
					       &mycs->copy_binary,
					       &mycs->video_binary);
			if (err)
				return err;
		}
	}

	if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] && need_vf_pp) {
		struct ia_css_binary_descr vf_pp_descr;

		if (mycs->video_binary.vf_frame_info.format
		    == IA_CSS_FRAME_FORMAT_YUV_LINE) {
			ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
							&mycs->video_binary.vf_frame_info,
							pipe_vf_out_info);
		} else {
			/*
			 * output from main binary is not yuv line. currently
			 * this is possible only when bci is enabled on vfpp
			 * output
			 */
			assert(pipe->config.enable_vfpp_bci);
			ia_css_pipe_get_yuvscaler_binarydesc(pipe, &vf_pp_descr,
							     &mycs->video_binary.vf_frame_info,
							     pipe_vf_out_info, NULL, NULL);
		}

		err = ia_css_binary_find(&vf_pp_descr,
					 &mycs->vf_pp_binary);
		if (err)
			return err;
	}

	err = allocate_delay_frames(pipe);

	if (err)
		return err;

	/* Size the TNR reference frames after the chosen video binary. */
	if (mycs->video_binary.info->sp.enable.block_output) {
		tnr_info = mycs->video_binary.out_frame_info[0];

		/* Make tnr reference buffers output block height align */
		tnr_info.res.height = CEIL_MUL(tnr_info.res.height,
					       mycs->video_binary.info->sp.block.output_block_height);
	} else {
		tnr_info = mycs->video_binary.internal_frame_info;
	}
	tnr_info.format = IA_CSS_FRAME_FORMAT_YUV_LINE;
	tnr_info.raw_bit_depth = SH_CSS_TNR_BIT_DEPTH;

	for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
		if (mycs->tnr_frames[i]) {
			ia_css_frame_free(mycs->tnr_frames[i]);
			mycs->tnr_frames[i] = NULL;
		}
		err = ia_css_frame_allocate_from_info(
			  &mycs->tnr_frames[i],
			  &tnr_info);
		if (err)
			return err;
	}
	IA_CSS_LEAVE_PRIVATE("");
	return 0;
}
static int
unload_video_binaries(struct ia_css_pipe *pipe)
{
unsigned int i;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
ia_css_binary_unload(&pipe->pipe_settings.video.copy_binary);
ia_css_binary_unload(&pipe->pipe_settings.video.video_binary);
ia_css_binary_unload(&pipe->pipe_settings.video.vf_pp_binary);
for (i = 0; i < pipe->pipe_settings.video.num_yuv_scaler; i++)
ia_css_binary_unload(&pipe->pipe_settings.video.yuv_scaler_binary[i]);
kfree(pipe->pipe_settings.video.is_output_stage);
pipe->pipe_settings.video.is_output_stage = NULL;
kfree(pipe->pipe_settings.video.yuv_scaler_binary);
pipe->pipe_settings.video.yuv_scaler_binary = NULL;
IA_CSS_LEAVE_ERR_PRIVATE(0);
return 0;
}
/*
 * Start a video pipe: send MIPI/raw frames to the SP, then construct and
 * load the (optional) copy and continuous-capture pipelines before
 * starting the video pipeline itself.
 *
 * Fixes: the copy-pipeline call contained the mojibake "©_pipe" (an
 * HTML-entity corruption of "&copy_pipe"), which does not compile; and
 * the send_mipi_frames() early return now emits the leave trace like
 * every other error path in this function.
 */
static int video_start(struct ia_css_pipe *pipe)
{
	int err = 0;
	struct ia_css_pipe *copy_pipe, *capture_pipe;
	enum sh_css_pipe_config_override copy_ovrd;
	enum ia_css_input_mode video_pipe_input_mode;
	unsigned int thread_id;

	IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
	if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_VIDEO)) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}
	video_pipe_input_mode = pipe->stream->config.mode;
	copy_pipe = pipe->pipe_settings.video.copy_pipe;
	capture_pipe = pipe->pipe_settings.video.capture_pipe;
	sh_css_metrics_start_frame();
	/* multi stream video needs mipi buffers */
	err = send_mipi_frames(pipe);
	if (err) {
		IA_CSS_LEAVE_ERR_PRIVATE(err);
		return err;
	}
	send_raw_frames(pipe);
	/* The copy override mask selects the SP threads the copy feeds. */
	ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
	copy_ovrd = 1 << thread_id;
	if (pipe->stream->cont_capt) {
		ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(capture_pipe),
						 &thread_id);
		copy_ovrd |= 1 << thread_id;
	}
	/* Construct and load the copy pipe */
	if (pipe->stream->config.continuous) {
		sh_css_sp_init_pipeline(&copy_pipe->pipeline,
					IA_CSS_PIPE_ID_COPY,
					(uint8_t)ia_css_pipe_get_pipe_num(copy_pipe),
					false,
					pipe->stream->config.pixels_per_clock == 2, false,
					false, pipe->required_bds_factor,
					copy_ovrd,
					pipe->stream->config.mode,
					&pipe->stream->config.metadata_config,
					&pipe->stream->info.metadata_info,
					pipe->stream->config.source.port.port);
		/*
		 * make the video pipe start with mem mode input, copy handles
		 * the actual mode
		 */
		video_pipe_input_mode = IA_CSS_INPUT_MODE_MEMORY;
	}
	/* Construct and load the capture pipe */
	if (pipe->stream->cont_capt) {
		sh_css_sp_init_pipeline(&capture_pipe->pipeline,
					IA_CSS_PIPE_ID_CAPTURE,
					(uint8_t)ia_css_pipe_get_pipe_num(capture_pipe),
					capture_pipe->config.default_capture_config.enable_xnr != 0,
					capture_pipe->stream->config.pixels_per_clock == 2,
					true, /* continuous */
					false, /* offline */
					capture_pipe->required_bds_factor,
					0,
					IA_CSS_INPUT_MODE_MEMORY,
					&pipe->stream->config.metadata_config,
					&pipe->stream->info.metadata_info,
					(enum mipi_port_id)0);
	}
	start_pipe(pipe, copy_ovrd, video_pipe_input_mode);
	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/*
 * Copy the viewfinder frame info of output stage @idx into @info.
 * Returns -EINVAL for RAW/Bayer capture pipes, which produce no
 * viewfinder stream.
 *
 * Fixes: the leave-trace format string was built with backslash line
 * continuations, embedding the source indentation (runs of tabs) into
 * the emitted debug message; use adjacent string-literal concatenation
 * instead so the message is a single clean line.
 */
static
int sh_css_pipe_get_viewfinder_frame_info(
    struct ia_css_pipe *pipe,
    struct ia_css_frame_info *info,
    unsigned int idx)
{
	assert(pipe);
	assert(info);
	/* We could print the pointer as input arg, and the values as output */
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "sh_css_pipe_get_viewfinder_frame_info() enter: void\n");
	if (pipe->mode == IA_CSS_PIPE_ID_CAPTURE &&
	    (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW ||
	     pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER))
		return -EINVAL;
	/* offline video does not generate viewfinder output */
	*info = pipe->vf_output_info[idx];
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "sh_css_pipe_get_viewfinder_frame_info() leave: "
			    "info.res.width=%d, info.res.height=%d, "
			    "info.padded_width=%d, info.format=%d, "
			    "info.raw_bit_depth=%d, info.raw_bayer_order=%d\n",
			    info->res.width, info->res.height,
			    info->padded_width, info->format,
			    info->raw_bit_depth, info->raw_bayer_order);
	return 0;
}
static int
sh_css_pipe_configure_viewfinder(struct ia_css_pipe *pipe, unsigned int width,
unsigned int height, unsigned int min_width,
enum ia_css_frame_format format,
unsigned int idx)
{
int err = 0;
IA_CSS_ENTER_PRIVATE("pipe = %p, width = %d, height = %d, min_width = %d, format = %d, idx = %d\n",
pipe, width, height, min_width, format, idx);
if (!pipe) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
err = ia_css_util_check_res(width, height);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
if (pipe->vf_output_info[idx].res.width != width ||
pipe->vf_output_info[idx].res.height != height ||
pipe->vf_output_info[idx].format != format)
ia_css_frame_info_init(&pipe->vf_output_info[idx], width, height,
format, min_width);
IA_CSS_LEAVE_ERR_PRIVATE(0);
return 0;
}
/*
 * Load the single copy binary used by RAW-capture / copy pipes.
 * Returns 0 if the binary is already loaded or on success; otherwise
 * a negative errno from the frame-info check, the output-format
 * verification or the binary load.
 */
static int load_copy_binaries(struct ia_css_pipe *pipe)
{
	int err = 0;
	assert(pipe);
	IA_CSS_ENTER_PRIVATE("");
	assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
	       pipe->mode == IA_CSS_PIPE_ID_COPY);
	/* Nothing to do when the copy binary is already loaded. */
	if (pipe->pipe_settings.capture.copy_binary.info)
		return 0;
	err = ia_css_frame_check_info(&pipe->output_info[0]);
	if (err)
		goto ERR;
	err = verify_copy_out_frame_format(pipe);
	if (err)
		goto ERR;
	err = load_copy_binary(pipe,
			       &pipe->pipe_settings.capture.copy_binary,
			       NULL);
ERR:
	/* Single exit point so the leave trace always reports err. */
	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/*
 * Decide whether the capture_pp binary is needed for this pipe:
 * XNR, digital zoom, or YUV downscaling all require it.
 *
 * Fix: compute the output-info pointer only after pipe has been
 * validated by the asserts, instead of in the initializer.
 */
static bool need_capture_pp(
    const struct ia_css_pipe *pipe)
{
	const struct ia_css_frame_info *out_info;

	IA_CSS_ENTER_LEAVE_PRIVATE("");
	assert(pipe);
	assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
	out_info = &pipe->output_info[0];
	/* determine whether we need to use the capture_pp binary.
	 * This is needed for:
	 * 1. XNR or
	 * 2. Digital Zoom or
	 * 3. YUV downscaling
	 */
	if (pipe->out_yuv_ds_input_info.res.width &&
	    ((pipe->out_yuv_ds_input_info.res.width != out_info->res.width) ||
	     (pipe->out_yuv_ds_input_info.res.height != out_info->res.height)))
		return true;
	if (pipe->config.default_capture_config.enable_xnr != 0)
		return true;
	if ((pipe->stream->isp_params_configs->dz_config.dx < HRT_GDC_N) ||
	    (pipe->stream->isp_params_configs->dz_config.dy < HRT_GDC_N) ||
	    pipe->config.enable_dz)
		return true;
	return false;
}
/*
 * Lens distortion correction is required exactly when 6-axis DVS is
 * enabled in the pipe's extra configuration.
 */
static bool need_capt_ldc(
    const struct ia_css_pipe *pipe)
{
	IA_CSS_ENTER_LEAVE_PRIVATE("");
	assert(pipe);
	assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE);
	return pipe->extra_config.enable_dvs_6axis;
}
/*
 * Map an ISP pipe version to its number of primary capture stages.
 * Writes the count into *num and returns 0, or -EINVAL for a NULL
 * pointer or an unknown version.
 */
static int set_num_primary_stages(unsigned int *num,
				  enum ia_css_pipe_version version)
{
	if (!num)
		return -EINVAL;

	switch (version) {
	case IA_CSS_PIPE_VERSION_2_6_1:
		*num = NUM_PRIMARY_HQ_STAGES;
		return 0;
	case IA_CSS_PIPE_VERSION_2_2:
	case IA_CSS_PIPE_VERSION_1:
		*num = NUM_PRIMARY_STAGES;
		return 0;
	default:
		return -EINVAL;
	}
}
/*
 * Load every ISP binary needed by a primary capture pipe. The pipeline
 * is built back-to-front: optional cascaded YUV scalers, capture
 * post-processing (with optional LDC), the primary stage(s),
 * viewfinder post-processing, and finally an optional ISP copy binary.
 * Also allocates the DVS delay frames. Returns 0 or a negative errno.
 */
static int load_primary_binaries(
    struct ia_css_pipe *pipe)
{
	bool online = false;
	bool need_pp = false;
	bool need_isp_copy_binary = false;
	bool need_ldc = false;
	bool sensor = false;
	bool memory, continuous;
	struct ia_css_frame_info prim_in_info,
		   prim_out_info,
		   capt_pp_out_info, vf_info,
		   *vf_pp_in_info, *pipe_out_info,
		   *pipe_vf_out_info, *capt_pp_in_info,
		   capt_ldc_out_info;
	int err = 0;
	struct ia_css_capture_settings *mycs;
	unsigned int i;
	bool need_extra_yuv_scaler = false;
	struct ia_css_binary_descr prim_descr[MAX_NUM_PRIMARY_STAGES];
	IA_CSS_ENTER_PRIVATE("");
	assert(pipe);
	assert(pipe->stream);
	assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
	       pipe->mode == IA_CSS_PIPE_ID_COPY);
	online = pipe->stream->config.online;
	sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
	memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
	continuous = pipe->stream->config.continuous;
	mycs = &pipe->pipe_settings.capture;
	pipe_out_info = &pipe->output_info[0];
	pipe_vf_out_info = &pipe->vf_output_info[0];
	/* Binaries are already loaded for this pipe; nothing to do. */
	if (mycs->primary_binary[0].info)
		return 0;
	err = set_num_primary_stages(&mycs->num_primary_stage,
				     pipe->config.isp_pipe_version);
	if (err) {
		IA_CSS_LEAVE_ERR_PRIVATE(err);
		return err;
	}
	/* Validate output (and viewfinder, if enabled) frame geometry. */
	if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
		err = ia_css_util_check_vf_out_info(pipe_out_info, pipe_vf_out_info);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	} else {
		err = ia_css_frame_check_info(pipe_out_info);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}
	need_pp = need_capture_pp(pipe);
	/*
	 * we use the vf output info to get the primary/capture_pp binary
	 * configured for vf_veceven. It will select the closest downscaling
	 * factor.
	 */
	vf_info = *pipe_vf_out_info;
	/*
	 * WARNING: The #if def flag has been added below as a
	 * temporary solution to solve the problem of enabling the
	 * view finder in a single binary in a capture flow. The
	 * vf-pp stage has been removed for Skycam in the solution
	 * provided. The vf-pp stage should be re-introduced when
	 * required. This should not be considered as a clean solution.
	 * Proper investigation should be done to come up with the clean
	 * solution.
	 */
	ia_css_frame_info_set_format(&vf_info, IA_CSS_FRAME_FORMAT_YUV_LINE);
	/*
	 * TODO: All this yuv_scaler and capturepp calculation logic
	 * can be shared later. Capture_pp is also a yuv_scale binary
	 * with extra XNR funcionality. Therefore, it can be made as the
	 * first step of the cascade.
	 */
	capt_pp_out_info = pipe->out_yuv_ds_input_info;
	capt_pp_out_info.format = IA_CSS_FRAME_FORMAT_YUV420;
	capt_pp_out_info.res.width /= MAX_PREFERRED_YUV_DS_PER_STEP;
	capt_pp_out_info.res.height /= MAX_PREFERRED_YUV_DS_PER_STEP;
	ia_css_frame_info_set_width(&capt_pp_out_info, capt_pp_out_info.res.width, 0);
	need_extra_yuv_scaler = need_downscaling(capt_pp_out_info.res,
						 pipe_out_info->res);
	/* Cascaded YUV scalers when capture_pp alone cannot reach the
	 * requested output resolution. */
	if (need_extra_yuv_scaler) {
		struct ia_css_cas_binary_descr cas_scaler_descr = { };
		err = ia_css_pipe_create_cas_scaler_desc_single_output(
			  &capt_pp_out_info,
			  pipe_out_info,
			  NULL,
			  &cas_scaler_descr);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
		mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
		mycs->yuv_scaler_binary = kcalloc(cas_scaler_descr.num_stage,
						  sizeof(struct ia_css_binary),
						  GFP_KERNEL);
		if (!mycs->yuv_scaler_binary) {
			err = -ENOMEM;
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
		mycs->is_output_stage = kcalloc(cas_scaler_descr.num_stage,
						sizeof(bool), GFP_KERNEL);
		if (!mycs->is_output_stage) {
			err = -ENOMEM;
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
		for (i = 0; i < cas_scaler_descr.num_stage; i++) {
			struct ia_css_binary_descr yuv_scaler_descr;
			mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
			ia_css_pipe_get_yuvscaler_binarydesc(pipe,
							     &yuv_scaler_descr, &cas_scaler_descr.in_info[i],
							     &cas_scaler_descr.out_info[i],
							     &cas_scaler_descr.internal_out_info[i],
							     &cas_scaler_descr.vf_info[i]);
			err = ia_css_binary_find(&yuv_scaler_descr,
						 &mycs->yuv_scaler_binary[i]);
			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
		}
		ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
	} else {
		capt_pp_out_info = pipe->output_info[0];
	}
	/* TODO Do we disable ldc for skycam */
	need_ldc = need_capt_ldc(pipe);
	/* we build up the pipeline starting at the end */
	/* Capture post-processing */
	if (need_pp) {
		struct ia_css_binary_descr capture_pp_descr;
		/* With LDC, capture_pp consumes the LDC output instead of
		 * the primary output. */
		capt_pp_in_info = need_ldc ? &capt_ldc_out_info : &prim_out_info;
		ia_css_pipe_get_capturepp_binarydesc(pipe,
						     &capture_pp_descr,
						     capt_pp_in_info,
						     &capt_pp_out_info,
						     &vf_info);
		err = ia_css_binary_find(&capture_pp_descr,
					 &mycs->capture_pp_binary);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
		if (need_ldc) {
			struct ia_css_binary_descr capt_ldc_descr;
			ia_css_pipe_get_ldc_binarydesc(pipe,
						       &capt_ldc_descr,
						       &prim_out_info,
						       &capt_ldc_out_info);
			err = ia_css_binary_find(&capt_ldc_descr,
						 &mycs->capture_ldc_binary);
			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
		}
	} else {
		prim_out_info = *pipe_out_info;
	}
	/* Primary */
	for (i = 0; i < mycs->num_primary_stage; i++) {
		struct ia_css_frame_info *local_vf_info = NULL;
		/* Only the last primary stage feeds the viewfinder. */
		if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0] &&
		    (i == mycs->num_primary_stage - 1))
			local_vf_info = &vf_info;
		ia_css_pipe_get_primary_binarydesc(pipe, &prim_descr[i],
						   &prim_in_info, &prim_out_info,
						   local_vf_info, i);
		err = ia_css_binary_find(&prim_descr[i], &mycs->primary_binary[i]);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}
	/* Viewfinder post-processing */
	if (need_pp)
		vf_pp_in_info = &mycs->capture_pp_binary.vf_frame_info;
	else
		vf_pp_in_info = &mycs->primary_binary[mycs->num_primary_stage - 1].vf_frame_info;
	/*
	 * WARNING: The #if def flag has been added below as a
	 * temporary solution to solve the problem of enabling the
	 * view finder in a single binary in a capture flow. The
	 * vf-pp stage has been removed for Skycam in the solution
	 * provided. The vf-pp stage should be re-introduced when
	 * required. This should not be considered as a clean solution.
	 * Proper investigation should be done to come up with the clean
	 * solution.
	 */
	if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
		struct ia_css_binary_descr vf_pp_descr;
		ia_css_pipe_get_vfpp_binarydesc(pipe,
						&vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
		err = ia_css_binary_find(&vf_pp_descr, &mycs->vf_pp_binary);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}
	err = allocate_delay_frames(pipe);
	if (err)
		return err;
	if (IS_ISP2401)
		/*
		 * When the input system is 2401, only the Direct Sensor Mode
		 * Offline Capture uses the ISP copy binary.
		 */
		need_isp_copy_binary = !online && sensor;
	else
		need_isp_copy_binary = !online && !continuous && !memory;
	/* ISP Copy */
	if (need_isp_copy_binary) {
		err = load_copy_binary(pipe,
				       &mycs->copy_binary,
				       &mycs->primary_binary[0]);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}
	return 0;
}
/*
 * Allocate the DVS delay frames (dvs_frame_delay + 1 of them) for a
 * video or preview pipe, sized from that pipe's internal frame info.
 * Capture pipes need no delay frames and return 0 immediately; unknown
 * pipe modes are rejected with -EINVAL.
 */
static int
allocate_delay_frames(struct ia_css_pipe *pipe)
{
	unsigned int num_delay_frames = 0, i = 0;
	unsigned int dvs_frame_delay = 0;
	struct ia_css_frame_info ref_info;
	int err = 0;
	enum ia_css_pipe_id mode = IA_CSS_PIPE_ID_VIDEO;
	struct ia_css_frame **delay_frames = NULL;
	IA_CSS_ENTER_PRIVATE("");
	if (!pipe) {
		IA_CSS_ERROR("Invalid args - pipe %p", pipe);
		return -EINVAL;
	}
	mode = pipe->mode;
	dvs_frame_delay = pipe->dvs_frame_delay;
	/* One extra frame beyond the configured delay is needed. */
	if (dvs_frame_delay > 0)
		num_delay_frames = dvs_frame_delay + 1;
	switch (mode) {
	case IA_CSS_PIPE_ID_CAPTURE: {
		struct ia_css_capture_settings *mycs_capture = &pipe->pipe_settings.capture;
		(void)mycs_capture;
		/* Capture pipes use no delay frames; nothing to allocate. */
		return err;
	}
	break;
	case IA_CSS_PIPE_ID_VIDEO: {
		struct ia_css_video_settings *mycs_video = &pipe->pipe_settings.video;
		ref_info = mycs_video->video_binary.internal_frame_info;
		/*
		 * The ref frame expects
		 * 1. Y plane
		 * 2. UV plane with line interleaving, like below
		 * UUUUUU(width/2 times) VVVVVVVV..(width/2 times)
		 *
		 * This format is not YUV420(which has Y, U and V planes).
		 * Its closer to NV12, except that the UV plane has UV
		 * interleaving, like UVUVUVUVUVUVUVUVU...
		 *
		 * TODO: make this ref_frame format as a separate frame format
		 */
		ref_info.format = IA_CSS_FRAME_FORMAT_NV12;
		delay_frames = mycs_video->delay_frames;
	}
	break;
	case IA_CSS_PIPE_ID_PREVIEW: {
		struct ia_css_preview_settings *mycs_preview = &pipe->pipe_settings.preview;
		ref_info = mycs_preview->preview_binary.internal_frame_info;
		/*
		 * The ref frame expects
		 * 1. Y plane
		 * 2. UV plane with line interleaving, like below
		 * UUUUUU(width/2 times) VVVVVVVV..(width/2 times)
		 *
		 * This format is not YUV420(which has Y, U and V planes).
		 * Its closer to NV12, except that the UV plane has UV
		 * interleaving, like UVUVUVUVUVUVUVUVU...
		 *
		 * TODO: make this ref_frame format as a separate frame format
		 */
		ref_info.format = IA_CSS_FRAME_FORMAT_NV12;
		delay_frames = mycs_preview->delay_frames;
	}
	break;
	default:
		return -EINVAL;
	}
	ref_info.raw_bit_depth = SH_CSS_REF_BIT_DEPTH;
	assert(num_delay_frames <= MAX_NUM_VIDEO_DELAY_FRAMES);
	for (i = 0; i < num_delay_frames; i++) {
		err = ia_css_frame_allocate_from_info(&delay_frames[i], &ref_info);
		if (err)
			return err;
	}
	IA_CSS_LEAVE_PRIVATE("");
	return 0;
}
static int load_advanced_binaries(struct ia_css_pipe *pipe)
{
struct ia_css_frame_info pre_in_info, gdc_in_info,
post_in_info, post_out_info,
vf_info, *vf_pp_in_info, *pipe_out_info,
*pipe_vf_out_info;
bool need_pp;
bool need_isp_copy = true;
int err = 0;
IA_CSS_ENTER_PRIVATE("");
assert(pipe);
assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
pipe->mode == IA_CSS_PIPE_ID_COPY);
if (pipe->pipe_settings.capture.pre_isp_binary.info)
return 0;
pipe_out_info = &pipe->output_info[0];
pipe_vf_out_info = &pipe->vf_output_info[0];
vf_info = *pipe_vf_out_info;
err = ia_css_util_check_vf_out_info(pipe_out_info, &vf_info);
if (err)
return err;
need_pp = need_capture_pp(pipe);
ia_css_frame_info_set_format(&vf_info,
IA_CSS_FRAME_FORMAT_YUV_LINE);
/* we build up the pipeline starting at the end */
/* Capture post-processing */
if (need_pp) {
struct ia_css_binary_descr capture_pp_descr;
ia_css_pipe_get_capturepp_binarydesc(pipe, &capture_pp_descr,
&post_out_info,
pipe_out_info, &vf_info);
err = ia_css_binary_find(&capture_pp_descr,
&pipe->pipe_settings.capture.capture_pp_binary);
if (err)
return err;
} else {
post_out_info = *pipe_out_info;
}
/* Post-gdc */
{
struct ia_css_binary_descr post_gdc_descr;
ia_css_pipe_get_post_gdc_binarydesc(pipe, &post_gdc_descr,
&post_in_info,
&post_out_info, &vf_info);
err = ia_css_binary_find(&post_gdc_descr,
&pipe->pipe_settings.capture.post_isp_binary);
if (err)
return err;
}
/* Gdc */
{
struct ia_css_binary_descr gdc_descr;
ia_css_pipe_get_gdc_binarydesc(pipe, &gdc_descr, &gdc_in_info,
&pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
err = ia_css_binary_find(&gdc_descr,
&pipe->pipe_settings.capture.anr_gdc_binary);
if (err)
return err;
}
pipe->pipe_settings.capture.anr_gdc_binary.left_padding =
pipe->pipe_settings.capture.post_isp_binary.left_padding;
/* Pre-gdc */
{
struct ia_css_binary_descr pre_gdc_descr;
ia_css_pipe_get_pre_gdc_binarydesc(pipe, &pre_gdc_descr, &pre_in_info,
&pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
err = ia_css_binary_find(&pre_gdc_descr,
&pipe->pipe_settings.capture.pre_isp_binary);
if (err)
return err;
}
pipe->pipe_settings.capture.pre_isp_binary.left_padding =
pipe->pipe_settings.capture.anr_gdc_binary.left_padding;
/* Viewfinder post-processing */
if (need_pp) {
vf_pp_in_info =
&pipe->pipe_settings.capture.capture_pp_binary.vf_frame_info;
} else {
vf_pp_in_info =
&pipe->pipe_settings.capture.post_isp_binary.vf_frame_info;
}
{
struct ia_css_binary_descr vf_pp_descr;
ia_css_pipe_get_vfpp_binarydesc(pipe,
&vf_pp_descr, vf_pp_in_info, pipe_vf_out_info);
err = ia_css_binary_find(&vf_pp_descr,
&pipe->pipe_settings.capture.vf_pp_binary);
if (err)
return err;
}
/* Copy */
if (IS_ISP2401)
/* For CSI2+, only the direct sensor mode/online requires ISP copy */
need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
if (need_isp_copy)
load_copy_binary(pipe,
&pipe->pipe_settings.capture.copy_binary,
&pipe->pipe_settings.capture.pre_isp_binary);
return err;
}
/*
 * Load the pre-DE ("bayer ISP") binary for a Bayer-mode capture pipe.
 * Returns 0 if it is already loaded, on success, or a negative errno
 * from the output-info check / binary lookup.
 */
static int load_bayer_isp_binaries(struct ia_css_pipe *pipe)
{
	struct ia_css_binary_descr pre_de_descr;
	struct ia_css_frame_info pre_isp_in_info;
	struct ia_css_frame_info *out_info;
	int ret;

	IA_CSS_ENTER_PRIVATE("");
	assert(pipe);
	assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
	       pipe->mode == IA_CSS_PIPE_ID_COPY);

	out_info = &pipe->output_info[0];
	/* Already loaded; nothing to do. */
	if (pipe->pipe_settings.capture.pre_isp_binary.info)
		return 0;

	ret = ia_css_frame_check_info(out_info);
	if (ret)
		return ret;

	ia_css_pipe_get_pre_de_binarydesc(pipe, &pre_de_descr,
					  &pre_isp_in_info,
					  out_info);
	return ia_css_binary_find(&pre_de_descr,
				  &pipe->pipe_settings.capture.pre_isp_binary);
}
/*
 * Load the binaries for a low-light capture pipe, built back-to-front:
 * [copy] -> pre-anr -> anr -> post-anr -> [capture_pp] -> vf_pp.
 * Returns 0 on success or a negative errno from any binary lookup.
 */
static int load_low_light_binaries(struct ia_css_pipe *pipe)
{
	struct ia_css_frame_info pre_in_info, anr_in_info,
		   post_in_info, post_out_info,
		   vf_info, *pipe_vf_out_info, *pipe_out_info,
		   *vf_pp_in_info;
	bool need_pp;
	bool need_isp_copy = true;
	int err = 0;
	IA_CSS_ENTER_PRIVATE("");
	assert(pipe);
	assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
	       pipe->mode == IA_CSS_PIPE_ID_COPY);
	/* Binaries already loaded for this pipe; nothing to do. */
	if (pipe->pipe_settings.capture.pre_isp_binary.info)
		return 0;
	pipe_vf_out_info = &pipe->vf_output_info[0];
	pipe_out_info = &pipe->output_info[0];
	vf_info = *pipe_vf_out_info;
	err = ia_css_util_check_vf_out_info(pipe_out_info,
					    &vf_info);
	if (err)
		return err;
	need_pp = need_capture_pp(pipe);
	ia_css_frame_info_set_format(&vf_info,
				     IA_CSS_FRAME_FORMAT_YUV_LINE);
	/* we build up the pipeline starting at the end */
	/* Capture post-processing */
	if (need_pp) {
		struct ia_css_binary_descr capture_pp_descr;
		ia_css_pipe_get_capturepp_binarydesc(pipe, &capture_pp_descr,
						     &post_out_info,
						     pipe_out_info, &vf_info);
		err = ia_css_binary_find(&capture_pp_descr,
					 &pipe->pipe_settings.capture.capture_pp_binary);
		if (err)
			return err;
	} else {
		post_out_info = *pipe_out_info;
	}
	/* Post-anr */
	{
		struct ia_css_binary_descr post_anr_descr;
		ia_css_pipe_get_post_anr_binarydesc(pipe,
						    &post_anr_descr, &post_in_info, &post_out_info, &vf_info);
		err = ia_css_binary_find(&post_anr_descr,
					 &pipe->pipe_settings.capture.post_isp_binary);
		if (err)
			return err;
	}
	/* Anr */
	{
		struct ia_css_binary_descr anr_descr;
		ia_css_pipe_get_anr_binarydesc(pipe, &anr_descr, &anr_in_info,
					       &pipe->pipe_settings.capture.post_isp_binary.in_frame_info);
		err = ia_css_binary_find(&anr_descr,
					 &pipe->pipe_settings.capture.anr_gdc_binary);
		if (err)
			return err;
	}
	/* Propagate left padding backwards through the chain. */
	pipe->pipe_settings.capture.anr_gdc_binary.left_padding =
	    pipe->pipe_settings.capture.post_isp_binary.left_padding;
	/* Pre-anr */
	{
		struct ia_css_binary_descr pre_anr_descr;
		ia_css_pipe_get_pre_anr_binarydesc(pipe, &pre_anr_descr, &pre_in_info,
						   &pipe->pipe_settings.capture.anr_gdc_binary.in_frame_info);
		err = ia_css_binary_find(&pre_anr_descr,
					 &pipe->pipe_settings.capture.pre_isp_binary);
		if (err)
			return err;
	}
	pipe->pipe_settings.capture.pre_isp_binary.left_padding =
	    pipe->pipe_settings.capture.anr_gdc_binary.left_padding;
	/* Viewfinder post-processing */
	if (need_pp) {
		vf_pp_in_info =
		    &pipe->pipe_settings.capture.capture_pp_binary.vf_frame_info;
	} else {
		vf_pp_in_info =
		    &pipe->pipe_settings.capture.post_isp_binary.vf_frame_info;
	}
	{
		struct ia_css_binary_descr vf_pp_descr;
		ia_css_pipe_get_vfpp_binarydesc(pipe, &vf_pp_descr,
						vf_pp_in_info, pipe_vf_out_info);
		err = ia_css_binary_find(&vf_pp_descr,
					 &pipe->pipe_settings.capture.vf_pp_binary);
		if (err)
			return err;
	}
	/* Copy */
	if (IS_ISP2401)
		/* For CSI2+, only the direct sensor mode/online requires ISP copy */
		need_isp_copy = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
	if (need_isp_copy)
		err = load_copy_binary(pipe,
				       &pipe->pipe_settings.capture.copy_binary,
				       &pipe->pipe_settings.capture.pre_isp_binary);
	return err;
}
/*
 * A pipe is copied on the SP (no ISP involvement) when it is a RAW-mode
 * capture pipe whose input is 8-bit binary data or whose pipe mode is
 * plain copy.
 */
static bool copy_on_sp(struct ia_css_pipe *pipe)
{
	bool is_capture, is_raw_mode, binary_or_copy;

	assert(pipe);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "copy_on_sp() enter:\n");

	is_capture = pipe->mode == IA_CSS_PIPE_ID_CAPTURE;
	is_raw_mode = pipe->config.default_capture_config.mode ==
		      IA_CSS_CAPTURE_MODE_RAW;
	binary_or_copy = pipe->stream->config.input_config.format ==
			     ATOMISP_INPUT_FORMAT_BINARY_8 ||
			 pipe->config.mode == IA_CSS_PIPE_MODE_COPY;

	return is_capture && is_raw_mode && binary_or_copy;
}
/*
 * Load the binaries needed by a capture pipe, dispatching on the
 * configured capture mode. SP-copy (JPEG/binary) pipes only need their
 * output info initialized.
 *
 * Fixes: the mode switch had no default case, so an out-of-range mode
 * value fell through and returned 0 (err's initializer) as success —
 * it now returns -EINVAL; the duplicated leave-trace/return tail is
 * collapsed into the single final one.
 */
static int load_capture_binaries(struct ia_css_pipe *pipe)
{
	int err = 0;
	bool must_be_raw;

	IA_CSS_ENTER_PRIVATE("");
	assert(pipe);
	assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
	       pipe->mode == IA_CSS_PIPE_ID_COPY);
	if (pipe->pipe_settings.capture.primary_binary[0].info) {
		IA_CSS_LEAVE_ERR_PRIVATE(0);
		return 0;
	}
	/* in primary, advanced,low light or bayer,
	   the input format must be raw */
	must_be_raw =
	    pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
	    pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER ||
	    pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT;
	err = ia_css_util_check_input(&pipe->stream->config, must_be_raw, false);
	if (err) {
		IA_CSS_LEAVE_ERR_PRIVATE(err);
		return err;
	}
	if (copy_on_sp(pipe) &&
	    pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) {
		/* SP copies the binary (JPEG) data; no ISP binaries needed. */
		ia_css_frame_info_init(
		    &pipe->output_info[0],
		    JPEG_BYTES,
		    1,
		    IA_CSS_FRAME_FORMAT_BINARY_8,
		    0);
		IA_CSS_LEAVE_ERR_PRIVATE(0);
		return 0;
	}
	switch (pipe->config.default_capture_config.mode) {
	case IA_CSS_CAPTURE_MODE_RAW:
		err = load_copy_binaries(pipe);
		if (!err && IS_ISP2401)
			pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online;
		break;
	case IA_CSS_CAPTURE_MODE_BAYER:
		err = load_bayer_isp_binaries(pipe);
		break;
	case IA_CSS_CAPTURE_MODE_PRIMARY:
		err = load_primary_binaries(pipe);
		break;
	case IA_CSS_CAPTURE_MODE_ADVANCED:
		err = load_advanced_binaries(pipe);
		break;
	case IA_CSS_CAPTURE_MODE_LOW_LIGHT:
		err = load_low_light_binaries(pipe);
		break;
	default:
		/* Guard against values outside the capture-mode enum. */
		err = -EINVAL;
		break;
	}
	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
static int
unload_capture_binaries(struct ia_css_pipe *pipe)
{
unsigned int i;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
if (!pipe || (pipe->mode != IA_CSS_PIPE_ID_CAPTURE &&
pipe->mode != IA_CSS_PIPE_ID_COPY)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
ia_css_binary_unload(&pipe->pipe_settings.capture.copy_binary);
for (i = 0; i < MAX_NUM_PRIMARY_STAGES; i++)
ia_css_binary_unload(&pipe->pipe_settings.capture.primary_binary[i]);
ia_css_binary_unload(&pipe->pipe_settings.capture.pre_isp_binary);
ia_css_binary_unload(&pipe->pipe_settings.capture.anr_gdc_binary);
ia_css_binary_unload(&pipe->pipe_settings.capture.post_isp_binary);
ia_css_binary_unload(&pipe->pipe_settings.capture.capture_pp_binary);
ia_css_binary_unload(&pipe->pipe_settings.capture.capture_ldc_binary);
ia_css_binary_unload(&pipe->pipe_settings.capture.vf_pp_binary);
for (i = 0; i < pipe->pipe_settings.capture.num_yuv_scaler; i++)
ia_css_binary_unload(&pipe->pipe_settings.capture.yuv_scaler_binary[i]);
kfree(pipe->pipe_settings.capture.is_output_stage);
pipe->pipe_settings.capture.is_output_stage = NULL;
kfree(pipe->pipe_settings.capture.yuv_scaler_binary);
pipe->pipe_settings.capture.yuv_scaler_binary = NULL;
IA_CSS_LEAVE_ERR_PRIVATE(0);
return 0;
}
/*
 * Downscaling is required when the output is smaller than the input in
 * either dimension.
 */
static bool
need_downscaling(const struct ia_css_resolution in_res,
		 const struct ia_css_resolution out_res)
{
	return in_res.width > out_res.width || in_res.height > out_res.height;
}
static bool
need_yuv_scaler_stage(const struct ia_css_pipe *pipe)
{
unsigned int i;
struct ia_css_resolution in_res, out_res;
bool need_format_conversion = false;
IA_CSS_ENTER_PRIVATE("");
assert(pipe);
assert(pipe->mode == IA_CSS_PIPE_ID_YUVPP);
/* TODO: make generic function */
need_format_conversion =
((pipe->stream->config.input_config.format ==
ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) &&
(pipe->output_info[0].format != IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8));
in_res = pipe->config.input_effective_res;
if (pipe->config.enable_dz)
return true;
if ((pipe->output_info[0].res.width != 0) && need_format_conversion)
return true;
for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
out_res = pipe->output_info[i].res;
/* A non-zero width means it is a valid output port */
if ((out_res.width != 0) && need_downscaling(in_res, out_res))
return true;
}
return false;
}
/*
* TODO: it is temporarily created from ia_css_pipe_create_cas_scaler_desc
* which has some hard-coded knowledge which prevents reuse of the function.
* Later, merge this with ia_css_pipe_create_cas_scaler_desc
*/
static int ia_css_pipe_create_cas_scaler_desc_single_output(
struct ia_css_frame_info *cas_scaler_in_info,
struct ia_css_frame_info *cas_scaler_out_info,
struct ia_css_frame_info *cas_scaler_vf_info,
struct ia_css_cas_binary_descr *descr)
{
unsigned int i;
unsigned int hor_ds_factor = 0, ver_ds_factor = 0;
int err = 0;
struct ia_css_frame_info tmp_in_info;
unsigned int max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP;
assert(cas_scaler_in_info);
assert(cas_scaler_out_info);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_pipe_create_cas_scaler_desc() enter:\n");
/* We assume that this function is used only for single output port case. */
descr->num_output_stage = 1;
hor_ds_factor = CEIL_DIV(cas_scaler_in_info->res.width,
cas_scaler_out_info->res.width);
ver_ds_factor = CEIL_DIV(cas_scaler_in_info->res.height,
cas_scaler_out_info->res.height);
/* use the same horizontal and vertical downscaling factor for simplicity */
assert(hor_ds_factor == ver_ds_factor);
i = 1;
while (i < hor_ds_factor) {
descr->num_stage++;
i *= max_scale_factor_per_stage;
}
descr->in_info = kmalloc(descr->num_stage *
sizeof(struct ia_css_frame_info),
GFP_KERNEL);
if (!descr->in_info) {
err = -ENOMEM;
goto ERR;
}
descr->internal_out_info = kmalloc(descr->num_stage *
sizeof(struct ia_css_frame_info),
GFP_KERNEL);
if (!descr->internal_out_info) {
err = -ENOMEM;
goto ERR;
}
descr->out_info = kmalloc(descr->num_stage *
sizeof(struct ia_css_frame_info),
GFP_KERNEL);
if (!descr->out_info) {
err = -ENOMEM;
goto ERR;
}
descr->vf_info = kmalloc(descr->num_stage *
sizeof(struct ia_css_frame_info),
GFP_KERNEL);
if (!descr->vf_info) {
err = -ENOMEM;
goto ERR;
}
descr->is_output_stage = kmalloc(descr->num_stage * sizeof(bool),
GFP_KERNEL);
if (!descr->is_output_stage) {
err = -ENOMEM;
goto ERR;
}
tmp_in_info = *cas_scaler_in_info;
for (i = 0; i < descr->num_stage; i++) {
descr->in_info[i] = tmp_in_info;
if ((tmp_in_info.res.width / max_scale_factor_per_stage) <=
cas_scaler_out_info->res.width) {
descr->is_output_stage[i] = true;
if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) {
descr->internal_out_info[i].res.width = cas_scaler_out_info->res.width;
descr->internal_out_info[i].res.height = cas_scaler_out_info->res.height;
descr->internal_out_info[i].padded_width = cas_scaler_out_info->padded_width;
descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
} else {
assert(i == (descr->num_stage - 1));
descr->internal_out_info[i].res.width = 0;
descr->internal_out_info[i].res.height = 0;
}
descr->out_info[i].res.width = cas_scaler_out_info->res.width;
descr->out_info[i].res.height = cas_scaler_out_info->res.height;
descr->out_info[i].padded_width = cas_scaler_out_info->padded_width;
descr->out_info[i].format = cas_scaler_out_info->format;
if (cas_scaler_vf_info) {
descr->vf_info[i].res.width = cas_scaler_vf_info->res.width;
descr->vf_info[i].res.height = cas_scaler_vf_info->res.height;
descr->vf_info[i].padded_width = cas_scaler_vf_info->padded_width;
ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE);
} else {
descr->vf_info[i].res.width = 0;
descr->vf_info[i].res.height = 0;
descr->vf_info[i].padded_width = 0;
}
} else {
descr->is_output_stage[i] = false;
descr->internal_out_info[i].res.width = tmp_in_info.res.width /
max_scale_factor_per_stage;
descr->internal_out_info[i].res.height = tmp_in_info.res.height /
max_scale_factor_per_stage;
descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
ia_css_frame_info_init(&descr->internal_out_info[i],
tmp_in_info.res.width / max_scale_factor_per_stage,
tmp_in_info.res.height / max_scale_factor_per_stage,
IA_CSS_FRAME_FORMAT_YUV420, 0);
descr->out_info[i].res.width = 0;
descr->out_info[i].res.height = 0;
descr->vf_info[i].res.width = 0;
descr->vf_info[i].res.height = 0;
}
tmp_in_info = descr->internal_out_info[i];
}
ERR:
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_pipe_create_cas_scaler_desc() leave, err=%d\n",
err);
return err;
}
/* FIXME: merge most of this and single output version */
/*
 * ia_css_pipe_create_cas_scaler_desc() - build the cascaded yuv-scaler
 * descriptor for a yuvpp pipe with up to IA_CSS_PIPE_MAX_OUTPUT_STAGE
 * outputs.
 *
 * One scaler stage is generated per downscale step of at most
 * MAX_PREFERRED_YUV_DS_PER_STEP in each direction, until every requested
 * output resolution has been reached.  The per-stage frame-info arrays are
 * allocated here; on failure the caller releases them through
 * ia_css_pipe_destroy_cas_scaler_desc(), which is safe because the caller
 * zero-initializes @descr and kfree(NULL) is a no-op.
 *
 * Returns 0 on success, -ENOMEM when an allocation fails.
 */
static int
ia_css_pipe_create_cas_scaler_desc(struct ia_css_pipe *pipe,
				   struct ia_css_cas_binary_descr *descr)
{
	struct ia_css_frame_info in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
	struct ia_css_frame_info *out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
	struct ia_css_frame_info *vf_out_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
	struct ia_css_frame_info tmp_in_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO;
	unsigned int i, j;
	unsigned int hor_scale_factor[IA_CSS_PIPE_MAX_OUTPUT_STAGE],
		ver_scale_factor[IA_CSS_PIPE_MAX_OUTPUT_STAGE],
		scale_factor = 0;
	unsigned int num_stages = 0;
	int err = 0;
	unsigned int max_scale_factor_per_stage = MAX_PREFERRED_YUV_DS_PER_STEP;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_pipe_create_cas_scaler_desc() enter:\n");

	for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
		out_info[i] = NULL;
		vf_out_info[i] = NULL;
		hor_scale_factor[i] = 0;
		ver_scale_factor[i] = 0;
	}

	in_info.res = pipe->config.input_effective_res;
	in_info.padded_width = in_info.res.width;
	descr->num_output_stage = 0;
	/* Find out how much scaling we need for each output */
	for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
		if (pipe->output_info[i].res.width != 0) {
			out_info[i] = &pipe->output_info[i];
			if (pipe->vf_output_info[i].res.width != 0)
				vf_out_info[i] = &pipe->vf_output_info[i];
			descr->num_output_stage += 1;
		}

		if (out_info[i]) {
			hor_scale_factor[i] = CEIL_DIV(in_info.res.width, out_info[i]->res.width);
			ver_scale_factor[i] = CEIL_DIV(in_info.res.height, out_info[i]->res.height);
			/* use the same horizontal and vertical scaling factor for simplicity */
			assert(hor_scale_factor[i] == ver_scale_factor[i]);
			/* Count the downscale steps needed to reach this output. */
			scale_factor = 1;
			do {
				num_stages++;
				scale_factor *= max_scale_factor_per_stage;
			} while (scale_factor < hor_scale_factor[i]);

			in_info.res = out_info[i]->res;
		}
	}

	/* A scaler stage may be required even when no downscaling is needed. */
	if (need_yuv_scaler_stage(pipe) && (num_stages == 0))
		num_stages = 1;

	descr->num_stage = num_stages;

	/*
	 * Use overflow-checked kmalloc_array() for every per-stage array
	 * (previously only in_info used it; the others open-coded the
	 * multiplication).
	 */
	descr->in_info = kmalloc_array(descr->num_stage,
				       sizeof(struct ia_css_frame_info),
				       GFP_KERNEL);
	if (!descr->in_info) {
		err = -ENOMEM;
		goto ERR;
	}
	descr->internal_out_info = kmalloc_array(descr->num_stage,
						 sizeof(struct ia_css_frame_info),
						 GFP_KERNEL);
	if (!descr->internal_out_info) {
		err = -ENOMEM;
		goto ERR;
	}
	descr->out_info = kmalloc_array(descr->num_stage,
					sizeof(struct ia_css_frame_info),
					GFP_KERNEL);
	if (!descr->out_info) {
		err = -ENOMEM;
		goto ERR;
	}
	descr->vf_info = kmalloc_array(descr->num_stage,
				       sizeof(struct ia_css_frame_info),
				       GFP_KERNEL);
	if (!descr->vf_info) {
		err = -ENOMEM;
		goto ERR;
	}
	descr->is_output_stage = kmalloc_array(descr->num_stage, sizeof(bool),
					       GFP_KERNEL);
	if (!descr->is_output_stage) {
		err = -ENOMEM;
		goto ERR;
	}

	/* Outputs are expected ordered from largest to smallest resolution. */
	for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
		if (out_info[i]) {
			if (i > 0) {
				assert((out_info[i - 1]->res.width >= out_info[i]->res.width) &&
				       (out_info[i - 1]->res.height >= out_info[i]->res.height));
			}
		}
	}

	tmp_in_info.res = pipe->config.input_effective_res;
	tmp_in_info.format = IA_CSS_FRAME_FORMAT_YUV420;
	/* Walk the cascade; j indexes the output currently being produced. */
	for (i = 0, j = 0; i < descr->num_stage; i++) {
		assert(j < 2);
		assert(out_info[j]);

		descr->in_info[i] = tmp_in_info;
		if ((tmp_in_info.res.width / max_scale_factor_per_stage) <=
		    out_info[j]->res.width) {
			/* One more step reaches output j: emit an output stage. */
			descr->is_output_stage[i] = true;
			if ((descr->num_output_stage > 1) && (i != (descr->num_stage - 1))) {
				descr->internal_out_info[i].res.width = out_info[j]->res.width;
				descr->internal_out_info[i].res.height = out_info[j]->res.height;
				descr->internal_out_info[i].padded_width = out_info[j]->padded_width;
				descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
			} else {
				assert(i == (descr->num_stage - 1));
				descr->internal_out_info[i].res.width = 0;
				descr->internal_out_info[i].res.height = 0;
			}
			descr->out_info[i].res.width = out_info[j]->res.width;
			descr->out_info[i].res.height = out_info[j]->res.height;
			descr->out_info[i].padded_width = out_info[j]->padded_width;
			descr->out_info[i].format = out_info[j]->format;
			if (vf_out_info[j]) {
				descr->vf_info[i].res.width = vf_out_info[j]->res.width;
				descr->vf_info[i].res.height = vf_out_info[j]->res.height;
				descr->vf_info[i].padded_width = vf_out_info[j]->padded_width;
				ia_css_frame_info_set_format(&descr->vf_info[i], IA_CSS_FRAME_FORMAT_YUV_LINE);
			} else {
				descr->vf_info[i].res.width = 0;
				descr->vf_info[i].res.height = 0;
				descr->vf_info[i].padded_width = 0;
			}
			j++;
		} else {
			/* Intermediate stage: downscale by the maximum step. */
			descr->is_output_stage[i] = false;
			descr->internal_out_info[i].res.width = tmp_in_info.res.width /
								max_scale_factor_per_stage;
			descr->internal_out_info[i].res.height = tmp_in_info.res.height /
								 max_scale_factor_per_stage;
			descr->internal_out_info[i].format = IA_CSS_FRAME_FORMAT_YUV420;
			ia_css_frame_info_init(&descr->internal_out_info[i],
					       tmp_in_info.res.width / max_scale_factor_per_stage,
					       tmp_in_info.res.height / max_scale_factor_per_stage,
					       IA_CSS_FRAME_FORMAT_YUV420, 0);
			descr->out_info[i].res.width = 0;
			descr->out_info[i].res.height = 0;
			descr->vf_info[i].res.width = 0;
			descr->vf_info[i].res.height = 0;
		}
		/* The next stage consumes this stage's internal output. */
		tmp_in_info = descr->internal_out_info[i];
	}
ERR:
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_pipe_create_cas_scaler_desc() leave, err=%d\n",
			    err);
	return err;
}
static void ia_css_pipe_destroy_cas_scaler_desc(struct ia_css_cas_binary_descr
	*descr)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_pipe_destroy_cas_scaler_desc() enter:\n");

	/* kfree(NULL) is a no-op, so a partially-built descriptor is fine. */
	kfree(descr->is_output_stage);
	kfree(descr->vf_info);
	kfree(descr->out_info);
	kfree(descr->internal_out_info);
	kfree(descr->in_info);

	/* Clear every array pointer so a repeated destroy stays harmless. */
	descr->in_info = NULL;
	descr->internal_out_info = NULL;
	descr->out_info = NULL;
	descr->vf_info = NULL;
	descr->is_output_stage = NULL;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_pipe_destroy_cas_scaler_desc() leave\n");
}
/*
 * load_yuvpp_binaries() - select and load every ISP binary a yuvpp pipe
 * needs: an optional isp-copy binary, a chain of cascaded yuv-scaler
 * binaries, and one vf-pp binary per enabled viewfinder output.
 *
 * Returns 0 on success or a negative errno.  The temporary cascade
 * descriptor is always released on the ERR path below.
 */
static int
load_yuvpp_binaries(struct ia_css_pipe *pipe)
{
	int err = 0;
	bool need_scaler = false;
	struct ia_css_frame_info *vf_pp_in_info[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
	struct ia_css_yuvpp_settings *mycs;
	struct ia_css_binary *next_binary;
	struct ia_css_cas_binary_descr cas_scaler_descr = { };
	unsigned int i, j;
	bool need_isp_copy_binary = false;

	IA_CSS_ENTER_PRIVATE("");
	assert(pipe);
	assert(pipe->stream);
	assert(pipe->mode == IA_CSS_PIPE_ID_YUVPP);

	/* Binaries already loaded for this pipe: nothing to do. */
	if (pipe->pipe_settings.yuvpp.copy_binary.info)
		goto ERR;

	/* Set both must_be_raw and must_be_yuv to false then yuvpp can take rgb inputs */
	err = ia_css_util_check_input(&pipe->stream->config, false, false);
	if (err)
		goto ERR;

	mycs = &pipe->pipe_settings.yuvpp;

	/* Validate each enabled viewfinder output against its main output. */
	for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
		if (pipe->vf_output_info[i].res.width != 0) {
			err = ia_css_util_check_vf_out_info(&pipe->output_info[i],
							    &pipe->vf_output_info[i]);
			if (err)
				goto ERR;
		}
		vf_pp_in_info[i] = NULL;
	}

	need_scaler = need_yuv_scaler_stage(pipe);

	/* we build up the pipeline starting at the end */
	/* Capture post-processing */
	if (need_scaler) {
		struct ia_css_binary_descr yuv_scaler_descr;

		err = ia_css_pipe_create_cas_scaler_desc(pipe,
							 &cas_scaler_descr);
		if (err)
			goto ERR;
		mycs->num_output = cas_scaler_descr.num_output_stage;
		mycs->num_yuv_scaler = cas_scaler_descr.num_stage;
		mycs->yuv_scaler_binary = kcalloc(cas_scaler_descr.num_stage,
						  sizeof(struct ia_css_binary),
						  GFP_KERNEL);
		if (!mycs->yuv_scaler_binary) {
			err = -ENOMEM;
			goto ERR;
		}
		mycs->is_output_stage = kcalloc(cas_scaler_descr.num_stage,
						sizeof(bool), GFP_KERNEL);
		if (!mycs->is_output_stage) {
			err = -ENOMEM;
			goto ERR;
		}
		/* Find one scaler binary per cascade stage. */
		for (i = 0; i < cas_scaler_descr.num_stage; i++) {
			mycs->is_output_stage[i] = cas_scaler_descr.is_output_stage[i];
			ia_css_pipe_get_yuvscaler_binarydesc(pipe,
							     &yuv_scaler_descr,
							     &cas_scaler_descr.in_info[i],
							     &cas_scaler_descr.out_info[i],
							     &cas_scaler_descr.internal_out_info[i],
							     &cas_scaler_descr.vf_info[i]);
			err = ia_css_binary_find(&yuv_scaler_descr,
						 &mycs->yuv_scaler_binary[i]);
			if (err)
				goto ERR;
		}
		/*
		 * Success path: release the descriptor here.  The second
		 * destroy at ERR below is harmless because the destroy
		 * helper NULLs every pointer it frees.
		 */
		ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);
	} else {
		mycs->num_output = 1;
	}

	if (need_scaler)
		next_binary = &mycs->yuv_scaler_binary[0];
	else
		next_binary = NULL;

	/*
	 * NOTES
	 * - Why does the "yuvpp" pipe needs "isp_copy_binary" (i.e. ISP Copy) when
	 *   its input is "ATOMISP_INPUT_FORMAT_YUV422_8"?
	 *
	 *   In most use cases, the first stage in the "yuvpp" pipe is the "yuv_scale_
	 *   binary". However, the "yuv_scale_binary" does NOT support the input-frame
	 *   format as "IA_CSS_STREAM _FORMAT_YUV422_8".
	 *
	 *   Hence, the "isp_copy_binary" is required to be present in front of the "yuv
	 *   _scale_binary". It would translate the input-frame to the frame formats that
	 *   are supported by the "yuv_scale_binary".
	 *
	 *   Please refer to "FrameWork/css/isp/pipes/capture_pp/capture_pp_1.0/capture_
	 *   pp_defs.h" for the list of input-frame formats that are supported by the
	 *   "yuv_scale_binary".
	 */
	if (IS_ISP2401)
		need_isp_copy_binary =
		    (pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_YUV422_8);
	else
		need_isp_copy_binary = true;

	if (need_isp_copy_binary) {
		err = load_copy_binary(pipe,
				       &mycs->copy_binary,
				       next_binary);

		if (err)
			goto ERR;

		/*
		 * NOTES
		 * - Why is "pipe->pipe_settings.capture.copy_binary.online" specified?
		 *
		 *   In some use cases, the first stage in the "yuvpp" pipe is the
		 *   "isp_copy_binary". The "isp_copy_binary" is designed to process
		 *   the input from either the system DDR or from the IPU internal VMEM.
		 *   So it provides the flag "online" to specify where its input is from,
		 *   i.e.:
		 *
		 *      (1) "online <= true", the input is from the IPU internal VMEM.
		 *      (2) "online <= false", the input is from the system DDR.
		 *
		 *   In other use cases, the first stage in the "yuvpp" pipe is the
		 *   "yuv_scale_binary". "The "yuv_scale_binary" is designed to process the
		 *   input ONLY from the system DDR. So it does not provide the flag "online"
		 *   to specify where its input is from.
		 */
		/*
		 * NOTE(review): this writes pipe_settings.capture, not
		 * pipe_settings.yuvpp, even though this is the yuvpp pipe and
		 * the copy binary loaded above lives in mycs (yuvpp).  Looks
		 * like it should target the yuvpp copy binary -- confirm
		 * before changing.
		 */
		pipe->pipe_settings.capture.copy_binary.online = pipe->stream->config.online;
	}

	/* Viewfinder post-processing */
	if (need_scaler) {
		/* Collect the vf outputs of every output stage of the cascade. */
		for (i = 0, j = 0; i < mycs->num_yuv_scaler; i++) {
			if (mycs->is_output_stage[i]) {
				assert(j < 2);
				vf_pp_in_info[j] =
				    &mycs->yuv_scaler_binary[i].vf_frame_info;
				j++;
			}
		}
		mycs->num_vf_pp = j;
	} else {
		/* No scaler: the copy binary's vf output feeds the single vf-pp. */
		vf_pp_in_info[0] =
		    &mycs->copy_binary.vf_frame_info;
		for (i = 1; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++)
			vf_pp_in_info[i] = NULL;
		mycs->num_vf_pp = 1;
	}
	mycs->vf_pp_binary = kcalloc(mycs->num_vf_pp,
				     sizeof(struct ia_css_binary),
				     GFP_KERNEL);
	if (!mycs->vf_pp_binary) {
		err = -ENOMEM;
		goto ERR;
	}

	{
		struct ia_css_binary_descr vf_pp_descr;

		/* Find a vf-pp binary for each enabled viewfinder output. */
		for (i = 0; i < mycs->num_vf_pp; i++) {
			if (pipe->vf_output_info[i].res.width != 0) {
				ia_css_pipe_get_vfpp_binarydesc(pipe,
								&vf_pp_descr, vf_pp_in_info[i], &pipe->vf_output_info[i]);
				err = ia_css_binary_find(&vf_pp_descr, &mycs->vf_pp_binary[i]);
				if (err)
					goto ERR;
			}
		}
	}

	/* err is necessarily 0 here; check kept for symmetry with the goto style. */
	if (err)
		goto ERR;

ERR:
	/* Safe even after the success-path destroy above (pointers were NULLed). */
	if (need_scaler)
		ia_css_pipe_destroy_cas_scaler_desc(&cas_scaler_descr);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "load_yuvpp_binaries() leave, err=%d\n",
			    err);
	return err;
}
static int
unload_yuvpp_binaries(struct ia_css_pipe *pipe)
{
unsigned int i;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
ia_css_binary_unload(&pipe->pipe_settings.yuvpp.copy_binary);
for (i = 0; i < pipe->pipe_settings.yuvpp.num_yuv_scaler; i++)
ia_css_binary_unload(&pipe->pipe_settings.yuvpp.yuv_scaler_binary[i]);
for (i = 0; i < pipe->pipe_settings.yuvpp.num_vf_pp; i++)
ia_css_binary_unload(&pipe->pipe_settings.yuvpp.vf_pp_binary[i]);
kfree(pipe->pipe_settings.yuvpp.is_output_stage);
pipe->pipe_settings.yuvpp.is_output_stage = NULL;
kfree(pipe->pipe_settings.yuvpp.yuv_scaler_binary);
pipe->pipe_settings.yuvpp.yuv_scaler_binary = NULL;
kfree(pipe->pipe_settings.yuvpp.vf_pp_binary);
pipe->pipe_settings.yuvpp.vf_pp_binary = NULL;
IA_CSS_LEAVE_ERR_PRIVATE(0);
return 0;
}
static int yuvpp_start(struct ia_css_pipe *pipe)
{
int err = 0;
enum sh_css_pipe_config_override copy_ovrd;
enum ia_css_input_mode yuvpp_pipe_input_mode;
unsigned int thread_id;
IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
if ((!pipe) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
return -EINVAL;
}
yuvpp_pipe_input_mode = pipe->stream->config.mode;
sh_css_metrics_start_frame();
/* multi stream video needs mipi buffers */
err = send_mipi_frames(pipe);
if (err) {
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
copy_ovrd = 1 << thread_id;
start_pipe(pipe, copy_ovrd, yuvpp_pipe_input_mode);
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
static int
sh_css_pipe_unload_binaries(struct ia_css_pipe *pipe)
{
	int status = 0;

	IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);

	if (!pipe) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	/* PIPE_MODE_COPY has no binaries, but has output frames to outside*/
	if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY) {
		IA_CSS_LEAVE_ERR_PRIVATE(0);
		return 0;
	}

	/*
	 * Dispatch to the unloader matching the pipe's operating mode;
	 * modes without binaries have nothing to unload and report success.
	 */
	if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW)
		status = unload_preview_binaries(pipe);
	else if (pipe->mode == IA_CSS_PIPE_ID_VIDEO)
		status = unload_video_binaries(pipe);
	else if (pipe->mode == IA_CSS_PIPE_ID_CAPTURE)
		status = unload_capture_binaries(pipe);
	else if (pipe->mode == IA_CSS_PIPE_ID_YUVPP)
		status = unload_yuvpp_binaries(pipe);

	IA_CSS_LEAVE_ERR_PRIVATE(status);
	return status;
}
static int
sh_css_pipe_load_binaries(struct ia_css_pipe *pipe)
{
	int status;

	assert(pipe);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "sh_css_pipe_load_binaries() enter:\n");

	/* PIPE_MODE_COPY has no binaries, but has output frames to outside*/
	if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY)
		return 0;

	switch (pipe->mode) {
	case IA_CSS_PIPE_ID_PREVIEW:
		status = load_preview_binaries(pipe);
		break;
	case IA_CSS_PIPE_ID_VIDEO:
		status = load_video_binaries(pipe);
		break;
	case IA_CSS_PIPE_ID_CAPTURE:
		status = load_capture_binaries(pipe);
		break;
	case IA_CSS_PIPE_ID_YUVPP:
		status = load_yuvpp_binaries(pipe);
		break;
	default:
		status = -EINVAL;
		break;
	}

	if (!status)
		return 0;

	/*
	 * Loading failed: roll back whatever was loaded.  css cannot report
	 * two errors from one function, so a rollback failure collapses the
	 * result into -EINVAL.
	 */
	if (sh_css_pipe_unload_binaries(pipe))
		status = -EINVAL;

	return status;
}
/*
 * create_host_yuvpp_pipeline() - build the host-side stage list for a
 * yuvpp pipe: an optional isp-copy stage, a chain of yuv-scaler stages
 * and one vf-pp stage per enabled viewfinder output, wired together
 * through in/out/vf frames.
 *
 * Returns 0 on success or a negative errno.
 */
static int
create_host_yuvpp_pipeline(struct ia_css_pipe *pipe)
{
	struct ia_css_pipeline *me;
	int err = 0;
	struct ia_css_pipeline_stage *vf_pp_stage = NULL,
		*copy_stage = NULL,
		*yuv_scaler_stage = NULL;
	struct ia_css_binary *copy_binary,
		*vf_pp_binary,
		*yuv_scaler_binary;
	bool need_scaler = false;
	unsigned int num_stage, num_output_stage;
	unsigned int i, j;

	struct ia_css_frame *in_frame = NULL;
	struct ia_css_frame *out_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
	struct ia_css_frame *bin_out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
	struct ia_css_frame *vf_frame[IA_CSS_PIPE_MAX_OUTPUT_STAGE];
	struct ia_css_pipeline_stage_desc stage_desc;
	bool need_in_frameinfo_memory = false;
	bool sensor = false;
	bool buffered_sensor = false;
	bool online = false;
	bool continuous = false;

	IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
	if ((!pipe) || (!pipe->stream) || (pipe->mode != IA_CSS_PIPE_ID_YUVPP)) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}
	me = &pipe->pipeline;
	ia_css_pipeline_clean(me);
	for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
		out_frame[i] = NULL;
		vf_frame[i] = NULL;
	}
	ia_css_pipe_util_create_output_frames(bin_out_frame);
	num_stage  = pipe->pipe_settings.yuvpp.num_yuv_scaler;
	num_output_stage   = pipe->pipe_settings.yuvpp.num_output;

	if (IS_ISP2401) {
		/*
		 * When the input system is 2401, always enable 'in_frameinfo_memory'
		 * except for the following:
		 * - Direct Sensor Mode Online Capture
		 * - Direct Sensor Mode Continuous Capture
		 * - Buffered Sensor Mode Continuous Capture
		 */
		sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR;
		buffered_sensor = pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
		online = pipe->stream->config.online;
		continuous = pipe->stream->config.continuous;
		need_in_frameinfo_memory =
		!((sensor && (online || continuous)) || (buffered_sensor && continuous));
	} else {
		/* Construct in_frame info (only in case we have dynamic input */
		need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
	}
	/* Note: 'online' stays false on non-2401 systems (only set above). */
	/*
	 * the input frame can come from:
	 *
	 *  a) memory: connect yuvscaler to me->in_frame
	 *  b) sensor, via copy binary: connect yuvscaler to copy binary later
	 *     on
	 */
	if (need_in_frameinfo_memory) {
		/* TODO: improve for different input formats. */

		/*
		 * "pipe->stream->config.input_config.format" represents the sensor output
		 * frame format, e.g. YUV422 8-bit.
		 *
		 * "in_frame_format" represents the imaging pipe's input frame format, e.g.
		 * Bayer-Quad RAW.
		 */
		int in_frame_format;

		if (pipe->stream->config.input_config.format ==
		    ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY) {
			in_frame_format = IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8;
		} else if (pipe->stream->config.input_config.format ==
			    ATOMISP_INPUT_FORMAT_YUV422_8) {
			/*
			 * When the sensor output frame format is "ATOMISP_INPUT_FORMAT_YUV422_8",
			 * the "isp_copy_var" binary is selected as the first stage in the yuvpp
			 * pipe.
			 *
			 * For the "isp_copy_var" binary, it reads the YUV422-8 pixels from
			 * the frame buffer (at DDR) to the frame-line buffer (at VMEM).
			 *
			 * By now, the "isp_copy_var" binary does NOT provide a separated
			 * frame-line buffer to store the YUV422-8 pixels. Instead, it stores
			 * the YUV422-8 pixels in the frame-line buffer which is designed to
			 * store the Bayer-Quad RAW pixels.
			 *
			 * To direct the "isp_copy_var" binary reading from the RAW frame-line
			 * buffer, its input frame format must be specified as "IA_CSS_FRAME_
			 * FORMAT_RAW".
			 */
			in_frame_format = IA_CSS_FRAME_FORMAT_RAW;
		} else {
			in_frame_format = IA_CSS_FRAME_FORMAT_NV12;
		}

		err = init_in_frameinfo_memory_defaults(pipe,
							&me->in_frame,
							in_frame_format);

		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}

		in_frame = &me->in_frame;
	} else {
		in_frame = NULL;
	}

	/* Set up the main and viewfinder frames for every enabled output. */
	for (i = 0; i < num_output_stage; i++) {
		assert(i < IA_CSS_PIPE_MAX_OUTPUT_STAGE);
		if (pipe->output_info[i].res.width != 0) {
			err = init_out_frameinfo_defaults(pipe, &me->out_frame[i], i);
			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
			out_frame[i] = &me->out_frame[i];
		}

		/* Construct vf_frame info (only in case we have VF) */
		if (pipe->vf_output_info[i].res.width != 0) {
			err = init_vf_frameinfo_defaults(pipe, &me->vf_frame[i], i);
			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
			vf_frame[i] = &me->vf_frame[i];
		}
	}

	copy_binary = &pipe->pipe_settings.yuvpp.copy_binary;
	vf_pp_binary = pipe->pipe_settings.yuvpp.vf_pp_binary;
	yuv_scaler_binary = pipe->pipe_settings.yuvpp.yuv_scaler_binary;
	need_scaler = need_yuv_scaler_stage(pipe);

	/* First stage: the isp-copy binary, when one was loaded. */
	if (pipe->pipe_settings.yuvpp.copy_binary.info) {
		struct ia_css_frame *in_frame_local = NULL;

		if (IS_ISP2401 && !online) {
			/* After isp copy is enabled in_frame needs to be passed. */
			in_frame_local = in_frame;
		}

		if (need_scaler) {
			ia_css_pipe_util_set_output_frames(bin_out_frame,
							   0, NULL);
			ia_css_pipe_get_generic_stage_desc(&stage_desc,
							   copy_binary,
							   bin_out_frame,
							   in_frame_local,
							   NULL);
		} else {
			ia_css_pipe_util_set_output_frames(bin_out_frame,
							   0, out_frame[0]);
			ia_css_pipe_get_generic_stage_desc(&stage_desc,
							   copy_binary,
							   bin_out_frame,
							   in_frame_local,
							   NULL);
		}

		err = ia_css_pipeline_create_and_add_stage(me,
							   &stage_desc,
							   &copy_stage);

		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}

		if (copy_stage) {
			/* if we use yuv scaler binary, vf output should be from there */
			copy_stage->args.copy_vf = !need_scaler;
			/* for yuvpp pipe, it should always be enabled */
			copy_stage->args.copy_output = true;
			/* connect output of copy binary to input of yuv scaler */
			in_frame = copy_stage->args.out_frame[0];
		}
	}

	if (need_scaler) {
		struct ia_css_frame *tmp_out_frame = NULL;
		struct ia_css_frame *tmp_vf_frame = NULL;
		struct ia_css_frame *tmp_in_frame = in_frame;

		/* Chain the scaler stages; j tracks which output is produced. */
		for (i = 0, j = 0; i < num_stage; i++) {
			assert(j < num_output_stage);
			if (pipe->pipe_settings.yuvpp.is_output_stage[i]) {
				tmp_out_frame = out_frame[j];
				tmp_vf_frame = vf_frame[j];
			} else {
				tmp_out_frame = NULL;
				tmp_vf_frame = NULL;
			}

			err = add_yuv_scaler_stage(pipe, me, tmp_in_frame,
						   tmp_out_frame,
						   NULL,
						   &yuv_scaler_binary[i],
						   &yuv_scaler_stage);

			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
			/* we use output port 1 as internal output port */
			tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
			if (pipe->pipe_settings.yuvpp.is_output_stage[i]) {
				if (tmp_vf_frame && (tmp_vf_frame->frame_info.res.width != 0)) {
					in_frame = yuv_scaler_stage->args.out_vf_frame;
					err = add_vf_pp_stage(pipe, in_frame,
							      tmp_vf_frame,
							      &vf_pp_binary[j],
							      &vf_pp_stage);

					if (err) {
						IA_CSS_LEAVE_ERR_PRIVATE(err);
						return err;
					}
				}
				j++;
			}
		}
	} else if (copy_stage) {
		/* No scaler: the copy stage's vf output feeds vf-pp directly. */
		if (vf_frame[0] && vf_frame[0]->frame_info.res.width != 0) {
			in_frame = copy_stage->args.out_vf_frame;
			err = add_vf_pp_stage(pipe, in_frame, vf_frame[0],
					      &vf_pp_binary[0], &vf_pp_stage);
		}
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}

	ia_css_pipeline_finalize_stages(&pipe->pipeline,
					pipe->stream->config.continuous);

	IA_CSS_LEAVE_ERR_PRIVATE(0);

	return 0;
}
static int
create_host_copy_pipeline(struct ia_css_pipe *pipe,
			  unsigned int max_input_width,
			  struct ia_css_frame *out_frame)
{
	struct ia_css_pipeline *pipeline = &pipe->pipeline;
	struct ia_css_pipeline_stage_desc stage_desc;
	int status;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "create_host_copy_pipeline() enter:\n");

	/* pipeline already created as part of create_host_pipeline_structure */
	ia_css_pipeline_clean(pipeline);

	/* Construct out_frame info */
	out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;

	if (copy_on_sp(pipe) &&
	    pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) {
		/* SP copy of a binary-8 stream: one line of JPEG_BYTES bytes. */
		ia_css_frame_info_init(&out_frame->frame_info, JPEG_BYTES, 1,
				       IA_CSS_FRAME_FORMAT_BINARY_8, 0);
	} else if (out_frame->frame_info.format == IA_CSS_FRAME_FORMAT_RAW) {
		/* Raw copies inherit the bit depth of the pipe's input format. */
		out_frame->frame_info.raw_bit_depth =
		    ia_css_pipe_util_pipe_input_format_bpp(pipe);
	}

	pipeline->num_stages = 1;
	pipeline->pipe_id = IA_CSS_PIPE_ID_COPY;
	pipe->mode = IA_CSS_PIPE_ID_COPY;

	/* Single SP raw-copy stage; the pipeline has no ISP binaries. */
	ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame,
					   IA_CSS_PIPELINE_RAW_COPY,
					   max_input_width);
	status = ia_css_pipeline_create_and_add_stage(pipeline, &stage_desc, NULL);

	ia_css_pipeline_finalize_stages(&pipe->pipeline,
					pipe->stream->config.continuous);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "create_host_copy_pipeline() leave:\n");

	return status;
}
static int
create_host_isyscopy_capture_pipeline(struct ia_css_pipe *pipe)
{
	struct ia_css_pipeline *pipeline = &pipe->pipeline;
	struct ia_css_frame *out_frame = &pipeline->out_frame[0];
	struct ia_css_pipeline_stage *isys_stage = NULL;
	struct ia_css_pipeline_stage_desc stage_desc;
	enum sh_css_queue_id queue_id;
	unsigned int sp_thread;
	unsigned int max_input_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS;
	int status;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "create_host_isyscopy_capture_pipeline() enter:\n");
	ia_css_pipeline_clean(pipeline);

	/* Construct out_frame info */
	status = sh_css_pipe_get_output_frame_info(pipe, &out_frame->frame_info, 0);
	if (status)
		return status;
	out_frame->flash_state = IA_CSS_FRAME_FLASH_STATE_NONE;

	/* Bind the output frame to this pipe's SP thread output queue. */
	ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &sp_thread);
	ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, sp_thread, &queue_id);
	out_frame->dynamic_queue_id = queue_id;
	out_frame->buf_type = IA_CSS_BUFFER_TYPE_OUTPUT_FRAME;

	pipeline->num_stages = 1;
	pipeline->pipe_id = IA_CSS_PIPE_ID_CAPTURE;
	pipe->mode = IA_CSS_PIPE_ID_CAPTURE;

	/* Single SP isys-copy stage; no ISP binaries in this pipeline. */
	ia_css_pipe_get_sp_func_stage_desc(&stage_desc, out_frame,
					   IA_CSS_PIPELINE_ISYS_COPY,
					   max_input_width);
	status = ia_css_pipeline_create_and_add_stage(pipeline,
						      &stage_desc, &isys_stage);
	if (status)
		return status;

	ia_css_pipeline_finalize_stages(pipeline, pipe->stream->config.continuous);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "create_host_isyscopy_capture_pipeline() leave:\n");

	return status;
}
/*
 * create_host_regular_capture_pipeline() - build the host-side stage list
 * for a (non-isys-copy) capture pipe: optional copy stage, then the
 * primary / advanced / low-light / bayer stage chain, followed by the
 * optional LDC, capture-pp, yuv-scaler and vf-pp stages.
 *
 * Returns 0 on success or a negative errno.
 */
static int
create_host_regular_capture_pipeline(struct ia_css_pipe *pipe)
{
	struct ia_css_pipeline *me;
	int err = 0;
	enum ia_css_capture_mode mode;
	struct ia_css_pipeline_stage *current_stage = NULL;
	struct ia_css_pipeline_stage *yuv_scaler_stage = NULL;
	struct ia_css_binary *copy_binary,
		*primary_binary[MAX_NUM_PRIMARY_STAGES],
		*vf_pp_binary,
		*pre_isp_binary,
		*anr_gdc_binary,
		*post_isp_binary,
		*yuv_scaler_binary,
		*capture_pp_binary,
		*capture_ldc_binary;
	bool need_pp = false;
	bool raw;

	struct ia_css_frame *in_frame;
	struct ia_css_frame *out_frame;
	struct ia_css_frame *out_frames[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
	struct ia_css_frame *vf_frame;
	struct ia_css_pipeline_stage_desc stage_desc;
	bool need_in_frameinfo_memory = false;
	bool sensor = false;
	bool buffered_sensor = false;
	bool online = false;
	bool continuous = false;
	unsigned int i, num_yuv_scaler, num_primary_stage;
	bool need_yuv_pp = false;
	bool *is_output_stage = NULL;
	bool need_ldc = false;

	IA_CSS_ENTER_PRIVATE("");
	assert(pipe);
	assert(pipe->stream);
	assert(pipe->mode == IA_CSS_PIPE_ID_CAPTURE ||
	       pipe->mode == IA_CSS_PIPE_ID_COPY);

	me = &pipe->pipeline;
	mode = pipe->config.default_capture_config.mode;
	raw = (mode == IA_CSS_CAPTURE_MODE_RAW);
	ia_css_pipeline_clean(me);
	ia_css_pipe_util_create_output_frames(out_frames);

	if (IS_ISP2401) {
		/*
		 * When the input system is 2401, always enable 'in_frameinfo_memory'
		 * except for the following:
		 * - Direct Sensor Mode Online Capture
		 * - Direct Sensor Mode Continuous Capture
		 * - Buffered Sensor Mode Continuous Capture
		 */
		sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_SENSOR);
		buffered_sensor = (pipe->stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR);
		online = pipe->stream->config.online;
		continuous = pipe->stream->config.continuous;
		need_in_frameinfo_memory =
		!((sensor && (online || continuous)) || (buffered_sensor &&
							 (online || continuous)));
	} else {
		/* Construct in_frame info (only in case we have dynamic input */
		need_in_frameinfo_memory = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;
	}

	if (need_in_frameinfo_memory) {
		err = init_in_frameinfo_memory_defaults(pipe, &me->in_frame,
							IA_CSS_FRAME_FORMAT_RAW);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}

		in_frame = &me->in_frame;
	} else {
		in_frame = NULL;
	}

	err = init_out_frameinfo_defaults(pipe, &me->out_frame[0], 0);
	if (err) {
		IA_CSS_LEAVE_ERR_PRIVATE(err);
		return err;
	}
	out_frame = &me->out_frame[0];

	/* Construct vf_frame info (only in case we have VF) */
	if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0]) {
		if (mode == IA_CSS_CAPTURE_MODE_RAW || mode == IA_CSS_CAPTURE_MODE_BAYER) {
			/* These modes don't support viewfinder output */
			vf_frame = NULL;
		} else {
			/* Check the result (it was silently ignored before). */
			err = init_vf_frameinfo_defaults(pipe, &me->vf_frame[0], 0);
			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
			vf_frame = &me->vf_frame[0];
		}
	} else {
		vf_frame = NULL;
	}

	copy_binary = &pipe->pipe_settings.capture.copy_binary;
	num_primary_stage = pipe->pipe_settings.capture.num_primary_stage;
	if ((num_primary_stage == 0) && (mode == IA_CSS_CAPTURE_MODE_PRIMARY)) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	for (i = 0; i < num_primary_stage; i++)
		primary_binary[i] = &pipe->pipe_settings.capture.primary_binary[i];

	vf_pp_binary = &pipe->pipe_settings.capture.vf_pp_binary;
	pre_isp_binary = &pipe->pipe_settings.capture.pre_isp_binary;
	anr_gdc_binary = &pipe->pipe_settings.capture.anr_gdc_binary;
	post_isp_binary = &pipe->pipe_settings.capture.post_isp_binary;
	capture_pp_binary = &pipe->pipe_settings.capture.capture_pp_binary;
	yuv_scaler_binary = pipe->pipe_settings.capture.yuv_scaler_binary;
	num_yuv_scaler = pipe->pipe_settings.capture.num_yuv_scaler;
	is_output_stage = pipe->pipe_settings.capture.is_output_stage;
	capture_ldc_binary = &pipe->pipe_settings.capture.capture_ldc_binary;

	need_pp = (need_capture_pp(pipe) || pipe->output_stage) &&
		  mode != IA_CSS_CAPTURE_MODE_RAW &&
		  mode != IA_CSS_CAPTURE_MODE_BAYER;
	need_yuv_pp = (yuv_scaler_binary && yuv_scaler_binary->info);
	need_ldc = (capture_ldc_binary && capture_ldc_binary->info);

	/* Optional copy stage first, when a copy binary was loaded. */
	if (pipe->pipe_settings.capture.copy_binary.info) {
		if (raw) {
			ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
			if (IS_ISP2401) {
				if (!continuous) {
					ia_css_pipe_get_generic_stage_desc(&stage_desc,
									   copy_binary,
									   out_frames,
									   in_frame,
									   NULL);
				} else {
					in_frame = pipe->stream->last_pipe->continuous_frames[0];
					ia_css_pipe_get_generic_stage_desc(&stage_desc,
									   copy_binary,
									   out_frames,
									   in_frame,
									   NULL);
				}
			} else {
				ia_css_pipe_get_generic_stage_desc(&stage_desc,
								   copy_binary,
								   out_frames,
								   NULL, NULL);
			}
		} else {
			ia_css_pipe_util_set_output_frames(out_frames, 0,
							   in_frame);
			ia_css_pipe_get_generic_stage_desc(&stage_desc,
							   copy_binary,
							   out_frames,
							   NULL, NULL);
		}

		err = ia_css_pipeline_create_and_add_stage(me,
							   &stage_desc,
							   &current_stage);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	} else if (pipe->stream->config.continuous) {
		in_frame = pipe->stream->last_pipe->continuous_frames[0];
	}

	if (mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
		struct ia_css_frame *local_in_frame = NULL;
		struct ia_css_frame *local_out_frame = NULL;

		for (i = 0; i < num_primary_stage; i++) {
			if (i == 0)
				local_in_frame = in_frame;
			else
				local_in_frame = NULL;
			if (!need_pp && (i == num_primary_stage - 1) && (!IS_ISP2401 || !need_ldc))
				local_out_frame = out_frame;
			else
				local_out_frame = NULL;
			ia_css_pipe_util_set_output_frames(out_frames, 0, local_out_frame);
			/*
			 * WARNING: The #if def flag has been added below as a
			 * temporary solution to solve the problem of enabling the
			 * view finder in a single binary in a capture flow. The
			 * vf-pp stage has been removed from Skycam in the solution
			 * provided. The vf-pp stage should be re-introduced when
			 * required. This  * should not be considered as a clean solution.
			 * Proper investigation should be done to come up with the clean
			 * solution.
			 */
			ia_css_pipe_get_generic_stage_desc(&stage_desc,
							   primary_binary[i],
							   out_frames,
							   local_in_frame,
							   NULL);
			err = ia_css_pipeline_create_and_add_stage(me,
								   &stage_desc,
								   &current_stage);
			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
		}
		/* If we use copy iso primary, the input must be yuv iso raw */
		current_stage->args.copy_vf =
		    primary_binary[0]->info->sp.pipeline.mode ==
		    IA_CSS_BINARY_MODE_COPY;
		current_stage->args.copy_output = current_stage->args.copy_vf;
	} else if (mode == IA_CSS_CAPTURE_MODE_ADVANCED ||
		   mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
		ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
		ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary,
						   out_frames, in_frame, NULL);
		err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
							   NULL);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
		ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
		ia_css_pipe_get_generic_stage_desc(&stage_desc, anr_gdc_binary,
						   out_frames, NULL, NULL);
		err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
							   NULL);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}

		if (need_pp) {
			ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
			ia_css_pipe_get_generic_stage_desc(&stage_desc,
							   post_isp_binary,
							   out_frames,
							   NULL, NULL);
		} else {
			ia_css_pipe_util_set_output_frames(out_frames, 0,
							   out_frame);
			ia_css_pipe_get_generic_stage_desc(&stage_desc,
							   post_isp_binary,
							   out_frames,
							   NULL, NULL);
		}

		err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
							   &current_stage);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	} else if (mode == IA_CSS_CAPTURE_MODE_BAYER) {
		ia_css_pipe_util_set_output_frames(out_frames, 0, out_frame);
		ia_css_pipe_get_generic_stage_desc(&stage_desc, pre_isp_binary,
						   out_frames, in_frame, NULL);
		err = ia_css_pipeline_create_and_add_stage(me, &stage_desc,
							   NULL);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}

	if (need_pp && current_stage) {
		struct ia_css_frame *local_in_frame = NULL;

		local_in_frame = current_stage->args.out_frame[0];

		if (need_ldc) {
			ia_css_pipe_util_set_output_frames(out_frames, 0, NULL);
			ia_css_pipe_get_generic_stage_desc(&stage_desc,
							   capture_ldc_binary,
							   out_frames,
							   local_in_frame,
							   NULL);
			err = ia_css_pipeline_create_and_add_stage(me,
								   &stage_desc,
								   &current_stage);
			/*
			 * Check the result before dereferencing current_stage
			 * (previously the error was silently ignored).
			 */
			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
			local_in_frame = current_stage->args.out_frame[0];
		}
		err = add_capture_pp_stage(pipe, me, local_in_frame,
					   need_yuv_pp ? NULL : out_frame,
					   capture_pp_binary,
					   &current_stage);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}

	if (need_yuv_pp && current_stage) {
		struct ia_css_frame *tmp_in_frame = current_stage->args.out_frame[0];
		struct ia_css_frame *tmp_out_frame = NULL;

		for (i = 0; i < num_yuv_scaler; i++) {
			if (is_output_stage[i])
				tmp_out_frame = out_frame;
			else
				tmp_out_frame = NULL;

			err = add_yuv_scaler_stage(pipe, me, tmp_in_frame,
						   tmp_out_frame, NULL,
						   &yuv_scaler_binary[i],
						   &yuv_scaler_stage);
			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
			/* we use output port 1 as internal output port */
			tmp_in_frame = yuv_scaler_stage->args.out_frame[1];
		}
	}

	/*
	 * WARNING: The #if def flag has been added below as a
	 * temporary solution to solve the problem of enabling the
	 * view finder in a single binary in a capture flow. The vf-pp
	 * stage has been removed from Skycam in the solution provided.
	 * The vf-pp stage should be re-introduced when required. This
	 * should not be considered as a clean solution. Proper
	 * investigation should be done to come up with the clean solution.
	 */
	if (mode != IA_CSS_CAPTURE_MODE_RAW &&
	    mode != IA_CSS_CAPTURE_MODE_BAYER &&
	    current_stage && vf_frame) {
		in_frame = current_stage->args.out_vf_frame;
		err = add_vf_pp_stage(pipe, in_frame, vf_frame, vf_pp_binary,
				      &current_stage);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}
	ia_css_pipeline_finalize_stages(&pipe->pipeline, pipe->stream->config.continuous);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "create_host_regular_capture_pipeline() leave:\n");

	return 0;
}
/*
 * Build the host-side pipeline for a capture pipe.
 *
 * COPY-mode pipes get the lightweight isys-copy pipeline; every other
 * capture mode gets the regular capture pipeline.
 */
static int
create_host_capture_pipeline(struct ia_css_pipe *pipe)
{
	int err;

	IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);

	err = (pipe->config.mode == IA_CSS_PIPE_MODE_COPY) ?
	      create_host_isyscopy_capture_pipeline(pipe) :
	      create_host_regular_capture_pipeline(pipe);

	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/*
 * capture_start() - start execution of a capture pipe on the SP.
 *
 * For RAW/BAYER capture modes (outside COPY mode) that qualify for
 * copy_on_sp(), the frame copy is started directly on the SP and the
 * function returns without starting the full pipeline. Otherwise MIPI
 * frames are sent (old input system only, or non-COPY pipes on 2401),
 * and the pipeline is kicked off on the SP thread owned by this pipe.
 *
 * Returns 0 on success or a negative errno.
 */
static int capture_start(struct ia_css_pipe *pipe)
{
	struct ia_css_pipeline *me;
	unsigned int thread_id;
	int err = 0;
	enum sh_css_pipe_config_override copy_ovrd;

	IA_CSS_ENTER_PRIVATE("pipe = %p", pipe);
	if (!pipe) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}
	me = &pipe->pipeline;

	/*
	 * RAW/BAYER capture, not in COPY mode: if the SP can do the copy
	 * by itself, start it there and skip the regular pipeline start.
	 */
	if ((pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_RAW ||
	     pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) &&
	    (pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) {
		if (copy_on_sp(pipe)) {
			err = start_copy_on_sp(pipe, &me->out_frame[0]);
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}

	/* old isys: need to send_mipi_frames() in all pipe modes */
	if (!IS_ISP2401 || (IS_ISP2401 && pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) {
		err = send_mipi_frames(pipe);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}

	/* copy_ovrd is a bitmask selecting this pipe's SP thread */
	ia_css_pipeline_get_sp_thread_id(ia_css_pipe_get_pipe_num(pipe), &thread_id);
	copy_ovrd = 1 << thread_id;

	start_pipe(pipe, copy_ovrd, pipe->stream->config.mode);

#if !defined(ISP2401)
	/*
	 * old isys: for IA_CSS_PIPE_MODE_COPY pipe, isys rx has to be configured,
	 * which is currently done in start_binary(); but COPY pipe contains no binary,
	 * and does not call start_binary(); so we need to configure the rx here.
	 */
	if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY &&
	    pipe->stream->reconfigure_css_rx) {
		ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
					 pipe->stream->config.mode);
		pipe->stream->reconfigure_css_rx = false;
	}
#endif

	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
static int
sh_css_pipe_get_output_frame_info(struct ia_css_pipe *pipe,
struct ia_css_frame_info *info,
unsigned int idx)
{
assert(pipe);
assert(info);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"sh_css_pipe_get_output_frame_info() enter:\n");
*info = pipe->output_info[idx];
if (copy_on_sp(pipe) &&
pipe->stream->config.input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8) {
ia_css_frame_info_init(
info,
JPEG_BYTES,
1,
IA_CSS_FRAME_FORMAT_BINARY_8,
0);
} else if (info->format == IA_CSS_FRAME_FORMAT_RAW ||
info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) {
info->raw_bit_depth =
ia_css_pipe_util_pipe_input_format_bpp(pipe);
}
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"sh_css_pipe_get_output_frame_info() leave:\n");
return 0;
}
void
ia_css_stream_send_input_frame(const struct ia_css_stream *stream,
const unsigned short *data,
unsigned int width,
unsigned int height)
{
assert(stream);
ia_css_inputfifo_send_input_frame(
data, width, height,
stream->config.channel_id,
stream->config.input_config.format,
stream->config.pixels_per_clock == 2);
}
void
ia_css_stream_start_input_frame(const struct ia_css_stream *stream)
{
assert(stream);
ia_css_inputfifo_start_frame(
stream->config.channel_id,
stream->config.input_config.format,
stream->config.pixels_per_clock == 2);
}
/* Push one (split) input line — two segments — into the stream's fifo channel. */
void
ia_css_stream_send_input_line(const struct ia_css_stream *stream,
			      const unsigned short *data,
			      unsigned int width,
			      const unsigned short *data2,
			      unsigned int width2)
{
	assert(stream);

	/* Forward both line segments verbatim to the input fifo */
	ia_css_inputfifo_send_line(stream->config.channel_id,
				   data, width, data2, width2);
}
/*
 * Push one embedded-data line into the stream's fifo channel.
 * Empty or absent lines are silently ignored.
 */
void
ia_css_stream_send_input_embedded_line(const struct ia_css_stream *stream,
				       enum atomisp_input_format format,
				       const unsigned short *data,
				       unsigned int width)
{
	assert(stream);

	/* Nothing to transfer for a missing or zero-width line */
	if (width == 0 || !data)
		return;

	ia_css_inputfifo_send_embedded_line(stream->config.channel_id,
					    format, data, width);
}
/* Signal the end of the current input frame on this stream's fifo channel. */
void
ia_css_stream_end_input_frame(const struct ia_css_stream *stream)
{
	assert(stream);

	ia_css_inputfifo_end_frame(stream->config.channel_id);
}
/*
 * Return true if any stage of @me has a binary with the 'params'
 * capability enabled, i.e. the pipeline consumes ISP parameters.
 */
bool
ia_css_pipeline_uses_params(struct ia_css_pipeline *me)
{
	struct ia_css_pipeline_stage *s;
	bool uses_params = false;

	assert(me);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_pipeline_uses_params() enter: me=%p\n", me);

	for (s = me->stages; s && !uses_params; s = s->next)
		uses_params = s->binary_info && s->binary_info->enable.params;

	if (uses_params)
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "ia_css_pipeline_uses_params() leave: return_bool=true\n");
	else
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "ia_css_pipeline_uses_params() leave: return_bool=false\n");
	return uses_params;
}
/*
* @brief Tag a specific frame in continuous capture.
* Refer to "sh_css_internal.h" for details.
*/
int ia_css_stream_capture_frame(struct ia_css_stream *stream,
unsigned int exp_id)
{
struct sh_css_tag_descr tag_descr;
u32 encoded_tag_descr;
int err;
assert(stream);
IA_CSS_ENTER("exp_id=%d", exp_id);
/* Only continuous streams have a tagger */
if (exp_id == 0 || !stream->config.continuous) {
IA_CSS_LEAVE_ERR(-EINVAL);
return -EINVAL;
}
if (!sh_css_sp_is_running()) {
/* SP is not running. The queues are not valid */
IA_CSS_LEAVE_ERR(-EBUSY);
return -EBUSY;
}
/* Create the tag descriptor from the parameters */
sh_css_create_tag_descr(0, 0, 0, exp_id, &tag_descr);
/* Encode the tag descriptor into a 32-bit value */
encoded_tag_descr = sh_css_encode_tag_descr(&tag_descr);
/*
* Enqueue the encoded tag to the host2sp queue.
* Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0
* on both host and the SP side.
* It is mainly because it is enough to have only one tag_cmd queue
*/
err = ia_css_bufq_enqueue_tag_cmd(encoded_tag_descr);
IA_CSS_LEAVE_ERR(err);
return err;
}
/*
* @brief Configure the continuous capture.
* Refer to "sh_css_internal.h" for details.
*/
int ia_css_stream_capture(struct ia_css_stream *stream, int num_captures,
unsigned int skip, int offset)
{
struct sh_css_tag_descr tag_descr;
unsigned int encoded_tag_descr;
int return_err;
if (!stream)
return -EINVAL;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_stream_capture() enter: num_captures=%d, skip=%d, offset=%d\n",
num_captures, skip, offset);
/* Check if the tag descriptor is valid */
if (num_captures < SH_CSS_MINIMUM_TAG_ID) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_stream_capture() leave: return_err=%d\n",
-EINVAL);
return -EINVAL;
}
/* Create the tag descriptor from the parameters */
sh_css_create_tag_descr(num_captures, skip, offset, 0, &tag_descr);
/* Encode the tag descriptor into a 32-bit value */
encoded_tag_descr = sh_css_encode_tag_descr(&tag_descr);
if (!sh_css_sp_is_running()) {
/* SP is not running. The queues are not valid */
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_stream_capture() leaving:queues unavailable\n");
return -EBUSY;
}
/*
* Enqueue the encoded tag to the host2sp queue.
* Note: The pipe and stage IDs for tag_cmd queue are hard-coded to 0
* on both host and the SP side.
* It is mainly because it is enough to have only one tag_cmd queue
*/
return_err = ia_css_bufq_enqueue_tag_cmd((uint32_t)encoded_tag_descr);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_stream_capture() leave: return_err=%d\n",
return_err);
return return_err;
}
/*
 * Request the SP to fire the flash by writing the start_flash command
 * to the host2sp command register. On ISP2401 the command is only sent
 * while the SP is running; a failed write dumps SP debug state.
 * The @stream argument is only sanity-checked, not otherwise used.
 */
void ia_css_stream_request_flash(struct ia_css_stream *stream)
{
	(void)stream;
	assert(stream);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_stream_request_flash() enter: void\n");

#ifndef ISP2401
	/* old isys: unconditional fire-and-forget command write */
	sh_css_write_host2sp_command(host2sp_cmd_start_flash);
#else
	/* 2401: only valid while the SP runs; dump debug info on failure */
	if (sh_css_sp_is_running()) {
		if (!sh_css_write_host2sp_command(host2sp_cmd_start_flash)) {
			IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
			ia_css_debug_dump_sp_sw_debug_info();
			ia_css_debug_dump_debug_info(NULL);
		}
	} else {
		IA_CSS_LOG("SP is not running!");
	}
#endif
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_stream_request_flash() leave: return_void\n");
}
/*
 * Reset the host-visible SP control variables in SP DMEM to their
 * startup values: clear the isp_started / queues_initialized / sleep /
 * invalidate-tlb flags, propagate the stop_copy_preview setting, mark
 * the host2sp command slot as 'ready', and push the per-port MIPI frame
 * counts to the SP.
 *
 * NOTE(review): the HIVE_ADDR_<symbol> local names must match the SP
 * symbol names — sp_address_of() presumably token-pastes the HIVE_ADDR_
 * prefix onto its argument, so these locals cannot be renamed; verify
 * against the sp_address_of() macro definition.
 */
static void
sh_css_init_host_sp_control_vars(void)
{
	const struct ia_css_fw_info *fw;
	unsigned int HIVE_ADDR_ia_css_ispctrl_sp_isp_started;
	unsigned int HIVE_ADDR_host_sp_queues_initialized;
	unsigned int HIVE_ADDR_sp_sleep_mode;
	unsigned int HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;
	unsigned int HIVE_ADDR_sp_stop_copy_preview;
	unsigned int HIVE_ADDR_host_sp_com;
	/* index of the host2sp_command word within host_sp_communication */
	unsigned int o = offsetof(struct host_sp_communication, host2sp_command)
	/ sizeof(int);

	unsigned int i;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "sh_css_init_host_sp_control_vars() enter: void\n");

	/* Look up the DMEM addresses of the SP control symbols from the fw blob */
	fw = &sh_css_sp_fw;
	HIVE_ADDR_ia_css_ispctrl_sp_isp_started = fw->info.sp.isp_started;

	HIVE_ADDR_host_sp_queues_initialized =
	    fw->info.sp.host_sp_queues_initialized;
	HIVE_ADDR_sp_sleep_mode = fw->info.sp.sleep_mode;
	HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb = fw->info.sp.invalidate_tlb;
	HIVE_ADDR_sp_stop_copy_preview = fw->info.sp.stop_copy_preview;
	HIVE_ADDR_host_sp_com = fw->info.sp.host_sp_com;

	/* Clear all SP-side status flags */
	sp_dmem_store_uint32(SP0_ID,
			     (unsigned int)sp_address_of(ia_css_ispctrl_sp_isp_started),
			     (uint32_t)(0));

	sp_dmem_store_uint32(SP0_ID,
			     (unsigned int)sp_address_of(host_sp_queues_initialized),
			     (uint32_t)(0));
	sp_dmem_store_uint32(SP0_ID,
			     (unsigned int)sp_address_of(sp_sleep_mode),
			     (uint32_t)(0));
	sp_dmem_store_uint32(SP0_ID,
			     (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb),
			     (uint32_t)(false));
	/* Propagate the host-side stop_copy_preview policy to the SP */
	sp_dmem_store_uint32(SP0_ID,
			     (unsigned int)sp_address_of(sp_stop_copy_preview),
			     my_css.stop_copy_preview ? (uint32_t)(1) : (uint32_t)(0));
	/* Mark the host2sp command mailbox as ready for a new command */
	store_sp_array_uint(host_sp_com, o, host2sp_cmd_ready);

	/* Publish the configured MIPI frame count for every CSI port */
	for (i = 0; i < N_CSI_PORTS; i++) {
		sh_css_update_host2sp_num_mipi_frames
		(my_css.num_mipi_frames[i]);
	}

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "sh_css_init_host_sp_control_vars() leave: return_void\n");
}
/*
 * create the internal structures and fill in the configuration data
 */

/* Compile-time default pipe configuration, copied into new pipe configs */
static const struct
ia_css_pipe_config ia_css_pipe_default_config = DEFAULT_PIPE_CONFIG;

/*
 * Reset @pipe_config to the library defaults (DEFAULT_PIPE_CONFIG).
 * NOTE(review): unlike ia_css_pipe_extra_config_defaults() below, this
 * function does not NULL-check its argument — callers must pass a valid
 * pointer.
 */
void ia_css_pipe_config_defaults(struct ia_css_pipe_config *pipe_config)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_config_defaults()\n");
	memcpy(pipe_config, &ia_css_pipe_default_config, sizeof(*pipe_config));
}
/*
 * Reset @extra_config so that every optional pipe feature is disabled.
 * A NULL argument is reported and ignored.
 */
void
ia_css_pipe_extra_config_defaults(struct ia_css_pipe_extra_config *extra_config)
{
	if (!extra_config) {
		IA_CSS_ERROR("NULL input parameter");
		return;
	}

	/* All optional features default to off */
	extra_config->disable_vf_pp = false;
	extra_config->enable_dvs_6axis = false;
	extra_config->enable_fractional_ds = false;
	extra_config->enable_high_speed = false;
	extra_config->enable_raw_binning = false;
	extra_config->enable_reduced_pipe = false;
	extra_config->enable_yuv_ds = false;
}
/*
 * Initialize @stream_config to the library defaults: zero everything,
 * then set the few non-zero defaults (online streaming, one pixel per
 * clock, auto left padding and the legacy rxcount value).
 */
void ia_css_stream_config_defaults(struct ia_css_stream_config *stream_config)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_config_defaults()\n");
	assert(stream_config);

	/* Start from all-zero and only fill in the non-trivial defaults */
	memset(stream_config, 0, sizeof(*stream_config));
	stream_config->pixels_per_clock = 1;
	stream_config->left_padding = -1;
	stream_config->online = true;
	/*
	 * temporary default value for backwards compatibility.
	 * This field used to be hardcoded within CSS but this has now
	 * been moved to the stream_config struct.
	 */
	stream_config->source.port.rxcount = 0x04040404;
}
/*
 * Create a pipe from @config and return it in *@pipe.
 * Thin wrapper around ia_css_pipe_create_extra() with no extra config.
 */
int ia_css_pipe_create(const struct ia_css_pipe_config *config,
		       struct ia_css_pipe **pipe)
{
	int err;

	IA_CSS_ENTER_PRIVATE("config = %p, pipe = %p", config, pipe);

	if (!config || !pipe) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	/* Delegate to the extended creator without extra configuration */
	err = ia_css_pipe_create_extra(config, NULL, pipe);

	if (!err)
		IA_CSS_LOG("pipe created successfully = %p", *pipe);

	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/*
 * Create a pipe from @config plus an optional @extra_config and return
 * it in *@pipe. Fills in derived frame infos (YUV downscaling inputs,
 * bayer-downscale output) and configures every enabled output and
 * viewfinder pin. On output/viewfinder configuration failure the
 * partially built pipe is freed with kvfree() and the error returned.
 *
 * Returns 0 on success, -EINVAL on bad arguments or when the maximum
 * number of pipelines is reached, or an error from the helpers.
 */
int
ia_css_pipe_create_extra(const struct ia_css_pipe_config *config,
			 const struct ia_css_pipe_extra_config *extra_config,
			 struct ia_css_pipe **pipe)
{
	int err = -EINVAL;
	struct ia_css_pipe *internal_pipe = NULL;
	unsigned int i;

	IA_CSS_ENTER_PRIVATE("config = %p, extra_config = %p and pipe = %p", config, extra_config, pipe);

	/* do not allow to create more than the maximum limit */
	if (my_css.pipe_counter >= IA_CSS_PIPELINE_NUM_MAX) {
		/* NOTE(review): traces -ENOSPC but returns -EINVAL */
		IA_CSS_LEAVE_ERR_PRIVATE(-ENOSPC);
		return -EINVAL;
	}

	if ((!pipe) || (!config)) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	ia_css_debug_dump_pipe_config(config);
	ia_css_debug_dump_pipe_extra_config(extra_config);

	err = create_pipe(config->mode, &internal_pipe, false);
	if (err) {
		IA_CSS_LEAVE_ERR_PRIVATE(err);
		return err;
	}

	/* now we have a pipe structure to fill */
	internal_pipe->config = *config;
	if (extra_config)
		internal_pipe->extra_config = *extra_config;
	else
		ia_css_pipe_extra_config_defaults(&internal_pipe->extra_config);

	/*
	 * Use config value when dvs_frame_delay setting equal to 2,
	 * otherwise always 1 by default
	 */
	if (internal_pipe->config.dvs_frame_delay == IA_CSS_FRAME_DELAY_2)
		internal_pipe->dvs_frame_delay = 2;
	else
		internal_pipe->dvs_frame_delay = 1;

	/*
	 * we still keep enable_raw_binning for backward compatibility,
	 * for any new fractional bayer downscaling, we should use
	 * bayer_ds_out_res. if both are specified, bayer_ds_out_res will
	 * take precedence.if none is specified, we set bayer_ds_out_res
	 * equal to IF output resolution(IF may do cropping on sensor output)
	 * or use default decimation factor 1.
	 */

	/* YUV downscaling */
	if ((internal_pipe->config.vf_pp_in_res.width ||
	     internal_pipe->config.capt_pp_in_res.width)) {
		enum ia_css_frame_format format;

		/* vf-pp input uses the YUV-line layout */
		if (internal_pipe->config.vf_pp_in_res.width) {
			format = IA_CSS_FRAME_FORMAT_YUV_LINE;
			ia_css_frame_info_init(
			    &internal_pipe->vf_yuv_ds_input_info,
			    internal_pipe->config.vf_pp_in_res.width,
			    internal_pipe->config.vf_pp_in_res.height,
			    format, 0);
		}
		/* capture-pp input uses plain YUV420 */
		if (internal_pipe->config.capt_pp_in_res.width) {
			format = IA_CSS_FRAME_FORMAT_YUV420;
			ia_css_frame_info_init(
			    &internal_pipe->out_yuv_ds_input_info,
			    internal_pipe->config.capt_pp_in_res.width,
			    internal_pipe->config.capt_pp_in_res.height,
			    format, 0);
		}
	}
	/* Preview pipes re-initialize the vf-pp input info unconditionally */
	if (internal_pipe->config.vf_pp_in_res.width &&
	    internal_pipe->config.mode == IA_CSS_PIPE_MODE_PREVIEW) {
		ia_css_frame_info_init(
		    &internal_pipe->vf_yuv_ds_input_info,
		    internal_pipe->config.vf_pp_in_res.width,
		    internal_pipe->config.vf_pp_in_res.height,
		    IA_CSS_FRAME_FORMAT_YUV_LINE, 0);
	}
	/* handle bayer downscaling output info */
	if (internal_pipe->config.bayer_ds_out_res.width) {
		ia_css_frame_info_init(
		    &internal_pipe->bds_output_info,
		    internal_pipe->config.bayer_ds_out_res.width,
		    internal_pipe->config.bayer_ds_out_res.height,
		    IA_CSS_FRAME_FORMAT_RAW, 0);
	}

	/* handle output info, assume always needed */
	for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
		if (internal_pipe->config.output_info[i].res.width) {
			err = sh_css_pipe_configure_output(
			    internal_pipe,
			    internal_pipe->config.output_info[i].res.width,
			    internal_pipe->config.output_info[i].res.height,
			    internal_pipe->config.output_info[i].padded_width,
			    internal_pipe->config.output_info[i].format,
			    i);
			if (err) {
				/* free the partially built pipe on failure */
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				kvfree(internal_pipe);
				internal_pipe = NULL;
				return err;
			}
		}

		/* handle vf output info, when configured */
		internal_pipe->enable_viewfinder[i] =
		    (internal_pipe->config.vf_output_info[i].res.width != 0);
		if (internal_pipe->config.vf_output_info[i].res.width) {
			err = sh_css_pipe_configure_viewfinder(
			    internal_pipe,
			    internal_pipe->config.vf_output_info[i].res.width,
			    internal_pipe->config.vf_output_info[i].res.height,
			    internal_pipe->config.vf_output_info[i].padded_width,
			    internal_pipe->config.vf_output_info[i].format,
			    i);
			if (err) {
				/* free the partially built pipe on failure */
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				kvfree(internal_pipe);
				internal_pipe = NULL;
				return err;
			}
		}
	}

	/* set all info to zeroes first */
	memset(&internal_pipe->info, 0, sizeof(internal_pipe->info));

	/* all went well, return the pipe */
	*pipe = internal_pipe;
	IA_CSS_LEAVE_ERR_PRIVATE(0);
	return 0;
}
/*
 * Copy @pipe's info block into *@pipe_info.
 *
 * Requires a non-NULL destination and a pipe that is part of a created
 * stream (pipe->stream set by ia_css_stream_create()); returns -EINVAL
 * otherwise, 0 on success.
 */
int
ia_css_pipe_get_info(const struct ia_css_pipe *pipe,
		     struct ia_css_pipe_info *pipe_info)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_pipe_get_info()\n");
	if (!pipe_info) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
				    "ia_css_pipe_get_info: pipe_info cannot be NULL\n");
		return -EINVAL;
	}
	if (!pipe || !pipe->stream) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
				    "ia_css_pipe_get_info: ia_css_stream_create needs to be called before ia_css_[stream/pipe]_get_info\n");
		return -EINVAL;
	}
	/* we succeeded return the info */
	*pipe_info = pipe->info;
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_pipe_get_info() leave\n");
	return 0;
}
/*
 * Return true if any DVS statistics level is enabled in @pipe_info's
 * grid configuration; false for a NULL argument or no enabled level.
 */
bool ia_css_pipe_has_dvs_stats(struct ia_css_pipe_info *pipe_info)
{
	unsigned int lvl;

	if (!pipe_info)
		return false;

	for (lvl = 0; lvl < IA_CSS_DVS_STAT_NUM_OF_LEVELS; lvl++) {
		if (pipe_info->grid_info.dvs_grid.dvs_stat_grid_info.grd_cfg[lvl].grd_start.enable)
			return true;
	}

	return false;
}
/*
 * Override the frame format of one output pin of @pipe.
 *
 * Only pin 0 (main output) and pin 1 (viewfinder) are accepted, and the
 * only supported override format is NV12_TILEY; the format must also
 * pass ia_css_pipe_check_format(). Returns 0 on success, -EINVAL or the
 * check-format error otherwise.
 */
int
ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
				  int pin_index,
				  enum ia_css_frame_format new_format)
{
	int err = 0;

	IA_CSS_ENTER_PRIVATE("pipe = %p, pin_index = %d, new_formats = %d", pipe, pin_index, new_format);

	if (!pipe) {
		IA_CSS_ERROR("pipe is not set");
		err = -EINVAL;
	} else if (pin_index != 0 && pin_index != 1) {
		IA_CSS_ERROR("pin index is not valid");
		err = -EINVAL;
	} else if (new_format != IA_CSS_FRAME_FORMAT_NV12_TILEY) {
		IA_CSS_ERROR("new format is not valid");
		err = -EINVAL;
	} else {
		err = ia_css_pipe_check_format(pipe, new_format);
		if (!err) {
			if (pin_index == 0)
				pipe->output_info[0].format = new_format;
			else
				pipe->vf_output_info[0].format = new_format;
		}
	}
	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
#if !defined(ISP2401)
/* Configuration of INPUT_SYSTEM_VERSION_2401 is done on SP */
/*
 * Derive the CSI receiver configuration for @stream from its source
 * port settings (lane count, MIPI port, timeout, rxcount, compression)
 * and mark the rx for reconfiguration.
 *
 * Returns -EINVAL for an unsupported lane count, a port beyond
 * MIPI_PORT2_ID, or any compression type (compression not implemented).
 */
static int
ia_css_stream_configure_rx(struct ia_css_stream *stream)
{
	struct ia_css_input_port *config;

	assert(stream);
	config = &stream->config.source.port;

	/* AM: this code is not reliable, especially for 2400 */
	/* Map the number of active lanes to an rx mode */
	if (config->num_lanes == 1)
		stream->csi_rx_config.mode = MONO_1L_1L_0L;
	else if (config->num_lanes == 2)
		stream->csi_rx_config.mode = MONO_2L_1L_0L;
	else if (config->num_lanes == 3)
		stream->csi_rx_config.mode = MONO_3L_1L_0L;
	else if (config->num_lanes == 4)
		stream->csi_rx_config.mode = MONO_4L_1L_0L;
	else if (config->num_lanes != 0)
		return -EINVAL;

	if (config->port > MIPI_PORT2_ID)
		return -EINVAL;
	stream->csi_rx_config.port =
	ia_css_isys_port_to_mipi_port(config->port);
	stream->csi_rx_config.timeout = config->timeout;
	stream->csi_rx_config.initcount = 0;
	/* NOTE(review): magic sync-count value — meaning not derivable here */
	stream->csi_rx_config.synccount = 0x28282828;
	stream->csi_rx_config.rxcount = config->rxcount;
	if (config->compression.type == IA_CSS_CSI2_COMPRESSION_TYPE_NONE)
		stream->csi_rx_config.comp = MIPI_PREDICTOR_NONE;
	else
		/*
		 * not implemented yet, requires extension of the rx_cfg_t
		 * struct
		 */
		return -EINVAL;

	stream->csi_rx_config.is_two_ppc = (stream->config.pixels_per_clock == 2);
	stream->reconfigure_css_rx = true;
	return 0;
}
#endif
/*
 * Return the first pipe in @pipes whose config mode matches @mode.
 * When @copy_pipe is set, only internal COPY pipes qualify.
 * Returns NULL when no pipe matches.
 */
static struct ia_css_pipe *
find_pipe(struct ia_css_pipe *pipes[], unsigned int num_pipes,
	  enum ia_css_pipe_mode mode, bool copy_pipe)
{
	unsigned int idx;

	assert(pipes);
	for (idx = 0; idx < num_pipes; idx++) {
		struct ia_css_pipe *candidate = pipes[idx];

		assert(candidate);
		if (candidate->config.mode == mode &&
		    (!copy_pipe || candidate->mode == IA_CSS_PIPE_ID_COPY))
			return candidate;
	}
	return NULL;
}
static int
metadata_info_init(const struct ia_css_metadata_config *mdc,
struct ia_css_metadata_info *md)
{
/* Either both width and height should be set or neither */
if ((mdc->resolution.height > 0) ^ (mdc->resolution.width > 0))
return -EINVAL;
md->resolution = mdc->resolution;
/*
* We round up the stride to a multiple of the width
* of the port going to DDR, this is a HW requirements (DMA).
*/
md->stride = CEIL_MUL(mdc->resolution.width, HIVE_ISP_DDR_WORD_BYTES);
md->size = mdc->resolution.height * md->stride;
return 0;
}
/*
 * Create a stream over @num_pipes pipes and return it in *@stream.
 *
 * Validates the configuration (metadata vs JPEG on old isys, online +
 * packed raw on 2401, MIPI buffer sizing for buffered-sensor mode),
 * allocates the stream object, applies mode-specific input-system
 * configuration, creates internal COPY pipes for continuous capture,
 * loads binaries and fills in per-pipe info, then maps SP threads and
 * builds the host pipeline structure. On any failure after allocation
 * the partially created stream is destroyed.
 *
 * FIX: the four references to the local 'copy_pipe' were corrupted into
 * the HTML entity sequence "(c)_pipe" (mojibake of "&copy_pipe") and did
 * not compile; restored to '&copy_pipe' / '&copy_pipe->config'.
 *
 * Returns 0 on success or a negative errno.
 */
int
ia_css_stream_create(const struct ia_css_stream_config *stream_config,
		     int num_pipes,
		     struct ia_css_pipe *pipes[],
		     struct ia_css_stream **stream)
{
	struct ia_css_pipe *curr_pipe;
	struct ia_css_stream *curr_stream = NULL;
	bool spcopyonly;
	bool sensor_binning_changed;
	int i, j;
	int err = -EINVAL;
	struct ia_css_metadata_info md_info;
	struct ia_css_resolution effective_res;

	IA_CSS_ENTER("num_pipes=%d", num_pipes);
	ia_css_debug_dump_stream_config(stream_config, num_pipes);

	/* some checks */
	if (num_pipes == 0 ||
	    !stream ||
	    !pipes) {
		err = -EINVAL;
		IA_CSS_LEAVE_ERR(err);
		return err;
	}

	if (!IS_ISP2401) {
		/* We don't support metadata for JPEG stream, since they both use str2mem */
		if (stream_config->input_config.format == ATOMISP_INPUT_FORMAT_BINARY_8 &&
		    stream_config->metadata_config.resolution.height > 0) {
			err = -EINVAL;
			IA_CSS_LEAVE_ERR(err);
			return err;
		}
	} else {
		if (stream_config->online && stream_config->pack_raw_pixels) {
			IA_CSS_LOG("online and pack raw is invalid on input system 2401");
			err = -EINVAL;
			IA_CSS_LEAVE_ERR(err);
			return err;
		}
	}

	ia_css_debug_pipe_graph_dump_stream_config(stream_config);

	/* check if mipi size specified */
	if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
#ifdef ISP2401
		if (!stream_config->online)
#endif
		{
			unsigned int port = (unsigned int)stream_config->source.port.port;

			if (port >= N_MIPI_PORT_ID) {
				err = -EINVAL;
				IA_CSS_LEAVE_ERR(err);
				return err;
			}

			/* global size override wins, then per-stream config; neither set is an error */
			if (my_css.size_mem_words != 0) {
				my_css.mipi_frame_size[port] = my_css.size_mem_words;
			} else if (stream_config->mipi_buffer_config.size_mem_words != 0) {
				my_css.mipi_frame_size[port] = stream_config->mipi_buffer_config.size_mem_words;
			} else {
				ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
						    "ia_css_stream_create() exit: error, need to set mipi frame size.\n");
				assert(stream_config->mipi_buffer_config.size_mem_words != 0);
				err = -EINVAL;
				IA_CSS_LEAVE_ERR(err);
				return err;
			}

			if (my_css.size_mem_words != 0) {
				my_css.num_mipi_frames[port] =
				    2; /* Temp change: Default for backwards compatibility. */
			} else if (stream_config->mipi_buffer_config.nof_mipi_buffers != 0) {
				my_css.num_mipi_frames[port] =
				    stream_config->mipi_buffer_config.nof_mipi_buffers;
			} else {
				ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
						    "ia_css_stream_create() exit: error, need to set number of mipi frames.\n");
				assert(stream_config->mipi_buffer_config.nof_mipi_buffers != 0);
				err = -EINVAL;
				IA_CSS_LEAVE_ERR(err);
				return err;
			}
		}

	/* Currently we only supported metadata up to a certain size. */
	err = metadata_info_init(&stream_config->metadata_config, &md_info);
	if (err) {
		IA_CSS_LEAVE_ERR(err);
		return err;
	}

	/* allocate the stream instance */
	curr_stream = kzalloc(sizeof(struct ia_css_stream), GFP_KERNEL);
	if (!curr_stream) {
		err = -ENOMEM;
		IA_CSS_LEAVE_ERR(err);
		return err;
	}
	/* default all to 0 */
	curr_stream->info.metadata_info = md_info;

	/* allocate pipes */
	curr_stream->num_pipes = num_pipes;
	curr_stream->pipes = kcalloc(num_pipes, sizeof(struct ia_css_pipe *), GFP_KERNEL);
	if (!curr_stream->pipes) {
		curr_stream->num_pipes = 0;
		kfree(curr_stream);
		curr_stream = NULL;
		err = -ENOMEM;
		IA_CSS_LEAVE_ERR(err);
		return err;
	}
	/* store pipes */
	spcopyonly = (num_pipes == 1) && (pipes[0]->config.mode == IA_CSS_PIPE_MODE_COPY);
	for (i = 0; i < num_pipes; i++)
		curr_stream->pipes[i] = pipes[i];
	curr_stream->last_pipe = curr_stream->pipes[0];
	/* take over stream config */
	curr_stream->config = *stream_config;

	if (IS_ISP2401) {
		/* 2401: online buffered-sensor streams are handled offline */
		if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR &&
		    stream_config->online)
			curr_stream->config.online = false;

		if (curr_stream->config.online) {
			curr_stream->config.source.port.num_lanes =
			    stream_config->source.port.num_lanes;
			curr_stream->config.mode = IA_CSS_INPUT_MODE_BUFFERED_SENSOR;
		}
	}
	/* in case driver doesn't configure init number of raw buffers, configure it here */
	if (curr_stream->config.target_num_cont_raw_buf == 0)
		curr_stream->config.target_num_cont_raw_buf = NUM_CONTINUOUS_FRAMES;
	if (curr_stream->config.init_num_cont_raw_buf == 0)
		curr_stream->config.init_num_cont_raw_buf = curr_stream->config.target_num_cont_raw_buf;

	/* Enable locking & unlocking of buffers in RAW buffer pool */
	if (curr_stream->config.ia_css_enable_raw_buffer_locking)
		sh_css_sp_configure_enable_raw_pool_locking(
		    curr_stream->config.lock_all);

	/* copy mode specific stuff */
	switch (curr_stream->config.mode) {
	case IA_CSS_INPUT_MODE_SENSOR:
	case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
#if !defined(ISP2401)
		ia_css_stream_configure_rx(curr_stream);
#endif
		break;
	case IA_CSS_INPUT_MODE_TPG:
#if !defined(ISP2401)
		IA_CSS_LOG("tpg_configuration: x_mask=%d, y_mask=%d, x_delta=%d, y_delta=%d, xy_mask=%d",
			   curr_stream->config.source.tpg.x_mask,
			   curr_stream->config.source.tpg.y_mask,
			   curr_stream->config.source.tpg.x_delta,
			   curr_stream->config.source.tpg.y_delta,
			   curr_stream->config.source.tpg.xy_mask);

		sh_css_sp_configure_tpg(
		    curr_stream->config.source.tpg.x_mask,
		    curr_stream->config.source.tpg.y_mask,
		    curr_stream->config.source.tpg.x_delta,
		    curr_stream->config.source.tpg.y_delta,
		    curr_stream->config.source.tpg.xy_mask);
#endif
		break;
	case IA_CSS_INPUT_MODE_PRBS:
#if !defined(ISP2401)
		IA_CSS_LOG("mode prbs");
		sh_css_sp_configure_prbs(curr_stream->config.source.prbs.seed);
#endif
		break;
	case IA_CSS_INPUT_MODE_MEMORY:
		IA_CSS_LOG("mode memory");
		curr_stream->reconfigure_css_rx = false;
		break;
	default:
		IA_CSS_LOG("mode sensor/default");
	}

	for (i = 0; i < num_pipes; i++) {
		struct ia_css_resolution effective_res;

		curr_pipe = pipes[i];
		/* set current stream */
		curr_pipe->stream = curr_stream;
		/* take over effective info */
		effective_res = curr_pipe->config.input_effective_res;
		if (effective_res.height == 0 || effective_res.width == 0) {
			effective_res = curr_pipe->stream->config.input_config.effective_res;
			curr_pipe->config.input_effective_res = effective_res;
		}
		IA_CSS_LOG("effective_res=%dx%d",
			   effective_res.width,
			   effective_res.height);
	}

	err = ia_css_stream_isp_parameters_init(curr_stream);
	if (err)
		goto ERR;
	IA_CSS_LOG("isp_params_configs: %p", curr_stream->isp_params_configs);

	/* sensor binning */
	if (!spcopyonly) {
		sensor_binning_changed =
		    sh_css_params_set_binning_factor(curr_stream,
						     curr_stream->config.sensor_binning_factor);
	} else {
		sensor_binning_changed = false;
	}

	IA_CSS_LOG("sensor_binning=%d, changed=%d",
		   curr_stream->config.sensor_binning_factor, sensor_binning_changed);
	/* loop over pipes */
	IA_CSS_LOG("num_pipes=%d", num_pipes);
	curr_stream->cont_capt = false;
	/* Temporary hack: we give the preview pipe a reference to the capture
	 * pipe in continuous capture mode. */
	if (curr_stream->config.continuous) {
		/* Search for the preview pipe and create the copy pipe */
		struct ia_css_pipe *preview_pipe;
		struct ia_css_pipe *video_pipe;
		struct ia_css_pipe *capture_pipe = NULL;
		struct ia_css_pipe *copy_pipe = NULL;

		if (num_pipes >= 2) {
			curr_stream->cont_capt = true;
			curr_stream->disable_cont_vf = curr_stream->config.disable_cont_viewfinder;
			curr_stream->stop_copy_preview = my_css.stop_copy_preview;
		}

		/* Create copy pipe here, since it may not be exposed to the driver */
		preview_pipe = find_pipe(pipes, num_pipes,
					 IA_CSS_PIPE_MODE_PREVIEW, false);
		video_pipe = find_pipe(pipes, num_pipes,
				       IA_CSS_PIPE_MODE_VIDEO, false);

		if (curr_stream->cont_capt) {
			capture_pipe = find_pipe(pipes, num_pipes,
						 IA_CSS_PIPE_MODE_CAPTURE,
						 false);
			if (!capture_pipe) {
				err = -EINVAL;
				goto ERR;
			}
		}
		/* We do not support preview and video pipe at the same time */
		if (preview_pipe && video_pipe) {
			err = -EINVAL;
			goto ERR;
		}

		if (preview_pipe && !preview_pipe->pipe_settings.preview.copy_pipe) {
			err = create_pipe(IA_CSS_PIPE_MODE_CAPTURE, &copy_pipe, true);
			if (err)
				goto ERR;
			ia_css_pipe_config_defaults(&copy_pipe->config);
			preview_pipe->pipe_settings.preview.copy_pipe = copy_pipe;
			copy_pipe->stream = curr_stream;
		}
		if (preview_pipe && curr_stream->cont_capt)
			preview_pipe->pipe_settings.preview.capture_pipe = capture_pipe;

		if (video_pipe && !video_pipe->pipe_settings.video.copy_pipe) {
			err = create_pipe(IA_CSS_PIPE_MODE_CAPTURE, &copy_pipe, true);
			if (err)
				goto ERR;
			ia_css_pipe_config_defaults(&copy_pipe->config);
			video_pipe->pipe_settings.video.copy_pipe = copy_pipe;
			copy_pipe->stream = curr_stream;
		}
		if (video_pipe && curr_stream->cont_capt)
			video_pipe->pipe_settings.video.capture_pipe = capture_pipe;
	}
	for (i = 0; i < num_pipes; i++) {
		curr_pipe = pipes[i];
		/* set current stream */
		curr_pipe->stream = curr_stream;

		/* take over effective info */
		effective_res = curr_pipe->config.input_effective_res;
		err = ia_css_util_check_res(
		    effective_res.width,
		    effective_res.height);
		if (err)
			goto ERR;

		/* sensor binning per pipe */
		if (sensor_binning_changed)
			sh_css_pipe_free_shading_table(curr_pipe);
	}

	/* now pipes have been configured, info should be available */
	for (i = 0; i < num_pipes; i++) {
		struct ia_css_pipe_info *pipe_info = NULL;

		curr_pipe = pipes[i];

		err = sh_css_pipe_load_binaries(curr_pipe);
		if (err)
			goto ERR;

		/* handle each pipe */
		pipe_info = &curr_pipe->info;
		for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) {
			err = sh_css_pipe_get_output_frame_info(curr_pipe,
								&pipe_info->output_info[j], j);
			if (err)
				goto ERR;
		}

		if (!spcopyonly) {
			if (!IS_ISP2401)
				err = sh_css_pipe_get_shading_info(curr_pipe,
								   &pipe_info->shading_info,
								   NULL);
			else
				err = sh_css_pipe_get_shading_info(curr_pipe,
								   &pipe_info->shading_info,
								   &curr_pipe->config);

			if (err)
				goto ERR;
			err = sh_css_pipe_get_grid_info(curr_pipe,
							&pipe_info->grid_info);
			if (err)
				goto ERR;
			for (j = 0; j < IA_CSS_PIPE_MAX_OUTPUT_STAGE; j++) {
				sh_css_pipe_get_viewfinder_frame_info(curr_pipe,
								      &pipe_info->vf_output_info[j],
								      j);
				if (err)
					goto ERR;
			}
		}

		my_css.active_pipes[ia_css_pipe_get_pipe_num(curr_pipe)] = curr_pipe;
	}

	curr_stream->started = false;

	/* Map SP threads before doing anything. */
	err = map_sp_threads(curr_stream, true);
	if (err) {
		IA_CSS_LOG("map_sp_threads: return_err=%d", err);
		goto ERR;
	}

	for (i = 0; i < num_pipes; i++) {
		curr_pipe = pipes[i];
		ia_css_pipe_map_queue(curr_pipe, true);
	}

	/* Create host side pipeline objects without stages */
	err = create_host_pipeline_structure(curr_stream);
	if (err) {
		IA_CSS_LOG("create_host_pipeline_structure: return_err=%d", err);
		goto ERR;
	}

	/* assign curr_stream */
	*stream = curr_stream;

ERR:
	if (!err) {
		/* working mode: enter into the seed list */
		if (my_css_save.mode == sh_css_mode_working) {
			for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
				if (!my_css_save.stream_seeds[i].stream) {
					IA_CSS_LOG("entered stream into loc=%d", i);
					my_css_save.stream_seeds[i].orig_stream = stream;
					my_css_save.stream_seeds[i].stream = curr_stream;
					my_css_save.stream_seeds[i].num_pipes = num_pipes;
					my_css_save.stream_seeds[i].stream_config = *stream_config;
					for (j = 0; j < num_pipes; j++) {
						my_css_save.stream_seeds[i].pipe_config[j] = pipes[j]->config;
						my_css_save.stream_seeds[i].pipes[j] = pipes[j];
						my_css_save.stream_seeds[i].orig_pipes[j] = &pipes[j];
					}
					break;
				}
			}
		} else {
			ia_css_stream_destroy(curr_stream);
		}
	} else {
		ia_css_stream_destroy(curr_stream);
	}
	IA_CSS_LEAVE("return_err=%d mode=%d", err, my_css_save.mode);
	return err;
}
/*
 * Destroy @stream: tear down isys streams and free MIPI frames (2401),
 * unmap queues and SP threads, clear pipe->stream back-references
 * (including internal copy pipes), unload binaries, remove the stream
 * from the seed list and free its memory.
 *
 * FIX: in the ISP2401 branch the loop over the virtual input-system
 * streams reused the OUTER loop index 'i', clobbering the per-pipe
 * iteration (after the inner loop, 'i' equalled
 * IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH, so subsequent pipes were
 * skipped or the wrong ones visited). A dedicated inner index is now
 * used.
 *
 * Returns 0 on success or a negative errno.
 */
int
ia_css_stream_destroy(struct ia_css_stream *stream)
{
	int i;
	int err = 0;

	IA_CSS_ENTER_PRIVATE("stream = %p", stream);
	if (!stream) {
		err = -EINVAL;
		IA_CSS_LEAVE_ERR_PRIVATE(err);
		return err;
	}

	ia_css_stream_isp_parameters_uninit(stream);

	if ((stream->last_pipe) &&
	    ia_css_pipeline_is_mapped(stream->last_pipe->pipe_num)) {
#if defined(ISP2401)
		for (i = 0; i < stream->num_pipes; i++) {
			struct ia_css_pipe *entry = stream->pipes[i];
			unsigned int sp_thread_id;
			struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;

			assert(entry);
			if (entry) {
				int isys_idx;

				/* get the SP thread id */
				if (!ia_css_pipeline_get_sp_thread_id(
					ia_css_pipe_get_pipe_num(entry), &sp_thread_id))
					return -EINVAL;

				/* get the target input terminal */
				sp_pipeline_input_terminal =
				    &sh_css_sp_group.pipe_io[sp_thread_id].input;

				/*
				 * Use a dedicated index here; the original code
				 * reused 'i' and corrupted the outer pipe loop.
				 */
				for (isys_idx = 0; isys_idx < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; isys_idx++) {
					ia_css_isys_stream_h isys_stream =
					    &sp_pipeline_input_terminal->context.virtual_input_system_stream[isys_idx];
					if (stream->config.isys_config[isys_idx].valid && isys_stream->valid)
						ia_css_isys_stream_destroy(isys_stream);
				}
			}
		}
		if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
			for (i = 0; i < stream->num_pipes; i++) {
				struct ia_css_pipe *entry = stream->pipes[i];
				/*
				 * free any mipi frames that are remaining:
				 * some test stream create-destroy cycles do
				 * not generate output frames
				 * and the mipi buffer is not freed in the
				 * deque function
				 */
				if (entry)
					free_mipi_frames(entry);
			}
		}
		stream_unregister_with_csi_rx(stream);
#endif

		for (i = 0; i < stream->num_pipes; i++) {
			struct ia_css_pipe *curr_pipe = stream->pipes[i];

			assert(curr_pipe);
			ia_css_pipe_map_queue(curr_pipe, false);
		}

		err = map_sp_threads(stream, false);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
	}

	/* remove references from pipes to stream */
	for (i = 0; i < stream->num_pipes; i++) {
		struct ia_css_pipe *entry = stream->pipes[i];

		assert(entry);
		if (entry) {
			/* clear reference to stream */
			entry->stream = NULL;
			/* check internal copy pipe */
			if (entry->mode == IA_CSS_PIPE_ID_PREVIEW &&
			    entry->pipe_settings.preview.copy_pipe) {
				IA_CSS_LOG("clearing stream on internal preview copy pipe");
				entry->pipe_settings.preview.copy_pipe->stream = NULL;
			}
			if (entry->mode == IA_CSS_PIPE_ID_VIDEO &&
			    entry->pipe_settings.video.copy_pipe) {
				IA_CSS_LOG("clearing stream on internal video copy pipe");
				entry->pipe_settings.video.copy_pipe->stream = NULL;
			}
			err = sh_css_pipe_unload_binaries(entry);
		}
	}

	/* free associated memory of stream struct */
	kfree(stream->pipes);
	stream->pipes = NULL;
	stream->num_pipes = 0;

	/* working mode: take out of the seed list */
	if (my_css_save.mode == sh_css_mode_working) {
		for (i = 0; i < MAX_ACTIVE_STREAMS; i++) {
			if (my_css_save.stream_seeds[i].stream == stream) {
				IA_CSS_LOG("took out stream %d", i);
				my_css_save.stream_seeds[i].stream = NULL;
				break;
			}
		}
	}
	kfree(stream);
	IA_CSS_LEAVE_ERR(err);
	return err;
}
/* Copy the stream's cached info structure into the caller's buffer. */
int
ia_css_stream_get_info(const struct ia_css_stream *stream,
		       struct ia_css_stream_info *stream_info)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_get_info: enter/exit\n");
	assert(stream);
	assert(stream_info);

	/* plain struct copy of the info cached at stream-create time */
	*stream_info = stream->info;

	return 0;
}
/*
 * Start a stream: build the host-side pipeline, hook up the input network
 * (CSI receiver on ISP2401, MIPI size checks on ISP2400) and kick off the
 * pipes on the SP.
 *
 * Returns 0 on success, -EINVAL for a NULL stream/last_pipe, otherwise the
 * first error from pipeline creation, input network config or pipe start.
 */
int
ia_css_stream_start(struct ia_css_stream *stream)
{
	int err = 0;

	IA_CSS_ENTER("stream = %p", stream);
	if ((!stream) || (!stream->last_pipe)) {
		IA_CSS_LEAVE_ERR(-EINVAL);
		return -EINVAL;
	}
	IA_CSS_LOG("starting %d", stream->last_pipe->mode);

	sh_css_sp_set_disable_continuous_viewfinder(stream->disable_cont_vf);

	/* Create host side pipeline. */
	err = create_host_pipeline(stream);
	if (err) {
		IA_CSS_LEAVE_ERR(err);
		return err;
	}

#if defined(ISP2401)
	/* ISP2401: attach the stream to the CSI receiver for sensor inputs */
	if ((stream->config.mode == IA_CSS_INPUT_MODE_SENSOR) ||
	    (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR))
		stream_register_with_csi_rx(stream);
#endif

#if !defined(ISP2401)
	/* Initialize mipi size checks */
	if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
		unsigned int idx;
		unsigned int port = (unsigned int)(stream->config.source.port.port);

		for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++) {
			sh_css_sp_group.config.mipi_sizes_for_check[port][idx] =
			sh_css_get_mipi_sizes_for_check(port, idx);
		}
	}
#endif

	/* memory input needs no input network configuration */
	if (stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) {
		err = sh_css_config_input_network(stream);
		if (err)
			return err;
	}

	err = sh_css_pipe_start(stream);
	IA_CSS_LEAVE_ERR(err);
	return err;
}
/*
 * Request the stream's main pipeline to stop.  On ISP2400 the per-port
 * MIPI size checks are cleared first.  This only *requests* the stop;
 * callers poll ia_css_stream_has_stopped() for completion.
 */
int
ia_css_stream_stop(struct ia_css_stream *stream)
{
	int err = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop() enter/exit\n");
	assert(stream);
	assert(stream->last_pipe);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop: stopping %d\n",
			    stream->last_pipe->mode);

#if !defined(ISP2401)
	/* De-initialize mipi size checks */
	if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
		unsigned int idx;
		unsigned int port = (unsigned int)(stream->config.source.port.port);

		for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++)
			sh_css_sp_group.config.mipi_sizes_for_check[port][idx] = 0;
	}
#endif

	err = ia_css_pipeline_request_stop(&stream->last_pipe->pipeline);
	if (err)
		return err;

	/*
	 * Ideally, unmapping should happen after pipeline_stop, but current
	 * semantics do not allow that.
	 */
	/* err = map_sp_threads(stream, false); */
	return err;
}
/* Poll whether the stream's main pipeline has fully stopped. */
bool
ia_css_stream_has_stopped(struct ia_css_stream *stream)
{
	assert(stream);

	return ia_css_pipeline_has_stopped(&stream->last_pipe->pipeline);
}
/* ISP2400 */
/*
 * Destroy the stream and all the pipes related to it.
 * The stream handle is used to identify the correct entry in the css_save
 * struct; when found, the stream and every pipe recorded in its seed are
 * destroyed.  Always returns 0 (a stream not present in the seed list is
 * silently ignored).
 */
int
ia_css_stream_unload(struct ia_css_stream *stream)
{
	int i;

	/* note: previously asserted twice; once is enough */
	assert(stream);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_unload() enter,\n");
	for (i = 0; i < MAX_ACTIVE_STREAMS; i++)
		if (my_css_save.stream_seeds[i].stream == stream) {
			int j;

			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_stream_unload(): unloading %d (%p)\n", i,
					    my_css_save.stream_seeds[i].stream);
			ia_css_stream_destroy(stream);
			for (j = 0; j < my_css_save.stream_seeds[i].num_pipes; j++)
				ia_css_pipe_destroy(my_css_save.stream_seeds[i].pipes[j]);
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_stream_unload(): after unloading %d (%p)\n", i,
					    my_css_save.stream_seeds[i].stream);
			break;
		}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_unload() exit,\n");
	return 0;
}
/* Map a pipe handle to its pipe id; a NULL pipe maps to the copy pipe id. */
int
ia_css_temp_pipe_to_pipe_id(const struct ia_css_pipe *pipe,
			    enum ia_css_pipe_id *pipe_id)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_temp_pipe_to_pipe_id() enter/exit\n");

	*pipe_id = pipe ? pipe->mode : IA_CSS_PIPE_ID_COPY;

	return 0;
}
enum atomisp_input_format
ia_css_stream_get_format(const struct ia_css_stream *stream)
{
return stream->config.input_config.format;
}
/* True when the sensor delivers two pixels per clock cycle. */
bool
ia_css_stream_get_two_pixels_per_clock(const struct ia_css_stream *stream)
{
	return stream->config.pixels_per_clock == 2;
}
struct ia_css_binary *
ia_css_stream_get_shading_correction_binary(const struct ia_css_stream
*stream)
{
struct ia_css_pipe *pipe;
assert(stream);
pipe = stream->pipes[0];
if (stream->num_pipes == 2) {
assert(stream->pipes[1]);
if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
pipe = stream->pipes[1];
}
return ia_css_pipe_get_shading_correction_binary(pipe);
}
/*
 * Return the DVS-capable binary of the stream's video pipe, or NULL when
 * the stream has no video pipe.
 */
struct ia_css_binary *
ia_css_stream_get_dvs_binary(const struct ia_css_stream *stream)
{
	int idx;

	/* only the video pipe carries the DVS binary */
	for (idx = 0; idx < stream->num_pipes; idx++) {
		struct ia_css_pipe *candidate = stream->pipes[idx];

		if (candidate->config.mode == IA_CSS_PIPE_MODE_VIDEO)
			return &candidate->pipe_settings.video.video_binary;
	}

	return NULL;
}
struct ia_css_binary *
ia_css_stream_get_3a_binary(const struct ia_css_stream *stream)
{
struct ia_css_pipe *pipe;
struct ia_css_binary *s3a_binary = NULL;
assert(stream);
pipe = stream->pipes[0];
if (stream->num_pipes == 2) {
assert(stream->pipes[1]);
if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
pipe = stream->pipes[1];
}
s3a_binary = ia_css_pipe_get_s3a_binary(pipe);
return s3a_binary;
}
/*
 * Set the padded output width on the stream's last pipe.  The value is
 * mirrored into both the pipe config and the active output info.
 */
int
ia_css_stream_set_output_padded_width(struct ia_css_stream *stream,
				      unsigned int output_padded_width)
{
	struct ia_css_pipe *pipe;

	assert(stream);
	pipe = stream->last_pipe;
	assert(pipe);

	/* the config copy looks redundant but is deliberately kept in sync */
	pipe->config.output_info[IA_CSS_PIPE_OUTPUT_STAGE_0].padded_width = output_padded_width;
	pipe->output_info[IA_CSS_PIPE_OUTPUT_STAGE_0].padded_width = output_padded_width;

	return 0;
}
/*
 * Return the binary within @pipe that performs shading correction (SC),
 * or NULL when no candidate binary has SC enabled.
 */
static struct ia_css_binary *
ia_css_pipe_get_shading_correction_binary(const struct ia_css_pipe *pipe)
{
	struct ia_css_binary *binary = NULL;

	assert(pipe);

	switch (pipe->config.mode) {
	case IA_CSS_PIPE_MODE_PREVIEW:
		binary = (struct ia_css_binary *)&pipe->pipe_settings.preview.preview_binary;
		break;
	case IA_CSS_PIPE_MODE_VIDEO:
		binary = (struct ia_css_binary *)&pipe->pipe_settings.video.video_binary;
		break;
	case IA_CSS_PIPE_MODE_CAPTURE:
		if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
			unsigned int i;

			/* pick the first primary stage with SC enabled */
			for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
				if (pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.sc) {
					binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.primary_binary[i];
					break;
				}
			}
		} else if (pipe->config.default_capture_config.mode ==
			   IA_CSS_CAPTURE_MODE_BAYER) {
			/* braces added for kernel-style consistency with the chain */
			binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
		} else if (pipe->config.default_capture_config.mode ==
			   IA_CSS_CAPTURE_MODE_ADVANCED ||
			   pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
			if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
				binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
			else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2)
				binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.post_isp_binary;
		}
		break;
	default:
		break;
	}

	/* only report a binary that actually has SC enabled */
	if (binary && binary->info->sp.enable.sc)
		return binary;

	return NULL;
}
/*
 * Return the binary within @pipe that produces 3A (s3a) statistics, or
 * NULL when no candidate binary has s3a enabled.
 */
static struct ia_css_binary *
ia_css_pipe_get_s3a_binary(const struct ia_css_pipe *pipe)
{
	struct ia_css_binary *binary = NULL;

	assert(pipe);

	switch (pipe->config.mode) {
	case IA_CSS_PIPE_MODE_PREVIEW:
		binary = (struct ia_css_binary *)&pipe->pipe_settings.preview.preview_binary;
		break;
	case IA_CSS_PIPE_MODE_VIDEO:
		binary = (struct ia_css_binary *)&pipe->pipe_settings.video.video_binary;
		break;
	case IA_CSS_PIPE_MODE_CAPTURE:
		if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
			unsigned int i;

			/* pick the first primary stage that produces 3A stats */
			for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
				if (pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.s3a) {
					binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.primary_binary[i];
					break;
				}
			}
		} else if (pipe->config.default_capture_config.mode ==
			   IA_CSS_CAPTURE_MODE_BAYER) {
			binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
		} else if (pipe->config.default_capture_config.mode ==
			   IA_CSS_CAPTURE_MODE_ADVANCED ||
			   pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT) {
			if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
				binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.pre_isp_binary;
			else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2)
				binary = (struct ia_css_binary *)&pipe->pipe_settings.capture.post_isp_binary;
			else
				assert(0); /* unknown ISP pipe version */
		}
		break;
	default:
		break;
	}

	/* discard candidates that do not actually produce 3A statistics */
	if (binary && !binary->info->sp.enable.s3a)
		binary = NULL;

	return binary;
}
/*
 * Return the binary within @pipe that produces DIS statistics, or NULL.
 * Only the video pipe carries a DIS-capable binary.
 */
static struct ia_css_binary *
ia_css_pipe_get_sdis_binary(const struct ia_css_pipe *pipe)
{
	struct ia_css_binary *candidate;

	assert(pipe);

	if (pipe->config.mode != IA_CSS_PIPE_MODE_VIDEO)
		return NULL;

	candidate = (struct ia_css_binary *)&pipe->pipe_settings.video.video_binary;
	if (!candidate->info->sp.enable.dis)
		return NULL;

	return candidate;
}
/* Expose the pipe's embedded pipeline object (casting away const). */
struct ia_css_pipeline *
ia_css_pipe_get_pipeline(const struct ia_css_pipe *pipe)
{
	assert(pipe);

	return (struct ia_css_pipeline *)&pipe->pipeline;
}
/* Return the pipe's number, defensively clamped to the valid range. */
unsigned int
ia_css_pipe_get_pipe_num(const struct ia_css_pipe *pipe)
{
	assert(pipe);

	/*
	 * pipe_num should always be below IA_CSS_PIPELINE_NUM_MAX; assert in
	 * debug builds and, when asserts are compiled out, clip to the
	 * largest valid value (pipe_num is unsigned, so it cannot be too
	 * small).
	 */
	assert(pipe->pipe_num < IA_CSS_PIPELINE_NUM_MAX);
	if (pipe->pipe_num < IA_CSS_PIPELINE_NUM_MAX)
		return pipe->pipe_num;

	return IA_CSS_PIPELINE_NUM_MAX - 1;
}
/* Report which ISP pipe version this pipe was configured with. */
unsigned int
ia_css_pipe_get_isp_pipe_version(const struct ia_css_pipe *pipe)
{
	assert(pipe);

	return (unsigned int)pipe->config.isp_pipe_version;
}
/* Max time (in 1us polls) to wait for the SP to report initialized. */
#define SP_START_TIMEOUT_US 30000000
/*
 * Boot the SP (ISP service processor) firmware, busy-wait until it reports
 * software-initialized, then set up the host<->SP queues.
 * Returns 0 on success or -EINVAL if the SP did not come up in time.
 */
int
ia_css_start_sp(void)
{
	unsigned long timeout;
	int err = 0;

	IA_CSS_ENTER("");
	sh_css_sp_start_isp();

	/* waiting for the SP is completely started */
	timeout = SP_START_TIMEOUT_US;
	while ((ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_INITIALIZED) && timeout) {
		timeout--;
		udelay(1);
	}
	if (timeout == 0) {
		IA_CSS_ERROR("timeout during SP initialization");
		return -EINVAL;
	}

	/* Workaround, in order to run two streams in parallel. See TASK 4271*/
	/* TODO: Fix this. */
	sh_css_init_host_sp_control_vars();

	/* buffers should be initialized only when sp is started */
	/* AM: At the moment it will be done only when there is no stream active. */
	sh_css_setup_queues();
	ia_css_bufq_dump_queue_info();

	IA_CSS_LEAVE_ERR(err);
	return err;
}
/*
 * Time to wait for the SP to terminate. The only condition when this can
 * happen is a fatal hw failure, but we must be able to detect this and
 * emit a proper error trace.
 */
#define SP_SHUTDOWN_TIMEOUT_US 200000
/*
 * Stop the SP: send the terminate command, busy-wait for both the SP and
 * the ISP to go idle (dumping debug info on timeout), then release the
 * HMM buffer records and pending parameter sets.
 * Returns -EINVAL when the SP is not running, otherwise 0.
 */
int
ia_css_stop_sp(void)
{
	unsigned long timeout;
	int err = 0;

	IA_CSS_ENTER("void");
	if (!sh_css_sp_is_running()) {
		err = -EINVAL;
		IA_CSS_LEAVE("SP already stopped : return_err=%d", err);

		/* Return an error - stop SP should not have been called by driver */
		return err;
	}

	/* For now, stop whole SP */
	if (!sh_css_write_host2sp_command(host2sp_cmd_terminate)) {
		IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
		ia_css_debug_dump_sp_sw_debug_info();
		ia_css_debug_dump_debug_info(NULL);
	}

	sh_css_sp_set_sp_running(false);

	/* first wait for the SP itself to go idle */
	timeout = SP_SHUTDOWN_TIMEOUT_US;
	while (!ia_css_spctrl_is_idle(SP0_ID) && timeout) {
		timeout--;
		udelay(1);
	}
	if (ia_css_spctrl_get_state(SP0_ID) != IA_CSS_SP_SW_TERMINATED)
		IA_CSS_WARNING("SP has not terminated (SW)");

	if (timeout == 0) {
		IA_CSS_WARNING("SP is not idle");
		ia_css_debug_dump_sp_sw_debug_info();
	}

	/* then wait for the ISP core to become idle as well */
	timeout = SP_SHUTDOWN_TIMEOUT_US;
	while (!isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT) && timeout) {
		timeout--;
		udelay(1);
	}
	if (timeout == 0) {
		IA_CSS_WARNING("ISP is not idle");
		ia_css_debug_dump_sp_sw_debug_info();
	}

	sh_css_hmm_buffer_record_uninit();

	/* clear pending param sets from refcount */
	sh_css_param_clear_param_sets();

	IA_CSS_LEAVE_ERR(err);
	return err;
}
/*
 * Push the continuous-capture raw frame ring (entries from
 * init_num_cont_raw_buf up to target_num_cont_raw_buf) down to the SP and
 * update the SP's raw frame count.
 * Returns -EINVAL when the stream pointer is NULL, 0 otherwise.
 */
int
ia_css_update_continuous_frames(struct ia_css_stream *stream)
{
	struct ia_css_pipe *pipe;
	unsigned int i;

	ia_css_debug_dtrace(
	    IA_CSS_DEBUG_TRACE,
	    "sh_css_update_continuous_frames() enter:\n");

	if (!stream) {
		ia_css_debug_dtrace(
		    IA_CSS_DEBUG_TRACE,
		    "sh_css_update_continuous_frames() leave: invalid stream, return_void\n");
		return -EINVAL;
	}

	pipe = stream->continuous_pipe;

	/* hand each new offline raw frame + metadata buffer to the SP */
	for (i = stream->config.init_num_cont_raw_buf;
	     i < stream->config.target_num_cont_raw_buf; i++)
		sh_css_update_host2sp_offline_frame(i,
						    pipe->continuous_frames[i], pipe->cont_md_buffers[i]);
	sh_css_update_host2sp_cont_num_raw_frames
	(stream->config.target_num_cont_raw_buf, true);
	ia_css_debug_dtrace(
	    IA_CSS_DEBUG_TRACE,
	    "sh_css_update_continuous_frames() leave: return_void\n");

	return 0;
}
/*
 * Map (map=true) or unmap (map=false) the host<->SP buffer queues that a
 * pipe needs onto the queue resources of the pipe's SP thread.  Which
 * queues are required depends on the pipe mode and on which features
 * (viewfinder, 3A, DIS) the pipe's binaries enable.
 */
void ia_css_pipe_map_queue(struct ia_css_pipe *pipe, bool map)
{
	unsigned int thread_id;
	unsigned int pipe_num;
	bool need_input_queue;

	IA_CSS_ENTER("");
	assert(pipe);

	pipe_num = pipe->pipe_num;
	ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);

	/* ISP2401 always feeds frames through the input queue */
	if (IS_ISP2401)
		need_input_queue = true;
	else
		need_input_queue = pipe->stream->config.mode == IA_CSS_INPUT_MODE_MEMORY;

	/* map required buffer queues to resources */
	/* TODO: to be improved */
	if (pipe->mode == IA_CSS_PIPE_ID_PREVIEW) {
		if (need_input_queue)
			ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
		/* 3A stats queue only when the preview binary produces them */
		if (pipe->pipe_settings.preview.preview_binary.info &&
		    pipe->pipe_settings.preview.preview_binary.info->sp.enable.s3a)
			ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
	} else if (pipe->mode == IA_CSS_PIPE_ID_CAPTURE) {
		unsigned int i;

		if (need_input_queue)
			ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
		/* 3A stats queue if any stage of the capture path enables s3a */
		if (pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_PRIMARY) {
			for (i = 0; i < pipe->pipe_settings.capture.num_primary_stage; i++) {
				if (pipe->pipe_settings.capture.primary_binary[i].info &&
				    pipe->pipe_settings.capture.primary_binary[i].info->sp.enable.s3a) {
					ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
					break;
				}
			}
		} else if (pipe->config.default_capture_config.mode ==
			   IA_CSS_CAPTURE_MODE_ADVANCED ||
			   pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_LOW_LIGHT ||
			   pipe->config.default_capture_config.mode == IA_CSS_CAPTURE_MODE_BAYER) {
			if (pipe->pipe_settings.capture.pre_isp_binary.info &&
			    pipe->pipe_settings.capture.pre_isp_binary.info->sp.enable.s3a)
				ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
		}
	} else if (pipe->mode == IA_CSS_PIPE_ID_VIDEO) {
		if (need_input_queue)
			ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
		/* viewfinder queue only when stage-0 viewfinder is enabled */
		if (pipe->enable_viewfinder[IA_CSS_PIPE_OUTPUT_STAGE_0])
			ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
		if (pipe->pipe_settings.video.video_binary.info &&
		    pipe->pipe_settings.video.video_binary.info->sp.enable.s3a)
			ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_3A_STATISTICS, map);
		if (pipe->pipe_settings.video.video_binary.info &&
		    (pipe->pipe_settings.video.video_binary.info->sp.enable.dis
		    ))
			ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_DIS_STATISTICS, map);
	} else if (pipe->mode == IA_CSS_PIPE_ID_COPY) {
		if (need_input_queue)
			ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
		/* continuous copy pipes deliver frames via the raw ring, not a queue */
		if (!pipe->stream->config.continuous)
			ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
	} else if (pipe->mode == IA_CSS_PIPE_ID_YUVPP) {
		unsigned int idx;

		/* one output (and optional viewfinder) queue per output stage */
		for (idx = 0; idx < IA_CSS_PIPE_MAX_OUTPUT_STAGE; idx++) {
			ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME + idx, map);
			if (pipe->enable_viewfinder[idx])
				ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME + idx, map);
		}
		if (need_input_queue)
			ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_INPUT_FRAME, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_PARAMETER_SET, map);
		ia_css_queue_map(thread_id, IA_CSS_BUFFER_TYPE_METADATA, map);
	}
	IA_CSS_LEAVE("");
}
/*
 * Ask the SP tagger to unlock the continuous-capture raw frame identified
 * by @exp_id so that its buffer can be reused.
 *
 * Returns 0 on success, -EINVAL for a NULL/non-continuous stream or an
 * out-of-range exposure ID, otherwise the event-queue error code.
 */
int
ia_css_unlock_raw_frame(struct ia_css_stream *stream, uint32_t exp_id)
{
	int ret;

	IA_CSS_ENTER("");

	/*
	 * Only continuous streams have a tagger to which we can send the
	 * unlock message.
	 */
	if (!stream || !stream->config.continuous) {
		IA_CSS_ERROR("invalid stream pointer");
		return -EINVAL;
	}

	if (exp_id > IA_CSS_ISYS_MAX_EXPOSURE_ID ||
	    exp_id < IA_CSS_ISYS_MIN_EXPOSURE_ID) {
		/* %u: exp_id is unsigned (uint32_t); %d was a type mismatch */
		IA_CSS_ERROR("invalid exposure ID: %u\n", exp_id);
		return -EINVAL;
	}

	/*
	 * Send the event. Since we verified that the exp_id is valid,
	 * we can safely assign it to an 8-bit argument here.
	 */
	ret = ia_css_bufq_enqueue_psys_event(
	    IA_CSS_PSYS_SW_EVENT_UNLOCK_RAW_BUFFER, exp_id, 0, 0);

	IA_CSS_LEAVE_ERR(ret);
	return ret;
}
static void
sh_css_hmm_buffer_record_init(void)
{
int i;
for (i = 0; i < MAX_HMM_BUFFER_NUM; i++)
sh_css_hmm_buffer_record_reset(&hmm_buffer_record[i]);
}
static void
sh_css_hmm_buffer_record_uninit(void)
{
int i;
struct sh_css_hmm_buffer_record *buffer_record = NULL;
buffer_record = &hmm_buffer_record[0];
for (i = 0; i < MAX_HMM_BUFFER_NUM; i++) {
if (buffer_record->in_use) {
if (buffer_record->h_vbuf)
ia_css_rmgr_rel_vbuf(hmm_buffer_pool, &buffer_record->h_vbuf);
sh_css_hmm_buffer_record_reset(buffer_record);
}
buffer_record++;
}
}
/* Return a tracking slot to its pristine, unused state. */
static void
sh_css_hmm_buffer_record_reset(struct sh_css_hmm_buffer_record *buffer_record)
{
	assert(buffer_record);

	buffer_record->in_use = false;
	buffer_record->type = IA_CSS_BUFFER_TYPE_INVALID;
	buffer_record->h_vbuf = NULL;
	buffer_record->kernel_ptr = 0;
}
/*
 * Claim the first free slot in the HMM buffer tracking table and fill it
 * with the given vbuf handle, buffer type and kernel pointer.
 * Returns the claimed record, or NULL when the table is full.
 */
static struct sh_css_hmm_buffer_record
*sh_css_hmm_buffer_record_acquire(struct ia_css_rmgr_vbuf_handle *h_vbuf,
				  enum ia_css_buffer_type type,
				  hrt_address kernel_ptr)
{
	struct sh_css_hmm_buffer_record *rec = &hmm_buffer_record[0];
	int i;

	assert(h_vbuf);
	assert((type > IA_CSS_BUFFER_TYPE_INVALID) &&
	       (type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE));
	assert(kernel_ptr != 0);

	for (i = 0; i < MAX_HMM_BUFFER_NUM; i++, rec++) {
		if (rec->in_use)
			continue;
		rec->in_use = true;
		rec->type = type;
		rec->h_vbuf = h_vbuf;
		rec->kernel_ptr = kernel_ptr;
		return rec;
	}

	return NULL;
}
/*
 * Find the in-use record of the given type whose vbuf maps the supplied
 * DDR address.  Returns the matching record, or NULL when none exists.
 */
static struct sh_css_hmm_buffer_record
*sh_css_hmm_buffer_record_validate(ia_css_ptr ddr_buffer_addr,
				   enum ia_css_buffer_type type)
{
	struct sh_css_hmm_buffer_record *rec = &hmm_buffer_record[0];
	int i;

	for (i = 0; i < MAX_HMM_BUFFER_NUM; i++, rec++) {
		if (rec->in_use &&
		    rec->type == type &&
		    rec->h_vbuf &&
		    rec->h_vbuf->vptr == ddr_buffer_addr)
			return rec;
	}

	return NULL;
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Medifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
*
* Copyright (c) 2010 Silicon Hive www.siliconhive.com.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/interrupt.h>
#include <linux/bits.h>
#include <media/v4l2-fwnode.h>
#include <asm/iosf_mbi.h>
#include "../../include/linux/atomisp_gmin_platform.h"
#include "atomisp_cmd.h"
#include "atomisp_common.h"
#include "atomisp_fops.h"
#include "atomisp_ioctl.h"
#include "atomisp_internal.h"
#include "atomisp-regs.h"
#include "atomisp_dfs_tables.h"
#include "atomisp_drvfs.h"
#include "hmm/hmm.h"
#include "atomisp_trace_event.h"
#include "sh_css_firmware.h"
#include "device_access.h"
/* Timeouts to wait for all subdevs to be registered */
#define SUBDEV_WAIT_TIMEOUT 50 /* ms */
#define SUBDEV_WAIT_TIMEOUT_MAX_COUNT 40 /* up to 2 seconds */
/* G-Min addition: pull this in from intel_mid_pm.h */
#define CSTATE_EXIT_LATENCY_C1 1
/* when non-zero, the firmware download at probe time is skipped */
static uint skip_fwload;
module_param(skip_fwload, uint, 0644);
MODULE_PARM_DESC(skip_fwload, "Skip atomisp firmware load");
/* cross component debug message flag */
int dbg_level;
module_param(dbg_level, int, 0644);
MODULE_PARM_DESC(dbg_level, "debug message level (default:0)");
/* log function switch */
int dbg_func = 1;
module_param(dbg_func, int, 0644);
MODULE_PARM_DESC(dbg_func,
"log function switch non/printk (default:printk)");
int mipicsi_flag;
module_param(mipicsi_flag, int, 0644);
MODULE_PARM_DESC(mipicsi_flag, "mipi csi compression predictor algorithm");
/* optional override for the firmware file name loaded at probe */
static char firmware_name[256];
module_param_string(firmware_name, firmware_name, sizeof(firmware_name), 0);
MODULE_PARM_DESC(firmware_name, "Firmware file name. Allows overriding the default firmware name.");
/* Set to 16x16 since this is the amount of lines and pixels the sensor
exports extra. If these are kept at the 10x8 that they were on, in yuv
downscaling modes incorrect resolutions were requested from the sensor
driver with strange outcomes as a result. The proper way to do this
would be to have a list of tables that specify the sensor res, mipi rec,
output res, and isp output res. However, since we do not have this yet,
the chosen solution is the next best thing. */
int pad_w = 16;
module_param(pad_w, int, 0644);
MODULE_PARM_DESC(pad_w, "extra data for ISP processing");
int pad_h = 16;
module_param(pad_h, int, 0644);
MODULE_PARM_DESC(pad_h, "extra data for ISP processing");
/*
 * FIXME: this is a hack to make easier to support ISP2401 variant.
 * As a given system will either be ISP2401 or not, we can just use
 * a boolean, in order to replace existing #ifdef ISP2401 everywhere.
 *
 * Once this driver gets into a better shape, however, the best would
 * be to replace this to something stored inside atomisp allocated
 * structures.
 */
struct device *atomisp_dev;
/* Merrifield/Moorefield default DFS rules: all run modes at 400 MHz. */
static const struct atomisp_freq_scaling_rule dfs_rules_merr[] = {
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_400MHZ,
		.run_mode = ATOMISP_RUN_MODE_VIDEO,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_400MHZ,
		.run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_400MHZ,
		.run_mode = ATOMISP_RUN_MODE_PREVIEW,
	},
};
/* Merrifield and Moorefield DFS rules */
static const struct atomisp_dfs_config dfs_config_merr = {
	.lowest_freq = ISP_FREQ_200MHZ,
	.max_freq_at_vmin = ISP_FREQ_400MHZ,
	.highest_freq = ISP_FREQ_457MHZ,
	.dfs_table = dfs_rules_merr,
	.dfs_table_size = ARRAY_SIZE(dfs_rules_merr),
};
/* DFS rules for the Merrifield 1179 variant: all run modes at 400 MHz. */
static const struct atomisp_freq_scaling_rule dfs_rules_merr_1179[] = {
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_400MHZ,
		.run_mode = ATOMISP_RUN_MODE_VIDEO,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_400MHZ,
		.run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_400MHZ,
		.run_mode = ATOMISP_RUN_MODE_PREVIEW,
	},
};
static const struct atomisp_dfs_config dfs_config_merr_1179 = {
	.lowest_freq = ISP_FREQ_200MHZ,
	.max_freq_at_vmin = ISP_FREQ_400MHZ,
	.highest_freq = ISP_FREQ_400MHZ,
	.dfs_table = dfs_rules_merr_1179,
	.dfs_table_size = ARRAY_SIZE(dfs_rules_merr_1179),
};
/*
 * DFS rules for the Merrifield 117a variant: resolution/fps-specific video
 * frequencies, 400 MHz stills, 200 MHz preview.  First matching rule wins,
 * so the specific video entries must precede the ANY/ANY video fallback.
 */
static const struct atomisp_freq_scaling_rule dfs_rules_merr_117a[] = {
	{
		.width = 1920,
		.height = 1080,
		.fps = 30,
		.isp_freq = ISP_FREQ_266MHZ,
		.run_mode = ATOMISP_RUN_MODE_VIDEO,
	},
	{
		.width = 1080,
		.height = 1920,
		.fps = 30,
		.isp_freq = ISP_FREQ_266MHZ,
		.run_mode = ATOMISP_RUN_MODE_VIDEO,
	},
	{
		.width = 1920,
		.height = 1080,
		.fps = 45,
		.isp_freq = ISP_FREQ_320MHZ,
		.run_mode = ATOMISP_RUN_MODE_VIDEO,
	},
	{
		.width = 1080,
		.height = 1920,
		.fps = 45,
		.isp_freq = ISP_FREQ_320MHZ,
		.run_mode = ATOMISP_RUN_MODE_VIDEO,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = 60,
		.isp_freq = ISP_FREQ_356MHZ,
		.run_mode = ATOMISP_RUN_MODE_VIDEO,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_200MHZ,
		.run_mode = ATOMISP_RUN_MODE_VIDEO,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_400MHZ,
		.run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_200MHZ,
		.run_mode = ATOMISP_RUN_MODE_PREVIEW,
	},
};
/*
 * NOTE(review): deliberately not const — presumably patched at probe time
 * for specific device steppings; confirm before adding const.
 */
static struct atomisp_dfs_config dfs_config_merr_117a = {
	.lowest_freq = ISP_FREQ_200MHZ,
	.max_freq_at_vmin = ISP_FREQ_200MHZ,
	.highest_freq = ISP_FREQ_400MHZ,
	.dfs_table = dfs_rules_merr_117a,
	.dfs_table_size = ARRAY_SIZE(dfs_rules_merr_117a),
};
/* Baytrail DFS rules: all run modes at 400 MHz. */
static const struct atomisp_freq_scaling_rule dfs_rules_byt[] = {
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_400MHZ,
		.run_mode = ATOMISP_RUN_MODE_VIDEO,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_400MHZ,
		.run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_400MHZ,
		.run_mode = ATOMISP_RUN_MODE_PREVIEW,
	},
};
static const struct atomisp_dfs_config dfs_config_byt = {
	.lowest_freq = ISP_FREQ_200MHZ,
	.max_freq_at_vmin = ISP_FREQ_400MHZ,
	.highest_freq = ISP_FREQ_400MHZ,
	.dfs_table = dfs_rules_byt,
	.dfs_table_size = ARRAY_SIZE(dfs_rules_byt),
};
/* Cherrytrail DFS rules: 320 MHz video/preview, 356 MHz stills. */
static const struct atomisp_freq_scaling_rule dfs_rules_cht[] = {
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_320MHZ,
		.run_mode = ATOMISP_RUN_MODE_VIDEO,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_356MHZ,
		.run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_320MHZ,
		.run_mode = ATOMISP_RUN_MODE_PREVIEW,
	},
};
/* Cherrytrail SoC-camera variant: video also runs at 356 MHz. */
static const struct atomisp_freq_scaling_rule dfs_rules_cht_soc[] = {
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_356MHZ,
		.run_mode = ATOMISP_RUN_MODE_VIDEO,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_356MHZ,
		.run_mode = ATOMISP_RUN_MODE_STILL_CAPTURE,
	},
	{
		.width = ISP_FREQ_RULE_ANY,
		.height = ISP_FREQ_RULE_ANY,
		.fps = ISP_FREQ_RULE_ANY,
		.isp_freq = ISP_FREQ_320MHZ,
		.run_mode = ATOMISP_RUN_MODE_PREVIEW,
	},
};
static const struct atomisp_dfs_config dfs_config_cht = {
	.lowest_freq = ISP_FREQ_100MHZ,
	.max_freq_at_vmin = ISP_FREQ_356MHZ,
	.highest_freq = ISP_FREQ_356MHZ,
	.dfs_table = dfs_rules_cht,
	.dfs_table_size = ARRAY_SIZE(dfs_rules_cht),
};
/* This one should be visible also by atomisp_cmd.c */
const struct atomisp_dfs_config dfs_config_cht_soc = {
	.lowest_freq = ISP_FREQ_100MHZ,
	.max_freq_at_vmin = ISP_FREQ_356MHZ,
	.highest_freq = ISP_FREQ_356MHZ,
	.dfs_table = dfs_rules_cht_soc,
	.dfs_table_size = ARRAY_SIZE(dfs_rules_cht_soc),
};
int atomisp_video_init(struct atomisp_video_pipe *video)
{
int ret;
video->pad.flags = MEDIA_PAD_FL_SINK;
ret = media_entity_pads_init(&video->vdev.entity, 1, &video->pad);
if (ret < 0)
return ret;
/* Initialize the video device. */
strscpy(video->vdev.name, "ATOMISP video output", sizeof(video->vdev.name));
video->vdev.fops = &atomisp_fops;
video->vdev.ioctl_ops = &atomisp_ioctl_ops;
video->vdev.lock = &video->isp->mutex;
video->vdev.release = video_device_release_empty;
video_set_drvdata(&video->vdev, video->isp);
return 0;
}
/* Tear down a video pipe's media entity and unregister its device. */
void atomisp_video_unregister(struct atomisp_video_pipe *video)
{
	if (!video_is_registered(&video->vdev))
		return;

	media_entity_cleanup(&video->vdev.entity);
	video_unregister_device(&video->vdev);
}
/*
 * Snapshot the ISP's PCI config-space registers into isp->saved_regs so
 * they can be restored after a power cycle.  Some saved values are also
 * patched here with required workaround bits (read/write combining, CSI
 * clock-gate override, parallel path / CSI_READY enables) so that the
 * restore path writes the corrected values.  Always returns 0.
 */
static int atomisp_save_iunit_reg(struct atomisp_device *isp)
{
	struct pci_dev *pdev = to_pci_dev(isp->dev);

	dev_dbg(isp->dev, "%s\n", __func__);

	pci_read_config_word(pdev, PCI_COMMAND, &isp->saved_regs.pcicmdsts);
	/* isp->saved_regs.ispmmadr is set from the atomisp_pci_probe() */
	pci_read_config_dword(pdev, PCI_MSI_CAPID, &isp->saved_regs.msicap);
	pci_read_config_dword(pdev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr);
	pci_read_config_word(pdev, PCI_MSI_DATA, &isp->saved_regs.msi_data);
	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr);
	pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &isp->saved_regs.interrupt_control);
	pci_read_config_dword(pdev, MRFLD_PCI_PMCS, &isp->saved_regs.pmcs);

	/* Ensure read/write combining is enabled. */
	pci_read_config_dword(pdev, PCI_I_CONTROL, &isp->saved_regs.i_control);
	isp->saved_regs.i_control |=
	    MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING |
	    MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
	pci_read_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
			      &isp->saved_regs.csi_access_viol);
	pci_read_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL,
			      &isp->saved_regs.csi_rcomp_config);
	/*
	 * Hardware bugs require setting CSI_HS_OVR_CLK_GATE_ON_UPDATE.
	 * ANN/CHV: RCOMP updates do not happen when using CSI2+ path
	 * and sensor sending "continuous clock".
	 * TNG/ANN/CHV: MIPI packets are lost if the HS entry sequence
	 * is missed, and IUNIT can hang.
	 * For both issues, setting this bit is a workaround.
	 */
	isp->saved_regs.csi_rcomp_config |= MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE;
	pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
			      &isp->saved_regs.csi_afe_dly);
	pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL,
			      &isp->saved_regs.csi_control);
	/* ISP2401 and newer need the parallel path enabled */
	if (isp->media_dev.hw_revision >=
	    (ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT))
		isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_PARPATHEN;
	/*
	 * On CHT CSI_READY bit should be enabled before stream on
	 */
	if (IS_CHT && (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
		       ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)))
		isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_CSI_READY;
	pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
			      &isp->saved_regs.csi_afe_rcomp_config);
	pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
			      &isp->saved_regs.csi_afe_hs_control);
	pci_read_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
			      &isp->saved_regs.csi_deadline_control);
	return 0;
}
/*
 * atomisp_restore_iunit_reg() - write back the IUNIT PCI config space
 * saved earlier by the matching save routine (after powering the ISP on).
 *
 * Restores command/BAR/MSI/interrupt state and the MRFLD CSI receiver
 * configuration from isp->saved_regs, then selects the SH CSI backend.
 * Always returns 0.
 */
static int atomisp_restore_iunit_reg(struct atomisp_device *isp)
{
	struct pci_dev *pdev = to_pci_dev(isp->dev);
	dev_dbg(isp->dev, "%s\n", __func__);
	/* Core PCI state: command/status, MMIO BAR, MSI and legacy IRQ setup */
	pci_write_config_word(pdev, PCI_COMMAND, isp->saved_regs.pcicmdsts);
	pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, isp->saved_regs.ispmmadr);
	pci_write_config_dword(pdev, PCI_MSI_CAPID, isp->saved_regs.msicap);
	pci_write_config_dword(pdev, PCI_MSI_ADDR, isp->saved_regs.msi_addr);
	pci_write_config_word(pdev, PCI_MSI_DATA, isp->saved_regs.msi_data);
	pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, isp->saved_regs.intr);
	pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, isp->saved_regs.interrupt_control);
	/* i_control was saved with read/write combining bits forced on */
	pci_write_config_dword(pdev, PCI_I_CONTROL, isp->saved_regs.i_control);
	pci_write_config_dword(pdev, MRFLD_PCI_PMCS, isp->saved_regs.pmcs);
	/* MRFLD CSI receiver configuration (workaround bits already applied) */
	pci_write_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
			       isp->saved_regs.csi_access_viol);
	pci_write_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL,
			       isp->saved_regs.csi_rcomp_config);
	pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
			       isp->saved_regs.csi_afe_dly);
	pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL,
			       isp->saved_regs.csi_control);
	pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
			       isp->saved_regs.csi_afe_rcomp_config);
	pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
			       isp->saved_regs.csi_afe_hs_control);
	pci_write_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
			       isp->saved_regs.csi_deadline_control);
	/*
	 * for MRFLD, Software/firmware needs to write a 1 to bit0
	 * of the register at CSI_RECEIVER_SELECTION_REG to enable
	 * SH CSI backend write 0 will enable Arasan CSI backend,
	 * which has bugs(like sighting:4567697 and 4567699) and
	 * will be removed in B0
	 */
	atomisp_css2_hw_store_32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);
	return 0;
}
/*
 * atomisp_mrfld_pre_power_down() - prepare the IUNIT for power-off.
 *
 * MRFLD requires that no ISP interrupt is pending when the IUNIT is
 * powered down. This clears any pending IIR state (the register appears
 * to be write-1-to-clear — the masked value is written back), verifies
 * the ISP interrupt status can be cleared, then disables the interrupt
 * enable bit, tears down MSI and drops the DFS frequency.
 *
 * Return: 0 on success, -EAGAIN if an ISP interrupt is still pending.
 */
static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
{
	struct pci_dev *pdev = to_pci_dev(isp->dev);
	u32 irq;
	unsigned long flags;
	spin_lock_irqsave(&isp->lock, flags);
	/*
	 * MRFLD HAS requirement: cannot power off i-unit if
	 * ISP has IRQ not serviced.
	 * So, here we need to check if there is any pending
	 * IRQ, if so, waiting for it to be served
	 */
	pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
	irq &= BIT(INTR_IIR);
	/* Write the IIR bit back to acknowledge/clear it, then re-check */
	pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
	pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
	if (!(irq & BIT(INTR_IIR)))
		goto done;
	/* Still pending: clear every ISP-side interrupt source and re-verify */
	atomisp_css2_hw_store_32(MRFLD_INTR_CLEAR_REG, 0xFFFFFFFF);
	atomisp_load_uint32(MRFLD_INTR_STATUS_REG, &irq);
	if (irq != 0) {
		dev_err(isp->dev,
			"%s: fail to clear isp interrupt status reg=0x%x\n",
			__func__, irq);
		spin_unlock_irqrestore(&isp->lock, flags);
		return -EAGAIN;
	} else {
		/* ISP status cleared; retry acknowledging the PCI IIR bit */
		pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
		irq &= BIT(INTR_IIR);
		pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
		pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
		if (!(irq & BIT(INTR_IIR))) {
			/* Fully quiesced: mask all ISP interrupt sources */
			atomisp_css2_hw_store_32(MRFLD_INTR_ENABLE_REG, 0x0);
			goto done;
		}
		dev_err(isp->dev,
			"%s: error in iunit interrupt. status reg=0x%x\n",
			__func__, irq);
		spin_unlock_irqrestore(&isp->lock, flags);
		return -EAGAIN;
	}
done:
	/*
	 * MRFLD WORKAROUND:
	 * before powering off IUNIT, clear the pending interrupts
	 * and disable the interrupt. driver should avoid writing 0
	 * to IIR. It could block subsequent interrupt messages.
	 * HW sighting:4568410.
	 */
	pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
	irq &= ~BIT(INTR_IER);
	pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
	atomisp_msi_irq_uninit(isp);
	atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);
	spin_unlock_irqrestore(&isp->lock, flags);
	return 0;
}
/*
 * WA for DDR DVFS enable/disable
 * By default, ISP will force DDR DVFS 1600MHz before disable DVFS
 *
 * Reads the P-Unit ISPSSDVFS register over the IOSF sideband, updates the
 * two DVFS control bits and writes it back:
 *   enable:  clear bit0 and bit1 (let DVFS run)
 *   disable: set bit1, clear bit0
 *
 * Note: the original source had mojibake here — "®" in the read call was
 * a corrupted "&reg"; restored so the code compiles again.
 */
static void punit_ddr_dvfs_enable(bool enable)
{
	int reg;

	iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSDVFS, &reg);
	if (enable) {
		reg &= ~(MRFLD_BIT0 | MRFLD_BIT1);
	} else {
		reg |= MRFLD_BIT1;
		reg &= ~(MRFLD_BIT0);
	}
	iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, MRFLD_ISPSSDVFS, reg);
}
/*
 * atomisp_mrfld_power() - power the IUNIT on or off through the P-Unit.
 * @isp:    device instance
 * @enable: true to power on, false to power off
 *
 * Writes the requested power state into ISPSSPM0 over the IOSF sideband
 * and polls the status field until the hardware reports the new state,
 * for up to 50 ms. On CHT, DDR DVFS is temporarily disabled around
 * power-on as a P-Unit workaround.
 *
 * Return: 0 on success, -EBUSY if the state change times out.
 */
static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
{
	struct pci_dev *pdev = to_pci_dev(isp->dev);
	unsigned long timeout;
	u32 val = enable ? MRFLD_ISPSSPM0_IUNIT_POWER_ON :
		  MRFLD_ISPSSPM0_IUNIT_POWER_OFF;
	dev_dbg(isp->dev, "IUNIT power-%s.\n", enable ? "on" : "off");
	/* WA for P-Unit, if DVFS enabled, ISP timeout observed */
	if (IS_CHT && enable) {
		punit_ddr_dvfs_enable(false);
		msleep(20);
	}
	/* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */
	iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0,
			val, MRFLD_ISPSSPM0_ISPSSC_MASK);
	/* WA:Enable DVFS */
	if (IS_CHT && !enable)
		punit_ddr_dvfs_enable(true);
	/*
	 * There should be no IUNIT access while power-down is
	 * in progress. HW sighting: 4567865.
	 * Wait up to 50 ms for the IUNIT to shut down.
	 * And we do the same for power on.
	 */
	timeout = jiffies + msecs_to_jiffies(50);
	do {
		u32 tmp;
		/* Wait until ISPSSPM0 bit[25:24] shows the right value */
		iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &tmp);
		tmp = (tmp >> MRFLD_ISPSSPM0_ISPSSS_OFFSET) & MRFLD_ISPSSPM0_ISPSSC_MASK;
		if (tmp == val) {
			trace_ipu_cstate(enable);
			/* Keep the PCI core's view of the power state in sync */
			pdev->current_state = enable ? PCI_D0 : PCI_D3cold;
			return 0;
		}
		if (time_after(jiffies, timeout))
			break;
		/* FIXME: experienced value for delay */
		usleep_range(100, 150);
	} while (1);
	/*
	 * NOTE(review): extra settle time on a failed power-on before
	 * reporting the timeout — purpose not documented here, presumably
	 * gives the P-Unit more time; confirm against hardware docs.
	 */
	if (enable)
		msleep(10);
	dev_err(isp->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off");
	return -EBUSY;
}
/*
 * atomisp_power_off() - runtime-suspend handler: power down the ISP.
 *
 * Shuts down the CSS layer, quiesces pending ISP interrupts, disables all
 * CSI ports (hardware requirement before IUNIT power-down), relaxes the
 * latency QoS request, saves PCI state and asks the P-Unit to cut power.
 *
 * Fix: the CSI_CONTROL read used "®" — mojibake for "&reg" — which
 * made the statement invalid C; restored the address-of expression.
 *
 * Return: 0 on success or a negative error code from the pre-power-down
 * IRQ quiescing / P-Unit handshake.
 */
int atomisp_power_off(struct device *dev)
{
	struct atomisp_device *isp = dev_get_drvdata(dev);
	struct pci_dev *pdev = to_pci_dev(dev);
	int ret;
	u32 reg;

	atomisp_css_uninit(isp);

	ret = atomisp_mrfld_pre_power_down(isp);
	if (ret)
		return ret;

	/*
	 * MRFLD IUNIT DPHY is located in an always-power-on island
	 * MRFLD HW design need all CSI ports are disabled before
	 * powering down the IUNIT.
	 */
	pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &reg);
	reg |= MRFLD_ALL_CSI_PORTS_OFF_MASK;
	pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, reg);

	cpu_latency_qos_update_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
	pci_save_state(pdev);
	return atomisp_mrfld_power(isp, false);
}
/*
 * atomisp_power_on() - runtime-resume handler: power the ISP back up.
 *
 * Powers the IUNIT on via the P-Unit, restores PCI state, reinstates the
 * ISR latency QoS request, restores the saved IUNIT config registers
 * (only once they have been captured — pcicmdsts acts as the "saved"
 * flag), drops to the low DFS frequency and re-initializes CSS.
 *
 * Fix: removed the redundant cast of dev_get_drvdata()'s void * return —
 * C converts void * implicitly, and the sibling handlers
 * (atomisp_power_off / atomisp_resume) already omit the cast.
 *
 * Return: 0 on success or a negative error code.
 */
int atomisp_power_on(struct device *dev)
{
	struct atomisp_device *isp = dev_get_drvdata(dev);
	int ret;

	ret = atomisp_mrfld_power(isp, true);
	if (ret)
		return ret;

	pci_restore_state(to_pci_dev(dev));
	cpu_latency_qos_update_request(&isp->pm_qos, isp->max_isr_latency);

	/* Restore register values for iUnit and iUnitPHY registers */
	if (isp->saved_regs.pcicmdsts)
		atomisp_restore_iunit_reg(isp);

	atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);

	return atomisp_css_init(isp);
}
/*
 * atomisp_suspend() - system suspend handler.
 *
 * Refuses to suspend while streaming (sensors do not support it), forces
 * a runtime-resume so the teardown below runs against powered hardware,
 * remembers whether CSS streams must be recreated on resume, destroys
 * the pipes/streams and finally powers the ISP off.
 *
 * Fix: removed the redundant cast of dev_get_drvdata()'s void * return
 * for consistency with atomisp_resume() / atomisp_power_off().
 *
 * Return: 0 on success, -EINVAL while streaming, or a power-off error.
 */
static int atomisp_suspend(struct device *dev)
{
	struct atomisp_device *isp = dev_get_drvdata(dev);
	unsigned long flags;

	/* FIXME: Suspend is not supported by sensors. Abort if streaming. */
	spin_lock_irqsave(&isp->lock, flags);
	if (isp->asd.streaming) {
		spin_unlock_irqrestore(&isp->lock, flags);
		dev_err(isp->dev, "atomisp cannot suspend at this time.\n");
		return -EINVAL;
	}
	spin_unlock_irqrestore(&isp->lock, flags);

	pm_runtime_resume(dev);

	isp->asd.recreate_streams_on_resume = isp->asd.stream_prepared;
	atomisp_destroy_pipes_stream(&isp->asd);

	return atomisp_power_off(dev);
}
/*
 * atomisp_resume() - system resume handler.
 *
 * Powers the ISP back on and, if atomisp_suspend() tore down active
 * streams, recreates the CSS pipes/streams.
 */
static int atomisp_resume(struct device *dev)
{
	struct atomisp_device *isp = dev_get_drvdata(dev);
	int err;

	err = atomisp_power_on(dev);
	if (err)
		return err;

	if (!isp->asd.recreate_streams_on_resume)
		return 0;

	return atomisp_create_pipes_stream(&isp->asd);
}
/*
 * atomisp_csi_lane_config() - program the CSI lane routing for all ports.
 *
 * Matches the per-port lane counts requested by the attached sensors
 * (isp->sensor_lanes[]) against the hardware's supported configurations,
 * then encodes the chosen config code, port-disable bits and per-port
 * lane-enable masks into the MRFLD_PCI_CSI_CONTROL config register.
 *
 * Return: 0 on success, -EINVAL if no supported configuration matches.
 */
int atomisp_csi_lane_config(struct atomisp_device *isp)
{
	struct pci_dev *pdev = to_pci_dev(isp->dev);
	/* Lane-count tuples the hardware supports, keyed by config code */
	static const struct {
		u8 code;
		u8 lanes[N_MIPI_PORT_ID];
	} portconfigs[] = {
		/* Tangier/Merrifield available lane configurations */
		{ 0x00, { 4, 1, 0 } },		/* 00000 */
		{ 0x01, { 3, 1, 0 } },		/* 00001 */
		{ 0x02, { 2, 1, 0 } },		/* 00010 */
		{ 0x03, { 1, 1, 0 } },		/* 00011 */
		{ 0x04, { 2, 1, 2 } },		/* 00100 */
		{ 0x08, { 3, 1, 1 } },		/* 01000 */
		{ 0x09, { 2, 1, 1 } },		/* 01001 */
		{ 0x0a, { 1, 1, 1 } },		/* 01010 */
		/* Anniedale/Moorefield only configurations */
		{ 0x10, { 4, 2, 0 } },		/* 10000 */
		{ 0x11, { 3, 2, 0 } },		/* 10001 */
		{ 0x12, { 2, 2, 0 } },		/* 10010 */
		{ 0x13, { 1, 2, 0 } },		/* 10011 */
		{ 0x14, { 2, 2, 2 } },		/* 10100 */
		{ 0x18, { 3, 2, 1 } },		/* 11000 */
		{ 0x19, { 2, 2, 1 } },		/* 11001 */
		{ 0x1a, { 1, 2, 1 } },		/* 11010 */
	};
	unsigned int i, j;
	u32 csi_control;
	int nportconfigs;
	u32 port_config_mask;
	int port3_lanes_shift;
	/* Register layout differs between Merrifield and MOFD/CHV parts */
	if (isp->media_dev.hw_revision <
	    ATOMISP_HW_REVISION_ISP2401_LEGACY <<
	    ATOMISP_HW_REVISION_SHIFT) {
		/* Merrifield */
		port_config_mask = MRFLD_PORT_CONFIG_MASK;
		port3_lanes_shift = MRFLD_PORT3_LANES_SHIFT;
	} else {
		/* Moorefield / Cherryview */
		port_config_mask = CHV_PORT_CONFIG_MASK;
		port3_lanes_shift = CHV_PORT3_LANES_SHIFT;
	}
	/* Legacy input system supports only the first part of the table */
	if (isp->media_dev.hw_revision <
	    ATOMISP_HW_REVISION_ISP2401 <<
	    ATOMISP_HW_REVISION_SHIFT) {
		/* Merrifield / Moorefield legacy input system */
		nportconfigs = MRFLD_PORT_CONFIG_NUM;
	} else {
		/* Moorefield / Cherryview new input system */
		nportconfigs = ARRAY_SIZE(portconfigs);
	}
	/* Find a config compatible with every port that has a sensor */
	for (i = 0; i < nportconfigs; i++) {
		for (j = 0; j < N_MIPI_PORT_ID; j++)
			if (isp->sensor_lanes[j] &&
			    isp->sensor_lanes[j] != portconfigs[i].lanes[j])
				break;
		if (j == N_MIPI_PORT_ID)
			break;	/* Found matching setting */
	}
	if (i >= nportconfigs) {
		dev_err(isp->dev,
			"%s: could not find the CSI port setting for %d-%d-%d\n",
			__func__,
			isp->sensor_lanes[0], isp->sensor_lanes[1], isp->sensor_lanes[2]);
		return -EINVAL;
	}
	/*
	 * Encode: config code, per-port disable bits (set when a port has
	 * zero lanes) and a ones-mask of enabled lanes per port
	 * ((1 << n) - 1 yields n low bits set).
	 */
	pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &csi_control);
	csi_control &= ~port_config_mask;
	csi_control |= (portconfigs[i].code << MRFLD_PORT_CONFIGCODE_SHIFT)
		       | (portconfigs[i].lanes[0] ? 0 : (1 << MRFLD_PORT1_ENABLE_SHIFT))
		       | (portconfigs[i].lanes[1] ? 0 : (1 << MRFLD_PORT2_ENABLE_SHIFT))
		       | (portconfigs[i].lanes[2] ? 0 : (1 << MRFLD_PORT3_ENABLE_SHIFT))
		       | (((1 << portconfigs[i].lanes[0]) - 1) << MRFLD_PORT1_LANES_SHIFT)
		       | (((1 << portconfigs[i].lanes[1]) - 1) << MRFLD_PORT2_LANES_SHIFT)
		       | (((1 << portconfigs[i].lanes[2]) - 1) << port3_lanes_shift);
	pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, csi_control);
	dev_dbg(isp->dev,
		"%s: the portconfig is %d-%d-%d, CSI_CONTROL is 0x%08X\n",
		__func__, portconfigs[i].lanes[0], portconfigs[i].lanes[1],
		portconfigs[i].lanes[2], csi_control);
	return 0;
}
/*
 * atomisp_subdev_probe() - discover and register external sub-devices.
 *
 * Parses firmware (ACPI) info via the CSI2 bridge, then walks the legacy
 * atomisp_gmin_platform subdev table registering sensors, the lens motor
 * and flash with the V4L2 device. Finally programs the CSI lane routing
 * for the sensors found.
 *
 * Return: 0 on success or a negative error code.
 */
static int atomisp_subdev_probe(struct atomisp_device *isp)
{
	const struct atomisp_platform_data *pdata;
	struct intel_v4l2_subdev_table *subdevs;
	int ret, mipi_port;
	ret = atomisp_csi2_bridge_parse_firmware(isp);
	if (ret)
		return ret;
	pdata = atomisp_get_platform_data();
	if (!pdata) {
		/* Not fatal: no legacy platform subdevs to register */
		dev_err(isp->dev, "no platform data available\n");
		return 0;
	}
	/*
	 * TODO: this is left here for now to allow testing atomisp-sensor
	 * drivers which are still using the atomisp_gmin_platform infra before
	 * converting them to standard v4l2 sensor drivers using runtime-pm +
	 * ACPI for pm and v4l2_async_register_subdev_sensor() registration.
	 */
	for (subdevs = pdata->subdevs; subdevs->type; ++subdevs) {
		ret = v4l2_device_register_subdev(&isp->v4l2_dev, subdevs->subdev);
		if (ret)
			continue;	/* skip subdevs that fail to register */
		switch (subdevs->type) {
		case RAW_CAMERA:
			if (subdevs->port >= ATOMISP_CAMERA_NR_PORTS) {
				dev_err(isp->dev, "port %d not supported\n", subdevs->port);
				break;
			}
			if (isp->sensor_subdevs[subdevs->port]) {
				dev_err(isp->dev, "port %d already has a sensor attached\n",
					subdevs->port);
				break;
			}
			/* Record lane count (by MIPI port) and the subdev (by camera port) */
			mipi_port = atomisp_port_to_mipi_port(isp, subdevs->port);
			isp->sensor_lanes[mipi_port] = subdevs->lanes;
			isp->sensor_subdevs[subdevs->port] = subdevs->subdev;
			break;
		case CAMERA_MOTOR:
			if (isp->motor) {
				dev_warn(isp->dev, "too many atomisp motors\n");
				continue;
			}
			isp->motor = subdevs->subdev;
			break;
		case LED_FLASH:
			if (isp->flash) {
				dev_warn(isp->dev, "too many atomisp flash devices\n");
				continue;
			}
			isp->flash = subdevs->subdev;
			break;
		default:
			dev_dbg(isp->dev, "unknown subdev probed\n");
			break;
		}
	}
	return atomisp_csi_lane_config(isp);
}
/*
 * atomisp_unregister_entities() - tear down all media/V4L2 entities.
 *
 * Unregisters the ISP subdev, the TPG, every CSI2 port, any remaining
 * external subdevs still on the v4l2_dev list, and finally the V4L2 and
 * media devices themselves (registration order in reverse).
 */
static void atomisp_unregister_entities(struct atomisp_device *isp)
{
	struct v4l2_subdev *subdev, *tmp;
	unsigned int port;

	atomisp_subdev_unregister_entities(&isp->asd);
	atomisp_tpg_unregister_entities(&isp->tpg);

	for (port = 0; port < ATOMISP_CAMERA_NR_PORTS; port++)
		atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[port]);

	/* Safe iteration: unregistering removes entries from the list */
	list_for_each_entry_safe(subdev, tmp, &isp->v4l2_dev.subdevs, list)
		v4l2_device_unregister_subdev(subdev);

	v4l2_device_unregister(&isp->v4l2_dev);
	media_device_unregister(&isp->media_dev);
	media_device_cleanup(&isp->media_dev);
}
/*
 * atomisp_register_entities() - register the media device, V4L2 device
 * and all internal/external entities.
 *
 * Order: media device init, v4l2_device, external subdevs (which also
 * programs CSI lanes), CSI2 ports, TPG, then the ISP subdev. Failures
 * unwind in reverse through the goto chain.
 *
 * Return: 0 on success or a negative error code.
 */
static int atomisp_register_entities(struct atomisp_device *isp)
{
	int ret = 0;
	unsigned int i;
	isp->media_dev.dev = isp->dev;
	strscpy(isp->media_dev.model, "Intel Atom ISP",
		sizeof(isp->media_dev.model));
	media_device_init(&isp->media_dev);
	isp->v4l2_dev.mdev = &isp->media_dev;
	ret = v4l2_device_register(isp->dev, &isp->v4l2_dev);
	if (ret < 0) {
		dev_err(isp->dev, "%s: V4L2 device registration failed (%d)\n",
			__func__, ret);
		goto v4l2_device_failed;
	}
	ret = atomisp_subdev_probe(isp);
	if (ret < 0)
		goto csi_and_subdev_probe_failed;
	/* Register internal entities */
	for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
		ret = atomisp_mipi_csi2_register_entities(&isp->csi2_port[i],
							  &isp->v4l2_dev);
		if (ret == 0)
			continue;
		/* error case */
		dev_err(isp->dev, "failed to register the CSI port: %d\n", i);
		/* deregister all registered CSI ports */
		while (i--)
			atomisp_mipi_csi2_unregister_entities(
			    &isp->csi2_port[i]);
		goto csi_and_subdev_probe_failed;
	}
	ret = atomisp_tpg_register_entities(&isp->tpg, &isp->v4l2_dev);
	if (ret < 0) {
		dev_err(isp->dev, "atomisp_tpg_register_entities\n");
		goto tpg_register_failed;
	}
	ret = atomisp_subdev_register_subdev(&isp->asd, &isp->v4l2_dev);
	if (ret < 0) {
		dev_err(isp->dev, "atomisp_subdev_register_subdev fail\n");
		goto subdev_register_failed;
	}
	return 0;
subdev_register_failed:
	atomisp_tpg_unregister_entities(&isp->tpg);
tpg_register_failed:
	for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++)
		atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]);
csi_and_subdev_probe_failed:
	v4l2_device_unregister(&isp->v4l2_dev);
v4l2_device_failed:
	media_device_unregister(&isp->media_dev);
	media_device_cleanup(&isp->media_dev);
	return ret;
}
/*
 * atomisp_init_sensor() - query a sensor's capabilities at registration.
 *
 * Reads the sensor's mbus code, native and default-crop rectangles,
 * probes for binning support (a frame size at half the active area) and
 * widens the crop rectangle to the full native area so the ISP receives
 * the border pixels it wants for padding. Sets input->crop_support only
 * if the whole sequence succeeds; earlier failures return silently,
 * leaving the corresponding fields untouched.
 */
static void atomisp_init_sensor(struct atomisp_input_subdev *input)
{
	struct v4l2_subdev_mbus_code_enum mbus_code_enum = { };
	struct v4l2_subdev_frame_size_enum fse = { };
	struct v4l2_subdev_state sd_state = {
		.pads = &input->pad_cfg,
	};
	struct v4l2_subdev_selection sel = { };
	int i, err;
	/* First enumerated mbus code is taken as the sensor's native code */
	mbus_code_enum.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	err = v4l2_subdev_call(input->camera, pad, enum_mbus_code, NULL, &mbus_code_enum);
	if (!err)
		input->code = mbus_code_enum.code;
	sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	sel.target = V4L2_SEL_TGT_NATIVE_SIZE;
	err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
	if (err)
		return;
	input->native_rect = sel.r;
	sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	sel.target = V4L2_SEL_TGT_CROP_DEFAULT;
	err = v4l2_subdev_call(input->camera, pad, get_selection, NULL, &sel);
	if (err)
		return;
	input->active_rect = sel.r;
	/*
	 * Check for a framesize with half active_rect width and height,
	 * if found assume the sensor supports binning.
	 * Do this before changing the crop-rect since that may influence
	 * enum_frame_size results.
	 */
	for (i = 0; ; i++) {
		fse.index = i;
		fse.code = input->code;
		fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		err = v4l2_subdev_call(input->camera, pad, enum_frame_size, NULL, &fse);
		if (err)
			break;	/* end of the sensor's frame-size list */
		if (fse.min_width <= (input->active_rect.width / 2) &&
		    fse.min_height <= (input->active_rect.height / 2)) {
			input->binning_support = true;
			break;
		}
	}
	/*
	 * The ISP also wants the non-active pixels at the border of the sensor
	 * for padding, set the crop rect to cover the entire sensor instead
	 * of only the default active area.
	 *
	 * Do this for both try and active formats since the try_crop rect in
	 * pad_cfg may influence (clamp) future try_fmt calls with which == try.
	 */
	sel.which = V4L2_SUBDEV_FORMAT_TRY;
	sel.target = V4L2_SEL_TGT_CROP;
	sel.r = input->native_rect;
	err = v4l2_subdev_call(input->camera, pad, set_selection, &sd_state, &sel);
	if (err)
		return;
	sel.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	sel.target = V4L2_SEL_TGT_CROP;
	sel.r = input->native_rect;
	err = v4l2_subdev_call(input->camera, pad, set_selection, NULL, &sel);
	if (err)
		return;
	dev_info(input->camera->dev, "Supports crop native %dx%d active %dx%d binning %d\n",
		 input->native_rect.width, input->native_rect.height,
		 input->active_rect.width, input->active_rect.height,
		 input->binning_support);
	input->crop_support = true;
}
/*
 * atomisp_register_device_nodes() - create media links and device nodes.
 *
 * Links every CSI2 port to the ISP subdev, initializes and links each
 * detected sensor, appends the TPG as an extra input (when a slot is
 * free), registers the capture video node, links it to the ISP source
 * pad, creates subdev device nodes and registers the media device.
 *
 * Return: 0 on success or a negative error code.
 */
int atomisp_register_device_nodes(struct atomisp_device *isp)
{
	struct atomisp_input_subdev *input;
	int i, err;
	for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
		err = media_create_pad_link(&isp->csi2_port[i].subdev.entity,
					    CSI2_PAD_SOURCE, &isp->asd.subdev.entity,
					    ATOMISP_SUBDEV_PAD_SINK, 0);
		if (err)
			return err;
		if (!isp->sensor_subdevs[i])
			continue;	/* port has no sensor attached */
		input = &isp->inputs[isp->input_cnt];
		input->type = RAW_CAMERA;
		input->port = i;
		input->camera = isp->sensor_subdevs[i];
		atomisp_init_sensor(input);
		/*
		 * HACK: Currently VCM belongs to primary sensor only, but correct
		 * approach must be to acquire from platform code which sensor
		 * owns it.
		 */
		if (i == ATOMISP_CAMERA_PORT_PRIMARY)
			input->motor = isp->motor;
		err = media_create_pad_link(&input->camera->entity, 0,
					    &isp->csi2_port[i].subdev.entity,
					    CSI2_PAD_SINK,
					    MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
		if (err)
			return err;
		isp->input_cnt++;
	}
	if (!isp->input_cnt)
		dev_warn(isp->dev, "no camera attached or fail to detect\n");
	else
		dev_info(isp->dev, "detected %d camera sensors\n", isp->input_cnt);
	/* Add the test-pattern generator as a final input when possible */
	if (isp->input_cnt < ATOM_ISP_MAX_INPUTS) {
		dev_dbg(isp->dev, "TPG detected, camera_cnt: %d\n", isp->input_cnt);
		isp->inputs[isp->input_cnt].type = TEST_PATTERN;
		isp->inputs[isp->input_cnt].port = -1;
		isp->inputs[isp->input_cnt++].camera = &isp->tpg.sd;
	} else {
		dev_warn(isp->dev, "too many atomisp inputs, TPG ignored.\n");
	}
	isp->asd.video_out.vdev.v4l2_dev = &isp->v4l2_dev;
	isp->asd.video_out.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	err = video_register_device(&isp->asd.video_out.vdev, VFL_TYPE_VIDEO, -1);
	if (err)
		return err;
	err = media_create_pad_link(&isp->asd.subdev.entity, ATOMISP_SUBDEV_PAD_SOURCE,
				    &isp->asd.video_out.vdev.entity, 0, 0);
	if (err)
		return err;
	err = v4l2_device_register_subdev_nodes(&isp->v4l2_dev);
	if (err)
		return err;
	return media_device_register(&isp->media_dev);
}
/*
 * atomisp_initialize_modules() - initialize CSI2, TPG and ISP subdev.
 *
 * Return: 0 on success or the first initializer's negative error code;
 * already-initialized modules are cleaned up on failure.
 *
 * NOTE(review): a failed atomisp_tpg_init() falls through the shared
 * error_tpg label and still calls atomisp_tpg_cleanup() — this relies on
 * the cleanup being safe after a failed init; confirm before changing.
 */
static int atomisp_initialize_modules(struct atomisp_device *isp)
{
	int ret;
	ret = atomisp_mipi_csi2_init(isp);
	if (ret < 0) {
		dev_err(isp->dev, "mipi csi2 initialization failed\n");
		goto error_mipi_csi2;
	}
	ret = atomisp_tpg_init(isp);
	if (ret < 0) {
		dev_err(isp->dev, "tpg initialization failed\n");
		goto error_tpg;
	}
	ret = atomisp_subdev_init(isp);
	if (ret < 0) {
		dev_err(isp->dev, "ISP subdev initialization failed\n");
		goto error_isp_subdev;
	}
	return 0;
error_isp_subdev:
error_tpg:
	atomisp_tpg_cleanup(isp);
error_mipi_csi2:
	atomisp_mipi_csi2_cleanup(isp);
	return ret;
}
/* Tear down the modules set up by atomisp_initialize_modules(). */
static void atomisp_uninitialize_modules(struct atomisp_device *isp)
{
	atomisp_tpg_cleanup(isp);
	atomisp_mipi_csi2_cleanup(isp);
}
/*
 * atomisp_load_firmware() - request the CSS firmware image for this part.
 *
 * Honors the skip_fwload and firmware_name module parameters; otherwise
 * picks the image file from the detected hardware revision/stepping.
 *
 * Return: the firmware blob, or NULL when loading is skipped, the
 * revision is unsupported, or request_firmware() fails.
 */
const struct firmware *
atomisp_load_firmware(struct atomisp_device *isp)
{
	const struct firmware *fw;
	char *fw_path = NULL;
	int rc;

	if (skip_fwload)
		return NULL;

	/* A user-supplied firmware_name overrides revision-based selection */
	if (firmware_name[0] != '\0') {
		fw_path = firmware_name;
	} else {
		if ((isp->media_dev.hw_revision >> ATOMISP_HW_REVISION_SHIFT)
		    == ATOMISP_HW_REVISION_ISP2401)
			fw_path = "shisp_2401a0_v21.bin";

		if (isp->media_dev.hw_revision ==
		    ((ATOMISP_HW_REVISION_ISP2401_LEGACY << ATOMISP_HW_REVISION_SHIFT)
		     | ATOMISP_HW_STEPPING_A0))
			fw_path = "shisp_2401a0_legacy_v21.bin";

		if (isp->media_dev.hw_revision ==
		    ((ATOMISP_HW_REVISION_ISP2400 << ATOMISP_HW_REVISION_SHIFT)
		     | ATOMISP_HW_STEPPING_B0))
			fw_path = "shisp_2400b0_v21.bin";
	}

	if (!fw_path) {
		dev_err(isp->dev, "Unsupported hw_revision 0x%x\n",
			isp->media_dev.hw_revision);
		return NULL;
	}

	rc = request_firmware(&fw, fw_path, isp->dev);
	if (!rc)
		return fw;

	dev_err(isp->dev,
		"atomisp: Error %d while requesting firmware %s\n",
		rc, fw_path);
	return NULL;
}
/*
 * Check for flags the driver was compiled with against the PCI
 * device. Always returns true on other than ISP 2400.
 *
 * Fixes:
 *  - the unknown-device message used "%x04:%x04", which prints the hex
 *    value followed by a literal "04"; the intended zero-padded form is
 *    "%04x:%04x".
 *  - "is not unsupported" was a double negative; the revision check
 *    rejects old steppings, so it now reads "is not supported".
 */
static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *name;
	const char *product;

	product = dmi_get_system_info(DMI_PRODUCT_NAME);

	/* Map the SoC family bits of the device ID to a human-readable name */
	switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
	case ATOMISP_PCI_DEVICE_SOC_MRFLD:
		name = "Merrifield";
		break;
	case ATOMISP_PCI_DEVICE_SOC_BYT:
		name = "Baytrail";
		break;
	case ATOMISP_PCI_DEVICE_SOC_ANN:
		name = "Anniedale";
		break;
	case ATOMISP_PCI_DEVICE_SOC_CHT:
		name = "Cherrytrail";
		break;
	default:
		dev_err(&pdev->dev, "%s: unknown device ID %04x:%04x\n",
			product, id->vendor, id->device);
		return false;
	}

	/* Early (A0) Baytrail steppings are not supported */
	if (pdev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) {
		dev_err(&pdev->dev, "%s revision %d is not supported\n",
			name, pdev->revision);
		return false;
	}

	/*
	 * FIXME:
	 * remove the if once the driver become generic
	 */
#ifndef ISP2401
	if (IS_ISP2401) {
		dev_err(&pdev->dev, "Support for %s (ISP2401) was disabled at compile time\n",
			name);
		return false;
	}
#else
	if (!IS_ISP2401) {
		dev_err(&pdev->dev, "Support for %s (ISP2400) was disabled at compile time\n",
			name);
		return false;
	}
#endif

	dev_info(&pdev->dev, "Detected %s version %d (ISP240%c) on %s\n",
		 name, pdev->revision, IS_ISP2401 ? '1' : '0', product);

	return true;
}
#define ATOM_ISP_PCI_BAR	0

/*
 * atomisp_pci_probe() - PCI probe for the ATOM ISP (IUNIT).
 *
 * Maps the ISP MMIO BAR, identifies the SoC variant to choose DFS tables
 * and the HPLL base frequency, loads and validates the CSS firmware,
 * sets up MSI/IRQ handling, applies MRFLD CSI workarounds, registers the
 * media/V4L2 entities and installs a custom PM domain that bypasses the
 * standard PCI power management (the ISP is controlled via the P-Unit,
 * not PCI config space).
 *
 * Fix: `val` was declared `int` but its address is passed to
 * iosf_mbi_read(), which takes a `u32 *`; declared it `u32` to match.
 *
 * Return: 0 on success or a negative error code (hardware is powered
 * off again on the failure paths).
 */
static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct atomisp_platform_data *pdata;
	struct atomisp_device *isp;
	unsigned int start;
	int err;
	u32 val;
	u32 irq;

	if (!is_valid_device(pdev, id))
		return -ENODEV;

	/* Pointer to struct device. */
	atomisp_dev = &pdev->dev;

	pdata = atomisp_get_platform_data();
	if (!pdata)
		dev_warn(&pdev->dev, "no platform data available\n");

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n", err);
		return err;
	}

	start = pci_resource_start(pdev, ATOM_ISP_PCI_BAR);
	dev_dbg(&pdev->dev, "start: 0x%x\n", start);

	err = pcim_iomap_regions(pdev, BIT(ATOM_ISP_PCI_BAR), pci_name(pdev));
	if (err) {
		dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", err);
		goto ioremap_fail;
	}

	isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
	if (!isp) {
		err = -ENOMEM;
		goto atomisp_dev_alloc_fail;
	}

	isp->dev = &pdev->dev;
	isp->base = pcim_iomap_table(pdev)[ATOM_ISP_PCI_BAR];
	isp->saved_regs.ispmmadr = start;

	dev_dbg(&pdev->dev, "atomisp mmio base: %p\n", isp->base);

	mutex_init(&isp->mutex);
	spin_lock_init(&isp->lock);

	/* This is not a true PCI device on SoC, so the delay is not needed. */
	pdev->d3hot_delay = 0;

	pci_set_drvdata(pdev, isp);

	/* Per-SoC hardware revision, DFS config and HPLL base frequency */
	switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
	case ATOMISP_PCI_DEVICE_SOC_MRFLD:
		isp->media_dev.hw_revision =
		    (ATOMISP_HW_REVISION_ISP2400
		     << ATOMISP_HW_REVISION_SHIFT) |
		    ATOMISP_HW_STEPPING_B0;

		switch (id->device) {
		case ATOMISP_PCI_DEVICE_SOC_MRFLD_1179:
			isp->dfs = &dfs_config_merr_1179;
			break;
		case ATOMISP_PCI_DEVICE_SOC_MRFLD_117A:
			isp->dfs = &dfs_config_merr_117a;
			break;
		default:
			isp->dfs = &dfs_config_merr;
			break;
		}
		isp->hpll_freq = HPLL_FREQ_1600MHZ;
		break;
	case ATOMISP_PCI_DEVICE_SOC_BYT:
		isp->media_dev.hw_revision =
		    (ATOMISP_HW_REVISION_ISP2400
		     << ATOMISP_HW_REVISION_SHIFT) |
		    ATOMISP_HW_STEPPING_B0;

		/*
		 * Note: some Intel-based tablets with Android use a different
		 * DFS table. Based on the comments at the Yocto Aero meta
		 * version of this driver (at the ssid.h header), they're
		 * identified via a "spid" var:
		 *
		 *	androidboot.spid=vend:cust:manu:plat:prod:hard
		 *
		 * As we don't have this upstream, nor we know enough details
		 * to use a DMI or PCI match table, the old code was just
		 * removed, but let's keep a note here as a reminder that,
		 * for certain devices, we may need to limit the max DFS
		 * frequency to be below certain values, adjusting the
		 * resolution accordingly.
		 */
		isp->dfs = &dfs_config_byt;

		/*
		 * HPLL frequency is known to be device-specific, but we don't
		 * have specs yet for exactly how it varies.  Default to
		 * BYT-CR but let provisioning set it via EFI variable
		 */
		isp->hpll_freq = gmin_get_var_int(&pdev->dev, false, "HpllFreq", HPLL_FREQ_2000MHZ);

		/*
		 * for BYT/CHT we are put isp into D3cold to avoid pci registers access
		 * in power off. Set d3cold_delay to 0 since default 100ms is not
		 * necessary.
		 */
		pdev->d3cold_delay = 0;
		break;
	case ATOMISP_PCI_DEVICE_SOC_ANN:
		isp->media_dev.hw_revision = (ATOMISP_HW_REVISION_ISP2401
					      << ATOMISP_HW_REVISION_SHIFT);
		isp->media_dev.hw_revision |= pdev->revision < 2 ?
					      ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
		isp->dfs = &dfs_config_merr;
		isp->hpll_freq = HPLL_FREQ_1600MHZ;
		break;
	case ATOMISP_PCI_DEVICE_SOC_CHT:
		isp->media_dev.hw_revision = (ATOMISP_HW_REVISION_ISP2401
					      << ATOMISP_HW_REVISION_SHIFT);
		isp->media_dev.hw_revision |= pdev->revision < 2 ?
					      ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
		isp->dfs = &dfs_config_cht;
		pdev->d3cold_delay = 0;

		/* Read the HPLL frequency from the CCK fuse register */
		iosf_mbi_read(BT_MBI_UNIT_CCK, MBI_REG_READ, CCK_FUSE_REG_0, &val);
		switch (val & CCK_FUSE_HPLL_FREQ_MASK) {
		case 0x00:
			isp->hpll_freq = HPLL_FREQ_800MHZ;
			break;
		case 0x01:
			isp->hpll_freq = HPLL_FREQ_1600MHZ;
			break;
		case 0x02:
			isp->hpll_freq = HPLL_FREQ_2000MHZ;
			break;
		default:
			isp->hpll_freq = HPLL_FREQ_1600MHZ;
			dev_warn(&pdev->dev, "read HPLL from cck failed. Default to 1600 MHz.\n");
		}
		break;
	default:
		dev_err(&pdev->dev, "un-supported IUNIT device\n");
		err = -ENODEV;
		goto atomisp_dev_alloc_fail;
	}

	dev_info(&pdev->dev, "ISP HPLL frequency base = %d MHz\n", isp->hpll_freq);

	isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY;

	/* Load isp firmware from user space */
	isp->firmware = atomisp_load_firmware(isp);
	if (!isp->firmware) {
		err = -ENOENT;
		dev_dbg(&pdev->dev, "Firmware load failed\n");
		goto load_fw_fail;
	}

	err = sh_css_check_firmware_version(isp->dev, isp->firmware->data);
	if (err) {
		dev_dbg(&pdev->dev, "Firmware version check failed\n");
		goto fw_validation_fail;
	}

	pci_set_master(pdev);

	err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (err < 0) {
		dev_err(&pdev->dev, "Failed to enable msi (%d)\n", err);
		goto enable_msi_fail;
	}

	atomisp_msi_irq_init(isp);

	cpu_latency_qos_add_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);

	/*
	 * for MRFLD, Software/firmware needs to write a 1 to bit 0 of
	 * the register at CSI_RECEIVER_SELECTION_REG to enable SH CSI
	 * backend write 0 will enable Arasan CSI backend, which has
	 * bugs(like sighting:4567697 and 4567699) and will be removed
	 * in B0
	 */
	atomisp_css2_hw_store_32(MRFLD_CSI_RECEIVER_SELECTION_REG, 1);

	if ((id->device & ATOMISP_PCI_DEVICE_SOC_MASK) ==
	    ATOMISP_PCI_DEVICE_SOC_MRFLD) {
		u32 csi_afe_trim;

		/*
		 * Workaround for imbalance data eye issue which is observed
		 * on TNG B0.
		 */
		pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, &csi_afe_trim);
		csi_afe_trim &= ~((MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
				   MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) |
				  (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
				   MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) |
				  (MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
				   MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT));
		csi_afe_trim |= (MRFLD_PCI_CSI1_HSRXCLKTRIM <<
				 MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) |
				(MRFLD_PCI_CSI2_HSRXCLKTRIM <<
				 MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) |
				(MRFLD_PCI_CSI3_HSRXCLKTRIM <<
				 MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT);
		pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, csi_afe_trim);
	}

	err = atomisp_initialize_modules(isp);
	if (err < 0) {
		dev_err(&pdev->dev, "atomisp_initialize_modules (%d)\n", err);
		goto initialize_modules_fail;
	}

	err = atomisp_register_entities(isp);
	if (err < 0) {
		dev_err(&pdev->dev, "atomisp_register_entities failed (%d)\n", err);
		goto register_entities_fail;
	}

	INIT_WORK(&isp->assert_recovery_work, atomisp_assert_recovery_work);

	/* save the iunit context only once after all the values are init'ed. */
	atomisp_save_iunit_reg(isp);

	/*
	 * The atomisp does not use standard PCI power-management through the
	 * PCI config space. Instead this driver directly tells the P-Unit to
	 * disable the ISP over the IOSF. The standard PCI subsystem pm_ops will
	 * try to access the config space before (resume) / after (suspend) this
	 * driver has turned the ISP on / off, resulting in the following errors:
	 *
	 * "Unable to change power state from D0 to D3hot, device inaccessible"
	 * "Unable to change power state from D3cold to D0, device inaccessible"
	 *
	 * To avoid these errors override the pm_domain so that all the PCI
	 * subsys suspend / resume handling is skipped.
	 */
	isp->pm_domain.ops.runtime_suspend = atomisp_power_off;
	isp->pm_domain.ops.runtime_resume = atomisp_power_on;
	isp->pm_domain.ops.suspend = atomisp_suspend;
	isp->pm_domain.ops.resume = atomisp_resume;

	dev_pm_domain_set(&pdev->dev, &isp->pm_domain);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	/* Init ISP memory management */
	hmm_init();

	err = devm_request_threaded_irq(&pdev->dev, pdev->irq,
					atomisp_isr, atomisp_isr_thread,
					IRQF_SHARED, "isp_irq", isp);
	if (err) {
		dev_err(&pdev->dev, "Failed to request irq (%d)\n", err);
		goto request_irq_fail;
	}

	/* Load firmware into ISP memory */
	err = atomisp_css_load_firmware(isp);
	if (err) {
		dev_err(&pdev->dev, "Failed to init css.\n");
		goto css_init_fail;
	}
	/* Clear FW image from memory */
	release_firmware(isp->firmware);
	isp->firmware = NULL;
	isp->css_env.isp_css_fw.data = NULL;

	err = v4l2_async_nf_register(&isp->notifier);
	if (err) {
		dev_err(isp->dev, "failed to register async notifier : %d\n", err);
		goto css_init_fail;
	}

	atomisp_drvfs_init(isp);

	return 0;

css_init_fail:
	devm_free_irq(&pdev->dev, pdev->irq, isp);
request_irq_fail:
	hmm_cleanup();
	pm_runtime_get_noresume(&pdev->dev);
	dev_pm_domain_set(&pdev->dev, NULL);
	atomisp_unregister_entities(isp);
register_entities_fail:
	atomisp_uninitialize_modules(isp);
initialize_modules_fail:
	cpu_latency_qos_remove_request(&isp->pm_qos);
	atomisp_msi_irq_uninit(isp);
	pci_free_irq_vectors(pdev);
enable_msi_fail:
fw_validation_fail:
	release_firmware(isp->firmware);
load_fw_fail:
	/*
	 * Switch off ISP, as keeping it powered on would prevent
	 * reaching S0ix states.
	 *
	 * The following lines have been copied from atomisp suspend path
	 */
	pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
	irq &= BIT(INTR_IIR);
	pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);

	pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
	irq &= ~BIT(INTR_IER);
	pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);

	atomisp_msi_irq_uninit(isp);

	/* Address later when we worry about the ...field chips */
	if (IS_ENABLED(CONFIG_PM) && atomisp_mrfld_power(isp, false))
		dev_err(&pdev->dev, "Failed to switch off ISP\n");

atomisp_dev_alloc_fail:
	pcim_iounmap_regions(pdev, BIT(ATOM_ISP_PCI_BAR));

ioremap_fail:
	return err;
}
/*
 * atomisp_pci_remove() - PCI remove: undo everything probe set up.
 *
 * Tears down debugfs, the CSS firmware, HMM, the custom PM domain
 * (runtime-PM reference re-taken first), QoS request, MSI and all
 * registered entities, then releases any still-held firmware blob.
 */
static void atomisp_pci_remove(struct pci_dev *pdev)
{
	struct atomisp_device *isp = pci_get_drvdata(pdev);
	dev_info(&pdev->dev, "Removing atomisp driver\n");
	atomisp_drvfs_exit();
	ia_css_unload_firmware();
	hmm_cleanup();
	/* Undo the put_noidle/allow from probe before dropping the pm domain */
	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	dev_pm_domain_set(&pdev->dev, NULL);
	cpu_latency_qos_remove_request(&isp->pm_qos);
	atomisp_msi_irq_uninit(isp);
	atomisp_unregister_entities(isp);
	release_firmware(isp->firmware);
}
/* PCI IDs of the supported ISP2400/ISP2401 SoC integrations */
static const struct pci_device_id atomisp_pci_tbl[] = {
	/* Merrifield */
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_MRFLD)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_MRFLD_1179)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_MRFLD_117A)},
	/* Baytrail */
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_BYT)},
	/* Anniedale (Merrifield+ / Moorefield) */
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_ANN)},
	/* Cherrytrail */
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ATOMISP_PCI_DEVICE_SOC_CHT)},
	{0,}
};
MODULE_DEVICE_TABLE(pci, atomisp_pci_tbl);

static struct pci_driver atomisp_pci_driver = {
	.name = "atomisp-isp2",
	.id_table = atomisp_pci_tbl,
	.probe = atomisp_pci_probe,
	.remove = atomisp_pci_remove,
};

module_pci_driver(atomisp_pci_driver);

MODULE_AUTHOR("Wen Wang <[email protected]>");
MODULE_AUTHOR("Xiaolin Zhang <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel ATOM Platform ISP Driver");
MODULE_IMPORT_NS(INTEL_IPU_BRIDGE);
| linux-master | drivers/staging/media/atomisp/pci/atomisp_v4l2.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "assert_support.h"
#include "sh_css_metrics.h"
#include "sp.h"
#include "isp.h"
#include "sh_css_internal.h"
#define MULTIPLE_PCS 0
#define SUSPEND 0
#define NOF_PCS 1
#define RESUME_MASK 0x8
#define STOP_MASK 0x0
/* Global switch for PC sampling; set via sh_css_metrics_enable_pc_histogram() */
static bool pc_histogram_enabled;
/* Histograms currently being sampled; pointed into the active binary's
 * metrics by sh_css_metrics_start_binary() */
static struct sh_css_pc_histogram *isp_histogram;
static struct sh_css_pc_histogram *sp_histogram;

struct sh_css_metrics sh_css_metrics;
/* Account one more processed frame in the global metrics. */
void
sh_css_metrics_start_frame(void)
{
	sh_css_metrics.frame_metrics.num_frames++;
}
/*
 * Reset @histogram to its pristine state: zero the run/stall counters
 * and set every monitor-sink word to all-ones (0xFFFF), the neutral
 * value that later sink readings are ANDed into.
 */
static void
clear_histogram(struct sh_css_pc_histogram *histogram)
{
	unsigned int slot = 0;

	assert(histogram);

	while (slot < histogram->length) {
		histogram->run[slot] = 0;
		histogram->stall[slot] = 0;
		histogram->msink[slot] = 0xFFFF;
		slot++;
	}
}
/* Turn PC-histogram collection on or off; checked by the sampling and
 * start_binary paths before doing any work. */
void
sh_css_metrics_enable_pc_histogram(bool enable)
{
	pc_histogram_enabled = enable;
}
static void
make_histogram(struct sh_css_pc_histogram *histogram, unsigned int length)
{
assert(histogram);
if (histogram->length)
return;
if (histogram->run)
return;
histogram->run = kvmalloc(length * sizeof(*histogram->run),
GFP_KERNEL);
if (!histogram->run)
return;
histogram->stall = kvmalloc(length * sizeof(*histogram->stall),
GFP_KERNEL);
if (!histogram->stall)
return;
histogram->msink = kvmalloc(length * sizeof(*histogram->msink),
GFP_KERNEL);
if (!histogram->msink)
return;
histogram->length = length;
clear_histogram(histogram);
}
/*
 * Append @metrics to the NULL-terminated singly linked list headed at
 * *@l, unless it is already present (insertion is idempotent, which
 * also prevents creating a cycle).
 *
 * Fix vs. previous version: the old assert(*l) demanded a non-empty
 * list, but the global head (sh_css_metrics.binary_metrics) starts out
 * NULL, so the assert would fire on the very first insertion — the one
 * case the function must handle.  Only @l and @metrics themselves must
 * be non-NULL.
 */
static void
insert_binary_metrics(struct sh_css_binary_metrics **l,
		      struct sh_css_binary_metrics *metrics)
{
	assert(l);
	assert(metrics);

	/* Walk to the tail; bail out if @metrics is already linked */
	for (; *l; l = &(*l)->next)
		if (*l == metrics)
			return;

	*l = metrics;
	metrics->next = NULL;
}
/*
 * Begin PC-histogram collection for @metrics: size its ISP and SP
 * histograms to the respective PMEM depths, link @metrics into the
 * global metrics list, and make its histograms the targets of
 * subsequent sh_css_metrics_sample_pcs() calls.  No-op while
 * collection is disabled.
 */
void
sh_css_metrics_start_binary(struct sh_css_binary_metrics *metrics)
{
	assert(metrics);

	if (!pc_histogram_enabled)
		return;

	make_histogram(&metrics->isp_histogram, ISP_PMEM_DEPTH);
	make_histogram(&metrics->sp_histogram, SP_PMEM_DEPTH);
	insert_binary_metrics(&sh_css_metrics.binary_metrics, metrics);

	isp_histogram = &metrics->isp_histogram;
	sp_histogram = &metrics->sp_histogram;
}
/*
 * Take one sample of the ISP program counter and fold it into the
 * active histogram: the PC slot is counted as a "run" or "stall" cycle
 * depending on the control-sink register, and the sink bits seen at
 * that PC are accumulated (ANDed) into msink[].
 */
void
sh_css_metrics_sample_pcs(void)
{
	bool stall;
	unsigned int pc;
	unsigned int msink;

	if (!pc_histogram_enabled)
		return;

	if (isp_histogram) {
		msink = isp_ctrl_load(ISP0_ID, ISP_CTRL_SINK_REG);
		pc = isp_ctrl_load(ISP0_ID, ISP_PC_REG);
		/* Record which sink bits were ever deasserted at this PC */
		isp_histogram->msink[pc] &= msink;
		/* A sink value of all-ones (0x7FF) is treated as "no stall" */
		stall = (msink != 0x7FF);

		if (stall)
			isp_histogram->stall[pc]++;
		else
			isp_histogram->run[pc]++;
	}

	/* SP sampling is deliberately compiled out by the "&& 0" below;
	 * the dead body is kept as a template for re-enabling it. */
	if (sp_histogram && 0) {
		msink = sp_ctrl_load(SP0_ID, SP_CTRL_SINK_REG);
		pc = sp_ctrl_load(SP0_ID, SP_PC_REG);
		sp_histogram->msink[pc] &= msink;
		stall = (msink != 0x7FF);

		if (stall)
			sp_histogram->stall[pc]++;
		else
			sp_histogram->run[pc]++;
	}
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_metrics.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#define IA_CSS_INCLUDE_PARAMETERS
#include "sh_css_params.h"
#include "isp/kernels/aa/aa_2/ia_css_aa2.host.h"
#include "isp/kernels/anr/anr_1.0/ia_css_anr.host.h"
#include "isp/kernels/anr/anr_2/ia_css_anr2.host.h"
#include "isp/kernels/bh/bh_2/ia_css_bh.host.h"
#include "isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.h"
#include "isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.h"
#include "isp/kernels/cnr/cnr_2/ia_css_cnr2.host.h"
#include "isp/kernels/crop/crop_1.0/ia_css_crop.host.h"
#include "isp/kernels/csc/csc_1.0/ia_css_csc.host.h"
#include "isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.h"
#include "isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.h"
#include "isp/kernels/ctc/ctc2/ia_css_ctc2.host.h"
#include "isp/kernels/de/de_1.0/ia_css_de.host.h"
#include "isp/kernels/de/de_2/ia_css_de2.host.h"
#include "isp/kernels/dp/dp_1.0/ia_css_dp.host.h"
#include "isp/kernels/fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
#include "isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.h"
#include "isp/kernels/gc/gc_1.0/ia_css_gc.host.h"
#include "isp/kernels/gc/gc_2/ia_css_gc2.host.h"
#include "isp/kernels/macc/macc_1.0/ia_css_macc.host.h"
#include "isp/kernels/macc/macc1_5/ia_css_macc1_5.host.h"
#include "isp/kernels/ob/ob_1.0/ia_css_ob.host.h"
#include "isp/kernels/ob/ob2/ia_css_ob2.host.h"
#include "isp/kernels/output/output_1.0/ia_css_output.host.h"
#include "isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.h"
#include "isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.h"
#include "isp/kernels/sc/sc_1.0/ia_css_sc.host.h"
#include "isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.h"
#include "isp/kernels/sdis/sdis_2/ia_css_sdis2.host.h"
#include "isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.h"
#include "isp/kernels/uds/uds_1.0/ia_css_uds_param.h"
#include "isp/kernels/wb/wb_1.0/ia_css_wb.host.h"
#include "isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.h"
#include "isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.h"
#include "isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.h"
#include "isp/kernels/ynr/ynr_2/ia_css_ynr2.host.h"
#include "isp/kernels/fc/fc_1.0/ia_css_formats.host.h"
#include "isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.h"
#include "isp/kernels/dpc2/ia_css_dpc2.host.h"
#include "isp/kernels/eed1_8/ia_css_eed1_8.host.h"
#include "isp/kernels/bnlm/ia_css_bnlm.host.h"
#include "isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.h"
/* Generated code: do not edit or commit. */
#include "ia_css_pipeline.h"
#include "ia_css_isp_params.h"
#include "ia_css_debug.h"
#include "assert_support.h"
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Code generated by genparam/gencode.c:gen_process_function() */
/*
 * Write the anti-aliasing strength into this stage's DMEM parameter
 * area, if the binary reserves space for the aa parameters.
 * NOTE(review): unlike its sibling process functions this one writes
 * the field directly and does not set params->isp_params_changed or
 * isp_mem_params_changed; pipe_id is unused here.
 */
static void
ia_css_process_aa(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	unsigned int size =
	    stage->binary->info->mem_offsets.offsets.param->dmem.aa.size;
	unsigned int offset =
	    stage->binary->info->mem_offsets.offsets.param->dmem.aa.offset;

	if (size) {
		struct sh_css_isp_aa_params *t = (struct sh_css_isp_aa_params *)
		&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
		t->strength = params->aa_config.strength;
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_anr(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.anr.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.anr.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_anr() enter:\n");
ia_css_anr_encode((struct sh_css_isp_anr_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->anr_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_anr() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_anr2(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->vmem.anr2.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->vmem.anr2.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_anr2() enter:\n");
ia_css_anr2_vmem_encode((struct ia_css_isp_anr2_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
¶ms->anr_thres,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_anr2() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_bh(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.bh.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.bh.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
ia_css_bh_encode((struct sh_css_isp_bh_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->s3a_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
}
}
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->hmem0.bh.size;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() enter:\n");
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_HMEM0] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_bh() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_cnr(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.cnr.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.cnr.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_cnr() enter:\n");
ia_css_cnr_encode((struct sh_css_isp_cnr_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->cnr_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_cnr() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_crop(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.crop.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.crop.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_crop() enter:\n");
ia_css_crop_encode((struct sh_css_isp_crop_isp_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->crop_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_crop() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_csc(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.csc.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.csc.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_csc() enter:\n");
ia_css_csc_encode((struct sh_css_isp_csc_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->cc_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_csc() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_dp(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.dp.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.dp.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() enter:\n");
ia_css_dp_encode((struct sh_css_isp_dp_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->dp_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_dp() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_bnr(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.bnr.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.bnr.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_bnr() enter:\n");
ia_css_bnr_encode((struct sh_css_isp_bnr_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->nr_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_bnr() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_de(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.de.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.de.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() enter:\n");
ia_css_de_encode((struct sh_css_isp_de_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->de_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_de() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_ecd(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.ecd.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.ecd.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_ecd() enter:\n");
ia_css_ecd_encode((struct sh_css_isp_ecd_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->ecd_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_ecd() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_formats(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.formats.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.formats.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_formats() enter:\n");
ia_css_formats_encode((struct sh_css_isp_formats_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->formats_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_formats() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_fpn(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.fpn.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.fpn.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_fpn() enter:\n");
ia_css_fpn_encode((struct sh_css_isp_fpn_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->fpn_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_fpn() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_gc(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.gc.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.gc.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
ia_css_gc_encode((struct sh_css_isp_gc_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->gc_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
}
}
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->vamem1.gc.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->vamem1.gc.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() enter:\n");
ia_css_gc_vamem_encode((struct sh_css_isp_gc_vamem_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
¶ms->gc_table,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_gc() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_ce(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.ce.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.ce.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() enter:\n");
ia_css_ce_encode((struct sh_css_isp_ce_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->ce_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ce() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_yuv2rgb(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.yuv2rgb.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_yuv2rgb() enter:\n");
ia_css_yuv2rgb_encode((struct sh_css_isp_csc_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->yuv2rgb_cc_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_yuv2rgb() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_rgb2yuv(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.rgb2yuv.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_rgb2yuv() enter:\n");
ia_css_rgb2yuv_encode((struct sh_css_isp_csc_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->rgb2yuv_cc_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_rgb2yuv() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_r_gamma(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->vamem0.r_gamma.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_r_gamma() enter:\n");
ia_css_r_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
¶ms->r_gamma_table,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_r_gamma() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_g_gamma(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->vamem1.g_gamma.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_g_gamma() enter:\n");
ia_css_g_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
¶ms->g_gamma_table,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_g_gamma() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_b_gamma(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->vamem2.b_gamma.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_b_gamma() enter:\n");
ia_css_b_gamma_vamem_encode((struct sh_css_isp_rgb_gamma_vamem_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM2].address[offset],
¶ms->b_gamma_table,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM2] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_b_gamma() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_uds(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.uds.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.uds.offset;
if (size) {
struct sh_css_sp_uds_params *p;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_uds() enter:\n");
p = (struct sh_css_sp_uds_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
p->crop_pos = params->uds_config.crop_pos;
p->uds = params->uds_config.uds;
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_uds() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_raa(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.raa.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.raa.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_raa() enter:\n");
ia_css_raa_encode((struct sh_css_isp_aa_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->raa_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_raa() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_s3a(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.s3a.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.s3a.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_s3a() enter:\n");
ia_css_s3a_encode((struct sh_css_isp_s3a_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->s3a_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_s3a() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_ob(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.ob.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.ob.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
ia_css_ob_encode((struct sh_css_isp_ob_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->ob_config,
¶ms->stream_configs.ob, size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
}
}
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->vmem.ob.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->vmem.ob.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() enter:\n");
ia_css_ob_vmem_encode((struct sh_css_isp_ob_vmem_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
¶ms->ob_config,
¶ms->stream_configs.ob, size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_ob() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_output(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.output.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.output.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_output() enter:\n");
ia_css_output_encode((struct sh_css_isp_output_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->output_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_output() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_sc(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.sc.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.sc.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() enter:\n");
ia_css_sc_encode((struct sh_css_isp_sc_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->sc_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_sc() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_bds(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.bds.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.bds.offset;
if (size) {
struct sh_css_isp_bds_params *p;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_bds() enter:\n");
p = (struct sh_css_isp_bds_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
p->baf_strength = params->bds_config.strength;
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_bds() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
static void
ia_css_process_tnr(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params)
{
assert(params);
{
unsigned int size =
stage->binary->info->mem_offsets.offsets.param->dmem.tnr.size;
unsigned int offset =
stage->binary->info->mem_offsets.offsets.param->dmem.tnr.offset;
if (size) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_tnr() enter:\n");
ia_css_tnr_encode((struct sh_css_isp_tnr_params *)
&stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
¶ms->tnr_config,
size);
params->isp_params_changed = true;
params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_process_tnr() leave:\n");
}
}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode params->macc_config into this stage's MACC DMEM parameter area
 * (when the binary reserves space for it) and mark that pipe/stage/DMEM
 * section as changed. */
static void
ia_css_process_macc(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.macc.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.macc.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_macc() enter:\n");
			ia_css_macc_encode((struct sh_css_isp_macc_params *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->macc_config,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_macc() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode the DVS horizontal coefficients (params->dvs_coefs) into this
 * stage's VMEM parameter area (when present) and mark that
 * pipe/stage/VMEM section as changed. */
static void
ia_css_process_sdis_horicoef(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->vmem.sdis_horicoef.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis_horicoef() enter:\n");
			ia_css_sdis_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
			    &params->dvs_coefs,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis_horicoef() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode the DVS vertical coefficients (params->dvs_coefs) into this
 * stage's VMEM parameter area (when present) and mark that
 * pipe/stage/VMEM section as changed. */
static void
ia_css_process_sdis_vertcoef(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->vmem.sdis_vertcoef.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis_vertcoef() enter:\n");
			ia_css_sdis_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
			    &params->dvs_coefs,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis_vertcoef() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode the DVS horizontal projection table (params->dvs_coefs) into
 * this stage's DMEM parameter area (when present) and mark that
 * pipe/stage/DMEM section as changed. */
static void
ia_css_process_sdis_horiproj(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.sdis_horiproj.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis_horiproj() enter:\n");
			ia_css_sdis_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->dvs_coefs,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis_horiproj() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode the DVS vertical projection table (params->dvs_coefs) into
 * this stage's DMEM parameter area (when present) and mark that
 * pipe/stage/DMEM section as changed. */
static void
ia_css_process_sdis_vertproj(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.sdis_vertproj.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis_vertproj() enter:\n");
			ia_css_sdis_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->dvs_coefs,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis_vertproj() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode the DVS2 horizontal coefficients (params->dvs2_coefs) into
 * this stage's VMEM parameter area (when present) and mark that
 * pipe/stage/VMEM section as changed. */
static void
ia_css_process_sdis2_horicoef(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_horicoef.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis2_horicoef() enter:\n");
			ia_css_sdis2_horicoef_vmem_encode((struct sh_css_isp_sdis_hori_coef_tbl *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
			    &params->dvs2_coefs,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis2_horicoef() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode the DVS2 vertical coefficients (params->dvs2_coefs) into this
 * stage's VMEM parameter area (when present) and mark that
 * pipe/stage/VMEM section as changed. */
static void
ia_css_process_sdis2_vertcoef(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->vmem.sdis2_vertcoef.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis2_vertcoef() enter:\n");
			ia_css_sdis2_vertcoef_vmem_encode((struct sh_css_isp_sdis_vert_coef_tbl *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VMEM].address[offset],
			    &params->dvs2_coefs,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis2_vertcoef() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode the DVS2 horizontal projection table (params->dvs2_coefs) into
 * this stage's DMEM parameter area (when present) and mark that
 * pipe/stage/DMEM section as changed. */
static void
ia_css_process_sdis2_horiproj(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_horiproj.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis2_horiproj() enter:\n");
			ia_css_sdis2_horiproj_encode((struct sh_css_isp_sdis_hori_proj_tbl *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->dvs2_coefs,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis2_horiproj() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode the DVS2 vertical projection table (params->dvs2_coefs) into
 * this stage's DMEM parameter area (when present) and mark that
 * pipe/stage/DMEM section as changed. */
static void
ia_css_process_sdis2_vertproj(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.sdis2_vertproj.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis2_vertproj() enter:\n");
			ia_css_sdis2_vertproj_encode((struct sh_css_isp_sdis_vert_proj_tbl *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->dvs2_coefs,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_sdis2_vertproj() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode params->wb_config into this stage's white-balance DMEM
 * parameter area (when present) and mark that pipe/stage/DMEM section
 * as changed. */
static void
ia_css_process_wb(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.wb.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.wb.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() enter:\n");
			ia_css_wb_encode((struct sh_css_isp_wb_params *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->wb_config,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_wb() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode params->nr_config into this stage's NR DMEM parameter area
 * (when present) and mark that pipe/stage/DMEM section as changed.
 * Note: the DMEM layout type is sh_css_isp_ynr_params (shared with ynr). */
static void
ia_css_process_nr(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.nr.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.nr.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() enter:\n");
			ia_css_nr_encode((struct sh_css_isp_ynr_params *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->nr_config,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_nr() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode params->yee_config into this stage's YEE DMEM parameter area
 * (when present) and mark that pipe/stage/DMEM section as changed. */
static void
ia_css_process_yee(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.yee.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.yee.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_yee() enter:\n");
			ia_css_yee_encode((struct sh_css_isp_yee_params *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->yee_config,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_yee() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode params->ynr_config into this stage's YNR DMEM parameter area
 * (when present) and mark that pipe/stage/DMEM section as changed.
 * Note: the DMEM layout type is sh_css_isp_yee2_params. */
static void
ia_css_process_ynr(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.ynr.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.ynr.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_ynr() enter:\n");
			ia_css_ynr_encode((struct sh_css_isp_yee2_params *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->ynr_config,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_ynr() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode params->fc_config into this stage's FC DMEM parameter area
 * (when present) and mark that pipe/stage/DMEM section as changed. */
static void
ia_css_process_fc(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.fc.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.fc.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() enter:\n");
			ia_css_fc_encode((struct sh_css_isp_fc_params *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->fc_config,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_process_fc() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode the CTC kernel parameters for this stage: params->ctc_config
 * into the DMEM parameter area and params->ctc_table into the VAMEM0
 * area (each only when the binary reserves space for it), marking the
 * corresponding pipe/stage memory sections as changed. */
static void
ia_css_process_ctc(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.ctc.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.ctc.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_ctc() enter:\n");
			ia_css_ctc_encode((struct sh_css_isp_ctc_params *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->ctc_config,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_ctc() leave:\n");
		}
	}
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->vamem0.ctc.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_ctc() enter:\n");
			ia_css_ctc_vamem_encode((struct sh_css_isp_ctc_vamem_params *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM0].address[offset],
			    &params->ctc_table,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM0] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_ctc() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode params->xnr_table into this stage's XNR VAMEM1 table area
 * (when present) and mark that pipe/stage/VAMEM1 section as changed. */
static void
ia_css_process_xnr_table(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->vamem1.xnr_table.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_xnr_table() enter:\n");
			ia_css_xnr_table_vamem_encode((struct sh_css_isp_xnr_vamem_params *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_VAMEM1].address[offset],
			    &params->xnr_table,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_VAMEM1] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_xnr_table() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode params->xnr_config into this stage's XNR DMEM parameter area
 * (when present) and mark that pipe/stage/DMEM section as changed. */
static void
ia_css_process_xnr(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.xnr.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.xnr.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_xnr() enter:\n");
			ia_css_xnr_encode((struct sh_css_isp_xnr_params *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->xnr_config,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_xnr() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_process_function() */
/* Encode params->xnr3_config into this stage's XNR3 DMEM parameter area
 * (when present) and mark that pipe/stage/DMEM section as changed. */
static void
ia_css_process_xnr3(
    unsigned int pipe_id,
    const struct ia_css_pipeline_stage *stage,
    struct ia_css_isp_parameters *params)
{
	assert(params);
	{
		unsigned int size =
		    stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.size;
		unsigned int offset =
		    stage->binary->info->mem_offsets.offsets.param->dmem.xnr3.offset;

		if (size) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_xnr3() enter:\n");
			ia_css_xnr3_encode((struct sh_css_isp_xnr3_params *)
			    &stage->binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset],
			    &params->xnr3_config,
			    size);
			params->isp_params_changed = true;
			params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM] =
			    true;
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
					    "ia_css_process_xnr3() leave:\n");
		}
	}
}
/* Code generated by genparam/gencode.c:gen_param_process_table() */
/*
 * Per-kernel dispatch table of the ia_css_process_*() functions above.
 * NOTE(review): presumably indexed by the IA_CSS_*_ID parameter ids used
 * in the set-functions below, so the entry order must match that enum —
 * regenerate rather than reorder by hand; confirm against the enum.
 */
void (*ia_css_kernel_process_param[IA_CSS_NUM_PARAMETER_IDS])(
unsigned int pipe_id,
const struct ia_css_pipeline_stage *stage,
struct ia_css_isp_parameters *params) = {
ia_css_process_aa,
ia_css_process_anr,
ia_css_process_anr2,
ia_css_process_bh,
ia_css_process_cnr,
ia_css_process_crop,
ia_css_process_csc,
ia_css_process_dp,
ia_css_process_bnr,
ia_css_process_de,
ia_css_process_ecd,
ia_css_process_formats,
ia_css_process_fpn,
ia_css_process_gc,
ia_css_process_ce,
ia_css_process_yuv2rgb,
ia_css_process_rgb2yuv,
ia_css_process_r_gamma,
ia_css_process_g_gamma,
ia_css_process_b_gamma,
ia_css_process_uds,
ia_css_process_raa,
ia_css_process_s3a,
ia_css_process_ob,
ia_css_process_output,
ia_css_process_sc,
ia_css_process_bds,
ia_css_process_tnr,
ia_css_process_macc,
ia_css_process_sdis_horicoef,
ia_css_process_sdis_vertcoef,
ia_css_process_sdis_horiproj,
ia_css_process_sdis_vertproj,
ia_css_process_sdis2_horicoef,
ia_css_process_sdis2_vertcoef,
ia_css_process_sdis2_horiproj,
ia_css_process_sdis2_vertproj,
ia_css_process_wb,
ia_css_process_nr,
ia_css_process_yee,
ia_css_process_ynr,
ia_css_process_fc,
ia_css_process_ctc,
ia_css_process_xnr_table,
ia_css_process_xnr,
ia_css_process_xnr3,
};
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached dp config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_dp_config(const struct ia_css_isp_parameters *params,
struct ia_css_dp_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_dp_config() enter: config=%p\n",
config);
*config = params->dp_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_dp_config() leave\n");
ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new dp config in @params and mark it changed. */
void
ia_css_set_dp_config(struct ia_css_isp_parameters *params,
const struct ia_css_dp_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_dp_config() enter:\n");
ia_css_dp_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->dp_config = *config;
params->config_changed[IA_CSS_DP_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_dp_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached wb config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_wb_config(const struct ia_css_isp_parameters *params,
struct ia_css_wb_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_wb_config() enter: config=%p\n",
config);
*config = params->wb_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_wb_config() leave\n");
ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new wb config in @params and mark it changed. */
void
ia_css_set_wb_config(struct ia_css_isp_parameters *params,
const struct ia_css_wb_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_wb_config() enter:\n");
ia_css_wb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->wb_config = *config;
params->config_changed[IA_CSS_WB_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_wb_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached tnr config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_tnr_config(const struct ia_css_isp_parameters *params,
struct ia_css_tnr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_tnr_config() enter: config=%p\n",
config);
*config = params->tnr_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_tnr_config() leave\n");
ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new tnr config in @params and mark it changed. */
void
ia_css_set_tnr_config(struct ia_css_isp_parameters *params,
const struct ia_css_tnr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_tnr_config() enter:\n");
ia_css_tnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->tnr_config = *config;
params->config_changed[IA_CSS_TNR_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_tnr_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached ob config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_ob_config(const struct ia_css_isp_parameters *params,
struct ia_css_ob_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_ob_config() enter: config=%p\n",
config);
*config = params->ob_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_ob_config() leave\n");
ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new ob config in @params and mark it changed. */
void
ia_css_set_ob_config(struct ia_css_isp_parameters *params,
const struct ia_css_ob_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ob_config() enter:\n");
ia_css_ob_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->ob_config = *config;
params->config_changed[IA_CSS_OB_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_ob_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached de config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_de_config(const struct ia_css_isp_parameters *params,
struct ia_css_de_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_de_config() enter: config=%p\n",
config);
*config = params->de_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_de_config() leave\n");
ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new de config in @params and mark it changed. */
void
ia_css_set_de_config(struct ia_css_isp_parameters *params,
const struct ia_css_de_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_de_config() enter:\n");
ia_css_de_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->de_config = *config;
params->config_changed[IA_CSS_DE_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_de_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached anr config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_anr_config(const struct ia_css_isp_parameters *params,
struct ia_css_anr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_anr_config() enter: config=%p\n",
config);
*config = params->anr_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_anr_config() leave\n");
ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new anr config in @params and mark it changed. */
void
ia_css_set_anr_config(struct ia_css_isp_parameters *params,
const struct ia_css_anr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr_config() enter:\n");
ia_css_anr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->anr_config = *config;
params->config_changed[IA_CSS_ANR_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_anr_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached anr2 thresholds from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_anr2_config(const struct ia_css_isp_parameters *params,
struct ia_css_anr_thres *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_anr2_config() enter: config=%p\n",
config);
*config = params->anr_thres;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_anr2_config() leave\n");
ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new anr2 thresholds in @params and mark them changed. */
void
ia_css_set_anr2_config(struct ia_css_isp_parameters *params,
const struct ia_css_anr_thres *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_anr2_config() enter:\n");
ia_css_anr2_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->anr_thres = *config;
params->config_changed[IA_CSS_ANR2_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_anr2_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached ce config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_ce_config(const struct ia_css_isp_parameters *params,
struct ia_css_ce_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_ce_config() enter: config=%p\n",
config);
*config = params->ce_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_ce_config() leave\n");
ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new ce config in @params and mark it changed. */
void
ia_css_set_ce_config(struct ia_css_isp_parameters *params,
const struct ia_css_ce_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ce_config() enter:\n");
ia_css_ce_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->ce_config = *config;
params->config_changed[IA_CSS_CE_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_ce_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached ecd config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_ecd_config(const struct ia_css_isp_parameters *params,
struct ia_css_ecd_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_ecd_config() enter: config=%p\n",
config);
*config = params->ecd_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_ecd_config() leave\n");
ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new ecd config in @params and mark it changed. */
void
ia_css_set_ecd_config(struct ia_css_isp_parameters *params,
const struct ia_css_ecd_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ecd_config() enter:\n");
ia_css_ecd_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->ecd_config = *config;
params->config_changed[IA_CSS_ECD_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_ecd_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached ynr config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_ynr_config(const struct ia_css_isp_parameters *params,
struct ia_css_ynr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_ynr_config() enter: config=%p\n",
config);
*config = params->ynr_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_ynr_config() leave\n");
ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new ynr config in @params and mark it changed. */
void
ia_css_set_ynr_config(struct ia_css_isp_parameters *params,
const struct ia_css_ynr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ynr_config() enter:\n");
ia_css_ynr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->ynr_config = *config;
params->config_changed[IA_CSS_YNR_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_ynr_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached fc config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_fc_config(const struct ia_css_isp_parameters *params,
struct ia_css_fc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_fc_config() enter: config=%p\n",
config);
*config = params->fc_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_fc_config() leave\n");
ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new fc config in @params and mark it changed. */
void
ia_css_set_fc_config(struct ia_css_isp_parameters *params,
const struct ia_css_fc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_fc_config() enter:\n");
ia_css_fc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->fc_config = *config;
params->config_changed[IA_CSS_FC_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_fc_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached cnr config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_cnr_config(const struct ia_css_isp_parameters *params,
struct ia_css_cnr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_cnr_config() enter: config=%p\n",
config);
*config = params->cnr_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_cnr_config() leave\n");
ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new cnr config in @params and mark it changed. */
void
ia_css_set_cnr_config(struct ia_css_isp_parameters *params,
const struct ia_css_cnr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_cnr_config() enter:\n");
ia_css_cnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->cnr_config = *config;
params->config_changed[IA_CSS_CNR_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_cnr_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached macc config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_macc_config(const struct ia_css_isp_parameters *params,
struct ia_css_macc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_macc_config() enter: config=%p\n",
config);
*config = params->macc_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_macc_config() leave\n");
ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new macc config in @params and mark it changed. */
void
ia_css_set_macc_config(struct ia_css_isp_parameters *params,
const struct ia_css_macc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_macc_config() enter:\n");
ia_css_macc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->macc_config = *config;
params->config_changed[IA_CSS_MACC_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_macc_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached ctc config from @params to @config; NULL @config is a no-op. */
static void
ia_css_get_ctc_config(const struct ia_css_isp_parameters *params,
struct ia_css_ctc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_ctc_config() enter: config=%p\n",
config);
*config = params->ctc_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_ctc_config() leave\n");
ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new ctc config in @params and mark it changed. */
void
ia_css_set_ctc_config(struct ia_css_isp_parameters *params,
const struct ia_css_ctc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_ctc_config() enter:\n");
ia_css_ctc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->ctc_config = *config;
params->config_changed[IA_CSS_CTC_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_ctc_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
/* Copy the cached aa config from @params to @config; NULL @config is a no-op.
 * No per-kernel debug dtrace exists for aa, hence none is called here. */
static void
ia_css_get_aa_config(const struct ia_css_isp_parameters *params,
struct ia_css_aa_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_aa_config() enter: config=%p\n",
config);
*config = params->aa_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_aa_config() leave\n");
}
/* Code generated by genparam/gencode.c:gen_set_function() */
/* Store @config as the new aa config in @params and mark it changed. */
void
ia_css_set_aa_config(struct ia_css_isp_parameters *params,
const struct ia_css_aa_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_aa_config() enter:\n");
params->aa_config = *config;
params->config_changed[IA_CSS_AA_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_aa_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_yuv2rgb_config(const struct ia_css_isp_parameters *params,
struct ia_css_cc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_yuv2rgb_config() enter: config=%p\n",
config);
*config = params->yuv2rgb_cc_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_yuv2rgb_config() leave\n");
ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_yuv2rgb_config(struct ia_css_isp_parameters *params,
const struct ia_css_cc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_yuv2rgb_config() enter:\n");
ia_css_yuv2rgb_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->yuv2rgb_cc_config = *config;
params->config_changed[IA_CSS_YUV2RGB_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_yuv2rgb_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_rgb2yuv_config(const struct ia_css_isp_parameters *params,
struct ia_css_cc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_rgb2yuv_config() enter: config=%p\n",
config);
*config = params->rgb2yuv_cc_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_rgb2yuv_config() leave\n");
ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_rgb2yuv_config(struct ia_css_isp_parameters *params,
const struct ia_css_cc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_rgb2yuv_config() enter:\n");
ia_css_rgb2yuv_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->rgb2yuv_cc_config = *config;
params->config_changed[IA_CSS_RGB2YUV_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_rgb2yuv_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_csc_config(const struct ia_css_isp_parameters *params,
struct ia_css_cc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_csc_config() enter: config=%p\n",
config);
*config = params->cc_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_csc_config() leave\n");
ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_csc_config(struct ia_css_isp_parameters *params,
const struct ia_css_cc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_csc_config() enter:\n");
ia_css_csc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->cc_config = *config;
params->config_changed[IA_CSS_CSC_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_csc_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_nr_config(const struct ia_css_isp_parameters *params,
struct ia_css_nr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_nr_config() enter: config=%p\n",
config);
*config = params->nr_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_nr_config() leave\n");
ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_nr_config(struct ia_css_isp_parameters *params,
const struct ia_css_nr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_nr_config() enter:\n");
ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->nr_config = *config;
params->config_changed[IA_CSS_BNR_ID] = true;
params->config_changed[IA_CSS_NR_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_nr_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_gc_config(const struct ia_css_isp_parameters *params,
struct ia_css_gc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_gc_config() enter: config=%p\n",
config);
*config = params->gc_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_gc_config() leave\n");
ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_gc_config(struct ia_css_isp_parameters *params,
const struct ia_css_gc_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_gc_config() enter:\n");
ia_css_gc_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->gc_config = *config;
params->config_changed[IA_CSS_GC_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_gc_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_sdis_horicoef_config(const struct ia_css_isp_parameters *params,
struct ia_css_dvs_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis_horicoef_config() enter: config=%p\n",
config);
*config = params->dvs_coefs;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis_horicoef_config() leave\n");
ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_sdis_horicoef_config(struct ia_css_isp_parameters *params,
const struct ia_css_dvs_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_set_sdis_horicoef_config() enter:\n");
ia_css_sdis_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->dvs_coefs = *config;
params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_sdis_horicoef_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_sdis_vertcoef_config(const struct ia_css_isp_parameters *params,
struct ia_css_dvs_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis_vertcoef_config() enter: config=%p\n",
config);
*config = params->dvs_coefs;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis_vertcoef_config() leave\n");
ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_sdis_vertcoef_config(struct ia_css_isp_parameters *params,
const struct ia_css_dvs_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_set_sdis_vertcoef_config() enter:\n");
ia_css_sdis_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->dvs_coefs = *config;
params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_sdis_vertcoef_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_sdis_horiproj_config(const struct ia_css_isp_parameters *params,
struct ia_css_dvs_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis_horiproj_config() enter: config=%p\n",
config);
*config = params->dvs_coefs;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis_horiproj_config() leave\n");
ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_sdis_horiproj_config(struct ia_css_isp_parameters *params,
const struct ia_css_dvs_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_set_sdis_horiproj_config() enter:\n");
ia_css_sdis_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->dvs_coefs = *config;
params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_sdis_horiproj_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_sdis_vertproj_config(const struct ia_css_isp_parameters *params,
struct ia_css_dvs_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis_vertproj_config() enter: config=%p\n",
config);
*config = params->dvs_coefs;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis_vertproj_config() leave\n");
ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_sdis_vertproj_config(struct ia_css_isp_parameters *params,
const struct ia_css_dvs_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_set_sdis_vertproj_config() enter:\n");
ia_css_sdis_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->dvs_coefs = *config;
params->config_changed[IA_CSS_SDIS_HORICOEF_ID] = true;
params->config_changed[IA_CSS_SDIS_VERTCOEF_ID] = true;
params->config_changed[IA_CSS_SDIS_HORIPROJ_ID] = true;
params->config_changed[IA_CSS_SDIS_VERTPROJ_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_sdis_vertproj_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_sdis2_horicoef_config(const struct ia_css_isp_parameters *params,
struct ia_css_dvs2_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis2_horicoef_config() enter: config=%p\n",
config);
*config = params->dvs2_coefs;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis2_horicoef_config() leave\n");
ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_sdis2_horicoef_config(struct ia_css_isp_parameters *params,
const struct ia_css_dvs2_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_set_sdis2_horicoef_config() enter:\n");
ia_css_sdis2_horicoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->dvs2_coefs = *config;
params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_sdis2_horicoef_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_sdis2_vertcoef_config(const struct ia_css_isp_parameters *params,
struct ia_css_dvs2_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis2_vertcoef_config() enter: config=%p\n",
config);
*config = params->dvs2_coefs;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis2_vertcoef_config() leave\n");
ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_sdis2_vertcoef_config(struct ia_css_isp_parameters *params,
const struct ia_css_dvs2_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_set_sdis2_vertcoef_config() enter:\n");
ia_css_sdis2_vertcoef_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->dvs2_coefs = *config;
params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_sdis2_vertcoef_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_sdis2_horiproj_config(const struct ia_css_isp_parameters *params,
struct ia_css_dvs2_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis2_horiproj_config() enter: config=%p\n",
config);
*config = params->dvs2_coefs;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis2_horiproj_config() leave\n");
ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_sdis2_horiproj_config(struct ia_css_isp_parameters *params,
const struct ia_css_dvs2_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_set_sdis2_horiproj_config() enter:\n");
ia_css_sdis2_horiproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->dvs2_coefs = *config;
params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_sdis2_horiproj_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_sdis2_vertproj_config(const struct ia_css_isp_parameters *params,
struct ia_css_dvs2_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis2_vertproj_config() enter: config=%p\n",
config);
*config = params->dvs2_coefs;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_sdis2_vertproj_config() leave\n");
ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_sdis2_vertproj_config(struct ia_css_isp_parameters *params,
const struct ia_css_dvs2_coefficients *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_set_sdis2_vertproj_config() enter:\n");
ia_css_sdis2_vertproj_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->dvs2_coefs = *config;
params->config_changed[IA_CSS_SDIS2_HORICOEF_ID] = true;
params->config_changed[IA_CSS_SDIS2_VERTCOEF_ID] = true;
params->config_changed[IA_CSS_SDIS2_HORIPROJ_ID] = true;
params->config_changed[IA_CSS_SDIS2_VERTPROJ_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_sdis2_vertproj_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_r_gamma_config(const struct ia_css_isp_parameters *params,
struct ia_css_rgb_gamma_table *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_r_gamma_config() enter: config=%p\n",
config);
*config = params->r_gamma_table;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_r_gamma_config() leave\n");
ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_r_gamma_config(struct ia_css_isp_parameters *params,
const struct ia_css_rgb_gamma_table *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_r_gamma_config() enter:\n");
ia_css_r_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->r_gamma_table = *config;
params->config_changed[IA_CSS_R_GAMMA_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_r_gamma_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_g_gamma_config(const struct ia_css_isp_parameters *params,
struct ia_css_rgb_gamma_table *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_g_gamma_config() enter: config=%p\n",
config);
*config = params->g_gamma_table;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_g_gamma_config() leave\n");
ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_g_gamma_config(struct ia_css_isp_parameters *params,
const struct ia_css_rgb_gamma_table *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_g_gamma_config() enter:\n");
ia_css_g_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->g_gamma_table = *config;
params->config_changed[IA_CSS_G_GAMMA_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_g_gamma_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_b_gamma_config(const struct ia_css_isp_parameters *params,
struct ia_css_rgb_gamma_table *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_b_gamma_config() enter: config=%p\n",
config);
*config = params->b_gamma_table;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_b_gamma_config() leave\n");
ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_b_gamma_config(struct ia_css_isp_parameters *params,
const struct ia_css_rgb_gamma_table *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_b_gamma_config() enter:\n");
ia_css_b_gamma_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->b_gamma_table = *config;
params->config_changed[IA_CSS_B_GAMMA_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_b_gamma_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_xnr_table_config(const struct ia_css_isp_parameters *params,
struct ia_css_xnr_table *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_xnr_table_config() enter: config=%p\n",
config);
*config = params->xnr_table;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_xnr_table_config() leave\n");
ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_xnr_table_config(struct ia_css_isp_parameters *params,
const struct ia_css_xnr_table *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
"ia_css_set_xnr_table_config() enter:\n");
ia_css_xnr_table_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->xnr_table = *config;
params->config_changed[IA_CSS_XNR_TABLE_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_xnr_table_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_formats_config(const struct ia_css_isp_parameters *params,
struct ia_css_formats_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_formats_config() enter: config=%p\n",
config);
*config = params->formats_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_formats_config() leave\n");
ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_formats_config(struct ia_css_isp_parameters *params,
const struct ia_css_formats_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_formats_config() enter:\n");
ia_css_formats_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->formats_config = *config;
params->config_changed[IA_CSS_FORMATS_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_formats_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_xnr_config(const struct ia_css_isp_parameters *params,
struct ia_css_xnr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_xnr_config() enter: config=%p\n",
config);
*config = params->xnr_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_xnr_config() leave\n");
ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_xnr_config(struct ia_css_isp_parameters *params,
const struct ia_css_xnr_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr_config() enter:\n");
ia_css_xnr_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->xnr_config = *config;
params->config_changed[IA_CSS_XNR_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_xnr_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_xnr3_config(const struct ia_css_isp_parameters *params,
struct ia_css_xnr3_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_xnr3_config() enter: config=%p\n",
config);
*config = params->xnr3_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_xnr3_config() leave\n");
ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_xnr3_config(struct ia_css_isp_parameters *params,
const struct ia_css_xnr3_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_xnr3_config() enter:\n");
ia_css_xnr3_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->xnr3_config = *config;
params->config_changed[IA_CSS_XNR3_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_xnr3_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_s3a_config(const struct ia_css_isp_parameters *params,
struct ia_css_3a_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_s3a_config() enter: config=%p\n",
config);
*config = params->s3a_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_s3a_config() leave\n");
ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_s3a_config(struct ia_css_isp_parameters *params,
const struct ia_css_3a_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_s3a_config() enter:\n");
ia_css_s3a_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->s3a_config = *config;
params->config_changed[IA_CSS_BH_ID] = true;
params->config_changed[IA_CSS_S3A_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_s3a_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_get_function() */
static void
ia_css_get_output_config(const struct ia_css_isp_parameters *params,
struct ia_css_output_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_output_config() enter: config=%p\n",
config);
*config = params->output_config;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_get_output_config() leave\n");
ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
}
/* Code generated by genparam/gencode.c:gen_set_function() */
void
ia_css_set_output_config(struct ia_css_isp_parameters *params,
const struct ia_css_output_config *config)
{
if (!config)
return;
assert(params);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_set_output_config() enter:\n");
ia_css_output_debug_dtrace(config, IA_CSS_DEBUG_TRACE);
params->output_config = *config;
params->config_changed[IA_CSS_OUTPUT_ID] = true;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
"ia_css_set_output_config() leave: return_void\n");
}
/* Code generated by genparam/gencode.c:gen_global_access_function() */
/*
 * Copy every per-kernel configuration out of @params into the structures
 * pointed to by @config's members.  Although @config itself is
 * const-qualified, each ia_css_get_*_config() helper writes through the
 * (non-const) pointer members; helpers skip NULL member pointers.
 * NOTE(review): generated code — edit genparam/gencode.c, not this file.
 */
void
ia_css_get_configs(struct ia_css_isp_parameters *params,
const struct ia_css_isp_config *config)
{
ia_css_get_dp_config(params, config->dp_config);
ia_css_get_wb_config(params, config->wb_config);
ia_css_get_tnr_config(params, config->tnr_config);
ia_css_get_ob_config(params, config->ob_config);
ia_css_get_de_config(params, config->de_config);
ia_css_get_anr_config(params, config->anr_config);
ia_css_get_anr2_config(params, config->anr_thres);
ia_css_get_ce_config(params, config->ce_config);
ia_css_get_ecd_config(params, config->ecd_config);
ia_css_get_ynr_config(params, config->ynr_config);
ia_css_get_fc_config(params, config->fc_config);
ia_css_get_cnr_config(params, config->cnr_config);
ia_css_get_macc_config(params, config->macc_config);
ia_css_get_ctc_config(params, config->ctc_config);
ia_css_get_aa_config(params, config->aa_config);
ia_css_get_yuv2rgb_config(params, config->yuv2rgb_cc_config);
ia_css_get_rgb2yuv_config(params, config->rgb2yuv_cc_config);
ia_css_get_csc_config(params, config->cc_config);
ia_css_get_nr_config(params, config->nr_config);
ia_css_get_gc_config(params, config->gc_config);
/* All four sdis getters copy the same params->dvs_coefs. */
ia_css_get_sdis_horicoef_config(params, config->dvs_coefs);
ia_css_get_sdis_vertcoef_config(params, config->dvs_coefs);
ia_css_get_sdis_horiproj_config(params, config->dvs_coefs);
ia_css_get_sdis_vertproj_config(params, config->dvs_coefs);
/* Likewise, all four sdis2 getters copy the same params->dvs2_coefs. */
ia_css_get_sdis2_horicoef_config(params, config->dvs2_coefs);
ia_css_get_sdis2_vertcoef_config(params, config->dvs2_coefs);
ia_css_get_sdis2_horiproj_config(params, config->dvs2_coefs);
ia_css_get_sdis2_vertproj_config(params, config->dvs2_coefs);
ia_css_get_r_gamma_config(params, config->r_gamma_table);
ia_css_get_g_gamma_config(params, config->g_gamma_table);
ia_css_get_b_gamma_config(params, config->b_gamma_table);
ia_css_get_xnr_table_config(params, config->xnr_table);
ia_css_get_formats_config(params, config->formats_config);
ia_css_get_xnr_config(params, config->xnr_config);
ia_css_get_xnr3_config(params, config->xnr3_config);
ia_css_get_s3a_config(params, config->s3a_config);
ia_css_get_output_config(params, config->output_config);
}
/* Code generated by genparam/gencode.c:gen_global_access_function() */
/*
 * Copy every per-kernel configuration from @config into @params and mark
 * the corresponding params->config_changed[] entries.  Each
 * ia_css_set_*_config() helper silently skips a NULL member pointer.
 * The four sdis setters all write params->dvs_coefs (and the sdis2 ones
 * params->dvs2_coefs), so passing the same pointer four times is
 * redundant but harmless.
 * NOTE(review): generated code — edit genparam/gencode.c, not this file.
 */
void
ia_css_set_configs(struct ia_css_isp_parameters *params,
const struct ia_css_isp_config *config)
{
ia_css_set_dp_config(params, config->dp_config);
ia_css_set_wb_config(params, config->wb_config);
ia_css_set_tnr_config(params, config->tnr_config);
ia_css_set_ob_config(params, config->ob_config);
ia_css_set_de_config(params, config->de_config);
ia_css_set_anr_config(params, config->anr_config);
ia_css_set_anr2_config(params, config->anr_thres);
ia_css_set_ce_config(params, config->ce_config);
ia_css_set_ecd_config(params, config->ecd_config);
ia_css_set_ynr_config(params, config->ynr_config);
ia_css_set_fc_config(params, config->fc_config);
ia_css_set_cnr_config(params, config->cnr_config);
ia_css_set_macc_config(params, config->macc_config);
ia_css_set_ctc_config(params, config->ctc_config);
ia_css_set_aa_config(params, config->aa_config);
ia_css_set_yuv2rgb_config(params, config->yuv2rgb_cc_config);
ia_css_set_rgb2yuv_config(params, config->rgb2yuv_cc_config);
ia_css_set_csc_config(params, config->cc_config);
ia_css_set_nr_config(params, config->nr_config);
ia_css_set_gc_config(params, config->gc_config);
ia_css_set_sdis_horicoef_config(params, config->dvs_coefs);
ia_css_set_sdis_vertcoef_config(params, config->dvs_coefs);
ia_css_set_sdis_horiproj_config(params, config->dvs_coefs);
ia_css_set_sdis_vertproj_config(params, config->dvs_coefs);
ia_css_set_sdis2_horicoef_config(params, config->dvs2_coefs);
ia_css_set_sdis2_vertcoef_config(params, config->dvs2_coefs);
ia_css_set_sdis2_horiproj_config(params, config->dvs2_coefs);
ia_css_set_sdis2_vertproj_config(params, config->dvs2_coefs);
ia_css_set_r_gamma_config(params, config->r_gamma_table);
ia_css_set_g_gamma_config(params, config->g_gamma_table);
ia_css_set_b_gamma_config(params, config->b_gamma_table);
ia_css_set_xnr_table_config(params, config->xnr_table);
ia_css_set_formats_config(params, config->formats_config);
ia_css_set_xnr_config(params, config->xnr_config);
ia_css_set_xnr3_config(params, config->xnr3_config);
ia_css_set_s3a_config(params, config->s3a_config);
ia_css_set_output_config(params, config->output_config);
}
/* ---- end of drivers/staging/media/atomisp/pci/ia_css_isp_params.c (linux-master) ---- */
// SPDX-License-Identifier: GPL-2.0
/*
* Code to build software firmware node graph for atomisp2 connected sensors
* from ACPI tables.
*
* Copyright (C) 2023 Hans de Goede <[email protected]>
*
* Based on drivers/media/pci/intel/ipu3/cio2-bridge.c written by:
* Dan Scally <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/property.h>
#include <media/ipu-bridge.h>
#include <media/v4l2-fwnode.h>
#include "atomisp_cmd.h"
#include "atomisp_csi2.h"
#include "atomisp_internal.h"
/* Expected sensor master-clock rate, set on the PMC platform clock */
#define PMC_CLK_RATE_19_2MHZ 19200000

/*
 * 79234640-9e10-4fea-a5c1-b5aa8b19756f
 * This _DSM GUID returns information about the GPIO lines mapped to a sensor.
 * Function number 1 returns a count of the GPIO lines that are mapped.
 * Subsequent functions return 32 bit ints encoding information about the GPIO.
 */
static const guid_t intel_sensor_gpio_info_guid =
	GUID_INIT(0x79234640, 0x9e10, 0x4fea,
		  0xa5, 0xc1, 0xb5, 0xaa, 0x8b, 0x19, 0x75, 0x6f);

/* Field layout of the 32 bit GPIO-info value returned by the _DSM above */
#define INTEL_GPIO_DSM_TYPE_SHIFT 0
#define INTEL_GPIO_DSM_TYPE_MASK GENMASK(7, 0)
#define INTEL_GPIO_DSM_PIN_SHIFT 8
#define INTEL_GPIO_DSM_PIN_MASK GENMASK(15, 8)
#define INTEL_GPIO_DSM_SENSOR_ON_VAL_SHIFT 24
#define INTEL_GPIO_DSM_SENSOR_ON_VAL_MASK GENMASK(31, 24)

/* Field accessors for the value above */
#define INTEL_GPIO_DSM_TYPE(x) \
	(((x) & INTEL_GPIO_DSM_TYPE_MASK) >> INTEL_GPIO_DSM_TYPE_SHIFT)
#define INTEL_GPIO_DSM_PIN(x) \
	(((x) & INTEL_GPIO_DSM_PIN_MASK) >> INTEL_GPIO_DSM_PIN_SHIFT)
#define INTEL_GPIO_DSM_SENSOR_ON_VAL(x) \
	(((x) & INTEL_GPIO_DSM_SENSOR_ON_VAL_MASK) >> INTEL_GPIO_DSM_SENSOR_ON_VAL_SHIFT)

/*
 * 822ace8f-2814-4174-a56b-5f029fe079ee
 * This _DSM GUID returns a string from the sensor device, which acts as a
 * module identifier.
 */
static const guid_t intel_sensor_module_guid =
	GUID_INIT(0x822ace8f, 0x2814, 0x4174,
		  0xa5, 0x6b, 0x5f, 0x02, 0x9f, 0xe0, 0x79, 0xee);

/*
 * dc2f6c4f-045b-4f1d-97b9-882a6860a4be
 * This _DSM GUID returns a package with n*2 strings, with each set of 2 strings
 * forming a key, value pair for settings like e.g. "CsiLanes" = "1".
 */
static const guid_t atomisp_dsm_guid =
	GUID_INIT(0xdc2f6c4f, 0x045b, 0x4f1d,
		  0x97, 0xb9, 0x88, 0x2a, 0x68, 0x60, 0xa4, 0xbe);

/*
 * 75c9a639-5c8a-4a00-9f48-a9c3b5da789f
 * This _DSM GUID returns a string giving the VCM type e.g. "AD5823".
 */
static const guid_t vcm_dsm_guid =
	GUID_INIT(0x75c9a639, 0x5c8a, 0x4a00,
		  0x9f, 0x48, 0xa9, 0xc3, 0xb5, 0xda, 0x78, 0x9f);

/* Fixed per sensor-model settings (CSI lane-count, has a VCM or not) */
struct atomisp_sensor_config {
	int lanes;
	bool vcm;
};

/* Build an acpi_device_id entry carrying an atomisp_sensor_config */
#define ATOMISP_SENSOR_CONFIG(_HID, _LANES, _VCM) \
	{ \
		.id = _HID, \
		.driver_data = (long)&((const struct atomisp_sensor_config) { \
			.lanes = _LANES, \
			.vcm = _VCM, \
		}) \
	}
/*
 * gmin_cfg parsing code. This is a cleaned up version of the gmin_cfg parsing
 * code from atomisp_gmin_platform.c.
 * Once all sensors are moved to v4l2-async probing atomisp_gmin_platform.c can
 * be removed and the duplication of this code goes away.
 */
struct gmin_cfg_var {
	const char *acpi_dev_name;	/* ACPI device instance, e.g. "OVTI2680:01" */
	const char *key;		/* gmin_cfg key, e.g. "CsiPort" */
	const char *val;		/* override value as a string */
};

/* Overrides for boards where the firmware-provided gmin_cfg data is wrong */
static struct gmin_cfg_var lenovo_ideapad_miix_310_vars[] = {
	/* _DSM contains the wrong CsiPort! */
	{ "OVTI2680:01", "CsiPort", "0" },
	{}
};

/* DMI matches mapping boards to their gmin_cfg override tables */
static const struct dmi_system_id gmin_cfg_dmi_overrides[] = {
	{
		/* Lenovo Ideapad Miix 310 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10"),
		},
		.driver_data = lenovo_ideapad_miix_310_vars,
	},
	{}
};
/*
 * Look up @key in the sensor's atomisp _DSM key/value string table.
 *
 * Returns a kmalloc'ed copy of the value (caller must kfree()), or NULL
 * when the key is absent, the _DSM is malformed, or allocation fails.
 */
static char *gmin_cfg_get_dsm(struct acpi_device *adev, const char *key)
{
	union acpi_object *obj, *key_el, *val_el;
	char *val = NULL;
	int i;

	obj = acpi_evaluate_dsm_typed(adev->handle, &atomisp_dsm_guid, 0, 0,
				      NULL, ACPI_TYPE_PACKAGE);
	if (!obj)
		return NULL;

	/*
	 * Entries come in key/value string pairs. Using "i + 1 <" instead of
	 * "i < count - 1" avoids the unsigned wrap-around of "count - 1"
	 * (package.count is unsigned) for an empty package, which would
	 * otherwise make the loop read out of bounds.
	 */
	for (i = 0; i + 1 < obj->package.count; i += 2) {
		key_el = &obj->package.elements[i + 0];
		val_el = &obj->package.elements[i + 1];

		/* Stop at the first non key/value-string pair */
		if (key_el->type != ACPI_TYPE_STRING || val_el->type != ACPI_TYPE_STRING)
			break;

		if (!strcmp(key_el->string.pointer, key)) {
			val = kstrdup(val_el->string.pointer, GFP_KERNEL);
			if (!val)
				break;

			acpi_handle_info(adev->handle, "%s: Using DSM entry %s=%s\n",
					 dev_name(&adev->dev), key, val);
			break;
		}
	}

	ACPI_FREE(obj);
	return val;
}
static char *gmin_cfg_get_dmi_override(struct acpi_device *adev, const char *key)
{
const struct dmi_system_id *id;
struct gmin_cfg_var *gv;
id = dmi_first_match(gmin_cfg_dmi_overrides);
if (!id)
return NULL;
for (gv = id->driver_data; gv->acpi_dev_name; gv++) {
if (strcmp(gv->acpi_dev_name, acpi_dev_name(adev)))
continue;
if (strcmp(key, gv->key))
continue;
acpi_handle_info(adev->handle, "%s: Using DMI entry %s=%s\n",
dev_name(&adev->dev), key, gv->val);
return kstrdup(gv->val, GFP_KERNEL);
}
return NULL;
}
/*
 * Get a gmin_cfg value for @key, DMI board-quirks taking precedence over
 * the sensor's _DSM data. Caller must kfree() the returned string.
 */
static char *gmin_cfg_get(struct acpi_device *adev, const char *key)
{
	char *val = gmin_cfg_get_dmi_override(adev, key);

	return val ? val : gmin_cfg_get_dsm(adev, key);
}
/*
 * Get a gmin_cfg value for @key parsed as an integer, falling back to
 * @default_val when the key is absent or does not parse.
 */
static int gmin_cfg_get_int(struct acpi_device *adev, const char *key, int default_val)
{
	char *str_val;
	long int_val;
	int ret;

	str_val = gmin_cfg_get(adev, key);
	if (!str_val)
		goto out_use_default;

	/*
	 * kstrtol() matches the "long" result variable; kstrtoul() takes an
	 * "unsigned long *" and passing &int_val to it was a pointer-type
	 * mismatch.
	 */
	ret = kstrtol(str_val, 0, &int_val);
	kfree(str_val);
	if (ret)
		goto out_use_default;

	return int_val;

out_use_default:
	acpi_handle_info(adev->handle, "%s: Using default %s=%d\n",
			 dev_name(&adev->dev), key, default_val);
	return default_val;
}
/*
 * Find the PMC platform-clock number used by this sensor by walking the
 * sensor's _PR0 (power resources for D0) package and looking for a power
 * resource named "CLK0".."CLK4".
 *
 * Returns the clock number (0-4) or -ENOENT when no such resource exists.
 */
static int atomisp_csi2_get_pmc_clk_nr_from_acpi_pr0(struct acpi_device *adev)
{
	/* ACPI_PATH_SEGMENT_LENGTH is guaranteed to be big enough for name + 0 term. */
	char name[ACPI_PATH_SEGMENT_LENGTH];
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_buffer b_name = { sizeof(name), name };
	union acpi_object *package, *element;
	int i, ret = -ENOENT;
	acpi_handle rhandle;
	acpi_status status;
	u8 clock_num;

	status = acpi_evaluate_object_typed(adev->handle, "_PR0", NULL, &buffer, ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status))
		return -ENOENT;

	package = buffer.pointer;
	for (i = 0; i < package->package.count; i++) {
		element = &package->package.elements[i];
		/* _PR0 entries are references to power-resource objects */
		if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
			continue;

		rhandle = element->reference.handle;
		if (!rhandle)
			continue;

		/* Match resources named "CLK<n>" with n in 0..4 */
		acpi_get_name(rhandle, ACPI_SINGLE_NAME, &b_name);
		if (str_has_prefix(name, "CLK") && !kstrtou8(&name[3], 10, &clock_num) &&
		    clock_num <= 4) {
			ret = clock_num;
			break;
		}
	}

	ACPI_FREE(buffer.pointer);

	if (ret < 0)
		acpi_handle_warn(adev->handle, "%s: Could not find PMC clk in _PR0\n",
				 dev_name(&adev->dev));

	return ret;
}
/*
 * Set PMC platform clock @clock_num to the 19.2 MHz sensor master-clock
 * rate. A negative @clock_num (no clock found) is silently accepted.
 *
 * Returns 0 on success or a negative errno.
 */
static int atomisp_csi2_set_pmc_clk_freq(struct acpi_device *adev, int clock_num)
{
	struct clk *clk;
	char name[14];
	int ret;

	if (clock_num < 0)
		return 0;

	snprintf(name, sizeof(name), "pmc_plt_clk_%d", clock_num);

	clk = clk_get(NULL, name);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		acpi_handle_err(adev->handle, "%s: Error getting clk %s: %d\n",
				dev_name(&adev->dev), name, ret);
		return ret;
	}

	/*
	 * The firmware might enable the clock at boot, to change
	 * the rate we must ensure the clock is disabled.
	 */
	ret = clk_prepare_enable(clk);
	if (!ret) {
		clk_disable_unprepare(clk);
		ret = clk_set_rate(clk, PMC_CLK_RATE_19_2MHZ);
	}

	if (ret)
		acpi_handle_err(adev->handle, "%s: Error setting clk-rate for %s: %d\n",
				dev_name(&adev->dev), name, ret);

	clk_put(clk);
	return ret;
}
/*
 * Determine which CSI port the sensor is connected to, deriving a default
 * from the PMC clock number and letting gmin_cfg ("CsiPort") override it.
 */
static int atomisp_csi2_get_port(struct acpi_device *adev, int clock_num)
{
	int default_port = 0;

	/*
	 * Compare clock-number to the PMC-clock used for CsiPort 1
	 * in the CHT/BYT reference designs.
	 */
	if (IS_ISP2401) {
		if (clock_num == 4)
			default_port = 1;
	} else {
		if (clock_num == 0)
			default_port = 1;
	}

	/* Intel DSM or DMI quirk overrides _PR0 CLK derived default */
	return gmin_cfg_get_int(adev, "CsiPort", default_port);
}
/* Note this always returns 1 to continue looping so that res_count is accurate */
static int atomisp_csi2_handle_acpi_gpio_res(struct acpi_resource *ares, void *_data)
{
	struct atomisp_csi2_acpi_gpio_parsing_data *data = _data;
	struct acpi_resource_gpio *agpio;
	const char *name;
	bool active_low;
	unsigned int i;
	u32 settings = 0;
	u16 pin;

	if (!acpi_gpio_get_io_resource(ares, &agpio))
		return 1; /* Not a GPIO, continue the loop */

	data->res_count++;

	/* Look up the _DSM settings word for this resource's pin number */
	pin = agpio->pin_table[0];
	for (i = 0; i < data->settings_count; i++) {
		if (INTEL_GPIO_DSM_PIN(data->settings[i]) == pin) {
			settings = data->settings[i];
			break;
		}
	}

	if (i == data->settings_count) {
		acpi_handle_warn(data->adev->handle,
				 "%s: Could not find DSM GPIO settings for pin %u\n",
				 dev_name(&data->adev->dev), pin);
		return 1;
	}

	/* Map the _DSM GPIO type to the con-id gpiod_get() will use */
	switch (INTEL_GPIO_DSM_TYPE(settings)) {
	case 0:
		name = "reset-gpios";
		break;
	case 1:
		name = "powerdown-gpios";
		break;
	default:
		acpi_handle_warn(data->adev->handle, "%s: Unknown GPIO type 0x%02lx for pin %u\n",
				 dev_name(&data->adev->dev),
				 INTEL_GPIO_DSM_TYPE(settings), pin);
		return 1;
	}

	/*
	 * Both reset and power-down need to be logical false when the sensor
	 * is on (sensor should not be in reset and not be powered-down). So
	 * when the sensor-on-value (which is the physical pin value) is high,
	 * then the signal is active-low.
	 */
	active_low = INTEL_GPIO_DSM_SENSOR_ON_VAL(settings);

	/* Silently drop GPIOs beyond the fixed-size map (count warned later) */
	i = data->map_count;
	if (i == CSI2_MAX_ACPI_GPIOS)
		return 1;

	/* res_count is already incremented */
	data->map->params[i].crs_entry_index = data->res_count - 1;
	data->map->params[i].active_low = active_low;

	data->map->mapping[i].name = name;
	data->map->mapping[i].data = &data->map->params[i];
	data->map->mapping[i].size = 1;
	data->map_count++;

	acpi_handle_info(data->adev->handle, "%s: %s crs %d %s pin %u active-%s\n",
			 dev_name(&data->adev->dev), name,
			 data->res_count - 1, agpio->resource_source.string_ptr,
			 pin, active_low ? "low" : "high");

	return 1;
}
/*
* Helper function to create an ACPI GPIO lookup table for sensor reset and
* powerdown signals on Intel Bay Trail (BYT) and Cherry Trail (CHT) devices,
* including setting the correct polarity for the GPIO.
*
* This uses the "79234640-9e10-4fea-a5c1-b5aa8b19756f" DSM method directly
* on the sensor device's ACPI node. This is different from later Intel
* hardware which has a separate INT3472 acpi_device with this info.
*
* This function must be called before creating the sw-noded describing
* the fwnode graph endpoint. And sensor drivers used on these devices
* must return -EPROBE_DEFER when there is no endpoint description yet.
* Together this guarantees that the GPIO lookups are in place before
* the sensor driver tries to get GPIOs with gpiod_get().
*
* Note this code uses the same DSM GUID as the int3472_gpio_guid in
* the INT3472 discrete.c code and there is some overlap, but there are
* enough differences that it is difficult to share the code.
*/
static int atomisp_csi2_add_gpio_mappings(struct acpi_device *adev)
{
struct atomisp_csi2_acpi_gpio_parsing_data data = { };
LIST_HEAD(resource_list);
union acpi_object *obj;
unsigned int i, j;
int ret;
obj = acpi_evaluate_dsm_typed(adev->handle, &intel_sensor_module_guid,
0x00, 1, NULL, ACPI_TYPE_STRING);
if (obj) {
acpi_handle_info(adev->handle, "%s: Sensor module id: '%s'\n",
dev_name(&adev->dev), obj->string.pointer);
ACPI_FREE(obj);
}
/*
* First get the GPIO-settings count and then get count GPIO-settings
* values. Note the order of these may differ from the order in which
* the GPIOs are listed on the ACPI resources! So we first store them all
* and then enumerate the ACPI resources and match them up by pin number.
*/
obj = acpi_evaluate_dsm_typed(adev->handle,
&intel_sensor_gpio_info_guid, 0x00, 1,
NULL, ACPI_TYPE_INTEGER);
if (!obj) {
acpi_handle_err(adev->handle, "%s: No _DSM entry for GPIO pin count\n",
dev_name(&adev->dev));
return -EIO;
}
data.settings_count = obj->integer.value;
ACPI_FREE(obj);
if (data.settings_count > CSI2_MAX_ACPI_GPIOS) {
acpi_handle_err(adev->handle, "%s: Too many GPIOs %u > %u\n",
dev_name(&adev->dev), data.settings_count,
CSI2_MAX_ACPI_GPIOS);
return -EOVERFLOW;
}
for (i = 0; i < data.settings_count; i++) {
/*
* i + 2 because the index of this _DSM function is 1-based
* and the first function is just a count.
*/
obj = acpi_evaluate_dsm_typed(adev->handle,
&intel_sensor_gpio_info_guid,
0x00, i + 2,
NULL, ACPI_TYPE_INTEGER);
if (!obj) {
acpi_handle_err(adev->handle, "%s: No _DSM entry for pin %u\n",
dev_name(&adev->dev), i);
return -EIO;
}
data.settings[i] = obj->integer.value;
ACPI_FREE(obj);
}
/* Since we match up by pin-number the pin-numbers must be unique */
for (i = 0; i < data.settings_count; i++) {
for (j = i + 1; j < data.settings_count; j++) {
if (INTEL_GPIO_DSM_PIN(data.settings[i]) !=
INTEL_GPIO_DSM_PIN(data.settings[j]))
continue;
acpi_handle_err(adev->handle, "%s: Duplicate pin number %lu\n",
dev_name(&adev->dev),
INTEL_GPIO_DSM_PIN(data.settings[i]));
return -EIO;
}
}
data.map = kzalloc(sizeof(*data.map), GFP_KERNEL);
if (!data.map)
return -ENOMEM;
/* Now parse the ACPI resources and build the lookup table */
data.adev = adev;
ret = acpi_dev_get_resources(adev, &resource_list,
atomisp_csi2_handle_acpi_gpio_res, &data);
if (ret < 0)
return ret;
acpi_dev_free_resource_list(&resource_list);
if (data.map_count != data.settings_count ||
data.res_count != data.settings_count)
acpi_handle_warn(adev->handle, "%s: ACPI GPIO resources vs DSM GPIO-info count mismatch (dsm: %d res: %d map %d\n",
dev_name(&adev->dev), data.settings_count,
data.res_count, data.map_count);
ret = acpi_dev_add_driver_gpios(adev, data.map->mapping);
if (ret)
acpi_handle_err(adev->handle, "%s: Error adding driver GPIOs: %d\n",
dev_name(&adev->dev), ret);
return ret;
}
/*
 * Query the sensor's VCM-type _DSM and return the type string lower-cased
 * in a kmalloc'ed buffer (caller must kfree()), or NULL on any failure.
 */
static char *atomisp_csi2_get_vcm_type(struct acpi_device *adev)
{
	union acpi_object *obj;
	char *vcm_type = NULL;

	obj = acpi_evaluate_dsm_typed(adev->handle, &vcm_dsm_guid, 0, 0,
				      NULL, ACPI_TYPE_STRING);
	if (obj) {
		vcm_type = kstrdup(obj->string.pointer, GFP_KERNEL);
		ACPI_FREE(obj);
	}

	if (vcm_type)
		string_lower(vcm_type, vcm_type);

	return vcm_type;
}
/* Sensors needing non-default lane-count / VCM settings, keyed by ACPI HID */
static const struct acpi_device_id atomisp_sensor_configs[] = {
	ATOMISP_SENSOR_CONFIG("INT33BE", 2, true),	/* OV5693 */
	{}
};
/*
 * ipu_bridge callback: fill in the ipu_sensor description (CSI port, lane
 * count, mclk, orientation, VCM type) for one sensor ACPI device and do the
 * one-time hardware prep (PMC clock rate, GPIO mappings).
 *
 * Returns 0 on success or a negative errno.
 */
static int atomisp_csi2_parse_sensor_fwnode(struct acpi_device *adev,
					    struct ipu_sensor *sensor)
{
	const struct acpi_device_id *id;
	int ret, clock_num;
	bool vcm = false;
	int lanes = 1;

	/* Per-model overrides of the 1-lane / no-VCM defaults */
	id = acpi_match_acpi_device(atomisp_sensor_configs, adev);
	if (id) {
		struct atomisp_sensor_config *cfg =
			(struct atomisp_sensor_config *)id->driver_data;

		lanes = cfg->lanes;
		vcm = cfg->vcm;
	}

	/*
	 * ACPI takes care of turning the PMC clock on and off, but on BYT
	 * the clock defaults to 25 MHz instead of the expected 19.2 MHz.
	 * Get the PMC-clock number from ACPI PR0 method and set it to 19.2 MHz.
	 * The PMC-clock number is also used to determine the default CSI port.
	 */
	clock_num = atomisp_csi2_get_pmc_clk_nr_from_acpi_pr0(adev);

	ret = atomisp_csi2_set_pmc_clk_freq(adev, clock_num);
	if (ret)
		return ret;

	sensor->link = atomisp_csi2_get_port(adev, clock_num);
	if (sensor->link >= ATOMISP_CAMERA_NR_PORTS) {
		acpi_handle_err(adev->handle, "%s: Invalid port: %u\n",
				dev_name(&adev->dev), sensor->link);
		return -EINVAL;
	}

	/* gmin_cfg "CsiLanes" overrides the per-model default */
	sensor->lanes = gmin_cfg_get_int(adev, "CsiLanes", lanes);
	if (sensor->lanes > IPU_MAX_LANES) {
		acpi_handle_err(adev->handle, "%s: Invalid lane-count: %d\n",
				dev_name(&adev->dev), sensor->lanes);
		return -EINVAL;
	}

	ret = atomisp_csi2_add_gpio_mappings(adev);
	if (ret)
		return ret;

	sensor->mclkspeed = PMC_CLK_RATE_19_2MHZ;
	sensor->rotation = 0;
	/* Reference designs put the world-facing sensor on port 1 */
	sensor->orientation = (sensor->link == 1) ?
		V4L2_FWNODE_ORIENTATION_BACK : V4L2_FWNODE_ORIENTATION_FRONT;

	if (vcm)
		sensor->vcm_type = atomisp_csi2_get_vcm_type(adev);

	return 0;
}
/*
 * Create the software fwnode graph connecting the ISP to its sensors.
 * Returns 0 on success (or when already done) or a negative errno.
 */
int atomisp_csi2_bridge_init(struct atomisp_device *isp)
{
	struct fwnode_handle *fwnode = dev_fwnode(isp->dev);

	/*
	 * This function is intended to run only once and then leave
	 * the created nodes attached even after a rmmod, therefore:
	 * 1. The bridge memory is leaked deliberately on success
	 * 2. If a secondary fwnode is already set exit early.
	 */
	if (fwnode && fwnode->secondary)
		return 0;

	return ipu_bridge_init(isp->dev, atomisp_csi2_parse_sensor_fwnode);
}
/******* V4L2 sub-device asynchronous registration callbacks***********/

/* Per-sensor async connection, remembering which CSI port it sits on */
struct sensor_async_subdev {
	struct v4l2_async_connection asd;
	int port;
};

/* Container-of helpers for the notifier callbacks below */
#define to_sensor_asd(a)	container_of(a, struct sensor_async_subdev, asd)
#define notifier_to_atomisp(n)	container_of(n, struct atomisp_device, notifier)
/* .bound() notifier callback when a match is found */
static int atomisp_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_connection *asd)
{
struct atomisp_device *isp = notifier_to_atomisp(notifier);
struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
int ret;
if (s_asd->port >= ATOMISP_CAMERA_NR_PORTS) {
dev_err(isp->dev, "port %d not supported\n", s_asd->port);
return -EINVAL;
}
if (isp->sensor_subdevs[s_asd->port]) {
dev_err(isp->dev, "port %d already has a sensor attached\n", s_asd->port);
return -EBUSY;
}
ret = ipu_bridge_instantiate_vcm(sd->dev);
if (ret)
return ret;
isp->sensor_subdevs[s_asd->port] = sd;
return 0;
}
/* The .unbind callback */
static void atomisp_notifier_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_connection *asd)
{
struct atomisp_device *isp = notifier_to_atomisp(notifier);
struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
isp->sensor_subdevs[s_asd->port] = NULL;
}
/* .complete() is called after all subdevices have been located */
static int atomisp_notifier_complete(struct v4l2_async_notifier *notifier)
{
	/* All sensors bound: register the video device nodes */
	return atomisp_register_device_nodes(notifier_to_atomisp(notifier));
}
/* v4l2-async notifier callbacks used by atomisp_csi2_bridge_parse_firmware() */
static const struct v4l2_async_notifier_operations atomisp_async_ops = {
	.bound = atomisp_notifier_bound,
	.unbind = atomisp_notifier_unbind,
	.complete = atomisp_notifier_complete,
};
/*
 * Walk the ISP's fwnode graph endpoints (one per CSI port) and register an
 * async sub-device connection for each sensor found, so the notifier
 * callbacks above fire as the sensor drivers probe.
 *
 * Returns 0 on success or a negative errno.
 */
int atomisp_csi2_bridge_parse_firmware(struct atomisp_device *isp)
{
	int i, mipi_port, ret;

	v4l2_async_nf_init(&isp->notifier, &isp->v4l2_dev);
	isp->notifier.ops = &atomisp_async_ops;

	for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) {
		struct v4l2_fwnode_endpoint vep = {
			.bus_type = V4L2_MBUS_CSI2_DPHY,
		};
		struct sensor_async_subdev *s_asd;
		struct fwnode_handle *ep;

		ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(isp->dev), i, 0,
						     FWNODE_GRAPH_ENDPOINT_NEXT);
		if (!ep)
			continue; /* No sensor on this port */

		ret = v4l2_fwnode_endpoint_parse(ep, &vep);
		if (ret)
			goto err_parse;

		if (vep.base.port >= ATOMISP_CAMERA_NR_PORTS) {
			dev_err(isp->dev, "port %d not supported\n", vep.base.port);
			ret = -EINVAL;
			goto err_parse;
		}

		mipi_port = atomisp_port_to_mipi_port(isp, vep.base.port);
		isp->sensor_lanes[mipi_port] = vep.bus.mipi_csi2.num_data_lanes;

		s_asd = v4l2_async_nf_add_fwnode_remote(&isp->notifier, ep,
							struct sensor_async_subdev);
		if (IS_ERR(s_asd)) {
			ret = PTR_ERR(s_asd);
			goto err_parse;
		}

		s_asd->port = vep.base.port;

		fwnode_handle_put(ep);
		continue;

err_parse:
		/* Drop the endpoint reference taken above, then bail out */
		fwnode_handle_put(ep);
		return ret;
	}

	return 0;
}
| linux-master | drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "ia_css_mmu.h"
#include "ia_css_mmu_private.h"
#include <ia_css_debug.h>
#include "sh_css_sp.h"
#include "sh_css_firmware.h"
#include "sp.h"
#include "mmu_device.h"
/*
 * Ask the SP's DMA proxy to invalidate the MMU TLB. Only touches SP dmem
 * when the SP is actually running.
 */
void
ia_css_mmu_invalidate_cache(void)
{
	const struct ia_css_fw_info *fw = &sh_css_sp_fw;
	unsigned int HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_mmu_invalidate_cache() enter\n");

	/* if the SP is not running we should not access its dmem */
	if (sh_css_sp_is_running()) {
		HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb = fw->info.sp.invalidate_tlb;

		(void)HIVE_ADDR_ia_css_dmaproxy_sp_invalidate_tlb; /* Suppress warnings in CRUN */

		/* Setting the flag to true triggers the invalidate on the SP side */
		sp_dmem_store_uint32(SP0_ID,
				     (unsigned int)sp_address_of(ia_css_dmaproxy_sp_invalidate_tlb),
				     true);
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_mmu_invalidate_cache() leave\n");
}
/*
 * Program the page-table base index on every MMU instance and invalidate
 * each MMU's TLB so the new tables take effect immediately.
 */
void
sh_css_mmu_set_page_table_base_index(hrt_data base_index)
{
	int i;

	IA_CSS_ENTER_PRIVATE("base_index=0x%08x\n", base_index);

	for (i = 0; i < N_MMU_ID; i++) {
		mmu_set_page_table_base_index((mmu_ID_t)i, base_index);
		mmu_invalidate_cache((mmu_ID_t)i);
	}

	IA_CSS_LEAVE_PRIVATE("");
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_mmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
/* Generated code: do not edit or commit. */
#define IA_CSS_INCLUDE_CONFIGURATIONS
#include "ia_css_pipeline.h"
#include "ia_css_isp_configs.h"
#include "ia_css_debug.h"
#include "assert_support.h"
/* Copy the iterator configuration into the binary's CONFIG-class DMEM params */
int ia_css_configure_iterator(const struct ia_css_binary *binary,
			      const struct ia_css_iterator_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	/* Binary may have no CONFIG-class offsets, or no iterator section */
	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.iterator.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.iterator.offset;
	ia_css_iterator_config((struct sh_css_isp_iterator_isp_config *)
			       &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			       config_dmem, size);
	return 0;
}
/* Copy the copy_output configuration into the binary's CONFIG-class DMEM params */
int ia_css_configure_copy_output(const struct ia_css_binary *binary,
				 const struct ia_css_copy_output_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.copy_output.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.copy_output.offset;
	ia_css_copy_output_config((struct sh_css_isp_copy_output_isp_config *)
				  &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
				  config_dmem, size);
	return 0;
}
/* Code generated by genparam/genconfig.c:gen_configure_function() */
/* Copy the crop configuration into the binary's CONFIG-class DMEM params */
int ia_css_configure_crop(const struct ia_css_binary *binary,
			  const struct ia_css_crop_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.crop.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.crop.offset;
	ia_css_crop_config((struct sh_css_isp_crop_isp_config *)
			   &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			   config_dmem, size);
	return 0;
}
/* Copy the FPN (fixed pattern noise) configuration into CONFIG-class DMEM params */
int ia_css_configure_fpn(const struct ia_css_binary *binary,
			 const struct ia_css_fpn_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.fpn.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.fpn.offset;
	ia_css_fpn_config((struct sh_css_isp_fpn_isp_config *)
			  &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			  config_dmem, size);
	return 0;
}
/* Copy the DVS (digital video stabilization) configuration into CONFIG-class DMEM params */
int ia_css_configure_dvs(const struct ia_css_binary *binary,
			 const struct ia_css_dvs_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.dvs.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.dvs.offset;
	ia_css_dvs_config((struct sh_css_isp_dvs_isp_config *)
			  &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			  config_dmem, size);
	return 0;
}
/* Copy the qplane configuration into the binary's CONFIG-class DMEM params */
int ia_css_configure_qplane(const struct ia_css_binary *binary,
			    const struct ia_css_qplane_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.qplane.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.qplane.offset;
	ia_css_qplane_config((struct sh_css_isp_qplane_isp_config *)
			     &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			     config_dmem, size);
	return 0;
}
/* Copy the output0 (first output pin) configuration into CONFIG-class DMEM params */
int ia_css_configure_output0(const struct ia_css_binary *binary,
			     const struct ia_css_output0_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.output0.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.output0.offset;
	ia_css_output0_config((struct sh_css_isp_output_isp_config *)
			      &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			      config_dmem, size);
	return 0;
}
/* Copy the output1 (second output pin) configuration into CONFIG-class DMEM params */
int ia_css_configure_output1(const struct ia_css_binary *binary,
			     const struct ia_css_output1_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.output1.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.output1.offset;
	ia_css_output1_config((struct sh_css_isp_output_isp_config *)
			      &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			      config_dmem, size);
	return 0;
}
/* Copy the generic output configuration into the binary's CONFIG-class DMEM params */
int ia_css_configure_output(const struct ia_css_binary *binary,
			    const struct ia_css_output_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.output.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.output.offset;
	ia_css_output_config((struct sh_css_isp_output_isp_config *)
			     &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			     config_dmem, size);
	return 0;
}
/* Copy the raw-input configuration into the binary's CONFIG-class DMEM params */
int ia_css_configure_raw(const struct ia_css_binary *binary,
			 const struct ia_css_raw_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.raw.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.raw.offset;
	ia_css_raw_config((struct sh_css_isp_raw_isp_config *)
			  &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			  config_dmem, size);
	return 0;
}
/* Copy the TNR (temporal noise reduction) configuration into CONFIG-class DMEM params */
int ia_css_configure_tnr(const struct ia_css_binary *binary,
			 const struct ia_css_tnr_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.tnr.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.tnr.offset;
	ia_css_tnr_config((struct sh_css_isp_tnr_isp_config *)
			  &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			  config_dmem, size);
	return 0;
}
/* Copy the reference-frame configuration into the binary's CONFIG-class DMEM params */
int ia_css_configure_ref(const struct ia_css_binary *binary,
			 const struct ia_css_ref_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.ref.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.ref.offset;
	ia_css_ref_config((struct sh_css_isp_ref_isp_config *)
			  &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			  config_dmem, size);
	return 0;
}
/* Copy the viewfinder configuration into the binary's CONFIG-class DMEM params */
int ia_css_configure_vf(const struct ia_css_binary *binary,
			const struct ia_css_vf_configuration *config_dmem)
{
	unsigned int offset = 0;
	unsigned int size = 0;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s:\n", __func__);

	if (!binary->info->mem_offsets.offsets.config)
		return 0;

	size = binary->info->mem_offsets.offsets.config->dmem.vf.size;
	if (!size)
		return 0;

	offset = binary->info->mem_offsets.offsets.config->dmem.vf.offset;
	ia_css_vf_config((struct sh_css_isp_vf_isp_config *)
			 &binary->mem_params.params[IA_CSS_PARAM_CLASS_CONFIG][IA_CSS_ISP_DMEM].address[offset],
			 config_dmem, size);
	return 0;
}
| linux-master | drivers/staging/media/atomisp/pci/ia_css_isp_configs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "platform_support.h"
#include "sh_css_hrt.h"
#include "ia_css_debug.h"
#include "device_access.h"
#define __INLINE_EVENT__
#include "event_fifo.h"
#define __INLINE_SP__
#include "sp.h"
#define __INLINE_ISP__
#include "isp.h"
#define __INLINE_IRQ__
#include "irq.h"
#define __INLINE_FIFO_MONITOR__
#include "fifo_monitor.h"
/* System independent */
#include "sh_css_internal.h"
/*
 * Return true when the SP and ISP cores are idle and every FIFO channel
 * is empty; logs a warning for each busy component found.
 */
bool sh_css_hrt_system_is_idle(void)
{
	fifo_channel_t ch;
	bool busy = false;

	if (!sp_ctrl_getbit(SP0_ID, SP_SC_REG, SP_IDLE_BIT)) {
		IA_CSS_WARNING("SP not idle");
		busy = true;
	}

	if (!isp_ctrl_getbit(ISP0_ID, ISP_SC_REG, ISP_IDLE_BIT)) {
		IA_CSS_WARNING("ISP not idle");
		busy = true;
	}

	for (ch = 0; ch < N_FIFO_CHANNEL; ch++) {
		fifo_channel_state_t state;

		fifo_channel_get_state(FIFO_MONITOR0_ID, ch, &state);
		if (state.fifo_valid) {
			IA_CSS_WARNING("FIFO channel %d is not empty", ch);
			busy = true;
		}
	}

	return !busy;
}
/*
 * Busy-wait (1 us polling) until the SP is idle or a SW2 interrupt is
 * pending. Always returns 0.
 */
int sh_css_hrt_sp_wait(void)
{
	irq_sw_channel_id_t irq_id = IRQ_SW_CHANNEL0_ID;

	/*
	 * Wait till SP is idle or till there is a SW2 interrupt
	 * The SW2 interrupt will be used when frameloop runs on SP
	 * and signals an event with similar meaning as SP idle
	 * (e.g. frame_done)
	 */
	while (!sp_ctrl_getbit(SP0_ID, SP_SC_REG, SP_IDLE_BIT) &&
	       ((irq_reg_load(IRQ0_ID,
			      _HRT_IRQ_CONTROLLER_STATUS_REG_IDX) &
		 (1U << (irq_id + IRQ_SW_CHANNEL_OFFSET))) == 0)) {
		udelay(1);
	}

	return 0;
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_hrt.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "gdc_device.h" /* gdc_lut_store(), ... */
#include "isp.h" /* ISP_VEC_ELEMBITS */
#include "vamem.h"
#ifndef __INLINE_HMEM__
#define __INLINE_HMEM__
#endif
#include "hmem.h"
#define IA_CSS_INCLUDE_PARAMETERS
#define IA_CSS_INCLUDE_ACC_PARAMETERS
#include "hmm.h"
#include "sh_css_params.h"
#include "ia_css_queue.h"
#include "sw_event_global.h" /* Event IDs */
#include "platform_support.h"
#include "assert_support.h"
#include "misc_support.h" /* NOT_USED */
#include "math_support.h" /* max(), min() EVEN_FLOOR()*/
#include "ia_css_stream.h"
#include "sh_css_params_internal.h"
#include "sh_css_param_shading.h"
#include "sh_css_param_dvs.h"
#include "ia_css_refcount.h"
#include "sh_css_internal.h"
#include "ia_css_control.h"
#include "ia_css_shading.h"
#include "sh_css_defs.h"
#include "sh_css_sp.h"
#include "ia_css_pipeline.h"
#include "ia_css_debug.h"
#include "ia_css_isp_param.h"
#include "ia_css_isp_params.h"
#include "ia_css_mipi.h"
#include "ia_css_morph.h"
#include "ia_css_host_data.h"
#include "ia_css_pipe.h"
#include "ia_css_pipe_binarydesc.h"
/* Include all kernel host interfaces for ISP1 */
#include "anr/anr_1.0/ia_css_anr.host.h"
#include "cnr/cnr_1.0/ia_css_cnr.host.h"
#include "csc/csc_1.0/ia_css_csc.host.h"
#include "de/de_1.0/ia_css_de.host.h"
#include "dp/dp_1.0/ia_css_dp.host.h"
#include "bnr/bnr_1.0/ia_css_bnr.host.h"
#include "dvs/dvs_1.0/ia_css_dvs.host.h"
#include "fpn/fpn_1.0/ia_css_fpn.host.h"
#include "gc/gc_1.0/ia_css_gc.host.h"
#include "macc/macc_1.0/ia_css_macc.host.h"
#include "ctc/ctc_1.0/ia_css_ctc.host.h"
#include "ob/ob_1.0/ia_css_ob.host.h"
#include "raw/raw_1.0/ia_css_raw.host.h"
#include "fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h"
#include "s3a/s3a_1.0/ia_css_s3a.host.h"
#include "sc/sc_1.0/ia_css_sc.host.h"
#include "sdis/sdis_1.0/ia_css_sdis.host.h"
#include "tnr/tnr_1.0/ia_css_tnr.host.h"
#include "uds/uds_1.0/ia_css_uds_param.h"
#include "wb/wb_1.0/ia_css_wb.host.h"
#include "ynr/ynr_1.0/ia_css_ynr.host.h"
#include "xnr/xnr_1.0/ia_css_xnr.host.h"
/* Include additional kernel host interfaces for ISP2 */
#include "aa/aa_2/ia_css_aa2.host.h"
#include "anr/anr_2/ia_css_anr2.host.h"
#include "bh/bh_2/ia_css_bh.host.h"
#include "cnr/cnr_2/ia_css_cnr2.host.h"
#include "ctc/ctc1_5/ia_css_ctc1_5.host.h"
#include "de/de_2/ia_css_de2.host.h"
#include "gc/gc_2/ia_css_gc2.host.h"
#include "sdis/sdis_2/ia_css_sdis2.host.h"
#include "ynr/ynr_2/ia_css_ynr2.host.h"
#include "fc/fc_1.0/ia_css_formats.host.h"
#include "xnr/xnr_3.0/ia_css_xnr3.host.h"
#include "sh_css_frac.h"
#include "ia_css_bufq.h"
/*
 * Byte size of the fixed-pattern-noise table for @binary: one byte per
 * pixel of the binary's input frame, using the padded line width.
 * array3_size() saturates to SIZE_MAX on multiplication overflow.
 */
static size_t fpntbl_bytes(const struct ia_css_binary *binary)
{
	size_t height = binary->in_frame_info.res.height;
	size_t width  = binary->in_frame_info.padded_width;

	return array3_size(sizeof(char), height, width);
}
/*
 * Byte size of the shading-correction table for @binary:
 * height x aligned-width-per-color x IA_CSS_SC_NUM_COLORS entries of
 * unsigned short. Computed with the overflow-saturating helpers so an
 * oversized table yields SIZE_MAX rather than a wrapped allocation size.
 */
static size_t sctbl_bytes(const struct ia_css_binary *binary)
{
	size_t entries = array3_size(binary->sctbl_height,
				     binary->sctbl_aligned_width_per_color,
				     IA_CSS_SC_NUM_COLORS);

	return size_mul(sizeof(unsigned short), entries);
}
/*
 * Byte size of one morph-table plane for @binary:
 * SH_CSS_MORPH_TABLE_ELEM_BYTES per entry over the aligned width x height
 * grid. array3_size() saturates to SIZE_MAX on overflow.
 */
static size_t morph_plane_bytes(const struct ia_css_binary *binary)
{
	size_t aligned_width = binary->morph_tbl_aligned_width;
	size_t height        = binary->morph_tbl_height;

	return array3_size(SH_CSS_MORPH_TABLE_ELEM_BYTES,
			   aligned_width, height);
}
/* We keep a second copy of the ptr struct for the SP to access.
   Again, this would not be necessary on the chip. */
static ia_css_ptr sp_ddr_ptrs;
/* sp group address on DDR */
static ia_css_ptr xmem_sp_group_ptrs;
/* Per-pipe, per-stage DDR addresses of the SP stage descriptors.
 * NOTE(review): indexed [pipe id][stage]; presumably written once at
 * stage-load time and read by the SP — confirm against the users below. */
static ia_css_ptr xmem_sp_stage_ptrs[IA_CSS_PIPE_ID_NUM]
[SH_CSS_MAX_STAGES];
/* Per-pipe, per-stage DDR addresses of the ISP stage descriptors
 * (same layout as xmem_sp_stage_ptrs above). */
static ia_css_ptr xmem_isp_stage_ptrs[IA_CSS_PIPE_ID_NUM]
[SH_CSS_MAX_STAGES];
/* DDR address of the default GDC (geometric distortion correction) LUT. */
static ia_css_ptr default_gdc_lut;
/* Scratch buffer for building an interleaved 4-plane GDC LUT of
 * HRT_GDC_N coefficients per plane (same shape as zoom_table below). */
static int interleaved_lut_temp[4][HRT_GDC_N];
/* END DO NOT MOVE INTO VIMALS_WORLD */
/* Digital Zoom lookup table. See documentation for more details about the
* contents of this table.
*/
static const int zoom_table[4][HRT_GDC_N] = {
{
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
-2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
-2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
-2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
-2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
-4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
-4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
-4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
-5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
-5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
-7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
-7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
-7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
-8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
-8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
-10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
-11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
-11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
-11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
-11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
-13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
-13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
-13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
-13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
-11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
-8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
-7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
-7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4, -7 << 4,
-5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
-5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4
},
{
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4,
2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4,
4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4
},
{
256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4,
256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4, 256 << 4,
255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4, 255 << 4,
254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4, 254 << 4,
253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4, 253 << 4,
252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4, 252 << 4,
250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4, 250 << 4,
248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4, 248 << 4,
246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4, 246 << 4,
244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4, 244 << 4,
241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4, 241 << 4,
239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4, 239 << 4,
236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4, 236 << 4,
232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4, 232 << 4,
229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4, 229 << 4,
225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4, 225 << 4,
222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4, 222 << 4,
218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4, 218 << 4,
213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4, 213 << 4,
209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4, 209 << 4,
205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4, 205 << 4,
200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4, 200 << 4,
195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4, 195 << 4,
191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4, 191 << 4,
186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4, 186 << 4,
181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4, 181 << 4,
176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4, 176 << 4,
170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4, 170 << 4,
165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4, 165 << 4,
160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4, 160 << 4,
154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4, 154 << 4,
149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4, 149 << 4,
144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4, 144 << 4,
138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4, 138 << 4,
132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4, 132 << 4,
127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4, 127 << 4,
121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4, 121 << 4,
116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4, 116 << 4,
110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4, 110 << 4,
105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4, 105 << 4,
99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4, 99 << 4,
94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4, 94 << 4,
88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4, 88 << 4,
83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4, 83 << 4,
78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4, 78 << 4,
73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4, 73 << 4,
67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4, 67 << 4,
62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4, 62 << 4,
58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4, 58 << 4,
53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4, 53 << 4,
48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4, 48 << 4,
43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4, 43 << 4,
39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4, 39 << 4,
35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4, 35 << 4,
31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4, 31 << 4,
27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4, 27 << 4,
23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4, 23 << 4,
19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4, 19 << 4,
16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4, 16 << 4,
12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4, 12 << 4,
9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4, 9 << 4,
7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4, 7 << 4,
4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4, 4 << 4,
2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4,
2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4, 2 << 4
},
{
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
-5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
-8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
-10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4, -10 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
-13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
-19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
-19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
-19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
-19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
-19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4, -19 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4, -18 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4, -17 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4, -16 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4, -15 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4, -14 << 4,
-13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
-13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4, -13 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4, -12 << 4,
-11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
-11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
-11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
-11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4, -11 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4, -9 << 4,
-8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
-8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4, -8 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4, -6 << 4,
-5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
-5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4, -5 << 4,
-4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
-4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
-4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4, -4 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4, -3 << 4,
-2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
-2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
-2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
-2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4, -2 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
-1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4, -1 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4,
1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4, 1 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4,
0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4, 0 << 4
}
};
/*
 * Default digital-zoom configuration: unity zoom (dx == dy == HRT_GDC_N)
 * and an all-zero zoom region, i.e. region-based zoom disabled.
 * (Stray '\' line continuations, apparently left over from a macro
 * conversion, removed from the initializer.)
 */
static const struct ia_css_dz_config default_dz_config = {
	HRT_GDC_N,
	HRT_GDC_N,
	{
		{0, 0},
		{0, 0},
	}
};
/* Default global motion vector: (0, 0), i.e. no motion compensation. */
static const struct ia_css_vector default_motion_config = {
0,
0
};
/* ------ deprecated(bz675) : from ------ */
/* Default shading settings: in-CSS shading-table conversion enabled
 * (legacy behaviour; see the bz675 deprecation markers around this block).
 */
static const struct ia_css_shading_settings default_shading_settings = {
1 /* enable shading table conversion in the css
(This matches the legacy way.) */
};
/* ------ deprecated(bz675) : to ------ */
/* Host-side handle for SKC DVS statistics: only the DDR buffer address
 * is kept here; the buffer contents live in ISP memory.
 */
struct ia_css_isp_skc_dvs_statistics {
ia_css_ptr p_data; /* DDR address of the statistics buffer */
};
/* Forward declarations for static helpers implemented later in this file. */
static int
ref_sh_css_ddr_address_map(
struct sh_css_ddr_address_map *map,
struct sh_css_ddr_address_map *out);
static int
write_ia_css_isp_parameter_set_info_to_ddr(
struct ia_css_isp_parameter_set_info *me,
ia_css_ptr *out);
static int
free_ia_css_isp_parameter_set_info(ia_css_ptr ptr);
static int
sh_css_params_write_to_ddr_internal(
struct ia_css_pipe *pipe,
unsigned int pipe_id,
struct ia_css_isp_parameters *params,
const struct ia_css_pipeline_stage *stage,
struct sh_css_ddr_address_map *ddr_map,
struct sh_css_ddr_address_map_size *ddr_map_size);
static int
sh_css_create_isp_params(struct ia_css_stream *stream,
struct ia_css_isp_parameters **isp_params_out);
static bool
sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
struct ia_css_isp_parameters *params,
bool use_default_config,
struct ia_css_pipe *pipe_in);
static int
sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
struct ia_css_isp_parameters *params,
const struct ia_css_isp_config *config,
struct ia_css_pipe *pipe_in);
static int
sh_css_set_global_isp_config_on_pipe(
struct ia_css_pipe *curr_pipe,
const struct ia_css_isp_config *config,
struct ia_css_pipe *pipe);
static int
sh_css_set_per_frame_isp_config_on_pipe(
struct ia_css_stream *stream,
const struct ia_css_isp_config *config,
struct ia_css_pipe *pipe);
/* Recomputes UDS/crop info when an explicit zoom region is configured;
 * out-parameters are marked inline below.
 */
static int
sh_css_update_uds_and_crop_info_based_on_zoom_region(
const struct ia_css_binary_info *info,
const struct ia_css_frame_info *in_frame_info,
const struct ia_css_frame_info *out_frame_info,
const struct ia_css_resolution *dvs_env,
const struct ia_css_dz_config *zoom,
const struct ia_css_vector *motion_vector,
struct sh_css_uds_info *uds, /* out */
struct sh_css_crop_pos *sp_out_crop_pos, /* out */
struct ia_css_resolution pipe_in_res,
bool enable_zoom);
/* Accessor for the file-scope sp_ddr_ptrs handle (the parameter DDR
 * address map).
 */
ia_css_ptr
sh_css_params_ddr_address_map(void)
{
return sp_ddr_ptrs;
}
/* ****************************************************
* Each coefficient is stored as 7bits to fit 2 of them into one
* ISP vector element, so we will store 4 coefficients on every
* memory word (32bits)
*
* 0: Coefficient 0 used bits
* 1: Coefficient 1 used bits
* 2: Coefficient 2 used bits
* 3: Coefficient 3 used bits
* x: not used
*
* xx33333332222222 | xx11111110000000
*
* ***************************************************
*/
/*
 * Allocate a host buffer and repack the FPN table from 16-bit entries
 * into the ISP layout: four 7-bit coefficients per 32-bit word,
 * xx33333332222222 | xx11111110000000 (see the block comment above).
 * Returns NULL on allocation failure; caller frees with
 * ia_css_host_data_free().
 */
static struct ia_css_host_data *
convert_allocate_fpntbl(struct ia_css_isp_parameters *params)
{
	struct ia_css_host_data *out;
	unsigned int row, col;
	unsigned int out_size;
	const short *src;
	u32 *dst;

	assert(params);

	src = params->fpn_config.data;
	out_size = params->fpn_config.height * params->fpn_config.width *
		   sizeof(uint32_t);

	out = ia_css_host_data_allocate(out_size);
	if (!out)
		return NULL;

	dst = (uint32_t *)out->address;
	for (row = 0; row < params->fpn_config.height; row++) {
		for (col = 0; col < params->fpn_config.width;
		     col += 4, src += 4, dst++) {
			/* Pack four coefficients into one memory word. */
			*dst = src[0] << 0 |
			       src[1] << 7 |
			       src[2] << 16 |
			       src[3] << 23;
		}
	}
	return out;
}
/*
 * Convert the current FPN table to ISP layout and store it in DDR at
 * @ptr. Returns 0 on success, -ENOMEM if the conversion buffer cannot
 * be allocated.
 */
static int
store_fpntbl(struct ia_css_isp_parameters *params, ia_css_ptr ptr)
{
	struct ia_css_host_data *tbl;

	assert(params);
	assert(ptr != mmgr_NULL);

	tbl = convert_allocate_fpntbl(params);
	if (!tbl) {
		IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
		return -ENOMEM;
	}

	ia_css_params_store_ia_css_host_data(ptr, tbl);
	ia_css_host_data_free(tbl);
	return 0;
}
/*
 * Derive the FPN shift value from the raw table: find the largest entry
 * (clamped to 13 bits for the search), compute the smallest right-shift
 * that brings it into 0..63, store it in fpn_config.shift and apply the
 * shift to every table entry in place.
 *
 * NOTE(review): the 0/8191 clamp below only influences the maxval
 * computation -- the table entries themselves are NOT clamped, and the
 * final shift reinterprets them as unsigned short. This assumes entries
 * are already non-negative and within 13 bits; confirm against the
 * producer (sh_css_set_black_frame).
 */
static void
convert_raw_to_fpn(struct ia_css_isp_parameters *params)
{
int maxval = 0;
unsigned int i;
assert(params);
/* Find the maximum value in the table */
for (i = 0; i < params->fpn_config.height * params->fpn_config.width; i++) {
int val = params->fpn_config.data[i];
/* Make sure FPN value can be represented in 13-bit unsigned
* number (ISP precision - 1), but note that actual input range
* depends on precision of input frame data.
*/
if (val < 0) {
/* Checkpatch patch */
val = 0;
} else if (val >= (1 << 13)) {
/* Checkpatch patch */
/* MW: BUG, is "13" a system or application property */
val = (1 << 13) - 1;
}
maxval = max(maxval, val);
}
/* Find the lowest shift value to remap the values in the range
* 0..maxval to 0..2^shiftval*63.
*/
params->fpn_config.shift = 0;
while (maxval > 63) {
/* MW: BUG, is "63" a system or application property */
maxval >>= 1;
params->fpn_config.shift++;
}
/* Adjust the values in the table for the shift value */
for (i = 0; i < params->fpn_config.height * params->fpn_config.width; i++)
((unsigned short *)params->fpn_config.data)[i] >>= params->fpn_config.shift;
}
/*
 * Invoke @process once for every pipeline stage that has a binary, for
 * every pipe of @stream, passing the pipeline's pipe_id and the shared
 * @params. Used to (re)commit a kernel's parameters to all pipes.
 */
static void
ia_css_process_kernel(struct ia_css_stream *stream,
		      struct ia_css_isp_parameters *params,
		      void (*process)(unsigned int pipe_id,
				      const struct ia_css_pipeline_stage *stage,
				      struct ia_css_isp_parameters *params))
{
	int i;

	for (i = 0; i < stream->num_pipes; i++) {
		struct ia_css_pipe *pipe = stream->pipes[i];
		struct ia_css_pipeline *pipeline = ia_css_pipe_get_pipeline(pipe);
		struct ia_css_pipeline_stage *stage;

		/* update the other buffers to the pipe specific copies */
		for (stage = pipeline->stages; stage; stage = stage->next) {
			/*
			 * The loop condition already guarantees stage is
			 * non-NULL; only skip stages without a binary.
			 */
			if (!stage->binary)
				continue;
			process(pipeline->pipe_id, stage, params);
		}
	}
}
/* Currently we check if 10bpp DPC configuration is required based
 * on the use case, i.e. if BDS and DPC are both enabled. The cleaner
 * design choice would be to expose the type of DPC (either 10bpp or
 * 13bpp) using the binary info, but the current control flow does not
 * allow this implementation. (This is because the configuration is set
 * before a binary is selected, and the binary info is not available.)
 *
 * Returns 0 and sets *is_dp_10bpp, or -EINVAL on NULL arguments.
 */
static int
sh_css_select_dp_10bpp_config(const struct ia_css_pipe *pipe,
			      bool *is_dp_10bpp)
{
	unsigned int bds_factor = SH_CSS_BDS_FACTOR_1_00;

	if (!pipe || !is_dp_10bpp) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	*is_dp_10bpp = false;

	/* 10bpp DPC is only relevant when DPC is enabled from the host */
	if (!pipe->config.enable_dpc)
		return 0;

	/* ... and when bayer downscaling is configured */
	if (pipe->config.bayer_ds_out_res.width == 0 ||
	    pipe->config.bayer_ds_out_res.height == 0)
		return 0;

	if (binarydesc_calculate_bds_factor(pipe->config.input_effective_res,
					    pipe->config.bayer_ds_out_res,
					    &bds_factor) != 0)
		return 0;

	if (bds_factor != SH_CSS_BDS_FACTOR_1_00) {
		/* we use the 10bpp BDS configuration */
		*is_dp_10bpp = true;
	}
	return 0;
}
/*
 * Load a raw black frame from DDR into the host FPN table, convert it
 * to the packed FPN representation (convert_raw_to_fpn) and push the
 * result to the ISP parameters for all pipes of @stream.
 * Reallocates the host-side table when the frame dimensions change.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int
sh_css_set_black_frame(struct ia_css_stream *stream,
const struct ia_css_frame *raw_black_frame)
{
struct ia_css_isp_parameters *params;
/* this function desperately needs to be moved to the ISP or SP such
* that it can use the DMA.
*/
unsigned int height, width, y, x, k, data;
ia_css_ptr ptr;
assert(stream);
assert(raw_black_frame);
params = stream->isp_params_configs;
height = raw_black_frame->frame_info.res.height;
width = raw_black_frame->frame_info.padded_width;
/* start of the raw plane within the frame buffer */
ptr = raw_black_frame->data
+ raw_black_frame->planes.raw.offset;
IA_CSS_ENTER_PRIVATE("black_frame=%p", raw_black_frame);
/* dimensions changed: drop the old table so it is re-allocated below */
if (params->fpn_config.data &&
(params->fpn_config.width != width || params->fpn_config.height != height)) {
kvfree(params->fpn_config.data);
params->fpn_config.data = NULL;
}
if (!params->fpn_config.data) {
params->fpn_config.data = kvmalloc(array3_size(height, width, sizeof(short)),
GFP_KERNEL);
if (!params->fpn_config.data) {
IA_CSS_ERROR("out of memory");
IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
return -ENOMEM;
}
params->fpn_config.width = width;
params->fpn_config.height = height;
params->fpn_config.shift = 0;
}
/* store raw to fpntbl */
/* NOTE(review): the two k-loops below de-interleave each group of
 * 2*ISP_VEC_NELEMS pixels, splitting every 32-bit word into two 16-bit
 * samples and writing even/odd destination indices respectively; the
 * exact index pattern is assumed to match the ISP raw vector layout --
 * verify against the ISP documentation before changing.
 */
for (y = 0; y < height; y++) {
for (x = 0; x < width; x += (ISP_VEC_NELEMS * 2)) {
int ofs = y * width + x;
for (k = 0; k < ISP_VEC_NELEMS; k += 2) {
hmm_load(ptr, (void *)(&data), sizeof(int));
params->fpn_config.data[ofs + 2 * k] =
(short)(data & 0xFFFF);
params->fpn_config.data[ofs + 2 * k + 2] =
(short)((data >> 16) & 0xFFFF);
ptr += sizeof(int); /* byte system address */
}
for (k = 0; k < ISP_VEC_NELEMS; k += 2) {
hmm_load(ptr, (void *)(&data), sizeof(int));
params->fpn_config.data[ofs + 2 * k + 1] =
(short)(data & 0xFFFF);
params->fpn_config.data[ofs + 2 * k + 3] =
(short)((data >> 16) & 0xFFFF);
ptr += sizeof(int); /* byte system address */
}
}
}
/* raw -> fpn */
convert_raw_to_fpn(params);
/* overwrite isp parameter */
ia_css_process_kernel(stream, params, ia_css_kernel_process_param[IA_CSS_FPN_ID]);
IA_CSS_LEAVE_ERR_PRIVATE(0);
return 0;
}
/*
 * Record the sensor binning factor. A change marks the shading table as
 * needing regeneration. Returns the (possibly already set) sc_table_changed
 * flag.
 */
bool
sh_css_params_set_binning_factor(struct ia_css_stream *stream,
				 unsigned int binning_fact)
{
	struct ia_css_isp_parameters *cfg;

	IA_CSS_ENTER_PRIVATE("void");
	assert(stream);

	cfg = stream->isp_params_configs;
	if (cfg->sensor_binning != binning_fact) {
		cfg->sensor_binning = binning_fact;
		cfg->sc_table_changed = true;
	}

	IA_CSS_LEAVE_PRIVATE("void");
	return cfg->sc_table_changed;
}
/*
 * Install a new shading table. A NULL or disabled table clears the
 * current one. On change, the stored shading tables of all pipes are
 * invalidated (unless a fixed output frame is set).
 *
 * The NULL check is done before the ENTER trace (consistent with the
 * other setters in this file) so the trace ENTER/LEAVE pair stays
 * balanced on the early return.
 */
static void
sh_css_set_shading_table(struct ia_css_stream *stream,
			 struct ia_css_isp_parameters *params,
			 const struct ia_css_shading_table *table)
{
	if (!table)
		return;

	IA_CSS_ENTER_PRIVATE("");
	assert(stream);

	/* a disabled table is treated like no table at all */
	if (!table->enable)
		table = NULL;

	if (table != params->sc_table) {
		params->sc_table = table;
		params->sc_table_changed = true;
		/* Not very clean, this goes to sh_css.c to invalidate the
		 * shading table for all pipes. Should be replaced by a loop
		 * and a pipe-specific call.
		 */
		if (!params->output_frame)
			sh_css_invalidate_shading_tables(stream);
	}

	IA_CSS_LEAVE_PRIVATE("void");
}
/*
 * Copy a host-side parameter blob (data->address, data->size bytes)
 * into ISP DDR memory at @ddr_addr via hmm_store().
 */
void
ia_css_params_store_ia_css_host_data(
ia_css_ptr ddr_addr,
struct ia_css_host_data *data)
{
assert(data);
assert(data->address);
assert(ddr_addr != mmgr_NULL);
IA_CSS_ENTER_PRIVATE("");
hmm_store(ddr_addr,
(void *)(data->address),
(size_t)data->size);
IA_CSS_LEAVE_PRIVATE("void");
}
struct ia_css_host_data *
ia_css_params_alloc_convert_sctbl(
const struct ia_css_pipeline_stage *stage,
const struct ia_css_shading_table *shading_table)
{
const struct ia_css_binary *binary = stage->binary;
struct ia_css_host_data *sctbl;
unsigned int i, j, aligned_width;
unsigned int sctbl_size;
short int *ptr;
assert(binary);
assert(shading_table);
IA_CSS_ENTER_PRIVATE("");
if (!shading_table) {
IA_CSS_LEAVE_PRIVATE("void");
return NULL;
}
aligned_width = binary->sctbl_aligned_width_per_color;
sctbl_size = shading_table->height * IA_CSS_SC_NUM_COLORS * aligned_width *
sizeof(short);
sctbl = ia_css_host_data_allocate((size_t)sctbl_size);
if (!sctbl)
return NULL;
ptr = (short int *)sctbl->address;
memset(ptr,
0,
sctbl_size);
for (i = 0; i < shading_table->height; i++) {
for (j = 0; j < IA_CSS_SC_NUM_COLORS; j++) {
memcpy(ptr,
&shading_table->data[j]
[i * shading_table->width],
shading_table->width * sizeof(short));
ptr += aligned_width;
}
}
IA_CSS_LEAVE_PRIVATE("void");
return sctbl;
}
/*
 * Convert @sc_config to the ISP layout and store it in DDR at @sc_tbl.
 * A NULL config is a successful no-op. Returns 0 or -ENOMEM.
 */
int ia_css_params_store_sctbl(
    const struct ia_css_pipeline_stage *stage,
    ia_css_ptr sc_tbl,
    const struct ia_css_shading_table *sc_config)
{
	struct ia_css_host_data *converted;

	IA_CSS_ENTER_PRIVATE("");

	if (!sc_config) {
		IA_CSS_LEAVE_PRIVATE("void");
		return 0;
	}

	converted = ia_css_params_alloc_convert_sctbl(stage, sc_config);
	if (!converted) {
		IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
		return -ENOMEM;
	}

	/* store the shading table to ddr */
	ia_css_params_store_ia_css_host_data(sc_tbl, converted);
	ia_css_host_data_free(converted);

	IA_CSS_LEAVE_PRIVATE("void");
	return 0;
}
/*
 * Enable the pipeline for @binary by delegating to
 * ia_css_isp_param_enable_pipeline() on binary->mem_params.
 * A NULL binary is silently ignored.
 */
static void
sh_css_enable_pipeline(const struct ia_css_binary *binary)
{
if (!binary)
return;
IA_CSS_ENTER_PRIVATE("");
ia_css_isp_param_enable_pipeline(&binary->mem_params);
IA_CSS_LEAVE_PRIVATE("void");
}
static int
ia_css_process_zoom_and_motion(
struct ia_css_isp_parameters *params,
const struct ia_css_pipeline_stage *first_stage)
{
/* first_stage can be NULL */
const struct ia_css_pipeline_stage *stage;
int err = 0;
struct ia_css_resolution pipe_in_res;
pipe_in_res.width = 0;
pipe_in_res.height = 0;
assert(params);
IA_CSS_ENTER_PRIVATE("");
/* Go through all stages to udate uds and cropping */
for (stage = first_stage; stage; stage = stage->next) {
struct ia_css_binary *binary;
/* note: the var below is made static as it is quite large;
if it is not static it ends up on the stack which could
cause issues for drivers
*/
static struct ia_css_binary tmp_binary;
const struct ia_css_binary_xinfo *info = NULL;
binary = stage->binary;
if (binary) {
info = binary->info;
} else {
const struct sh_css_binary_args *args = &stage->args;
const struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {NULL};
out_infos[0] = ia_css_frame_get_info(args->out_frame[0]);
info = &stage->firmware->info.isp;
ia_css_binary_fill_info(info, false, false,
ATOMISP_INPUT_FORMAT_RAW_10,
ia_css_frame_get_info(args->in_frame),
NULL,
out_infos,
ia_css_frame_get_info(args->out_vf_frame),
&tmp_binary,
NULL,
-1, true);
binary = &tmp_binary;
binary->info = info;
}
if (stage == first_stage) {
/* we will use pipe_in_res to scale the zoom crop region if needed */
pipe_in_res = binary->effective_in_frame_res;
}
assert(stage->stage_num < SH_CSS_MAX_STAGES);
if (params->dz_config.zoom_region.resolution.width == 0 &&
params->dz_config.zoom_region.resolution.height == 0) {
sh_css_update_uds_and_crop_info(
&info->sp,
&binary->in_frame_info,
&binary->out_frame_info[0],
&binary->dvs_envelope,
¶ms->dz_config,
¶ms->motion_config,
¶ms->uds[stage->stage_num].uds,
¶ms->uds[stage->stage_num].crop_pos,
stage->enable_zoom);
} else {
err = sh_css_update_uds_and_crop_info_based_on_zoom_region(
&info->sp,
&binary->in_frame_info,
&binary->out_frame_info[0],
&binary->dvs_envelope,
¶ms->dz_config,
¶ms->motion_config,
¶ms->uds[stage->stage_num].uds,
¶ms->uds[stage->stage_num].crop_pos,
pipe_in_res,
stage->enable_zoom);
if (err)
return err;
}
}
params->isp_params_changed = true;
IA_CSS_LEAVE_PRIVATE("void");
return err;
}
/* Store a new gamma table and flag the GC kernel for re-commit.
 * A NULL table is ignored.
 */
static void
sh_css_set_gamma_table(struct ia_css_isp_parameters *params,
const struct ia_css_gamma_table *table)
{
if (!table)
return;
IA_CSS_ENTER_PRIVATE("table=%p", table);
assert(params);
params->gc_table = *table;
params->config_changed[IA_CSS_GC_ID] = true;
IA_CSS_LEAVE_PRIVATE("void");
}
/* Copy the current gamma table into *table. A NULL table is ignored. */
static void
sh_css_get_gamma_table(const struct ia_css_isp_parameters *params,
struct ia_css_gamma_table *table)
{
if (!table)
return;
IA_CSS_ENTER_PRIVATE("table=%p", table);
assert(params);
*table = params->gc_table;
IA_CSS_LEAVE_PRIVATE("void");
}
/* Store a new CTC table and flag the CTC kernel for re-commit.
 * A NULL table is ignored.
 */
static void
sh_css_set_ctc_table(struct ia_css_isp_parameters *params,
const struct ia_css_ctc_table *table)
{
if (!table)
return;
IA_CSS_ENTER_PRIVATE("table=%p", table);
assert(params);
params->ctc_table = *table;
params->config_changed[IA_CSS_CTC_ID] = true;
IA_CSS_LEAVE_PRIVATE("void");
}
/* Copy the current CTC table into *table. A NULL table is ignored. */
static void
sh_css_get_ctc_table(const struct ia_css_isp_parameters *params,
struct ia_css_ctc_table *table)
{
if (!table)
return;
IA_CSS_ENTER_PRIVATE("table=%p", table);
assert(params);
*table = params->ctc_table;
IA_CSS_LEAVE_PRIVATE("void");
}
/* Store a new MACC table and flag the MACC kernel for re-commit.
 * A NULL table is ignored.
 */
static void
sh_css_set_macc_table(struct ia_css_isp_parameters *params,
const struct ia_css_macc_table *table)
{
if (!table)
return;
IA_CSS_ENTER_PRIVATE("table=%p", table);
assert(params);
params->macc_table = *table;
params->config_changed[IA_CSS_MACC_ID] = true;
IA_CSS_LEAVE_PRIVATE("void");
}
/* Copy the current MACC table into *table. A NULL table is ignored. */
static void
sh_css_get_macc_table(const struct ia_css_isp_parameters *params,
struct ia_css_macc_table *table)
{
if (!table)
return;
IA_CSS_ENTER_PRIVATE("table=%p", table);
assert(params);
*table = params->macc_table;
IA_CSS_LEAVE_PRIVATE("void");
}
/*
 * Free a morph table and all of its per-plane coordinate arrays.
 * NULL is a no-op.
 */
void ia_css_morph_table_free(
    struct ia_css_morph_table *me)
{
	unsigned int i;

	if (!me)
		return;

	IA_CSS_ENTER("");

	for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
		/* kvfree(NULL) is a no-op, so no NULL guards are needed */
		kvfree(me->coordinates_x[i]);
		me->coordinates_x[i] = NULL;
		kvfree(me->coordinates_y[i]);
		me->coordinates_y[i] = NULL;
	}

	kvfree(me);
	IA_CSS_LEAVE("void");
}
/*
 * Allocate a width x height morph table with all
 * IA_CSS_MORPH_TABLE_NUM_PLANES coordinate planes. Uses array3_size()
 * (as the FPN allocation in this file already does) so an overflowing
 * height*width*elemsize product saturates instead of wrapping.
 * Returns NULL on allocation failure (partially allocated planes are
 * released).
 */
struct ia_css_morph_table *ia_css_morph_table_allocate(
    unsigned int width,
    unsigned int height)
{
	unsigned int i;
	struct ia_css_morph_table *me;

	IA_CSS_ENTER("");

	me = kvmalloc(sizeof(*me), GFP_KERNEL);
	if (!me) {
		IA_CSS_ERROR("out of memory");
		return me;
	}

	/* clear all plane pointers first so a partial failure can be freed */
	for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
		me->coordinates_x[i] = NULL;
		me->coordinates_y[i] = NULL;
	}

	for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
		me->coordinates_x[i] =
		    kvmalloc(array3_size(height, width,
					 sizeof(*me->coordinates_x[i])),
			     GFP_KERNEL);
		me->coordinates_y[i] =
		    kvmalloc(array3_size(height, width,
					 sizeof(*me->coordinates_y[i])),
			     GFP_KERNEL);
		if (!me->coordinates_x[i] || !me->coordinates_y[i]) {
			ia_css_morph_table_free(me);
			return NULL;
		}
	}
	me->width = width;
	me->height = height;
	IA_CSS_LEAVE("");
	return me;
}
/*
 * Build the default GDC morph table for @binary: per plane, a linear
 * ramp with plane-specific start offsets, with the first column/row
 * pinned to 0 and the last column mirrored. Returns the new table in
 * *table; 0 on success, -ENOMEM on allocation failure.
 */
static int sh_css_params_default_morph_table(
    struct ia_css_morph_table **table,
    const struct ia_css_binary *binary)
{
	/* MW 2400 advanced requires different scaling */
	unsigned int i, j, k, step, width, height;
	short start_x[IA_CSS_MORPH_TABLE_NUM_PLANES] = { -8, 0, -8, 0, 0, -8 },
	      start_y[IA_CSS_MORPH_TABLE_NUM_PLANES] = { 0, 0, -8, -8, -8, 0 };
	struct ia_css_morph_table *tab;

	assert(table);
	assert(binary);
	IA_CSS_ENTER_PRIVATE("");

	step = (ISP_VEC_NELEMS / 16) * 128;
	width = binary->morph_tbl_width;
	height = binary->morph_tbl_height;

	tab = ia_css_morph_table_allocate(width, height);
	if (!tab) {
		/* balance the ENTER trace on the error path as well */
		IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
		return -ENOMEM;
	}

	for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
		short val_y = start_y[i];

		for (j = 0; j < height; j++) {
			short val_x = start_x[i];
			unsigned short *x_ptr, *y_ptr;

			x_ptr = &tab->coordinates_x[i][j * width];
			y_ptr = &tab->coordinates_y[i][j * width];
			for (k = 0; k < width;
			     k++, x_ptr++, y_ptr++, val_x += (short)step) {
				if (k == 0)
					*x_ptr = 0;
				else if (k == width - 1)
					*x_ptr = val_x + 2 * start_x[i];
				else
					*x_ptr = val_x;
				if (j == 0)
					*y_ptr = 0;
				else
					*y_ptr = val_y;
			}
			val_y += (short)step;
		}
	}
	*table = tab;

	IA_CSS_LEAVE_ERR_PRIVATE(0);
	return 0;
}
/*
 * Install a new morph table reference and mark it changed. A NULL table
 * is ignored; a disabled table is treated the same as no table at all.
 */
static void
sh_css_set_morph_table(struct ia_css_isp_parameters *params,
		       const struct ia_css_morph_table *table)
{
	if (!table)
		return;

	IA_CSS_ENTER_PRIVATE("table=%p", table);
	assert(params);

	params->morph_table = table->enable ? table : NULL;
	params->morph_table_changed = true;

	IA_CSS_LEAVE_PRIVATE("void");
}
/*
 * Decode raw ISP 3A statistics into the host representation. The main
 * grid comes from either DMEM or VMEM (selected by grid.use_dmem);
 * HMEM statistics are always decoded.
 */
void
ia_css_translate_3a_statistics(
    struct ia_css_3a_statistics *host_stats,
    const struct ia_css_isp_3a_statistics_map *isp_stats)
{
	IA_CSS_ENTER("");

	if (!host_stats->grid.use_dmem) {
		IA_CSS_LOG("3A: VMEM");
		ia_css_s3a_vmem_decode(host_stats, isp_stats->vmem_stats_hi,
				       isp_stats->vmem_stats_lo);
	} else {
		IA_CSS_LOG("3A: DMEM");
		ia_css_s3a_dmem_decode(host_stats, isp_stats->dmem_stats);
	}

	IA_CSS_LOG("3A: HMEM");
	ia_css_s3a_hmem_decode(host_stats, isp_stats->hmem_stats);

	IA_CSS_LEAVE("void");
}
/*
 * Free a 3A statistics map; the data buffer is released only when it
 * was allocated by the map itself (data_allocated). NULL is a no-op.
 */
void
ia_css_isp_3a_statistics_map_free(struct ia_css_isp_3a_statistics_map *me)
{
	if (!me)
		return;

	if (me->data_allocated) {
		kvfree(me->data_ptr);
		me->data_ptr = NULL;
		me->data_allocated = false;
	}
	kvfree(me);
}
/*
 * Allocate a map describing where the sections of a 3A statistics
 * buffer live. If @data_ptr is NULL, a buffer of isp_stats->size bytes
 * is allocated and owned by the map (freed by
 * ia_css_isp_3a_statistics_map_free()); otherwise the caller's buffer
 * is used. Section layout within the buffer (from the offsets below):
 * dmem stats, vmem hi, vmem lo, hmem stats.
 * Returns NULL on allocation failure.
 */
struct ia_css_isp_3a_statistics_map *
ia_css_isp_3a_statistics_map_allocate(
const struct ia_css_isp_3a_statistics *isp_stats,
void *data_ptr)
{
struct ia_css_isp_3a_statistics_map *me;
/* Windows compiler does not like adding sizes to a void *
* so we use a local char * instead. */
char *base_ptr;
me = kvmalloc(sizeof(*me), GFP_KERNEL);
if (!me) {
IA_CSS_LEAVE("cannot allocate memory");
goto err;
}
me->data_ptr = data_ptr;
/* the map owns the buffer only when it allocated it itself */
me->data_allocated = !data_ptr;
if (!data_ptr) {
me->data_ptr = kvmalloc(isp_stats->size, GFP_KERNEL);
if (!me->data_ptr) {
IA_CSS_LEAVE("cannot allocate memory");
goto err;
}
}
base_ptr = me->data_ptr;
me->size = isp_stats->size;
/* GCC complains when we assign a char * to a void *, so these
* casts are necessary unfortunately. */
me->dmem_stats = (void *)base_ptr;
me->vmem_stats_hi = (void *)(base_ptr + isp_stats->dmem_size);
me->vmem_stats_lo = (void *)(base_ptr + isp_stats->dmem_size +
isp_stats->vmem_size);
me->hmem_stats = (void *)(base_ptr + isp_stats->dmem_size +
2 * isp_stats->vmem_size);
IA_CSS_LEAVE("map=%p", me);
return me;
err:
/* kvfree(NULL) is safe; on the second failure path me->data_ptr
* was never successfully allocated */
kvfree(me);
return NULL;
}
/*
 * Fetch raw 3A statistics from ISP DDR memory and decode them into
 * @host_stats. Returns 0 on success, -ENOMEM when the temporary map
 * cannot be allocated.
 */
int
ia_css_get_3a_statistics(struct ia_css_3a_statistics *host_stats,
			 const struct ia_css_isp_3a_statistics *isp_stats)
{
	struct ia_css_isp_3a_statistics_map *map;

	IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);

	assert(host_stats);
	assert(isp_stats);

	map = ia_css_isp_3a_statistics_map_allocate(isp_stats, NULL);
	if (!map) {
		IA_CSS_ERROR("out of memory");
		IA_CSS_LEAVE_ERR(-ENOMEM);
		return -ENOMEM;
	}

	hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
	ia_css_translate_3a_statistics(host_stats, map);
	ia_css_isp_3a_statistics_map_free(map);

	IA_CSS_LEAVE_ERR(0);
	return 0;
}
/* Parameter encoding is not yet orthogonal.
This function handles some of the exceptions.
*/
/*
 * Mirror the white-balance gains into the defect-pixel (DP) config;
 * the inline comment notes this should really be the driver's job.
 * @pipe is currently unused.
 */
static void
ia_css_set_param_exceptions(const struct ia_css_pipe *pipe,
struct ia_css_isp_parameters *params)
{
assert(params);
/* Copy also to DP. Should be done by the driver. */
params->dp_config.gr = params->wb_config.gr;
params->dp_config.r = params->wb_config.r;
params->dp_config.b = params->wb_config.b;
params->dp_config.gb = params->wb_config.gb;
}
/*
 * Store a new noise-reduction config; the NR parameters are also
 * mirrored into the combined YEE config, and the NR, YEE and BNR
 * kernels are flagged for re-commit. A NULL config is ignored.
 */
static void
sh_css_set_nr_config(struct ia_css_isp_parameters *params,
const struct ia_css_nr_config *config)
{
if (!config)
return;
assert(params);
IA_CSS_ENTER_PRIVATE("config=%p", config);
ia_css_nr_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
params->nr_config = *config;
params->yee_config.nr = *config;
params->config_changed[IA_CSS_NR_ID] = true;
params->config_changed[IA_CSS_YEE_ID] = true;
params->config_changed[IA_CSS_BNR_ID] = true;
IA_CSS_LEAVE_PRIVATE("void");
}
/*
 * Store a new edge-enhancement config; the EE parameters are also
 * mirrored into the combined YEE config, which is flagged for
 * re-commit. A NULL config is ignored.
 */
static void
sh_css_set_ee_config(struct ia_css_isp_parameters *params,
const struct ia_css_ee_config *config)
{
if (!config)
return;
assert(params);
IA_CSS_ENTER_PRIVATE("config=%p", config);
ia_css_ee_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
params->ee_config = *config;
params->yee_config.ee = *config;
params->config_changed[IA_CSS_YEE_ID] = true;
IA_CSS_LEAVE_PRIVATE("void");
}
/* Copy the current edge-enhancement config into *config.
 * A NULL config is ignored.
 */
static void
sh_css_get_ee_config(const struct ia_css_isp_parameters *params,
struct ia_css_ee_config *config)
{
if (!config)
return;
IA_CSS_ENTER_PRIVATE("config=%p", config);
assert(params);
*config = params->ee_config;
ia_css_ee_debug_dtrace(config, IA_CSS_DEBUG_TRACE_PRIVATE);
IA_CSS_LEAVE_PRIVATE("void");
}
/*
 * Copy a DVS 6-axis configuration into the stored table for pipe->mode
 * and mark it changed. The asserts encode the expected luma/chroma grid
 * relation: equal heights and (width_y - 1) == 2 * (width_uv - 1).
 * A NULL dvs_config is ignored.
 */
static void
sh_css_set_pipe_dvs_6axis_config(const struct ia_css_pipe *pipe,
struct ia_css_isp_parameters *params,
const struct ia_css_dvs_6axis_config *dvs_config)
{
if (!dvs_config)
return;
assert(params);
assert(pipe);
assert(dvs_config->height_y == dvs_config->height_uv);
assert((dvs_config->width_y - 1) == 2 * (dvs_config->width_uv - 1));
assert(pipe->mode < IA_CSS_PIPE_ID_NUM);
IA_CSS_ENTER_PRIVATE("dvs_config=%p", dvs_config);
copy_dvs_6axis_table(params->pipe_dvs_6axis_config[pipe->mode], dvs_config);
params->pipe_dvs_6axis_config_changed[pipe->mode] = true;
IA_CSS_LEAVE_PRIVATE("void");
}
/*
 * Copy the stored DVS 6-axis configuration for pipe->mode into
 * @dvs_config, but only when the caller's table has matching dimensions
 * and all four coordinate arrays allocated. A NULL dvs_config is
 * ignored.
 */
static void
sh_css_get_pipe_dvs_6axis_config(const struct ia_css_pipe *pipe,
				 const struct ia_css_isp_parameters *params,
				 struct ia_css_dvs_6axis_config *dvs_config)
{
	if (!dvs_config)
		return;

	assert(params);
	assert(pipe);
	assert(dvs_config->height_y == dvs_config->height_uv);
	/*
	 * Same luma/chroma grid invariant as asserted by the setter; the
	 * previous form lacked parentheses around (width_uv - 1) and thus
	 * checked a different relation than the one the setter establishes.
	 */
	assert((dvs_config->width_y - 1) == 2 * (dvs_config->width_uv - 1));

	IA_CSS_ENTER_PRIVATE("dvs_config=%p", dvs_config);

	if ((pipe->mode < IA_CSS_PIPE_ID_NUM) &&
	    (dvs_config->width_y == params->pipe_dvs_6axis_config[pipe->mode]->width_y) &&
	    (dvs_config->height_y == params->pipe_dvs_6axis_config[pipe->mode]->height_y) &&
	    (dvs_config->width_uv == params->pipe_dvs_6axis_config[pipe->mode]->width_uv) &&
	    (dvs_config->height_uv == params->pipe_dvs_6axis_config[pipe->mode]->height_uv)
	    &&
	    dvs_config->xcoords_y &&
	    dvs_config->ycoords_y &&
	    dvs_config->xcoords_uv &&
	    dvs_config->ycoords_uv) {
		copy_dvs_6axis_table(dvs_config, params->pipe_dvs_6axis_config[pipe->mode]);
	}

	IA_CSS_LEAVE_PRIVATE("void");
}
static void
sh_css_set_baa_config(struct ia_css_isp_parameters *params,
		      const struct ia_css_aa_config *config)
{
	if (!config)
		return;

	assert(params);
	IA_CSS_ENTER_PRIVATE("config=%p", config);
	/* The BAA (bayer anti-aliasing) settings are stored as the BDS
	 * kernel configuration. */
	params->bds_config = *config;
	params->config_changed[IA_CSS_BDS_ID] = true;
	IA_CSS_LEAVE_PRIVATE("void");
}
static void
sh_css_get_baa_config(const struct ia_css_isp_parameters *params,
		      struct ia_css_aa_config *config)
{
	if (!config)
		return;

	assert(params);
	IA_CSS_ENTER_PRIVATE("config=%p", config);
	/* BAA settings live in the BDS kernel configuration. */
	*config = params->bds_config;
	IA_CSS_LEAVE_PRIVATE("void");
}
/* Store the digital-zoom factors. dx/dy must not exceed HRT_GDC_N (see
 * asserts); presumably fixed-point zoom ratios in HRT_GDC_N units —
 * TODO confirm against the GDC documentation. */
static void
sh_css_set_dz_config(struct ia_css_isp_parameters *params,
		     const struct ia_css_dz_config *config)
{
	if (!config)
		return;
	assert(params);
	IA_CSS_ENTER_PRIVATE("dx=%d, dy=%d", config->dx, config->dy);
	assert(config->dx <= HRT_GDC_N);
	assert(config->dy <= HRT_GDC_N);
	params->dz_config = *config;
	params->dz_config_changed = true;
	/* JK: Why isp params changed?? */
	params->isp_params_changed = true;
	IA_CSS_LEAVE_PRIVATE("void");
}
static void
sh_css_get_dz_config(const struct ia_css_isp_parameters *params,
		     struct ia_css_dz_config *config)
{
	if (!config)
		return;

	assert(params);
	IA_CSS_ENTER_PRIVATE("config=%p", config);
	/* Copy out the currently stored digital-zoom factors. */
	*config = params->dz_config;
	IA_CSS_LEAVE_PRIVATE("dx=%d, dy=%d", config->dx, config->dy);
}
static void
sh_css_set_motion_vector(struct ia_css_isp_parameters *params,
			 const struct ia_css_vector *motion)
{
	if (!motion)
		return;

	assert(params);
	IA_CSS_ENTER_PRIVATE("x=%d, y=%d", motion->x, motion->y);
	params->motion_config = *motion;
	/* JK: Why do isp params change? */
	params->motion_config_changed = true;
	params->isp_params_changed = true;
	IA_CSS_LEAVE_PRIVATE("void");
}
static void
sh_css_get_motion_vector(const struct ia_css_isp_parameters *params,
			 struct ia_css_vector *motion)
{
	if (!motion)
		return;

	assert(params);
	IA_CSS_ENTER_PRIVATE("motion=%p", motion);
	/* Copy out the currently stored motion vector. */
	*motion = params->motion_config;
	IA_CSS_LEAVE_PRIVATE("x=%d, y=%d", motion->x, motion->y);
}
/* Return the per-pipe ISP config registered at pipe-creation time, or
 * NULL (with an error trace) when no pipe is given. */
struct ia_css_isp_config *
sh_css_pipe_isp_config_get(struct ia_css_pipe *pipe)
{
	if (!pipe) {
		IA_CSS_ERROR("pipe=%p", NULL);
		return NULL;
	}
	return pipe->config.p_isp_config;
}
/* Apply @config to @stream's default (first) pipe.
 * Convenience wrapper around ia_css_stream_set_isp_config_on_pipe()
 * with pipe == NULL. Returns 0 on success or a negative errno. */
int
ia_css_stream_set_isp_config(
    struct ia_css_stream *stream,
    const struct ia_css_isp_config *config)
{
	return ia_css_stream_set_isp_config_on_pipe(stream, config, NULL);
}
/* Apply @config to @stream. When config->output_frame is set the
 * parameters are applied per-frame; otherwise they update the stream's
 * global parameter set (via the stream's first pipe).
 * Returns 0 on success or a negative errno. */
int
ia_css_stream_set_isp_config_on_pipe(
    struct ia_css_stream *stream,
    const struct ia_css_isp_config *config,
    struct ia_css_pipe *pipe)
{
	int err = 0;

	if ((!stream) || (!config))
		return -EINVAL;
	IA_CSS_ENTER("stream=%p, config=%p, pipe=%p", stream, config, pipe);
	if (config->output_frame)
		err = sh_css_set_per_frame_isp_config_on_pipe(stream, config, pipe);
	else
		err = sh_css_set_global_isp_config_on_pipe(stream->pipes[0], config, pipe);
	IA_CSS_LEAVE_ERR(err);
	return err;
}
/* Apply @config to a single pipe. When config->output_frame is set the
 * parameters are applied per-frame; otherwise they update the stream's
 * global parameter set restricted to this pipe.
 * Returns 0 on success or a negative errno. */
int
ia_css_pipe_set_isp_config(struct ia_css_pipe *pipe,
			   struct ia_css_isp_config *config)
{
	struct ia_css_pipe *pipe_in = pipe;
	int err = 0;

	IA_CSS_ENTER("pipe=%p", pipe);
	/* Fixed: also reject a NULL config. It was previously dereferenced
	 * below unchecked, unlike ia_css_stream_set_isp_config_on_pipe()
	 * which validates it. */
	if ((!pipe) || (!pipe->stream) || (!config))
		return -EINVAL;
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "config=%p\n", config);
	if (config->output_frame)
		err = sh_css_set_per_frame_isp_config_on_pipe(pipe->stream, config, pipe);
	else
		err = sh_css_set_global_isp_config_on_pipe(pipe, config, pipe_in);
	IA_CSS_LEAVE_ERR(err);
	return err;
}
/* Update the stream-global ISP parameters from @config and commit them
 * to the SP. Returns the first error encountered, but always attempts
 * the commit even if the config-init step failed (see comment below). */
static int
sh_css_set_global_isp_config_on_pipe(
    struct ia_css_pipe *curr_pipe,
    const struct ia_css_isp_config *config,
    struct ia_css_pipe *pipe)
{
	int err = 0;
	int err1 = 0;
	int err2 = 0;

	IA_CSS_ENTER_PRIVATE("stream=%p, config=%p, pipe=%p", curr_pipe, config, pipe);
	err1 = sh_css_init_isp_params_from_config(curr_pipe, curr_pipe->stream->isp_params_configs, config, pipe);
	/* Now commit all changes to the SP */
	err2 = sh_css_param_update_isp_params(curr_pipe, curr_pipe->stream->isp_params_configs, sh_css_sp_is_running(), pipe);
	/* The following code is intentional. The sh_css_init_isp_params_from_config interface
	 * throws an error when both DPC and BDS is enabled. The CSS API must pass this error
	 * information to the caller, ie. the host. We do not return this error immediately,
	 * but instead continue with updating the ISP params to enable testing of features
	 * which are currently in TR phase. */
	err = (err1 != 0) ? err1 : ((err2 != 0) ? err2 : err);
	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/* Apply a per-frame ISP configuration to @pipe.
 * Lazily creates stream->per_frame_isp_params_configs (seeded from the
 * stream-global parameters), merges @config into it, then commits the
 * result to DDR. Errors from the individual steps are collected and the
 * first one is returned; later steps still run (deliberate, see comment
 * below). Returns 0 on success or a negative errno. */
static int
sh_css_set_per_frame_isp_config_on_pipe(
    struct ia_css_stream *stream,
    const struct ia_css_isp_config *config,
    struct ia_css_pipe *pipe)
{
	unsigned int i;
	bool per_frame_config_created = false;
	int err = 0;
	int err1 = 0;
	int err2 = 0;
	int err3 = 0;
	struct sh_css_ddr_address_map *ddr_ptrs;
	struct sh_css_ddr_address_map_size *ddr_ptrs_size;
	struct ia_css_isp_parameters *params;

	IA_CSS_ENTER_PRIVATE("stream=%p, config=%p, pipe=%p", stream, config, pipe);
	if (!pipe) {
		err = -EINVAL;
		goto exit;
	}
	/* create per-frame ISP params object with default values
	 * from stream->isp_params_configs if one doesn't already exist
	 */
	if (!stream->per_frame_isp_params_configs) {
		err = sh_css_create_isp_params(stream,
					       &stream->per_frame_isp_params_configs);
		if (err)
			goto exit;
		per_frame_config_created = true;
	}
	params = stream->per_frame_isp_params_configs;
	/* update new ISP params object with the new config */
	if (!sh_css_init_isp_params_from_global(stream, params, false, pipe)) {
		err1 = -EINVAL;
	}
	err2 = sh_css_init_isp_params_from_config(stream->pipes[0], params, config, pipe);
	if (per_frame_config_created) {
		ddr_ptrs = &params->ddr_ptrs;
		ddr_ptrs_size = &params->ddr_ptrs_size;
		/* create per pipe reference to general ddr_ptrs */
		for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
			ref_sh_css_ddr_address_map(ddr_ptrs, &params->pipe_ddr_ptrs[i]);
			params->pipe_ddr_ptrs_size[i] = *ddr_ptrs_size;
		}
	}
	/* now commit to ddr */
	err3 = sh_css_param_update_isp_params(stream->pipes[0], params, sh_css_sp_is_running(), pipe);
	/* The following code is intentional. The sh_css_init_sp_params_from_config and
	 * sh_css_init_isp_params_from_config throws an error when both DPC and BDS is enabled.
	 * The CSS API must pass this error information to the caller, ie. the host.
	 * We do not return this error immediately, but instead continue with updating the ISP params
	 * to enable testing of features which are currently in TR phase. */
	err = (err1 != 0) ? err1 :
	      (err2 != 0) ? err2 :
	      (err3 != 0) ? err3 : err;
exit:
	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/* Merge every field of a user-supplied ia_css_isp_config into @params.
 * Each sh_css_set_* / ia_css_set_* helper is a no-op for NULL members,
 * so a sparse config only updates what the caller provided.
 * Returns -EINVAL when both DPC and BDS are enabled (deferred, see
 * comment below) or when the DPC/BDS selection itself fails. */
static int
sh_css_init_isp_params_from_config(struct ia_css_pipe *pipe,
				   struct ia_css_isp_parameters *params,
				   const struct ia_css_isp_config *config,
				   struct ia_css_pipe *pipe_in)
{
	int err = 0;
	bool is_dp_10bpp = true;

	assert(pipe);
	IA_CSS_ENTER_PRIVATE("pipe=%p, config=%p, params=%p", pipe, config, params);
	ia_css_set_configs(params, config);
	sh_css_set_nr_config(params, config->nr_config);
	sh_css_set_ee_config(params, config->ee_config);
	sh_css_set_baa_config(params, config->baa_config);
	/* Only update the per-pipe DVS table when this pipe already owns one. */
	if ((pipe->mode < IA_CSS_PIPE_ID_NUM) &&
	    (params->pipe_dvs_6axis_config[pipe->mode]))
		sh_css_set_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config);
	sh_css_set_dz_config(params, config->dz_config);
	sh_css_set_motion_vector(params, config->motion_vector);
	sh_css_set_shading_table(pipe->stream, params, config->shading_table);
	sh_css_set_morph_table(params, config->morph_table);
	sh_css_set_macc_table(params, config->macc_table);
	sh_css_set_gamma_table(params, config->gamma_table);
	sh_css_set_ctc_table(params, config->ctc_table);
	/* ------ deprecated(bz675) : from ------ */
	sh_css_set_shading_settings(params, config->shading_settings);
	/* ------ deprecated(bz675) : to ------ */
	/* Non-NULL coefficient pointers mark the tables as changed. */
	params->dis_coef_table_changed = (config->dvs_coefs);
	params->dvs2_coef_table_changed = (config->dvs2_coefs);
	params->output_frame = config->output_frame;
	params->isp_parameters_id = config->isp_config_id;
	if (0 ==
	    sh_css_select_dp_10bpp_config(pipe, &is_dp_10bpp)) {
		/* return an error when both DPC and BDS is enabled by the
		 * user. */
		/* we do not exit from this point immediately to allow internal
		 * firmware feature testing. */
		if (is_dp_10bpp) {
			err = -EINVAL;
		}
	} else {
		err = -EINVAL;
		goto exit;
	}
	ia_css_set_param_exceptions(pipe, params);
exit:
	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/* Read back the ISP configuration of @stream's first pipe into @config. */
void
ia_css_stream_get_isp_config(
    const struct ia_css_stream *stream,
    struct ia_css_isp_config *config)
{
	IA_CSS_ENTER("void");
	ia_css_pipe_get_isp_config(stream->pipes[0], config);
	IA_CSS_LEAVE("void");
}
/* Fill @config with the stream-global ISP parameters of @pipe's stream.
 * Each sh_css_get_* / ia_css_get_* helper is a no-op for NULL members,
 * so the caller chooses which sub-configs to read by pre-allocating
 * them. NOTE(review): pipe and pipe->stream are dereferenced without a
 * NULL check — callers must guarantee a bound stream. */
void
ia_css_pipe_get_isp_config(struct ia_css_pipe *pipe,
			   struct ia_css_isp_config *config)
{
	struct ia_css_isp_parameters *params = NULL;

	assert(config);
	IA_CSS_ENTER("config=%p", config);
	params = pipe->stream->isp_params_configs;
	assert(params);
	ia_css_get_configs(params, config);
	sh_css_get_ee_config(params, config->ee_config);
	sh_css_get_baa_config(params, config->baa_config);
	sh_css_get_pipe_dvs_6axis_config(pipe, params, config->dvs_6axis_config);
	sh_css_get_macc_table(params, config->macc_table);
	sh_css_get_gamma_table(params, config->gamma_table);
	sh_css_get_ctc_table(params, config->ctc_table);
	sh_css_get_dz_config(params, config->dz_config);
	sh_css_get_motion_vector(params, config->motion_vector);
	/* ------ deprecated(bz675) : from ------ */
	sh_css_get_shading_settings(params, config->shading_settings);
	/* ------ deprecated(bz675) : to ------ */
	config->output_frame = params->output_frame;
	config->isp_config_id = params->isp_parameters_id;
	IA_CSS_LEAVE("void");
}
/*
 * Grow (or force-replace) a refcounted ISP DDR buffer.
 * Returns true when a new buffer was allocated (caller must re-store its
 * contents), false when the existing buffer is reused. On allocation
 * failure *err is set to -ENOMEM and *curr_size to 0.
 * The old buffer is refcount-decremented before the new allocation, so
 * on failure the caller no longer owns a buffer.
 * A return of mmgr_NULL from hmm_alloc() is the error signal.
 *
 * Deprecated: Implement mmgr_realloc()
 */
static bool realloc_isp_css_mm_buf(
    ia_css_ptr *curr_buf,
    size_t *curr_size,
    size_t needed_size,
    bool force,
    int *err)
{
	s32 id;

	*err = 0;
	/* Possible optimization: add a function sh_css_isp_css_mm_realloc()
	 * and implement on top of hmm. */
	IA_CSS_ENTER_PRIVATE("void");
	/* Keep the current buffer when it is big enough and no forced
	 * replacement was requested. */
	if (!force && *curr_size >= needed_size) {
		IA_CSS_LEAVE_PRIVATE("false");
		return false;
	}
	/* don't reallocate if single ref to buffer and same size */
	if (*curr_size == needed_size && ia_css_refcount_is_single(*curr_buf)) {
		IA_CSS_LEAVE_PRIVATE("false");
		return false;
	}
	id = IA_CSS_REFCOUNT_PARAM_BUFFER;
	ia_css_refcount_decrement(id, *curr_buf);
	*curr_buf = ia_css_refcount_increment(id, hmm_alloc(needed_size));
	if (!*curr_buf) {
		*err = -ENOMEM;
		*curr_size = 0;
	} else {
		*curr_size = needed_size;
	}
	IA_CSS_LEAVE_PRIVATE("true");
	return true;
}
/* Thin tracing wrapper around realloc_isp_css_mm_buf(); see that
 * function for the reallocation semantics. */
static bool reallocate_buffer(
    ia_css_ptr *curr_buf,
    size_t *curr_size,
    size_t needed_size,
    bool force,
    int *err)
{
	bool reallocated;

	IA_CSS_ENTER_PRIVATE("void");
	reallocated = realloc_isp_css_mm_buf(curr_buf, curr_size,
					     needed_size, force, err);
	IA_CSS_LEAVE_PRIVATE("ret=%d", reallocated);
	return reallocated;
}
/* Allocate the host-side descriptor plus one contiguous DDR buffer for
 * 3A (AE/AWB/AF) statistics, laid out as [dmem | vmem_hi | vmem_lo | hmem].
 * Returns NULL when 3A is disabled in @grid or on allocation failure.
 * NOTE(review): the early !grid->enable return happens before the
 * IA_CSS_LEAVE trace, so enter/leave traces are unbalanced there. */
struct ia_css_isp_3a_statistics *
ia_css_isp_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
{
	struct ia_css_isp_3a_statistics *me;

	IA_CSS_ENTER("grid=%p", grid);
	assert(grid);
	/* MW: Does "grid->enable" also control the histogram output ?? */
	if (!grid->enable)
		return NULL;
	me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
	if (!me)
		goto err;
	/* Statistics live either in DMEM or in VMEM, never both. */
	if (grid->use_dmem) {
		me->dmem_size = sizeof(struct ia_css_3a_output) *
				grid->aligned_width *
				grid->aligned_height;
	} else {
		me->vmem_size = ISP_S3ATBL_HI_LO_STRIDE_BYTES *
				grid->aligned_height;
	}
	me->hmem_size = sizeof_hmem(HMEM0_ID);
	/* All subsections need to be aligned to the system bus width */
	me->dmem_size = CEIL_MUL(me->dmem_size, HIVE_ISP_DDR_WORD_BYTES);
	me->vmem_size = CEIL_MUL(me->vmem_size, HIVE_ISP_DDR_WORD_BYTES);
	me->hmem_size = CEIL_MUL(me->hmem_size, HIVE_ISP_DDR_WORD_BYTES);
	/* vmem is counted twice: a "hi" and a "lo" table. */
	me->size = me->dmem_size + me->vmem_size * 2 + me->hmem_size;
	me->data_ptr = hmm_alloc(me->size);
	if (me->data_ptr == mmgr_NULL) {
		kvfree(me);
		me = NULL;
		goto err;
	}
	if (me->dmem_size)
		me->data.dmem.s3a_tbl = me->data_ptr;
	if (me->vmem_size) {
		me->data.vmem.s3a_tbl_hi = me->data_ptr + me->dmem_size;
		me->data.vmem.s3a_tbl_lo = me->data_ptr + me->dmem_size + me->vmem_size;
	}
	if (me->hmem_size)
		me->data_hmem.rgby_tbl = me->data_ptr + me->dmem_size + 2 * me->vmem_size;
err:
	IA_CSS_LEAVE("return=%p", me);
	return me;
}
/* Release a 3A statistics object: its DDR data buffer, then the
 * host-side descriptor. NULL is accepted and ignored. */
void
ia_css_isp_3a_statistics_free(struct ia_css_isp_3a_statistics *me)
{
	if (!me)
		return;
	hmm_free(me->data_ptr);
	kvfree(me);
}
/* SKC DVS statistics are not supported here; always returns NULL. */
struct ia_css_isp_skc_dvs_statistics *ia_css_skc_dvs_statistics_allocate(void)
{
	return NULL;
}
/* Allocate a metadata buffer descriptor plus its DDR backing store.
 * Returns NULL when metadata is disabled (size == 0) or on allocation
 * failure; partial allocations are released via ia_css_metadata_free(). */
struct ia_css_metadata *
ia_css_metadata_allocate(const struct ia_css_metadata_info *metadata_info)
{
	struct ia_css_metadata *md = NULL;

	IA_CSS_ENTER("");
	/* size 0 means the sensor/stream produces no embedded metadata. */
	if (metadata_info->size == 0)
		return NULL;
	md = kvmalloc(sizeof(*md), GFP_KERNEL);
	if (!md)
		goto error;
	md->info = *metadata_info;
	md->exp_id = 0;
	md->address = hmm_alloc(metadata_info->size);
	if (md->address == mmgr_NULL)
		goto error;
	IA_CSS_LEAVE("return=%p", md);
	return md;
error:
	ia_css_metadata_free(md);
	IA_CSS_LEAVE("return=%p", NULL);
	return NULL;
}
/* Release a metadata buffer: the DDR backing store first, then the
 * descriptor. NULL is accepted and ignored. */
void
ia_css_metadata_free(struct ia_css_metadata *me)
{
	if (!me)
		return;
	/* Tracing happens only for a real buffer so that disabled
	 * metadata does not log misleading free events (this was found
	 * confusing during development and debugging). */
	IA_CSS_ENTER("me=%p", me);
	hmm_free(me->address);
	kvfree(me);
	IA_CSS_LEAVE("void");
}
/* Free an array of @num_bufs metadata buffers; tolerates a NULL array
 * and NULL entries. */
void
ia_css_metadata_free_multiple(unsigned int num_bufs,
			      struct ia_css_metadata **bufs)
{
	unsigned int idx;

	if (!bufs)
		return;
	for (idx = 0; idx < num_bufs; idx++)
		ia_css_metadata_free(bufs[idx]);
}
/* Temporary debug counters tracking paramset buffer round-trips
 * (dequeue/enqueue); reset in sh_css_params_init() and
 * ia_css_stream_isp_parameters_init(). */
static unsigned int g_param_buffer_dequeue_count;
static unsigned int g_param_buffer_enqueue_count;
/* Create and default-initialize the stream-global ISP parameter set and
 * the per-pipe references to its DDR buffers. The per-frame set is
 * created lazily later. Returns 0 on success or a negative errno;
 * -EINVAL from the default-init step is reported but does not abort
 * the per-pipe reference setup (see comment below). */
int
ia_css_stream_isp_parameters_init(struct ia_css_stream *stream)
{
	int err = 0;
	unsigned int i;
	struct sh_css_ddr_address_map *ddr_ptrs;
	struct sh_css_ddr_address_map_size *ddr_ptrs_size;
	struct ia_css_isp_parameters *params;

	assert(stream);
	IA_CSS_ENTER_PRIVATE("void");
	if (!stream) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}
	/* TMP: tracking of paramsets */
	g_param_buffer_dequeue_count = 0;
	g_param_buffer_enqueue_count = 0;
	stream->per_frame_isp_params_configs = NULL;
	err = sh_css_create_isp_params(stream,
				       &stream->isp_params_configs);
	if (err)
		goto ERR;
	params = stream->isp_params_configs;
	if (!sh_css_init_isp_params_from_global(stream, params, true, NULL)) {
		/* we do not return the error immediately to enable internal
		 * firmware feature testing */
		err = -EINVAL;
	}
	ddr_ptrs = &params->ddr_ptrs;
	ddr_ptrs_size = &params->ddr_ptrs_size;
	/* create per pipe reference to general ddr_ptrs */
	for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
		ref_sh_css_ddr_address_map(ddr_ptrs, &params->pipe_ddr_ptrs[i]);
		params->pipe_ddr_ptrs_size[i] = *ddr_ptrs_size;
	}
ERR:
	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/* Fan a DVS(1) coefficient set out to the four SDIS sub-kernel
 * configurations (horizontal/vertical coefficients and projections). */
static void
ia_css_set_sdis_config(
    struct ia_css_isp_parameters *params,
    const struct ia_css_dvs_coefficients *dvs_coefs)
{
	ia_css_set_sdis_horicoef_config(params, dvs_coefs);
	ia_css_set_sdis_vertcoef_config(params, dvs_coefs);
	ia_css_set_sdis_horiproj_config(params, dvs_coefs);
	ia_css_set_sdis_vertproj_config(params, dvs_coefs);
}
/* Fan a DVS2 coefficient set out to the four SDIS2 sub-kernel
 * configurations (horizontal/vertical coefficients and projections). */
static void
ia_css_set_sdis2_config(
    struct ia_css_isp_parameters *params,
    const struct ia_css_dvs2_coefficients *dvs2_coefs)
{
	ia_css_set_sdis2_horicoef_config(params, dvs2_coefs);
	ia_css_set_sdis2_vertcoef_config(params, dvs2_coefs);
	ia_css_set_sdis2_horiproj_config(params, dvs2_coefs);
	ia_css_set_sdis2_vertproj_config(params, dvs2_coefs);
}
/* Allocate a zeroed ia_css_isp_parameters object together with the two
 * always-present DDR buffers (UDS params and MACC table), both entered
 * into the PARAM_BUFFER refcount pool.
 * On success *isp_params_out holds the new object and 0 is returned.
 * NOTE(review): when one of the DDR allocations fails, -ENOMEM is
 * returned but the params object and any successful allocation are NOT
 * freed here — presumably the caller's uninit path reclaims them via
 * the refcount pool; confirm before changing. */
static int
sh_css_create_isp_params(struct ia_css_stream *stream,
			 struct ia_css_isp_parameters **isp_params_out)
{
	bool succ = true;
	unsigned int i;
	struct sh_css_ddr_address_map *ddr_ptrs;
	struct sh_css_ddr_address_map_size *ddr_ptrs_size;
	int err;
	size_t params_size;
	struct ia_css_isp_parameters *params =
	    kvmalloc(sizeof(struct ia_css_isp_parameters), GFP_KERNEL);

	if (!params) {
		*isp_params_out = NULL;
		err = -ENOMEM;
		IA_CSS_ERROR("%s:%d error: cannot allocate memory", __FILE__, __LINE__);
		IA_CSS_LEAVE_ERR_PRIVATE(err);
		return err;
	} else {
		memset(params, 0, sizeof(struct ia_css_isp_parameters));
	}
	ddr_ptrs = &params->ddr_ptrs;
	ddr_ptrs_size = &params->ddr_ptrs_size;
	/* Redundant with the full memset above, but kept to make the
	 * per-pipe maps' initial state explicit. */
	for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
		memset(&params->pipe_ddr_ptrs[i], 0,
		       sizeof(params->pipe_ddr_ptrs[i]));
		memset(&params->pipe_ddr_ptrs_size[i], 0,
		       sizeof(params->pipe_ddr_ptrs_size[i]));
	}
	memset(ddr_ptrs, 0, sizeof(*ddr_ptrs));
	memset(ddr_ptrs_size, 0, sizeof(*ddr_ptrs_size));
	params_size = sizeof(params->uds);
	ddr_ptrs_size->isp_param = params_size;
	ddr_ptrs->isp_param =
	    ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
				      hmm_alloc(params_size));
	succ &= (ddr_ptrs->isp_param != mmgr_NULL);
	ddr_ptrs_size->macc_tbl = sizeof(struct ia_css_macc_table);
	ddr_ptrs->macc_tbl =
	    ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
				      hmm_alloc(sizeof(struct ia_css_macc_table)));
	succ &= (ddr_ptrs->macc_tbl != mmgr_NULL);
	*isp_params_out = params;
	if (!succ)
		return -ENOMEM;
	return 0;
}
/* Populate @params either with built-in defaults (use_default_config)
 * or by copying the stream's current global parameter set.
 * Returns false when a copy is requested but the stream has no global
 * params yet, or when the per-pipe DPC/BDS compatibility check fails;
 * @params may then be partially initialized (callers treat this as a
 * soft error, see comments at the call sites). */
static bool
sh_css_init_isp_params_from_global(struct ia_css_stream *stream,
				   struct ia_css_isp_parameters *params,
				   bool use_default_config,
				   struct ia_css_pipe *pipe_in)
{
	bool retval = true;
	int i = 0;
	bool is_dp_10bpp = true;
	unsigned int isp_pipe_version = ia_css_pipe_get_isp_pipe_version(
					    stream->pipes[0]);
	struct ia_css_isp_parameters *stream_params = stream->isp_params_configs;

	if (!use_default_config && !stream_params) {
		retval = false;
		goto exit;
	}
	params->output_frame = NULL;
	params->isp_parameters_id = 0;
	if (use_default_config) {
		/* Seed every kernel configuration from its built-in default. */
		ia_css_set_xnr3_config(params, &default_xnr3_config);
		sh_css_set_nr_config(params, &default_nr_config);
		sh_css_set_ee_config(params, &default_ee_config);
		/* The MACC table layout depends on the ISP pipe version. */
		if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1)
			sh_css_set_macc_table(params, &default_macc_table);
		else if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2)
			sh_css_set_macc_table(params, &default_macc2_table);
		sh_css_set_gamma_table(params, &default_gamma_table);
		sh_css_set_ctc_table(params, &default_ctc_table);
		sh_css_set_baa_config(params, &default_baa_config);
		sh_css_set_dz_config(params, &default_dz_config);
		/* ------ deprecated(bz675) : from ------ */
		sh_css_set_shading_settings(params, &default_shading_settings);
		/* ------ deprecated(bz675) : to ------ */
		ia_css_set_s3a_config(params, &default_3a_config);
		ia_css_set_wb_config(params, &default_wb_config);
		ia_css_set_csc_config(params, &default_cc_config);
		ia_css_set_tnr_config(params, &default_tnr_config);
		ia_css_set_ob_config(params, &default_ob_config);
		ia_css_set_dp_config(params, &default_dp_config);
		ia_css_set_param_exceptions(pipe_in, params);
		ia_css_set_de_config(params, &default_de_config);
		ia_css_set_gc_config(params, &default_gc_config);
		ia_css_set_anr_config(params, &default_anr_config);
		ia_css_set_anr2_config(params, &default_anr_thres);
		ia_css_set_ce_config(params, &default_ce_config);
		ia_css_set_xnr_table_config(params, &default_xnr_table);
		ia_css_set_ecd_config(params, &default_ecd_config);
		ia_css_set_ynr_config(params, &default_ynr_config);
		ia_css_set_fc_config(params, &default_fc_config);
		ia_css_set_cnr_config(params, &default_cnr_config);
		ia_css_set_macc_config(params, &default_macc_config);
		ia_css_set_ctc_config(params, &default_ctc_config);
		ia_css_set_aa_config(params, &default_aa_config);
		ia_css_set_r_gamma_config(params, &default_r_gamma_table);
		ia_css_set_g_gamma_config(params, &default_g_gamma_table);
		ia_css_set_b_gamma_config(params, &default_b_gamma_table);
		ia_css_set_yuv2rgb_config(params, &default_yuv2rgb_cc_config);
		ia_css_set_rgb2yuv_config(params, &default_rgb2yuv_cc_config);
		ia_css_set_xnr_config(params, &default_xnr_config);
		ia_css_set_sdis_config(params, &default_sdis_config);
		ia_css_set_sdis2_config(params, &default_sdis2_config);
		ia_css_set_formats_config(params, &default_formats_config);
		/* Tables without defaults start out empty but are marked
		 * changed so the first commit uploads a clean state. */
		params->fpn_config.data = NULL;
		params->config_changed[IA_CSS_FPN_ID] = true;
		params->fpn_config.enabled = 0;
		params->motion_config = default_motion_config;
		params->motion_config_changed = true;
		params->morph_table = NULL;
		params->morph_table_changed = true;
		params->sc_table = NULL;
		params->sc_table_changed = true;
		ia_css_sdis2_clear_coefficients(&params->dvs2_coefs);
		params->dvs2_coef_table_changed = true;
		ia_css_sdis_clear_coefficients(&params->dvs_coefs);
		params->dis_coef_table_changed = true;
	} else {
		/* Copy every kernel configuration from the stream-global set. */
		ia_css_set_xnr3_config(params, &stream_params->xnr3_config);
		sh_css_set_nr_config(params, &stream_params->nr_config);
		sh_css_set_ee_config(params, &stream_params->ee_config);
		/* NOTE(review): both pipe versions copy the same stored
		 * macc_table here; the stored table already reflects the
		 * version chosen at default-init time — confirm. */
		if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1)
			sh_css_set_macc_table(params, &stream_params->macc_table);
		else if (isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_2_2)
			sh_css_set_macc_table(params, &stream_params->macc_table);
		sh_css_set_gamma_table(params, &stream_params->gc_table);
		sh_css_set_ctc_table(params, &stream_params->ctc_table);
		sh_css_set_baa_config(params, &stream_params->bds_config);
		sh_css_set_dz_config(params, &stream_params->dz_config);
		/* ------ deprecated(bz675) : from ------ */
		sh_css_set_shading_settings(params, &stream_params->shading_settings);
		/* ------ deprecated(bz675) : to ------ */
		ia_css_set_s3a_config(params, &stream_params->s3a_config);
		ia_css_set_wb_config(params, &stream_params->wb_config);
		ia_css_set_csc_config(params, &stream_params->cc_config);
		ia_css_set_tnr_config(params, &stream_params->tnr_config);
		ia_css_set_ob_config(params, &stream_params->ob_config);
		ia_css_set_dp_config(params, &stream_params->dp_config);
		ia_css_set_de_config(params, &stream_params->de_config);
		ia_css_set_gc_config(params, &stream_params->gc_config);
		ia_css_set_anr_config(params, &stream_params->anr_config);
		ia_css_set_anr2_config(params, &stream_params->anr_thres);
		ia_css_set_ce_config(params, &stream_params->ce_config);
		ia_css_set_xnr_table_config(params, &stream_params->xnr_table);
		ia_css_set_ecd_config(params, &stream_params->ecd_config);
		ia_css_set_ynr_config(params, &stream_params->ynr_config);
		ia_css_set_fc_config(params, &stream_params->fc_config);
		ia_css_set_cnr_config(params, &stream_params->cnr_config);
		ia_css_set_macc_config(params, &stream_params->macc_config);
		ia_css_set_ctc_config(params, &stream_params->ctc_config);
		ia_css_set_aa_config(params, &stream_params->aa_config);
		ia_css_set_r_gamma_config(params, &stream_params->r_gamma_table);
		ia_css_set_g_gamma_config(params, &stream_params->g_gamma_table);
		ia_css_set_b_gamma_config(params, &stream_params->b_gamma_table);
		ia_css_set_yuv2rgb_config(params, &stream_params->yuv2rgb_cc_config);
		ia_css_set_rgb2yuv_config(params, &stream_params->rgb2yuv_cc_config);
		ia_css_set_xnr_config(params, &stream_params->xnr_config);
		ia_css_set_formats_config(params, &stream_params->formats_config);
		for (i = 0; i < stream->num_pipes; i++) {
			if (0 ==
			    sh_css_select_dp_10bpp_config(stream->pipes[i], &is_dp_10bpp)) {
				/* set the return value as false if both DPC and
				 * BDS is enabled by the user. But we do not return
				 * the value immediately to enable internal firmware
				 * feature testing. */
				retval = !is_dp_10bpp;
				/* FIXME: should it ignore this error? */
			} else {
				retval = false;
				goto exit;
			}
		}
		ia_css_set_param_exceptions(pipe_in, params);
		params->fpn_config.data = stream_params->fpn_config.data;
		params->config_changed[IA_CSS_FPN_ID] =
		    stream_params->config_changed[IA_CSS_FPN_ID];
		params->fpn_config.enabled = stream_params->fpn_config.enabled;
		sh_css_set_motion_vector(params, &stream_params->motion_config);
		sh_css_set_morph_table(params, stream_params->morph_table);
		if (stream_params->sc_table) {
			sh_css_set_shading_table(stream, params, stream_params->sc_table);
		} else {
			params->sc_table = NULL;
			params->sc_table_changed = true;
		}
		/* Only IA_CSS_PIPE_ID_VIDEO & IA_CSS_PIPE_ID_CAPTURE will support dvs_6axis_config*/
		for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
			if (stream_params->pipe_dvs_6axis_config[i]) {
				if (params->pipe_dvs_6axis_config[i]) {
					copy_dvs_6axis_table(params->pipe_dvs_6axis_config[i],
							     stream_params->pipe_dvs_6axis_config[i]);
				} else {
					params->pipe_dvs_6axis_config[i] =
					    generate_dvs_6axis_table_from_config(stream_params->pipe_dvs_6axis_config[i]);
				}
			}
		}
		ia_css_set_sdis_config(params, &stream_params->dvs_coefs);
		params->dis_coef_table_changed = stream_params->dis_coef_table_changed;
		ia_css_set_sdis2_config(params, &stream_params->dvs2_coefs);
		params->dvs2_coef_table_changed = stream_params->dvs2_coef_table_changed;
		params->sensor_binning = stream_params->sensor_binning;
	}
exit:
	return retval;
}
/* One-time module init of the parameter subsystem: allocate and zero
 * the per-pipe/per-stage SP and ISP stage descriptors in DDR, build the
 * static lookup tables, and allocate the shared SP DDR pointer map and
 * SP group block. Returns 0 or -ENOMEM (after rolling back via
 * sh_css_params_uninit() / ia_css_uninit()). */
int
sh_css_params_init(void)
{
	int i, p;

	IA_CSS_ENTER_PRIVATE("void");
	/* TMP: tracking of paramsets */
	g_param_buffer_dequeue_count = 0;
	g_param_buffer_enqueue_count = 0;
	for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++) {
		for (i = 0; i < SH_CSS_MAX_STAGES; i++) {
			xmem_sp_stage_ptrs[p][i] =
			    ia_css_refcount_increment(-1,
						      hmm_alloc(sizeof(struct sh_css_sp_stage)));
			xmem_isp_stage_ptrs[p][i] =
			    ia_css_refcount_increment(-1,
						      hmm_alloc(sizeof(struct sh_css_sp_stage)));
			if ((xmem_sp_stage_ptrs[p][i] == mmgr_NULL) ||
			    (xmem_isp_stage_ptrs[p][i] == mmgr_NULL)) {
				sh_css_params_uninit();
				IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
				return -ENOMEM;
			}
			hmm_set(xmem_sp_stage_ptrs[p][i], 0, sizeof(struct sh_css_sp_stage));
			hmm_set(xmem_isp_stage_ptrs[p][i], 0, sizeof(struct sh_css_sp_stage));
		}
	}
	ia_css_config_gamma_table();
	ia_css_config_ctc_table();
	ia_css_config_rgb_gamma_tables();
	ia_css_config_xnr_table();
	sp_ddr_ptrs = ia_css_refcount_increment(-1,
						hmm_alloc(CEIL_MUL(sizeof(struct sh_css_ddr_address_map),
								   HIVE_ISP_DDR_WORD_BYTES)));
	xmem_sp_group_ptrs = ia_css_refcount_increment(-1,
						       hmm_alloc(sizeof(struct sh_css_sp_group)));
	if ((sp_ddr_ptrs == mmgr_NULL) ||
	    (xmem_sp_group_ptrs == mmgr_NULL)) {
		ia_css_uninit();
		IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
		return -ENOMEM;
	}
	hmm_set(sp_ddr_ptrs, 0, CEIL_MUL(sizeof(struct sh_css_ddr_address_map),
					 HIVE_ISP_DDR_WORD_BYTES));
	hmm_set(xmem_sp_group_ptrs, 0, sizeof(struct sh_css_sp_group));
	IA_CSS_LEAVE_ERR_PRIVATE(0);
	return 0;
}
static void host_lut_store(const void *lut)
{
unsigned int i;
for (i = 0; i < N_GDC_ID; i++)
gdc_lut_store((gdc_ID_t)i, (const int (*)[HRT_GDC_N]) lut);
}
/* Install a caller-supplied BCI scaler LUT on @pipe: the previous
 * per-pipe LUT is freed, and a new DDR copy (converted to ISP layout)
 * is stored. Rejected with -ENOTSUPP if the pipe's stream has already
 * started (the old LUT is still freed in that case).
 * Returns 0, -EINVAL, -ENOTSUPP or -ENOMEM. */
int ia_css_pipe_set_bci_scaler_lut(struct ia_css_pipe *pipe,
				   const void *lut)
{
	int err = 0;
	bool stream_started = false;

	IA_CSS_ENTER("pipe=%p lut=%p", pipe, lut);
	if (!lut || !pipe) {
		err = -EINVAL;
		IA_CSS_LEAVE("err=%d", err);
		return err;
	}
	/* If the pipe belongs to a stream and the stream has started, it is not
	 * safe to store lut to gdc HW. If pipe->stream is NULL, then no stream is
	 * created with this pipe, so it is safe to do this operation as long as
	 * ia_css_init() has been called. */
	if (pipe->stream && pipe->stream->started) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
				    "unable to set scaler lut since stream has started\n");
		stream_started = true;
		err = -ENOTSUPP;
	}
	/* Free any existing tables. */
	if (pipe->scaler_pp_lut != mmgr_NULL) {
		hmm_free(pipe->scaler_pp_lut);
		pipe->scaler_pp_lut = mmgr_NULL;
	}
	if (!stream_started) {
		pipe->scaler_pp_lut = hmm_alloc(sizeof(zoom_table));
		if (pipe->scaler_pp_lut == mmgr_NULL) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
					    "unable to allocate scaler_pp_lut\n");
			err = -ENOMEM;
		} else {
			/* Convert to the interleaved format the ISP expects
			 * before storing to DDR. */
			gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])lut,
						      interleaved_lut_temp);
			hmm_store(pipe->scaler_pp_lut,
				  (int *)interleaved_lut_temp,
				  sizeof(zoom_table));
		}
	}
	IA_CSS_LEAVE("lut(%u) err=%d", pipe->scaler_pp_lut, err);
	return err;
}
/* Return the pipe's private scaler LUT if one was installed, otherwise
 * the shared default GDC LUT. @pipe must be non-NULL (asserted). */
ia_css_ptr sh_css_pipe_get_pp_gdc_lut(const struct ia_css_pipe *pipe)
{
	assert(pipe);

	if (pipe->scaler_pp_lut == mmgr_NULL)
		return sh_css_params_get_default_gdc_lut();
	return pipe->scaler_pp_lut;
}
/* Lazily create the shared default GDC LUT: push the built-in zoom
 * table to the GDC hardware, then keep an ISP-format copy in DDR.
 * Idempotent — returns 0 immediately when already mapped.
 * Returns 0 or -ENOMEM. */
int sh_css_params_map_and_store_default_gdc_lut(void)
{
	int err = 0;

	IA_CSS_ENTER_PRIVATE("void");
	/* Is table already mapped? Nothing to do if it is mapped. */
	if (default_gdc_lut != mmgr_NULL)
		return err;
	host_lut_store((void *)zoom_table);
	default_gdc_lut = hmm_alloc(sizeof(zoom_table));
	if (default_gdc_lut == mmgr_NULL)
		return -ENOMEM;
	gdc_lut_convert_to_isp_format((const int(*)[HRT_GDC_N])zoom_table,
				      interleaved_lut_temp);
	hmm_store(default_gdc_lut, (int *)interleaved_lut_temp,
		  sizeof(zoom_table));
	IA_CSS_LEAVE_PRIVATE("lut(%u) err=%d", default_gdc_lut, err);
	return err;
}
/* Release the shared default GDC LUT, if it was ever mapped. */
void sh_css_params_free_default_gdc_lut(void)
{
	IA_CSS_ENTER_PRIVATE("void");
	if (default_gdc_lut != mmgr_NULL) {
		hmm_free(default_gdc_lut);
		/* Reset so a later map call re-allocates. */
		default_gdc_lut = mmgr_NULL;
	}
	IA_CSS_LEAVE_PRIVATE("void");
}
/* Accessor for the lazily mapped default GDC LUT; mmgr_NULL until
 * sh_css_params_map_and_store_default_gdc_lut() has run. */
ia_css_ptr sh_css_params_get_default_gdc_lut(void)
{
	return default_gdc_lut;
}
/* Refcount-pool callback: releases one parameter-set info buffer. */
static void free_param_set_callback(
    ia_css_ptr ptr)
{
	IA_CSS_ENTER_PRIVATE("void");
	free_ia_css_isp_parameter_set_info(ptr);
	IA_CSS_LEAVE_PRIVATE("void");
}
/* Refcount-pool callback: releases one plain DDR buffer. */
static void free_buffer_callback(
    ia_css_ptr ptr)
{
	IA_CSS_ENTER_PRIVATE("void");
	hmm_free(ptr);
	IA_CSS_LEAVE_PRIVATE("void");
}
/* Drop all outstanding references in the parameter-set pool, freeing
 * each entry via free_param_set_callback(). */
void
sh_css_param_clear_param_sets(void)
{
	IA_CSS_ENTER_PRIVATE("void");
	ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_SET_POOL, &free_param_set_callback);
	IA_CSS_LEAVE_PRIVATE("void");
}
/*
 * Drop one reference on buffer @x in pool @id and null out the caller's
 * pointer so it cannot be used (or decremented) again.
 *
 * MW: we can define hmm_free() to return a NULL
 * then you can write ptr = hmm_free(ptr);
 */
#define safe_free(id, x)      \
	do {                        \
		ia_css_refcount_decrement(id, x);     \
		(x) = mmgr_NULL;          \
	} while (0)
/* Release every refcounted DDR buffer in @map. The map struct is
 * treated as a flat array of ia_css_ptr; the entry count is derived
 * from the matching _size struct (one size_t per pointer).
 * NOTE(review): this relies on the two structs staying field-for-field
 * in sync — confirm when either is modified. */
static void free_map(struct sh_css_ddr_address_map *map)
{
	unsigned int i;
	ia_css_ptr *addrs = (ia_css_ptr *)map;

	IA_CSS_ENTER_PRIVATE("void");
	/* free buffers */
	for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size) /
			 sizeof(size_t)); i++) {
		if (addrs[i] == mmgr_NULL)
			continue;
		safe_free(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]);
	}
	IA_CSS_LEAVE_PRIVATE("void");
}
/* Tear down the stream-global and (if present) per-frame ISP parameter
 * sets: per-pipe DDR maps, DVS 6-axis tables, FPN data, temporal
 * shading tables, and finally the parameter objects themselves. */
void
ia_css_stream_isp_parameters_uninit(struct ia_css_stream *stream)
{
	int i;
	struct ia_css_isp_parameters *params = stream->isp_params_configs;
	struct ia_css_isp_parameters *per_frame_params =
	    stream->per_frame_isp_params_configs;

	IA_CSS_ENTER_PRIVATE("void");
	if (!params) {
		IA_CSS_LEAVE_PRIVATE("isp_param_configs is NULL");
		return;
	}
	/* free existing ddr_ptr maps */
	for (i = 0; i < IA_CSS_PIPE_ID_NUM; i++) {
		free_map(&params->pipe_ddr_ptrs[i]);
		if (per_frame_params)
			free_map(&per_frame_params->pipe_ddr_ptrs[i]);
		/* Free up theDVS table memory blocks before recomputing new table */
		if (params->pipe_dvs_6axis_config[i])
			free_dvs_6axis_table(&params->pipe_dvs_6axis_config[i]);
		if (per_frame_params && per_frame_params->pipe_dvs_6axis_config[i])
			free_dvs_6axis_table(&per_frame_params->pipe_dvs_6axis_config[i]);
	}
	free_map(&params->ddr_ptrs);
	if (per_frame_params)
		free_map(&per_frame_params->ddr_ptrs);
	if (params->fpn_config.data) {
		kvfree(params->fpn_config.data);
		params->fpn_config.data = NULL;
	}
	/* Free up sc_config (temporal shading table) if it is allocated. */
	if (params->sc_config) {
		ia_css_shading_table_free(params->sc_config);
		params->sc_config = NULL;
	}
	if (per_frame_params) {
		if (per_frame_params->sc_config) {
			ia_css_shading_table_free(per_frame_params->sc_config);
			per_frame_params->sc_config = NULL;
		}
	}
	kvfree(params);
	kvfree(per_frame_params);
	stream->isp_params_configs = NULL;
	stream->per_frame_isp_params_configs = NULL;
	IA_CSS_LEAVE_PRIVATE("void");
}
/* Module-level teardown: drop the references taken in
 * sh_css_params_init() and then clear all refcount pools, freeing any
 * remaining buffers through the callbacks. */
void
sh_css_params_uninit(void)
{
	unsigned int p, i;

	IA_CSS_ENTER_PRIVATE("void");
	ia_css_refcount_decrement(-1, sp_ddr_ptrs);
	sp_ddr_ptrs = mmgr_NULL;
	ia_css_refcount_decrement(-1, xmem_sp_group_ptrs);
	xmem_sp_group_ptrs = mmgr_NULL;
	for (p = 0; p < IA_CSS_PIPE_ID_NUM; p++)
		for (i = 0; i < SH_CSS_MAX_STAGES; i++) {
			ia_css_refcount_decrement(-1, xmem_sp_stage_ptrs[p][i]);
			xmem_sp_stage_ptrs[p][i] = mmgr_NULL;
			ia_css_refcount_decrement(-1, xmem_isp_stage_ptrs[p][i]);
			xmem_isp_stage_ptrs[p][i] = mmgr_NULL;
		}
	/* go through the pools to clear references */
	ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_SET_POOL, &free_param_set_callback);
	ia_css_refcount_clear(IA_CSS_REFCOUNT_PARAM_BUFFER, &free_buffer_callback);
	ia_css_refcount_clear(-1, &free_buffer_callback);
	IA_CSS_LEAVE_PRIVATE("void");
}
/* Allocate a host-side buffer and copy one morph-table plane into it,
 * padding each row out to @aligned_width 16-bit entries (padding is zeroed
 * by the initial memset). Returns NULL on allocation failure.
 */
static struct ia_css_host_data *
convert_allocate_morph_plane(
    unsigned short *data,
    unsigned int width,
    unsigned int height,
    unsigned int aligned_width)
{
	struct ia_css_host_data *host_data;
	unsigned int row, col, pad, copy_width;
	unsigned int bytes;
	u16 *out;

	IA_CSS_ENTER_PRIVATE("void");

	/* currently we don't have morph table interpolation yet,
	 * so we allow a wider table to be used. This will be removed
	 * in the future. */
	if (width > aligned_width) {
		/* Source is wider than the target: truncate each row. */
		pad = 0;
		copy_width = aligned_width;
	} else {
		pad = aligned_width - width;
		copy_width = width;
	}

	bytes = height * (copy_width + pad) * sizeof(uint16_t);
	host_data = ia_css_host_data_allocate((size_t)bytes);
	if (!host_data) {
		IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
		return NULL;
	}

	out = (uint16_t *)host_data->address;
	memset(out, 0, (size_t)bytes);

	for (row = 0; row < height; row++) {
		for (col = 0; col < copy_width; col++)
			*out++ = (uint16_t)data[col];
		/* skip the zeroed padding and advance to the next source row */
		out += pad;
		data += width;
	}

	IA_CSS_LEAVE_PRIVATE("void");
	return host_data;
}
/* Convert one morph-table plane to the ISP layout and store it at DDR
 * address @dest. Returns 0 on success or -ENOMEM.
 */
static int
store_morph_plane(
    unsigned short *data,
    unsigned int width,
    unsigned int height,
    ia_css_ptr dest,
    unsigned int aligned_width)
{
	struct ia_css_host_data *host_buf;

	assert(dest != mmgr_NULL);

	host_buf = convert_allocate_morph_plane(data, width, height, aligned_width);
	if (!host_buf) {
		IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
		return -ENOMEM;
	}

	ia_css_params_store_ia_css_host_data(dest, host_buf);
	ia_css_host_data_free(host_buf);
	return 0;
}
/* Copy the per-stage UDS (up/down-scaling) parameter block to DDR. */
static void sh_css_update_isp_params_to_ddr(
    struct ia_css_isp_parameters *params,
    ia_css_ptr ddr_ptr)
{
	IA_CSS_ENTER_PRIVATE("void");

	assert(params);
	hmm_store(ddr_ptr, &params->uds, sizeof(params->uds));

	IA_CSS_LEAVE_PRIVATE("void");
}
/* Copy the PARAM-class initializer for one ISP memory of @binary to DDR.
 *
 * Fix: the pointer returned by ia_css_isp_param_get_mem_init() was
 * dereferenced (params->address) without any check; every other user of
 * such lookups in this file validates the result first. Guard the store
 * so a missing initializer cannot cause a NULL dereference.
 */
static void sh_css_update_isp_mem_params_to_ddr(
    const struct ia_css_binary *binary,
    ia_css_ptr ddr_mem_ptr,
    size_t size,
    enum ia_css_isp_memories mem)
{
	const struct ia_css_host_data *params;

	IA_CSS_ENTER_PRIVATE("void");

	params = ia_css_isp_param_get_mem_init(&binary->mem_params,
					       IA_CSS_PARAM_CLASS_PARAM, mem);
	assert(params);
	if (params)
		hmm_store(ddr_mem_ptr, params->address, size);

	IA_CSS_LEAVE_PRIVATE("void");
}
/* Drain the parameter-set return queues: every parameter set handed back
 * by the SP is acknowledged with a BUFFER_DEQUEUED event and its DDR
 * references are released. No-op when the SP is not running.
 */
void ia_css_dequeue_param_buffers(/*unsigned int pipe_num*/ void)
{
	enum sh_css_queue_id queues[3] = { IA_CSS_PARAMETER_SET_QUEUE_ID,
		IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID,
		SH_CSS_INVALID_QUEUE_ID
	};
	ia_css_ptr cpy;
	unsigned int i;

	IA_CSS_ENTER_PRIVATE("void");

	if (!sh_css_sp_is_running()) {
		IA_CSS_LEAVE_PRIVATE("sp is not running");
		/* SP is not running. The queues are not valid */
		return;
	}

	for (i = 0; queues[i] != SH_CSS_INVALID_QUEUE_ID; i++) {
		cpy = (ia_css_ptr)0;
		/* clean-up old copy */
		while (ia_css_bufq_dequeue_buffer(queues[i],
						  (uint32_t *)&cpy) == 0) {
			/* TMP: keep track of dequeued param set count
			 */
			g_param_buffer_dequeue_count++;
			ia_css_bufq_enqueue_psys_event(
			    IA_CSS_PSYS_SW_EVENT_BUFFER_DEQUEUED,
			    0,
			    queues[i],
			    0);
			IA_CSS_LOG("dequeued param set %x from %d, release ref", cpy, 0);
			free_ia_css_isp_parameter_set_info(cpy);
			cpy = (ia_css_ptr)0;
		}
	}

	IA_CSS_LEAVE_PRIVATE("void");
}
/* Run the per-kernel parameter processing for one pipeline stage:
 * handle the OB/S3A stream configs, copy the stage-specific UDS/crop
 * settings into the shared config, then invoke every kernel's process
 * function whose parameters changed (SC is skipped here; it is handled
 * on a temporary shading table by the caller).
 */
static void
process_kernel_parameters(unsigned int pipe_id,
			  struct ia_css_pipeline_stage *stage,
			  struct ia_css_isp_parameters *params,
			  unsigned int isp_pipe_version,
			  unsigned int raw_bit_depth)
{
	unsigned int kernel;

	(void)isp_pipe_version;
	(void)raw_bit_depth;

	sh_css_enable_pipeline(stage->binary);

	if (params->config_changed[IA_CSS_OB_ID])
		ia_css_ob_configure(&params->stream_configs.ob,
				    isp_pipe_version, raw_bit_depth);

	if (params->config_changed[IA_CSS_S3A_ID])
		ia_css_s3a_configure(raw_bit_depth);

	/* Copy stage uds parameters to config, since they can differ per stage.
	 */
	params->crop_config.crop_pos = params->uds[stage->stage_num].crop_pos;
	params->uds_config.crop_pos = params->uds[stage->stage_num].crop_pos;
	params->uds_config.uds = params->uds[stage->stage_num].uds;

	/* Call parameter process functions for all kernels */
	/* Skip SC, since that is called on a temp sc table */
	for (kernel = 0; kernel < IA_CSS_NUM_PARAMETER_IDS; kernel++) {
		if (kernel == IA_CSS_SC_ID)
			continue;
		if (params->config_changed[kernel])
			ia_css_kernel_process_param[kernel](pipe_id, stage, params);
	}
}
/*
 * Propagate the host-side ISP parameters to the SP for every pipe of the
 * stream that @curr_pipe belongs to.
 *
 * For each pipe: process per-kernel parameters for every stage, write the
 * changed tables to the pipe's DDR map, take a reference on that map, write
 * a parameter-set info record to DDR, and enqueue its handle to the SP
 * parameter queue (signalled with a BUFFER_ENQUEUED psys event). Old copies
 * returned by the SP are drained afterwards via
 * ia_css_dequeue_param_buffers().
 *
 * @curr_pipe: pipe whose stream is being updated (must be non-NULL).
 * @params:    host-side parameter state to commit.
 * @commit:    when false, return immediately without touching the SP.
 * @pipe_in:   when non-NULL, only this pipe of the stream is updated.
 *
 * Returns 0 on success, -EBUSY when the SP is not running, or an error
 * from the DDR-write/enqueue path. The *_changed flags in @params are
 * cleared only after all pipes were processed.
 */
int
sh_css_param_update_isp_params(struct ia_css_pipe *curr_pipe,
			       struct ia_css_isp_parameters *params,
			       bool commit,
			       struct ia_css_pipe *pipe_in)
{
	int err = 0;
	ia_css_ptr cpy;
	int i;
	unsigned int raw_bit_depth = 10;
	unsigned int isp_pipe_version = SH_CSS_ISP_PIPE_VERSION_1;
	bool acc_cluster_params_changed = false;
	unsigned int thread_id, pipe_num;

	(void)acc_cluster_params_changed;

	assert(curr_pipe);

	IA_CSS_ENTER_PRIVATE("pipe=%p, isp_parameters_id=%d", pipe_in, params->isp_parameters_id);
	raw_bit_depth = ia_css_stream_input_format_bits_per_pixel(curr_pipe->stream);

	/* now make the map available to the sp */
	if (!commit) {
		IA_CSS_LEAVE_ERR_PRIVATE(err);
		return err;
	}
	/* enqueue a copies of the mem_map to
	   the designated pipelines */
	for (i = 0; i < curr_pipe->stream->num_pipes; i++) {
		struct ia_css_pipe *pipe;
		struct sh_css_ddr_address_map *cur_map;
		struct sh_css_ddr_address_map_size *cur_map_size;
		struct ia_css_isp_parameter_set_info isp_params_info;
		struct ia_css_pipeline *pipeline;
		struct ia_css_pipeline_stage *stage;
		enum sh_css_queue_id queue_id;

		pipe = curr_pipe->stream->pipes[i];
		pipeline = ia_css_pipe_get_pipeline(pipe);
		pipe_num = ia_css_pipe_get_pipe_num(pipe);
		isp_pipe_version = ia_css_pipe_get_isp_pipe_version(pipe);
		ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
		/* Per-frame parameter sets go to a dedicated queue. */
		ia_css_query_internal_queue_id(params->output_frame
					       ? IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET
					       : IA_CSS_BUFFER_TYPE_PARAMETER_SET,
					       thread_id, &queue_id);
		if (!sh_css_sp_is_running()) {
			/* SP is not running. The queues are not valid */
			err = -EBUSY;
			break;
		}
		cur_map = &params->pipe_ddr_ptrs[pipeline->pipe_id];
		cur_map_size = &params->pipe_ddr_ptrs_size[pipeline->pipe_id];

		/* TODO: Normally, zoom and motion parameters shouldn't
		 * be part of "isp_params" as it is resolution/pipe dependent
		 * Therefore, move the zoom config elsewhere (e.g. shading
		 * table can be taken as an example! @GC
		 * */
		{
			/* we have to do this per pipeline because */
			/* the processing is a.o. resolution dependent */
			err = ia_css_process_zoom_and_motion(params,
							     pipeline->stages);
			if (err)
				/* NOTE(review): this exit skips the
				 * IA_CSS_LEAVE trace that every other exit
				 * of this function emits — confirm whether
				 * that is intentional. */
				return err;
		}
		/* check if to actually update the parameters for this pipe */
		/* When API change is implemented making good distinction between
		 * stream config and pipe config this skipping code can be moved out of the #ifdef */
		if (pipe_in && (pipe != pipe_in)) {
			IA_CSS_LOG("skipping pipe %p", pipe);
			continue;
		}

		/* BZ 125915, should be moved till after "update other buff" */
		/* update the other buffers to the pipe specific copies */
		for (stage = pipeline->stages; stage; stage = stage->next) {
			unsigned int mem;

			if (!stage || !stage->binary)
				continue;
			process_kernel_parameters(pipeline->pipe_id,
						  stage, params,
						  isp_pipe_version, raw_bit_depth);
			err = sh_css_params_write_to_ddr_internal(
			    pipe,
			    pipeline->pipe_id,
			    params,
			    stage,
			    cur_map,
			    cur_map_size);
			if (err)
				break;
			/* The per-memory changed flags are per pipe/stage,
			 * so they can be cleared right after the write. */
			for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
				params->isp_mem_params_changed
				[pipeline->pipe_id][stage->stage_num][mem] = false;
			}
		} /* for */
		if (err)
			break;
		/* update isp_params to pipe specific copies */
		if (params->isp_params_changed) {
			reallocate_buffer(&cur_map->isp_param,
					  &cur_map_size->isp_param,
					  cur_map_size->isp_param,
					  true,
					  &err);
			if (err)
				break;
			sh_css_update_isp_params_to_ddr(params, cur_map->isp_param);
		}

		/* last make referenced copy */
		err = ref_sh_css_ddr_address_map(
		    cur_map,
		    &isp_params_info.mem_map);
		if (err)
			break;

		/* Update Parameters ID */
		isp_params_info.isp_parameters_id = params->isp_parameters_id;

		/* Update output frame pointer */
		isp_params_info.output_frame_ptr =
		    (params->output_frame) ? params->output_frame->data : mmgr_NULL;

		/* now write the copy to ddr */
		err = write_ia_css_isp_parameter_set_info_to_ddr(&isp_params_info, &cpy);
		if (err)
			break;

		/* enqueue the set to sp */
		IA_CSS_LOG("queue param set %x to %d", cpy, thread_id);

		err = ia_css_bufq_enqueue_buffer(thread_id, queue_id, (uint32_t)cpy);
		if (err) {
			/* Enqueue failed: drop the references taken above. */
			free_ia_css_isp_parameter_set_info(cpy);
			IA_CSS_LOG("pfp: FAILED to add config id %d for OF %d to q %d on thread %d",
				   isp_params_info.isp_parameters_id,
				   isp_params_info.output_frame_ptr,
				   queue_id, thread_id);
			break;
		} else {
			/* TMP: check discrepancy between nr of enqueued
			 * parameter sets and dequeued sets
			 */
			g_param_buffer_enqueue_count++;
			assert(g_param_buffer_enqueue_count < g_param_buffer_dequeue_count + 50);
			/*
			 * Tell the SP which queues are not empty,
			 * by sending the software event.
			 */
			if (!sh_css_sp_is_running()) {
				/* SP is not running. The queues are not valid */
				IA_CSS_LEAVE_ERR_PRIVATE(-EBUSY);
				return -EBUSY;
			}
			ia_css_bufq_enqueue_psys_event(
			    IA_CSS_PSYS_SW_EVENT_BUFFER_ENQUEUED,
			    (uint8_t)thread_id,
			    (uint8_t)queue_id,
			    0);
			IA_CSS_LOG("pfp: added config id %d for OF %d to q %d on thread %d",
				   isp_params_info.isp_parameters_id,
				   isp_params_info.output_frame_ptr,
				   queue_id, thread_id);
		}
		/* clean-up old copy */
		ia_css_dequeue_param_buffers(/*pipe_num*/);
		params->pipe_dvs_6axis_config_changed[pipeline->pipe_id] = false;
	} /* end for each 'active' pipeline */
	/* clear the changed flags after all params
	   for all pipelines have been updated */
	params->isp_params_changed = false;
	params->sc_table_changed = false;
	params->dis_coef_table_changed = false;
	params->dvs2_coef_table_changed = false;
	params->morph_table_changed = false;
	params->dz_config_changed = false;
	params->motion_config_changed = false;
	/* ------ deprecated(bz675) : from ------ */
	params->shading_settings_changed = false;
	/* ------ deprecated(bz675) : to ------ */

	memset(&params->config_changed[0], 0, sizeof(params->config_changed));

	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/*
 * Write all tables needed by @stage's binary for pipe @pipe_id into the
 * pipe's DDR address map, (re)allocating DDR buffers as needed.
 *
 * Handled tables, each gated on the binary's enable bits and the
 * corresponding *_changed flag (or a buffer reallocation): FPN, shading
 * (three variants: user table, generated identity table, or the deprecated
 * bz675 full-resolution conversion), MACC, DVS 6-axis, morph (ca_gdc)
 * planes, and finally the per-memory PARAM-class initializers.
 *
 * Returns 0 on success or a negative errno from allocation/store failures.
 */
static int
sh_css_params_write_to_ddr_internal(
    struct ia_css_pipe *pipe,
    unsigned int pipe_id,
    struct ia_css_isp_parameters *params,
    const struct ia_css_pipeline_stage *stage,
    struct sh_css_ddr_address_map *ddr_map,
    struct sh_css_ddr_address_map_size *ddr_map_size)
{
	int err;
	const struct ia_css_binary *binary;

	unsigned int stage_num;
	unsigned int mem;
	bool buff_realloced;

	/* struct is > 128 bytes so it should not be on stack (see checkpatch) */
	static struct ia_css_macc_table converted_macc_table;

	IA_CSS_ENTER_PRIVATE("void");
	assert(params);
	assert(ddr_map);
	assert(ddr_map_size);
	assert(stage);

	binary = stage->binary;
	assert(binary);

	stage_num = stage->stage_num;

	/* --- Fixed pattern noise table --- */
	if (binary->info->sp.enable.fpnr) {
		buff_realloced = reallocate_buffer(&ddr_map->fpn_tbl,
						   &ddr_map_size->fpn_tbl,
						   fpntbl_bytes(binary),
						   params->config_changed[IA_CSS_FPN_ID],
						   &err);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
		if (params->config_changed[IA_CSS_FPN_ID] || buff_realloced) {
			if (params->fpn_config.enabled) {
				err = store_fpntbl(params, ddr_map->fpn_tbl);
				if (err) {
					IA_CSS_LEAVE_ERR_PRIVATE(err);
					return err;
				}
			}
		}
	}

	/* --- Shading correction table --- */
	if (binary->info->sp.enable.sc) {
		u32 enable_conv;

		enable_conv = params->shading_settings.enable_shading_table_conversion;

		buff_realloced = reallocate_buffer(&ddr_map->sc_tbl,
						   &ddr_map_size->sc_tbl,
						   sctbl_bytes(binary),
						   params->sc_table_changed,
						   &err);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}

		if (params->shading_settings_changed ||
		    params->sc_table_changed || buff_realloced) {
			if (enable_conv == 0) {
				if (params->sc_table) {
					/* store the shading table to ddr */
					err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_table);
					if (err) {
						IA_CSS_LEAVE_ERR_PRIVATE(err);
						return err;
					}
					/* set sc_config to isp */
					params->sc_config = (struct ia_css_shading_table *)params->sc_table;
					ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);
					/* sc_config only borrowed sc_table; detach again */
					params->sc_config = NULL;
				} else {
					/* generate the identical shading table */
					if (params->sc_config) {
						ia_css_shading_table_free(params->sc_config);
						params->sc_config = NULL;
					}
					sh_css_params_shading_id_table_generate(&params->sc_config,
										binary->sctbl_width_per_color,
										binary->sctbl_height);
					if (!params->sc_config) {
						IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
						return -ENOMEM;
					}

					/* store the shading table to ddr */
					err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_config);
					if (err) {
						IA_CSS_LEAVE_ERR_PRIVATE(err);
						return err;
					}

					/* set sc_config to isp */
					ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);

					/* free the shading table */
					ia_css_shading_table_free(params->sc_config);
					params->sc_config = NULL;
				}
			} else { /* legacy */
				/* ------ deprecated(bz675) : from ------ */
				/* shading table is full resolution, reduce */
				if (params->sc_config) {
					ia_css_shading_table_free(params->sc_config);
					params->sc_config = NULL;
				}
				prepare_shading_table(
				    (const struct ia_css_shading_table *)params->sc_table,
				    params->sensor_binning,
				    &params->sc_config,
				    binary, pipe->required_bds_factor);
				if (!params->sc_config) {
					IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
					return -ENOMEM;
				}

				/* store the shading table to ddr */
				err = ia_css_params_store_sctbl(stage, ddr_map->sc_tbl, params->sc_config);
				if (err) {
					IA_CSS_LEAVE_ERR_PRIVATE(err);
					return err;
				}

				/* set sc_config to isp */
				ia_css_kernel_process_param[IA_CSS_SC_ID](pipe_id, stage, params);

				/* free the shading table */
				ia_css_shading_table_free(params->sc_config);
				params->sc_config = NULL;
				/* ------ deprecated(bz675) : to ------ */
			}
		}
	}

	/* --- MACC table: reorder the axes (idx_map) and, on pipe version 1,
	 *     rescale the coefficients via sDIGIT_FITTING --- */
	if (params->config_changed[IA_CSS_MACC_ID] && binary->info->sp.enable.macc) {
		unsigned int i, j, idx;
		/* Maps host axis order to ISP axis order. */
		static const unsigned int idx_map[] = {
			0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8
		};

		for (i = 0; i < IA_CSS_MACC_NUM_AXES; i++) {
			idx = 4 * idx_map[i];
			j = 4 * i;

			if (binary->info->sp.pipeline.isp_pipe_version == SH_CSS_ISP_PIPE_VERSION_1) {
				converted_macc_table.data[idx] =
				    (int16_t)sDIGIT_FITTING(params->macc_table.data[j],
							    13, SH_CSS_MACC_COEF_SHIFT);
				converted_macc_table.data[idx + 1] =
				    (int16_t)sDIGIT_FITTING(params->macc_table.data[j + 1],
							    13, SH_CSS_MACC_COEF_SHIFT);
				converted_macc_table.data[idx + 2] =
				    (int16_t)sDIGIT_FITTING(params->macc_table.data[j + 2],
							    13, SH_CSS_MACC_COEF_SHIFT);
				converted_macc_table.data[idx + 3] =
				    (int16_t)sDIGIT_FITTING(params->macc_table.data[j + 3],
							    13, SH_CSS_MACC_COEF_SHIFT);
			} else if (binary->info->sp.pipeline.isp_pipe_version ==
				   SH_CSS_ISP_PIPE_VERSION_2_2) {
				converted_macc_table.data[idx] =
				    params->macc_table.data[j];
				converted_macc_table.data[idx + 1] =
				    params->macc_table.data[j + 1];
				converted_macc_table.data[idx + 2] =
				    params->macc_table.data[j + 2];
				converted_macc_table.data[idx + 3] =
				    params->macc_table.data[j + 3];
			}
		}

		reallocate_buffer(&ddr_map->macc_tbl,
				  &ddr_map_size->macc_tbl,
				  ddr_map_size->macc_tbl,
				  true,
				  &err);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
		hmm_store(ddr_map->macc_tbl,
			  converted_macc_table.data,
			  sizeof(converted_macc_table.data));
	}

	/* --- DVS 6-axis warp table --- */
	if (binary->info->sp.enable.dvs_6axis) {
		/* because UV is packed into the Y plane, calc total
		 * YYU size = /2 gives size of UV-only,
		 * total YYU size = UV-only * 3.
		 */
		buff_realloced = reallocate_buffer(
				     &ddr_map->dvs_6axis_params_y,
				     &ddr_map_size->dvs_6axis_params_y,
				     (size_t)((DVS_6AXIS_BYTES(binary) / 2) * 3),
				     params->pipe_dvs_6axis_config_changed[pipe_id],
				     &err);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}

		if (params->pipe_dvs_6axis_config_changed[pipe_id] || buff_realloced) {
			const struct ia_css_frame_info *dvs_in_frame_info;

			if (stage->args.delay_frames[0]) {
				/*When delay frames are present(as in case of video),
				they are used for dvs. Configure DVS using those params*/
				dvs_in_frame_info = &stage->args.delay_frames[0]->frame_info;
			} else {
				/*Otherwise, use input frame to configure DVS*/
				dvs_in_frame_info = &stage->args.in_frame->frame_info;
			}

			/* Generate default DVS unity table on start up*/
			if (!params->pipe_dvs_6axis_config[pipe_id]) {
				struct ia_css_resolution dvs_offset = {0};

				dvs_offset.width = (PIX_SHIFT_FILTER_RUN_IN_X + binary->dvs_envelope.width) / 2;
				dvs_offset.height = (PIX_SHIFT_FILTER_RUN_IN_Y + binary->dvs_envelope.height) / 2;

				params->pipe_dvs_6axis_config[pipe_id] =
				    generate_dvs_6axis_table(&binary->out_frame_info[0].res, &dvs_offset);
				if (!params->pipe_dvs_6axis_config[pipe_id]) {
					IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
					return -ENOMEM;
				}
				params->pipe_dvs_6axis_config_changed[pipe_id] = true;

				/* NOTE(review): the store below only runs in
				 * this "default table just generated" branch;
				 * a table updated later (config_changed or
				 * buff_realloced with an existing table) is
				 * never written to DDR here. Looks like the
				 * store belongs after this if-block — confirm
				 * against the intended behavior. */
				store_dvs_6axis_config(params->pipe_dvs_6axis_config[pipe_id],
						       binary,
						       dvs_in_frame_info,
						       ddr_map->dvs_6axis_params_y);
				params->isp_params_changed = true;
			}
		}
	}

	/* --- Morph (GDC distortion correction) tables: six planes of X and
	 *     Y coordinates each --- */
	if (binary->info->sp.enable.ca_gdc) {
		unsigned int i;
		ia_css_ptr *virt_addr_tetra_x[
		IA_CSS_MORPH_TABLE_NUM_PLANES];
		size_t *virt_size_tetra_x[
		IA_CSS_MORPH_TABLE_NUM_PLANES];
		ia_css_ptr *virt_addr_tetra_y[
		IA_CSS_MORPH_TABLE_NUM_PLANES];
		size_t *virt_size_tetra_y[
		IA_CSS_MORPH_TABLE_NUM_PLANES];

		virt_addr_tetra_x[0] = &ddr_map->tetra_r_x;
		virt_addr_tetra_x[1] = &ddr_map->tetra_gr_x;
		virt_addr_tetra_x[2] = &ddr_map->tetra_gb_x;
		virt_addr_tetra_x[3] = &ddr_map->tetra_b_x;
		virt_addr_tetra_x[4] = &ddr_map->tetra_ratb_x;
		virt_addr_tetra_x[5] = &ddr_map->tetra_batr_x;

		virt_size_tetra_x[0] = &ddr_map_size->tetra_r_x;
		virt_size_tetra_x[1] = &ddr_map_size->tetra_gr_x;
		virt_size_tetra_x[2] = &ddr_map_size->tetra_gb_x;
		virt_size_tetra_x[3] = &ddr_map_size->tetra_b_x;
		virt_size_tetra_x[4] = &ddr_map_size->tetra_ratb_x;
		virt_size_tetra_x[5] = &ddr_map_size->tetra_batr_x;

		virt_addr_tetra_y[0] = &ddr_map->tetra_r_y;
		virt_addr_tetra_y[1] = &ddr_map->tetra_gr_y;
		virt_addr_tetra_y[2] = &ddr_map->tetra_gb_y;
		virt_addr_tetra_y[3] = &ddr_map->tetra_b_y;
		virt_addr_tetra_y[4] = &ddr_map->tetra_ratb_y;
		virt_addr_tetra_y[5] = &ddr_map->tetra_batr_y;

		virt_size_tetra_y[0] = &ddr_map_size->tetra_r_y;
		virt_size_tetra_y[1] = &ddr_map_size->tetra_gr_y;
		virt_size_tetra_y[2] = &ddr_map_size->tetra_gb_y;
		virt_size_tetra_y[3] = &ddr_map_size->tetra_b_y;
		virt_size_tetra_y[4] = &ddr_map_size->tetra_ratb_y;
		virt_size_tetra_y[5] = &ddr_map_size->tetra_batr_y;

		buff_realloced = false;
		for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
			buff_realloced |=
			    reallocate_buffer(virt_addr_tetra_x[i],
					      virt_size_tetra_x[i],
					      morph_plane_bytes(binary),
					      params->morph_table_changed,
					      &err);
			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
			buff_realloced |=
			    reallocate_buffer(virt_addr_tetra_y[i],
					      virt_size_tetra_y[i],
					      morph_plane_bytes(binary),
					      params->morph_table_changed,
					      &err);
			if (err) {
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
		}
		if (params->morph_table_changed || buff_realloced) {
			const struct ia_css_morph_table *table = params->morph_table;
			struct ia_css_morph_table *id_table = NULL;

			/* A user table that is too small for this binary is
			 * ignored and replaced by the default table. */
			if ((table) &&
			    (table->width < binary->morph_tbl_width ||
			     table->height < binary->morph_tbl_height)) {
				table = NULL;
			}
			if (!table) {
				err = sh_css_params_default_morph_table(&id_table,
									binary);
				if (err) {
					IA_CSS_LEAVE_ERR_PRIVATE(err);
					return err;
				}
				table = id_table;
			}

			for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
				store_morph_plane(table->coordinates_x[i],
						  table->width,
						  table->height,
						  *virt_addr_tetra_x[i],
						  binary->morph_tbl_aligned_width);
				store_morph_plane(table->coordinates_y[i],
						  table->width,
						  table->height,
						  *virt_addr_tetra_y[i],
						  binary->morph_tbl_aligned_width);
			}
			if (id_table)
				ia_css_morph_table_free(id_table);
		}
	}

	/* After special cases like SC, FPN since they may change parameters */
	for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++) {
		const struct ia_css_isp_data *isp_data =
		    ia_css_isp_param_get_isp_mem_init(&binary->info->sp.mem_initializers,
						      IA_CSS_PARAM_CLASS_PARAM, mem);
		size_t size = isp_data->size;

		if (!size) continue;
		buff_realloced = reallocate_buffer(&ddr_map->isp_mem_param[stage_num][mem],
						   &ddr_map_size->isp_mem_param[stage_num][mem],
						   size,
						   params->isp_mem_params_changed[pipe_id][stage_num][mem],
						   &err);
		if (err) {
			IA_CSS_LEAVE_ERR_PRIVATE(err);
			return err;
		}
		if (params->isp_mem_params_changed[pipe_id][stage_num][mem] || buff_realloced) {
			sh_css_update_isp_mem_params_to_ddr(binary,
							    ddr_map->isp_mem_param[stage_num][mem],
							    ddr_map_size->isp_mem_param[stage_num][mem], mem);
		}
	}

	IA_CSS_LEAVE_ERR_PRIVATE(0);
	return 0;
}
const struct ia_css_fpn_table *ia_css_get_fpn_table(struct ia_css_stream
*stream)
{
struct ia_css_isp_parameters *params;
IA_CSS_ENTER_LEAVE("void");
assert(stream);
params = stream->isp_params_configs;
return ¶ms->fpn_config;
}
/*
 * Return the shading table currently in effect for @stream.
 *
 * Two paths, selected by shading_settings.enable_shading_table_conversion:
 *  - conversion disabled (0): return the user-supplied sc_table, or, when
 *    none was set, generate an identity table sized for the stream's
 *    shading-correction binary and cache it in params->sc_config;
 *  - conversion enabled (deprecated bz675 path): reduce the full-resolution
 *    sc_table via prepare_shading_table() and cache the result.
 *
 * Returns NULL when the stream has no ISP parameter state or no shading
 * binary. The cached sc_config is owned by params and freed in
 * ia_css_stream_isp_parameters_uninit (see comments below).
 */
struct ia_css_shading_table *ia_css_get_shading_table(struct ia_css_stream
	*stream)
{
	struct ia_css_shading_table *table = NULL;
	struct ia_css_isp_parameters *params;

	IA_CSS_ENTER("void");

	assert(stream);
	params = stream->isp_params_configs;
	if (!params)
		return NULL;

	if (params->shading_settings.enable_shading_table_conversion == 0) {
		if (params->sc_table) {
			table = (struct ia_css_shading_table *)params->sc_table;
		} else {
			const struct ia_css_binary *binary
			    = ia_css_stream_get_shading_correction_binary(stream);

			if (binary) {
				/* generate the identical shading table */
				if (params->sc_config) {
					ia_css_shading_table_free(params->sc_config);
					params->sc_config = NULL;
				}
				sh_css_params_shading_id_table_generate(&params->sc_config,
									binary->sctbl_width_per_color,
									binary->sctbl_height);
				table = params->sc_config;
				/* The sc_config will be freed in the
				 * ia_css_stream_isp_parameters_uninit function. */
			}
		}
	} else {
		/* ------ deprecated(bz675) : from ------ */
		const struct ia_css_binary *binary
		    = ia_css_stream_get_shading_correction_binary(stream);
		struct ia_css_pipe *pipe;

		/**********************************************************************/
		/* following code is copied from function ia_css_stream_get_shading_correction_binary()
		 * to match with the binary */
		pipe = stream->pipes[0];

		if (stream->num_pipes == 2) {
			assert(stream->pipes[1]);
			if (stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_VIDEO ||
			    stream->pipes[1]->config.mode == IA_CSS_PIPE_MODE_PREVIEW)
				pipe = stream->pipes[1];
		}
		/**********************************************************************/
		if (binary) {
			if (params->sc_config) {
				ia_css_shading_table_free(params->sc_config);
				params->sc_config = NULL;
			}
			prepare_shading_table(
			    (const struct ia_css_shading_table *)params->sc_table,
			    params->sensor_binning,
			    &params->sc_config,
			    binary, pipe->required_bds_factor);

			table = params->sc_config;
			/* The sc_config will be freed in the
			 * ia_css_stream_isp_parameters_uninit function. */
		}
		/* ------ deprecated(bz675) : to ------ */
	}

	IA_CSS_LEAVE("table=%p", table);
	return table;
}
/* Flush the global SP group structure to its DDR copy and return its
 * DDR address.
 */
ia_css_ptr sh_css_store_sp_group_to_ddr(void)
{
	IA_CSS_ENTER_LEAVE_PRIVATE("void");

	hmm_store(xmem_sp_group_ptrs, &sh_css_sp_group,
		  sizeof(struct sh_css_sp_group));

	return xmem_sp_group_ptrs;
}
/* Flush the global SP stage structure to the DDR slot of (pipe, stage)
 * and return that slot's DDR address.
 */
ia_css_ptr sh_css_store_sp_stage_to_ddr(
    unsigned int pipe,
    unsigned int stage)
{
	ia_css_ptr slot = xmem_sp_stage_ptrs[pipe][stage];

	IA_CSS_ENTER_LEAVE_PRIVATE("void");

	hmm_store(slot, &sh_css_sp_stage, sizeof(struct sh_css_sp_stage));

	return slot;
}
/* Flush the global ISP stage structure to the DDR slot of (pipe, stage)
 * and return that slot's DDR address.
 */
ia_css_ptr sh_css_store_isp_stage_to_ddr(
    unsigned int pipe,
    unsigned int stage)
{
	ia_css_ptr slot = xmem_isp_stage_ptrs[pipe][stage];

	IA_CSS_ENTER_LEAVE_PRIVATE("void");

	hmm_store(slot, &sh_css_isp_stage, sizeof(struct sh_css_isp_stage));

	return slot;
}
/* Copy DDR address map @map into @out, taking a PARAM_BUFFER refcount on
 * every non-NULL entry. Always returns 0 (kept int for call-site symmetry).
 */
static int ref_sh_css_ddr_address_map(
    struct sh_css_ddr_address_map *map,
    struct sh_css_ddr_address_map *out)
{
	/* we will use a union to copy things; overlaying an array
	   with the struct; that way adding fields in the struct
	   will keep things working, and we will not get type errors.
	*/
	union {
		struct sh_css_ddr_address_map *map;
		ia_css_ptr *addrs;
	} src, dst;
	unsigned int idx;
	int err = 0;

	IA_CSS_ENTER_PRIVATE("void");

	assert(map);
	assert(out);

	src.map = map;
	dst.map = out;

	/* The size struct must stay parallel to the address struct. */
	assert(sizeof(struct sh_css_ddr_address_map_size) / sizeof(size_t) ==
	       sizeof(struct sh_css_ddr_address_map) / sizeof(ia_css_ptr));

	/* copy map using size info */
	for (idx = 0; idx < (sizeof(struct sh_css_ddr_address_map_size) /
			     sizeof(size_t)); idx++) {
		dst.addrs[idx] = (src.addrs[idx] == mmgr_NULL)
				 ? mmgr_NULL
				 : ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_BUFFER,
							     src.addrs[idx]);
	}

	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/* Allocate a DDR copy of parameter-set info @me (registered in the
 * PARAM_SET_POOL refcount pool), store it, and return its address in
 * *@out. Returns 0 on success or -ENOMEM when the allocation failed.
 */
static int write_ia_css_isp_parameter_set_info_to_ddr(
    struct ia_css_isp_parameter_set_info *me,
    ia_css_ptr *out)
{
	int err = 0;

	IA_CSS_ENTER_PRIVATE("void");

	assert(me);
	assert(out);

	*out = ia_css_refcount_increment(IA_CSS_REFCOUNT_PARAM_SET_POOL,
					 hmm_alloc(sizeof(struct ia_css_isp_parameter_set_info)));
	if (*out != mmgr_NULL)
		hmm_store(*out, me,
			  sizeof(struct ia_css_isp_parameter_set_info));
	else
		err = -ENOMEM;

	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/*
 * Release a DDR parameter-set info record @ptr: load its address map back
 * from DDR, drop one PARAM_BUFFER reference on every valid entry, then drop
 * the PARAM_SET_POOL reference on the record itself.
 *
 * Returns 0 on success or -EINVAL when @ptr (or an embedded address) fails
 * refcount validation; invalid embedded addresses are skipped but the
 * remaining entries are still released.
 */
static int
free_ia_css_isp_parameter_set_info(
    ia_css_ptr ptr)
{
	int err = 0;
	struct ia_css_isp_parameter_set_info isp_params_info;
	unsigned int i;
	/* Walk the embedded address map as a flat array of DDR pointers;
	 * relies on mem_map being first in the set-info struct. */
	ia_css_ptr *addrs = (ia_css_ptr *)&isp_params_info.mem_map;

	IA_CSS_ENTER_PRIVATE("ptr = %u", ptr);

	/* sanity check - ptr must be valid */
	if (!ia_css_refcount_is_valid(ptr)) {
		IA_CSS_ERROR("%s: IA_CSS_REFCOUNT_PARAM_SET_POOL(0x%x) invalid arg", __func__,
			     ptr);
		err = -EINVAL;
		IA_CSS_LEAVE_ERR_PRIVATE(err);
		return err;
	}

	/* Only the mem_map portion is needed to release the buffers. */
	hmm_load(ptr, &isp_params_info.mem_map, sizeof(struct sh_css_ddr_address_map));
	/* copy map using size info */
	for (i = 0; i < (sizeof(struct sh_css_ddr_address_map_size) /
			 sizeof(size_t)); i++) {
		if (addrs[i] == mmgr_NULL)
			continue;

		/* sanity check - ptr must be valid */
		if (!ia_css_refcount_is_valid(addrs[i])) {
			IA_CSS_ERROR("%s: IA_CSS_REFCOUNT_PARAM_BUFFER(0x%x) invalid arg", __func__,
				     ptr);
			err = -EINVAL;
			continue;
		}

		ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_BUFFER, addrs[i]);
	}
	ia_css_refcount_decrement(IA_CSS_REFCOUNT_PARAM_SET_POOL, ptr);

	IA_CSS_LEAVE_ERR_PRIVATE(err);
	return err;
}
/* Mark every parameter of @stream as changed so the next commit recomputes
 * and re-stores all derived ISP parameters; also drops the cached DVS
 * 6-axis tables so they are regenerated.
 */
void
sh_css_invalidate_params(struct ia_css_stream *stream)
{
	struct ia_css_isp_parameters *params;
	unsigned int pipe, stage, mem;

	IA_CSS_ENTER_PRIVATE("void");

	assert(stream);
	params = stream->isp_params_configs;

	params->isp_params_changed = true;
	for (pipe = 0; pipe < IA_CSS_PIPE_ID_NUM; pipe++)
		for (stage = 0; stage < SH_CSS_MAX_STAGES; stage++)
			for (mem = 0; mem < N_IA_CSS_MEMORIES; mem++)
				params->isp_mem_params_changed[pipe][stage][mem] = true;

	memset(&params->config_changed[0], 1, sizeof(params->config_changed));
	params->dis_coef_table_changed = true;
	params->dvs2_coef_table_changed = true;
	params->morph_table_changed = true;
	params->sc_table_changed = true;
	params->dz_config_changed = true;
	params->motion_config_changed = true;

	/* Free the cached DVS table memory blocks before recomputing
	 * new tables. */
	for (pipe = 0; pipe < IA_CSS_PIPE_ID_NUM; pipe++) {
		if (params->pipe_dvs_6axis_config[pipe]) {
			free_dvs_6axis_table(&params->pipe_dvs_6axis_config[pipe]);
			params->pipe_dvs_6axis_config_changed[pipe] = true;
		}
	}

	IA_CSS_LEAVE_PRIVATE("void");
}
/*
 * Compute the UDS (up/down-scaler) zoom factors/center and the SP output
 * crop position for one binary.
 *
 * @info:           binary info providing the enable bits and cropping.
 * @in_frame_info:  input frame resolution.
 * @out_frame_info: output frame resolution.
 * @dvs_env:        DVS envelope (extra pixels around the active window).
 * @zoom:           digital zoom factors (dx/dy, HRT_GDC_N == 1.0).
 * @motion_vector:  DVS motion vector, clipped to half the usable envelope.
 * @uds:            out: zoom factors and scaling center (xc/yc).
 * @sp_out_crop_pos: out: crop offset handed to the SP.
 * @enable_zoom:    when false, dx/dy are forced to HRT_GDC_N (no zoom).
 *
 * Three DVS sub-cases: UDS without fixed downscale, fixed downscale (ds),
 * and crop-only ("video nodz"). Without a DVS envelope, the center of the
 * input frame and the binary's left/top cropping are used.
 */
void
sh_css_update_uds_and_crop_info(
    const struct ia_css_binary_info *info,
    const struct ia_css_frame_info *in_frame_info,
    const struct ia_css_frame_info *out_frame_info,
    const struct ia_css_resolution *dvs_env,
    const struct ia_css_dz_config *zoom,
    const struct ia_css_vector *motion_vector,
    struct sh_css_uds_info *uds,		/* out */
    struct sh_css_crop_pos *sp_out_crop_pos,	/* out */
    bool enable_zoom)
{
	IA_CSS_ENTER_PRIVATE("void");

	assert(info);
	assert(in_frame_info);
	assert(out_frame_info);
	assert(dvs_env);
	assert(zoom);
	assert(motion_vector);
	assert(uds);
	assert(sp_out_crop_pos);

	uds->curr_dx = enable_zoom ? (uint16_t)zoom->dx : HRT_GDC_N;
	uds->curr_dy = enable_zoom ? (uint16_t)zoom->dy : HRT_GDC_N;

	if (info->enable.dvs_envelope) {
		unsigned int crop_x = 0,
		    crop_y = 0,
		    uds_xc = 0,
		    uds_yc = 0,
		    env_width, env_height;
		int half_env_x, half_env_y;
		int motion_x = motion_vector->x;
		int motion_y = motion_vector->y;
		bool upscale_x = in_frame_info->res.width < out_frame_info->res.width;
		bool upscale_y = in_frame_info->res.height < out_frame_info->res.height;

		if (info->enable.uds && !info->enable.ds) {
			/**
			 * we calculate with the envelope that we can actually
			 * use, the min dvs envelope is for the filter
			 * initialization.
			 */
			env_width = dvs_env->width -
				    SH_CSS_MIN_DVS_ENVELOPE;
			env_height = dvs_env->height -
				     SH_CSS_MIN_DVS_ENVELOPE;
			half_env_x = env_width / 2;
			half_env_y = env_height / 2;
			/**
			 * for digital zoom, we use the dvs envelope and make
			 * sure that we don't include the 8 leftmost pixels or
			 * 8 topmost rows.
			 */
			if (upscale_x) {
				uds_xc = (in_frame_info->res.width
					  + env_width
					  + SH_CSS_MIN_DVS_ENVELOPE) / 2;
			} else {
				uds_xc = (out_frame_info->res.width
					  + env_width) / 2
					 + SH_CSS_MIN_DVS_ENVELOPE;
			}
			if (upscale_y) {
				uds_yc = (in_frame_info->res.height
					  + env_height
					  + SH_CSS_MIN_DVS_ENVELOPE) / 2;
			} else {
				uds_yc = (out_frame_info->res.height
					  + env_height) / 2
					 + SH_CSS_MIN_DVS_ENVELOPE;
			}
			/* clip the motion vector to +/- half the envelope */
			motion_x = clamp(motion_x, -half_env_x, half_env_x);
			motion_y = clamp(motion_y, -half_env_y, half_env_y);
			uds_xc += motion_x;
			uds_yc += motion_y;
			/* uds can be pipelined, remove top lines */
			crop_y = 2;
		} else if (info->enable.ds) {
			env_width = dvs_env->width;
			env_height = dvs_env->height;
			half_env_x = env_width / 2;
			half_env_y = env_height / 2;
			/* clip the motion vector to +/- half the envelope */
			motion_x = clamp(motion_x, -half_env_x, half_env_x);
			motion_y = clamp(motion_y, -half_env_y, half_env_y);
			/* for video with downscaling, the envelope is included
			    in the input resolution. */
			uds_xc = in_frame_info->res.width / 2 + motion_x;
			uds_yc = in_frame_info->res.height / 2 + motion_y;
			crop_x = info->pipeline.left_cropping;
			/* ds == 2 (yuv_ds) can be pipelined, remove top
			   lines */
			if (info->enable.ds & 1)
				crop_y = info->pipeline.top_cropping;
			else
				crop_y = 2;
		} else {
			/* video nodz: here we can only crop. We make sure we
			   crop at least the first 8x8 pixels away. */
			env_width = dvs_env->width -
				    SH_CSS_MIN_DVS_ENVELOPE;
			env_height = dvs_env->height -
				     SH_CSS_MIN_DVS_ENVELOPE;
			half_env_x = env_width / 2;
			half_env_y = env_height / 2;
			motion_x = clamp(motion_x, -half_env_x, half_env_x);
			motion_y = clamp(motion_y, -half_env_y, half_env_y);
			crop_x = SH_CSS_MIN_DVS_ENVELOPE
				 + half_env_x + motion_x;
			crop_y = SH_CSS_MIN_DVS_ENVELOPE
				 + half_env_y + motion_y;
		}

		/* Must enforce that the crop position is even */
		crop_x = EVEN_FLOOR(crop_x);
		crop_y = EVEN_FLOOR(crop_y);
		uds_xc = EVEN_FLOOR(uds_xc);
		uds_yc = EVEN_FLOOR(uds_yc);

		uds->xc = (uint16_t)uds_xc;
		uds->yc = (uint16_t)uds_yc;
		sp_out_crop_pos->x = (uint16_t)crop_x;
		sp_out_crop_pos->y = (uint16_t)crop_y;
	} else {
		/* for down scaling, we always use the center of the image */
		uds->xc = (uint16_t)in_frame_info->res.width / 2;
		uds->yc = (uint16_t)in_frame_info->res.height / 2;
		sp_out_crop_pos->x = (uint16_t)info->pipeline.left_cropping;
		sp_out_crop_pos->y = (uint16_t)info->pipeline.top_cropping;
	}
	IA_CSS_LEAVE_PRIVATE("void");
}
/*
 * Compute the UDS (up/down-scaler) scale factors (curr_dx/curr_dy), the
 * scaling center (xc/yc) and the SP output crop position from a caller
 * supplied zoom region.
 *
 * Returns 0 on success, -EINVAL when the zoom region does not fit inside
 * pipe_in_res or when a DVS envelope is enabled (this code path does not
 * support zoom regions together with a DVS envelope).
 *
 * Fix: the early -EINVAL returns now emit IA_CSS_LEAVE_PRIVATE(), matching
 * the IA_CSS_ENTER_PRIVATE() at the top and the regular exit path, so the
 * enter/leave trace log stays balanced.
 */
static int
sh_css_update_uds_and_crop_info_based_on_zoom_region(
    const struct ia_css_binary_info *info,
    const struct ia_css_frame_info *in_frame_info,
    const struct ia_css_frame_info *out_frame_info,
    const struct ia_css_resolution *dvs_env,
    const struct ia_css_dz_config *zoom,
    const struct ia_css_vector *motion_vector,
    struct sh_css_uds_info *uds, /* out */
    struct sh_css_crop_pos *sp_out_crop_pos, /* out */
    struct ia_css_resolution pipe_in_res,
    bool enable_zoom)
{
	unsigned int x0 = 0, y0 = 0, x1 = 0, y1 = 0;
	int err = 0;
	/* Note:
	 * Filter_Envelope = 0 for NND/LUT
	 * Filter_Envelope = 1 for BCI
	 * Filter_Envelope = 3 for BLI
	 * Currently, not considering this filter envelope because, In uds.sp.c is recalculating
	 * the dx/dy based on filter envelope and other information (ia_css_uds_sp_scale_params)
	 * Ideally, That should be done on host side not on sp side.
	 */
	unsigned int filter_envelope = 0;

	IA_CSS_ENTER_PRIVATE("void");
	assert(info);
	assert(in_frame_info);
	assert(out_frame_info);
	assert(dvs_env);
	assert(zoom);
	assert(motion_vector);
	assert(uds);
	assert(sp_out_crop_pos);

	/* Zoom region corners in pipeline input coordinates: (x0,y0)..(x1,y1) */
	x0 = zoom->zoom_region.origin.x;
	y0 = zoom->zoom_region.origin.y;
	x1 = zoom->zoom_region.resolution.width + x0;
	y1 = zoom->zoom_region.resolution.height + y0;

	/* Reject degenerate regions and regions outside the input frame */
	if ((x0 > x1) || (y0 > y1) || (x1 > pipe_in_res.width) || (y1 > pipe_in_res.height)) {
		IA_CSS_LEAVE_PRIVATE("void");
		return -EINVAL;
	}

	/* With zoom disabled, force unity scaling (HRT_GDC_N == 1.0) */
	if (!enable_zoom) {
		uds->curr_dx = HRT_GDC_N;
		uds->curr_dy = HRT_GDC_N;
	}

	if (info->enable.dvs_envelope) {
		/* Zoom region is only supported by the UDS module on ISP
		 * 2 and higher. It is not supported in video mode on ISP 1 */
		IA_CSS_LEAVE_PRIVATE("void");
		return -EINVAL;
	} else {
		if (enable_zoom) {
			/* A. Calculate dx/dy based on crop region using in_frame_info
			 * Scale the crop region if in_frame_info to the stage is not same as
			 * actual effective input of the pipeline
			 */
			if (in_frame_info->res.width != pipe_in_res.width ||
			    in_frame_info->res.height != pipe_in_res.height) {
				x0 = (x0 * in_frame_info->res.width) / (pipe_in_res.width);
				y0 = (y0 * in_frame_info->res.height) / (pipe_in_res.height);
				x1 = (x1 * in_frame_info->res.width) / (pipe_in_res.width);
				y1 = (y1 * in_frame_info->res.height) / (pipe_in_res.height);
			}
			uds->curr_dx =
			    ((x1 - x0 - filter_envelope) * HRT_GDC_N) / in_frame_info->res.width;
			uds->curr_dy =
			    ((y1 - y0 - filter_envelope) * HRT_GDC_N) / in_frame_info->res.height;
			/* B. Calculate xc/yc based on crop region */
			/* NOTE(review): the (uint16_t) cast binds to x0/y0
			 * only; presumably harmless because xc/yc appear to
			 * be 16-bit fields (see casts elsewhere) - confirm. */
			uds->xc = (uint16_t)x0 + (((x1) - (x0)) / 2);
			uds->yc = (uint16_t)y0 + (((y1) - (y0)) / 2);
		} else {
			/* Zoom disabled: scale around the image center */
			uds->xc = (uint16_t)in_frame_info->res.width / 2;
			uds->yc = (uint16_t)in_frame_info->res.height / 2;
		}
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "uds->curr_dx=%d, uds->xc=%d, uds->yc=%d\n",
				    uds->curr_dx, uds->xc, uds->yc);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x0=%d, y0=%d, x1=%d, y1=%d\n",
				    x0, y0, x1, y1);
		sp_out_crop_pos->x = (uint16_t)info->pipeline.left_cropping;
		sp_out_crop_pos->y = (uint16_t)info->pipeline.top_cropping;
	}
	IA_CSS_LEAVE_PRIVATE("void");
	return err;
}
/*
 * Allocate a 3A statistics container sized for @grid.
 *
 * The grid descriptor is copied into the result and determines the size of
 * the data buffer; the rgby histogram buffer is a raw byte dump sized by
 * the HMEM section.
 *
 * Returns the container on success or NULL on allocation failure (any
 * partially allocated members are released).
 */
struct ia_css_3a_statistics *
ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid)
{
	struct ia_css_3a_statistics *me;
	int grid_size;

	IA_CSS_ENTER("grid=%p", grid);
	assert(grid);
	me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
	if (!me)
		goto err;
	me->grid = *grid;
	grid_size = grid->width * grid->height;
	me->data = kvmalloc(grid_size * sizeof(*me->data), GFP_KERNEL);
	if (!me->data)
		goto err;
	/* No weighted histogram, no structure, treat the histogram data as a byte dump in a byte array */
	me->rgby_data = kvmalloc(sizeof_hmem(HMEM0_ID), GFP_KERNEL);
	/*
	 * Fix: this allocation was previously unchecked, so a NULL
	 * rgby_data could escape inside an otherwise valid object.
	 */
	if (!me->rgby_data)
		goto err;

	IA_CSS_LEAVE("return=%p", me);
	return me;
err:
	ia_css_3a_statistics_free(me);

	IA_CSS_LEAVE("return=%p", NULL);
	return NULL;
}
/* Release a 3A statistics container; a NULL argument is a no-op. */
void
ia_css_3a_statistics_free(struct ia_css_3a_statistics *me)
{
	if (!me)
		return;

	kvfree(me->rgby_data);
	kvfree(me->data);
	kvfree(me);
}
struct ia_css_dvs_statistics *
ia_css_dvs_statistics_allocate(const struct ia_css_dvs_grid_info *grid)
{
struct ia_css_dvs_statistics *me;
assert(grid);
me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
if (!me)
goto err;
me->grid = *grid;
me->hor_proj = kvmalloc(grid->height * IA_CSS_DVS_NUM_COEF_TYPES *
sizeof(*me->hor_proj), GFP_KERNEL);
if (!me->hor_proj)
goto err;
me->ver_proj = kvmalloc(grid->width * IA_CSS_DVS_NUM_COEF_TYPES *
sizeof(*me->ver_proj), GFP_KERNEL);
if (!me->ver_proj)
goto err;
return me;
err:
ia_css_dvs_statistics_free(me);
return NULL;
}
/* Release a DVS statistics container; a NULL argument is a no-op. */
void
ia_css_dvs_statistics_free(struct ia_css_dvs_statistics *me)
{
	if (!me)
		return;

	kvfree(me->hor_proj);
	kvfree(me->ver_proj);
	kvfree(me);
}
struct ia_css_dvs_coefficients *
ia_css_dvs_coefficients_allocate(const struct ia_css_dvs_grid_info *grid)
{
struct ia_css_dvs_coefficients *me;
assert(grid);
me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
if (!me)
goto err;
me->grid = *grid;
me->hor_coefs = kvmalloc(grid->num_hor_coefs *
IA_CSS_DVS_NUM_COEF_TYPES *
sizeof(*me->hor_coefs), GFP_KERNEL);
if (!me->hor_coefs)
goto err;
me->ver_coefs = kvmalloc(grid->num_ver_coefs *
IA_CSS_DVS_NUM_COEF_TYPES *
sizeof(*me->ver_coefs), GFP_KERNEL);
if (!me->ver_coefs)
goto err;
return me;
err:
ia_css_dvs_coefficients_free(me);
return NULL;
}
/* Release a DVS coefficients container; a NULL argument is a no-op. */
void
ia_css_dvs_coefficients_free(struct ia_css_dvs_coefficients *me)
{
	if (!me)
		return;

	kvfree(me->hor_coefs);
	kvfree(me->ver_coefs);
	kvfree(me);
}
struct ia_css_dvs2_statistics *
ia_css_dvs2_statistics_allocate(const struct ia_css_dvs_grid_info *grid)
{
struct ia_css_dvs2_statistics *me;
assert(grid);
me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
if (!me)
goto err;
me->grid = *grid;
me->hor_prod.odd_real = kvmalloc(grid->aligned_width *
grid->aligned_height *
sizeof(*me->hor_prod.odd_real),
GFP_KERNEL);
if (!me->hor_prod.odd_real)
goto err;
me->hor_prod.odd_imag = kvmalloc(grid->aligned_width *
grid->aligned_height *
sizeof(*me->hor_prod.odd_imag),
GFP_KERNEL);
if (!me->hor_prod.odd_imag)
goto err;
me->hor_prod.even_real = kvmalloc(grid->aligned_width *
grid->aligned_height *
sizeof(*me->hor_prod.even_real),
GFP_KERNEL);
if (!me->hor_prod.even_real)
goto err;
me->hor_prod.even_imag = kvmalloc(grid->aligned_width *
grid->aligned_height *
sizeof(*me->hor_prod.even_imag),
GFP_KERNEL);
if (!me->hor_prod.even_imag)
goto err;
me->ver_prod.odd_real = kvmalloc(grid->aligned_width *
grid->aligned_height *
sizeof(*me->ver_prod.odd_real),
GFP_KERNEL);
if (!me->ver_prod.odd_real)
goto err;
me->ver_prod.odd_imag = kvmalloc(grid->aligned_width *
grid->aligned_height *
sizeof(*me->ver_prod.odd_imag),
GFP_KERNEL);
if (!me->ver_prod.odd_imag)
goto err;
me->ver_prod.even_real = kvmalloc(grid->aligned_width *
grid->aligned_height *
sizeof(*me->ver_prod.even_real),
GFP_KERNEL);
if (!me->ver_prod.even_real)
goto err;
me->ver_prod.even_imag = kvmalloc(grid->aligned_width *
grid->aligned_height *
sizeof(*me->ver_prod.even_imag),
GFP_KERNEL);
if (!me->ver_prod.even_imag)
goto err;
return me;
err:
ia_css_dvs2_statistics_free(me);
return NULL;
}
/* Release a DVS2 statistics container; a NULL argument is a no-op. */
void
ia_css_dvs2_statistics_free(struct ia_css_dvs2_statistics *me)
{
	if (!me)
		return;

	kvfree(me->hor_prod.odd_real);
	kvfree(me->hor_prod.odd_imag);
	kvfree(me->hor_prod.even_real);
	kvfree(me->hor_prod.even_imag);
	kvfree(me->ver_prod.odd_real);
	kvfree(me->ver_prod.odd_imag);
	kvfree(me->ver_prod.even_real);
	kvfree(me->ver_prod.even_imag);
	kvfree(me);
}
struct ia_css_dvs2_coefficients *
ia_css_dvs2_coefficients_allocate(const struct ia_css_dvs_grid_info *grid)
{
struct ia_css_dvs2_coefficients *me;
assert(grid);
me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
if (!me)
goto err;
me->grid = *grid;
me->hor_coefs.odd_real = kvmalloc(grid->num_hor_coefs *
sizeof(*me->hor_coefs.odd_real),
GFP_KERNEL);
if (!me->hor_coefs.odd_real)
goto err;
me->hor_coefs.odd_imag = kvmalloc(grid->num_hor_coefs *
sizeof(*me->hor_coefs.odd_imag),
GFP_KERNEL);
if (!me->hor_coefs.odd_imag)
goto err;
me->hor_coefs.even_real = kvmalloc(grid->num_hor_coefs *
sizeof(*me->hor_coefs.even_real),
GFP_KERNEL);
if (!me->hor_coefs.even_real)
goto err;
me->hor_coefs.even_imag = kvmalloc(grid->num_hor_coefs *
sizeof(*me->hor_coefs.even_imag),
GFP_KERNEL);
if (!me->hor_coefs.even_imag)
goto err;
me->ver_coefs.odd_real = kvmalloc(grid->num_ver_coefs *
sizeof(*me->ver_coefs.odd_real),
GFP_KERNEL);
if (!me->ver_coefs.odd_real)
goto err;
me->ver_coefs.odd_imag = kvmalloc(grid->num_ver_coefs *
sizeof(*me->ver_coefs.odd_imag),
GFP_KERNEL);
if (!me->ver_coefs.odd_imag)
goto err;
me->ver_coefs.even_real = kvmalloc(grid->num_ver_coefs *
sizeof(*me->ver_coefs.even_real),
GFP_KERNEL);
if (!me->ver_coefs.even_real)
goto err;
me->ver_coefs.even_imag = kvmalloc(grid->num_ver_coefs *
sizeof(*me->ver_coefs.even_imag),
GFP_KERNEL);
if (!me->ver_coefs.even_imag)
goto err;
return me;
err:
ia_css_dvs2_coefficients_free(me);
return NULL;
}
/* Release a DVS2 coefficients container; a NULL argument is a no-op. */
void
ia_css_dvs2_coefficients_free(struct ia_css_dvs2_coefficients *me)
{
	if (!me)
		return;

	kvfree(me->hor_coefs.odd_real);
	kvfree(me->hor_coefs.odd_imag);
	kvfree(me->hor_coefs.even_real);
	kvfree(me->hor_coefs.even_imag);
	kvfree(me->ver_coefs.odd_real);
	kvfree(me->ver_coefs.odd_imag);
	kvfree(me->ver_coefs.even_real);
	kvfree(me->ver_coefs.even_imag);
	kvfree(me);
}
struct ia_css_dvs_6axis_config *
ia_css_dvs2_6axis_config_allocate(const struct ia_css_stream *stream)
{
struct ia_css_dvs_6axis_config *dvs_config = NULL;
struct ia_css_isp_parameters *params = NULL;
unsigned int width_y;
unsigned int height_y;
unsigned int width_uv;
unsigned int height_uv;
assert(stream);
params = stream->isp_params_configs;
/* Backward compatibility by default consider pipe as Video*/
if (!params || !params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO])
goto err;
dvs_config = kvcalloc(1, sizeof(struct ia_css_dvs_6axis_config),
GFP_KERNEL);
if (!dvs_config)
goto err;
dvs_config->width_y = width_y =
params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->width_y;
dvs_config->height_y = height_y =
params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->height_y;
dvs_config->width_uv = width_uv =
params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->width_uv;
dvs_config->height_uv = height_uv =
params->pipe_dvs_6axis_config[IA_CSS_PIPE_ID_VIDEO]->height_uv;
IA_CSS_LOG("table Y: W %d H %d", width_y, height_y);
IA_CSS_LOG("table UV: W %d H %d", width_uv, height_uv);
dvs_config->xcoords_y = kvmalloc(width_y * height_y * sizeof(uint32_t),
GFP_KERNEL);
if (!dvs_config->xcoords_y)
goto err;
dvs_config->ycoords_y = kvmalloc(width_y * height_y * sizeof(uint32_t),
GFP_KERNEL);
if (!dvs_config->ycoords_y)
goto err;
dvs_config->xcoords_uv = kvmalloc(width_uv * height_uv *
sizeof(uint32_t),
GFP_KERNEL);
if (!dvs_config->xcoords_uv)
goto err;
dvs_config->ycoords_uv = kvmalloc(width_uv * height_uv *
sizeof(uint32_t),
GFP_KERNEL);
if (!dvs_config->ycoords_uv)
goto err;
return dvs_config;
err:
ia_css_dvs2_6axis_config_free(dvs_config);
return NULL;
}
/* Release a 6-axis DVS configuration; a NULL argument is a no-op. */
void
ia_css_dvs2_6axis_config_free(struct ia_css_dvs_6axis_config *dvs_6axis_config)
{
	if (!dvs_6axis_config)
		return;

	kvfree(dvs_6axis_config->xcoords_y);
	kvfree(dvs_6axis_config->ycoords_y);
	kvfree(dvs_6axis_config->xcoords_uv);
	kvfree(dvs_6axis_config->ycoords_uv);
	kvfree(dvs_6axis_config);
}
/*
 * Enable or disable digital zoom on the capture_pp stage of the stream's
 * capture pipe.  No-op when @stream is NULL, when the stream has no capture
 * pipe, or when the capture pipeline has no capture_pp stage.
 */
void
ia_css_en_dz_capt_pipe(struct ia_css_stream *stream, bool enable)
{
	int i;

	if (!stream)
		return;

	for (i = 0; i < stream->num_pipes; i++) {
		struct ia_css_pipeline *pipeline;
		struct ia_css_pipeline_stage *stage;

		pipeline = ia_css_pipe_get_pipeline(stream->pipes[i]);
		if (pipeline->pipe_id != IA_CSS_PIPE_ID_CAPTURE)
			continue;

		/* Only touch the stage if the lookup succeeded (returns 0) */
		if (!ia_css_pipeline_get_stage(pipeline,
					       IA_CSS_BINARY_MODE_CAPTURE_PP,
					       &stage))
			stage->enable_zoom = enable;
		break;
	}
}
| linux-master | drivers/staging/media/atomisp/pci/sh_css_params.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "system_local.h"
/*
 * Base addresses of the ISP subsystem hardware blocks.
 * NOTE(review): these look like fixed MMIO offsets inside the ISP address
 * space - confirm against the hardware specification before changing.
 */
/* ISP */
const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
	0x0000000000020000ULL
};
const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
	0x0000000000200000ULL
};
const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
	0x0000000000100000ULL
};
/* SP */
const hrt_address SP_CTRL_BASE[N_SP_ID] = {
	0x0000000000010000ULL
};
const hrt_address SP_DMEM_BASE[N_SP_ID] = {
	0x0000000000300000ULL
};
/* MMU */
/*
 * MMU0_ID: The data MMU
 * MMU1_ID: The icache MMU
 */
const hrt_address MMU_BASE[N_MMU_ID] = {
	0x0000000000070000ULL,
	0x00000000000A0000ULL
};
/* DMA */
const hrt_address DMA_BASE[N_DMA_ID] = {
	0x0000000000040000ULL
};
const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
	0x00000000000CA000ULL
};
/* IRQ */
const hrt_address IRQ_BASE[N_IRQ_ID] = {
	0x0000000000000500ULL,
	0x0000000000030A00ULL,
	0x000000000008C000ULL,
	0x0000000000090200ULL
};
/*
	0x0000000000000500ULL};
 */
/* GDC */
const hrt_address GDC_BASE[N_GDC_ID] = {
	0x0000000000050000ULL,
	0x0000000000060000ULL
};
/* FIFO_MONITOR (not a subset of GP_DEVICE) */
const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
	0x0000000000000000ULL
};
/*
const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
	0x0000000000000000ULL};
const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
	0x0000000000090000ULL};
 */
/* GP_DEVICE (single base for all separate GP_REG instances) */
const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
	0x0000000000000000ULL
};
/*GP TIMER , all timer registers are inter-twined,
 * so, having multiple base addresses for
 * different timers does not help*/
const hrt_address GP_TIMER_BASE =
    (hrt_address)0x0000000000000600ULL;
/* GPIO */
const hrt_address GPIO_BASE[N_GPIO_ID] = {
	0x0000000000000400ULL
};
/* TIMED_CTRL */
const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
	0x0000000000000100ULL
};
/* INPUT_FORMATTER */
const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
	0x0000000000030000ULL,
	0x0000000000030200ULL,
	0x0000000000030400ULL,
	0x0000000000030600ULL
}; /* memcpy() */
/* INPUT_SYSTEM */
const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
	0x0000000000080000ULL
};
/* 0x0000000000081000ULL, */ /* capture A */
/* 0x0000000000082000ULL, */ /* capture B */
/* 0x0000000000083000ULL, */ /* capture C */
/* 0x0000000000084000ULL, */ /* Acquisition */
/* 0x0000000000085000ULL, */ /* DMA */
/* 0x0000000000089000ULL, */ /* ctrl */
/* 0x000000000008A000ULL, */ /* GP regs */
/* 0x000000000008B000ULL, */ /* FIFO */
/* 0x000000000008C000ULL, */ /* IRQ */
/* RX, the MIPI lane control regs start at offset 0 */
const hrt_address RX_BASE[N_RX_ID] = {
	0x0000000000080100ULL
};
/* IBUF_CTRL, part of the Input System 2401 */
const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
	0x00000000000C1800ULL,	/* ibuf controller A */
	0x00000000000C3800ULL,	/* ibuf controller B */
	0x00000000000C5800ULL	/* ibuf controller C */
};
/* ISYS IRQ Controllers, part of the Input System 2401 */
const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
	0x00000000000C1400ULL,	/* port a */
	0x00000000000C3400ULL,	/* port b */
	0x00000000000C5400ULL	/* port c */
};
/* CSI FE, part of the Input System 2401 */
const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
	0x00000000000C0400ULL,	/* csi fe controller A */
	0x00000000000C2400ULL,	/* csi fe controller B */
	0x00000000000C4400ULL	/* csi fe controller C */
};
/* CSI BE, part of the Input System 2401 */
const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
	0x00000000000C0800ULL,	/* csi be controller A */
	0x00000000000C2800ULL,	/* csi be controller B */
	0x00000000000C4800ULL	/* csi be controller C */
};
/* PIXEL Generator, part of the Input System 2401 */
const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
	0x00000000000C1000ULL,	/* pixel gen controller A */
	0x00000000000C3000ULL,	/* pixel gen controller B */
	0x00000000000C5000ULL	/* pixel gen controller C */
};
/* Stream2MMIO, part of the Input System 2401 */
const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
	0x00000000000C0C00ULL,	/* stream2mmio controller A */
	0x00000000000C2C00ULL,	/* stream2mmio controller B */
	0x00000000000C4C00ULL	/* stream2mmio controller C */
};
| linux-master | drivers/staging/media/atomisp/pci/system_local.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "ia_css_pipe_util.h"
#include "ia_css_frame_public.h"
#include "ia_css_pipe.h"
#include "ia_css_util.h"
#include "assert_support.h"
unsigned int ia_css_pipe_util_pipe_input_format_bpp(
const struct ia_css_pipe *const pipe)
{
assert(pipe);
assert(pipe->stream);
return ia_css_util_input_format_bpp(pipe->stream->config.input_config.format,
pipe->stream->config.pixels_per_clock == 2);
}
/* Reset every entry of an output-frame pointer array to NULL. */
void ia_css_pipe_util_create_output_frames(
    struct ia_css_frame *frames[])
{
	unsigned int port;

	assert(frames);
	for (port = 0; port < IA_CSS_BINARY_MAX_OUTPUT_PORTS; port++)
		frames[port] = NULL;
}
/* Store @frame at position @idx of an output-frame pointer array. */
void ia_css_pipe_util_set_output_frames(
    struct ia_css_frame *frames[],
    unsigned int idx,
    struct ia_css_frame *frame)
{
	assert(idx < IA_CSS_BINARY_MAX_OUTPUT_PORTS);
	frames[idx] = frame;
}
| linux-master | drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_util.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/kernel.h>
#include <linux/math.h>
#include "ia_css_pipe_binarydesc.h"
#include "ia_css_frame_format.h"
#include "ia_css_pipe.h"
#include "ia_css_pipe_util.h"
#include "ia_css_util.h"
#include "ia_css_debug.h"
#include "sh_css_params.h"
#include <assert_support.h>
/* HRT_GDC_N */
#include "gdc_device.h"
/* This module provides a binary descriptions to used to find a binary. Since,
* every stage is associated with a binary, it implicity helps stage
* description. Apart from providing a binary description, this module also
* populates the frame info's when required.*/
/*
 * Fill @descr with the default settings shared by all offline binaries.
 * Internal helper; the frame info pointers (in_info/out_info entries and
 * vf_info) may be NULL.
 */
static void pipe_binarydesc_get_offline(
    struct ia_css_pipe const *const pipe,
    const int mode,
    struct ia_css_binary_descr *descr,
    struct ia_css_frame_info *in_info,
    struct ia_css_frame_info *out_info[],
    struct ia_css_frame_info *vf_info)
{
	unsigned int port;

	assert(pipe);
	assert(descr);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "pipe_binarydesc_get_offline() enter:\n");

	descr->mode = mode;
	descr->online = false;
	descr->continuous = pipe->stream->config.continuous;

	/* All optional features default to off ... */
	descr->striped = false;
	descr->two_ppc = false;
	descr->enable_yuv_ds = false;
	descr->enable_high_speed = false;
	descr->enable_dvs_6axis = false;
	descr->enable_reduced_pipe = false;
	descr->enable_dz = true;	/* ...except digital zoom */
	descr->enable_xnr = false;
	descr->enable_dpc = false;
	descr->enable_tnr = false;
	descr->enable_capture_pp_bli = false;
	descr->enable_fractional_ds = false;
	descr->dvs_env.width = 0;
	descr->dvs_env.height = 0;

	/* Frame info wiring */
	descr->stream_format = pipe->stream->config.input_config.format;
	descr->in_info = in_info;
	descr->bds_out_info = NULL;
	for (port = 0; port < IA_CSS_BINARY_MAX_OUTPUT_PORTS; port++)
		descr->out_info[port] = out_info[port];
	descr->vf_info = vf_info;

	descr->isp_pipe_version = pipe->config.isp_pipe_version;
	descr->required_bds_factor = SH_CSS_BDS_FACTOR_1_00;
	descr->stream_config_left_padding = -1;
}
/*
 * Build the binary descriptor for the (online) copy binary.
 *
 * @out_info is copied into @in_info, since the copy binary's input equals
 * its output.  The original comment claimed out_info could be NULL, but it
 * is dereferenced unconditionally, which would crash; assert it instead.
 * vf_info may be NULL.
 */
void ia_css_pipe_get_copy_binarydesc(
    struct ia_css_pipe const *const pipe,
    struct ia_css_binary_descr *copy_descr,
    struct ia_css_frame_info *in_info,
    struct ia_css_frame_info *out_info,
    struct ia_css_frame_info *vf_info)
{
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
	unsigned int i;

	assert(pipe);
	assert(in_info);
	/* Fix: out_info is dereferenced below, so it must not be NULL */
	assert(out_info);
	IA_CSS_ENTER_PRIVATE("");

	*in_info = *out_info;
	out_infos[0] = out_info;
	for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
		out_infos[i] = NULL;

	pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_COPY,
				    copy_descr, in_info, out_infos, vf_info);
	/* The copy binary runs online on the streaming input */
	copy_descr->online = true;
	copy_descr->continuous = false;
	copy_descr->two_ppc = (pipe->stream->config.pixels_per_clock == 2);
	copy_descr->enable_dz = false;
	copy_descr->isp_pipe_version = IA_CSS_PIPE_VERSION_1;
	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Build the binary descriptor for the viewfinder post-processing binary.
 * out_info may be NULL.
 */
void ia_css_pipe_get_vfpp_binarydesc(
    struct ia_css_pipe const *const pipe,
    struct ia_css_binary_descr *vf_pp_descr,
    struct ia_css_frame_info *in_info,
    struct ia_css_frame_info *out_info)
{
	/* remaining ports are implicitly NULL-initialized */
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {
		out_info
	};

	assert(pipe);
	assert(in_info);
	IA_CSS_ENTER_PRIVATE("");

	/* vf_pp input carries no raw bit depth */
	in_info->raw_bit_depth = 0;
	pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_VF_PP,
				    vf_pp_descr, in_info, out_infos, NULL);
	vf_pp_descr->enable_fractional_ds = true;
	IA_CSS_LEAVE_PRIVATE("");
}
/* Bayer down-scaling factors as fractions, indexed by SH_CSS_BDS_FACTOR_* */
static struct u32_fract bds_factors_list[] = {
	[SH_CSS_BDS_FACTOR_1_00] = {1, 1},
	[SH_CSS_BDS_FACTOR_1_25] = {5, 4},
	[SH_CSS_BDS_FACTOR_1_50] = {3, 2},
	[SH_CSS_BDS_FACTOR_2_00] = {2, 1},
	[SH_CSS_BDS_FACTOR_2_25] = {9, 4},
	[SH_CSS_BDS_FACTOR_2_50] = {5, 2},
	[SH_CSS_BDS_FACTOR_3_00] = {3, 1},
	[SH_CSS_BDS_FACTOR_4_00] = {4, 1},
	[SH_CSS_BDS_FACTOR_4_50] = {9, 2},
	[SH_CSS_BDS_FACTOR_5_00] = {5, 1},
	[SH_CSS_BDS_FACTOR_6_00] = {6, 1},
	[SH_CSS_BDS_FACTOR_8_00] = {8, 1},
};
/*
 * Look up the numerator/denominator pair for @bds_factor.
 * Returns 0 on success, -EINVAL when the index is outside the table.
 */
int sh_css_bds_factor_get_fract(unsigned int bds_factor, struct u32_fract *bds)
{
	if (bds_factor >= ARRAY_SIZE(bds_factors_list))
		return -EINVAL;

	*bds = bds_factors_list[bds_factor];
	return 0;
}
/*
 * Find the bayer down-scaling factor index that maps @input_res onto
 * @output_res (within a small rounding margin on both axes).
 * Returns 0 and stores the index in *bds_factor, or -EINVAL when no
 * factor in the table matches.
 */
int binarydesc_calculate_bds_factor(
    struct ia_css_resolution input_res,
    struct ia_css_resolution output_res,
    unsigned int *bds_factor)
{
	const unsigned int max_bds_factor = 8;
	const unsigned int max_rounding_margin = 2;
	/* delta in pixels to account for rounding margin in the calculation */
	const unsigned int delta = max_bds_factor * max_rounding_margin;
	unsigned int i;

	/* Assert if the resolutions are not set */
	assert(input_res.width != 0 && input_res.height != 0);
	assert(output_res.width != 0 && output_res.height != 0);

	/* Loop over all bds factors until a match is found */
	for (i = 0; i < ARRAY_SIZE(bds_factors_list); i++) {
		unsigned int num = bds_factors_list[i].numerator;
		unsigned int den = bds_factors_list[i].denominator;
		unsigned int scaled_w = output_res.width * num / den;
		unsigned int scaled_h = output_res.height * num / den;

		/* A factor matches when the scaled output lands within
		 * delta pixels below the input on both axes */
		if (scaled_w <= input_res.width &&
		    scaled_w + delta > input_res.width &&
		    scaled_h <= input_res.height &&
		    scaled_h + delta > input_res.height) {
			*bds_factor = i;
			return 0;
		}
	}

	/* Throw an error since a suitable bds_factor cannot be found */
	return -EINVAL;
}
/*
 * Build the binary descriptor for the preview binary (or the copy binary
 * when the stream input is already YUV).
 *
 * Returns 0 on success or a negative errno when no suitable bayer
 * down-scaling factor can be computed.
 *
 * Fix: the early error return now emits IA_CSS_LEAVE_ERR_PRIVATE(), so the
 * enter/leave trace stays balanced like on the regular exit path.
 */
int ia_css_pipe_get_preview_binarydesc(
    struct ia_css_pipe *const pipe,
    struct ia_css_binary_descr *preview_descr,
    struct ia_css_frame_info *in_info,
    struct ia_css_frame_info *bds_out_info,
    struct ia_css_frame_info *out_info,
    struct ia_css_frame_info *vf_info)
{
	int err;
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
	int mode = IA_CSS_BINARY_MODE_PREVIEW;
	unsigned int i;

	assert(pipe);
	assert(in_info);
	assert(out_info);
	assert(vf_info);
	IA_CSS_ENTER_PRIVATE("");

	/*
	 * Set up the info of the input frame with
	 * the ISP required resolution
	 */
	in_info->res = pipe->config.input_effective_res;
	in_info->padded_width = in_info->res.width;
	in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);

	/* YUV input is passed through via the copy binary instead */
	if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
		mode = IA_CSS_BINARY_MODE_COPY;
	else
		in_info->format = IA_CSS_FRAME_FORMAT_RAW;

	out_infos[0] = out_info;
	for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
		out_infos[i] = NULL;

	pipe_binarydesc_get_offline(pipe, mode,
				    preview_descr, in_info, out_infos, vf_info);
	if (pipe->stream->config.online) {
		preview_descr->online = pipe->stream->config.online;
		preview_descr->two_ppc =
		    (pipe->stream->config.pixels_per_clock == 2);
	}
	preview_descr->stream_format = pipe->stream->config.input_config.format;

	/* TODO: Remove this when bds_out_info is available! */
	*bds_out_info = *in_info;

	if (pipe->extra_config.enable_raw_binning) {
		if (pipe->config.bayer_ds_out_res.width != 0 &&
		    pipe->config.bayer_ds_out_res.height != 0) {
			bds_out_info->res.width =
			    pipe->config.bayer_ds_out_res.width;
			bds_out_info->res.height =
			    pipe->config.bayer_ds_out_res.height;
			bds_out_info->padded_width =
			    pipe->config.bayer_ds_out_res.width;
			err =
			    binarydesc_calculate_bds_factor(in_info->res,
							    bds_out_info->res,
							    &preview_descr->required_bds_factor);
			if (err) {
				/* Fix: emit leave-trace on this early error
				 * return as well */
				IA_CSS_LEAVE_ERR_PRIVATE(err);
				return err;
			}
		} else {
			bds_out_info->res.width = in_info->res.width / 2;
			bds_out_info->res.height = in_info->res.height / 2;
			bds_out_info->padded_width = in_info->padded_width / 2;
			preview_descr->required_bds_factor =
			    SH_CSS_BDS_FACTOR_2_00;
		}
	} else {
		/* TODO: Remove this when bds_out_info->is available! */
		bds_out_info->res.width = in_info->res.width;
		bds_out_info->res.height = in_info->res.height;
		bds_out_info->padded_width = in_info->padded_width;
		preview_descr->required_bds_factor = SH_CSS_BDS_FACTOR_1_00;
	}
	pipe->required_bds_factor = preview_descr->required_bds_factor;

	/* bayer ds and fractional ds cannot be enabled at the same time,
	   so we disable bds_out_info when fractional ds is used */
	if (!pipe->extra_config.enable_fractional_ds)
		preview_descr->bds_out_info = bds_out_info;
	else
		preview_descr->bds_out_info = NULL;

	/*
	   ----Preview binary-----
	   --in-->|--out->|vf_veceven|--|--->vf
	   -----------------------
	 * Preview binary normally doesn't have a vf_port but
	 * instead it has an output port. However, the output is
	 * generated by vf_veceven module in which we might have
	 * a downscaling (by 1x, 2x, or 4x). Because the resolution
	 * might change, we need two different info, namely out_info
	 * & vf_info. In fill_binary_info we use out&vf info to
	 * calculate vf decimation factor.
	 */
	*out_info = *vf_info;

	/* In case of preview_ds binary, we can do any fractional amount
	 * of downscale, so there is no DS needed in vf_veceven. Therefore,
	 * out and vf infos will be the same. Otherwise, we set out resolution
	 * equal to in resolution. */
	if (!pipe->extra_config.enable_fractional_ds) {
		/* TODO: Change this when bds_out_info is available! */
		out_info->res.width = bds_out_info->res.width;
		out_info->res.height = bds_out_info->res.height;
		out_info->padded_width = bds_out_info->padded_width;
	}
	preview_descr->enable_fractional_ds =
	    pipe->extra_config.enable_fractional_ds;

	preview_descr->enable_dpc = pipe->config.enable_dpc;

	preview_descr->isp_pipe_version = pipe->config.isp_pipe_version;
	IA_CSS_LEAVE_ERR_PRIVATE(0);
	return 0;
}
int ia_css_pipe_get_video_binarydesc(
struct ia_css_pipe *const pipe,
struct ia_css_binary_descr *video_descr,
struct ia_css_frame_info *in_info,
struct ia_css_frame_info *bds_out_info,
struct ia_css_frame_info *out_info,
struct ia_css_frame_info *vf_info,
int stream_config_left_padding)
{
int mode = IA_CSS_BINARY_MODE_VIDEO;
unsigned int i;
struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
int err = 0;
bool stream_dz_config = false;
/* vf_info can be NULL */
assert(pipe);
assert(in_info);
/* assert(vf_info != NULL); */
IA_CSS_ENTER_PRIVATE("");
/* The solution below is not optimal; we should move to using ia_css_pipe_get_copy_binarydesc()
* But for now this fixes things; this code used to be there but was removed
* with gerrit 8908 as this was wrong for Skycam; however 240x still needs this
*/
if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
mode = IA_CSS_BINARY_MODE_COPY;
in_info->res = pipe->config.input_effective_res;
in_info->padded_width = in_info->res.width;
in_info->format = IA_CSS_FRAME_FORMAT_RAW;
in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
out_infos[0] = out_info;
for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
out_infos[i] = NULL;
pipe_binarydesc_get_offline(pipe, mode,
video_descr, in_info, out_infos, vf_info);
if (pipe->stream->config.online) {
video_descr->online = pipe->stream->config.online;
video_descr->two_ppc =
(pipe->stream->config.pixels_per_clock == 2);
}
if (mode == IA_CSS_BINARY_MODE_VIDEO) {
stream_dz_config =
((pipe->stream->isp_params_configs->dz_config.dx !=
HRT_GDC_N)
|| (pipe->stream->isp_params_configs->dz_config.dy !=
HRT_GDC_N));
video_descr->enable_dz = pipe->config.enable_dz
|| stream_dz_config;
video_descr->dvs_env = pipe->config.dvs_envelope;
video_descr->enable_yuv_ds = pipe->extra_config.enable_yuv_ds;
video_descr->enable_high_speed =
pipe->extra_config.enable_high_speed;
video_descr->enable_dvs_6axis =
pipe->extra_config.enable_dvs_6axis;
video_descr->enable_reduced_pipe =
pipe->extra_config.enable_reduced_pipe;
video_descr->isp_pipe_version = pipe->config.isp_pipe_version;
video_descr->enable_fractional_ds =
pipe->extra_config.enable_fractional_ds;
video_descr->enable_dpc =
pipe->config.enable_dpc;
video_descr->enable_tnr =
pipe->config.enable_tnr;
if (pipe->extra_config.enable_raw_binning) {
if (pipe->config.bayer_ds_out_res.width != 0 &&
pipe->config.bayer_ds_out_res.height != 0) {
bds_out_info->res.width =
pipe->config.bayer_ds_out_res.width;
bds_out_info->res.height =
pipe->config.bayer_ds_out_res.height;
bds_out_info->padded_width =
pipe->config.bayer_ds_out_res.width;
err =
binarydesc_calculate_bds_factor(
in_info->res, bds_out_info->res,
&video_descr->required_bds_factor);
if (err)
return err;
} else {
bds_out_info->res.width =
in_info->res.width / 2;
bds_out_info->res.height =
in_info->res.height / 2;
bds_out_info->padded_width =
in_info->padded_width / 2;
video_descr->required_bds_factor =
SH_CSS_BDS_FACTOR_2_00;
}
} else {
bds_out_info->res.width = in_info->res.width;
bds_out_info->res.height = in_info->res.height;
bds_out_info->padded_width = in_info->padded_width;
video_descr->required_bds_factor =
SH_CSS_BDS_FACTOR_1_00;
}
pipe->required_bds_factor = video_descr->required_bds_factor;
/* bayer ds and fractional ds cannot be enabled
at the same time, so we disable bds_out_info when
fractional ds is used */
if (!pipe->extra_config.enable_fractional_ds)
video_descr->bds_out_info = bds_out_info;
else
video_descr->bds_out_info = NULL;
video_descr->enable_fractional_ds =
pipe->extra_config.enable_fractional_ds;
video_descr->stream_config_left_padding = stream_config_left_padding;
}
IA_CSS_LEAVE_ERR_PRIVATE(err);
return err;
}
/*
 * Build the binary descriptor for a YUV scaler stage.
 * A vf_info with a zero-sized resolution is treated as "no viewfinder".
 */
void ia_css_pipe_get_yuvscaler_binarydesc(
    struct ia_css_pipe const *const pipe,
    struct ia_css_binary_descr *yuv_scaler_descr,
    struct ia_css_frame_info *in_info,
    struct ia_css_frame_info *out_info,
    struct ia_css_frame_info *internal_out_info,
    struct ia_css_frame_info *vf_info)
{
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
	struct ia_css_frame_info *effective_vf = NULL;

	assert(pipe);
	assert(in_info);
	/* Note: if the following assert fails, the number of ports has been
	 * changed; in that case an additional initializer must be added
	 * a few lines below after which this assert can be updated.
	 */
	assert(IA_CSS_BINARY_MAX_OUTPUT_PORTS == 2);
	IA_CSS_ENTER_PRIVATE("");

	in_info->padded_width = in_info->res.width;
	in_info->raw_bit_depth = 0;
	ia_css_frame_info_set_width(in_info, in_info->res.width, 0);

	out_infos[0] = out_info;
	out_infos[1] = internal_out_info;
	/* add initializers here if
	 * assert(IA_CSS_BINARY_MAX_OUTPUT_PORTS == ...);
	 * fails
	 */

	if (vf_info &&
	    (vf_info->res.width != 0 || vf_info->res.height != 0))
		effective_vf = vf_info;

	pipe_binarydesc_get_offline(pipe,
				    IA_CSS_BINARY_MODE_CAPTURE_PP,
				    yuv_scaler_descr,
				    in_info, out_infos, effective_vf);
	yuv_scaler_descr->enable_fractional_ds = true;
	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Build the binary descriptor for the capture post-processing binary.
 */
void ia_css_pipe_get_capturepp_binarydesc(
    struct ia_css_pipe *const pipe,
    struct ia_css_binary_descr *capture_pp_descr,
    struct ia_css_frame_info *in_info,
    struct ia_css_frame_info *out_info,
    struct ia_css_frame_info *vf_info)
{
	/* remaining ports are implicitly NULL-initialized */
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = {
		out_info
	};

	assert(pipe);
	assert(in_info);
	assert(vf_info);
	IA_CSS_ENTER_PRIVATE("");

	/* the in_info is only used for resolution to enable
	   bayer down scaling. */
	*in_info = pipe->out_yuv_ds_input_info.res.width ?
		   pipe->out_yuv_ds_input_info : *out_info;
	in_info->format = IA_CSS_FRAME_FORMAT_YUV420;
	in_info->raw_bit_depth = 0;
	ia_css_frame_info_set_width(in_info, in_info->res.width, 0);

	pipe_binarydesc_get_offline(pipe,
				    IA_CSS_BINARY_MODE_CAPTURE_PP,
				    capture_pp_descr,
				    in_info, out_infos, vf_info);
	capture_pp_descr->enable_capture_pp_bli =
	    pipe->config.default_capture_config.enable_capture_pp_bli;
	capture_pp_descr->enable_fractional_ds = true;
	capture_pp_descr->enable_xnr =
	    pipe->config.default_capture_config.enable_xnr != 0;
	IA_CSS_LEAVE_PRIVATE("");
}
/* lookup table for high quality primary binaries; indexed by the
 * stage_idx argument of ia_css_pipe_get_primary_binarydesc()
 * (one binary mode per HQ primary stage). */
static unsigned int primary_hq_binary_modes[NUM_PRIMARY_HQ_STAGES] = {
	IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE0,
	IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE1,
	IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE2,
	IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE3,
	IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE4,
	IA_CSS_BINARY_MODE_PRIMARY_HQ_STAGE5
};
/*
 * Fill @prim_descr for a primary (capture) stage.
 * The binary mode is chosen from the pipe version: for 2.6.1 the HQ
 * per-stage lookup table is used (selected by @stage_idx), otherwise the
 * plain primary mode; YUV input downgrades the stage to a copy binary.
 * @vf_info may be NULL.
 */
void ia_css_pipe_get_primary_binarydesc(
	struct ia_css_pipe const *const pipe,
	struct ia_css_binary_descr *prim_descr,
	struct ia_css_frame_info *in_info,
	struct ia_css_frame_info *out_info,
	struct ia_css_frame_info *vf_info,
	unsigned int stage_idx)
{
	enum ia_css_pipe_version pipe_version = pipe->config.isp_pipe_version;
	int mode;
	unsigned int i;
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];

	assert(pipe);
	assert(in_info);
	assert(out_info);
	assert(stage_idx < NUM_PRIMARY_HQ_STAGES);
	/* vf_info can be NULL - example video_binarydescr */
	/*assert(vf_info != NULL);*/
	IA_CSS_ENTER_PRIVATE("");
	if (pipe_version == IA_CSS_PIPE_VERSION_2_6_1)
		mode = primary_hq_binary_modes[stage_idx];
	else
		mode = IA_CSS_BINARY_MODE_PRIMARY;
	/* YUV input cannot be processed by the primary binary; fall back
	 * to a plain copy. */
	if (ia_css_util_is_input_format_yuv(pipe->stream->config.input_config.format))
		mode = IA_CSS_BINARY_MODE_COPY;
	in_info->res = pipe->config.input_effective_res;
	in_info->padded_width = in_info->res.width;
	if (pipe->stream->config.pack_raw_pixels)
		in_info->format = IA_CSS_FRAME_FORMAT_RAW_PACKED;
	else
		in_info->format = IA_CSS_FRAME_FORMAT_RAW;
	in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
	out_infos[0] = out_info;
	for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
		out_infos[i] = NULL;
	pipe_binarydesc_get_offline(pipe, mode,
				    prim_descr, in_info, out_infos, vf_info);
	/* Online sensor input (not from memory): record ppc and format. */
	if (pipe->stream->config.online &&
	    pipe->stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) {
		prim_descr->online = true;
		prim_descr->two_ppc =
		    (pipe->stream->config.pixels_per_clock == 2);
		prim_descr->stream_format = pipe->stream->config.input_config.format;
	}
	if (mode == IA_CSS_BINARY_MODE_PRIMARY) {
		prim_descr->isp_pipe_version = pipe->config.isp_pipe_version;
		prim_descr->enable_fractional_ds =
		    pipe->extra_config.enable_fractional_ds;
		/* We have both striped and non-striped primary binaries,
		 * if continuous viewfinder is required, then we must select
		 * a striped one. Otherwise we prefer to use a non-striped
		 * since it has better performance. */
		if (pipe_version == IA_CSS_PIPE_VERSION_2_6_1)
			prim_descr->striped = false;
		else
			prim_descr->striped = prim_descr->continuous &&
					      (!pipe->stream->stop_copy_preview || !pipe->stream->disable_cont_vf);
	}
	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Fill @pre_gdc_descr for the pre-GDC stage (pre-ISP binary mode).
 * The input is raw data at the output resolution, at the stream's
 * input bit depth.
 */
void ia_css_pipe_get_pre_gdc_binarydesc(
	struct ia_css_pipe const *const pipe,
	struct ia_css_binary_descr *pre_gdc_descr,
	struct ia_css_frame_info *in_info,
	struct ia_css_frame_info *out_info)
{
	/* Port 0 carries the output; the remaining ports are NULL. */
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = { out_info };

	assert(pipe);
	assert(in_info);
	assert(out_info);
	IA_CSS_ENTER_PRIVATE("");

	*in_info = *out_info;
	in_info->format = IA_CSS_FRAME_FORMAT_RAW;
	in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);

	pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
				    pre_gdc_descr, in_info, out_infos, NULL);
	pre_gdc_descr->isp_pipe_version = pipe->config.isp_pipe_version;
	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Fill @gdc_descr for the GDC stage.
 * The input is a QPLANE6 frame at the output resolution.
 */
void ia_css_pipe_get_gdc_binarydesc(
	struct ia_css_pipe const *const pipe,
	struct ia_css_binary_descr *gdc_descr,
	struct ia_css_frame_info *in_info,
	struct ia_css_frame_info *out_info)
{
	/* Port 0 carries the output; the remaining ports are NULL. */
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = { out_info };

	assert(pipe);
	assert(in_info);
	assert(out_info);
	IA_CSS_ENTER_PRIVATE("");

	*in_info = *out_info;
	in_info->format = IA_CSS_FRAME_FORMAT_QPLANE6;

	pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_GDC,
				    gdc_descr, in_info, out_infos, NULL);
	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Fill @post_gdc_descr for the post-GDC stage (post-ISP binary mode).
 * The input is 16-bit YUV420 at the output resolution.
 */
void ia_css_pipe_get_post_gdc_binarydesc(
	struct ia_css_pipe const *const pipe,
	struct ia_css_binary_descr *post_gdc_descr,
	struct ia_css_frame_info *in_info,
	struct ia_css_frame_info *out_info,
	struct ia_css_frame_info *vf_info)
{
	/* Port 0 carries the output; the remaining ports are NULL. */
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = { out_info };

	assert(pipe);
	assert(in_info);
	assert(out_info);
	assert(vf_info);
	IA_CSS_ENTER_PRIVATE("");

	*in_info = *out_info;
	in_info->format = IA_CSS_FRAME_FORMAT_YUV420_16;
	in_info->raw_bit_depth = 16;

	pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_POST_ISP,
				    post_gdc_descr, in_info, out_infos, vf_info);
	post_gdc_descr->isp_pipe_version = pipe->config.isp_pipe_version;
	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Fill @pre_de_descr for the pre-DE stage; the binary mode depends on
 * the ISP pipe version (pre-ISP for version 1, pre-DE for version 2.2).
 */
void ia_css_pipe_get_pre_de_binarydesc(
	struct ia_css_pipe const *const pipe,
	struct ia_css_binary_descr *pre_de_descr,
	struct ia_css_frame_info *in_info,
	struct ia_css_frame_info *out_info)
{
	unsigned int i;
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];

	assert(pipe);
	assert(in_info);
	assert(out_info);
	IA_CSS_ENTER_PRIVATE("");
	*in_info = *out_info;
	in_info->format = IA_CSS_FRAME_FORMAT_RAW;
	in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
	out_infos[0] = out_info;
	for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
		out_infos[i] = NULL;
	/* NOTE(review): for pipe versions other than 1 and 2.2 neither
	 * branch runs, so pipe_binarydesc_get_offline() never initializes
	 * pre_de_descr -- confirm all callers only use those versions. */
	if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_1)
		pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
					    pre_de_descr, in_info, out_infos, NULL);
	else if (pipe->config.isp_pipe_version == IA_CSS_PIPE_VERSION_2_2) {
		pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_DE,
					    pre_de_descr, in_info, out_infos, NULL);
	}
	/* Online input: record pixels-per-clock and stream format. */
	if (pipe->stream->config.online) {
		pre_de_descr->online = true;
		pre_de_descr->two_ppc =
		    (pipe->stream->config.pixels_per_clock == 2);
		pre_de_descr->stream_format = pipe->stream->config.input_config.format;
	}
	pre_de_descr->isp_pipe_version = pipe->config.isp_pipe_version;
	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Fill @pre_anr_descr for the pre-ANR stage (pre-ISP binary mode).
 * The input is raw data at the output resolution, at the stream's
 * input bit depth.
 */
void ia_css_pipe_get_pre_anr_binarydesc(
	struct ia_css_pipe const *const pipe,
	struct ia_css_binary_descr *pre_anr_descr,
	struct ia_css_frame_info *in_info,
	struct ia_css_frame_info *out_info)
{
	unsigned int i;
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];

	assert(pipe);
	assert(in_info);
	assert(out_info);
	IA_CSS_ENTER_PRIVATE("");
	*in_info = *out_info;
	in_info->format = IA_CSS_FRAME_FORMAT_RAW;
	in_info->raw_bit_depth = ia_css_pipe_util_pipe_input_format_bpp(pipe);
	out_infos[0] = out_info;
	for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
		out_infos[i] = NULL;
	pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_PRE_ISP,
				    pre_anr_descr, in_info, out_infos, NULL);
	/* Online input: record pixels-per-clock and stream format. */
	if (pipe->stream->config.online) {
		pre_anr_descr->online = true;
		pre_anr_descr->two_ppc =
		    (pipe->stream->config.pixels_per_clock == 2);
		pre_anr_descr->stream_format = pipe->stream->config.input_config.format;
	}
	pre_anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Fill @anr_descr for the ANR (advanced noise reduction) stage.
 * The input is raw data at ANR element bit depth.
 */
void ia_css_pipe_get_anr_binarydesc(
	struct ia_css_pipe const *const pipe,
	struct ia_css_binary_descr *anr_descr,
	struct ia_css_frame_info *in_info,
	struct ia_css_frame_info *out_info)
{
	/* Port 0 carries the output; the remaining ports are NULL. */
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS] = { out_info };

	assert(pipe);
	assert(in_info);
	assert(out_info);
	IA_CSS_ENTER_PRIVATE("");

	*in_info = *out_info;
	in_info->format = IA_CSS_FRAME_FORMAT_RAW;
	in_info->raw_bit_depth = ANR_ELEMENT_BITS;

	pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_ANR,
				    anr_descr, in_info, out_infos, NULL);
	anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Fill @post_anr_descr for the post-ANR stage (post-ISP binary mode).
 * The input is raw data at ANR element bit depth.
 */
void ia_css_pipe_get_post_anr_binarydesc(
	struct ia_css_pipe const *const pipe,
	struct ia_css_binary_descr *post_anr_descr,
	struct ia_css_frame_info *in_info,
	struct ia_css_frame_info *out_info,
	struct ia_css_frame_info *vf_info)
{
	unsigned int i;
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];

	assert(pipe);
	assert(in_info);
	assert(out_info);
	assert(vf_info);
	IA_CSS_ENTER_PRIVATE("");
	*in_info = *out_info;
	in_info->format = IA_CSS_FRAME_FORMAT_RAW;
	in_info->raw_bit_depth = ANR_ELEMENT_BITS;
	/* Only output port 0 is used by this stage. */
	out_infos[0] = out_info;
	for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
		out_infos[i] = NULL;
	pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_POST_ISP,
				    post_anr_descr, in_info, out_infos, vf_info);
	post_anr_descr->isp_pipe_version = pipe->config.isp_pipe_version;
	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Fill @ldc_descr for the LDC (lens distortion correction) stage, which
 * reuses the capture-pp binary mode with YUV420 input and copies the
 * DVS 6-axis setting from the pipe's extra config.
 */
void ia_css_pipe_get_ldc_binarydesc(
	struct ia_css_pipe const *const pipe,
	struct ia_css_binary_descr *ldc_descr,
	struct ia_css_frame_info *in_info,
	struct ia_css_frame_info *out_info)
{
	unsigned int i;
	struct ia_css_frame_info *out_infos[IA_CSS_BINARY_MAX_OUTPUT_PORTS];

	assert(pipe);
	assert(in_info);
	assert(out_info);
	IA_CSS_ENTER_PRIVATE("");
	*in_info = *out_info;
	in_info->format = IA_CSS_FRAME_FORMAT_YUV420;
	in_info->raw_bit_depth = 0;
	ia_css_frame_info_set_width(in_info, in_info->res.width, 0);
	/* Only output port 0 is used by this stage. */
	out_infos[0] = out_info;
	for (i = 1; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
		out_infos[i] = NULL;
	pipe_binarydesc_get_offline(pipe, IA_CSS_BINARY_MODE_CAPTURE_PP,
				    ldc_descr, in_info, out_infos, NULL);
	ldc_descr->enable_dvs_6axis =
	    pipe->extra_config.enable_dvs_6axis;
	IA_CSS_LEAVE_PRIVATE("");
}
| linux-master | drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_binarydesc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "ia_css_pipe_stagedesc.h"
#include "assert_support.h"
#include "ia_css_debug.h"
/*
 * Fill @stage_desc for a regular binary stage.
 * Arguments are validated both by assert() (debug builds) and by a
 * runtime check so that release builds bail out instead of crashing.
 */
void ia_css_pipe_get_generic_stage_desc(
	struct ia_css_pipeline_stage_desc *stage_desc,
	struct ia_css_binary *binary,
	struct ia_css_frame *out_frame[],
	struct ia_css_frame *in_frame,
	struct ia_css_frame *vf_frame)
{
	unsigned int i;

	IA_CSS_ENTER_PRIVATE("stage_desc = %p, binary = %p, out_frame = %p, in_frame = %p, vf_frame = %p",
			     stage_desc, binary, out_frame, in_frame, vf_frame);
	assert(stage_desc && binary && binary->info);
	if (!stage_desc || !binary || !binary->info) {
		IA_CSS_ERROR("invalid arguments");
		goto ERR;
	}
	stage_desc->binary = binary;
	stage_desc->firmware = NULL;
	stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC;
	stage_desc->max_input_width = 0;
	/* The pipeline mode comes from the binary's SP info. */
	stage_desc->mode = binary->info->sp.pipeline.mode;
	stage_desc->in_frame = in_frame;
	for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
		stage_desc->out_frame[i] = out_frame[i];
	}
	stage_desc->vf_frame = vf_frame;
ERR:
	IA_CSS_LEAVE_PRIVATE("");
}
/*
 * Fill @stage_desc for a firmware (acceleration) stage: the firmware and
 * pipeline mode are supplied by the caller, no SP function is used.
 */
void ia_css_pipe_get_firmwares_stage_desc(
	struct ia_css_pipeline_stage_desc *stage_desc,
	struct ia_css_binary *binary,
	struct ia_css_frame *out_frame[],
	struct ia_css_frame *in_frame,
	struct ia_css_frame *vf_frame,
	const struct ia_css_fw_info *fw,
	unsigned int mode)
{
	unsigned int port;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_pipe_get_firmwares_stage_desc() enter:\n");

	stage_desc->binary = binary;
	stage_desc->firmware = fw;
	stage_desc->sp_func = IA_CSS_PIPELINE_NO_FUNC;
	stage_desc->max_input_width = 0;
	stage_desc->mode = mode;
	stage_desc->in_frame = in_frame;
	stage_desc->vf_frame = vf_frame;
	for (port = 0; port < IA_CSS_BINARY_MAX_OUTPUT_PORTS; port++)
		stage_desc->out_frame[port] = out_frame[port];
}
/*
 * Fill @stage_desc for an SP-function stage: no binary and no firmware,
 * only output port 0 is used and the mode is left unset (-1).
 */
void ia_css_pipe_get_sp_func_stage_desc(
	struct ia_css_pipeline_stage_desc *stage_desc,
	struct ia_css_frame *out_frame,
	enum ia_css_pipeline_stage_sp_func sp_func,
	unsigned int max_input_width)
{
	unsigned int port;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_pipe_get_sp_func_stage_desc() enter:\n");

	stage_desc->binary = NULL;
	stage_desc->firmware = NULL;
	stage_desc->sp_func = sp_func;
	stage_desc->max_input_width = max_input_width;
	stage_desc->mode = (unsigned int)-1;
	stage_desc->in_frame = NULL;
	stage_desc->vf_frame = NULL;
	for (port = 0; port < IA_CSS_BINARY_MAX_OUTPUT_PORTS; port++)
		stage_desc->out_frame[port] = NULL;
	stage_desc->out_frame[0] = out_frame;
}
| linux-master | drivers/staging/media/atomisp/pci/camera/pipe/src/pipe_stagedesc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "ia_css_util.h"
#include <ia_css_frame.h>
#include <assert_support.h>
#include <math_support.h>
/* for ia_css_binary_max_vf_width() */
#include "ia_css_binary.h"
/* MW: Table look-up ??? */
/* MW: Table look-up ??? */
/*
 * Return the number of bits per element for the given input (sensor/MIPI)
 * format; 0 for unknown formats.
 * @two_ppc: two pixels per clock; only affects RAW_14 and RAW_16 below.
 */
unsigned int ia_css_util_input_format_bpp(
    enum atomisp_input_format format,
    bool two_ppc)
{
	unsigned int rval = 0;

	switch (format) {
	case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
	case ATOMISP_INPUT_FORMAT_YUV420_8:
	case ATOMISP_INPUT_FORMAT_YUV422_8:
	case ATOMISP_INPUT_FORMAT_RGB_888:
	case ATOMISP_INPUT_FORMAT_RAW_8:
	case ATOMISP_INPUT_FORMAT_BINARY_8:
	case ATOMISP_INPUT_FORMAT_EMBEDDED:
		rval = 8;
		break;
	case ATOMISP_INPUT_FORMAT_YUV420_10:
	case ATOMISP_INPUT_FORMAT_YUV422_10:
	case ATOMISP_INPUT_FORMAT_RAW_10:
		rval = 10;
		break;
	case ATOMISP_INPUT_FORMAT_YUV420_16:
	case ATOMISP_INPUT_FORMAT_YUV422_16:
		rval = 16;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_444:
		rval = 4;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_555:
		rval = 5;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_565:
		/* NOTE(review): 65 looks like a typo for 16 (RGB565 is a
		 * 16-bit format), but RGB_444/RGB_555 also return 4/5 here,
		 * so the intended semantics of these RGB values are unclear
		 * from this code alone -- confirm against the MIPI bpp
		 * tables before changing. */
		rval = 65;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_666:
	case ATOMISP_INPUT_FORMAT_RAW_6:
		rval = 6;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_7:
		rval = 7;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_12:
		rval = 12;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_14:
		/* With one pixel per clock the effective element width is
		 * reduced to 12 bits; presumably a transfer limitation --
		 * TODO confirm. */
		if (two_ppc)
			rval = 14;
		else
			rval = 12;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_16:
		if (two_ppc)
			rval = 16;
		else
			rval = 12;
		break;
	default:
		rval = 0;
		break;
	}
	return rval;
}
/*
 * Validate a viewfinder frame info: the generic frame checks must pass
 * and the width may not exceed twice the maximum viewfinder width
 * (a zero maximum disables the width check).
 * Returns 0 on success or a negative error code.
 */
int ia_css_util_check_vf_info(
    const struct ia_css_frame_info *const info)
{
	unsigned int max_vf_width;
	int ret;

	assert(info);

	ret = ia_css_frame_check_info(info);
	if (ret)
		return ret;

	max_vf_width = ia_css_binary_max_vf_width();
	if (max_vf_width && info->res.width > 2 * max_vf_width)
		return -EINVAL;

	return 0;
}
/*
 * Validate a (main output, viewfinder) frame-info pair.
 * Returns 0 when both pass, otherwise the first error encountered.
 */
int ia_css_util_check_vf_out_info(
    const struct ia_css_frame_info *const out_info,
    const struct ia_css_frame_info *const vf_info)
{
	int ret;

	assert(out_info);
	assert(vf_info);

	ret = ia_css_frame_check_info(out_info);
	if (!ret)
		ret = ia_css_util_check_vf_info(vf_info);
	return ret;
}
/*
 * Validate a resolution: both dimensions non-zero and the width even.
 * (Height may be odd: jpeg/embedded data from ISYS2401.)
 * Returns 0 on success, -EINVAL otherwise.
 */
int ia_css_util_check_res(unsigned int width, unsigned int height)
{
	if (width == 0 || height == 0 || (width % 2) != 0)
		return -EINVAL;
	return 0;
}
/* ISP2401 */
/* True when resolution @a fits inside resolution @b in both dimensions. */
bool ia_css_util_res_leq(struct ia_css_resolution a, struct ia_css_resolution b)
{
	return (b.width >= a.width) && (b.height >= a.height);
}
/* ISP2401 */
/* A resolution counts as "zero" when either dimension is 0. */
bool ia_css_util_resolution_is_zero(const struct ia_css_resolution resolution)
{
	return !(resolution.width && resolution.height);
}
/* ISP2401 */
/* True when both dimensions of the resolution are even. */
bool ia_css_util_resolution_is_even(const struct ia_css_resolution resolution)
{
	return IS_EVEN(resolution.width) && IS_EVEN(resolution.height);
}
bool ia_css_util_is_input_format_raw(enum atomisp_input_format format)
{
return ((format == ATOMISP_INPUT_FORMAT_RAW_6) ||
(format == ATOMISP_INPUT_FORMAT_RAW_7) ||
(format == ATOMISP_INPUT_FORMAT_RAW_8) ||
(format == ATOMISP_INPUT_FORMAT_RAW_10) ||
(format == ATOMISP_INPUT_FORMAT_RAW_12));
/* raw_14 and raw_16 are not supported as input formats to the ISP.
* They can only be copied to a frame in memory using the
* copy binary.
*/
}
bool ia_css_util_is_input_format_yuv(enum atomisp_input_format format)
{
return format == ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY ||
format == ATOMISP_INPUT_FORMAT_YUV420_8 ||
format == ATOMISP_INPUT_FORMAT_YUV420_10 ||
format == ATOMISP_INPUT_FORMAT_YUV420_16 ||
format == ATOMISP_INPUT_FORMAT_YUV422_8 ||
format == ATOMISP_INPUT_FORMAT_YUV422_10 ||
format == ATOMISP_INPUT_FORMAT_YUV422_16;
}
/*
 * Validate a stream configuration: non-NULL, non-zero effective
 * resolution, and (optionally) a raw and/or YUV input format.
 * Returns 0 on success, -EINVAL otherwise.
 */
int ia_css_util_check_input(
    const struct ia_css_stream_config *const stream_config,
    bool must_be_raw,
    bool must_be_yuv)
{
	enum atomisp_input_format fmt;

	assert(stream_config);

	if (!stream_config)
		return -EINVAL;

	if (stream_config->input_config.effective_res.width == 0 ||
	    stream_config->input_config.effective_res.height == 0)
		return -EINVAL;

	fmt = stream_config->input_config.format;
	if (must_be_raw && !ia_css_util_is_input_format_raw(fmt))
		return -EINVAL;
	if (must_be_yuv && !ia_css_util_is_input_format_yuv(fmt))
		return -EINVAL;

	return 0;
}
| linux-master | drivers/staging/media/atomisp/pci/camera/util/src/util.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <type_support.h>
#include "system_global.h"
#include "ibuf_ctrl_global.h"
/* Maximum number of processes supported by each ibuf controller
 * instance, indexed by IBUF_CTRL<n>_ID. */
const u32 N_IBUF_CTRL_PROCS[N_IBUF_CTRL_ID] = {
	8, /* IBUF_CTRL0_ID supports at most 8 processes */
	4, /* IBUF_CTRL1_ID supports at most 4 processes */
	4  /* IBUF_CTRL2_ID supports at most 4 processes */
};
| linux-master | drivers/staging/media/atomisp/pci/css_2401_system/host/ibuf_ctrl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "system_local.h"
#include "isys_dma_global.h"
#include "assert_support.h"
#include "isys_dma_private.h"
/* Number of DMA channels per ISYS2401 DMA instance. */
const isys2401_dma_channel N_ISYS2401_DMA_CHANNEL_PROCS[N_ISYS2401_DMA_ID] = {
	N_ISYS2401_DMA_CHANNEL
};

/*
 * Program the maximum burst size of the DDR bus interface of the given
 * ISYS2401 DMA device.
 * @max_burst_size: must be in 1..255; the hardware register holds the
 * value minus one.
 */
void isys2401_dma_set_max_burst_size(
    const isys2401_dma_ID_t dma_id,
    uint32_t max_burst_size)
{
	assert(dma_id < N_ISYS2401_DMA_ID);
	assert((max_burst_size > 0x00) && (max_burst_size <= 0xFF));

	isys2401_dma_reg_store(dma_id,
			       DMA_DEV_INFO_REG_IDX(_DMA_V2_DEV_INTERF_MAX_BURST_IDX, HIVE_DMA_BUS_DDR_CONN),
			       (max_burst_size - 1));
}
| linux-master | drivers/staging/media/atomisp/pci/css_2401_system/host/isys_dma.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "isys_stream2mmio.h"
/* Number of SIDs per stream2mmio device: device 0 exposes the full SID
 * range, devices 1 and 2 only up to STREAM2MMIO_SID4_ID (presumably 4
 * SIDs -- confirm against the stream2mmio register map). */
const stream2mmio_sid_ID_t N_STREAM2MMIO_SID_PROCS[N_STREAM2MMIO_ID] = {
	N_STREAM2MMIO_SID_ID,
	STREAM2MMIO_SID4_ID,
	STREAM2MMIO_SID4_ID
};
| linux-master | drivers/staging/media/atomisp/pci/css_2401_system/host/isys_stream2mmio.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "system_global.h"
#include "csi_rx_global.h"
/* Short-packet lookup-table size per CSI RX backend instance. */
const u32 N_SHORT_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID] = {
	4, /* 4 entries at CSI_RX_BACKEND0_ID*/
	4, /* 4 entries at CSI_RX_BACKEND1_ID*/
	4  /* 4 entries at CSI_RX_BACKEND2_ID*/
};

/* Long-packet lookup-table size per CSI RX backend instance. */
const u32 N_LONG_PACKET_LUT_ENTRIES[N_CSI_RX_BACKEND_ID] = {
	8, /* 8 entries at CSI_RX_BACKEND0_ID*/
	4, /* 4 entries at CSI_RX_BACKEND1_ID*/
	4  /* 4 entries at CSI_RX_BACKEND2_ID*/
};

/* Number of data lanes per CSI RX frontend instance. */
const u32 N_CSI_RX_FE_CTRL_DLANES[N_CSI_RX_FRONTEND_ID] = {
	N_CSI_RX_DLANE_ID, /* 4 dlanes for CSI_RX_FR0NTEND0_ID */
	N_CSI_RX_DLANE_ID, /* 4 dlanes for CSI_RX_FR0NTEND1_ID */
	N_CSI_RX_DLANE_ID  /* 4 dlanes for CSI_RX_FR0NTEND2_ID */
};

/* sid_width for CSI_RX_BACKEND<N>_ID */
const u32 N_CSI_RX_BE_SID_WIDTH[N_CSI_RX_BACKEND_ID] = {
	3,
	2,
	2
};
| linux-master | drivers/staging/media/atomisp/pci/css_2401_system/host/csi_rx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <system_local.h>
#include "device_access.h"
#include "assert_support.h"
#include "ia_css_debug.h"
#include "isys_irq.h"
#ifndef __INLINE_ISYS2401_IRQ__
/*
* Include definitions for isys irq private functions. isys_irq.h includes
* declarations of these functions by including isys_irq_public.h.
*/
#include "isys_irq_private.h"
#endif
/* Public interface */
/*
 * Enable interrupt delivery for one ISYS IRQ controller: program the
 * mask register, clear any pending status, then set the enable register.
 */
void isys_irqc_status_enable(const isys_irq_ID_t isys_irqc_id)
{
	assert(isys_irqc_id < N_ISYS_IRQ_ID);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Setting irq mask for port %u\n",
			    isys_irqc_id);
	isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_MASK_REG_IDX,
			    ISYS_IRQ_MASK_REG_VALUE);
	/* Clear before enable so no stale interrupt fires immediately. */
	isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_CLEAR_REG_IDX,
			    ISYS_IRQ_CLEAR_REG_VALUE);
	isys_irqc_reg_store(isys_irqc_id, ISYS_IRQ_ENABLE_REG_IDX,
			    ISYS_IRQ_ENABLE_REG_VALUE);
}
| linux-master | drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "assert_support.h" /* assert */
#include "ia_css_buffer.h"
#include "sp.h"
#include "ia_css_bufq.h" /* Bufq API's */
#include "ia_css_queue.h" /* ia_css_queue_t */
#include "sw_event_global.h" /* Event IDs.*/
#include "ia_css_eventq.h" /* ia_css_eventq_recv()*/
#include "ia_css_debug.h" /* ia_css_debug_dtrace*/
#include "sh_css_internal.h" /* sh_css_queue_type */
#include "sp_local.h" /* sp_address_of */
#include "sh_css_firmware.h" /* sh_css_sp_fw*/
#define BUFQ_DUMP_FILE_NAME_PREFIX_SIZE 256

/* Prefix buffer for bufq dump file names -- used by the dump/debug code
 * (not visible in this part of the file). */
static char prefix[BUFQ_DUMP_FILE_NAME_PREFIX_SIZE] = {0};

/*********************************************************/
/* Global Queue objects used by CSS */
/*********************************************************/

/* All host-side handles to the queues shared with the SP. */
struct sh_css_queues {
	/* Host2SP buffer queue */
	ia_css_queue_t host2sp_buffer_queue_handles
	[SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
	/* SP2Host buffer queue */
	ia_css_queue_t sp2host_buffer_queue_handles
	[SH_CSS_MAX_NUM_QUEUES];

	/* Host2SP event queue */
	ia_css_queue_t host2sp_psys_event_queue_handle;

	/* SP2Host event queue */
	ia_css_queue_t sp2host_psys_event_queue_handle;

	/* Host2SP ISYS event queue */
	ia_css_queue_t host2sp_isys_event_queue_handle;

	/* SP2Host ISYS event queue */
	ia_css_queue_t sp2host_isys_event_queue_handle;
	/* Tagger command queue */
	ia_css_queue_t host2sp_tag_cmd_queue_handle;
};

/*******************************************************
*** Static variables
********************************************************/
/* The single queue set used by this CSS instance. */
static struct sh_css_queues css_queues;

/* Per SP thread: which queue id serves each dynamic buffer type;
 * SH_CSS_INVALID_QUEUE_ID while unmapped. */
static int
buffer_type_to_queue_id_map[SH_CSS_MAX_SP_THREADS][IA_CSS_NUM_DYNAMIC_BUFFER_TYPE];
/* true while a queue is still free to be claimed by a buffer type. */
static bool queue_availability[SH_CSS_MAX_SP_THREADS][SH_CSS_MAX_NUM_QUEUES];
/*******************************************************
*** Static functions
********************************************************/
static void map_buffer_type_to_queue_id(
unsigned int thread_id,
enum ia_css_buffer_type buf_type
);
static void unmap_buffer_type_to_queue_id(
unsigned int thread_id,
enum ia_css_buffer_type buf_type
);
static ia_css_queue_t *bufq_get_qhandle(
enum sh_css_queue_type type,
enum sh_css_queue_id id,
int thread
);
/*******************************************************
*** Public functions
********************************************************/
/* Reset the queue bookkeeping: every queue becomes available and no
 * buffer type is mapped to any queue. */
void ia_css_queue_map_init(void)
{
	unsigned int thread, idx;

	for (thread = 0; thread < SH_CSS_MAX_SP_THREADS; thread++) {
		for (idx = 0; idx < SH_CSS_MAX_NUM_QUEUES; idx++)
			queue_availability[thread][idx] = true;
		for (idx = 0; idx < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE; idx++)
			buffer_type_to_queue_id_map[thread][idx] =
			    SH_CSS_INVALID_QUEUE_ID;
	}
}
/* Map or unmap (@map) the given buffer type onto a queue for the given
 * SP thread. */
void ia_css_queue_map(
    unsigned int thread_id,
    enum ia_css_buffer_type buf_type,
    bool map)
{
	assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
	assert(thread_id < SH_CSS_MAX_SP_THREADS);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_queue_map() enter: buf_type=%d, thread_id=%d\n", buf_type, thread_id);

	if (!map)
		unmap_buffer_type_to_queue_id(thread_id, buf_type);
	else
		map_buffer_type_to_queue_id(thread_id, buf_type);
}
/*
 * @brief Query the internal queue ID.
 *
 * Look up which queue serves @buf_type on @thread_id; the result is
 * written to @val. Returns false on invalid arguments or when the
 * mapping is absent/out of range.
 */
bool ia_css_query_internal_queue_id(
    enum ia_css_buffer_type buf_type,
    unsigned int thread_id,
    enum sh_css_queue_id *val)
{
	IA_CSS_ENTER("buf_type=%d, thread_id=%d, val = %p", buf_type, thread_id, val);

	if (!val || thread_id >= SH_CSS_MAX_SP_THREADS ||
	    buf_type >= IA_CSS_NUM_DYNAMIC_BUFFER_TYPE) {
		IA_CSS_LEAVE("return_val = false");
		return false;
	}

	*val = buffer_type_to_queue_id_map[thread_id][buf_type];
	if (*val == SH_CSS_INVALID_QUEUE_ID || *val >= SH_CSS_MAX_NUM_QUEUES) {
		IA_CSS_LOG("INVALID queue ID MAP = %d\n", *val);
		IA_CSS_LEAVE("return_val = false");
		return false;
	}

	IA_CSS_LEAVE("return_val = true");
	return true;
}
/*******************************************************
*** Static functions
********************************************************/
/* Claim a queue for @buf_type on @thread_id and record the mapping. */
static void map_buffer_type_to_queue_id(
    unsigned int thread_id,
    enum ia_css_buffer_type buf_type)
{
	unsigned int qid;

	assert(thread_id < SH_CSS_MAX_SP_THREADS);
	assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
	assert(buffer_type_to_queue_id_map[thread_id][buf_type] ==
	       SH_CSS_INVALID_QUEUE_ID);

	/* queue 0 is reserved for parameters because it doesn't depend on events */
	if (buf_type == IA_CSS_BUFFER_TYPE_PARAMETER_SET) {
		assert(queue_availability[thread_id][IA_CSS_PARAMETER_SET_QUEUE_ID]);
		queue_availability[thread_id][IA_CSS_PARAMETER_SET_QUEUE_ID] = false;
		buffer_type_to_queue_id_map[thread_id][buf_type] =
		    IA_CSS_PARAMETER_SET_QUEUE_ID;
		return;
	}

	/* queue 1 is reserved for per frame parameters because it doesn't depend on events */
	if (buf_type == IA_CSS_BUFFER_TYPE_PER_FRAME_PARAMETER_SET) {
		assert(queue_availability[thread_id][IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID]);
		queue_availability[thread_id][IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID] = false;
		buffer_type_to_queue_id_map[thread_id][buf_type] =
		    IA_CSS_PER_FRAME_PARAMETER_SET_QUEUE_ID;
		return;
	}

	/* Every other buffer type claims the first free dynamic queue. */
	for (qid = SH_CSS_QUEUE_C_ID; qid < SH_CSS_MAX_NUM_QUEUES; qid++) {
		if (queue_availability[thread_id][qid]) {
			queue_availability[thread_id][qid] = false;
			buffer_type_to_queue_id_map[thread_id][buf_type] = qid;
			break;
		}
	}
	/* Running out of queues is a programming error. */
	assert(qid != SH_CSS_MAX_NUM_QUEUES);
}
/* Release the queue previously claimed for @buf_type on @thread_id. */
static void unmap_buffer_type_to_queue_id(
    unsigned int thread_id,
    enum ia_css_buffer_type buf_type)
{
	int qid;

	assert(thread_id < SH_CSS_MAX_SP_THREADS);
	assert(buf_type < IA_CSS_NUM_DYNAMIC_BUFFER_TYPE);
	assert(buffer_type_to_queue_id_map[thread_id][buf_type] !=
	       SH_CSS_INVALID_QUEUE_ID);

	qid = buffer_type_to_queue_id_map[thread_id][buf_type];
	buffer_type_to_queue_id_map[thread_id][buf_type] = SH_CSS_INVALID_QUEUE_ID;
	queue_availability[thread_id][qid] = true;
}
/*
 * Map a (queue type, queue id, thread) triple onto the matching local
 * queue handle, or NULL when the combination is invalid.
 * @thread and @id are only checked for the buffer-queue types; the
 * event/tag queues are singletons.
 */
static ia_css_queue_t *bufq_get_qhandle(
    enum sh_css_queue_type type,
    enum sh_css_queue_id id,
    int thread)
{
	ia_css_queue_t *q = NULL;

	switch (type) {
	case sh_css_host2sp_buffer_queue:
		if ((thread >= SH_CSS_MAX_SP_THREADS) || (thread < 0) ||
		    (id == SH_CSS_INVALID_QUEUE_ID))
			break;
		q = &css_queues.host2sp_buffer_queue_handles[thread][id];
		break;
	case sh_css_sp2host_buffer_queue:
		if (id == SH_CSS_INVALID_QUEUE_ID)
			break;
		q = &css_queues.sp2host_buffer_queue_handles[id];
		break;
	case sh_css_host2sp_psys_event_queue:
		q = &css_queues.host2sp_psys_event_queue_handle;
		break;
	case sh_css_sp2host_psys_event_queue:
		q = &css_queues.sp2host_psys_event_queue_handle;
		break;
	case sh_css_host2sp_isys_event_queue:
		q = &css_queues.host2sp_isys_event_queue_handle;
		break;
	case sh_css_sp2host_isys_event_queue:
		q = &css_queues.sp2host_isys_event_queue_handle;
		break;
	case sh_css_host2sp_tag_cmd_queue:
		q = &css_queues.host2sp_tag_cmd_queue_handle;
		break;
	default:
		break;
	}

	return q;
}
/* Local function to initialize a buffer queue. This reduces
 * the chances of copy-paste errors or typos.
 */
static inline void
init_bufq(unsigned int desc_offset,
	  unsigned int elems_offset,
	  ia_css_queue_t *handle)
{
	const struct ia_css_fw_info *fw;
	unsigned int q_base_addr;
	ia_css_queue_remote_t remoteq;

	fw = &sh_css_sp_fw;
	/* All queues live in one block in SP memory; @desc_offset and
	 * @elems_offset locate this queue's descriptor and element storage
	 * within that block. */
	q_base_addr = fw->info.sp.host_sp_queue;

	/* Setup queue location as SP and proc id as SP0_ID */
	remoteq.location = IA_CSS_QUEUE_LOC_SP;
	remoteq.proc_id = SP0_ID;
	remoteq.cb_desc_addr = q_base_addr + desc_offset;
	remoteq.cb_elems_addr = q_base_addr + elems_offset;
	/* Initialize the queue instance and obtain handle */
	ia_css_queue_remote_init(handle, &remoteq);
}
/*
 * Initialize every host-side queue handle (buffer queues, PSYS/ISYS
 * event queues and the tagger command queue) against their storage
 * inside the host_sp_queues block in SP memory.
 */
void ia_css_bufq_init(void)
{
	int i, j;

	IA_CSS_ENTER_PRIVATE("");

	/* Setup all the local queue descriptors for Host2SP Buffer Queues */
	for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++)
		for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) {
			init_bufq((uint32_t)offsetof(struct host_sp_queues,
						     host2sp_buffer_queues_desc[i][j]),
				  (uint32_t)offsetof(struct host_sp_queues, host2sp_buffer_queues_elems[i][j]),
				  &css_queues.host2sp_buffer_queue_handles[i][j]);
		}

	/* Setup all the local queue descriptors for SP2Host Buffer Queues */
	for (i = 0; i < SH_CSS_MAX_NUM_QUEUES; i++) {
		init_bufq(offsetof(struct host_sp_queues, sp2host_buffer_queues_desc[i]),
			  offsetof(struct host_sp_queues, sp2host_buffer_queues_elems[i]),
			  &css_queues.sp2host_buffer_queue_handles[i]);
	}

	/* Host2SP event queue*/
	init_bufq((uint32_t)offsetof(struct host_sp_queues,
				     host2sp_psys_event_queue_desc),
		  (uint32_t)offsetof(struct host_sp_queues, host2sp_psys_event_queue_elems),
		  &css_queues.host2sp_psys_event_queue_handle);

	/* SP2Host event queue */
	init_bufq((uint32_t)offsetof(struct host_sp_queues,
				     sp2host_psys_event_queue_desc),
		  (uint32_t)offsetof(struct host_sp_queues, sp2host_psys_event_queue_elems),
		  &css_queues.sp2host_psys_event_queue_handle);

	/* Host2SP ISYS event queue */
	init_bufq((uint32_t)offsetof(struct host_sp_queues,
				     host2sp_isys_event_queue_desc),
		  (uint32_t)offsetof(struct host_sp_queues, host2sp_isys_event_queue_elems),
		  &css_queues.host2sp_isys_event_queue_handle);

	/* SP2Host ISYS event queue*/
	init_bufq((uint32_t)offsetof(struct host_sp_queues,
				     sp2host_isys_event_queue_desc),
		  (uint32_t)offsetof(struct host_sp_queues, sp2host_isys_event_queue_elems),
		  &css_queues.sp2host_isys_event_queue_handle);

	/* Host2SP tagger command queue */
	init_bufq((uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_desc),
		  (uint32_t)offsetof(struct host_sp_queues, host2sp_tag_cmd_queue_elems),
		  &css_queues.host2sp_tag_cmd_queue_handle);

	IA_CSS_LEAVE_PRIVATE("");
}
/* Enqueue one buffer token onto the host->SP buffer queue belonging to
 * the given SP thread. Returns -EINVAL on bad arguments, -EBUSY when the
 * queue is not initialized, otherwise the ia_css_queue_enqueue() result.
 */
int ia_css_bufq_enqueue_buffer(
    int thread_index,
    int queue_id,
    uint32_t item)
{
	ia_css_queue_t *q;
	int ret;

	IA_CSS_ENTER_PRIVATE("queue_id=%d", queue_id);
	if ((thread_index >= SH_CSS_MAX_SP_THREADS) || (thread_index < 0) ||
	    (queue_id == SH_CSS_INVALID_QUEUE_ID))
		return -EINVAL;

	/* Get the queue for communication */
	q = bufq_get_qhandle(sh_css_host2sp_buffer_queue, queue_id,
			     thread_index);
	if (!q) {
		IA_CSS_ERROR("queue is not initialized");
		ret = -EBUSY;
	} else {
		ret = ia_css_queue_enqueue(q, item);
	}

	IA_CSS_LEAVE_ERR_PRIVATE(ret);
	return ret;
}
/* Dequeue one buffer token from the SP->host buffer queue 'queue_id' into
 * *item. Returns -EINVAL on bad arguments, -EBUSY when the queue is not
 * initialized, otherwise the ia_css_queue_dequeue() result.
 */
int ia_css_bufq_dequeue_buffer(
    int queue_id,
    uint32_t *item)
{
	ia_css_queue_t *q;
	int ret;

	IA_CSS_ENTER_PRIVATE("queue_id=%d", queue_id);
	if ((!item) ||
	    (queue_id <= SH_CSS_INVALID_QUEUE_ID) ||
	    (queue_id >= SH_CSS_MAX_NUM_QUEUES))
		return -EINVAL;

	/* sp2host buffer queues are not per-thread; thread is ignored. */
	q = bufq_get_qhandle(sh_css_sp2host_buffer_queue, queue_id, -1);
	if (!q) {
		IA_CSS_ERROR("queue is not initialized");
		ret = -EBUSY;
	} else {
		ret = ia_css_queue_dequeue(q, item);
	}

	IA_CSS_LEAVE_ERR_PRIVATE(ret);
	return ret;
}
/* Send a PSYS event (id plus three payload bytes) from the host to the SP.
 * Returns -EBUSY when the event queue is not initialized, otherwise the
 * ia_css_eventq_send() result.
 */
int ia_css_bufq_enqueue_psys_event(
    u8 evt_id,
    u8 evt_payload_0,
    u8 evt_payload_1,
    uint8_t evt_payload_2)
{
	int error = 0;
	ia_css_queue_t *q;

	IA_CSS_ENTER_PRIVATE("evt_id=%d", evt_id);
	/* Event queues are singletons: queue id and thread are unused (-1). */
	q = bufq_get_qhandle(sh_css_host2sp_psys_event_queue, -1, -1);
	if (!q) {
		IA_CSS_ERROR("queue is not initialized");
		return -EBUSY;
	}

	error = ia_css_eventq_send(q,
				   evt_id, evt_payload_0, evt_payload_1, evt_payload_2);

	IA_CSS_LEAVE_ERR_PRIVATE(error);
	return error;
}
/* Poll one PSYS event from the SP into item[BUFQ_EVENT_SIZE].
 * Returns -EINVAL for a NULL buffer, -EBUSY when the queue is not
 * initialized, otherwise the ia_css_eventq_recv() result.
 */
int ia_css_bufq_dequeue_psys_event(
    u8 item[BUFQ_EVENT_SIZE])
{
	int error = 0;
	ia_css_queue_t *q;

	/* No ENTER/LEAVE in this function since this is polled
	 * by some test apps. Enabling logging here floods the log
	 * files which may cause timeouts. */
	if (!item)
		return -EINVAL;

	q = bufq_get_qhandle(sh_css_sp2host_psys_event_queue, -1, -1);
	if (!q) {
		IA_CSS_ERROR("queue is not initialized");
		return -EBUSY;
	}
	error = ia_css_eventq_recv(q, item);

	return error;
}
/* Poll one ISYS event from the SP into item[BUFQ_EVENT_SIZE].
 * Same contract as ia_css_bufq_dequeue_psys_event(), but for the
 * SP->host ISYS event queue.
 */
int ia_css_bufq_dequeue_isys_event(
    u8 item[BUFQ_EVENT_SIZE])
{
	int error = 0;
	ia_css_queue_t *q;

	/* No ENTER/LEAVE in this function since this is polled
	 * by some test apps. Enabling logging here floods the log
	 * files which may cause timeouts. */
	if (!item)
		return -EINVAL;

	q = bufq_get_qhandle(sh_css_sp2host_isys_event_queue, -1, -1);
	if (!q) {
		IA_CSS_ERROR("queue is not initialized");
		return -EBUSY;
	}
	error = ia_css_eventq_recv(q, item);

	return error;
}
/* Send an ISYS event (id only, zero payload) from the host to the SP.
 * Returns -EBUSY when the event queue is not initialized, otherwise the
 * ia_css_eventq_send() result.
 */
int ia_css_bufq_enqueue_isys_event(uint8_t evt_id)
{
	int error = 0;
	ia_css_queue_t *q;

	IA_CSS_ENTER_PRIVATE("event_id=%d", evt_id);
	q = bufq_get_qhandle(sh_css_host2sp_isys_event_queue, -1, -1);
	if (!q) {
		IA_CSS_ERROR("queue is not initialized");
		return -EBUSY;
	}

	error = ia_css_eventq_send(q, evt_id, 0, 0, 0);

	IA_CSS_LEAVE_ERR_PRIVATE(error);
	return error;
}
/* Enqueue one tagger command word onto the host->SP tag command queue.
 * Returns -EBUSY when the queue is not initialized, otherwise the
 * ia_css_queue_enqueue() result.
 */
int ia_css_bufq_enqueue_tag_cmd(
    uint32_t item)
{
	int error;
	ia_css_queue_t *q;

	IA_CSS_ENTER_PRIVATE("item=%d", item);
	q = bufq_get_qhandle(sh_css_host2sp_tag_cmd_queue, -1, -1);
	if (!q) {
		IA_CSS_ERROR("queue is not initialized");
		return -EBUSY;
	}
	error = ia_css_queue_enqueue(q, item);

	IA_CSS_LEAVE_ERR_PRIVATE(error);
	return error;
}
/* Nothing to tear down: the queue handles are statically allocated and are
 * re-initialized by ia_css_bufq_init(). Kept for interface symmetry. */
int ia_css_bufq_deinit(void)
{
	return 0;
}
/* Trace the used/free element counts of one queue, labeled by 'prefix'. */
static void bufq_dump_queue_info(const char *prefix, ia_css_queue_t *qhandle)
{
	u32 free = 0, used = 0;

	assert(prefix && qhandle);
	ia_css_queue_get_used_space(qhandle, &used);
	ia_css_queue_get_free_space(qhandle, &free);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s: used=%u free=%u\n",
			    prefix, used, free);
}
/* Dump used/free counts for every host<->SP queue to the debug trace. */
void ia_css_bufq_dump_queue_info(void)
{
	int i, j;
	/* Scratch buffer for the per-queue label built by snprintf() below.
	 * This local was missing, leaving 'prefix' an undeclared identifier. */
	char prefix[BUFQ_DUMP_FILE_NAME_PREFIX_SIZE] = {0};

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Queue Information:\n");

	for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
		for (j = 0; j < SH_CSS_MAX_NUM_QUEUES; j++) {
			snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE,
				 "host2sp_buffer_queue[%u][%u]", i, j);
			bufq_dump_queue_info(prefix,
					     &css_queues.host2sp_buffer_queue_handles[i][j]);
		}
	}

	for (i = 0; i < SH_CSS_MAX_NUM_QUEUES; i++) {
		snprintf(prefix, BUFQ_DUMP_FILE_NAME_PREFIX_SIZE,
			 "sp2host_buffer_queue[%u]", i);
		bufq_dump_queue_info(prefix,
				     &css_queues.sp2host_buffer_queue_handles[i]);
	}

	/* Singleton event/command queues get fixed labels. */
	bufq_dump_queue_info("host2sp_psys_event",
			     &css_queues.host2sp_psys_event_queue_handle);
	bufq_dump_queue_info("sp2host_psys_event",
			     &css_queues.sp2host_psys_event_queue_handle);
	bufq_dump_queue_info("host2sp_isys_event",
			     &css_queues.host2sp_isys_event_queue_handle);
	bufq_dump_queue_info("sp2host_isys_event",
			     &css_queues.sp2host_isys_event_queue_handle);
	bufq_dump_queue_info("host2sp_tag_cmd",
			     &css_queues.host2sp_tag_cmd_queue_handle);
}
| linux-master | drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010 - 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "hmm.h"
#include "ia_css_debug.h"
#include "sw_event_global.h" /* encode_sw_event */
#include "sp.h" /* cnd_sp_irq_enable() */
#include "assert_support.h"
#include "sh_css_sp.h"
#include "ia_css_pipeline.h"
#include "ia_css_isp_param.h"
#include "ia_css_bufq.h"
#define PIPELINE_NUM_UNMAPPED (~0U)
#define PIPELINE_SP_THREAD_EMPTY_TOKEN (0x0)
#define PIPELINE_SP_THREAD_RESERVED_TOKEN (0x1)
/*******************************************************
*** Static variables
********************************************************/
static unsigned int pipeline_num_to_sp_thread_map[IA_CSS_PIPELINE_NUM_MAX];
static unsigned int pipeline_sp_thread_list[SH_CSS_MAX_SP_THREADS];
/*******************************************************
*** Static functions
********************************************************/
static void pipeline_init_sp_thread_map(void);
static void pipeline_map_num_to_sp_thread(unsigned int pipe_num);
static void pipeline_unmap_num_to_sp_thread(unsigned int pipe_num);
static void pipeline_init_defaults(
struct ia_css_pipeline *pipeline,
enum ia_css_pipe_id pipe_id,
unsigned int pipe_num,
unsigned int dvs_frame_delay);
static void pipeline_stage_destroy(struct ia_css_pipeline_stage *stage);
static int pipeline_stage_create(
struct ia_css_pipeline_stage_desc *stage_desc,
struct ia_css_pipeline_stage **new_stage);
static void ia_css_pipeline_set_zoom_stage(struct ia_css_pipeline *pipeline);
static void ia_css_pipeline_configure_inout_port(struct ia_css_pipeline *me,
bool continuous);
/*******************************************************
*** Public functions
********************************************************/
/* Reset the pipeline-number -> SP-thread bookkeeping tables. */
void ia_css_pipeline_init(void)
{
	pipeline_init_sp_thread_map();
}
/* Initialize 'pipeline' to its default (empty) state with the given
 * identity. Returns -EINVAL for a NULL pipeline, 0 otherwise.
 */
int ia_css_pipeline_create(
    struct ia_css_pipeline *pipeline,
    enum ia_css_pipe_id pipe_id,
    unsigned int pipe_num,
    unsigned int dvs_frame_delay)
{
	assert(pipeline);
	IA_CSS_ENTER_PRIVATE("pipeline = %p, pipe_id = %d, pipe_num = %d, dvs_frame_delay = %d",
			     pipeline, pipe_id, pipe_num, dvs_frame_delay);
	if (!pipeline) {
		IA_CSS_LEAVE_ERR_PRIVATE(-EINVAL);
		return -EINVAL;
	}

	pipeline_init_defaults(pipeline, pipe_id, pipe_num, dvs_frame_delay);

	IA_CSS_LEAVE_ERR_PRIVATE(0);
	return 0;
}
/* Acquire (map = true) or release (map = false) the SP thread backing
 * pipeline number 'pipe_num'. Out-of-range numbers are rejected with a
 * trace error.
 */
void ia_css_pipeline_map(unsigned int pipe_num, bool map)
{
	void (*action)(unsigned int);

	assert(pipe_num < IA_CSS_PIPELINE_NUM_MAX);
	IA_CSS_ENTER_PRIVATE("pipe_num = %d, map = %d", pipe_num, map);

	if (pipe_num >= IA_CSS_PIPELINE_NUM_MAX) {
		IA_CSS_ERROR("Invalid pipe number");
		IA_CSS_LEAVE_PRIVATE("void");
		return;
	}
	action = map ? pipeline_map_num_to_sp_thread
		     : pipeline_unmap_num_to_sp_thread;
	action(pipe_num);
	IA_CSS_LEAVE_PRIVATE("void");
}
/* @brief destroy a pipeline
 *
 * @param[in] pipeline pipeline to tear down (NULL is tolerated)
 * @return None
 *
 * Frees all stages and resets the pipeline to defaults via
 * ia_css_pipeline_clean(). The pipeline struct itself is not freed.
 */
void ia_css_pipeline_destroy(struct ia_css_pipeline *pipeline)
{
	assert(pipeline);
	IA_CSS_ENTER_PRIVATE("pipeline = %p", pipeline);
	if (!pipeline) {
		IA_CSS_ERROR("NULL input parameter");
		IA_CSS_LEAVE_PRIVATE("void");
		return;
	}
	IA_CSS_LOG("pipe_num = %d", pipeline->pipe_num);

	/* Free the pipeline number */
	ia_css_pipeline_clean(pipeline);

	IA_CSS_LEAVE_PRIVATE("void");
}
/* Run a pipeline and wait till it completes. */
void ia_css_pipeline_start(enum ia_css_pipe_id pipe_id,
			   struct ia_css_pipeline *pipeline)
{
	/* NOTE(review): pipe_num is hard-coded to 0, so the SP thread lookup
	 * below always resolves pipeline number 0 — presumably this path is
	 * only used for single-pipeline memory-input operation; confirm. */
	u8 pipe_num = 0;
	unsigned int thread_id;

	assert(pipeline);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_pipeline_start() enter: pipe_id=%d, pipeline=%p\n",
			    pipe_id, pipeline);
	pipeline->pipe_id = pipe_id;
	/* Program the SP pipeline descriptor: memory input, no overrides. */
	sh_css_sp_init_pipeline(pipeline, pipe_id, pipe_num,
				false, false, false, true, SH_CSS_BDS_FACTOR_1_00,
				SH_CSS_PIPE_CONFIG_OVRD_NO_OVRD,
				IA_CSS_INPUT_MODE_MEMORY, NULL, NULL,
				(enum mipi_port_id)0);

	ia_css_pipeline_get_sp_thread_id(pipe_num, &thread_id);
	if (!sh_css_sp_is_running()) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "ia_css_pipeline_start() error,leaving\n");
		/* queues are invalid*/
		return;
	}
	/* Kick the SP thread that will execute this pipeline. */
	ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_START_STREAM,
				       (uint8_t)thread_id,
				       0,
				       0);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_pipeline_start() leave: return_void\n");
}
/*
 * @brief Query the SP thread ID mapped to pipeline number 'key'.
 * Refer to "sh_css_internal.h" for details.
 *
 * Returns false for a NULL output pointer, an out-of-range key, or an
 * unmapped pipeline number; on success stores the thread id in *val.
 */
bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val)
{
	IA_CSS_ENTER("key=%d, val=%p", key, val);

	if ((!val) || (key >= IA_CSS_PIPELINE_NUM_MAX) || (key >= IA_CSS_PIPE_ID_NUM)) {
		IA_CSS_LEAVE("return value = false");
		return false;
	}

	*val = pipeline_num_to_sp_thread_map[key];

	if (*val == (unsigned int)PIPELINE_NUM_UNMAPPED) {
		IA_CSS_LOG("unmapped pipeline number");
		IA_CSS_LEAVE("return value = false");
		return false;
	}
	IA_CSS_LEAVE("return value = true");
	return true;
}
/* Trace the full pipeline-number -> SP-thread mapping table. */
void ia_css_pipeline_dump_thread_map_info(void)
{
	unsigned int i;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "pipeline_num_to_sp_thread_map:\n");
	for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "pipe_num: %u, tid: 0x%x\n", i, pipeline_num_to_sp_thread_map[i]);
	}
}
/* Request the SP to stop the given pipeline and uninit its SP descriptor.
 * Returns -EINVAL for NULL, -EBUSY when the SP is not running, 0 otherwise.
 */
int ia_css_pipeline_request_stop(struct ia_css_pipeline *pipeline)
{
	/* NOTE(review): err stays 0 on the success path and the result of
	 * ia_css_bufq_enqueue_psys_event() is ignored — a failed stop event
	 * is not reported to the caller; confirm this is intentional. */
	int err = 0;
	unsigned int thread_id;

	assert(pipeline);

	if (!pipeline)
		return -EINVAL;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_pipeline_request_stop() enter: pipeline=%p\n",
			    pipeline);
	pipeline->stop_requested = true;

	/* Send stop event to the sp*/
	/* This needs improvement, stop on all the pipes available
	 * in the stream*/
	ia_css_pipeline_get_sp_thread_id(pipeline->pipe_num, &thread_id);
	if (!sh_css_sp_is_running()) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "ia_css_pipeline_request_stop() leaving\n");
		/* queues are invalid */
		return -EBUSY;
	}
	ia_css_bufq_enqueue_psys_event(IA_CSS_PSYS_SW_EVENT_STOP_STREAM,
				       (uint8_t)thread_id,
				       0,
				       0);
	sh_css_sp_uninit_pipeline(pipeline->pipe_num);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_pipeline_request_stop() leave: return_err=%d\n",
			    err);
	return err;
}
/* Destroy every stage of 'pipeline' and restore the pipeline defaults,
 * keeping its identity (pipe_id, pipe_num, dvs_frame_delay).
 */
void ia_css_pipeline_clean(struct ia_css_pipeline *pipeline)
{
	struct ia_css_pipeline_stage *cursor, *next;

	assert(pipeline);
	IA_CSS_ENTER_PRIVATE("pipeline = %p", pipeline);

	if (!pipeline) {
		IA_CSS_ERROR("NULL input parameter");
		IA_CSS_LEAVE_PRIVATE("void");
		return;
	}

	for (cursor = pipeline->stages; cursor; cursor = next) {
		next = cursor->next;
		pipeline_stage_destroy(cursor);
	}
	pipeline_init_defaults(pipeline, pipeline->pipe_id, pipeline->pipe_num,
			       pipeline->dvs_frame_delay);

	IA_CSS_LEAVE_PRIVATE("void");
}
/* @brief Add a stage to pipeline.
 *
 * @param pipeline Pointer to the pipeline to be added to.
 * @param[in] stage_desc The description of the stage
 * @param[out] stage The newly created stage (optional, may be NULL).
 * @return 0 or error code upon error.
 *
 * Add a new stage to a non-NULL pipeline.
 * The stage consists of an ISP binary or firmware and input and
 * output arguments.
 */
int ia_css_pipeline_create_and_add_stage(
    struct ia_css_pipeline *pipeline,
    struct ia_css_pipeline_stage_desc *stage_desc,
    struct ia_css_pipeline_stage **stage)
{
	struct ia_css_pipeline_stage *last, *new_stage = NULL;
	int err;

	/* other arguments can be NULL */
	assert(pipeline);
	assert(stage_desc);
	last = pipeline->stages;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_pipeline_create_and_add_stage() enter:\n");
	/* A stage must provide at least one of binary/firmware/sp_func. */
	if (!stage_desc->binary && !stage_desc->firmware
	    && (stage_desc->sp_func == IA_CSS_PIPELINE_NO_FUNC)) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "ia_css_pipeline_create_and_add_stage() done: Invalid args\n");
		return -EINVAL;
	}

	/* Find the last stage */
	while (last && last->next)
		last = last->next;

	/* if in_frame is not set, we use the out_frame from the previous
	 * stage, if no previous stage, it's an error.
	 */
	if ((stage_desc->sp_func == IA_CSS_PIPELINE_NO_FUNC)
	    && (!stage_desc->in_frame)
	    && (!stage_desc->firmware)
	    && (!stage_desc->binary->online)) {
		/* Do this only for ISP stages*/
		if (last && last->args.out_frame[0])
			stage_desc->in_frame = last->args.out_frame[0];

		if (!stage_desc->in_frame)
			return -EINVAL;
	}

	/* Create the new stage */
	err = pipeline_stage_create(stage_desc, &new_stage);
	if (err) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "ia_css_pipeline_create_and_add_stage() done: stage_create_failed\n");
		return err;
	}

	/* Append to the list, or start it if the pipeline was empty. */
	if (last)
		last->next = new_stage;
	else
		pipeline->stages = new_stage;

	/* Output the new stage */
	if (stage)
		*stage = new_stage;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_pipeline_create_and_add_stage() done:\n");
	return 0;
}
/* Number the stages sequentially, record the stage count, then finish
 * configuration (zoom stage selection and in/out port setup).
 */
void ia_css_pipeline_finalize_stages(struct ia_css_pipeline *pipeline,
				     bool continuous)
{
	struct ia_css_pipeline_stage *cursor;
	unsigned int count = 0;

	assert(pipeline);
	for (cursor = pipeline->stages; cursor; cursor = cursor->next)
		cursor->stage_num = count++;
	pipeline->num_stages = count;

	ia_css_pipeline_set_zoom_stage(pipeline);
	ia_css_pipeline_configure_inout_port(pipeline, continuous);
}
/* Find the first stage whose mode equals 'mode'; store it in *stage.
 * Returns 0 on success, -EINVAL when no stage matches.
 */
int ia_css_pipeline_get_stage(struct ia_css_pipeline *pipeline,
			      int mode,
			      struct ia_css_pipeline_stage **stage)
{
	struct ia_css_pipeline_stage *cursor;

	assert(pipeline);
	assert(stage);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_pipeline_get_stage() enter:\n");
	cursor = pipeline->stages;
	while (cursor) {
		if (cursor->mode == mode) {
			*stage = cursor;
			return 0;
		}
		cursor = cursor->next;
	}
	return -EINVAL;
}
/* Find the firmware stage whose handle equals 'fw_handle'; store it in
 * *stage. Returns 0 on success, -EINVAL when no stage matches.
 */
int ia_css_pipeline_get_stage_from_fw(struct ia_css_pipeline
				      *pipeline,
				      u32 fw_handle,
				      struct ia_css_pipeline_stage **stage)
{
	struct ia_css_pipeline_stage *cursor;

	assert(pipeline);
	assert(stage);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
	cursor = pipeline->stages;
	while (cursor) {
		if (cursor->firmware && cursor->firmware->handle == fw_handle) {
			*stage = cursor;
			return 0;
		}
		cursor = cursor->next;
	}
	return -EINVAL;
}
/* Reverse lookup of the above: given a stage number, return the firmware
 * handle of that stage in *fw_handle. Returns 0 on success, -EINVAL on
 * NULL arguments or when the stage is absent or has no firmware.
 */
int ia_css_pipeline_get_fw_from_stage(struct ia_css_pipeline
				      *pipeline,
				      u32 stage_num,
				      uint32_t *fw_handle)
{
	struct ia_css_pipeline_stage *cursor;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
	if (!pipeline || !fw_handle)
		return -EINVAL;

	cursor = pipeline->stages;
	while (cursor) {
		if (cursor->firmware && cursor->stage_num == stage_num) {
			*fw_handle = cursor->firmware->handle;
			return 0;
		}
		cursor = cursor->next;
	}
	return -EINVAL;
}
/* Find the stage that produces the pipeline output for 'mode': prefer the
 * last matching acceleration firmware stage with output enabled; fall back
 * to the first binary stage with that mode. Returns 0 or -EINVAL.
 */
int ia_css_pipeline_get_output_stage(
    struct ia_css_pipeline *pipeline,
    int mode,
    struct ia_css_pipeline_stage **stage)
{
	struct ia_css_pipeline_stage *s;

	assert(pipeline);
	assert(stage);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_pipeline_get_output_stage() enter:\n");

	*stage = NULL;
	/* First find acceleration firmware at end of pipe */
	for (s = pipeline->stages; s; s = s->next) {
		if (s->firmware && s->mode == mode &&
		    s->firmware->info.isp.sp.enable.output)
			*stage = s;
	}
	if (*stage)
		return 0;
	/* If no firmware, find binary in pipe */
	return ia_css_pipeline_get_stage(pipeline, mode, stage);
}
/* Check whether the SP has finished this pipeline by reading the SP group
 * descriptor from SP DMEM and testing the thread's stage count.
 * NOTE(review): the static buffer makes this non-reentrant — confirm all
 * callers are serialized.
 */
bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipeline)
{
	/* Android compilation fails if made a local variable:
	   stack size on android is limited to 2k and this structure
	   is around 2.5K; in place of static, malloc could be done but
	   if this call is made too often it will lead to fragmented memory
	   versus a fixed allocation */
	static struct sh_css_sp_group sp_group;
	unsigned int thread_id;
	const struct ia_css_fw_info *fw;
	unsigned int HIVE_ADDR_sp_group;

	fw = &sh_css_sp_fw;
	HIVE_ADDR_sp_group = fw->info.sp.group;

	ia_css_pipeline_get_sp_thread_id(pipeline->pipe_num, &thread_id);
	/* Copy the SP-side group state into the static host buffer. */
	sp_dmem_load(SP0_ID,
		     (unsigned int)sp_address_of(sp_group),
		     &sp_group, sizeof(struct sh_css_sp_group));
	return sp_group.pipe[thread_id].num_stages == 0;
}
#if defined(ISP2401)
/* Expose the SP pipeline I/O status block (ISP2401 builds only). */
struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void)
{
	return(&sh_css_sp_group.pipe_io_status);
}
#endif
/* A pipeline number is "mapped" when ia_css_pipeline_map() has assigned it
 * an SP thread. Out-of-range keys are reported as not mapped.
 */
bool ia_css_pipeline_is_mapped(unsigned int key)
{
	bool mapped = false;

	IA_CSS_ENTER_PRIVATE("key = %d", key);

	if (key >= IA_CSS_PIPELINE_NUM_MAX || key >= IA_CSS_PIPE_ID_NUM) {
		IA_CSS_ERROR("Invalid key!!");
		IA_CSS_LEAVE_PRIVATE("return = %d", false);
		return false;
	}

	mapped = pipeline_num_to_sp_thread_map[key] !=
		 (unsigned int)PIPELINE_NUM_UNMAPPED;

	IA_CSS_LEAVE_PRIVATE("return = %d", mapped);
	return mapped;
}
/*******************************************************
*** Static functions
********************************************************/
/* Pipeline:
* To organize the several different binaries for each type of mode,
* we use a pipeline. A pipeline contains a number of stages, each with
* their own binary and frame pointers.
* When stages are added to a pipeline, output frames that are not passed
* from outside are automatically allocated.
* When input frames are not passed from outside, each stage will use the
* output frame of the previous stage as input (the full resolution output,
* not the viewfinder output).
* Pipelines must be cleaned and re-created when settings of the binaries
* change.
*/
/* Free the frames this stage allocated itself (tracked by the *_allocated
 * flags), then free the stage object.
 */
static void pipeline_stage_destroy(struct ia_css_pipeline_stage *stage)
{
	unsigned int port;

	for (port = 0; port < IA_CSS_BINARY_MAX_OUTPUT_PORTS; port++) {
		if (!stage->out_frame_allocated[port])
			continue;
		ia_css_frame_free(stage->args.out_frame[port]);
		stage->args.out_frame[port] = NULL;
	}
	if (stage->vf_frame_allocated) {
		ia_css_frame_free(stage->args.out_vf_frame);
		stage->args.out_vf_frame = NULL;
	}
	kvfree(stage);
}
/* Reset the SP thread free-list and the pipeline-number map. */
static void pipeline_init_sp_thread_map(void)
{
	unsigned int i;

	/* NOTE(review): the loop starts at index 1, leaving slot 0 untouched.
	 * Static storage is zero-initialized and
	 * PIPELINE_SP_THREAD_EMPTY_TOKEN is 0x0, so slot 0 is effectively
	 * EMPTY too — confirm whether thread 0 is meant to be reserved. */
	for (i = 1; i < SH_CSS_MAX_SP_THREADS; i++)
		pipeline_sp_thread_list[i] = PIPELINE_SP_THREAD_EMPTY_TOKEN;

	/* No pipeline number is mapped to any SP thread yet. */
	for (i = 0; i < IA_CSS_PIPELINE_NUM_MAX; i++)
		pipeline_num_to_sp_thread_map[i] = PIPELINE_NUM_UNMAPPED;
}
/* Reserve the first free SP thread for pipeline number 'pipe_num' and
 * record the mapping. Asserts if the pipe is already mapped or if no
 * free thread exists.
 */
static void pipeline_map_num_to_sp_thread(unsigned int pipe_num)
{
	unsigned int i;
	bool found_sp_thread = false;

	/* pipe is not mapped to any thread */
	assert(pipeline_num_to_sp_thread_map[pipe_num]
	       == (unsigned int)PIPELINE_NUM_UNMAPPED);

	for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++) {
		if (pipeline_sp_thread_list[i] ==
		    PIPELINE_SP_THREAD_EMPTY_TOKEN) {
			pipeline_sp_thread_list[i] =
			    PIPELINE_SP_THREAD_RESERVED_TOKEN;
			pipeline_num_to_sp_thread_map[pipe_num] = i;
			found_sp_thread = true;
			break;
		}
	}

	/* Make sure a mapping is found */
	/* I could do:
		assert(i < SH_CSS_MAX_SP_THREADS);
	But the below is more descriptive.
	*/
	assert(found_sp_thread);
}
/* Release the SP thread reserved for pipeline number 'pipe_num' and clear
 * the mapping. Asserts if the pipe was not mapped.
 */
static void pipeline_unmap_num_to_sp_thread(unsigned int pipe_num)
{
	unsigned int thread_id;

	assert(pipeline_num_to_sp_thread_map[pipe_num]
	       != (unsigned int)PIPELINE_NUM_UNMAPPED);

	thread_id = pipeline_num_to_sp_thread_map[pipe_num];
	pipeline_num_to_sp_thread_map[pipe_num] = PIPELINE_NUM_UNMAPPED;
	pipeline_sp_thread_list[thread_id] = PIPELINE_SP_THREAD_EMPTY_TOKEN;
}
/* Allocate and populate a pipeline stage from its descriptor: bind the
 * binary/firmware, and allocate any output/viewfinder frames the caller
 * did not supply (recorded in the *_allocated flags so destroy knows what
 * to free). On failure all partially allocated state is released.
 */
static int pipeline_stage_create(
    struct ia_css_pipeline_stage_desc *stage_desc,
    struct ia_css_pipeline_stage **new_stage)
{
	int err = 0;
	struct ia_css_pipeline_stage *stage = NULL;
	struct ia_css_binary *binary;
	struct ia_css_frame *vf_frame;
	struct ia_css_frame *out_frame[IA_CSS_BINARY_MAX_OUTPUT_PORTS];
	const struct ia_css_fw_info *firmware;
	unsigned int i;

	/* Verify input parameters: an offline binary stage needs an input
	 * frame (online stages stream their input). */
	if (!(stage_desc->in_frame) && !(stage_desc->firmware)
	    && (stage_desc->binary) && !(stage_desc->binary->online)) {
		err = -EINVAL;
		goto ERR;
	}

	binary = stage_desc->binary;
	firmware = stage_desc->firmware;
	vf_frame = stage_desc->vf_frame;
	for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
		out_frame[i] = stage_desc->out_frame[i];
	}

	stage = kvzalloc(sizeof(*stage), GFP_KERNEL);
	if (!stage) {
		err = -ENOMEM;
		goto ERR;
	}

	/* Firmware stages take their binary info from the firmware blob;
	 * binary stages take it from the binary itself. */
	if (firmware) {
		stage->binary = NULL;
		stage->binary_info =
		    (struct ia_css_binary_info *)&firmware->info.isp;
	} else {
		stage->binary = binary;
		if (binary)
			stage->binary_info =
			    (struct ia_css_binary_info *)binary->info;
		else
			stage->binary_info = NULL;
	}

	stage->firmware = firmware;
	stage->sp_func = stage_desc->sp_func;
	stage->max_input_width = stage_desc->max_input_width;
	stage->mode = stage_desc->mode;
	for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
		stage->out_frame_allocated[i] = false;
	stage->vf_frame_allocated = false;
	stage->next = NULL;
	sh_css_binary_args_reset(&stage->args);

	/* Allocate any output frame the caller did not supply but the
	 * binary produces (non-zero output width). */
	for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
		if (!(out_frame[i]) && (binary)
		    && (binary->out_frame_info[i].res.width)) {
			err = ia_css_frame_allocate_from_info(&out_frame[i],
							      &binary->out_frame_info[i]);
			if (err)
				goto ERR;
			stage->out_frame_allocated[i] = true;
		}
	}
	/* VF frame is not needed in case of need_pp
	   However, the capture binary needs a vf frame to write to.
	*/
	if (!vf_frame) {
		/* NOTE(review): if binary is NULL but firmware enables
		 * vf_veceven, &binary->vf_frame_info below dereferences a
		 * NULL binary — confirm firmware-only stages never enable
		 * vf_veceven. */
		if ((binary && binary->vf_frame_info.res.width) ||
		    (firmware && firmware->info.isp.sp.enable.vf_veceven)
		   ) {
			err = ia_css_frame_allocate_from_info(&vf_frame,
							      &binary->vf_frame_info);
			if (err)
				goto ERR;
			stage->vf_frame_allocated = true;
		}
	} else if (vf_frame && binary && binary->vf_frame_info.res.width
		   && !firmware) {
		/* only mark as allocated if buffer pointer available */
		if (vf_frame->data != mmgr_NULL)
			stage->vf_frame_allocated = true;
	}

	stage->args.in_frame = stage_desc->in_frame;
	for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++)
		stage->args.out_frame[i] = out_frame[i];
	stage->args.out_vf_frame = vf_frame;
	*new_stage = stage;
	return err;
ERR:
	if (stage)
		pipeline_stage_destroy(stage);
	return err;
}
/* Template frame used to reset the pipeline's frame slots. */
static const struct ia_css_frame ia_css_default_frame = DEFAULT_FRAME;

/* Reset 'pipeline' to an empty default state while installing the given
 * identity (pipe_id, pipe_num, dvs_frame_delay). Stages are assumed to be
 * already freed by the caller.
 */
static void pipeline_init_defaults(
    struct ia_css_pipeline *pipeline,
    enum ia_css_pipe_id pipe_id,
    unsigned int pipe_num,
    unsigned int dvs_frame_delay)
{
	unsigned int i;

	pipeline->pipe_id = pipe_id;
	pipeline->stages = NULL;
	pipeline->stop_requested = false;
	pipeline->current_stage = NULL;

	memcpy(&pipeline->in_frame, &ia_css_default_frame,
	       sizeof(ia_css_default_frame));
	for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
		memcpy(&pipeline->out_frame[i], &ia_css_default_frame,
		       sizeof(ia_css_default_frame));
		memcpy(&pipeline->vf_frame[i], &ia_css_default_frame,
		       sizeof(ia_css_default_frame));
	}
	/* -1 means "run until stopped" as opposed to a fixed exec count. */
	pipeline->num_execs = -1;
	pipeline->acquire_isp_each_stage = true;
	pipeline->pipe_num = (uint8_t)pipe_num;
	pipeline->dvs_frame_delay = dvs_frame_delay;
}
/* Mark the stage that must apply digital zoom for this pipe type:
 * vf_pp for preview, capture_pp for capture (and the first yuv_scaler,
 * which shares the CAPTURE_PP mode, for yuvpp), the video binary for
 * video. Other pipe types get no zoom stage.
 */
static void ia_css_pipeline_set_zoom_stage(struct ia_css_pipeline *pipeline)
{
	struct ia_css_pipeline_stage *stage = NULL;
	int err;

	assert(pipeline);
	switch (pipeline->pipe_id) {
	case IA_CSS_PIPE_ID_PREVIEW:
		/* in preview pipeline, vf_pp stage should do zoom */
		err = ia_css_pipeline_get_stage(pipeline,
						IA_CSS_BINARY_MODE_VF_PP,
						&stage);
		break;
	case IA_CSS_PIPE_ID_CAPTURE:
	case IA_CSS_PIPE_ID_YUVPP:
		/* capture_pp stage (first yuv_scaler for yuvpp) does zoom */
		err = ia_css_pipeline_get_stage(pipeline,
						IA_CSS_BINARY_MODE_CAPTURE_PP,
						&stage);
		break;
	case IA_CSS_PIPE_ID_VIDEO:
		/* in video pipeline, video stage should do zoom */
		err = ia_css_pipeline_get_stage(pipeline,
						IA_CSS_BINARY_MODE_VIDEO,
						&stage);
		break;
	default:
		return;
	}
	if (!err)
		stage->enable_zoom = true;
}
/* Configure the pipeline's input/output port types per pipe id: copy-sink
 * or tagger-sink ports are used in continuous mode, host ports otherwise.
 * The result is packed into me->inout_port_config.
 */
static void
ia_css_pipeline_configure_inout_port(struct ia_css_pipeline *me,
				     bool continuous)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_pipeline_configure_inout_port() enter: pipe_id(%d) continuous(%d)\n",
			    me->pipe_id, continuous);
	switch (me->pipe_id) {
	case IA_CSS_PIPE_ID_PREVIEW:
	case IA_CSS_PIPE_ID_VIDEO:
		/* Continuous capture feeds these pipes from the copy sink. */
		SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
					    (uint8_t)SH_CSS_PORT_INPUT,
					    (uint8_t)(continuous ? SH_CSS_COPYSINK_TYPE : SH_CSS_HOST_TYPE), 1);
		SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
					    (uint8_t)SH_CSS_PORT_OUTPUT,
					    (uint8_t)SH_CSS_HOST_TYPE, 1);
		break;
	case IA_CSS_PIPE_ID_COPY: /*Copy pipe ports configured to "offline" mode*/
		SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
					    (uint8_t)SH_CSS_PORT_INPUT,
					    (uint8_t)SH_CSS_HOST_TYPE, 1);
		if (continuous) {
			/* Copy pipe fans out to both copy and tagger sinks. */
			SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
						    (uint8_t)SH_CSS_PORT_OUTPUT,
						    (uint8_t)SH_CSS_COPYSINK_TYPE, 1);
			SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
						    (uint8_t)SH_CSS_PORT_OUTPUT,
						    (uint8_t)SH_CSS_TAGGERSINK_TYPE, 1);
		} else {
			SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
						    (uint8_t)SH_CSS_PORT_OUTPUT,
						    (uint8_t)SH_CSS_HOST_TYPE, 1);
		}
		break;
	case IA_CSS_PIPE_ID_CAPTURE:
		/* Continuous capture is fed by the tagger sink. */
		SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
					    (uint8_t)SH_CSS_PORT_INPUT,
					    (uint8_t)(continuous ? SH_CSS_TAGGERSINK_TYPE : SH_CSS_HOST_TYPE),
					    1);
		SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
					    (uint8_t)SH_CSS_PORT_OUTPUT,
					    (uint8_t)SH_CSS_HOST_TYPE, 1);
		break;
	case IA_CSS_PIPE_ID_YUVPP:
		SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
					    (uint8_t)SH_CSS_PORT_INPUT,
					    (uint8_t)(SH_CSS_HOST_TYPE), 1);
		SH_CSS_PIPE_PORT_CONFIG_SET(me->inout_port_config,
					    (uint8_t)SH_CSS_PORT_OUTPUT,
					    (uint8_t)SH_CSS_HOST_TYPE, 1);
		break;
	default:
		break;
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_pipeline_configure_inout_port() leave: inout_port_config(%x)\n",
			    me->inout_port_config);
}
| linux-master | drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010 - 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "platform_support.h"
#include "ia_css_inputfifo.h"
#include "device_access.h"
#define __INLINE_SP__
#include "sp.h"
#define __INLINE_ISP__
#include "isp.h"
#define __INLINE_IRQ__
#include "irq.h"
#define __INLINE_FIFO_MONITOR__
#include "fifo_monitor.h"
#define __INLINE_EVENT__
#include "event_fifo.h"
#define __INLINE_SP__
#include "input_system.h" /* MIPI_PREDICTOR_NONE,... */
#include "assert_support.h"
/* System independent */
#include "sh_css_internal.h"
#include "ia_css_isys.h"
#define HBLANK_CYCLES (187)
#define MARKER_CYCLES (6)
#include <hive_isp_css_streaming_to_mipi_types_hrt.h>
/* The data type is used to send special cases:
* yuv420: odd lines (1, 3 etc) are twice as wide as even
* lines (0, 2, 4 etc).
* rgb: for two pixels per clock, the R and B values are sent
* to output_0 while only G is sent to output_1. This means
* that output_1 only gets half the number of values of output_0.
* WARNING: This type should also be used for Legacy YUV420.
* regular: used for all other data types (RAW, YUV422, etc)
*/
enum inputfifo_mipi_data_type {
inputfifo_mipi_data_type_regular,
inputfifo_mipi_data_type_yuv420,
inputfifo_mipi_data_type_yuv420_legacy,
inputfifo_mipi_data_type_rgb,
};
static unsigned int inputfifo_curr_ch_id, inputfifo_curr_fmt_type;
struct inputfifo_instance {
unsigned int ch_id;
enum atomisp_input_format input_format;
bool two_ppc;
bool streaming;
unsigned int hblank_cycles;
unsigned int marker_cycles;
unsigned int fmt_type;
enum inputfifo_mipi_data_type type;
};
/*
* Maintain a basic streaming to Mipi administration with ch_id as index
* ch_id maps on the "Mipi virtual channel ID" and can have value 0..3
*/
#define INPUTFIFO_NR_OF_S2M_CHANNELS (4)
static struct inputfifo_instance
inputfifo_inst_admin[INPUTFIFO_NR_OF_S2M_CHANNELS];
/* Streaming to MIPI */
/* Fold the current channel id and format type into a marker token so the
 * str2mipi interface routes it to the right virtual channel/format. */
static unsigned int inputfifo_wrap_marker(
    unsigned int marker)
{
	return marker |
	       (inputfifo_curr_ch_id << HIVE_STR_TO_MIPI_CH_ID_LSB) |
	       (inputfifo_curr_fmt_type << _HIVE_STR_TO_MIPI_FMT_TYPE_LSB);
}
/* Busy-wait until the str2mipi event FIFO can accept a token, then push
 * it. Polls in 1 us steps.
 */
static inline void
_sh_css_fifo_snd(unsigned int token)
{
	for (;;) {
		if (can_event_send_token(STR2MIPI_EVENT_ID))
			break;
		udelay(1);
	}
	event_send_token(STR2MIPI_EVENT_ID, token);
}
/* Push one token carrying a single valid sample on lane A. */
static void inputfifo_send_data_a(
    unsigned int data)
{
	unsigned int token;

	token = (1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
		(data << HIVE_STR_TO_MIPI_DATA_A_LSB);
	_sh_css_fifo_snd(token);
}
/* Push one token carrying a single valid sample on lane B. */
static void inputfifo_send_data_b(
    unsigned int data)
{
	unsigned int token;

	token = (1 << HIVE_STR_TO_MIPI_VALID_B_BIT) |
		(data << _HIVE_STR_TO_MIPI_DATA_B_LSB);
	_sh_css_fifo_snd(token);
}
/* Push one token carrying two valid samples, lanes A and B, in parallel. */
static void inputfifo_send_data(
    unsigned int a,
    unsigned int b)
{
	unsigned int token;

	token = (1 << HIVE_STR_TO_MIPI_VALID_A_BIT) |
		(1 << HIVE_STR_TO_MIPI_VALID_B_BIT) |
		(a << HIVE_STR_TO_MIPI_DATA_A_LSB) |
		(b << _HIVE_STR_TO_MIPI_DATA_B_LSB);
	_sh_css_fifo_snd(token);
}
/* Emit a wrapped start-of-line marker token. */
static void inputfifo_send_sol(void)
{
	hrt_data token = inputfifo_wrap_marker(
			     1 << HIVE_STR_TO_MIPI_SOL_BIT);

	_sh_css_fifo_snd(token);
	return;
}
/* Emit a wrapped end-of-line marker token. */
static void inputfifo_send_eol(void)
{
	hrt_data token = inputfifo_wrap_marker(
			     1 << HIVE_STR_TO_MIPI_EOL_BIT);

	_sh_css_fifo_snd(token);
	return;
}
/* Emit a wrapped start-of-frame marker token. */
static void inputfifo_send_sof(void)
{
	hrt_data token = inputfifo_wrap_marker(
			     1 << HIVE_STR_TO_MIPI_SOF_BIT);

	_sh_css_fifo_snd(token);
	return;
}
/* Emit a wrapped end-of-frame marker token. */
static void inputfifo_send_eof(void)
{
	hrt_data token = inputfifo_wrap_marker(
			     1 << HIVE_STR_TO_MIPI_EOF_BIT);

	_sh_css_fifo_snd(token);
	return;
}
/* Latch the MIPI virtual channel id and format type used for all
 * subsequent marker tokens, and emit a zero marker to apply them.
 */
static void inputfifo_send_ch_id_and_fmt_type(
    unsigned int ch_id,
    unsigned int fmt_type)
{
	hrt_data token;

	inputfifo_curr_ch_id = ch_id & _HIVE_ISP_CH_ID_MASK;
	inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK;
	/* we send a zero marker, this will wrap the ch_id and
	 * fmt_type automatically.
	 */
	token = inputfifo_wrap_marker(0);
	_sh_css_fifo_snd(token);
	return;
}
/* Emit a marker token with no flags set (filler/blanking cycle). */
static void inputfifo_send_empty_token(void)
{
	_sh_css_fifo_snd(inputfifo_wrap_marker(0));
}
/* Select channel/format for the coming frame, then emit start-of-frame. */
static void inputfifo_start_frame(
    unsigned int ch_id,
    unsigned int fmt_type)
{
	inputfifo_send_ch_id_and_fmt_type(ch_id, fmt_type);
	inputfifo_send_sof();
}
/* Pad with 'marker_cycles' empty tokens, then emit end-of-frame. */
static void inputfifo_end_frame(
    unsigned int marker_cycles)
{
	unsigned int cycle;

	for (cycle = 0; cycle < marker_cycles; cycle++)
		inputfifo_send_empty_token();
	inputfifo_send_eof();
}
/*
 * Stream one plane of 'width' pixels into the FIFO.
 *
 * In two-pixels-per-clock mode pixels are normally sent in pairs; for
 * RGB and legacy yuv420 data every third cycle (i % 3 == 2) sends only
 * one pixel.  A trailing unpaired pixel (e.g. a jpeg/binary copy with
 * an odd number of values) is sent paired with 0.
 */
static void inputfifo_send_plane(
    const unsigned short *data,
    unsigned int width,
    unsigned int two_ppc,
    unsigned int is_rgb,
    unsigned int is_legacy)
{
	unsigned int i;

	for (i = 0; i < width; i++, data++) {
		unsigned int send_two_pixels = two_ppc;

		if ((is_rgb || is_legacy) && (i % 3 == 2))
			send_two_pixels = 0;
		if (send_two_pixels) {
			if (i + 1 == width) {
				/* Odd number of values: pad with 0. */
				inputfifo_send_data(data[0], 0);
			} else {
				inputfifo_send_data(data[0], data[1]);
			}
			/* Additional increment because we sent 2 pixels */
			data++;
			i++;
		} else if (two_ppc && is_legacy) {
			inputfifo_send_data_b(data[0]);
		} else {
			inputfifo_send_data_a(data[0]);
		}
	}
}

/*
 * Send one line consisting of up to two pixel planes, framed by
 * horizontal blanking tokens, a start-of-line marker, 'marker_cycles'
 * filler tokens and an end-of-line marker.
 *
 * The original carried two byte-identical per-plane loops; they are
 * factored into inputfifo_send_plane() above.  'data2' may be NULL
 * when 'width2' is 0.
 */
static void inputfifo_send_line2(
    const unsigned short *data,
    unsigned int width,
    const unsigned short *data2,
    unsigned int width2,
    unsigned int hblank_cycles,
    unsigned int marker_cycles,
    unsigned int two_ppc,
    enum inputfifo_mipi_data_type type)
{
	unsigned int i, is_rgb = 0, is_legacy = 0;

	assert(data);
	assert((data2) || (width2 == 0));
	if (type == inputfifo_mipi_data_type_rgb)
		is_rgb = 1;
	if (type == inputfifo_mipi_data_type_yuv420_legacy)
		is_legacy = 1;
	for (i = 0; i < hblank_cycles; i++)
		inputfifo_send_empty_token();
	inputfifo_send_sol();
	for (i = 0; i < marker_cycles; i++)
		inputfifo_send_empty_token();
	inputfifo_send_plane(data, width, two_ppc, is_rgb, is_legacy);
	inputfifo_send_plane(data2, width2, two_ppc, is_rgb, is_legacy);
	for (i = 0; i < hblank_cycles; i++)
		inputfifo_send_empty_token();
	inputfifo_send_eol();
}
/* Convenience wrapper: send a single-plane line (no second plane). */
static void
inputfifo_send_line(const unsigned short *data,
		    unsigned int width,
		    unsigned int hblank_cycles,
		    unsigned int marker_cycles,
		    unsigned int two_ppc,
		    enum inputfifo_mipi_data_type type)
{
	assert(data);
	inputfifo_send_line2(data, width, NULL, 0, hblank_cycles,
			     marker_cycles, two_ppc, type);
}
/* Send a frame of data into the input network via the GP FIFO.
* Parameters:
* - data: array of 16 bit values that contains all data for the frame.
* - width: width of a line in number of subpixels, for yuv420 it is the
* number of Y components per line.
* - height: height of the frame in number of lines.
* - ch_id: channel ID.
* - fmt_type: format type.
* - hblank_cycles: length of horizontal blanking in cycles.
* - marker_cycles: number of empty cycles after start-of-line and before
* end-of-frame.
* - two_ppc: boolean, describes whether to send one or two pixels per clock
* cycle. In this mode, we sent pixels N and N+1 in the same cycle,
* to IF_PRIM_A and IF_PRIM_B respectively. The caller must make
* sure the input data has been formatted correctly for this.
* For example, for RGB formats this means that unused values
* must be inserted.
* - yuv420: boolean, describes whether (non-legacy) yuv420 data is used. In
* this mode, the odd lines (1,3,5 etc) are half as long as the
* even lines (2,4,6 etc).
* Note that the first line is odd (1) and the second line is even
* (2).
*
* This function does not do any reordering of pixels, the caller must make
 * sure the data is in the right format. Please refer to the CSS receiver
* documentation for details on the data formats.
*/
/* See the block comment above for the parameter contract. */
static void inputfifo_send_frame(
    const unsigned short *data,
    unsigned int width,
    unsigned int height,
    unsigned int ch_id,
    unsigned int fmt_type,
    unsigned int hblank_cycles,
    unsigned int marker_cycles,
    unsigned int two_ppc,
    enum inputfifo_mipi_data_type type)
{
	unsigned int line;

	assert(data);
	inputfifo_start_frame(ch_id, fmt_type);
	for (line = 0; line < height; line++) {
		unsigned int line_width = width;

		/* For (non-legacy) yuv420 every second line (0-based odd
		 * index) carries twice as many subpixels. */
		if (type == inputfifo_mipi_data_type_yuv420 &&
		    (line & 1) == 1)
			line_width = 2 * width;
		inputfifo_send_line(data, line_width, hblank_cycles,
				    marker_cycles, two_ppc, type);
		data += line_width;
	}
	inputfifo_end_frame(marker_cycles);
}
/* Classify a stream format into the coarse FIFO data-type categories. */
static enum inputfifo_mipi_data_type inputfifo_determine_type(
    enum atomisp_input_format input_format)
{
	switch (input_format) {
	case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
		return inputfifo_mipi_data_type_yuv420_legacy;
	case ATOMISP_INPUT_FORMAT_YUV420_8:
	case ATOMISP_INPUT_FORMAT_YUV420_10:
	case ATOMISP_INPUT_FORMAT_YUV420_16:
		return inputfifo_mipi_data_type_yuv420;
	default:
		break;
	}
	if (input_format >= ATOMISP_INPUT_FORMAT_RGB_444 &&
	    input_format <= ATOMISP_INPUT_FORMAT_RGB_888)
		return inputfifo_mipi_data_type_rgb;
	return inputfifo_mipi_data_type_regular;
}
/* Look up the per-channel inputfifo bookkeeping entry. */
static struct inputfifo_instance *inputfifo_get_inst(
    unsigned int ch_id)
{
	return inputfifo_inst_admin + ch_id;
}
/* Public entry: send a complete frame through the GP FIFO in one call. */
void ia_css_inputfifo_send_input_frame(
    const unsigned short *data,
    unsigned int width,
    unsigned int height,
    unsigned int ch_id,
    enum atomisp_input_format input_format,
    bool two_ppc)
{
	unsigned int fmt_type;
	enum inputfifo_mipi_data_type type;

	assert(data);
	ia_css_isys_convert_stream_format_to_mipi_format(input_format,
							 MIPI_PREDICTOR_NONE,
							 &fmt_type);
	type = inputfifo_determine_type(input_format);
	inputfifo_send_frame(data, width, height, ch_id, fmt_type,
			     HBLANK_CYCLES, MARKER_CYCLES, two_ppc, type);
}
/*
 * Public entry: initialize the per-channel streaming state and emit the
 * frame start (channel/format selection + start-of-frame marker).
 */
void ia_css_inputfifo_start_frame(
    unsigned int ch_id,
    enum atomisp_input_format input_format,
    bool two_ppc)
{
	struct inputfifo_instance *s2mi = inputfifo_get_inst(ch_id);

	s2mi->ch_id = ch_id;
	ia_css_isys_convert_stream_format_to_mipi_format(input_format,
							 MIPI_PREDICTOR_NONE,
							 &s2mi->fmt_type);
	s2mi->two_ppc = two_ppc;
	s2mi->type = inputfifo_determine_type(input_format);
	s2mi->hblank_cycles = HBLANK_CYCLES;
	s2mi->marker_cycles = MARKER_CYCLES;
	s2mi->streaming = true;
	inputfifo_start_frame(ch_id, s2mi->fmt_type);
}
/* Public entry: send one (possibly two-plane) line on channel 'ch_id'. */
void ia_css_inputfifo_send_line(
    unsigned int ch_id,
    const unsigned short *data,
    unsigned int width,
    const unsigned short *data2,
    unsigned int width2)
{
	struct inputfifo_instance *s2mi;

	assert(data);
	assert((data2) || (width2 == 0));
	s2mi = inputfifo_get_inst(ch_id);
	/* Refresh the globals that tag tokens with channel id / format. */
	inputfifo_curr_ch_id = s2mi->ch_id & _HIVE_ISP_CH_ID_MASK;
	inputfifo_curr_fmt_type = s2mi->fmt_type & _HIVE_ISP_FMT_TYPE_MASK;
	inputfifo_send_line2(data, width, data2, width2,
			     s2mi->hblank_cycles, s2mi->marker_cycles,
			     s2mi->two_ppc, s2mi->type);
}
/* Public entry: send one embedded/metadata line on channel 'ch_id'. */
void ia_css_inputfifo_send_embedded_line(
    unsigned int ch_id,
    enum atomisp_input_format data_type,
    const unsigned short *data,
    unsigned int width)
{
	struct inputfifo_instance *s2mi;
	unsigned int fmt_type;

	assert(data);
	s2mi = inputfifo_get_inst(ch_id);
	ia_css_isys_convert_stream_format_to_mipi_format(data_type,
							 MIPI_PREDICTOR_NONE,
							 &fmt_type);
	/* Tag the metadata line with its own format type. */
	inputfifo_curr_fmt_type = fmt_type & _HIVE_ISP_FMT_TYPE_MASK;
	inputfifo_send_line(data, width, s2mi->hblank_cycles,
			    s2mi->marker_cycles, s2mi->two_ppc,
			    inputfifo_mipi_data_type_regular);
}
/* Public entry: emit the end-of-frame sequence and mark the channel idle. */
void ia_css_inputfifo_end_frame(
    unsigned int ch_id)
{
	struct inputfifo_instance *s2mi = inputfifo_get_inst(ch_id);

	/* Refresh the globals that tag tokens with channel id / format. */
	inputfifo_curr_ch_id = s2mi->ch_id & _HIVE_ISP_CH_ID_MASK;
	inputfifo_curr_fmt_type = s2mi->fmt_type & _HIVE_ISP_FMT_TYPE_MASK;
	inputfifo_end_frame(s2mi->marker_cycles);
	s2mi->streaming = false;
}
| linux-master | drivers/staging/media/atomisp/pci/runtime/inputfifo/src/inputfifo.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010 - 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "hmm.h"
#include "ia_css_pipeline.h"
#include "ia_css_isp_param.h"
/* Set functions for parameter memory descriptors */
void
ia_css_isp_param_set_mem_init(
struct ia_css_isp_param_host_segments *mem_init,
enum ia_css_param_class pclass,
enum ia_css_isp_memories mem,
char *address, size_t size)
{
mem_init->params[pclass][mem].address = address;
mem_init->params[pclass][mem].size = (uint32_t)size;
}
/* Record the CSS-side (DDR) address/size of one parameter memory segment. */
void
ia_css_isp_param_set_css_mem_init(
    struct ia_css_isp_param_css_segments *mem_init,
    enum ia_css_param_class pclass,
    enum ia_css_isp_memories mem,
    ia_css_ptr address, size_t size)
{
	struct ia_css_data *seg = &mem_init->params[pclass][mem];

	seg->address = address;
	seg->size = (uint32_t)size;
}
/* Record the ISP-side address/size of one parameter memory segment. */
void
ia_css_isp_param_set_isp_mem_init(
    struct ia_css_isp_param_isp_segments *mem_init,
    enum ia_css_param_class pclass,
    enum ia_css_isp_memories mem,
    u32 address, size_t size)
{
	struct ia_css_isp_data *seg = &mem_init->params[pclass][mem];

	seg->address = address;
	seg->size = (uint32_t)size;
}
/* Get functions for parameter memory descriptors */
/* Return the host-side descriptor of one parameter memory segment. */
const struct ia_css_host_data *
ia_css_isp_param_get_mem_init(
    const struct ia_css_isp_param_host_segments *mem_init,
    enum ia_css_param_class pclass,
    enum ia_css_isp_memories mem)
{
	return mem_init->params[pclass] + mem;
}
/* Return the CSS-side (DDR) descriptor of one parameter memory segment. */
const struct ia_css_data *
ia_css_isp_param_get_css_mem_init(
    const struct ia_css_isp_param_css_segments *mem_init,
    enum ia_css_param_class pclass,
    enum ia_css_isp_memories mem)
{
	return mem_init->params[pclass] + mem;
}
/* Return the ISP-side descriptor of one parameter memory segment. */
const struct ia_css_isp_data *
ia_css_isp_param_get_isp_mem_init(
    const struct ia_css_isp_param_isp_segments *mem_init,
    enum ia_css_param_class pclass,
    enum ia_css_isp_memories mem)
{
	return mem_init->params[pclass] + mem;
}
void
ia_css_init_memory_interface(
struct ia_css_isp_param_css_segments *isp_mem_if,
const struct ia_css_isp_param_host_segments *mem_params,
const struct ia_css_isp_param_css_segments *css_params)
{
unsigned int pclass, mem;
for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
memset(isp_mem_if->params[pclass], 0, sizeof(isp_mem_if->params[pclass]));
for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
if (!mem_params->params[pclass][mem].address)
continue;
isp_mem_if->params[pclass][mem].size = mem_params->params[pclass][mem].size;
if (pclass != IA_CSS_PARAM_CLASS_PARAM)
isp_mem_if->params[pclass][mem].address =
css_params->params[pclass][mem].address;
}
}
}
/*
 * Allocate a host buffer, and (for every class except PARAM) a matching
 * ISP-side hmm buffer, for each class/memory pair whose size — taken
 * from mem_initializers when provided, 0 otherwise — is non-zero.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released again via ia_css_isp_param_destroy_isp_parameters().
 *
 * Changes vs. previous revision: removed the dead store to 'pclass'
 * (it was unconditionally overwritten by the inner loop) and replaced
 * kvcalloc(1, size, ...) with the equivalent, more direct kvzalloc().
 */
int
ia_css_isp_param_allocate_isp_parameters(
    struct ia_css_isp_param_host_segments *mem_params,
    struct ia_css_isp_param_css_segments *css_params,
    const struct ia_css_isp_param_isp_segments *mem_initializers)
{
	int err = 0;
	unsigned int mem, pclass;

	for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
		for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
			u32 size = 0;

			if (mem_initializers)
				size = mem_initializers->params[pclass][mem].size;
			mem_params->params[pclass][mem].size = size;
			mem_params->params[pclass][mem].address = NULL;
			css_params->params[pclass][mem].size = size;
			css_params->params[pclass][mem].address = 0x0;
			if (!size)
				continue;
			mem_params->params[pclass][mem].address =
				kvzalloc(size, GFP_KERNEL);
			if (!mem_params->params[pclass][mem].address) {
				err = -ENOMEM;
				goto cleanup;
			}
			/* The PARAM class lives only on the host side;
			 * all other classes also get an ISP buffer. */
			if (pclass != IA_CSS_PARAM_CLASS_PARAM) {
				css_params->params[pclass][mem].address =
					hmm_alloc(size);
				if (!css_params->params[pclass][mem].address) {
					err = -ENOMEM;
					goto cleanup;
				}
			}
		}
	}
	return err;

cleanup:
	ia_css_isp_param_destroy_isp_parameters(mem_params, css_params);
	return err;
}
/*
 * Free every host buffer (kvfree) and every ISP-side buffer (hmm_free)
 * set up by ia_css_isp_param_allocate_isp_parameters() and reset the
 * descriptors.  Safe to call on a partially-initialized table.
 */
void
ia_css_isp_param_destroy_isp_parameters(
    struct ia_css_isp_param_host_segments *mem_params,
    struct ia_css_isp_param_css_segments *css_params)
{
	unsigned int mem, pclass;

	for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
		for (mem = 0; mem < IA_CSS_NUM_MEMORIES; mem++) {
			kvfree(mem_params->params[pclass][mem].address);
			mem_params->params[pclass][mem].address = NULL;
			if (css_params->params[pclass][mem].address)
				hmm_free(css_params->params[pclass][mem].address);
			css_params->params[pclass][mem].address = 0x0;
		}
	}
}
/*
 * Point each parameter class' offset array into the firmware blob when
 * 'init' is set; otherwise clear all pointers.
 */
void
ia_css_isp_param_load_fw_params(
    const char *fw,
    union ia_css_all_memory_offsets *mem_offsets,
    const struct ia_css_isp_param_memory_offsets *memory_offsets,
    bool init)
{
	unsigned int pclass;

	for (pclass = 0; pclass < IA_CSS_NUM_PARAM_CLASSES; pclass++) {
		void *ptr = NULL;

		if (init)
			ptr = (void *)(fw + memory_offsets->offsets[pclass]);
		mem_offsets->array[pclass].ptr = ptr;
	}
}
/*
 * Copy every non-empty host segment of the given class into its DDR
 * counterpart via hmm_store().  Returns -EINVAL when a host/DDR size
 * pair disagrees, 0 otherwise.
 */
int
ia_css_isp_param_copy_isp_mem_if_to_ddr(
    struct ia_css_isp_param_css_segments *ddr,
    const struct ia_css_isp_param_host_segments *host,
    enum ia_css_param_class pclass)
{
	unsigned int mem;

	for (mem = 0; mem < N_IA_CSS_ISP_MEMORIES; mem++) {
		size_t size = host->params[pclass][mem].size;

		if (size != ddr->params[pclass][mem].size)
			return -EINVAL;
		if (size)
			hmm_store(ddr->params[pclass][mem].address,
				  host->params[pclass][mem].address, size);
	}
	return 0;
}
/*
 * Enable the pipeline by clearing the disable flag in the DMEM0
 * parameter segment: by protocol, bit 0 of the mandatory uint32_t
 * first field of the input parameter is a disable bit.
 */
void
ia_css_isp_param_enable_pipeline(
    const struct ia_css_isp_param_host_segments *mem_params)
{
	short dmem_offset = 0;

	/* Nothing to do when no DMEM0 parameter segment is present. */
	if (mem_params->params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM0].size == 0)
		return;
	/* Type-pun the first bytes of the host buffer as a uint32_t and
	 * clear it (the pointed-to buffer itself is not const). */
	*(uint32_t *)
	&mem_params->params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM0].address[dmem_offset]
	= 0x0;
}
| linux-master | drivers/staging/media/atomisp/pci/runtime/isp_param/src/isp_param.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010 - 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#define __INLINE_INPUT_SYSTEM__
#include "input_system.h"
#include "assert_support.h"
#include "ia_css_isys.h"
#include "ia_css_irq.h"
#include "sh_css_internal.h"
#if !defined(ISP2401)
/*
 * Enable the receiver interrupt sources on 'port' (two sources are
 * deliberately left commented out below) and route the CSI receiver
 * error into the Iunit IRQ.
 */
void ia_css_isys_rx_enable_all_interrupts(enum mipi_port_id port)
{
	/* Read-modify-write: keep whatever is already enabled. */
	hrt_data bits = receiver_port_reg_load(RX0_ID,
					       port,
					       _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);

	bits |= (1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT) |
		/* intentionally not enabled:
		 * (1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_NO_CORRECTION_BIT) | */
		(1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT) |
		(1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT);
	/* intentionally not enabled:
	 * (1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT); */
	receiver_port_reg_store(RX0_ID,
				port,
				_HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits);
	/*
	 * The CSI is nested into the Iunit IRQ's
	 */
	ia_css_irq_enable(IA_CSS_IRQ_INFO_CSS_RECEIVER_ERROR, true);
	return;
}
/* This function converts between the enum used on the CSS API and the
* internal DLI enum type.
* We do not use an array for this since we cannot use named array
* initializers in Windows. Without that there is no easy way to guarantee
* that the array values would be in the correct order.
* */
/*
 * Convert the CSS API port enum to the internal DLI port enum.  The
 * input is assumed to have been validated by the caller; anything that
 * is not port 1 or port 2 maps to port 0.
 */
enum mipi_port_id ia_css_isys_port_to_mipi_port(enum mipi_port_id api_port)
{
	switch (api_port) {
	case MIPI_PORT1_ID:
		return MIPI_PORT1_ID;
	case MIPI_PORT2_ID:
		return MIPI_PORT2_ID;
	default:
		return MIPI_PORT0_ID;
	}
}
/* Read the raw IRQ status register of the given receiver port. */
unsigned int ia_css_isys_rx_get_interrupt_reg(enum mipi_port_id port)
{
	return receiver_port_reg_load(RX0_ID, port,
				      _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
}
/* Shorthand: query receiver IRQ info on API port MIPI_PORT1_ID. */
void ia_css_rx_get_irq_info(unsigned int *irq_infos)
{
	ia_css_rx_port_get_irq_info(MIPI_PORT1_ID, irq_infos);
}
/* Query receiver IRQ info for an API port (translated to a DLI port). */
void ia_css_rx_port_get_irq_info(enum mipi_port_id api_port,
				 unsigned int *irq_infos)
{
	ia_css_isys_rx_get_irq_info(ia_css_isys_port_to_mipi_port(api_port),
				    irq_infos);
}
/* Read the port's IRQ status register and translate it to CSS API flags. */
void ia_css_isys_rx_get_irq_info(enum mipi_port_id port,
				 unsigned int *irq_infos)
{
	assert(irq_infos);
	*irq_infos = ia_css_isys_rx_translate_irq_infos(
	    ia_css_isys_rx_get_interrupt_reg(port));
}
/* Translate register bits to CSS API enum mask */
/* Translate register bits to CSS API enum mask, table-driven. */
unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits)
{
	/* One entry per receiver IRQ source: register bit -> API flag. */
	static const struct {
		unsigned int bit;
		unsigned int info;
	} irq_map[] = {
		{ _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT, IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN },
		{ _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT, IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT },
		{ _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT, IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE },
		{ _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT, IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE },
		{ _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT, IA_CSS_RX_IRQ_INFO_ECC_CORRECTED },
		{ _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT, IA_CSS_RX_IRQ_INFO_ERR_SOT },
		{ _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT, IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC },
		{ _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT, IA_CSS_RX_IRQ_INFO_ERR_CONTROL },
		{ _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT, IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE },
		{ _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT, IA_CSS_RX_IRQ_INFO_ERR_CRC },
		{ _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT, IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID },
		{ _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT, IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC },
		{ _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT, IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA },
		{ _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT, IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT },
		{ _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT, IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC },
		{ _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT, IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC },
	};
	unsigned int infos = 0;
	unsigned int i;

	for (i = 0; i < sizeof(irq_map) / sizeof(irq_map[0]); i++)
		if (bits & (1U << irq_map[i].bit))
			infos |= irq_map[i].info;
	return infos;
}
/* Shorthand: clear receiver IRQ info on API port MIPI_PORT1_ID. */
void ia_css_rx_clear_irq_info(unsigned int irq_infos)
{
	ia_css_rx_port_clear_irq_info(MIPI_PORT1_ID, irq_infos);
}
/* Clear receiver IRQ info for an API port (translated to a DLI port). */
void ia_css_rx_port_clear_irq_info(enum mipi_port_id api_port,
				   unsigned int irq_infos)
{
	ia_css_isys_rx_clear_irq_info(ia_css_isys_port_to_mipi_port(api_port),
				      irq_infos);
}
void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
unsigned int irq_infos)
{
hrt_data bits = receiver_port_reg_load(RX0_ID,
port,
_HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
/* MW: Why do we remap the receiver bitmap */
if (irq_infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_OVERRUN_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_INIT_TIMEOUT)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_INIT_TIMEOUT_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ENTER_SLEEP_MODE)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_ENTRY_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_EXIT_SLEEP_MODE)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_SLEEP_MODE_EXIT_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ECC_CORRECTED)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_CORRECTED_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_SOT)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_HS_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_SOT_SYNC_HS_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CONTROL_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ECC_DOUBLE_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_CRC)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_CRC_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ID_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_SYNC_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_FRAME_DATA_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_DATA_TIMEOUT_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_ESCAPE_BIT;
if (irq_infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
bits |= 1U << _HRT_CSS_RECEIVER_IRQ_ERR_LINE_SYNC_BIT;
receiver_port_reg_store(RX0_ID,
port,
_HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, bits);
return;
}
#endif /* #if !defined(ISP2401) */
/*
 * Map an Atom ISP stream format (plus an optional compression
 * predictor) onto the MIPI format type code written to *fmt_type.
 *
 * Returns 0 on success, -EINVAL for unsupported formats/combinations.
 */
int ia_css_isys_convert_stream_format_to_mipi_format(
    enum atomisp_input_format input_format,
    mipi_predictor_t compression,
    unsigned int *fmt_type)
{
	assert(fmt_type);
	/*
	 * Custom (user defined) modes. Used for compressed
	 * MIPI transfers
	 *
	 * Checkpatch thinks the indent before "if" is suspect
	 * I think the only suspect part is the missing "else"
	 * because of the return.
	 */
	if (compression != MIPI_PREDICTOR_NONE) {
		/* For compressed transfers the format code is simply the
		 * uncompressed RAW bit depth. */
		switch (input_format) {
		case ATOMISP_INPUT_FORMAT_RAW_6:
			*fmt_type = 6;
			break;
		case ATOMISP_INPUT_FORMAT_RAW_7:
			*fmt_type = 7;
			break;
		case ATOMISP_INPUT_FORMAT_RAW_8:
			*fmt_type = 8;
			break;
		case ATOMISP_INPUT_FORMAT_RAW_10:
			*fmt_type = 10;
			break;
		case ATOMISP_INPUT_FORMAT_RAW_12:
			*fmt_type = 12;
			break;
		case ATOMISP_INPUT_FORMAT_RAW_14:
			*fmt_type = 14;
			break;
		case ATOMISP_INPUT_FORMAT_RAW_16:
			*fmt_type = 16;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}
	/*
	 * This mapping comes from the Arasan CSS function spec
	 * (CSS_func_spec1.08_ahb_sep29_08.pdf).
	 *
	 * MW: For some reason the mapping is not 1-to-1
	 */
	switch (input_format) {
	case ATOMISP_INPUT_FORMAT_RGB_888:
		*fmt_type = MIPI_FORMAT_RGB888;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_555:
		*fmt_type = MIPI_FORMAT_RGB555;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_444:
		*fmt_type = MIPI_FORMAT_RGB444;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_565:
		*fmt_type = MIPI_FORMAT_RGB565;
		break;
	case ATOMISP_INPUT_FORMAT_RGB_666:
		*fmt_type = MIPI_FORMAT_RGB666;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_8:
		*fmt_type = MIPI_FORMAT_RAW8;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_10:
		*fmt_type = MIPI_FORMAT_RAW10;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_6:
		*fmt_type = MIPI_FORMAT_RAW6;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_7:
		*fmt_type = MIPI_FORMAT_RAW7;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_12:
		*fmt_type = MIPI_FORMAT_RAW12;
		break;
	case ATOMISP_INPUT_FORMAT_RAW_14:
		*fmt_type = MIPI_FORMAT_RAW14;
		break;
	case ATOMISP_INPUT_FORMAT_YUV420_8:
		*fmt_type = MIPI_FORMAT_YUV420_8;
		break;
	case ATOMISP_INPUT_FORMAT_YUV420_10:
		*fmt_type = MIPI_FORMAT_YUV420_10;
		break;
	case ATOMISP_INPUT_FORMAT_YUV422_8:
		*fmt_type = MIPI_FORMAT_YUV422_8;
		break;
	case ATOMISP_INPUT_FORMAT_YUV422_10:
		*fmt_type = MIPI_FORMAT_YUV422_10;
		break;
	case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
		*fmt_type = MIPI_FORMAT_YUV420_8_LEGACY;
		break;
	case ATOMISP_INPUT_FORMAT_EMBEDDED:
		*fmt_type = MIPI_FORMAT_EMBEDDED;
		break;
#ifndef ISP2401
	case ATOMISP_INPUT_FORMAT_RAW_16:
		/* This is not specified by Arasan, so we use
		 * 17 for now.
		 */
		*fmt_type = MIPI_FORMAT_RAW16;
		break;
	case ATOMISP_INPUT_FORMAT_BINARY_8:
		*fmt_type = MIPI_FORMAT_BINARY_8;
		break;
#else
	case ATOMISP_INPUT_FORMAT_USER_DEF1:
		*fmt_type = MIPI_FORMAT_CUSTOM0;
		break;
	case ATOMISP_INPUT_FORMAT_USER_DEF2:
		*fmt_type = MIPI_FORMAT_CUSTOM1;
		break;
	case ATOMISP_INPUT_FORMAT_USER_DEF3:
		*fmt_type = MIPI_FORMAT_CUSTOM2;
		break;
	case ATOMISP_INPUT_FORMAT_USER_DEF4:
		*fmt_type = MIPI_FORMAT_CUSTOM3;
		break;
	case ATOMISP_INPUT_FORMAT_USER_DEF5:
		*fmt_type = MIPI_FORMAT_CUSTOM4;
		break;
	case ATOMISP_INPUT_FORMAT_USER_DEF6:
		*fmt_type = MIPI_FORMAT_CUSTOM5;
		break;
	case ATOMISP_INPUT_FORMAT_USER_DEF7:
		*fmt_type = MIPI_FORMAT_CUSTOM6;
		break;
	case ATOMISP_INPUT_FORMAT_USER_DEF8:
		*fmt_type = MIPI_FORMAT_CUSTOM7;
		break;
#endif
	case ATOMISP_INPUT_FORMAT_YUV420_16:
	case ATOMISP_INPUT_FORMAT_YUV422_16:
	default:
		return -EINVAL;
	}
	return 0;
}
#if defined(ISP2401)
/*
 * Map a CSI-2 compression type to the receiver's predictor code
 * (0-based, hence the "- 1"); unknown types map to MIPI_PREDICTOR_NONE.
 */
static mipi_predictor_t sh_css_csi2_compression_type_2_mipi_predictor(
    enum ia_css_csi2_compression_type type)
{
	if (type == IA_CSS_CSI2_COMPRESSION_TYPE_1)
		return MIPI_PREDICTOR_TYPE1 - 1;
	if (type == IA_CSS_CSI2_COMPRESSION_TYPE_2)
		return MIPI_PREDICTOR_TYPE2 - 1;
	return MIPI_PREDICTOR_NONE;
}
/*
 * Translate a CSI-2 compression descriptor into the input-system port
 * attributes (compression scheme, predictor, enable flag).
 *
 * Returns 0 on success or -EINVAL for unsupported bit-depth pairs.
 */
int ia_css_isys_convert_compressed_format(
    struct ia_css_csi2_compression *comp,
    struct isp2401_input_system_cfg_s *cfg)
{
	int err = 0;

	assert(comp);
	assert(cfg);
	if (comp->type != IA_CSS_CSI2_COMPRESSION_TYPE_NONE) {
		/* compression register bit slicing
		 * 4 bit for each user defined data type
		 * 3 bit indicate compression scheme
		 *   000 No compression
		 *   001 10-6-10
		 *   010 10-7-10
		 *   011 10-8-10
		 *   100 12-6-12
		 *   101 12-7-12
		 *   110 12-8-12
		 * 1 bit indicate predictor
		 */
		if (comp->uncompressed_bits_per_pixel == UNCOMPRESSED_BITS_PER_PIXEL_10) {
			switch (comp->compressed_bits_per_pixel) {
			case COMPRESSED_BITS_PER_PIXEL_6:
				cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_6_10;
				break;
			case COMPRESSED_BITS_PER_PIXEL_7:
				cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_7_10;
				break;
			case COMPRESSED_BITS_PER_PIXEL_8:
				cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_10_8_10;
				break;
			default:
				err = -EINVAL;
			}
		} else if (comp->uncompressed_bits_per_pixel ==
			   UNCOMPRESSED_BITS_PER_PIXEL_12) {
			switch (comp->compressed_bits_per_pixel) {
			case COMPRESSED_BITS_PER_PIXEL_6:
				cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_6_12;
				break;
			case COMPRESSED_BITS_PER_PIXEL_7:
				cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_7_12;
				break;
			case COMPRESSED_BITS_PER_PIXEL_8:
				cfg->csi_port_attr.comp_scheme = MIPI_COMPRESSOR_12_8_12;
				break;
			default:
				err = -EINVAL;
			}
		} else
			err = -EINVAL;
		cfg->csi_port_attr.comp_predictor =
		    sh_css_csi2_compression_type_2_mipi_predictor(comp->type);
		cfg->csi_port_attr.comp_enable = true;
	} else /* No compression */
		cfg->csi_port_attr.comp_enable = false;
	return err;
}
/* Return the memory alignment (bytes) required for the given format. */
unsigned int ia_css_csi2_calculate_input_system_alignment(
    enum atomisp_input_format fmt_type)
{
	switch (fmt_type) {
	case ATOMISP_INPUT_FORMAT_RAW_6:
	case ATOMISP_INPUT_FORMAT_RAW_7:
	case ATOMISP_INPUT_FORMAT_RAW_8:
	case ATOMISP_INPUT_FORMAT_RAW_10:
	case ATOMISP_INPUT_FORMAT_RAW_12:
	case ATOMISP_INPUT_FORMAT_RAW_14:
		return 2 * ISP_VEC_NELEMS;
	case ATOMISP_INPUT_FORMAT_YUV420_8:
	case ATOMISP_INPUT_FORMAT_YUV422_8:
	case ATOMISP_INPUT_FORMAT_USER_DEF1:
	case ATOMISP_INPUT_FORMAT_USER_DEF2:
	case ATOMISP_INPUT_FORMAT_USER_DEF3:
	case ATOMISP_INPUT_FORMAT_USER_DEF4:
	case ATOMISP_INPUT_FORMAT_USER_DEF5:
	case ATOMISP_INPUT_FORMAT_USER_DEF6:
	case ATOMISP_INPUT_FORMAT_USER_DEF7:
	case ATOMISP_INPUT_FORMAT_USER_DEF8:
		/* Planar YUV formats need to have all planes aligned,
		 * this means double the alignment for the Y plane if
		 * the horizontal decimation is 2. */
		return 2 * HIVE_ISP_DDR_WORD_BYTES;
	case ATOMISP_INPUT_FORMAT_EMBEDDED:
	default:
		return HIVE_ISP_DDR_WORD_BYTES;
	}
}
#endif
#if !defined(ISP2401)
/*
 * Lane configuration table: one row per receiver mode (first index),
 * one column per MIPI port (second index: port0, port1, port2).
 */
static const mipi_lane_cfg_t MIPI_PORT_LANES[N_RX_MODE][N_MIPI_PORT_ID] = {
	{MIPI_4LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
	{MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
	{MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
	{MIPI_1LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
	{MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_2LANE_CFG},
	{MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG},
	{MIPI_2LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG},
	{MIPI_1LANE_CFG, MIPI_1LANE_CFG, MIPI_1LANE_CFG}
};
/*
 * Program one CSI receiver port from 'config', wire the input-system
 * routing for the given input mode, then (re-)enable the port.  The
 * 2ppc setting is receiver-global and is only written while no port
 * is enabled yet.
 */
void ia_css_isys_rx_configure(const rx_cfg_t *config,
			      const enum ia_css_input_mode input_mode)
{
	bool any_port_enabled = false;
	enum mipi_port_id port;

	if ((!config)
	    || (config->mode >= N_RX_MODE)
	    || (config->port >= N_MIPI_PORT_ID)) {
		assert(0);
		return;
	}
	for (port = (enum mipi_port_id)0; port < N_MIPI_PORT_ID; port++) {
		if (is_receiver_port_enabled(RX0_ID, port))
			any_port_enabled = true;
	}
	/* AM: Check whether this is a problem with multiple
	 * streams. MS: This is the case. */
	port = config->port;
	receiver_port_enable(RX0_ID, port, false);
	port = config->port;
	/* AM: Check whether this is a problem with multiple streams. */
	if (MIPI_PORT_LANES[config->mode][port] != MIPI_0LANE_CFG) {
		/* Program the port timing/count parameters. */
		receiver_port_reg_store(RX0_ID, port,
					_HRT_CSS_RECEIVER_FUNC_PROG_REG_IDX,
					config->timeout);
		receiver_port_reg_store(RX0_ID, port,
					_HRT_CSS_RECEIVER_2400_INIT_COUNT_REG_IDX,
					config->initcount);
		receiver_port_reg_store(RX0_ID, port,
					_HRT_CSS_RECEIVER_2400_SYNC_COUNT_REG_IDX,
					config->synccount);
		receiver_port_reg_store(RX0_ID, port,
					_HRT_CSS_RECEIVER_2400_RX_COUNT_REG_IDX,
					config->rxcount);
		if (input_mode != IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
			/* MW: A bit of a hack, straight wiring of the capture
			 * units, assuming they are linearly enumerated. */
			input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
							  GPREGS_UNIT0_ID,
							  HIVE_ISYS_GPREG_MULTICAST_A_IDX
							  + (unsigned int)port,
							  INPUT_SYSTEM_CSI_BACKEND);
			/* MW: Like the integration test example we overwrite
			 * the GPREG_MUX register */
			input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
							  GPREGS_UNIT0_ID,
							  HIVE_ISYS_GPREG_MUX_IDX,
							  (input_system_multiplex_t)port);
		} else {
			/*
			 * AM: A bit of a hack, wiring the input system.
			 */
			input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
							  GPREGS_UNIT0_ID,
							  HIVE_ISYS_GPREG_MULTICAST_A_IDX
							  + (unsigned int)port,
							  INPUT_SYSTEM_INPUT_BUFFER);
			input_system_sub_system_reg_store(INPUT_SYSTEM0_ID,
							  GPREGS_UNIT0_ID,
							  HIVE_ISYS_GPREG_MUX_IDX,
							  INPUT_SYSTEM_ACQUISITION_UNIT);
		}
	}
	/*
	 * The 2ppc is shared for all ports, so we cannot
	 * disable->configure->enable individual ports
	 */
	/* AM: Check whether this is a problem with multiple streams. */
	/* MS: 2ppc should be a property per binary and should be
	 * enabled/disabled per binary.
	 * Currently it is implemented as a system wide setting due
	 * to effort and risks. */
	if (!any_port_enabled) {
		receiver_reg_store(RX0_ID,
				   _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX,
				   config->is_two_ppc);
		receiver_reg_store(RX0_ID, _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX,
				   config->is_two_ppc);
	}
	receiver_port_enable(RX0_ID, port, true);
	/* TODO: JB: need to add the beneath used define to mizuchi */
	/* sh_css_sw_hive_isp_css_2400_system_20121224_0125\css
	 * \hrt\input_system_defs.h
	 * #define INPUT_SYSTEM_CSI_RECEIVER_SELECT_BACKENG 0X207
	 */
	/* TODO: need better name for define
	 * input_system_reg_store(INPUT_SYSTEM0_ID,
	 * INPUT_SYSTEM_CSI_RECEIVER_SELECT_BACKENG, 1);
	 */
	input_system_reg_store(INPUT_SYSTEM0_ID, 0x207, 1);
	return;
}
/*
 * Take every MIPI receiver port on RX0 out of the ready state by
 * clearing its DEVICE_READY register.
 */
void ia_css_isys_rx_disable(void)
{
	enum mipi_port_id port;

	for (port = (enum mipi_port_id)0; port < N_MIPI_PORT_ID; port++)
		receiver_port_reg_store(RX0_ID, port,
					_HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX,
					false);
}
#endif /* if !defined(ISP2401) */
| linux-master | drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010 - 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "system_global.h"
#ifdef ISP2401
#include "assert_support.h"
#include "platform_support.h"
#include "ia_css_isys.h"
#include "bitop_support.h"
#include "isys_dma_rmgr.h"
static isys_dma_rsrc_t isys_dma_rsrc[N_ISYS2401_DMA_ID];
/*
 * Reset the DMA-channel bookkeeping of every ISYS2401 DMA instance;
 * afterwards all channels are free.
 *
 * Fix: clear the whole isys_dma_rsrc[] array.  The original used
 * sizeof(isys_dma_rsrc_t), which only covers a single element when
 * N_ISYS2401_DMA_ID > 1 (the sibling rmgr files clear the full array).
 */
void ia_css_isys_dma_channel_rmgr_init(void)
{
	memset(&isys_dma_rsrc, 0, sizeof(isys_dma_rsrc));
}
/*
 * Tear down the DMA-channel bookkeeping; identical to init — the state
 * is simply cleared so no channel remains marked active.
 *
 * Fix: clear the whole isys_dma_rsrc[] array instead of only
 * sizeof(isys_dma_rsrc_t) bytes (one element).
 */
void ia_css_isys_dma_channel_rmgr_uninit(void)
{
	memset(&isys_dma_rsrc, 0, sizeof(isys_dma_rsrc));
}
/*
 * Reserve a free DMA channel on the given ISYS2401 DMA instance.
 *
 * On success the first free channel index is stored in *channel and
 * true is returned; false when the instance has no free channel.
 *
 * Fix: bound the first-fit scan by the per-instance channel count
 * (max_dma_channel) instead of the global N_ISYS2401_DMA_CHANNEL, for
 * consistency with the stream2mmio SID manager and with the range
 * check in the release path.
 */
bool ia_css_isys_dma_channel_rmgr_acquire(
    isys2401_dma_ID_t dma_id,
    isys2401_dma_channel *channel)
{
	bool retval = false;
	isys2401_dma_channel i;
	isys2401_dma_channel max_dma_channel;
	isys_dma_rsrc_t *cur_rsrc = NULL;

	assert(dma_id < N_ISYS2401_DMA_ID);
	assert(channel);

	max_dma_channel = N_ISYS2401_DMA_CHANNEL_PROCS[dma_id];
	cur_rsrc = &isys_dma_rsrc[dma_id];
	if (cur_rsrc->num_active < max_dma_channel) {
		/* First-fit scan of this instance's channel bitmap. */
		for (i = ISYS2401_DMA_CHANNEL_0; i < max_dma_channel; i++) {
			if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
				bitop_setbit(cur_rsrc->active_table, i);
				*channel = i;
				cur_rsrc->num_active++;
				retval = true;
				break;
			}
		}
	}
	return retval;
}
/*
 * Return a DMA channel to the pool of the given ISYS2401 DMA instance.
 * Out-of-range channels and already-free channels are silently ignored.
 */
void ia_css_isys_dma_channel_rmgr_release(
    isys2401_dma_ID_t dma_id,
    isys2401_dma_channel *channel)
{
	isys2401_dma_channel max_dma_channel;
	isys_dma_rsrc_t *rsrc;

	assert(dma_id < N_ISYS2401_DMA_ID);
	assert(channel);

	max_dma_channel = N_ISYS2401_DMA_CHANNEL_PROCS[dma_id];
	rsrc = &isys_dma_rsrc[dma_id];

	if (*channel >= max_dma_channel || !(rsrc->num_active > 0))
		return;

	if (bitop_getbit(rsrc->active_table, *channel) == 1) {
		bitop_clearbit(rsrc->active_table, *channel);
		rsrc->num_active--;
	}
}
#endif
| linux-master | drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010 - 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "system_global.h"
#ifdef ISP2401
#include "assert_support.h"
#include "platform_support.h"
#include "ia_css_isys.h"
#include "bitop_support.h"
#include "ia_css_pipeline.h" /* ia_css_pipeline_get_pipe_io_status() */
#include "sh_css_internal.h" /* sh_css_sp_pipeline_io_status
* SH_CSS_MAX_SP_THREADS
*/
#include "csi_rx_rmgr.h"
static isys_csi_rx_rsrc_t isys_csi_rx_rsrc[N_CSI_RX_BACKEND_ID];
/* Reset the CSI-RX backend LUT bookkeeping; all entries become free. */
void ia_css_isys_csi_rx_lut_rmgr_init(void)
{
	memset(isys_csi_rx_rsrc, 0, sizeof(isys_csi_rx_rsrc));
}
/* Tear down the CSI-RX backend LUT bookkeeping (same as init: clear). */
void ia_css_isys_csi_rx_lut_rmgr_uninit(void)
{
	memset(isys_csi_rx_rsrc, 0, sizeof(isys_csi_rx_rsrc));
}
/*
 * Reserve a LUT entry on the given CSI-RX backend for one MIPI packet
 * type (long or short).
 *
 * On success the acquired index is written to entry->long_packet_entry
 * or entry->short_packet_entry (the other field is zeroed) and true is
 * returned; false when the arguments are invalid or no entry of that
 * type is free.
 *
 * NOTE(review): long and short entries share one active_table bitmap,
 * so an index used for a long packet also blocks the same index for
 * short packets — presumably matching the backend LUT layout; confirm.
 */
bool ia_css_isys_csi_rx_lut_rmgr_acquire(
    csi_rx_backend_ID_t backend,
    csi_mipi_packet_type_t packet_type,
    csi_rx_backend_lut_entry_t *entry)
{
	bool retval = false;
	u32 max_num_packets_of_type;
	u32 num_active_of_type;
	isys_csi_rx_rsrc_t *cur_rsrc = NULL;
	u16 i;

	assert(backend < N_CSI_RX_BACKEND_ID);
	assert((packet_type == CSI_MIPI_PACKET_TYPE_LONG) ||
	       (packet_type == CSI_MIPI_PACKET_TYPE_SHORT));
	assert(entry);

	if ((backend < N_CSI_RX_BACKEND_ID) && (entry)) {
		cur_rsrc = &isys_csi_rx_rsrc[backend];
		/* Select the per-type capacity and usage counter. */
		if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
			max_num_packets_of_type = N_LONG_PACKET_LUT_ENTRIES[backend];
			num_active_of_type = cur_rsrc->num_long_packets;
		} else {
			max_num_packets_of_type = N_SHORT_PACKET_LUT_ENTRIES[backend];
			num_active_of_type = cur_rsrc->num_short_packets;
		}
		if (num_active_of_type < max_num_packets_of_type) {
			/* First-fit scan of the shared active bitmap. */
			for (i = 0; i < max_num_packets_of_type; i++) {
				if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
					bitop_setbit(cur_rsrc->active_table, i);
					if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
						entry->long_packet_entry = i;
						entry->short_packet_entry = 0;
						cur_rsrc->num_long_packets++;
					} else {
						entry->long_packet_entry = 0;
						entry->short_packet_entry = i;
						cur_rsrc->num_short_packets++;
					}
					cur_rsrc->num_active++;
					retval = true;
					break;
				}
			}
		}
	}
	return retval;
}
/*
 * Release a CSI-RX backend LUT entry previously handed out by
 * ia_css_isys_csi_rx_lut_rmgr_acquire().  Out-of-range entries and
 * entries that are not active are silently ignored.
 *
 * Fix: the original asserted
 *     (packet_type >= CSI_MIPI_PACKET_TYPE_LONG) ||
 *     (packet_type <= CSI_MIPI_PACKET_TYPE_SHORT)
 * which is a tautology (always true).  Use the same equality check as
 * the acquire path so the assert actually validates the packet type.
 */
void ia_css_isys_csi_rx_lut_rmgr_release(
    csi_rx_backend_ID_t backend,
    csi_mipi_packet_type_t packet_type,
    csi_rx_backend_lut_entry_t *entry)
{
	u32 max_num_packets;
	isys_csi_rx_rsrc_t *cur_rsrc = NULL;
	u32 packet_entry = 0;

	assert(backend < N_CSI_RX_BACKEND_ID);
	assert(entry);
	assert((packet_type == CSI_MIPI_PACKET_TYPE_LONG) ||
	       (packet_type == CSI_MIPI_PACKET_TYPE_SHORT));

	if ((backend < N_CSI_RX_BACKEND_ID) && (entry)) {
		/* Pick the capacity and the index of the type being freed;
		 * any non-long type falls into the "short" branch, as before. */
		if (packet_type == CSI_MIPI_PACKET_TYPE_LONG) {
			max_num_packets = N_LONG_PACKET_LUT_ENTRIES[backend];
			packet_entry = entry->long_packet_entry;
		} else {
			max_num_packets = N_SHORT_PACKET_LUT_ENTRIES[backend];
			packet_entry = entry->short_packet_entry;
		}
		cur_rsrc = &isys_csi_rx_rsrc[backend];
		if ((packet_entry < max_num_packets) && (cur_rsrc->num_active > 0)) {
			if (bitop_getbit(cur_rsrc->active_table, packet_entry) == 1) {
				bitop_clearbit(cur_rsrc->active_table, packet_entry);
				if (packet_type == CSI_MIPI_PACKET_TYPE_LONG)
					cur_rsrc->num_long_packets--;
				else
					cur_rsrc->num_short_packets--;
				cur_rsrc->num_active--;
			}
		}
	}
}
/*
 * Mark an isys stream as active on a CSI port in the SP pipeline I/O
 * status.  Returns 0 on success, -EINVAL when the port/stream id is
 * out of range or the stream is already registered.
 */
int ia_css_isys_csi_rx_register_stream(
    enum mipi_port_id port,
    uint32_t isys_stream_id)
{
	struct sh_css_sp_pipeline_io_status *status;

	if (port >= N_INPUT_SYSTEM_CSI_PORT ||
	    isys_stream_id >= SH_CSS_MAX_ISYS_CHANNEL_NODES)
		return -EINVAL;

	status = ia_css_pipeline_get_pipe_io_status();
	if (bitop_getbit(status->active[port], isys_stream_id) != 0)
		return -EINVAL;

	bitop_setbit(status->active[port], isys_stream_id);
	status->running[port] = 0;
	return 0;
}
/*
 * Remove an isys stream from the SP pipeline I/O status of a CSI port.
 * Returns 0 on success, -EINVAL when the port/stream id is out of
 * range or the stream was not registered.
 */
int ia_css_isys_csi_rx_unregister_stream(
    enum mipi_port_id port,
    uint32_t isys_stream_id)
{
	struct sh_css_sp_pipeline_io_status *status;

	if (port >= N_INPUT_SYSTEM_CSI_PORT ||
	    isys_stream_id >= SH_CSS_MAX_ISYS_CHANNEL_NODES)
		return -EINVAL;

	status = ia_css_pipeline_get_pipe_io_status();
	if (bitop_getbit(status->active[port], isys_stream_id) != 1)
		return -EINVAL;

	bitop_clearbit(status->active[port], isys_stream_id);
	return 0;
}
#endif
| linux-master | drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "system_global.h"
#include "assert_support.h"
#include "platform_support.h"
#include "ia_css_isys.h"
#include "ibuf_ctrl_rmgr.h"
static ibuf_rsrc_t ibuf_rsrc;
/* Map an index to its ibuf handle, or NULL when out of range. */
static ibuf_handle_t *getHandle(uint16_t index)
{
	if (index >= MAX_IBUF_HANDLES)
		return NULL;
	return &ibuf_rsrc.handles[index];
}
/* Reset the input-buffer allocator: no handles, whole buffer free. */
void ia_css_isys_ibuf_rmgr_init(void)
{
	memset(&ibuf_rsrc, 0, sizeof(ibuf_rsrc));
	ibuf_rsrc.free_size = MAX_INPUT_BUFFER_SIZE;
}
/* Tear down the input-buffer allocator (same reset as init). */
void ia_css_isys_ibuf_rmgr_uninit(void)
{
	memset(&ibuf_rsrc, 0, sizeof(ibuf_rsrc));
	ibuf_rsrc.free_size = MAX_INPUT_BUFFER_SIZE;
}
/*
 * Reserve at least "size" bytes (rounded up to IBUF_ALIGN) of the
 * shared input buffer; the chunk's start address is returned through
 * *start_addr.  Returns true on success.
 *
 * Strategy: first-fit reuse of an inactive handle whose earlier
 * allocation is large enough; otherwise carve a new handle out of the
 * remaining free space.  Released handles keep their size, so memory
 * is recycled but never compacted or coalesced.
 */
bool ia_css_isys_ibuf_rmgr_acquire(
    u32 size,
    uint32_t *start_addr)
{
	bool retval = false;
	bool input_buffer_found = false;
	u32 aligned_size;
	ibuf_handle_t *handle = NULL;
	u16 i;

	assert(start_addr);
	assert(size > 0);

	/* Round the request up to the ibuf alignment granularity. */
	aligned_size = (size + (IBUF_ALIGN - 1)) & ~(IBUF_ALIGN - 1);

	/* Check if there is an available un-used handle with the size
	 * that will fulfill the request.
	 */
	if (ibuf_rsrc.num_active < ibuf_rsrc.num_allocated) {
		for (i = 0; i < ibuf_rsrc.num_allocated; i++) {
			handle = getHandle(i);
			if (!handle->active) {
				if (handle->size >= aligned_size) {
					handle->active = true;
					input_buffer_found = true;
					ibuf_rsrc.num_active++;
					break;
				}
			}
		}
	}

	if (!input_buffer_found) {
		/* There were no available handles that fulfilled the
		 * request. Allocate a new handle with the requested size.
		 */
		if ((ibuf_rsrc.num_allocated < MAX_IBUF_HANDLES) &&
		    (ibuf_rsrc.free_size >= aligned_size)) {
			handle = getHandle(ibuf_rsrc.num_allocated);
			handle->start_addr = ibuf_rsrc.free_start_addr;
			handle->size = aligned_size;
			handle->active = true;

			ibuf_rsrc.free_start_addr += aligned_size;
			ibuf_rsrc.free_size -= aligned_size;
			ibuf_rsrc.num_active++;
			ibuf_rsrc.num_allocated++;

			input_buffer_found = true;
		}
	}

	if (input_buffer_found && handle) {
		*start_addr = handle->start_addr;
		retval = true;
	}
	return retval;
}
/*
 * Release the ibuf chunk that starts at *start_addr: its handle is
 * marked inactive so the memory can be reused by a later acquire.
 */
void ia_css_isys_ibuf_rmgr_release(
    uint32_t *start_addr)
{
	u16 idx;

	assert(start_addr);

	for (idx = 0; idx < ibuf_rsrc.num_allocated; idx++) {
		ibuf_handle_t *h = getHandle(idx);

		if (!h->active || h->start_addr != *start_addr)
			continue;

		h->active = false;
		ibuf_rsrc.num_active--;
		return;
	}
}
| linux-master | drivers/staging/media/atomisp/pci/runtime/isys/src/ibuf_ctrl_rmgr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010 - 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "system_global.h"
#ifdef ISP2401
#include "assert_support.h"
#include "platform_support.h"
#include "ia_css_isys.h"
#include "bitop_support.h"
#include "isys_stream2mmio_rmgr.h"
static isys_stream2mmio_rsrc_t isys_stream2mmio_rsrc[N_STREAM2MMIO_ID];
/* Reset the stream2mmio SID bookkeeping; all SIDs become free. */
void ia_css_isys_stream2mmio_sid_rmgr_init(void)
{
	memset(isys_stream2mmio_rsrc, 0, sizeof(isys_stream2mmio_rsrc));
}
/* Tear down the stream2mmio SID bookkeeping (same reset as init). */
void ia_css_isys_stream2mmio_sid_rmgr_uninit(void)
{
	memset(isys_stream2mmio_rsrc, 0, sizeof(isys_stream2mmio_rsrc));
}
/*
 * Reserve a free SID (stream ID slot) on the given stream2mmio
 * instance.  On success the first free SID index is stored in *sid and
 * true is returned; false when the instance is fully occupied or the
 * arguments are invalid.
 */
bool ia_css_isys_stream2mmio_sid_rmgr_acquire(
    stream2mmio_ID_t stream2mmio,
    stream2mmio_sid_ID_t *sid)
{
	bool retval = false;
	stream2mmio_sid_ID_t max_sid;
	isys_stream2mmio_rsrc_t *cur_rsrc = NULL;
	stream2mmio_sid_ID_t i;

	assert(stream2mmio < N_STREAM2MMIO_ID);
	assert(sid);

	if ((stream2mmio < N_STREAM2MMIO_ID) && (sid)) {
		max_sid = N_STREAM2MMIO_SID_PROCS[stream2mmio];
		cur_rsrc = &isys_stream2mmio_rsrc[stream2mmio];
		if (cur_rsrc->num_active < max_sid) {
			/* First-fit scan of this instance's SID bitmap. */
			for (i = STREAM2MMIO_SID0_ID; i < max_sid; i++) {
				if (bitop_getbit(cur_rsrc->active_table, i) == 0) {
					bitop_setbit(cur_rsrc->active_table, i);
					*sid = i;
					cur_rsrc->num_active++;
					retval = true;
					break;
				}
			}
		}
	}
	return retval;
}
/*
 * Return a SID to the pool of the given stream2mmio instance.
 * Invalid instances, out-of-range SIDs and already-free SIDs are
 * silently ignored.
 */
void ia_css_isys_stream2mmio_sid_rmgr_release(
    stream2mmio_ID_t stream2mmio,
    stream2mmio_sid_ID_t *sid)
{
	stream2mmio_sid_ID_t max_sid;
	isys_stream2mmio_rsrc_t *rsrc;

	assert(stream2mmio < N_STREAM2MMIO_ID);
	assert(sid);

	if ((stream2mmio >= N_STREAM2MMIO_ID) || (!sid))
		return;

	max_sid = N_STREAM2MMIO_SID_PROCS[stream2mmio];
	rsrc = &isys_stream2mmio_rsrc[stream2mmio];

	if (*sid >= max_sid || !(rsrc->num_active > 0))
		return;

	if (bitop_getbit(rsrc->active_table, *sid) == 1) {
		bitop_clearbit(rsrc->active_table, *sid);
		rsrc->num_active--;
	}
}
#endif
| linux-master | drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/string.h> /* for memcpy() */
#include "system_global.h"
#ifdef ISP2401
#include "ia_css_isys.h"
#include "ia_css_debug.h"
#include "math_support.h"
#include "virtual_isys.h"
#include "isp.h"
#include "sh_css_defs.h"
/*************************************************
*
* Forwarded Declaration
*
*************************************************/
static bool create_input_system_channel(
isp2401_input_system_cfg_t *cfg,
bool metadata,
input_system_channel_t *channel);
static void destroy_input_system_channel(
input_system_channel_t *channel);
static bool create_input_system_input_port(
isp2401_input_system_cfg_t *cfg,
input_system_input_port_t *input_port);
static void destroy_input_system_input_port(
input_system_input_port_t *input_port);
static bool calculate_input_system_channel_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
isp2401_input_system_cfg_t *isys_cfg,
input_system_channel_cfg_t *channel_cfg,
bool metadata);
static bool calculate_input_system_input_port_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
isp2401_input_system_cfg_t *isys_cfg,
input_system_input_port_cfg_t *input_port_cfg);
static bool acquire_sid(
stream2mmio_ID_t stream2mmio,
stream2mmio_sid_ID_t *sid);
static void release_sid(
stream2mmio_ID_t stream2mmio,
stream2mmio_sid_ID_t *sid);
static bool acquire_ib_buffer(
s32 bits_per_pixel,
s32 pixels_per_line,
s32 lines_per_frame,
s32 align_in_bytes,
bool online,
isp2401_ib_buffer_t *buf);
static void release_ib_buffer(
isp2401_ib_buffer_t *buf);
static bool acquire_dma_channel(
isys2401_dma_ID_t dma_id,
isys2401_dma_channel *channel);
static void release_dma_channel(
isys2401_dma_ID_t dma_id,
isys2401_dma_channel *channel);
static bool acquire_be_lut_entry(
csi_rx_backend_ID_t backend,
csi_mipi_packet_type_t packet_type,
csi_rx_backend_lut_entry_t *entry);
static void release_be_lut_entry(
csi_rx_backend_ID_t backend,
csi_mipi_packet_type_t packet_type,
csi_rx_backend_lut_entry_t *entry);
static bool calculate_tpg_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
isp2401_input_system_cfg_t *isys_cfg,
pixelgen_tpg_cfg_t *cfg);
static bool calculate_prbs_cfg(
input_system_channel_t *channel,
input_system_input_port_t *input_port,
isp2401_input_system_cfg_t *isys_cfg,
pixelgen_prbs_cfg_t *cfg);
static bool calculate_fe_cfg(
const isp2401_input_system_cfg_t *isys_cfg,
csi_rx_frontend_cfg_t *cfg);
static bool calculate_be_cfg(
const input_system_input_port_t *input_port,
const isp2401_input_system_cfg_t *isys_cfg,
bool metadata,
csi_rx_backend_cfg_t *cfg);
static bool calculate_stream2mmio_cfg(
const isp2401_input_system_cfg_t *isys_cfg,
bool metadata,
stream2mmio_cfg_t *cfg);
static bool calculate_ibuf_ctrl_cfg(
const input_system_channel_t *channel,
const input_system_input_port_t *input_port,
const isp2401_input_system_cfg_t *isys_cfg,
ibuf_ctrl_cfg_t *cfg);
static bool calculate_isys2401_dma_cfg(
const input_system_channel_t *channel,
const isp2401_input_system_cfg_t *isys_cfg,
isys2401_dma_cfg_t *cfg);
static bool calculate_isys2401_dma_port_cfg(
const isp2401_input_system_cfg_t *isys_cfg,
bool raw_packed,
bool metadata,
isys2401_dma_port_cfg_t *cfg);
static csi_mipi_packet_type_t get_csi_mipi_packet_type(
int32_t data_type);
static int32_t calculate_stride(
s32 bits_per_pixel,
s32 pixels_per_line,
bool raw_packed,
int32_t align_in_bytes);
/* end of Forwarded Declaration */
/**************************************************
*
* Public Methods
*
**************************************************/
/*
 * Create an isys (input system) stream: acquire the input port and the
 * data channel that back it, plus a metadata channel when metadata is
 * enabled in the descriptor.  Earlier acquisitions are rolled back on
 * failure.
 *
 * NOTE(review): despite the ia_css_isys_error_t return type, this
 * function returns true on success and false on failure.
 */
ia_css_isys_error_t ia_css_isys_stream_create(
    ia_css_isys_descr_t *isys_stream_descr,
    ia_css_isys_stream_h isys_stream,
    uint32_t isys_stream_id)
{
	ia_css_isys_error_t rc;

	if (!isys_stream_descr || !isys_stream ||
	    isys_stream_id >= SH_CSS_MAX_ISYS_CHANNEL_NODES)
		return false;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_isys_stream_create() enter:\n");

	/*Reset isys_stream to 0*/
	memset(isys_stream, 0, sizeof(*isys_stream));
	isys_stream->enable_metadata = isys_stream_descr->metadata.enable;
	isys_stream->id = isys_stream_id;

	isys_stream->linked_isys_stream_id = isys_stream_descr->linked_isys_stream_id;
	rc = create_input_system_input_port(isys_stream_descr,
					    &isys_stream->input_port);
	if (!rc)
		return false;

	rc = create_input_system_channel(isys_stream_descr, false,
					 &isys_stream->channel);
	if (!rc) {
		/* Roll back the input-port acquisition. */
		destroy_input_system_input_port(&isys_stream->input_port);
		return false;
	}

	/* create metadata channel */
	if (isys_stream_descr->metadata.enable) {
		rc = create_input_system_channel(isys_stream_descr, true,
						 &isys_stream->md_channel);
		if (!rc) {
			/* Roll back both earlier acquisitions. */
			destroy_input_system_input_port(&isys_stream->input_port);
			destroy_input_system_channel(&isys_stream->channel);
			return false;
		}
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_isys_stream_create() leave:\n");
	return true;
}
/*
 * Release all resources held by an isys stream: input port, data
 * channel and — when metadata was enabled at creation — the metadata
 * channel.
 */
void ia_css_isys_stream_destroy(
    ia_css_isys_stream_h isys_stream)
{
	destroy_input_system_input_port(&isys_stream->input_port);
	destroy_input_system_channel(&isys_stream->channel);

	/* The metadata channel only exists when metadata was enabled. */
	if (isys_stream->enable_metadata)
		destroy_input_system_channel(&isys_stream->md_channel);
}
/*
 * Translate a stream descriptor into the concrete configuration for
 * the stream's channel, optional metadata channel and input port, then
 * mark both the stream and the configuration valid.
 *
 * NOTE(review): like ia_css_isys_stream_create(), this returns
 * true/false despite the ia_css_isys_error_t return type.
 */
ia_css_isys_error_t ia_css_isys_stream_calculate_cfg(
    ia_css_isys_stream_h isys_stream,
    ia_css_isys_descr_t *isys_stream_descr,
    ia_css_isys_stream_cfg_t *isys_stream_cfg)
{
	ia_css_isys_error_t rc;

	if (!isys_stream_cfg ||
	    !isys_stream_descr ||
	    !isys_stream)
		return false;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_isys_stream_calculate_cfg() enter:\n");

	rc = calculate_input_system_channel_cfg(
		 &isys_stream->channel,
		 &isys_stream->input_port,
		 isys_stream_descr,
		 &isys_stream_cfg->channel_cfg,
		 false);
	if (!rc)
		return false;

	/* configure metadata channel */
	if (isys_stream_descr->metadata.enable) {
		isys_stream_cfg->enable_metadata = true;
		rc = calculate_input_system_channel_cfg(
			 &isys_stream->md_channel,
			 &isys_stream->input_port,
			 isys_stream_descr,
			 &isys_stream_cfg->md_channel_cfg,
			 true);
		if (!rc)
			return false;
	}

	rc = calculate_input_system_input_port_cfg(
		 &isys_stream->channel,
		 &isys_stream->input_port,
		 isys_stream_descr,
		 &isys_stream_cfg->input_port_cfg);
	if (!rc)
		return false;

	isys_stream->valid = 1;
	isys_stream_cfg->valid = 1;
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
			    "ia_css_isys_stream_calculate_cfg() leave:\n");
	return rc;
}
/* end of Public Methods */
/**************************************************
*
* Private Methods
*
**************************************************/
/*
 * Acquire the hardware resources of one input-system channel:
 * a stream2mmio SID, an input-buffer region and a DMA channel on
 * ISYS2401_DMA0.  The stream2mmio/ibuf_ctrl instances are selected by
 * the input port id.  On any failure, the resources acquired so far
 * are released and false is returned.
 */
static bool create_input_system_channel(
    isp2401_input_system_cfg_t *cfg,
    bool metadata,
    input_system_channel_t *me)
{
	bool rc = true;

	me->dma_id = ISYS2401_DMA0_ID;

	/* Port N maps to stream2mmio N and ibuf_ctrl N. */
	switch (cfg->input_port_id) {
	case INPUT_SYSTEM_CSI_PORT0_ID:
	case INPUT_SYSTEM_PIXELGEN_PORT0_ID:
		me->stream2mmio_id = STREAM2MMIO0_ID;
		me->ibuf_ctrl_id = IBUF_CTRL0_ID;
		break;
	case INPUT_SYSTEM_CSI_PORT1_ID:
	case INPUT_SYSTEM_PIXELGEN_PORT1_ID:
		me->stream2mmio_id = STREAM2MMIO1_ID;
		me->ibuf_ctrl_id = IBUF_CTRL1_ID;
		break;
	case INPUT_SYSTEM_CSI_PORT2_ID:
	case INPUT_SYSTEM_PIXELGEN_PORT2_ID:
		me->stream2mmio_id = STREAM2MMIO2_ID;
		me->ibuf_ctrl_id = IBUF_CTRL2_ID;
		break;
	default:
		rc = false;
		break;
	}

	if (!rc)
		return false;

	if (!acquire_sid(me->stream2mmio_id, &me->stream2mmio_sid_id)) {
		return false;
	}

	/* Metadata channels size the ibuf from the metadata geometry,
	 * data channels from the input port resolution. */
	if (!acquire_ib_buffer(
		metadata ? cfg->metadata.bits_per_pixel :
		cfg->input_port_resolution.bits_per_pixel,
		metadata ? cfg->metadata.pixels_per_line :
		cfg->input_port_resolution.pixels_per_line,
		metadata ? cfg->metadata.lines_per_frame :
		cfg->input_port_resolution.lines_per_frame,
		metadata ? cfg->metadata.align_req_in_bytes :
		cfg->input_port_resolution.align_req_in_bytes,
		cfg->online,
		&me->ib_buffer)) {
		/* Roll back the SID acquisition. */
		release_sid(me->stream2mmio_id, &me->stream2mmio_sid_id);
		return false;
	}

	if (!acquire_dma_channel(me->dma_id, &me->dma_channel)) {
		/* Roll back the SID and ibuf acquisitions. */
		release_sid(me->stream2mmio_id, &me->stream2mmio_sid_id);
		release_ib_buffer(&me->ib_buffer);
		return false;
	}

	return true;
}
/* Return every resource acquired by create_input_system_channel(). */
static void destroy_input_system_channel(
    input_system_channel_t *me)
{
	release_sid(me->stream2mmio_id, &me->stream2mmio_sid_id);
	release_ib_buffer(&me->ib_buffer);
	release_dma_channel(me->dma_id, &me->dma_channel);
}
/*
 * Acquire the front-end resources of an input port.  CSI ports get a
 * frontend/backend pair plus a backend LUT entry for their packet
 * type (and another LUT entry for metadata when enabled); pixelgen
 * ports only record their generator id.  Returns false for unknown
 * port ids or when a LUT entry cannot be acquired.
 */
static bool create_input_system_input_port(
    isp2401_input_system_cfg_t *cfg,
    input_system_input_port_t *me)
{
	csi_mipi_packet_type_t packet_type;
	bool rc = true;

	switch (cfg->input_port_id) {
	case INPUT_SYSTEM_CSI_PORT0_ID:
		me->csi_rx.frontend_id = CSI_RX_FRONTEND0_ID;
		me->csi_rx.backend_id = CSI_RX_BACKEND0_ID;

		packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
		me->csi_rx.packet_type = packet_type;

		rc = acquire_be_lut_entry(
			 me->csi_rx.backend_id,
			 packet_type,
			 &me->csi_rx.backend_lut_entry);
		break;
	case INPUT_SYSTEM_PIXELGEN_PORT0_ID:
		me->pixelgen.pixelgen_id = PIXELGEN0_ID;
		break;
	case INPUT_SYSTEM_CSI_PORT1_ID:
		me->csi_rx.frontend_id = CSI_RX_FRONTEND1_ID;
		me->csi_rx.backend_id = CSI_RX_BACKEND1_ID;

		packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
		me->csi_rx.packet_type = packet_type;

		rc = acquire_be_lut_entry(
			 me->csi_rx.backend_id,
			 packet_type,
			 &me->csi_rx.backend_lut_entry);
		break;
	case INPUT_SYSTEM_PIXELGEN_PORT1_ID:
		me->pixelgen.pixelgen_id = PIXELGEN1_ID;
		break;
	case INPUT_SYSTEM_CSI_PORT2_ID:
		me->csi_rx.frontend_id = CSI_RX_FRONTEND2_ID;
		me->csi_rx.backend_id = CSI_RX_BACKEND2_ID;

		packet_type = get_csi_mipi_packet_type(cfg->csi_port_attr.fmt_type);
		me->csi_rx.packet_type = packet_type;

		rc = acquire_be_lut_entry(
			 me->csi_rx.backend_id,
			 packet_type,
			 &me->csi_rx.backend_lut_entry);
		break;
	case INPUT_SYSTEM_PIXELGEN_PORT2_ID:
		me->pixelgen.pixelgen_id = PIXELGEN2_ID;
		break;
	default:
		rc = false;
		break;
	}

	me->source_type = cfg->mode;

	/* for metadata */
	me->metadata.packet_type = CSI_MIPI_PACKET_TYPE_UNDEFINED;
	if (rc && cfg->metadata.enable) {
		me->metadata.packet_type = get_csi_mipi_packet_type(
					       cfg->metadata.fmt_type);
		rc = acquire_be_lut_entry(
			 me->csi_rx.backend_id,
			 me->metadata.packet_type,
			 &me->metadata.backend_lut_entry);
	}

	return rc;
}
/*
 * Release the backend LUT entries of an input port.  Only sensor
 * (CSI) sources hold a data-packet LUT entry; a metadata LUT entry is
 * released whenever its packet type was set (i.e. metadata was
 * enabled at creation).  Pixelgen ports hold no LUT resources.
 */
static void destroy_input_system_input_port(
    input_system_input_port_t *me)
{
	if (me->source_type == INPUT_SYSTEM_SOURCE_TYPE_SENSOR) {
		release_be_lut_entry(
		    me->csi_rx.backend_id,
		    me->csi_rx.packet_type,
		    &me->csi_rx.backend_lut_entry);
	}

	if (me->metadata.packet_type != CSI_MIPI_PACKET_TYPE_UNDEFINED) {
		/*Free the backend lut allocated for metadata*/
		release_be_lut_entry(
		    me->csi_rx.backend_id,
		    me->metadata.packet_type,
		    &me->metadata.backend_lut_entry);
	}
}
/*
 * Compute the full configuration of one channel: stream2mmio, ibuf
 * controller, DMA engine and both DMA ports.  Returns false as soon
 * as any sub-computation fails.
 */
static bool calculate_input_system_channel_cfg(
    input_system_channel_t *channel,
    input_system_input_port_t *input_port,
    isp2401_input_system_cfg_t *isys_cfg,
    input_system_channel_cfg_t *channel_cfg,
    bool metadata)
{
	if (!calculate_stream2mmio_cfg(isys_cfg, metadata,
				       &channel_cfg->stream2mmio_cfg))
		return false;

	if (!calculate_ibuf_ctrl_cfg(channel, input_port, isys_cfg,
				     &channel_cfg->ibuf_ctrl_cfg))
		return false;

	/* Metadata channels use the metadata line count per frame. */
	if (metadata)
		channel_cfg->ibuf_ctrl_cfg.stores_per_frame =
		    isys_cfg->metadata.lines_per_frame;

	if (!calculate_isys2401_dma_cfg(channel, isys_cfg,
					&channel_cfg->dma_cfg))
		return false;

	/* The source port never uses RAW packing. */
	if (!calculate_isys2401_dma_port_cfg(isys_cfg, false, metadata,
					     &channel_cfg->dma_src_port_cfg))
		return false;

	return calculate_isys2401_dma_port_cfg(isys_cfg,
					       isys_cfg->raw_packed, metadata,
					       &channel_cfg->dma_dest_port_cfg);
}
/*
 * Compute the input-port configuration according to the port's source
 * type: CSI-RX frontend/backend for sensors (plus a metadata backend
 * when enabled), pixelgen TPG or PRBS for generated sources.  Returns
 * false for unknown source types or failed sub-computations.
 */
static bool calculate_input_system_input_port_cfg(
    input_system_channel_t *channel,
    input_system_input_port_t *input_port,
    isp2401_input_system_cfg_t *isys_cfg,
    input_system_input_port_cfg_t *input_port_cfg)
{
	bool rc;

	switch (input_port->source_type) {
	case INPUT_SYSTEM_SOURCE_TYPE_SENSOR:
		rc = calculate_fe_cfg(
			 isys_cfg,
			 &input_port_cfg->csi_rx_cfg.frontend_cfg);

		rc &= calculate_be_cfg(
			  input_port,
			  isys_cfg,
			  false,
			  &input_port_cfg->csi_rx_cfg.backend_cfg);

		if (rc && isys_cfg->metadata.enable)
			rc &= calculate_be_cfg(input_port, isys_cfg, true,
					       &input_port_cfg->csi_rx_cfg.md_backend_cfg);
		break;
	case INPUT_SYSTEM_SOURCE_TYPE_TPG:
		rc = calculate_tpg_cfg(
			 channel,
			 input_port,
			 isys_cfg,
			 &input_port_cfg->pixelgen_cfg.tpg_cfg);
		break;
	case INPUT_SYSTEM_SOURCE_TYPE_PRBS:
		rc = calculate_prbs_cfg(
			 channel,
			 input_port,
			 isys_cfg,
			 &input_port_cfg->pixelgen_cfg.prbs_cfg);
		break;
	default:
		rc = false;
		break;
	}

	return rc;
}
/* Thin wrapper: reserve a stream2mmio SID via the SID resource manager. */
static bool acquire_sid(
    stream2mmio_ID_t stream2mmio,
    stream2mmio_sid_ID_t *sid)
{
	return ia_css_isys_stream2mmio_sid_rmgr_acquire(stream2mmio, sid);
}
/* Thin wrapper: return a stream2mmio SID to the SID resource manager. */
static void release_sid(
    stream2mmio_ID_t stream2mmio,
    stream2mmio_sid_ID_t *sid)
{
	ia_css_isys_stream2mmio_sid_rmgr_release(stream2mmio, sid);
}
/* See also: ia_css_dma_configure_from_info() */
/*
 * Compute the line stride in bytes: the pixel count is padded up to
 * the alignment requirement, converted to DDR words and multiplied by
 * the DDR word size.  Unpacked (non-RAW-packed) pixels are first
 * rounded up to a whole number of bytes per pixel.
 */
static int32_t calculate_stride(
    s32 bits_per_pixel,
    s32 pixels_per_line,
    bool raw_packed,
    int32_t align_in_bytes)
{
	s32 padded_pixels = CEIL_MUL(pixels_per_line, align_in_bytes);
	s32 pixels_per_word;
	s32 words_per_line;

	if (!raw_packed)
		bits_per_pixel = CEIL_MUL(bits_per_pixel, 8);

	pixels_per_word = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
	words_per_line = ceil_div(padded_pixels, pixels_per_word);

	return HIVE_ISP_DDR_WORD_BYTES * words_per_line;
}
/*
 * Size and reserve an input-buffer region for a channel: stride is
 * derived from the (unpacked) pixel geometry, and either 4 lines
 * (online) or 2 lines (offline) are allocated.  lines_per_frame is
 * unused.
 */
static bool acquire_ib_buffer(
    s32 bits_per_pixel,
    s32 pixels_per_line,
    s32 lines_per_frame,
    s32 align_in_bytes,
    bool online,
    isp2401_ib_buffer_t *buf)
{
	buf->stride = calculate_stride(bits_per_pixel, pixels_per_line, false,
				       align_in_bytes);
	/* 4 lines for online use cases, 2 otherwise.  NOTE(review): the
	 * original comment called this "double buffering" although the
	 * online count is 4 — presumably double-size double buffering;
	 * confirm against the ibuf controller requirements. */
	if (online)
		buf->lines = 4;
	else
		buf->lines = 2;
	(void)(lines_per_frame);
	return ia_css_isys_ibuf_rmgr_acquire(buf->stride * buf->lines,
					     &buf->start_addr);
}
/* Thin wrapper: return an input-buffer region to the ibuf manager. */
static void release_ib_buffer(
    isp2401_ib_buffer_t *buf)
{
	ia_css_isys_ibuf_rmgr_release(&buf->start_addr);
}
/* Thin wrapper: reserve a DMA channel via the DMA resource manager. */
static bool acquire_dma_channel(
    isys2401_dma_ID_t dma_id,
    isys2401_dma_channel *channel)
{
	return ia_css_isys_dma_channel_rmgr_acquire(dma_id, channel);
}
/* Thin wrapper: return a DMA channel to the DMA resource manager. */
static void release_dma_channel(
    isys2401_dma_ID_t dma_id,
    isys2401_dma_channel *channel)
{
	ia_css_isys_dma_channel_rmgr_release(dma_id, channel);
}
/* Thin wrapper: reserve a CSI-RX backend LUT entry for a packet type. */
static bool acquire_be_lut_entry(
    csi_rx_backend_ID_t backend,
    csi_mipi_packet_type_t packet_type,
    csi_rx_backend_lut_entry_t *entry)
{
	return ia_css_isys_csi_rx_lut_rmgr_acquire(backend, packet_type, entry);
}
/* Thin wrapper: return a CSI-RX backend LUT entry to its manager. */
static void release_be_lut_entry(
    csi_rx_backend_ID_t backend,
    csi_mipi_packet_type_t packet_type,
    csi_rx_backend_lut_entry_t *entry)
{
	ia_css_isys_csi_rx_lut_rmgr_release(backend, packet_type, entry);
}
/*
 * Copy the descriptor's TPG (test pattern generator) attributes into
 * the pixelgen configuration.  channel/input_port are unused here.
 */
static bool calculate_tpg_cfg(
    input_system_channel_t *channel,
    input_system_input_port_t *input_port,
    isp2401_input_system_cfg_t *isys_cfg,
    pixelgen_tpg_cfg_t *cfg)
{
	memcpy(cfg, &isys_cfg->tpg_port_attr, sizeof(pixelgen_tpg_cfg_t));

	return true;
}
/*
 * Copy the descriptor's PRBS (pseudo-random bit sequence) attributes
 * into the pixelgen configuration.  channel/input_port are unused.
 */
static bool calculate_prbs_cfg(
    input_system_channel_t *channel,
    input_system_input_port_t *input_port,
    isp2401_input_system_cfg_t *isys_cfg,
    pixelgen_prbs_cfg_t *cfg)
{
	memcpy(cfg, &isys_cfg->prbs_port_attr, sizeof(pixelgen_prbs_cfg_t));

	return true;
}
/* CSI-RX frontend config: only the number of active MIPI lanes. */
static bool calculate_fe_cfg(
    const isp2401_input_system_cfg_t *isys_cfg,
    csi_rx_frontend_cfg_t *cfg)
{
	cfg->active_lanes = isys_cfg->csi_port_attr.active_lanes;
	return true;
}
/*
 * CSI-RX backend config for either the data path or (metadata=true)
 * the metadata path: copies the previously acquired LUT entry and
 * fills in the MIPI packet type, virtual channel, data type and the
 * compression attributes (data path only; metadata is uncompressed).
 */
static bool calculate_be_cfg(
    const input_system_input_port_t *input_port,
    const isp2401_input_system_cfg_t *isys_cfg,
    bool metadata,
    csi_rx_backend_cfg_t *cfg)
{
	memcpy(&cfg->lut_entry,
	       metadata ? &input_port->metadata.backend_lut_entry :
	       &input_port->csi_rx.backend_lut_entry,
	       sizeof(csi_rx_backend_lut_entry_t));

	cfg->csi_mipi_cfg.virtual_channel = isys_cfg->csi_port_attr.ch_id;
	if (metadata) {
		cfg->csi_mipi_packet_type = get_csi_mipi_packet_type(
						isys_cfg->metadata.fmt_type);
		cfg->csi_mipi_cfg.comp_enable = false;
		cfg->csi_mipi_cfg.data_type = isys_cfg->metadata.fmt_type;
	} else {
		cfg->csi_mipi_packet_type = get_csi_mipi_packet_type(
						isys_cfg->csi_port_attr.fmt_type);
		cfg->csi_mipi_cfg.data_type = isys_cfg->csi_port_attr.fmt_type;
		cfg->csi_mipi_cfg.comp_enable = isys_cfg->csi_port_attr.comp_enable;
		cfg->csi_mipi_cfg.comp_scheme = isys_cfg->csi_port_attr.comp_scheme;
		cfg->csi_mipi_cfg.comp_predictor = isys_cfg->csi_port_attr.comp_predictor;
		/* NOTE(review): comp_bit_idx is the offset of the data type
		 * above MIPI_FORMAT_CUSTOM0 — presumably only meaningful for
		 * custom/compressed formats; confirm against the backend HW
		 * documentation. */
		cfg->csi_mipi_cfg.comp_bit_idx = cfg->csi_mipi_cfg.data_type -
						 MIPI_FORMAT_CUSTOM0;
	}

	return true;
}
/*
 * stream2mmio config: pick the pixel width from the metadata or data
 * geometry, and enable blocking mode for generated (TPG/PRBS) sources.
 */
static bool calculate_stream2mmio_cfg(
    const isp2401_input_system_cfg_t *isys_cfg,
    bool metadata,
    stream2mmio_cfg_t *cfg
)
{
	if (metadata)
		cfg->bits_per_pixel = isys_cfg->metadata.bits_per_pixel;
	else
		cfg->bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel;

	cfg->enable_blocking =
	    (isys_cfg->mode == INPUT_SYSTEM_SOURCE_TYPE_TPG) ||
	    (isys_cfg->mode == INPUT_SYSTEM_SOURCE_TYPE_PRBS);

	return true;
}
/*
 * Build the input-buffer controller configuration for one channel:
 * DMA command wiring, the ibuf ring buffer itself, the destination
 * buffer layout and the stream2mmio sync/store commands.
 */
static bool calculate_ibuf_ctrl_cfg(
    const input_system_channel_t *channel,
    const input_system_input_port_t *input_port,
    const isp2401_input_system_cfg_t *isys_cfg,
    ibuf_ctrl_cfg_t *cfg)
{
	const s32 bits_per_byte = 8;
	s32 bits_per_pixel;
	s32 bytes_per_pixel;
	s32 left_padding;

	(void)input_port;

	bits_per_pixel = isys_cfg->input_port_resolution.bits_per_pixel;
	bytes_per_pixel = ceil_div(bits_per_pixel, bits_per_byte);

	/* Left padding in bytes, rounded up to a whole ISP vector. */
	left_padding = CEIL_MUL(isys_cfg->output_port_attr.left_padding, ISP_VEC_NELEMS)
	    * bytes_per_pixel;

	cfg->online = isys_cfg->online;

	cfg->dma_cfg.channel = channel->dma_channel;
	cfg->dma_cfg.cmd = _DMA_V2_MOVE_A2B_NO_SYNC_CHK_COMMAND;

	cfg->dma_cfg.shift_returned_items = 0;
	cfg->dma_cfg.elems_per_word_in_ibuf = 0;
	cfg->dma_cfg.elems_per_word_in_dest = 0;

	cfg->ib_buffer.start_addr = channel->ib_buffer.start_addr;
	cfg->ib_buffer.stride = channel->ib_buffer.stride;
	cfg->ib_buffer.lines = channel->ib_buffer.lines;

	/*
	 * Historical note (rvanimme@): "dest_buf_cfg" should arguably be
	 * part of the input system output port configuration.
	 * TODO: move "dest_buf_cfg" to the input system output port
	 * configuration.
	 */
	/* input_buf addr only available in sched mode;
	 * this buffer is allocated in isp, crun mode addr
	 * can be passed by after ISP allocation */
	if (cfg->online) {
		cfg->dest_buf_cfg.start_addr = ISP_INPUT_BUF_START_ADDR + left_padding;
		cfg->dest_buf_cfg.stride = bytes_per_pixel
		    * isys_cfg->output_port_attr.max_isp_input_width;
		cfg->dest_buf_cfg.lines = LINES_OF_ISP_INPUT_BUF;
	} else if (isys_cfg->raw_packed) {
		cfg->dest_buf_cfg.stride = calculate_stride(bits_per_pixel,
			isys_cfg->input_port_resolution.pixels_per_line,
			isys_cfg->raw_packed,
			isys_cfg->input_port_resolution.align_req_in_bytes);
	} else {
		cfg->dest_buf_cfg.stride = channel->ib_buffer.stride;
	}

	/*
	 * Historical note (rvanimme@): "items_per_store" is hard coded as
	 * "1", which is ONLY valid when the CSI-MIPI long packet is
	 * transferred.
	 * TODO: make "items_per_store" configurable so the CSI-MIPI short
	 * packet can be handled as well.
	 */
	cfg->items_per_store = 1;
	cfg->stores_per_frame = isys_cfg->input_port_resolution.lines_per_frame;

	cfg->stream2mmio_cfg.sync_cmd = _STREAM2MMIO_CMD_TOKEN_SYNC_FRAME;

	/* TODO: Define conditions as when to use store words vs store packets */
	cfg->stream2mmio_cfg.store_cmd = _STREAM2MMIO_CMD_TOKEN_STORE_PACKETS;

	return true;
}
/*
 * Fill in the ISYS2401 DMA configuration for one input-system channel.
 * Always succeeds (returns true).
 */
static bool calculate_isys2401_dma_cfg(
    const input_system_channel_t *channel,
    const isp2401_input_system_cfg_t *isys_cfg,
    isys2401_dma_cfg_t *cfg)
{
	cfg->channel = channel->dma_channel;

	/*
	 * Only the online/sensor mode streams into VMEM; the
	 * offline/buffered-sensor, TPG and PRBS sources go to DDR.
	 */
	cfg->connection = isys_cfg->online ?
			  isys2401_dma_ibuf_to_vmem_connection :
			  isys2401_dma_ibuf_to_ddr_connection;

	cfg->extension = isys2401_dma_zero_extension;
	cfg->height = 1;

	return true;
}
/* See also: ia_css_dma_configure_from_info() */
/*
 * Compute the DMA port configuration (stride/elements/width) for either
 * the image data or the metadata stream.  See also:
 * ia_css_dma_configure_from_info().  Always succeeds (returns true).
 */
static bool calculate_isys2401_dma_port_cfg(
    const isp2401_input_system_cfg_t *isys_cfg,
    bool raw_packed,
    bool metadata,
    isys2401_dma_port_cfg_t *cfg)
{
	s32 bpp;
	s32 ppl;
	s32 align_bytes;

	/* TODO: Move metadata away from isys_cfg to application layer */
	if (metadata) {
		bpp = isys_cfg->metadata.bits_per_pixel;
		ppl = isys_cfg->metadata.pixels_per_line;
		align_bytes = isys_cfg->metadata.align_req_in_bytes;
	} else {
		bpp = isys_cfg->input_port_resolution.bits_per_pixel;
		ppl = isys_cfg->input_port_resolution.pixels_per_line;
		align_bytes = isys_cfg->input_port_resolution.align_req_in_bytes;
	}

	cfg->stride = calculate_stride(bpp, ppl, raw_packed, align_bytes);

	/* Unpacked data is stored with byte-aligned pixels. */
	if (!raw_packed)
		bpp = CEIL_MUL(bpp, 8);

	cfg->elements = HIVE_ISP_DDR_WORD_BITS / bpp;
	cfg->cropping = 0;
	cfg->width = CEIL_DIV(cfg->stride, HIVE_ISP_DDR_WORD_BYTES);

	return true;
}
/*
 * Map a MIPI data type code onto the CSI packet class.
 * Codes up to MIPI_FORMAT_SHORT8 are short packets, codes up to
 * N_MIPI_FORMAT are long packets, anything else is reserved.
 */
static csi_mipi_packet_type_t get_csi_mipi_packet_type(
    int32_t data_type)
{
	if (data_type >= 0 && data_type <= MIPI_FORMAT_SHORT8)
		return CSI_MIPI_PACKET_TYPE_SHORT;

	if (data_type > MIPI_FORMAT_SHORT8 && data_type <= N_MIPI_FORMAT)
		return CSI_MIPI_PACKET_TYPE_LONG;

	return CSI_MIPI_PACKET_TYPE_RESERVED;
}
/* end of Private Methods */
#endif
| linux-master | drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010 - 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "input_system.h"
#include "ia_css_isys.h"
#include "platform_support.h"
#ifdef ISP2401
#include "isys_dma_public.h" /* isys2401_dma_set_max_burst_size() */
#include "isys_irq.h"
#endif
#if !defined(ISP2401)
input_system_err_t ia_css_isys_init(void)
{
backend_channel_cfg_t backend_ch0;
backend_channel_cfg_t backend_ch1;
target_cfg2400_t targetB;
target_cfg2400_t targetC;
u32 acq_mem_region_size = 24;
u32 acq_nof_mem_regions = 2;
input_system_err_t error = INPUT_SYSTEM_ERR_NO_ERROR;
memset(&backend_ch0, 0, sizeof(backend_channel_cfg_t));
memset(&backend_ch1, 0, sizeof(backend_channel_cfg_t));
memset(&targetB, 0, sizeof(targetB));
memset(&targetC, 0, sizeof(targetC));
error = input_system_configuration_reset();
if (error != INPUT_SYSTEM_ERR_NO_ERROR)
return error;
error = input_system_csi_xmem_channel_cfg(
0, /*ch_id */
INPUT_SYSTEM_PORT_A, /*port */
backend_ch0, /*backend_ch */
32, /*mem_region_size */
6, /*nof_mem_regions */
acq_mem_region_size, /*acq_mem_region_size */
acq_nof_mem_regions, /*acq_nof_mem_regions */
targetB, /*target */
3); /*nof_xmem_buffers */
if (error != INPUT_SYSTEM_ERR_NO_ERROR)
return error;
error = input_system_csi_xmem_channel_cfg(
1, /*ch_id */
INPUT_SYSTEM_PORT_B, /*port */
backend_ch0, /*backend_ch */
16, /*mem_region_size */
3, /*nof_mem_regions */
acq_mem_region_size, /*acq_mem_region_size */
acq_nof_mem_regions, /*acq_nof_mem_regions */
targetB, /*target */
3); /*nof_xmem_buffers */
if (error != INPUT_SYSTEM_ERR_NO_ERROR)
return error;
error = input_system_csi_xmem_channel_cfg(
2, /*ch_id */
INPUT_SYSTEM_PORT_C, /*port */
backend_ch1, /*backend_ch */
32, /*mem_region_size */
3, /*nof_mem_regions */
acq_mem_region_size, /*acq_mem_region_size */
acq_nof_mem_regions, /*acq_nof_mem_regions */
targetC, /*target */
2); /*nof_xmem_buffers */
if (error != INPUT_SYSTEM_ERR_NO_ERROR)
return error;
error = input_system_configuration_commit();
return error;
}
#elif defined(ISP2401)
/*
 * Initialize the ISP2401 input system: bring up its resource managers,
 * restrict the ISYS DMA to non-burst transactions and enable the IRQ
 * status reporting the driver polls.  Never fails.
 */
input_system_err_t ia_css_isys_init(void)
{
	/* Resource managers for RX LUTs, input buffers, DMA channels, SIDs. */
	ia_css_isys_csi_rx_lut_rmgr_init();
	ia_css_isys_ibuf_rmgr_init();
	ia_css_isys_dma_channel_rmgr_init();
	ia_css_isys_stream2mmio_sid_rmgr_init();

	isys2401_dma_set_max_burst_size(ISYS2401_DMA0_ID,
					1 /* Non Burst DMA transactions */);

	/* Enable 2401 input system IRQ status for driver to retrieve */
	isys_irqc_status_enable(ISYS_IRQ0_ID);
	isys_irqc_status_enable(ISYS_IRQ1_ID);
	isys_irqc_status_enable(ISYS_IRQ2_ID);

	return INPUT_SYSTEM_ERR_NO_ERROR;
}
#endif
#if !defined(ISP2401)
void ia_css_isys_uninit(void)
{
	/* The 2400 input system needs no teardown. */
}
#elif defined(ISP2401)
/* Release the resource managers set up by the ISP2401 ia_css_isys_init(). */
void ia_css_isys_uninit(void)
{
	ia_css_isys_csi_rx_lut_rmgr_uninit();
	ia_css_isys_ibuf_rmgr_uninit();
	ia_css_isys_dma_channel_rmgr_uninit();
	ia_css_isys_stream2mmio_sid_rmgr_uninit();
}
#endif
| linux-master | drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "ia_css_types.h"
#include "assert_support.h"
#include "ia_css_queue.h" /* sp2host_dequeue_irq_event() */
#include "ia_css_eventq.h"
#include "ia_css_event.h" /* ia_css_event_encode()
ia_css_event_decode()
*/
/*
 * Receive one event from the SP-to-host event queue.
 * On success the event word is decoded into @payload and 0 is
 * returned; otherwise the queue error code is propagated.
 */
int ia_css_eventq_recv(
    ia_css_queue_t *eventq_handle,
    uint8_t *payload)
{
	u32 sp_event;
	int err;

	/* Pop the next IRQ event token, if any. */
	err = ia_css_queue_dequeue(eventq_handle, &sp_event);
	if (err)
		return err;

	/* A token was available: unpack it into the caller's payload. */
	ia_css_event_decode(sp_event, payload);
	return 0;
}
/*
* @brief The Host sends the event to the SP.
* Refer to "sh_css_sp.h" for details.
*/
int ia_css_eventq_send(
    ia_css_queue_t *eventq_handle,
    u8 evt_id,
    u8 evt_payload_0,
    u8 evt_payload_1,
    uint8_t evt_payload_2)
{
	/* Event id plus its three payload bytes, in wire order. */
	u8 packed[4] = { evt_id, evt_payload_0, evt_payload_1, evt_payload_2 };
	u32 sw_event;
	int error;

	/* Encode the four bytes into a single 32-bit software event. */
	ia_css_event_encode(packed, 4, &sw_event);

	/* Busy-wait until the queue accepts the event or fails hard. */
	do {
		error = ia_css_queue_enqueue(eventq_handle, sw_event);
		if (error != -ENOBUFS) {
			/* Sent successfully, or a real failure: report it. */
			break;
		}
		/* Queue full: give the SP a moment to drain it. */
		udelay(1);
	} while (1);

	return error;
}
| linux-master | drivers/staging/media/atomisp/pci/runtime/eventq/src/eventq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <type_support.h> /* for uint32_t */
#include "ia_css_timer.h" /*struct ia_css_clock_tick */
#include "sh_css_legacy.h" /* IA_CSS_PIPE_ID_NUM*/
#include "gp_timer.h" /*gp_timer_read()*/
#include "assert_support.h"
/*
 * Snapshot the free-running GP timer into @curr_ts->ticks.
 * Returns 0 on success, -EINVAL for a NULL pointer.
 */
int ia_css_timer_get_current_tick(struct ia_css_clock_tick *curr_ts)
{
	assert(curr_ts);
	/* Runtime guard for builds where assert() is compiled out. */
	if (!curr_ts)
		return -EINVAL;
	curr_ts->ticks = (clock_value_t)gp_timer_read(GP_TIMER_SEL);
	return 0;
}
| linux-master | drivers/staging/media/atomisp/pci/runtime/timer/src/timer.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "hmm.h"
#include "ia_css_frame.h"
#include <math_support.h>
#include "assert_support.h"
#include "ia_css_debug.h"
#include "isp.h"
#include "sh_css_internal.h"
#include "atomisp_internal.h"
#define NV12_TILEY_TILE_WIDTH 128
#define NV12_TILEY_TILE_HEIGHT 32
/**************************************************************************
** Static functions declarations
**************************************************************************/
static void frame_init_plane(struct ia_css_frame_plane *plane,
unsigned int width,
unsigned int stride,
unsigned int height,
unsigned int offset);
static void frame_init_single_plane(struct ia_css_frame *frame,
struct ia_css_frame_plane *plane,
unsigned int height,
unsigned int subpixels_per_line,
unsigned int bytes_per_pixel);
static void frame_init_raw_single_plane(
struct ia_css_frame *frame,
struct ia_css_frame_plane *plane,
unsigned int height,
unsigned int subpixels_per_line,
unsigned int bits_per_pixel);
static void frame_init_nv_planes(struct ia_css_frame *frame,
unsigned int horizontal_decimation,
unsigned int vertical_decimation,
unsigned int bytes_per_element);
static void frame_init_yuv_planes(struct ia_css_frame *frame,
unsigned int horizontal_decimation,
unsigned int vertical_decimation,
bool swap_uv,
unsigned int bytes_per_element);
static void frame_init_rgb_planes(struct ia_css_frame *frame,
unsigned int bytes_per_element);
static void frame_init_qplane6_planes(struct ia_css_frame *frame);
static int frame_allocate_buffer_data(struct ia_css_frame *frame);
static int frame_allocate_with_data(struct ia_css_frame **frame,
unsigned int width,
unsigned int height,
enum ia_css_frame_format format,
unsigned int padded_width,
unsigned int raw_bit_depth);
static struct ia_css_frame *frame_create(unsigned int width,
unsigned int height,
enum ia_css_frame_format format,
unsigned int padded_width,
unsigned int raw_bit_depth,
bool valid);
static unsigned
ia_css_elems_bytes_from_info(
const struct ia_css_frame_info *info);
/**************************************************************************
** CSS API functions, exposed by ia_css.h
**************************************************************************/
/*
 * Allocate a frame using the geometry/format described by @info.
 * Thin wrapper around ia_css_frame_allocate().
 */
int ia_css_frame_allocate_from_info(struct ia_css_frame **frame,
				    const struct ia_css_frame_info *info)
{
	int ret;

	if (!frame || !info)
		return -EINVAL;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_frame_allocate_from_info() enter:\n");

	ret = ia_css_frame_allocate(frame, info->res.width, info->res.height,
				    info->format, info->padded_width,
				    info->raw_bit_depth);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_frame_allocate_from_info() leave:\n");
	return ret;
}
/*
 * Allocate an ia_css_frame plus its ISP (hmm) data buffer.
 * Returns 0 on success; -EINVAL for NULL/zero-sized input,
 * -ENOMEM when allocation fails.
 */
int ia_css_frame_allocate(struct ia_css_frame **frame,
			  unsigned int width,
			  unsigned int height,
			  enum ia_css_frame_format format,
			  unsigned int padded_width,
			  unsigned int raw_bit_depth)
{
	int ret;

	if (!frame || width == 0 || height == 0)
		return -EINVAL;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_frame_allocate() enter: width=%d, height=%d, format=%d, padded_width=%d, raw_bit_depth=%d\n",
			    width, height, format, padded_width, raw_bit_depth);

	ret = frame_allocate_with_data(frame, width, height, format,
				       padded_width, raw_bit_depth);

	if (ret == 0 && *frame)
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "ia_css_frame_allocate() leave: frame=%p, data(DDR address)=0x%x\n",
				    *frame, (*frame)->data);
	else
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "ia_css_frame_allocate() leave: frame=%p, data(DDR address)=0x%x\n",
				    (void *)-1, (unsigned int)-1);

	return ret;
}
/* Free a frame and its hmm data buffer; NULL is tolerated, like kfree(). */
void ia_css_frame_free(struct ia_css_frame *frame)
{
	IA_CSS_ENTER_PRIVATE("frame = %p", frame);

	if (frame) {
		/* Release the ISP-visible buffer, then the host-side object. */
		hmm_free(frame->data);
		kvfree(frame);
	}

	IA_CSS_LEAVE_PRIVATE("void");
}
/**************************************************************************
** Module public functions
**************************************************************************/
/* Validate that @info describes a non-degenerate frame resolution. */
int ia_css_frame_check_info(const struct ia_css_frame_info *info)
{
	assert(info);

	return (info->res.width && info->res.height) ? 0 : -EINVAL;
}
/*
 * Derive frame->planes layout and frame->data_bytes from the frame's
 * format and geometry (frame->frame_info).
 * Returns 0 on success, -EINVAL for MIPI or unknown formats.
 */
int ia_css_frame_init_planes(struct ia_css_frame *frame)
{
	assert(frame);

	switch (frame->frame_info.format) {
	case IA_CSS_FRAME_FORMAT_MIPI:
		dev_err(atomisp_dev,
			"%s: unexpected use of IA_CSS_FRAME_FORMAT_MIPI\n", __func__);
		return -EINVAL;
	case IA_CSS_FRAME_FORMAT_RAW_PACKED:
		frame_init_raw_single_plane(frame, &frame->planes.raw,
					    frame->frame_info.res.height,
					    frame->frame_info.padded_width,
					    frame->frame_info.raw_bit_depth);
		break;
	case IA_CSS_FRAME_FORMAT_RAW:
		/* 1 byte per pixel up to 8-bit depths, 2 bytes above that. */
		frame_init_single_plane(frame, &frame->planes.raw,
					frame->frame_info.res.height,
					frame->frame_info.padded_width,
					frame->frame_info.raw_bit_depth <= 8 ? 1 : 2);
		break;
	case IA_CSS_FRAME_FORMAT_RGB565:
		frame_init_single_plane(frame, &frame->planes.rgb,
					frame->frame_info.res.height,
					frame->frame_info.padded_width, 2);
		break;
	case IA_CSS_FRAME_FORMAT_RGBA888:
		frame_init_single_plane(frame, &frame->planes.rgb,
					frame->frame_info.res.height,
					frame->frame_info.padded_width * 4, 1);
		break;
	case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
		frame_init_rgb_planes(frame, 1);
		break;
	/* yuyv and uyvu have the same frame layout, only the data
	 * positioning differs.
	 */
	case IA_CSS_FRAME_FORMAT_YUYV:
	case IA_CSS_FRAME_FORMAT_UYVY:
	case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
	case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
		frame_init_single_plane(frame, &frame->planes.yuyv,
					frame->frame_info.res.height,
					frame->frame_info.padded_width * 2, 1);
		break;
	case IA_CSS_FRAME_FORMAT_YUV_LINE:
		/* Needs 3 extra lines to allow vf_pp prefetching */
		frame_init_single_plane(frame, &frame->planes.yuyv,
					frame->frame_info.res.height * 3 / 2 + 3,
					frame->frame_info.padded_width, 1);
		break;
	case IA_CSS_FRAME_FORMAT_NV11:
		frame_init_nv_planes(frame, 4, 1, 1);
		break;
	/* nv12 and nv21 have the same frame layout, only the data
	 * positioning differs.
	 */
	case IA_CSS_FRAME_FORMAT_NV12:
	case IA_CSS_FRAME_FORMAT_NV21:
	case IA_CSS_FRAME_FORMAT_NV12_TILEY:
		frame_init_nv_planes(frame, 2, 2, 1);
		break;
	case IA_CSS_FRAME_FORMAT_NV12_16:
		frame_init_nv_planes(frame, 2, 2, 2);
		break;
	/* nv16 and nv61 have the same frame layout, only the data
	 * positioning differs.
	 */
	case IA_CSS_FRAME_FORMAT_NV16:
	case IA_CSS_FRAME_FORMAT_NV61:
		frame_init_nv_planes(frame, 2, 1, 1);
		break;
	case IA_CSS_FRAME_FORMAT_YUV420:
		frame_init_yuv_planes(frame, 2, 2, false, 1);
		break;
	case IA_CSS_FRAME_FORMAT_YUV422:
		frame_init_yuv_planes(frame, 2, 1, false, 1);
		break;
	case IA_CSS_FRAME_FORMAT_YUV444:
		frame_init_yuv_planes(frame, 1, 1, false, 1);
		break;
	case IA_CSS_FRAME_FORMAT_YUV420_16:
		frame_init_yuv_planes(frame, 2, 2, false, 2);
		break;
	case IA_CSS_FRAME_FORMAT_YUV422_16:
		frame_init_yuv_planes(frame, 2, 1, false, 2);
		break;
	case IA_CSS_FRAME_FORMAT_YV12:
		/* swap_uv = true: V plane comes before the U plane. */
		frame_init_yuv_planes(frame, 2, 2, true, 1);
		break;
	case IA_CSS_FRAME_FORMAT_YV16:
		frame_init_yuv_planes(frame, 2, 1, true, 1);
		break;
	case IA_CSS_FRAME_FORMAT_QPLANE6:
		frame_init_qplane6_planes(frame);
		break;
	case IA_CSS_FRAME_FORMAT_BINARY_8:
		frame_init_single_plane(frame, &frame->planes.binary.data,
					frame->frame_info.res.height,
					frame->frame_info.padded_width, 1);
		frame->planes.binary.size = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* Round @width up to the alignment unit required by @format. */
unsigned int ia_css_frame_pad_width(unsigned int width, enum ia_css_frame_format format)
{
	unsigned int align;

	switch (format) {
	/*
	 * Frames with a U and V plane of 8 bits per pixel need to have
	 * all planes aligned, this means double the alignment for the
	 * Y plane if the horizontal decimation is 2.
	 */
	case IA_CSS_FRAME_FORMAT_YUV420:
	case IA_CSS_FRAME_FORMAT_YV12:
	case IA_CSS_FRAME_FORMAT_NV12:
	case IA_CSS_FRAME_FORMAT_NV21:
	case IA_CSS_FRAME_FORMAT_BINARY_8:
	case IA_CSS_FRAME_FORMAT_YUV_LINE:
		align = 2 * HIVE_ISP_DDR_WORD_BYTES;
		break;
	case IA_CSS_FRAME_FORMAT_NV12_TILEY:
		align = NV12_TILEY_TILE_WIDTH;
		break;
	case IA_CSS_FRAME_FORMAT_RAW:
	case IA_CSS_FRAME_FORMAT_RAW_PACKED:
		align = 2 * ISP_VEC_NELEMS;
		break;
	default:
		align = HIVE_ISP_DDR_WORD_BYTES;
		break;
	}

	return CEIL_MUL(width, align);
}
/*
 * Set info->res.width and derive info->padded_width, honouring both the
 * caller's minimum padded width and the format's alignment rules.
 */
void ia_css_frame_info_set_width(struct ia_css_frame_info *info,
				 unsigned int width,
				 unsigned int min_padded_width)
{
	unsigned int target;

	IA_CSS_ENTER_PRIVATE("info = %p,width = %d, minimum padded width = %d",
			     info, width, min_padded_width);

	if (!info) {
		IA_CSS_ERROR("NULL input parameter");
		IA_CSS_LEAVE_PRIVATE("");
		return;
	}

	/* Pad at least to the requested minimum, never below the width. */
	target = (min_padded_width > width) ? min_padded_width : width;

	info->res.width = width;
	info->padded_width = ia_css_frame_pad_width(target, info->format);

	IA_CSS_LEAVE_PRIVATE("");
}
/* Update only the format field of @info; geometry is left untouched. */
void ia_css_frame_info_set_format(struct ia_css_frame_info *info,
				  enum ia_css_frame_format format)
{
	assert(info);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_frame_info_set_format() enter:\n");
	info->format = format;
}
/*
 * Initialize @info with the given geometry and format.
 * @aligned is passed through as the minimum padded width.
 */
void ia_css_frame_info_init(struct ia_css_frame_info *info,
			    unsigned int width,
			    unsigned int height,
			    enum ia_css_frame_format format,
			    unsigned int aligned)
{
	IA_CSS_ENTER_PRIVATE("info = %p, width = %d, height = %d, format = %d, aligned = %d",
			     info, width, height, format, aligned);

	if (!info) {
		IA_CSS_ERROR("NULL input parameter");
		IA_CSS_LEAVE_PRIVATE("");
		return;
	}
	info->res.height = height;
	info->format = format;
	/* Setting the width also derives the format-aligned padded width. */
	ia_css_frame_info_set_width(info, width, aligned);
	IA_CSS_LEAVE_PRIVATE("");
}
/* Free up to @num_frames frames, NULLing each released array slot. */
void ia_css_frame_free_multiple(unsigned int num_frames,
				struct ia_css_frame **frames_array)
{
	unsigned int idx;

	for (idx = 0; idx < num_frames; idx++) {
		if (!frames_array[idx])
			continue;
		ia_css_frame_free(frames_array[idx]);
		frames_array[idx] = NULL;
	}
}
/*
 * Allocate a formatless frame backed by exactly @buffer_size_bytes.
 * On failure *frame is set to NULL and an error code is returned.
 */
int ia_css_frame_allocate_with_buffer_size(struct ia_css_frame **frame,
	const unsigned int buffer_size_bytes)
{
	struct ia_css_frame *me;
	int err;

	/* Create a bare frame object; format/geometry are not valid yet. */
	me = frame_create(0, 0, IA_CSS_FRAME_FORMAT_NUM, 0, 0, false);
	if (!me)
		return -ENOMEM;

	me->data_bytes = buffer_size_bytes;
	err = frame_allocate_buffer_data(me);
	if (err) {
		kvfree(me);
		me = NULL;
	}

	*frame = me;
	return err;
}
/* Return true when both infos are non-NULL and have identical width/height. */
bool ia_css_frame_info_is_same_resolution(
    const struct ia_css_frame_info *info_a,
    const struct ia_css_frame_info *info_b)
{
	if (!info_a || !info_b)
		return false;
	return (info_a->res.width == info_b->res.width) &&
	       (info_a->res.height == info_b->res.height);
}
/*
 * Return true when both frames share format, padded width and resolution.
 *
 * Fix: the original tested &frame_a->frame_info / &frame_b->frame_info
 * for NULL — addresses of struct members that can never be NULL for a
 * non-NULL frame — so passing a NULL frame dereferenced it.  Check the
 * frame pointers themselves instead.
 */
bool ia_css_frame_is_same_type(const struct ia_css_frame *frame_a,
			       const struct ia_css_frame *frame_b)
{
	const struct ia_css_frame_info *info_a;
	const struct ia_css_frame_info *info_b;
	bool is_equal;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_frame_is_same_type() enter:\n");

	if (!frame_a || !frame_b)
		return false;

	info_a = &frame_a->frame_info;
	info_b = &frame_b->frame_info;

	if (info_a->format != info_b->format)
		return false;
	if (info_a->padded_width != info_b->padded_width)
		return false;

	is_equal = ia_css_frame_info_is_same_resolution(info_a, info_b);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_frame_is_same_type() leave:\n");
	return is_equal;
}
/*
 * Translate a frame info into a DMA port configuration.
 * Returns 0 on success, -EINVAL when padded_width cannot hold the width.
 */
int ia_css_dma_configure_from_info(struct dma_port_config *config,
				   const struct ia_css_frame_info *info)
{
	unsigned int is_raw_packed = info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED;
	unsigned int bits_per_pixel;
	unsigned int pix_per_ddrword;
	unsigned int words_per_line;

	/* Packed raw keeps its native bit depth; others are byte-based. */
	bits_per_pixel = is_raw_packed ? info->raw_bit_depth :
			 ia_css_elems_bytes_from_info(info) * 8;
	pix_per_ddrword = HIVE_ISP_DDR_WORD_BITS / bits_per_pixel;
	words_per_line = CEIL_DIV(info->padded_width, pix_per_ddrword);

	config->stride = HIVE_ISP_DDR_WORD_BYTES * words_per_line;
	config->elems = (uint8_t)pix_per_ddrword;
	config->width = (uint16_t)info->res.width;
	config->crop = 0;

	if (config->width > info->padded_width) {
		dev_err(atomisp_dev, "internal error: padded_width is too small!\n");
		return -EINVAL;
	}

	return 0;
}
/**************************************************************************
** Static functions
**************************************************************************/
/* Record one plane's geometry and its byte offset within the frame buffer. */
static void frame_init_plane(struct ia_css_frame_plane *plane,
			     unsigned int width,
			     unsigned int stride,
			     unsigned int height,
			     unsigned int offset)
{
	plane->width = width;
	plane->height = height;
	plane->stride = stride;
	plane->offset = offset;
}
/* Size a single-plane frame and describe its one plane. */
static void frame_init_single_plane(struct ia_css_frame *frame,
				    struct ia_css_frame_plane *plane,
				    unsigned int height,
				    unsigned int subpixels_per_line,
				    unsigned int bytes_per_pixel)
{
	unsigned int stride = subpixels_per_line * bytes_per_pixel;

	/*
	 * The ISYS2401 hardware needs an even number of lines, so the
	 * allocation is rounded up.  Image content is unaffected (only
	 * jpeg/embedded data care), and since allocation and release both
	 * use data_bytes there is no leak.
	 */
	frame->data_bytes = stride * CEIL_MUL2(height, 2);
	frame_init_plane(plane, subpixels_per_line, stride, height, 0);
}
/*
 * Size a packed-raw single-plane frame: pixels are packed into DDR
 * words, so the stride is a whole number of DDR words per line.
 */
static void frame_init_raw_single_plane(
    struct ia_css_frame *frame,
    struct ia_css_frame_plane *plane,
    unsigned int height,
    unsigned int subpixels_per_line,
    unsigned int bits_per_pixel)
{
	unsigned int stride;

	assert(frame);

	stride = HIVE_ISP_DDR_WORD_BYTES *
	CEIL_DIV(subpixels_per_line,
		 HIVE_ISP_DDR_WORD_BITS / bits_per_pixel);
	frame->data_bytes = stride * height;
	frame_init_plane(plane, subpixels_per_line, stride, height, 0);
	return;
}
/*
 * Lay out an NV-style frame: one luma (Y) plane followed by one
 * interleaved chroma (UV) plane.
 */
static void frame_init_nv_planes(struct ia_css_frame *frame,
				 unsigned int horizontal_decimation,
				 unsigned int vertical_decimation,
				 unsigned int bytes_per_element)
{
	unsigned int luma_width = frame->frame_info.padded_width;
	unsigned int luma_height = frame->frame_info.res.height;
	unsigned int chroma_width;
	unsigned int chroma_height;
	unsigned int luma_stride, chroma_stride;
	unsigned int luma_bytes, chroma_bytes;

	assert(horizontal_decimation != 0 && vertical_decimation != 0);

	/* The chroma plane interleaves U and V, hence the factor of two. */
	chroma_width = 2 * (luma_width / horizontal_decimation);
	chroma_height = luma_height / vertical_decimation;

	/* Tiled NV12 rounds every dimension up to whole tiles. */
	if (frame->frame_info.format == IA_CSS_FRAME_FORMAT_NV12_TILEY) {
		luma_width = CEIL_MUL(luma_width, NV12_TILEY_TILE_WIDTH);
		chroma_width = CEIL_MUL(chroma_width, NV12_TILEY_TILE_WIDTH);
		luma_height = CEIL_MUL(luma_height, NV12_TILEY_TILE_HEIGHT);
		chroma_height = CEIL_MUL(chroma_height, NV12_TILEY_TILE_HEIGHT);
	}

	luma_stride = luma_width * bytes_per_element;
	chroma_stride = chroma_width * bytes_per_element;
	luma_bytes = luma_stride * luma_height;
	chroma_bytes = chroma_stride * chroma_height;

	frame->data_bytes = luma_bytes + chroma_bytes;
	frame_init_plane(&frame->planes.nv.y, luma_width, luma_stride,
			 luma_height, 0);
	frame_init_plane(&frame->planes.nv.uv, chroma_width, chroma_stride,
			 chroma_height, luma_bytes);
}
/*
 * Lay out a planar YUV frame: one Y plane followed by two chroma
 * planes.  @swap_uv selects YV12/YV16 ordering (V before U).
 */
static void frame_init_yuv_planes(struct ia_css_frame *frame,
				  unsigned int horizontal_decimation,
				  unsigned int vertical_decimation,
				  bool swap_uv,
				  unsigned int bytes_per_element)
{
	unsigned int y_width = frame->frame_info.padded_width;
	unsigned int y_height = frame->frame_info.res.height;
	unsigned int uv_width = y_width / horizontal_decimation;
	unsigned int uv_height = y_height / vertical_decimation;
	unsigned int y_stride = y_width * bytes_per_element;
	unsigned int uv_stride = uv_width * bytes_per_element;
	unsigned int y_bytes = y_stride * y_height;
	unsigned int uv_bytes = uv_stride * uv_height;
	struct ia_css_frame_plane *first_chroma;
	struct ia_css_frame_plane *second_chroma;

	frame->data_bytes = y_bytes + 2 * uv_bytes;
	frame_init_plane(&frame->planes.yuv.y, y_width, y_stride, y_height, 0);

	first_chroma = swap_uv ? &frame->planes.yuv.v : &frame->planes.yuv.u;
	second_chroma = swap_uv ? &frame->planes.yuv.u : &frame->planes.yuv.v;
	frame_init_plane(first_chroma, uv_width, uv_stride, uv_height,
			 y_bytes);
	frame_init_plane(second_chroma, uv_width, uv_stride, uv_height,
			 y_bytes + uv_bytes);
}
/* Lay out a planar RGB frame: three equally sized planes, R then G then B. */
static void frame_init_rgb_planes(struct ia_css_frame *frame,
				  unsigned int bytes_per_element)
{
	unsigned int width = frame->frame_info.res.width;
	unsigned int height = frame->frame_info.res.height;
	unsigned int stride = width * bytes_per_element;
	unsigned int plane_bytes = stride * height;

	frame->data_bytes = 3 * plane_bytes;
	frame_init_plane(&frame->planes.planar_rgb.r, width, stride, height, 0);
	frame_init_plane(&frame->planes.planar_rgb.g,
			 width, stride, height, 1 * plane_bytes);
	frame_init_plane(&frame->planes.planar_rgb.b,
			 width, stride, height, 2 * plane_bytes);
}
/*
 * Lay out a qplane6 frame: six equally sized half-resolution planes
 * stored back to back (r, r_at_b, gr, gb, b, b_at_r).
 */
static void frame_init_qplane6_planes(struct ia_css_frame *frame)
{
	struct ia_css_frame_plane *planes[6] = {
		&frame->planes.plane6.r,
		&frame->planes.plane6.r_at_b,
		&frame->planes.plane6.gr,
		&frame->planes.plane6.gb,
		&frame->planes.plane6.b,
		&frame->planes.plane6.b_at_r,
	};
	unsigned int width = frame->frame_info.padded_width / 2;
	unsigned int height = frame->frame_info.res.height / 2;
	unsigned int stride = width * 2;
	unsigned int bytes = stride * height;
	unsigned int i;

	frame->data_bytes = 6 * bytes;
	for (i = 0; i < 6; i++)
		frame_init_plane(planes[i], width, stride, height, i * bytes);
}
/* Back the frame with ISP-visible (hmm) memory sized frame->data_bytes. */
static int frame_allocate_buffer_data(struct ia_css_frame *frame)
{
	frame->data = hmm_alloc(frame->data_bytes);
	return (frame->data == mmgr_NULL) ? -ENOMEM : 0;
}
/*
 * Create a frame object, compute its plane layout and allocate its
 * data buffer.  On failure *frame is set to NULL.
 */
static int frame_allocate_with_data(struct ia_css_frame **frame,
				    unsigned int width,
				    unsigned int height,
				    enum ia_css_frame_format format,
				    unsigned int padded_width,
				    unsigned int raw_bit_depth)
{
	struct ia_css_frame *me;
	int err;

	me = frame_create(width, height, format, padded_width,
			  raw_bit_depth, true);
	if (!me)
		return -ENOMEM;

	/* Planes first: they determine data_bytes for the allocation. */
	err = ia_css_frame_init_planes(me);
	if (!err)
		err = frame_allocate_buffer_data(me);

	if (err) {
		kvfree(me);
		*frame = NULL;
	} else {
		*frame = me;
	}
	return err;
}
/*
 * Allocate and zero a host-side frame object and record its metadata.
 * No ISP data buffer is allocated here (data stays mmgr_NULL).
 * Returns NULL on allocation failure.
 */
static struct ia_css_frame *frame_create(unsigned int width,
	unsigned int height,
	enum ia_css_frame_format format,
	unsigned int padded_width,
	unsigned int raw_bit_depth,
	bool valid)
{
	struct ia_css_frame *me = kvmalloc(sizeof(*me), GFP_KERNEL);

	if (!me)
		return NULL;
	memset(me, 0, sizeof(*me));
	me->frame_info.res.width = width;
	me->frame_info.res.height = height;
	me->frame_info.format = format;
	me->frame_info.padded_width = padded_width;
	me->frame_info.raw_bit_depth = raw_bit_depth;
	me->valid = valid;
	me->data_bytes = 0;
	me->data = mmgr_NULL;
	/* To indicate it is not valid frame. */
	me->dynamic_queue_id = (int)SH_CSS_INVALID_QUEUE_ID;
	me->buf_type = IA_CSS_BUFFER_TYPE_INVALID;
	return me;
}
/* Bytes per DMA element for the frame's format (default 1). */
static unsigned
ia_css_elems_bytes_from_info(const struct ia_css_frame_info *info)
{
	switch (info->format) {
	case IA_CSS_FRAME_FORMAT_RGB565:
	case IA_CSS_FRAME_FORMAT_YUV420_16:
	case IA_CSS_FRAME_FORMAT_YUV422_16:
	case IA_CSS_FRAME_FORMAT_QPLANE6:
		return 2; /* bytes per pixel */
	/* Note: Essentially NV12_16 is a 2 bytes per pixel format, this return value is used
	 * to configure DMA for the output buffer,
	 * At least in SKC this data is overwritten by isp_output_init.sp.c except for elements(elems),
	 * which is configured from this return value,
	 * NV12_16 is implemented by a double buffer of 8 bit elements hence elems should be configured as 8 */
	case IA_CSS_FRAME_FORMAT_NV12_16:
		return 1; /* bytes per pixel */
	case IA_CSS_FRAME_FORMAT_RAW:
	case IA_CSS_FRAME_FORMAT_RAW_PACKED:
		/* Derive from the configured raw depth; fall back to 2. */
		return info->raw_bit_depth ?
		       CEIL_DIV(info->raw_bit_depth, 8) : 2;
	case IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
		return 3; /* bytes per pixel */
	case IA_CSS_FRAME_FORMAT_RGBA888:
		return 4; /* bytes per pixel */
	default:
		return 1; /* Default is 1 byte per pixel */
	}
}
/* Narrow a host-side frame info into the SP (firmware) representation. */
void ia_css_frame_info_to_frame_sp_info(
    struct ia_css_frame_sp_info *to,
    const struct ia_css_frame_info *from)
{
	ia_css_resolution_to_sp_resolution(&to->res, &from->res);
	/* SP fields are narrower; values are truncated to fit. */
	to->padded_width = (uint16_t)from->padded_width;
	to->format = (uint8_t)from->format;
	to->raw_bit_depth = (uint8_t)from->raw_bit_depth;
	to->raw_bayer_order = from->raw_bayer_order;
}
/* Narrow a host resolution into the 16-bit SP resolution struct. */
void ia_css_resolution_to_sp_resolution(
    struct ia_css_sp_resolution *to,
    const struct ia_css_resolution *from)
{
	to->width = (uint16_t)from->width;
	to->height = (uint16_t)from->height;
}
/*
 * Copy the caller-supplied geometry/format into @frame and compute its
 * plane layout.  Returns the result of ia_css_frame_init_planes().
 */
int ia_css_frame_init_from_info(struct ia_css_frame *frame,
				const struct ia_css_frame_info *frame_info)
{
	frame->frame_info.res.width = frame_info->res.width;
	frame->frame_info.res.height = frame_info->res.height;
	frame->frame_info.format = frame_info->format;
	frame->frame_info.padded_width = frame_info->padded_width;
	frame->frame_info.raw_bit_depth = frame_info->raw_bit_depth;
	frame->valid = true;
	/* Queue id / buffer type are placeholders until the frame is queued. */
	frame->dynamic_queue_id = SH_CSS_INVALID_QUEUE_ID;
	frame->buf_type = IA_CSS_BUFFER_TYPE_INVALID;

	return ia_css_frame_init_planes(frame);
}
| linux-master | drivers/staging/media/atomisp/pci/runtime/frame/src/frame.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2010 - 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "hmm.h"
#include "ia_css_types.h"
#define __INLINE_SP__
#include "sp.h"
#include "assert_support.h"
#include "ia_css_spctrl.h"
#include "ia_css_debug.h"
/* Per-SP bookkeeping for loaded firmware and its DMEM init descriptor. */
struct spctrl_context_info {
	struct ia_css_sp_init_dmem_cfg dmem_config; /* DMEM data/bss layout */
	u32 spctrl_config_dmem_addr; /* location of dmem_cfg in SP dmem */
	u32 spctrl_state_dmem_addr; /* location of the SP state word in SP dmem */
	unsigned int sp_entry; /* entry function ptr on SP */
	ia_css_ptr code_addr; /* sp firmware location in host mem-DDR*/
	u32 code_size; /* firmware blob size in bytes */
	char *program_name; /* used in case of PLATFORM_SIM */
};
static struct spctrl_context_info spctrl_cofig_info[N_SP_ID];
static bool spctrl_loaded[N_SP_ID] = {0};
/* Load firmware */
/*
 * Load SP firmware: record the DMEM init descriptor, copy code + data
 * to DDR and program the SP icache base address.
 *
 * Fix: the two error paths duplicated the free-and-return cleanup
 * (including a dead store to the local code_addr); they now share a
 * single goto-based exit path, per kernel coding style.
 *
 * Returns 0 on success, -EINVAL on bad arguments or misalignment,
 * -ENOMEM when the DDR allocation fails.
 */
int ia_css_spctrl_load_fw(sp_ID_t sp_id, ia_css_spctrl_cfg *spctrl_cfg)
{
	ia_css_ptr code_addr;
	struct ia_css_sp_init_dmem_cfg *init_dmem_cfg;
	int ret;

	if ((sp_id >= N_SP_ID) || (!spctrl_cfg))
		return -EINVAL;

	spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;

	/* Describe where the SP's DMEM data/bss live and how big they are. */
	init_dmem_cfg = &spctrl_cofig_info[sp_id].dmem_config;
	init_dmem_cfg->dmem_data_addr = spctrl_cfg->dmem_data_addr;
	init_dmem_cfg->dmem_bss_addr = spctrl_cfg->dmem_bss_addr;
	init_dmem_cfg->data_size = spctrl_cfg->data_size;
	init_dmem_cfg->bss_size = spctrl_cfg->bss_size;
	init_dmem_cfg->sp_id = sp_id;

	spctrl_cofig_info[sp_id].spctrl_config_dmem_addr =
	    spctrl_cfg->spctrl_config_dmem_addr;
	spctrl_cofig_info[sp_id].spctrl_state_dmem_addr =
	    spctrl_cfg->spctrl_state_dmem_addr;

	/* store code (text + icache) and data to DDR
	 *
	 * Data used to be stored separately, because of access alignment constraints,
	 * fix the FW generation instead
	 */
	code_addr = hmm_alloc(spctrl_cfg->code_size);
	if (code_addr == mmgr_NULL)
		return -ENOMEM;
	hmm_store(code_addr, spctrl_cfg->code, spctrl_cfg->code_size);

	if (sizeof(ia_css_ptr) > sizeof(hrt_data)) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
				    "size of ia_css_ptr can not be greater than hrt_data\n");
		ret = -EINVAL;
		goto err_free_code;
	}

	init_dmem_cfg->ddr_data_addr = code_addr + spctrl_cfg->ddr_data_offset;
	if ((init_dmem_cfg->ddr_data_addr % HIVE_ISP_DDR_WORD_BYTES) != 0) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
				    "DDR address pointer is not properly aligned for DMA transfer\n");
		ret = -EINVAL;
		goto err_free_code;
	}

	spctrl_cofig_info[sp_id].sp_entry = spctrl_cfg->sp_entry;
	spctrl_cofig_info[sp_id].code_addr = code_addr;
	spctrl_cofig_info[sp_id].program_name = spctrl_cfg->program_name;

	/* now we program the base address into the icache and
	 * invalidate the cache.
	 */
	sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG,
		      (hrt_data)spctrl_cofig_info[sp_id].code_addr);
	sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
	spctrl_loaded[sp_id] = true;
	return 0;

err_free_code:
	hmm_free(code_addr);
	return ret;
}
/* ISP2401 */
/*
 * sh_css_spctrl_reload_fw() - re-arm a previously loaded SP firmware.
 * @sp_id: which SP cell to reload.
 *
 * Re-programs the icache base address from the configuration saved by
 * ia_css_spctrl_load_fw(), invalidates the icache, and marks the firmware
 * as loaded again.
 *
 * NOTE(review): unlike the other entry points in this file, @sp_id is not
 * range-checked here — callers must pass a valid id; confirm all callers do.
 */
void sh_css_spctrl_reload_fw(sp_ID_t sp_id)
{
	/* now we program the base address into the icache and
	 * invalidate the cache.
	 */
	sp_ctrl_store(sp_id, SP_ICACHE_ADDR_REG,
		      (hrt_data)spctrl_cofig_info[sp_id].code_addr);
	sp_ctrl_setbit(sp_id, SP_ICACHE_INV_REG, SP_ICACHE_INV_BIT);
	spctrl_loaded[sp_id] = true;
}
/*
 * get_sp_code_addr() - return the DDR address of the loaded SP code.
 * @sp_id: which SP cell; not range-checked, caller must pass a valid id.
 *
 * Returns mmgr_NULL when no firmware has been loaded (or it was unloaded).
 */
ia_css_ptr get_sp_code_addr(sp_ID_t sp_id)
{
	return spctrl_cofig_info[sp_id].code_addr;
}
/*
 * ia_css_spctrl_unload_fw() - release the DDR copy of an SP firmware.
 * @sp_id: which SP cell to unload.
 *
 * Frees the code buffer allocated by ia_css_spctrl_load_fw() and marks the
 * firmware as unloaded.
 *
 * Return: 0 on success, -EINVAL if @sp_id is out of range or no firmware
 * is currently loaded for it.
 */
int ia_css_spctrl_unload_fw(sp_ID_t sp_id)
{
	/*
	 * || short-circuits, so spctrl_loaded[] is only indexed for valid
	 * ids; the original's explicit "(sp_id < N_SP_ID) &&" guard on the
	 * second operand was redundant.
	 */
	if (sp_id >= N_SP_ID || !spctrl_loaded[sp_id])
		return -EINVAL;

	/* freeup the resource */
	if (spctrl_cofig_info[sp_id].code_addr) {
		hmm_free(spctrl_cofig_info[sp_id].code_addr);
		spctrl_cofig_info[sp_id].code_addr = mmgr_NULL;
	}
	spctrl_loaded[sp_id] = false;
	return 0;
}
/* Initialize dmem_cfg in SP dmem and start SP program */
/*
 * ia_css_spctrl_start() - download the DMEM config and start the SP.
 * @sp_id: which SP cell to start; firmware must already be loaded.
 *
 * Writes the saved DMEM init descriptor into SP DMEM, programs the start
 * address, and sets the run/start bits in the SP status/control register.
 *
 * Return: 0 on success, -EINVAL if @sp_id is out of range or no firmware
 * is loaded for it.
 */
int ia_css_spctrl_start(sp_ID_t sp_id)
{
	/* || short-circuits; the redundant "(sp_id < N_SP_ID) &&" is dropped */
	if (sp_id >= N_SP_ID || !spctrl_loaded[sp_id])
		return -EINVAL;

	/* Set descr in the SP to initialize the SP DMEM */
	/*
	 * The FW stores user-space pointers to the FW, the ISP pointer
	 * is only available here
	 */
	assert(sizeof(unsigned int) <= sizeof(hrt_data));

	sp_dmem_store(sp_id,
		      spctrl_cofig_info[sp_id].spctrl_config_dmem_addr,
		      &spctrl_cofig_info[sp_id].dmem_config,
		      sizeof(spctrl_cofig_info[sp_id].dmem_config));
	/* set the start address */
	sp_ctrl_store(sp_id, SP_START_ADDR_REG,
		      (hrt_data)spctrl_cofig_info[sp_id].sp_entry);
	sp_ctrl_setbit(sp_id, SP_SC_REG, SP_RUN_BIT);
	sp_ctrl_setbit(sp_id, SP_SC_REG, SP_START_BIT);
	return 0;
}
/* Query the state of SP1 */
/*
 * ia_css_spctrl_get_state() - read the SP's software state word from DMEM.
 * @sp_id: which SP cell to query.
 *
 * Returns IA_CSS_SP_SW_TERMINATED for an out-of-range id; otherwise reads
 * the sp_sw_state word from SP DMEM (only for SP0_ID — other valid ids
 * return 0, the initial value of @state).
 *
 * NOTE: the local HIVE_ADDR_sp_sw_state must keep this exact name — the
 * sp_address_of(sp_sw_state) macro below expands to a reference to it.
 */
ia_css_spctrl_sp_sw_state ia_css_spctrl_get_state(sp_ID_t sp_id)
{
	ia_css_spctrl_sp_sw_state state = 0;
	unsigned int HIVE_ADDR_sp_sw_state;

	if (sp_id >= N_SP_ID)
		return IA_CSS_SP_SW_TERMINATED;

	HIVE_ADDR_sp_sw_state = spctrl_cofig_info[sp_id].spctrl_state_dmem_addr;
	(void)HIVE_ADDR_sp_sw_state; /* Suppres warnings in CRUN */

	if (sp_id == SP0_ID)
		state = sp_dmem_load_uint32(sp_id, (unsigned int)sp_address_of(sp_sw_state));
	return state;
}
/*
 * ia_css_spctrl_is_idle() - test the SP idle bit in its status register.
 * @sp_id: which SP cell to query (asserted to be in range).
 *
 * Returns the idle bit of the SP status/control register (non-zero = idle).
 */
int ia_css_spctrl_is_idle(sp_ID_t sp_id)
{
	assert(sp_id < N_SP_ID);

	return sp_ctrl_getbit(sp_id, SP_SC_REG, SP_IDLE_BIT);
}
| linux-master | drivers/staging/media/atomisp/pci/runtime/spctrl/src/spctrl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for Intel Camera Imaging ISP subsystem.
* Copyright (c) 2015, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include "ia_css_queue.h"
#include <math_support.h>
#include <ia_css_circbuf.h>
#include <ia_css_circbuf_desc.h>
#include "queue_access.h"
/*****************************************************************************
* Queue Public APIs
*****************************************************************************/
/*
 * ia_css_queue_local_init() - set up a queue backed by a local circular buffer.
 * @qhandle: queue handle to initialize.
 * @desc:    local queue descriptor supplying the element array and cb desc.
 *
 * Return: 0 on success, -EINVAL if any pointer argument is NULL.
 */
int ia_css_queue_local_init(ia_css_queue_t *qhandle, ia_css_queue_local_t *desc)
{
	if (!qhandle || !desc || !desc->cb_elems || !desc->cb_desc)
		return -EINVAL;	/* invalid parameters */

	/* Mark the queue as Local and build its circular buffer in place */
	qhandle->type = IA_CSS_QUEUE_TYPE_LOCAL;
	ia_css_circbuf_create(&qhandle->desc.cb_local,
			      desc->cb_elems,
			      desc->cb_desc);
	return 0;
}
/*
 * ia_css_queue_remote_init() - bind a queue handle to a remote queue.
 * @qhandle: queue handle to initialize.
 * @desc:    remote queue descriptor (location, processor, state addresses).
 *
 * Return: 0 on success, -EINVAL if a pointer argument is NULL.
 */
int ia_css_queue_remote_init(ia_css_queue_t *qhandle, ia_css_queue_remote_t *desc)
{
	if (!qhandle || !desc)
		return -EINVAL;	/* invalid parameters */

	/* Mark the queue as remote and record where its state lives */
	qhandle->type = IA_CSS_QUEUE_TYPE_REMOTE;
	qhandle->location = desc->location;
	qhandle->proc_id = desc->proc_id;
	qhandle->desc.remote.cb_desc_addr = desc->cb_desc_addr;
	qhandle->desc.remote.cb_elems_addr = desc->cb_elems_addr;

	/*
	 * A remote queue is initialized by its owning (local) processor
	 * before use; nothing further to do on this side.
	 */
	return 0;
}
/*
 * ia_css_queue_uninit() - tear down a queue handle.
 * @qhandle: queue handle; only local queues own a circbuf to destroy.
 *
 * Return: 0 on success, -EINVAL if @qhandle is NULL.
 */
int ia_css_queue_uninit(ia_css_queue_t *qhandle)
{
	if (!qhandle)
		return -EINVAL;

	/* Remote queues carry no local state; only local ones are destroyed */
	if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL)
		ia_css_circbuf_destroy(&qhandle->desc.cb_local);

	return 0;
}
/*
 * ia_css_queue_enqueue() - push one element onto the queue tail.
 * @qhandle: queue handle (local or remote).
 * @item:    value to enqueue.
 *
 * Return: 0 on success, -EINVAL on NULL handle, -ENOBUFS when the queue
 * is full, or an error from the remote load/store helpers.
 */
int ia_css_queue_enqueue(ia_css_queue_t *qhandle, uint32_t item)
{
	int ret;

	if (!qhandle)
		return -EINVAL;

	if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
		/* Local queue: push straight into the circular buffer */
		if (ia_css_circbuf_is_full(&qhandle->desc.cb_local))
			return -ENOBUFS;

		ia_css_circbuf_push(&qhandle->desc.cb_local, item);
	} else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
		ia_css_circbuf_desc_t queue_desc;
		ia_css_circbuf_elem_t queue_elem;
		u32 flags = QUEUE_IGNORE_STEP_FLAG;

		/* a. Fetch the queue descriptor from the remote processor */
		QUEUE_CB_DESC_INIT(&queue_desc);
		ret = ia_css_queue_load(qhandle, &queue_desc, flags);
		if (ret)
			return ret;

		/* b. Reject the push if the remote queue is full */
		if (ia_css_circbuf_desc_is_full(&queue_desc))
			return -ENOBUFS;

		/* Write the element at the tail, then advance the tail */
		queue_elem.val = item;
		ret = ia_css_queue_item_store(qhandle, queue_desc.end,
					      &queue_elem);
		if (ret)
			return ret;
		queue_desc.end = (queue_desc.end + 1) % queue_desc.size;

		/*
		 * c. Write back only the field we changed (end); skipping
		 * the untouched fields avoids needless remote accesses.
		 */
		flags = QUEUE_IGNORE_SIZE_START_STEP_FLAGS;
		ret = ia_css_queue_store(qhandle, &queue_desc, flags);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * ia_css_queue_dequeue() - pop one element from the queue head.
 * @qhandle: queue handle (local or remote).
 * @item:    out: the dequeued value.
 *
 * Return: 0 on success, -EINVAL on NULL arguments, -ENODATA when the
 * queue is empty, or an error from the remote load/store helpers.
 */
int ia_css_queue_dequeue(ia_css_queue_t *qhandle, uint32_t *item)
{
	int ret;

	if (!qhandle || !item)
		return -EINVAL;

	if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
		/* Local queue: pop straight from the circular buffer */
		if (ia_css_circbuf_is_empty(&qhandle->desc.cb_local))
			return -ENODATA;

		*item = ia_css_circbuf_pop(&qhandle->desc.cb_local);
	} else if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
		ia_css_circbuf_desc_t queue_desc;
		ia_css_circbuf_elem_t queue_elem;
		u32 flags = QUEUE_IGNORE_STEP_FLAG;

		/* a. Fetch the queue descriptor from the remote processor */
		QUEUE_CB_DESC_INIT(&queue_desc);
		ret = ia_css_queue_load(qhandle, &queue_desc, flags);
		if (ret)
			return ret;

		/* b. Nothing to pop from an empty remote queue */
		if (ia_css_circbuf_desc_is_empty(&queue_desc))
			return -ENODATA;

		/* Read the element at the head, then advance the head */
		ret = ia_css_queue_item_load(qhandle, queue_desc.start,
					     &queue_elem);
		if (ret)
			return ret;
		*item = queue_elem.val;
		queue_desc.start = OP_std_modadd(queue_desc.start, 1,
						 queue_desc.size);

		/*
		 * c. Write back only the field we changed (start); skipping
		 * the untouched fields avoids needless remote accesses.
		 */
		flags = QUEUE_IGNORE_SIZE_END_STEP_FLAGS;
		ret = ia_css_queue_store(qhandle, &queue_desc, flags);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * ia_css_queue_is_full() - check whether the queue has no free slot.
 * @qhandle: queue handle (local or remote).
 * @is_full: out: true when the queue is full.
 *
 * Return: 0 on success, -EINVAL on NULL arguments or unknown queue type,
 * or an error from the remote load helper.
 */
int ia_css_queue_is_full(ia_css_queue_t *qhandle, bool *is_full)
{
	if (!qhandle || !is_full)
		return -EINVAL;

	if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
		/* Local queue: query the circular buffer directly */
		*is_full = ia_css_circbuf_is_full(&qhandle->desc.cb_local);
		return 0;
	}

	if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
		ia_css_circbuf_desc_t queue_desc;
		int ret;

		/* Fetch the remote descriptor and inspect it locally */
		QUEUE_CB_DESC_INIT(&queue_desc);
		ret = ia_css_queue_load(qhandle, &queue_desc,
					QUEUE_IGNORE_STEP_FLAG);
		if (ret)
			return ret;

		*is_full = ia_css_circbuf_desc_is_full(&queue_desc);
		return 0;
	}

	return -EINVAL;	/* unknown queue type */
}
/*
 * ia_css_queue_get_free_space() - count the free slots in the queue.
 * @qhandle: queue handle (local or remote).
 * @size:    out: number of elements that can still be enqueued.
 *
 * Return: 0 on success, -EINVAL on NULL arguments or unknown queue type,
 * or an error from the remote load helper.
 */
int ia_css_queue_get_free_space(ia_css_queue_t *qhandle, uint32_t *size)
{
	if (!qhandle || !size)
		return -EINVAL;

	if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
		/* Local queue: query the circular buffer directly */
		*size = ia_css_circbuf_get_free_elems(&qhandle->desc.cb_local);
		return 0;
	}

	if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
		ia_css_circbuf_desc_t queue_desc;
		int ret;

		/* Fetch the remote descriptor and inspect it locally */
		QUEUE_CB_DESC_INIT(&queue_desc);
		ret = ia_css_queue_load(qhandle, &queue_desc,
					QUEUE_IGNORE_STEP_FLAG);
		if (ret)
			return ret;

		*size = ia_css_circbuf_desc_get_free_elems(&queue_desc);
		return 0;
	}

	return -EINVAL;	/* unknown queue type */
}
/*
 * ia_css_queue_get_used_space() - count the occupied slots in the queue.
 * @qhandle: queue handle (local or remote).
 * @size:    out: number of elements currently enqueued.
 *
 * Return: 0 on success, -EINVAL on NULL arguments or unknown queue type,
 * or an error from the remote load helper.
 */
int ia_css_queue_get_used_space(ia_css_queue_t *qhandle, uint32_t *size)
{
	if (!qhandle || !size)
		return -EINVAL;

	if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
		/* Local queue: query the circular buffer directly */
		*size = ia_css_circbuf_get_num_elems(&qhandle->desc.cb_local);
		return 0;
	}

	if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
		ia_css_circbuf_desc_t queue_desc;
		int ret;

		/* Fetch the remote descriptor and inspect it locally */
		QUEUE_CB_DESC_INIT(&queue_desc);
		ret = ia_css_queue_load(qhandle, &queue_desc,
					QUEUE_IGNORE_STEP_FLAG);
		if (ret)
			return ret;

		*size = ia_css_circbuf_desc_get_num_elems(&queue_desc);
		return 0;
	}

	return -EINVAL;	/* unknown queue type */
}
/*
 * ia_css_queue_peek() - read the element at @offset from the queue head
 * without removing it.
 * @qhandle: queue handle (local or remote).
 * @offset:  0-based distance from the head; valid range is
 *           [0, number-of-queued-elements).
 * @element: out: the peeked value.
 *
 * Return: 0 on success, -EINVAL on NULL arguments, an out-of-range offset,
 * or an unknown queue type; otherwise an error from the remote helpers.
 */
int ia_css_queue_peek(ia_css_queue_t *qhandle, u32 offset, uint32_t *element)
{
	u32 num_elems;
	int ret;

	if (!qhandle || !element)
		return -EINVAL;

	if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
		/*
		 * Only offsets strictly below the element count address a
		 * stored element; the original "offset > num_elems" test
		 * was off by one and let offset == num_elems peek at the
		 * empty slot just past the tail.
		 */
		num_elems = ia_css_circbuf_get_num_elems(&qhandle->desc.cb_local);
		if (offset >= num_elems)
			return -EINVAL;

		*element = ia_css_circbuf_peek_from_start(&qhandle->desc.cb_local,
							  (int)offset);
		return 0;
	}

	if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
		ia_css_circbuf_desc_t cb_desc;
		ia_css_circbuf_elem_t cb_elem;

		/* a. Load the queue descriptor from remote */
		QUEUE_CB_DESC_INIT(&cb_desc);
		ret = ia_css_queue_load(qhandle, &cb_desc,
					QUEUE_IGNORE_STEP_FLAG);
		if (ret)
			return ret;

		/* Same off-by-one fix as the local branch */
		num_elems = ia_css_circbuf_desc_get_num_elems(&cb_desc);
		if (offset >= num_elems)
			return -EINVAL;

		/* Translate the logical offset to a physical slot index */
		offset = OP_std_modadd(cb_desc.start, offset, cb_desc.size);
		/*
		 * NOTE(review): the uint8_t cast truncates slot indices
		 * >= 256; presumably queue sizes never get that large —
		 * verify against ia_css_queue_item_load()'s prototype.
		 */
		ret = ia_css_queue_item_load(qhandle, (uint8_t)offset, &cb_elem);
		if (ret)
			return ret;

		*element = cb_elem.val;
		return 0;
	}

	return -EINVAL;	/* unknown queue type */
}
/*
 * ia_css_queue_is_empty() - check whether the queue holds no elements.
 * @qhandle:  queue handle (local or remote).
 * @is_empty: out: true when the queue is empty.
 *
 * Return: 0 on success, -EINVAL on NULL arguments or unknown queue type,
 * or an error from the remote load helper.
 */
int ia_css_queue_is_empty(ia_css_queue_t *qhandle, bool *is_empty)
{
	if (!qhandle || !is_empty)
		return -EINVAL;

	if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
		/* Local queue: query the circular buffer directly */
		*is_empty = ia_css_circbuf_is_empty(&qhandle->desc.cb_local);
		return 0;
	}

	if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
		ia_css_circbuf_desc_t queue_desc;
		int ret;

		/* Fetch the remote descriptor and inspect it locally */
		QUEUE_CB_DESC_INIT(&queue_desc);
		ret = ia_css_queue_load(qhandle, &queue_desc,
					QUEUE_IGNORE_STEP_FLAG);
		if (ret)
			return ret;

		*is_empty = ia_css_circbuf_desc_is_empty(&queue_desc);
		return 0;
	}

	return -EINVAL;	/* unknown queue type */
}
/*
 * ia_css_queue_get_size() - report the queue's maximum usable capacity.
 * @qhandle: queue handle (local or remote).
 * @size:    out: capacity in elements.
 *
 * Return: 0 on success, -EINVAL on NULL arguments or unknown queue type,
 * or an error from the remote load helper.
 */
int ia_css_queue_get_size(ia_css_queue_t *qhandle, uint32_t *size)
{
	if (!qhandle || !size)
		return -EINVAL;

	if (qhandle->type == IA_CSS_QUEUE_TYPE_LOCAL) {
		/* Return maximum usable capacity */
		*size = ia_css_circbuf_get_size(&qhandle->desc.cb_local);
		return 0;
	}

	if (qhandle->type == IA_CSS_QUEUE_TYPE_REMOTE) {
		ia_css_circbuf_desc_t cb_desc;
		int ret;

		/* Only the size field is needed; skip loading the rest */
		QUEUE_CB_DESC_INIT(&cb_desc);
		ret = ia_css_queue_load(qhandle, &cb_desc,
					QUEUE_IGNORE_START_END_STEP_FLAGS);
		if (ret)
			return ret;

		/* Return maximum usable capacity */
		*size = cb_desc.size;
		return 0;
	}

	/*
	 * Unknown queue type: every sibling accessor in this file returns
	 * -EINVAL here; the original fell through and returned 0 with
	 * *size left untouched.
	 */
	return -EINVAL;
}
| linux-master | drivers/staging/media/atomisp/pci/runtime/queue/src/queue.c |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.