python_code
stringlengths
0
1.8M
repo_name
stringclasses
7 values
file_path
stringlengths
5
99
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010 - 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "hmm.h" #include "type_support.h" #include "queue_access.h" #include "ia_css_circbuf.h" #include "sp.h" #include "assert_support.h" int ia_css_queue_load( struct ia_css_queue *rdesc, ia_css_circbuf_desc_t *cb_desc, uint32_t ignore_desc_flags) { if (!rdesc || !cb_desc) return -EINVAL; if (rdesc->location == IA_CSS_QUEUE_LOC_SP) { assert(ignore_desc_flags <= QUEUE_IGNORE_DESC_FLAGS_MAX); if (0 == (ignore_desc_flags & QUEUE_IGNORE_SIZE_FLAG)) { cb_desc->size = sp_dmem_load_uint8(rdesc->proc_id, rdesc->desc.remote.cb_desc_addr + offsetof(ia_css_circbuf_desc_t, size)); if (cb_desc->size == 0) { /* Adding back the workaround which was removed while refactoring queues. When reading size through sp_dmem_load_*, sometimes we get back the value as zero. This causes division by 0 exception as the size is used in a modular division operation. 
*/ return -EDOM; } } if (0 == (ignore_desc_flags & QUEUE_IGNORE_START_FLAG)) cb_desc->start = sp_dmem_load_uint8(rdesc->proc_id, rdesc->desc.remote.cb_desc_addr + offsetof(ia_css_circbuf_desc_t, start)); if (0 == (ignore_desc_flags & QUEUE_IGNORE_END_FLAG)) cb_desc->end = sp_dmem_load_uint8(rdesc->proc_id, rdesc->desc.remote.cb_desc_addr + offsetof(ia_css_circbuf_desc_t, end)); if (0 == (ignore_desc_flags & QUEUE_IGNORE_STEP_FLAG)) cb_desc->step = sp_dmem_load_uint8(rdesc->proc_id, rdesc->desc.remote.cb_desc_addr + offsetof(ia_css_circbuf_desc_t, step)); } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) { /* doing DMA transfer of entire structure */ hmm_load(rdesc->desc.remote.cb_desc_addr, (void *)cb_desc, sizeof(ia_css_circbuf_desc_t)); } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) { /* Not supported yet */ return -ENOTSUPP; } return 0; } int ia_css_queue_store( struct ia_css_queue *rdesc, ia_css_circbuf_desc_t *cb_desc, uint32_t ignore_desc_flags) { if (!rdesc || !cb_desc) return -EINVAL; if (rdesc->location == IA_CSS_QUEUE_LOC_SP) { assert(ignore_desc_flags <= QUEUE_IGNORE_DESC_FLAGS_MAX); if (0 == (ignore_desc_flags & QUEUE_IGNORE_SIZE_FLAG)) sp_dmem_store_uint8(rdesc->proc_id, rdesc->desc.remote.cb_desc_addr + offsetof(ia_css_circbuf_desc_t, size), cb_desc->size); if (0 == (ignore_desc_flags & QUEUE_IGNORE_START_FLAG)) sp_dmem_store_uint8(rdesc->proc_id, rdesc->desc.remote.cb_desc_addr + offsetof(ia_css_circbuf_desc_t, start), cb_desc->start); if (0 == (ignore_desc_flags & QUEUE_IGNORE_END_FLAG)) sp_dmem_store_uint8(rdesc->proc_id, rdesc->desc.remote.cb_desc_addr + offsetof(ia_css_circbuf_desc_t, end), cb_desc->end); if (0 == (ignore_desc_flags & QUEUE_IGNORE_STEP_FLAG)) sp_dmem_store_uint8(rdesc->proc_id, rdesc->desc.remote.cb_desc_addr + offsetof(ia_css_circbuf_desc_t, step), cb_desc->step); } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) { /* doing DMA transfer of entire structure */ hmm_store(rdesc->desc.remote.cb_desc_addr, (void *)cb_desc, 
sizeof(ia_css_circbuf_desc_t)); } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) { /* Not supported yet */ return -ENOTSUPP; } return 0; } int ia_css_queue_item_load( struct ia_css_queue *rdesc, u8 position, ia_css_circbuf_elem_t *item) { if (!rdesc || !item) return -EINVAL; if (rdesc->location == IA_CSS_QUEUE_LOC_SP) { sp_dmem_load(rdesc->proc_id, rdesc->desc.remote.cb_elems_addr + position * sizeof(ia_css_circbuf_elem_t), item, sizeof(ia_css_circbuf_elem_t)); } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) { hmm_load(rdesc->desc.remote.cb_elems_addr + position * sizeof(ia_css_circbuf_elem_t), (void *)item, sizeof(ia_css_circbuf_elem_t)); } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) { /* Not supported yet */ return -ENOTSUPP; } return 0; } int ia_css_queue_item_store( struct ia_css_queue *rdesc, u8 position, ia_css_circbuf_elem_t *item) { if (!rdesc || !item) return -EINVAL; if (rdesc->location == IA_CSS_QUEUE_LOC_SP) { sp_dmem_store(rdesc->proc_id, rdesc->desc.remote.cb_elems_addr + position * sizeof(ia_css_circbuf_elem_t), item, sizeof(ia_css_circbuf_elem_t)); } else if (rdesc->location == IA_CSS_QUEUE_LOC_HOST) { hmm_store(rdesc->desc.remote.cb_elems_addr + position * sizeof(ia_css_circbuf_elem_t), (void *)item, sizeof(ia_css_circbuf_elem_t)); } else if (rdesc->location == IA_CSS_QUEUE_LOC_ISP) { /* Not supported yet */ return -ENOTSUPP; } return 0; }
linux-master
drivers/staging/media/atomisp/pci/runtime/queue/src/queue_access.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010 - 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_rmgr.h" int ia_css_rmgr_init(void) { int err = 0; err = ia_css_rmgr_init_vbuf(vbuf_ref); if (!err) err = ia_css_rmgr_init_vbuf(vbuf_write); if (!err) err = ia_css_rmgr_init_vbuf(hmm_buffer_pool); if (err) ia_css_rmgr_uninit(); return err; } /* * @brief Uninitialize resource pool (host) */ void ia_css_rmgr_uninit(void) { ia_css_rmgr_uninit_vbuf(hmm_buffer_pool); ia_css_rmgr_uninit_vbuf(vbuf_write); ia_css_rmgr_uninit_vbuf(vbuf_ref); }
linux-master
drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010-2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "hmm.h" #include "ia_css_rmgr.h" #include <type_support.h> #include <assert_support.h> #include <platform_support.h> /* memset */ #include <ia_css_debug.h> /* * @brief VBUF resource handles */ #define NUM_HANDLES 1000 static struct ia_css_rmgr_vbuf_handle handle_table[NUM_HANDLES]; /* * @brief VBUF resource pool - refpool */ static struct ia_css_rmgr_vbuf_pool refpool; /* * @brief VBUF resource pool - writepool */ static struct ia_css_rmgr_vbuf_pool writepool = { .copy_on_write = true, }; /* * @brief VBUF resource pool - hmmbufferpool */ static struct ia_css_rmgr_vbuf_pool hmmbufferpool = { .copy_on_write = true, .recycle = true, .size = 32, }; struct ia_css_rmgr_vbuf_pool *vbuf_ref = &refpool; struct ia_css_rmgr_vbuf_pool *vbuf_write = &writepool; struct ia_css_rmgr_vbuf_pool *hmm_buffer_pool = &hmmbufferpool; /* * @brief Initialize the reference count (host, vbuf) */ static void rmgr_refcount_init_vbuf(void) { /* initialize the refcount table */ memset(&handle_table, 0, sizeof(handle_table)); } /* * @brief Retain the reference count for a handle (host, vbuf) * * @param handle The pointer to the handle */ void ia_css_rmgr_refcount_retain_vbuf(struct ia_css_rmgr_vbuf_handle **handle) { int i; struct ia_css_rmgr_vbuf_handle *h; if ((!handle) || (!*handle)) { IA_CSS_LOG("Invalid inputs"); return; } /* new vbuf to count on */ if ((*handle)->count == 0) { h = *handle; *handle = NULL; for (i = 
0; i < NUM_HANDLES; i++) { if (handle_table[i].count == 0) { *handle = &handle_table[i]; break; } } /* if the loop dus not break and *handle == NULL * this is an error handle and report it. */ if (!*handle) { ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "ia_css_i_host_refcount_retain_vbuf() failed to find empty slot!\n"); return; } (*handle)->vptr = h->vptr; (*handle)->size = h->size; } (*handle)->count++; } /* * @brief Release the reference count for a handle (host, vbuf) * * @param handle The pointer to the handle */ void ia_css_rmgr_refcount_release_vbuf(struct ia_css_rmgr_vbuf_handle **handle) { if ((!handle) || ((*handle) == NULL) || (((*handle)->count) == 0)) { ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "%s invalid arguments!\n", __func__); return; } /* decrease reference count */ (*handle)->count--; /* remove from admin */ if ((*handle)->count == 0) { (*handle)->vptr = 0x0; (*handle)->size = 0; *handle = NULL; } } /* * @brief Initialize the resource pool (host, vbuf) * * @param pool The pointer to the pool */ int ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool) { int err = 0; size_t bytes_needed; rmgr_refcount_init_vbuf(); assert(pool); if (!pool) return -EINVAL; /* initialize the recycle pool if used */ if (pool->recycle && pool->size) { /* allocate memory for storing the handles */ bytes_needed = sizeof(void *) * pool->size; pool->handles = kvmalloc(bytes_needed, GFP_KERNEL); if (pool->handles) memset(pool->handles, 0, bytes_needed); else err = -ENOMEM; } else { /* just in case, set the size to 0 */ pool->size = 0; pool->handles = NULL; } return err; } /* * @brief Uninitialize the resource pool (host, vbuf) * * @param pool The pointer to the pool */ void ia_css_rmgr_uninit_vbuf(struct ia_css_rmgr_vbuf_pool *pool) { u32 i; ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__); if (!pool) { ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "%s NULL argument\n", __func__); return; } if (pool->handles) { /* free the hmm buffers */ for (i = 0; i < pool->size; i++) { 
if (pool->handles[i]) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " freeing/releasing %x (count=%d)\n", pool->handles[i]->vptr, pool->handles[i]->count); /* free memory */ hmm_free(pool->handles[i]->vptr); /* remove from refcount admin */ ia_css_rmgr_refcount_release_vbuf(&pool->handles[i]); } } /* now free the pool handles list */ kvfree(pool->handles); pool->handles = NULL; } } /* * @brief Push a handle to the pool * * @param pool The pointer to the pool * @param handle The pointer to the handle */ static void rmgr_push_handle(struct ia_css_rmgr_vbuf_pool *pool, struct ia_css_rmgr_vbuf_handle **handle) { u32 i; bool succes = false; assert(pool); assert(pool->recycle); assert(pool->handles); assert(handle); for (i = 0; i < pool->size; i++) { if (!pool->handles[i]) { ia_css_rmgr_refcount_retain_vbuf(handle); pool->handles[i] = *handle; succes = true; break; } } assert(succes); } /* * @brief Pop a handle from the pool * * @param pool The pointer to the pool * @param handle The pointer to the handle */ static void rmgr_pop_handle(struct ia_css_rmgr_vbuf_pool *pool, struct ia_css_rmgr_vbuf_handle **handle) { u32 i; assert(pool); assert(pool->recycle); assert(pool->handles); assert(handle); assert(*handle); for (i = 0; i < pool->size; i++) { if ((pool->handles[i]) && (pool->handles[i]->size == (*handle)->size)) { *handle = pool->handles[i]; pool->handles[i] = NULL; /* dont release, we are returning it... 
* ia_css_rmgr_refcount_release_vbuf(handle); */ return; } } } /* * @brief Acquire a handle from the pool (host, vbuf) * * @param pool The pointer to the pool * @param handle The pointer to the handle */ void ia_css_rmgr_acq_vbuf(struct ia_css_rmgr_vbuf_pool *pool, struct ia_css_rmgr_vbuf_handle **handle) { if ((!pool) || (!handle) || (!*handle)) { IA_CSS_LOG("Invalid inputs"); return; } if (pool->copy_on_write) { struct ia_css_rmgr_vbuf_handle *new_handle; struct ia_css_rmgr_vbuf_handle h = { 0 }; /* only one reference, reuse (no new retain) */ if ((*handle)->count == 1) return; /* more than one reference, release current buffer */ if ((*handle)->count > 1) { /* store current values */ h.vptr = 0x0; h.size = (*handle)->size; /* release ref to current buffer */ ia_css_rmgr_refcount_release_vbuf(handle); new_handle = &h; } else { new_handle = *handle; } /* get new buffer for needed size */ if (new_handle->vptr == 0x0) { if (pool->recycle) { /* try and pop from pool */ rmgr_pop_handle(pool, &new_handle); } if (new_handle->vptr == 0x0) { /* we need to allocate */ new_handle->vptr = hmm_alloc(new_handle->size); } else { /* we popped a buffer */ *handle = new_handle; return; } } /* Note that new_handle will change to an internally maintained one */ ia_css_rmgr_refcount_retain_vbuf(&new_handle); *handle = new_handle; return; } /* Note that handle will change to an internally maintained one */ ia_css_rmgr_refcount_retain_vbuf(handle); } /* * @brief Release a handle to the pool (host, vbuf) * * @param pool The pointer to the pool * @param handle The pointer to the handle */ void ia_css_rmgr_rel_vbuf(struct ia_css_rmgr_vbuf_pool *pool, struct ia_css_rmgr_vbuf_handle **handle) { if ((!pool) || (!handle) || (!*handle)) { IA_CSS_LOG("Invalid inputs"); return; } /* release the handle */ if ((*handle)->count == 1) { if (!pool->recycle) { /* non recycling pool, free mem */ hmm_free((*handle)->vptr); } else { /* recycle to pool */ rmgr_push_handle(pool, handle); } } 
ia_css_rmgr_refcount_release_vbuf(handle); *handle = NULL; }
linux-master
drivers/staging/media/atomisp/pci/runtime/rmgr/src/rmgr_vbuf.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/math.h> #include <math_support.h> #include <gdc_device.h> /* HR_GDC_N */ #include "hmm.h" #include "isp.h" /* ISP_VEC_NELEMS */ #include "ia_css_binary.h" #include "ia_css_debug.h" #include "ia_css_util.h" #include "ia_css_isp_param.h" #include "sh_css_internal.h" #include "sh_css_sp.h" #include "sh_css_firmware.h" #include "sh_css_defs.h" #include "sh_css_legacy.h" #include "atomisp_internal.h" #include "vf/vf_1.0/ia_css_vf.host.h" #include "sc/sc_1.0/ia_css_sc.host.h" #include "sdis/sdis_1.0/ia_css_sdis.host.h" #include "fixedbds/fixedbds_1.0/ia_css_fixedbds_param.h" /* FRAC_ACC */ #include "camera/pipe/interface/ia_css_pipe_binarydesc.h" #include "assert_support.h" #define IMPLIES(a, b) (!(a) || (b)) /* A => B */ static struct ia_css_binary_xinfo *all_binaries; /* ISP binaries only (no SP) */ static struct ia_css_binary_xinfo *binary_infos[IA_CSS_BINARY_NUM_MODES] = { NULL, }; static void ia_css_binary_dvs_env(const struct ia_css_binary_info *info, const struct ia_css_resolution *dvs_env, struct ia_css_resolution *binary_dvs_env) { if (info->enable.dvs_envelope) { assert(dvs_env); binary_dvs_env->width = max(dvs_env->width, SH_CSS_MIN_DVS_ENVELOPE); binary_dvs_env->height = max(dvs_env->height, SH_CSS_MIN_DVS_ENVELOPE); } } static void ia_css_binary_internal_res(const struct ia_css_frame_info *in_info, const struct ia_css_frame_info *bds_out_info, const struct ia_css_frame_info 
*out_info, const struct ia_css_resolution *dvs_env, const struct ia_css_binary_info *info, struct ia_css_resolution *internal_res) { unsigned int isp_tmp_internal_width = 0, isp_tmp_internal_height = 0; bool binary_supports_yuv_ds = info->enable.ds & 2; struct ia_css_resolution binary_dvs_env; binary_dvs_env.width = 0; binary_dvs_env.height = 0; ia_css_binary_dvs_env(info, dvs_env, &binary_dvs_env); if (binary_supports_yuv_ds) { if (in_info) { isp_tmp_internal_width = in_info->res.width + info->pipeline.left_cropping + binary_dvs_env.width; isp_tmp_internal_height = in_info->res.height + info->pipeline.top_cropping + binary_dvs_env.height; } } else if ((bds_out_info) && (out_info) && /* TODO: hack to make video_us case work. this should be reverted after a nice solution in ISP */ (bds_out_info->res.width >= out_info->res.width)) { isp_tmp_internal_width = bds_out_info->padded_width; isp_tmp_internal_height = bds_out_info->res.height; } else { if (out_info) { isp_tmp_internal_width = out_info->padded_width; isp_tmp_internal_height = out_info->res.height; } } /* We first calculate the resolutions used by the ISP. After that, * we use those resolutions to compute sizes for tables etc. */ internal_res->width = __ISP_INTERNAL_WIDTH(isp_tmp_internal_width, (int)binary_dvs_env.width, info->pipeline.left_cropping, info->pipeline.mode, info->pipeline.c_subsampling, info->output.num_chunks, info->pipeline.pipelining); internal_res->height = __ISP_INTERNAL_HEIGHT(isp_tmp_internal_height, info->pipeline.top_cropping, binary_dvs_env.height); } /* Computation results of the origin coordinate of bayer on the shading table. */ struct sh_css_shading_table_bayer_origin_compute_results { u32 bayer_scale_hor_ratio_in; /* Horizontal ratio (in) of bayer scaling. */ u32 bayer_scale_hor_ratio_out; /* Horizontal ratio (out) of bayer scaling. */ u32 bayer_scale_ver_ratio_in; /* Vertical ratio (in) of bayer scaling. */ u32 bayer_scale_ver_ratio_out; /* Vertical ratio (out) of bayer scaling. 
*/ u32 sc_bayer_origin_x_bqs_on_shading_table; /* X coordinate (in bqs) of bayer origin on shading table. */ u32 sc_bayer_origin_y_bqs_on_shading_table; /* Y coordinate (in bqs) of bayer origin on shading table. */ }; /* Get the requirements for the shading correction. */ static int ia_css_binary_compute_shading_table_bayer_origin( const struct ia_css_binary *binary, /* [in] */ unsigned int required_bds_factor, /* [in] */ const struct ia_css_stream_config *stream_config, /* [in] */ struct sh_css_shading_table_bayer_origin_compute_results *res) /* [out] */ { int err; /* Rational fraction of the fixed bayer downscaling factor. */ struct u32_fract bds; /* Left padding set by InputFormatter. */ unsigned int left_padding_bqs; /* in bqs */ /* Flag for the NEED_BDS_FACTOR_2_00 macro defined in isp kernels. */ unsigned int need_bds_factor_2_00; /* Left padding adjusted inside the isp. */ unsigned int left_padding_adjusted_bqs; /* in bqs */ /* Bad pixels caused by filters. NxN-filter (before/after bayer scaling) moves the image position to right/bottom directions by a few pixels. It causes bad pixels at left/top sides, and effective bayer size decreases. */ unsigned int bad_bqs_on_left_before_bs; /* in bqs */ unsigned int bad_bqs_on_left_after_bs; /* in bqs */ unsigned int bad_bqs_on_top_before_bs; /* in bqs */ unsigned int bad_bqs_on_top_after_bs; /* in bqs */ /* Get the rational fraction of bayer downscaling factor. */ err = sh_css_bds_factor_get_fract(required_bds_factor, &bds); if (err) return err; /* Set the left padding set by InputFormatter. (ifmtr.c) */ if (stream_config->left_padding == -1) left_padding_bqs = _ISP_BQS(binary->left_padding); else left_padding_bqs = (unsigned int)((int)ISP_VEC_NELEMS - _ISP_BQS(stream_config->left_padding)); /* Set the left padding adjusted inside the isp. When bds_factor 2.00 is needed, some padding is added to left_padding inside the isp, before bayer downscaling. 
(raw.isp.c) (Hopefully, left_crop/left_padding/top_crop should be defined in css appropriately, depending on bds_factor.) */ need_bds_factor_2_00 = ((binary->info->sp.bds.supported_bds_factors & (PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_00) | PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_2_50) | PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_3_00) | PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_00) | PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_4_50) | PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_5_00) | PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_6_00) | PACK_BDS_FACTOR(SH_CSS_BDS_FACTOR_8_00))) != 0); if (need_bds_factor_2_00 && binary->info->sp.pipeline.left_cropping > 0) left_padding_adjusted_bqs = left_padding_bqs + ISP_VEC_NELEMS; else left_padding_adjusted_bqs = left_padding_bqs; /* Currently, the bad pixel caused by filters before bayer scaling is NOT considered, because the bad pixel is subtle. When some large filter is used in the future, we need to consider the bad pixel. Currently, when bds_factor isn't 1.00, 3x3 anti-alias filter is applied to each color plane(Gr/R/B/Gb) before bayer downscaling. This filter moves each color plane to right/bottom directions by 1 pixel at the most, depending on downscaling factor. */ bad_bqs_on_left_before_bs = 0; bad_bqs_on_top_before_bs = 0; /* Currently, the bad pixel caused by filters after bayer scaling is NOT considered, because the bad pixel is subtle. When some large filter is used in the future, we need to consider the bad pixel. Currently, when DPC&BNR is processed between bayer scaling and shading correction, DPC&BNR moves each color plane to right/bottom directions by 1 pixel. */ bad_bqs_on_left_after_bs = 0; bad_bqs_on_top_after_bs = 0; /* Calculate the origin of bayer (real sensor data area) located on the shading table during the shading correction. 
*/ res->sc_bayer_origin_x_bqs_on_shading_table = ((left_padding_adjusted_bqs + bad_bqs_on_left_before_bs) * bds.denominator + bds.numerator / 2) / bds.numerator + bad_bqs_on_left_after_bs; /* "+ bds.numerator / 2": rounding for division by bds.numerator */ res->sc_bayer_origin_y_bqs_on_shading_table = (bad_bqs_on_top_before_bs * bds.denominator + bds.numerator / 2) / bds.numerator + bad_bqs_on_top_after_bs; /* "+ bds.numerator / 2": rounding for division by bds.numerator */ res->bayer_scale_hor_ratio_in = bds.numerator; res->bayer_scale_hor_ratio_out = bds.denominator; res->bayer_scale_ver_ratio_in = bds.numerator; res->bayer_scale_ver_ratio_out = bds.denominator; return err; } /* Get the shading information of Shading Correction Type 1. */ static int binary_get_shading_info_type_1(const struct ia_css_binary *binary, /* [in] */ unsigned int required_bds_factor, /* [in] */ const struct ia_css_stream_config *stream_config, /* [in] */ struct ia_css_shading_info *info) /* [out] */ { int err; struct sh_css_shading_table_bayer_origin_compute_results res; assert(binary); assert(info); info->type = IA_CSS_SHADING_CORRECTION_TYPE_1; info->info.type_1.enable = binary->info->sp.enable.sc; info->info.type_1.num_hor_grids = binary->sctbl_width_per_color; info->info.type_1.num_ver_grids = binary->sctbl_height; info->info.type_1.bqs_per_grid_cell = (1 << binary->deci_factor_log2); /* Initialize by default values. 
*/ info->info.type_1.bayer_scale_hor_ratio_in = 1; info->info.type_1.bayer_scale_hor_ratio_out = 1; info->info.type_1.bayer_scale_ver_ratio_in = 1; info->info.type_1.bayer_scale_ver_ratio_out = 1; info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = 0; info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = 0; err = ia_css_binary_compute_shading_table_bayer_origin( binary, required_bds_factor, stream_config, &res); if (err) return err; info->info.type_1.bayer_scale_hor_ratio_in = res.bayer_scale_hor_ratio_in; info->info.type_1.bayer_scale_hor_ratio_out = res.bayer_scale_hor_ratio_out; info->info.type_1.bayer_scale_ver_ratio_in = res.bayer_scale_ver_ratio_in; info->info.type_1.bayer_scale_ver_ratio_out = res.bayer_scale_ver_ratio_out; info->info.type_1.sc_bayer_origin_x_bqs_on_shading_table = res.sc_bayer_origin_x_bqs_on_shading_table; info->info.type_1.sc_bayer_origin_y_bqs_on_shading_table = res.sc_bayer_origin_y_bqs_on_shading_table; return err; } int ia_css_binary_get_shading_info(const struct ia_css_binary *binary, /* [in] */ enum ia_css_shading_correction_type type, /* [in] */ unsigned int required_bds_factor, /* [in] */ const struct ia_css_stream_config *stream_config, /* [in] */ struct ia_css_shading_info *shading_info, /* [out] */ struct ia_css_pipe_config *pipe_config) /* [out] */ { int err; assert(binary); assert(shading_info); IA_CSS_ENTER_PRIVATE("binary=%p, type=%d, required_bds_factor=%d, stream_config=%p", binary, type, required_bds_factor, stream_config); if (type == IA_CSS_SHADING_CORRECTION_TYPE_1) err = binary_get_shading_info_type_1(binary, required_bds_factor, stream_config, shading_info); else err = -ENOTSUPP; IA_CSS_LEAVE_ERR_PRIVATE(err); return err; } static void sh_css_binary_common_grid_info(const struct ia_css_binary *binary, struct ia_css_grid_info *info) { assert(binary); assert(info); info->isp_in_width = binary->internal_frame_info.res.width; info->isp_in_height = binary->internal_frame_info.res.height; info->vamem_type = 
IA_CSS_VAMEM_TYPE_2; } void ia_css_binary_dvs_grid_info(const struct ia_css_binary *binary, struct ia_css_grid_info *info, struct ia_css_pipe *pipe) { struct ia_css_dvs_grid_info *dvs_info; (void)pipe; assert(binary); assert(info); dvs_info = &info->dvs_grid.dvs_grid_info; /* for DIS, we use a division instead of a ceil_div. If this is smaller * than the 3a grid size, it indicates that the outer values are not * valid for DIS. */ dvs_info->enable = binary->info->sp.enable.dis; dvs_info->width = binary->dis.grid.dim.width; dvs_info->height = binary->dis.grid.dim.height; dvs_info->aligned_width = binary->dis.grid.pad.width; dvs_info->aligned_height = binary->dis.grid.pad.height; dvs_info->bqs_per_grid_cell = 1 << binary->dis.deci_factor_log2; dvs_info->num_hor_coefs = binary->dis.coef.dim.width; dvs_info->num_ver_coefs = binary->dis.coef.dim.height; sh_css_binary_common_grid_info(binary, info); } void ia_css_binary_dvs_stat_grid_info( const struct ia_css_binary *binary, struct ia_css_grid_info *info, struct ia_css_pipe *pipe) { (void)pipe; sh_css_binary_common_grid_info(binary, info); return; } int ia_css_binary_3a_grid_info(const struct ia_css_binary *binary, struct ia_css_grid_info *info, struct ia_css_pipe *pipe) { struct ia_css_3a_grid_info *s3a_info; int err = 0; IA_CSS_ENTER_PRIVATE("binary=%p, info=%p, pipe=%p", binary, info, pipe); assert(binary); assert(info); s3a_info = &info->s3a_grid; /* 3A statistics grid */ s3a_info->enable = binary->info->sp.enable.s3a; s3a_info->width = binary->s3atbl_width; s3a_info->height = binary->s3atbl_height; s3a_info->aligned_width = binary->s3atbl_isp_width; s3a_info->aligned_height = binary->s3atbl_isp_height; s3a_info->bqs_per_grid_cell = (1 << binary->deci_factor_log2); s3a_info->deci_factor_log2 = binary->deci_factor_log2; s3a_info->elem_bit_depth = SH_CSS_BAYER_BITS; s3a_info->use_dmem = binary->info->sp.s3a.s3atbl_use_dmem; s3a_info->has_histogram = 0; IA_CSS_LEAVE_ERR_PRIVATE(err); return err; } static void 
binary_init_pc_histogram(struct sh_css_pc_histogram *histo) { assert(histo); histo->length = 0; histo->run = NULL; histo->stall = NULL; } static void binary_init_metrics(struct sh_css_binary_metrics *metrics, const struct ia_css_binary_info *info) { assert(metrics); assert(info); metrics->mode = info->pipeline.mode; metrics->id = info->id; metrics->next = NULL; binary_init_pc_histogram(&metrics->isp_histogram); binary_init_pc_histogram(&metrics->sp_histogram); } /* move to host part of output module */ static bool binary_supports_output_format(const struct ia_css_binary_xinfo *info, enum ia_css_frame_format format) { int i; assert(info); for (i = 0; i < info->num_output_formats; i++) { if (info->output_formats[i] == format) return true; } return false; } static bool binary_supports_vf_format(const struct ia_css_binary_xinfo *info, enum ia_css_frame_format format) { int i; assert(info); for (i = 0; i < info->num_vf_formats; i++) { if (info->vf_formats[i] == format) return true; } return false; } /* move to host part of bds module */ static bool supports_bds_factor(u32 supported_factors, uint32_t bds_factor) { return ((supported_factors & PACK_BDS_FACTOR(bds_factor)) != 0); } static int binary_init_info(struct ia_css_binary_xinfo *info, unsigned int i, bool *binary_found) { const unsigned char *blob = sh_css_blob_info[i].blob; unsigned int size = sh_css_blob_info[i].header.blob.size; if ((!info) || (!binary_found)) return -EINVAL; *info = sh_css_blob_info[i].header.info.isp; *binary_found = blob; info->blob_index = i; /* we don't have this binary, skip it */ if (!size) return 0; info->xmem_addr = sh_css_load_blob(blob, size); if (!info->xmem_addr) return -ENOMEM; return 0; } /* When binaries are put at the beginning, they will only * be selected if no other primary matches. 
*/ int ia_css_binary_init_infos(void) { unsigned int i; unsigned int num_of_isp_binaries = sh_css_num_binaries - NUM_OF_SPS - NUM_OF_BLS; if (num_of_isp_binaries == 0) return 0; all_binaries = kvmalloc(num_of_isp_binaries * sizeof(*all_binaries), GFP_KERNEL); if (!all_binaries) return -ENOMEM; for (i = 0; i < num_of_isp_binaries; i++) { int ret; struct ia_css_binary_xinfo *binary = &all_binaries[i]; bool binary_found; ret = binary_init_info(binary, i, &binary_found); if (ret) return ret; if (!binary_found) continue; /* Prepend new binary information */ binary->next = binary_infos[binary->sp.pipeline.mode]; binary_infos[binary->sp.pipeline.mode] = binary; binary->blob = &sh_css_blob_info[i]; binary->mem_offsets = sh_css_blob_info[i].mem_offsets; } return 0; } int ia_css_binary_uninit(void) { unsigned int i; struct ia_css_binary_xinfo *b; for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++) { for (b = binary_infos[i]; b; b = b->next) { if (b->xmem_addr) hmm_free(b->xmem_addr); b->xmem_addr = mmgr_NULL; } binary_infos[i] = NULL; } kvfree(all_binaries); return 0; } /* @brief Compute decimation factor for 3A statistics and shading correction. * * @param[in] width Frame width in pixels. * @param[in] height Frame height in pixels. * @return Log2 of decimation factor (= grid cell size) in bayer quads. 
 */

/*
 * Compute the log2 3A/shading-grid decimation factor for the given frame
 * size (in bayer quads).  Returns the larger of (a) the smallest factor
 * that keeps the grid within SH_CSS_MAX_BQ_GRID_WIDTH/HEIGHT and (b) the
 * factor mandated by the specification table below.
 */
static int binary_grid_deci_factor_log2(int width, int height)
{
	/* 3A/Shading decimation factor specification (at August 2008)
	 * ------------------------------------------------------------------
	 * [Image Width (BQ)]  [Decimation Factor (BQ)]  [Resulting grid cells]
	 * 1280 and over                32                       40 and over
	 *  640 to 1279                 16                       40 to 80
	 *  up to 639                    8                       up to 80
	 * ------------------------------------------------------------------
	 * (the original table characters were mis-encoded; ranges above are
	 *  reconstructed from the constants below — confirm against spec)
	 */
	/* Maximum and minimum decimation factor by the specification */
#define MAX_SPEC_DECI_FACT_LOG2    5
#define MIN_SPEC_DECI_FACT_LOG2    3
	/* the smallest frame width in bayer quads when decimation factor (log2) is 5 or 4, by the specification */
#define DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ    1280
#define DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ    640

	int smallest_factor; /* the smallest factor (log2) where the number of cells does not exceed the limitation */
	int spec_factor;     /* the factor (log2) which satisfies the specification */

	/* Currently supported maximum width and height are 5120(=80*64) and 3840(=60*64). */
	assert(ISP_BQ_GRID_WIDTH(width,
				 MAX_SPEC_DECI_FACT_LOG2) <= SH_CSS_MAX_BQ_GRID_WIDTH);
	assert(ISP_BQ_GRID_HEIGHT(height,
				  MAX_SPEC_DECI_FACT_LOG2) <= SH_CSS_MAX_BQ_GRID_HEIGHT);

	/* Compute the smallest factor. */
	smallest_factor = MAX_SPEC_DECI_FACT_LOG2;
	while (ISP_BQ_GRID_WIDTH(width,
				 smallest_factor - 1) <= SH_CSS_MAX_BQ_GRID_WIDTH &&
	       ISP_BQ_GRID_HEIGHT(height, smallest_factor - 1) <= SH_CSS_MAX_BQ_GRID_HEIGHT
	       && smallest_factor > MIN_SPEC_DECI_FACT_LOG2)
		smallest_factor--;

	/* Get the factor by the specification. */
	if (_ISP_BQS(width) >= DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ)
		spec_factor = 5;
	else if (_ISP_BQS(width) >= DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ)
		spec_factor = 4;
	else
		spec_factor = 3;

	/* If smallest_factor is smaller than or equal to spec_factor, choose
	 * spec_factor to follow the specification.
	 * If smallest_factor is larger than spec_factor, choose
	 * smallest_factor.
	 *
	 * ex. width=2560, height=1920
	 *     smallest_factor=4, spec_factor=5
	 *     smallest_factor < spec_factor -> return spec_factor
	 *
	 * ex. width=300, height=3000
	 *     smallest_factor=5, spec_factor=3
	 *     smallest_factor > spec_factor -> return smallest_factor
	 */
	return max(smallest_factor, spec_factor);

#undef MAX_SPEC_DECI_FACT_LOG2
#undef MIN_SPEC_DECI_FACT_LOG2
#undef DECI_FACT_LOG2_5_SMALLEST_FRAME_WIDTH_BQ
#undef DECI_FACT_LOG2_4_SMALLEST_FRAME_WIDTH_BQ
}

/*
 * Compute the padded line width (in pixels) of the binary's input frame.
 * When no scaling is needed the ISP-internal width is used unchanged;
 * otherwise the width is rounded up to a multiple of 2*ISP_VEC_NELEMS,
 * optionally accounting for left-padding pixels.
 */
static int binary_in_frame_padded_width(int in_frame_width,
					int isp_internal_width,
					int dvs_env_width,
					int stream_config_left_padding,
					int left_cropping,
					bool need_scaling)
{
	int rval;
	int nr_of_left_paddings;	/* number of paddings pixels on the left of an image line */

#if defined(ISP2401)
	/* the output image line of Input System 2401 does not have the left paddings  */
	nr_of_left_paddings = 0;
#else
	/* in other cases, the left padding pixels are always 128 */
	nr_of_left_paddings = 2 * ISP_VEC_NELEMS;
#endif
	if (need_scaling) {
		/* In SDV use-case, we need to match left-padding of
		 * primary and the video binary. */
		if (stream_config_left_padding != -1) {
			/* Different than before, we do left&right padding. */
			rval = CEIL_MUL(in_frame_width + nr_of_left_paddings,
					2 * ISP_VEC_NELEMS);
		} else {
			/* Different than before, we do left&right padding. */
			in_frame_width += dvs_env_width;
			rval = CEIL_MUL(in_frame_width +
					(left_cropping ? nr_of_left_paddings : 0),
					2 * ISP_VEC_NELEMS);
		}
	} else {
		rval = isp_internal_width;
	}

	return rval;
}

/*
 * Fill in all derived frame/grid geometry of @binary for the selected
 * @xinfo: input/output/internal/viewfinder frame info, DVS envelope,
 * 3A (s3a), shading (sc) and morph table dimensions, and DVS (dis) info.
 * Allocates ISP parameters unless @accelerator already did so.
 * Returns 0 on success or a negative errno (allocation / vf config /
 * missing output info).
 */
int
ia_css_binary_fill_info(const struct ia_css_binary_xinfo *xinfo,
			bool online,
			bool two_ppc,
			enum atomisp_input_format stream_format,
			const struct ia_css_frame_info *in_info, /* can be NULL */
			const struct ia_css_frame_info *bds_out_info, /* can be NULL */
			const struct ia_css_frame_info *out_info[], /* can be NULL */
			const struct ia_css_frame_info *vf_info, /* can be NULL */
			struct ia_css_binary *binary,
			struct ia_css_resolution *dvs_env,
			int stream_config_left_padding,
			bool accelerator)
{
	const struct ia_css_binary_info *info = &xinfo->sp;
	unsigned int dvs_env_width = 0,
	dvs_env_height = 0,
	vf_log_ds = 0,
	s3a_log_deci = 0,
	bits_per_pixel = 0,
	/* Resolution at SC/3A/DIS kernel. */
	sc_3a_dis_width = 0,
	/* Resolution at SC/3A/DIS kernel. */
	sc_3a_dis_padded_width = 0,
	/* Resolution at SC/3A/DIS kernel. */
	sc_3a_dis_height = 0,
	isp_internal_width = 0,
	isp_internal_height = 0,
	s3a_isp_width = 0;

	bool need_scaling = false;
	struct ia_css_resolution binary_dvs_env, internal_res;
	int err;
	unsigned int i;
	const struct ia_css_frame_info *bin_out_info = NULL;

	assert(info);
	assert(binary);

	binary->info = xinfo;
	if (!accelerator) {
		/* binary->css_params has been filled by accelerator itself. */
		err = ia_css_isp_param_allocate_isp_parameters(
		    &binary->mem_params, &binary->css_params,
		    &info->mem_initializers);
		if (err) {
			return err;
		}
	}
	/* Pick the first output port with a non-zero width as the main
	 * binary output used for scaling/vf decisions below. */
	for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
		if (out_info[i] && (out_info[i]->res.width != 0)) {
			bin_out_info = out_info[i];
			break;
		}
	}
	if (in_info && bin_out_info) {
		need_scaling = (in_info->res.width != bin_out_info->res.width) ||
			       (in_info->res.height != bin_out_info->res.height);
	}

	/* binary_dvs_env has to be equal or larger than SH_CSS_MIN_DVS_ENVELOPE */
	binary_dvs_env.width = 0;
	binary_dvs_env.height = 0;
	ia_css_binary_dvs_env(info, dvs_env, &binary_dvs_env);
	dvs_env_width = binary_dvs_env.width;
	dvs_env_height = binary_dvs_env.height;
	binary->dvs_envelope.width = dvs_env_width;
	binary->dvs_envelope.height = dvs_env_height;

	/* internal resolution calculation */
	internal_res.width = 0;
	internal_res.height = 0;
	ia_css_binary_internal_res(in_info, bds_out_info, bin_out_info, dvs_env,
				   info, &internal_res);
	isp_internal_width = internal_res.width;
	isp_internal_height = internal_res.height;

	/* internal frame info */
	if (bin_out_info) /* { */
		binary->internal_frame_info.format = bin_out_info->format;
	/* } */
	binary->internal_frame_info.res.width = isp_internal_width;
	binary->internal_frame_info.padded_width = CEIL_MUL(isp_internal_width,
		2 * ISP_VEC_NELEMS);
	binary->internal_frame_info.res.height = isp_internal_height;
	/* NOTE: bits_per_pixel is still 0 here; it is only set from
	 * in_info / the stream format further below. */
	binary->internal_frame_info.raw_bit_depth = bits_per_pixel;

	if (in_info) {
		binary->effective_in_frame_res.width = in_info->res.width;
		binary->effective_in_frame_res.height = in_info->res.height;
		bits_per_pixel = in_info->raw_bit_depth;
		/* input info */
		binary->in_frame_info.res.width = in_info->res.width +
						  info->pipeline.left_cropping;
		binary->in_frame_info.res.height = in_info->res.height +
						   info->pipeline.top_cropping;
		binary->in_frame_info.res.width += dvs_env_width;
		binary->in_frame_info.res.height += dvs_env_height;
		binary->in_frame_info.padded_width =
		    binary_in_frame_padded_width(in_info->res.width,
						 isp_internal_width,
						 dvs_env_width,
						 stream_config_left_padding,
						 info->pipeline.left_cropping,
						 need_scaling);
		binary->in_frame_info.format = in_info->format;
		binary->in_frame_info.raw_bayer_order = in_info->raw_bayer_order;
		binary->in_frame_info.crop_info = in_info->crop_info;
	}

	if (online) {
		bits_per_pixel = ia_css_util_input_format_bpp(
				     stream_format, two_ppc);
	}
	binary->in_frame_info.raw_bit_depth = bits_per_pixel;

	for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
		if (out_info[i]) {
			binary->out_frame_info[i].res.width = out_info[i]->res.width;
			binary->out_frame_info[i].res.height = out_info[i]->res.height;
			binary->out_frame_info[i].padded_width = out_info[i]->padded_width;
			if (info->pipeline.mode == IA_CSS_BINARY_MODE_COPY) {
				binary->out_frame_info[i].raw_bit_depth = bits_per_pixel;
			} else {
				/* Only relevant for RAW format.
				 * At the moment, all outputs are raw, 16 bit per pixel, except for copy.
				 * To do this cleanly, the binary should specify in its info
				 * the bit depth per output channel.
				 */
				binary->out_frame_info[i].raw_bit_depth = 16;
			}
			binary->out_frame_info[i].format = out_info[i]->format;
		}
	}

	if (vf_info && (vf_info->res.width != 0)) {
		err = ia_css_vf_configure(binary, bin_out_info,
					  (struct ia_css_frame_info *)vf_info, &vf_log_ds);
		if (err) {
			/* undo the allocation done above on failure */
			if (!accelerator) {
				ia_css_isp_param_destroy_isp_parameters(
				    &binary->mem_params,
				    &binary->css_params);
			}
			return err;
		}
	}
	binary->vf_downscale_log2 = vf_log_ds;

	binary->online = online;
	binary->input_format = stream_format;

	/* viewfinder output info */
	if ((vf_info) && (vf_info->res.width != 0)) {
		unsigned int vf_out_vecs, vf_out_width, vf_out_height;

		binary->vf_frame_info.format = vf_info->format;
		if (!bin_out_info)
			return -EINVAL;
		vf_out_vecs = __ISP_VF_OUTPUT_WIDTH_VECS(bin_out_info->padded_width,
			      vf_log_ds);
		vf_out_width = _ISP_VF_OUTPUT_WIDTH(vf_out_vecs);
		vf_out_height = _ISP_VF_OUTPUT_HEIGHT(bin_out_info->res.height,
						      vf_log_ds);

		/* For preview mode, output pin is used instead of vf. */
		if (info->pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW) {
			binary->out_frame_info[0].res.width =
			    (bin_out_info->res.width >> vf_log_ds);
			binary->out_frame_info[0].padded_width = vf_out_width;
			binary->out_frame_info[0].res.height = vf_out_height;

			binary->vf_frame_info.res.width = 0;
			binary->vf_frame_info.padded_width = 0;
			binary->vf_frame_info.res.height = 0;
		} else {
			/* we also store the raw downscaled width. This is
			 * used for digital zoom in preview to zoom only on
			 * the width that we actually want to keep, not on
			 * the aligned width. */
			binary->vf_frame_info.res.width =
			    (bin_out_info->res.width >> vf_log_ds);
			binary->vf_frame_info.padded_width = vf_out_width;
			binary->vf_frame_info.res.height = vf_out_height;
		}
	} else {
		binary->vf_frame_info.res.width = 0;
		binary->vf_frame_info.padded_width = 0;
		binary->vf_frame_info.res.height = 0;
	}

	if (info->enable.ca_gdc) {
		binary->morph_tbl_width =
		    _ISP_MORPH_TABLE_WIDTH(isp_internal_width);
		binary->morph_tbl_aligned_width =
		    _ISP_MORPH_TABLE_ALIGNED_WIDTH(isp_internal_width);
		binary->morph_tbl_height =
		    _ISP_MORPH_TABLE_HEIGHT(isp_internal_height);
	} else {
		binary->morph_tbl_width = 0;
		binary->morph_tbl_aligned_width = 0;
		binary->morph_tbl_height = 0;
	}

	sc_3a_dis_width = binary->in_frame_info.res.width;
	sc_3a_dis_padded_width = binary->in_frame_info.padded_width;
	sc_3a_dis_height = binary->in_frame_info.res.height;
	if (bds_out_info && in_info &&
	    bds_out_info->res.width != in_info->res.width) {
		/* TODO: Next, "internal_frame_info" should be derived from
		 * bds_out. So this part will change once it is in place! */
		sc_3a_dis_width = bds_out_info->res.width + info->pipeline.left_cropping;
		sc_3a_dis_padded_width = isp_internal_width;
		sc_3a_dis_height = isp_internal_height;
	}

	s3a_isp_width = _ISP_S3A_ELEMS_ISP_WIDTH(sc_3a_dis_padded_width,
			info->pipeline.left_cropping);
	if (info->s3a.fixed_s3a_deci_log) {
		s3a_log_deci = info->s3a.fixed_s3a_deci_log;
	} else {
		s3a_log_deci = binary_grid_deci_factor_log2(s3a_isp_width,
			       sc_3a_dis_height);
	}
	binary->deci_factor_log2 = s3a_log_deci;

	if (info->enable.s3a) {
		binary->s3atbl_width =
		    _ISP_S3ATBL_WIDTH(sc_3a_dis_width,
				      s3a_log_deci);
		binary->s3atbl_height =
		    _ISP_S3ATBL_HEIGHT(sc_3a_dis_height,
				       s3a_log_deci);
		binary->s3atbl_isp_width =
		    _ISP_S3ATBL_ISP_WIDTH(s3a_isp_width,
					  s3a_log_deci);
		binary->s3atbl_isp_height =
		    _ISP_S3ATBL_ISP_HEIGHT(sc_3a_dis_height,
					   s3a_log_deci);
	} else {
		binary->s3atbl_width = 0;
		binary->s3atbl_height = 0;
		binary->s3atbl_isp_width = 0;
		binary->s3atbl_isp_height = 0;
	}

	if (info->enable.sc) {
		binary->sctbl_width_per_color =
		    _ISP_SCTBL_WIDTH_PER_COLOR(sc_3a_dis_padded_width,
					       s3a_log_deci);
		binary->sctbl_aligned_width_per_color =
		    SH_CSS_MAX_SCTBL_ALIGNED_WIDTH_PER_COLOR;
		binary->sctbl_height =
		    _ISP_SCTBL_HEIGHT(sc_3a_dis_height, s3a_log_deci);
	} else {
		binary->sctbl_width_per_color         = 0;
		binary->sctbl_aligned_width_per_color = 0;
		binary->sctbl_height                  = 0;
	}
	ia_css_sdis_init_info(&binary->dis,
			      sc_3a_dis_width,
			      sc_3a_dis_padded_width,
			      sc_3a_dis_height,
			      info->pipeline.isp_pipe_version,
			      info->enable.dis);
	if (info->pipeline.left_cropping)
		binary->left_padding = 2 * ISP_VEC_NELEMS - info->pipeline.left_cropping;
	else
		binary->left_padding = 0;

	return 0;
}

/*
 * Walk the candidate list for descr->mode, skip every candidate that
 * fails one of the capability/size checks (each rejection is traced with
 * __LINE__), and fill @binary from the first acceptable candidate.
 * Returns 0 on success, -EINVAL when nothing matches, or the error from
 * ia_css_binary_fill_info().
 */
static int __ia_css_binary_find(struct ia_css_binary_descr *descr,
				struct ia_css_binary *binary)
{
	int mode;
	bool online;
	bool two_ppc;
	enum atomisp_input_format stream_format;
	const struct ia_css_frame_info *req_in_info,
		*req_bds_out_info,
		*req_out_info[IA_CSS_BINARY_MAX_OUTPUT_PORTS],
		*req_bin_out_info = NULL,
		*req_vf_info;

	struct ia_css_binary_xinfo *xcandidate;
	bool need_ds, need_dz, need_dvs, need_xnr, need_dpc;
	bool striped;
	bool enable_yuv_ds;
	bool enable_high_speed;
	bool enable_dvs_6axis;
	bool enable_reduced_pipe;
	bool enable_capture_pp_bli;
	int err = -EINVAL;
	bool continuous;
	unsigned int isp_pipe_version;
	struct ia_css_resolution dvs_env, internal_res;
	unsigned int i;

	assert(descr);
	/* MW: used after an error check, may accept NULL, but doubtfull */
	assert(binary);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_binary_find() enter: descr=%p, (mode=%d), binary=%p\n",
			    descr, descr->mode, binary);

	mode = descr->mode;
	online = descr->online;
	two_ppc = descr->two_ppc;
	stream_format = descr->stream_format;
	req_in_info = descr->in_info;
	req_bds_out_info = descr->bds_out_info;
	for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
		req_out_info[i] = descr->out_info[i];
		if (req_out_info[i] && (req_out_info[i]->res.width != 0))
			req_bin_out_info = req_out_info[i];
	}
	if (!req_bin_out_info)
		return -EINVAL;
	req_vf_info = descr->vf_info;

	need_xnr = descr->enable_xnr;
	need_ds = descr->enable_fractional_ds;
	need_dz = false;
	need_dvs = false;
	need_dpc = descr->enable_dpc;
	enable_yuv_ds = descr->enable_yuv_ds;
	enable_high_speed = descr->enable_high_speed;
	enable_dvs_6axis  = descr->enable_dvs_6axis;
	enable_reduced_pipe = descr->enable_reduced_pipe;
	enable_capture_pp_bli = descr->enable_capture_pp_bli;
	continuous = descr->continuous;
	striped = descr->striped;
	isp_pipe_version = descr->isp_pipe_version;

	dvs_env.width = 0;
	dvs_env.height = 0;
	internal_res.width = 0;
	internal_res.height = 0;

	if (mode == IA_CSS_BINARY_MODE_VIDEO) {
		dvs_env = descr->dvs_env;
		need_dz = descr->enable_dz;
		/* Video is the only mode that has a nodz variant. */
		need_dvs = dvs_env.width || dvs_env.height;
	}

	/* print a map of the binary file */
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "BINARY INFO:\n");
	for (i = 0; i < IA_CSS_BINARY_NUM_MODES; i++) {
		xcandidate = binary_infos[i];
		if (xcandidate) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%d:\n", i);
			while (xcandidate) {
				ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " Name:%s Type:%d Cont:%d\n",
						    xcandidate->blob->name, xcandidate->type,
						    xcandidate->sp.enable.continuous);
				xcandidate = xcandidate->next;
			}
		}
	}

	/* printf("sh_css_binary_find: pipe version %d\n", isp_pipe_version); */
	for (xcandidate = binary_infos[mode]; xcandidate;
	     xcandidate = xcandidate->next) {
		struct ia_css_binary_info *candidate = &xcandidate->sp;
		/* printf("sh_css_binary_find: evaluating candidate:
		 * %d\n",candidate->id); */
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "ia_css_binary_find() candidate = %p, mode = %d ID = %d\n",
				    candidate, candidate->pipeline.mode, candidate->id);

		/*
		 * MW: Only a limited set of jointly configured binaries can
		 * be used in a continuous preview/video mode unless it is
		 * the copy mode and runs on SP.
		 */
		if (!candidate->enable.continuous &&
		    continuous && (mode != IA_CSS_BINARY_MODE_COPY)) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: !%d && %d && (%d != %d)\n",
					    __LINE__, candidate->enable.continuous,
					    continuous, mode,
					    IA_CSS_BINARY_MODE_COPY);
			continue;
		}
		if (striped && candidate->iterator.num_stripes == 1) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: binary is not striped\n",
					    __LINE__);
			continue;
		}

		if (candidate->pipeline.isp_pipe_version != isp_pipe_version &&
		    (mode != IA_CSS_BINARY_MODE_COPY) &&
		    (mode != IA_CSS_BINARY_MODE_CAPTURE_PP) &&
		    (mode != IA_CSS_BINARY_MODE_VF_PP)) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: (%d != %d)\n",
					    __LINE__,
					    candidate->pipeline.isp_pipe_version, isp_pipe_version);
			continue;
		}
		if (!candidate->enable.reduced_pipe && enable_reduced_pipe) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: !%d && %d\n",
					    __LINE__,
					    candidate->enable.reduced_pipe,
					    enable_reduced_pipe);
			continue;
		}
		if (!candidate->enable.dvs_6axis && enable_dvs_6axis) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: !%d && %d\n",
					    __LINE__,
					    candidate->enable.dvs_6axis,
					    enable_dvs_6axis);
			continue;
		}
		if (candidate->enable.high_speed && !enable_high_speed) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: %d && !%d\n",
					    __LINE__,
					    candidate->enable.high_speed,
					    enable_high_speed);
			continue;
		}
		if (!candidate->enable.xnr && need_xnr) {
			/* NOTE(review): trace format "%d && !%d" does not
			 * match the "!a && b" condition — trace only. */
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: %d && !%d\n",
					    __LINE__,
					    candidate->enable.xnr,
					    need_xnr);
			continue;
		}
		if (!(candidate->enable.ds & 2) && enable_yuv_ds) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: !%d && %d\n",
					    __LINE__,
					    ((candidate->enable.ds & 2) != 0),
					    enable_yuv_ds);
			continue;
		}
		if ((candidate->enable.ds & 2) && !enable_yuv_ds) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: %d && !%d\n",
					    __LINE__,
					    ((candidate->enable.ds & 2) != 0),
					    enable_yuv_ds);
			continue;
		}

		if (mode == IA_CSS_BINARY_MODE_VIDEO &&
		    candidate->enable.ds && need_ds)
			need_dz = false;

		/* when we require vf output, we need to have vf_veceven */
		if ((req_vf_info) && !(candidate->enable.vf_veceven ||
				       /* or variable vf vec even */
				       candidate->vf_dec.is_variable ||
				       /* or more than one output pin. */
				       xcandidate->num_output_pins > 1)) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: (%p != NULL) && !(%d || %d || (%d >%d))\n",
					    __LINE__, req_vf_info,
					    candidate->enable.vf_veceven,
					    candidate->vf_dec.is_variable,
					    xcandidate->num_output_pins, 1);
			continue;
		}
		if (!candidate->enable.dvs_envelope && need_dvs) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: !%d && %d\n",
					    __LINE__,
					    candidate->enable.dvs_envelope, (int)need_dvs);
			continue;
		}
		/* internal_res check considers input, output, and dvs envelope sizes */
		ia_css_binary_internal_res(req_in_info, req_bds_out_info,
					   req_bin_out_info, &dvs_env, candidate, &internal_res);
		if (internal_res.width > candidate->internal.max_width) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: (%d > %d)\n",
					    __LINE__, internal_res.width,
					    candidate->internal.max_width);
			continue;
		}
		if (internal_res.height > candidate->internal.max_height) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: (%d > %d)\n",
					    __LINE__, internal_res.height,
					    candidate->internal.max_height);
			continue;
		}
		if (!candidate->enable.ds && need_ds && !(xcandidate->num_output_pins > 1)) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: !%d && %d\n",
					    __LINE__, candidate->enable.ds, (int)need_ds);
			continue;
		}
		if (!candidate->enable.uds && !candidate->enable.dvs_6axis && need_dz) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: !%d && !%d && %d\n",
					    __LINE__, candidate->enable.uds,
					    candidate->enable.dvs_6axis, (int)need_dz);
			continue;
		}
		if (online && candidate->input.source ==
		    IA_CSS_BINARY_INPUT_MEMORY) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: %d && (%d == %d)\n",
					    __LINE__, online, candidate->input.source,
					    IA_CSS_BINARY_INPUT_MEMORY);
			continue;
		}
		if (!online && candidate->input.source ==
		    IA_CSS_BINARY_INPUT_SENSOR) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: !%d && (%d == %d)\n",
					    __LINE__, online, candidate->input.source,
					    IA_CSS_BINARY_INPUT_SENSOR);
			continue;
		}
		if (req_bin_out_info->res.width < candidate->output.min_width ||
		    req_bin_out_info->res.width > candidate->output.max_width) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: (%d > %d) || (%d < %d)\n",
					    __LINE__,
					    req_bin_out_info->padded_width,
					    candidate->output.min_width,
					    req_bin_out_info->padded_width,
					    candidate->output.max_width);
			continue;
		}
		if (xcandidate->num_output_pins > 1 &&
		    /* in case we have a second output pin, */
		    req_vf_info) { /* and we need vf output. */
			if (req_vf_info->res.width > candidate->output.max_width) {
				ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
						    "ia_css_binary_find() [%d] continue: (%d < %d)\n",
						    __LINE__,
						    req_vf_info->res.width,
						    candidate->output.max_width);
				continue;
			}
		}
		if (req_in_info->padded_width > candidate->input.max_width) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: (%d > %d)\n",
					    __LINE__, req_in_info->padded_width,
					    candidate->input.max_width);
			continue;
		}
		if (!binary_supports_output_format(xcandidate, req_bin_out_info->format)) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: !%d\n",
					    __LINE__,
					    binary_supports_output_format(xcandidate,
									  req_bin_out_info->format));
			continue;
		}
		if (xcandidate->num_output_pins > 1 &&
		    /* in case we have a second output pin, */
		    req_vf_info &&   /* and we need vf output. */
		    /* check if the required vf format
		    is supported. */
		    !binary_supports_output_format(xcandidate, req_vf_info->format)) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: (%d > %d) && (%p != NULL) && !%d\n",
					    __LINE__, xcandidate->num_output_pins, 1,
					    req_vf_info,
					    binary_supports_output_format(xcandidate, req_vf_info->format));
			continue;
		}

		/* Check if vf_veceven supports the requested vf format */
		if (xcandidate->num_output_pins == 1 &&
		    req_vf_info && candidate->enable.vf_veceven &&
		    !binary_supports_vf_format(xcandidate, req_vf_info->format)) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: (%d == %d) && (%p != NULL) && %d && !%d\n",
					    __LINE__, xcandidate->num_output_pins, 1,
					    req_vf_info, candidate->enable.vf_veceven,
					    binary_supports_vf_format(xcandidate, req_vf_info->format));
			continue;
		}

		/* Check if vf_veceven supports the requested vf width */
		if (xcandidate->num_output_pins == 1 &&
		    req_vf_info && candidate->enable.vf_veceven) { /* and we need vf output. */
			if (req_vf_info->res.width > candidate->output.max_width) {
				ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
						    "ia_css_binary_find() [%d] continue: (%d < %d)\n",
						    __LINE__,
						    req_vf_info->res.width,
						    candidate->output.max_width);
				continue;
			}
		}

		if (!supports_bds_factor(candidate->bds.supported_bds_factors,
					 descr->required_bds_factor)) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
					    __LINE__, candidate->bds.supported_bds_factors,
					    descr->required_bds_factor);
			continue;
		}

		if (!candidate->enable.dpc && need_dpc) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
					    __LINE__, candidate->enable.dpc,
					    descr->enable_dpc);
			continue;
		}

		if (candidate->uds.use_bci && enable_capture_pp_bli) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "ia_css_binary_find() [%d] continue: 0x%x & 0x%x)\n",
					    __LINE__, candidate->uds.use_bci,
					    descr->enable_capture_pp_bli);
			continue;
		}

		/* reconfigure any variable properties of the binary */
		err = ia_css_binary_fill_info(xcandidate, online, two_ppc,
					      stream_format, req_in_info,
					      req_bds_out_info,
					      req_out_info, req_vf_info,
					      binary, &dvs_env,
					      descr->stream_config_left_padding,
					      false);

		if (err)
			break;
		binary_init_metrics(&binary->metrics, &binary->info->sp);
		break;
	}

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_binary_find() selected = %p, mode = %d ID = %d\n",
			    xcandidate, xcandidate ? xcandidate->sp.pipeline.mode : 0,
			    xcandidate ? xcandidate->sp.id : 0);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_binary_find() leave: return_err=%d\n", err);

	if (!err && xcandidate)
		dev_dbg(atomisp_dev,
			"Using binary %s (id %d), type %d, mode %d, continuous %s\n",
			xcandidate->blob->name, xcandidate->sp.id,
			xcandidate->type, xcandidate->sp.pipeline.mode,
			xcandidate->sp.enable.continuous ? "true" : "false");

	return err;
}

/*
 * Public wrapper around __ia_css_binary_find(); dumps a stack trace on
 * failure to aid debugging of missing binaries.
 */
int ia_css_binary_find(struct ia_css_binary_descr *descr,
		       struct ia_css_binary *binary)
{
	int ret = __ia_css_binary_find(descr, binary);

	if (unlikely(ret)) {
		dev_dbg(atomisp_dev, "Seeking for binary failed at:");
		dump_stack();
	}

	return ret;
}

/*
 * Return the maximum viewfinder output width supported by the VF_PP
 * binary, or 0 when no such binary exists (IPU3/SkyCam case).
 */
unsigned
ia_css_binary_max_vf_width(void)
{
	/* This is (should be) true for IPU1 and IPU2 */
	/* For IPU3 (SkyCam) this pointer is guaranteed to be NULL simply because such a binary does not exist  */
	if (binary_infos[IA_CSS_BINARY_MODE_VF_PP])
		return binary_infos[IA_CSS_BINARY_MODE_VF_PP]->sp.output.max_width;
	return 0;
}

/* Free the ISP parameter memory held by @binary. Safe to call with NULL. */
void
ia_css_binary_destroy_isp_parameters(struct ia_css_binary *binary)
{
	if (binary) {
		ia_css_isp_param_destroy_isp_parameters(&binary->mem_params,
							&binary->css_params);
	}
}

/*
 * Return the list of loaded ISP binaries and, optionally, their count
 * (excluding the SP binary, which is not stored in all_binaries).
 */
void
ia_css_binary_get_isp_binaries(struct ia_css_binary_xinfo **binaries,
			       uint32_t *num_isp_binaries)
{
	assert(binaries);

	if (num_isp_binaries)
		*num_isp_binaries = 0;

	*binaries = all_binaries;
	if (all_binaries && num_isp_binaries) {
		/* -1 to account for sp binary which is not stored in all_binaries */
		if (sh_css_num_binaries > 0)
			*num_isp_binaries = sh_css_num_binaries - 1;
	}
}
/* Origin: linux-master — drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c */
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010 - 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "system_global.h" #include <linux/kernel.h> #ifndef ISP2401 #include "ia_css_ifmtr.h" #include <math_support.h> #include "sh_css_internal.h" #include "input_formatter.h" #include "assert_support.h" #include "sh_css_sp.h" #include "isp/modes/interface/input_buf.isp.h" /************************************************************ * Static functions declarations ************************************************************/ static int ifmtr_start_column( const struct ia_css_stream_config *config, unsigned int bin_in, unsigned int *start_column); static int ifmtr_input_start_line( const struct ia_css_stream_config *config, unsigned int bin_in, unsigned int *start_line); static void ifmtr_set_if_blocking_mode( const input_formatter_cfg_t *const config_a, const input_formatter_cfg_t *const config_b); /************************************************************ * Public functions ************************************************************/ /* ISP expects GRBG bayer order, we skip one line and/or one row * to correct in case the input bayer order is different. 
*/ unsigned int ia_css_ifmtr_lines_needed_for_bayer_order( const struct ia_css_stream_config *config) { assert(config); if ((config->input_config.bayer_order == IA_CSS_BAYER_ORDER_BGGR) || (config->input_config.bayer_order == IA_CSS_BAYER_ORDER_GBRG)) return 1; return 0; } unsigned int ia_css_ifmtr_columns_needed_for_bayer_order( const struct ia_css_stream_config *config) { assert(config); if ((config->input_config.bayer_order == IA_CSS_BAYER_ORDER_RGGB) || (config->input_config.bayer_order == IA_CSS_BAYER_ORDER_GBRG)) return 1; return 0; } int ia_css_ifmtr_configure(struct ia_css_stream_config *config, struct ia_css_binary *binary) { unsigned int start_line, start_column = 0, cropped_height, cropped_width, num_vectors, buffer_height = 2, buffer_width, two_ppc, vmem_increment = 0, deinterleaving = 0, deinterleaving_b = 0, width_a = 0, width_b = 0, bits_per_pixel, vectors_per_buffer, vectors_per_line = 0, buffers_per_line = 0, buf_offset_a = 0, buf_offset_b = 0, line_width = 0, width_b_factor = 1, start_column_b, left_padding = 0; input_formatter_cfg_t if_a_config, if_b_config; enum atomisp_input_format input_format; int err = 0; u8 if_config_index; /* Determine which input formatter config set is targeted. */ /* Index is equal to the CSI-2 port used. 
*/ enum mipi_port_id port; if (binary) { cropped_height = binary->in_frame_info.res.height; cropped_width = binary->in_frame_info.res.width; /* This should correspond to the input buffer definition for ISP binaries in input_buf.isp.h */ if (binary->info->sp.enable.continuous && binary->info->sp.pipeline.mode != IA_CSS_BINARY_MODE_COPY) buffer_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS; else buffer_width = binary->info->sp.input.max_width; input_format = binary->input_format; } else { /* sp raw copy pipe (IA_CSS_PIPE_MODE_COPY): binary is NULL */ cropped_height = config->input_config.input_res.height; cropped_width = config->input_config.input_res.width; buffer_width = MAX_VECTORS_PER_INPUT_LINE_CONT * ISP_VEC_NELEMS; input_format = config->input_config.format; } two_ppc = config->pixels_per_clock == 2; if (config->mode == IA_CSS_INPUT_MODE_SENSOR || config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) { port = config->source.port.port; if_config_index = (uint8_t)(port - MIPI_PORT0_ID); } else if (config->mode == IA_CSS_INPUT_MODE_MEMORY) { if_config_index = SH_CSS_IF_CONFIG_NOT_NEEDED; } else { if_config_index = 0; } assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS || if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED); /* TODO: check to see if input is RAW and if current mode interprets * RAW data in any particular bayer order. copy binary with output * format other than raw should not result in dropping lines and/or * columns. 
*/ err = ifmtr_input_start_line(config, cropped_height, &start_line); if (err) return err; err = ifmtr_start_column(config, cropped_width, &start_column); if (err) return err; if (config->left_padding == -1) if (!binary) /* sp raw copy pipe: set left_padding value */ left_padding = 0; else left_padding = binary->left_padding; else left_padding = 2 * ISP_VEC_NELEMS - config->left_padding; if (left_padding) { num_vectors = CEIL_DIV(cropped_width + left_padding, ISP_VEC_NELEMS); } else { num_vectors = CEIL_DIV(cropped_width, ISP_VEC_NELEMS); num_vectors *= buffer_height; /* todo: in case of left padding, num_vectors is vectors per line, otherwise vectors per line * buffer_height. */ } start_column_b = start_column; bits_per_pixel = input_formatter_get_alignment(INPUT_FORMATTER0_ID) * 8 / ISP_VEC_NELEMS; switch (input_format) { case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: if (two_ppc) { vmem_increment = 1; deinterleaving = 1; deinterleaving_b = 1; /* half lines */ width_a = cropped_width * deinterleaving / 2; width_b_factor = 2; /* full lines */ width_b = width_a * width_b_factor; buffer_width *= deinterleaving * 2; /* Patch from bayer to yuv */ num_vectors *= deinterleaving; buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS; vectors_per_line = num_vectors / buffer_height; /* Even lines are half size */ line_width = vectors_per_line * input_formatter_get_alignment(INPUT_FORMATTER0_ID) / 2; start_column /= 2; } else { vmem_increment = 1; deinterleaving = 3; width_a = cropped_width * deinterleaving / 2; buffer_width = buffer_width * deinterleaving / 2; /* Patch from bayer to yuv */ num_vectors = num_vectors / 2 * deinterleaving; start_column = start_column * deinterleaving / 2; } break; case ATOMISP_INPUT_FORMAT_YUV420_8: case ATOMISP_INPUT_FORMAT_YUV420_10: case ATOMISP_INPUT_FORMAT_YUV420_16: if (two_ppc) { vmem_increment = 1; deinterleaving = 1; width_a = width_b = cropped_width * deinterleaving / 2; buffer_width *= deinterleaving * 2; num_vectors *= deinterleaving; 
buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS; vectors_per_line = num_vectors / buffer_height; /* Even lines are half size */ line_width = vectors_per_line * input_formatter_get_alignment(INPUT_FORMATTER0_ID) / 2; start_column *= deinterleaving; start_column /= 2; start_column_b = start_column; } else { vmem_increment = 1; deinterleaving = 1; width_a = cropped_width * deinterleaving; buffer_width *= deinterleaving * 2; num_vectors *= deinterleaving; start_column *= deinterleaving; } break; case ATOMISP_INPUT_FORMAT_YUV422_8: case ATOMISP_INPUT_FORMAT_YUV422_10: case ATOMISP_INPUT_FORMAT_YUV422_16: if (two_ppc) { vmem_increment = 1; deinterleaving = 1; width_a = width_b = cropped_width * deinterleaving; buffer_width *= deinterleaving * 2; num_vectors *= deinterleaving; start_column *= deinterleaving; buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS; start_column_b = start_column; } else { vmem_increment = 1; deinterleaving = 2; width_a = cropped_width * deinterleaving; buffer_width *= deinterleaving; num_vectors *= deinterleaving; start_column *= deinterleaving; } break; case ATOMISP_INPUT_FORMAT_RGB_444: case ATOMISP_INPUT_FORMAT_RGB_555: case ATOMISP_INPUT_FORMAT_RGB_565: case ATOMISP_INPUT_FORMAT_RGB_666: case ATOMISP_INPUT_FORMAT_RGB_888: num_vectors *= 2; if (two_ppc) { deinterleaving = 2; /* BR in if_a, G in if_b */ deinterleaving_b = 1; /* BR in if_a, G in if_b */ buffers_per_line = 4; start_column_b = start_column; start_column *= deinterleaving; start_column_b *= deinterleaving_b; } else { deinterleaving = 3; /* BGR */ buffers_per_line = 3; start_column *= deinterleaving; } vmem_increment = 1; width_a = cropped_width * deinterleaving; width_b = cropped_width * deinterleaving_b; buffer_width *= buffers_per_line; /* Patch from bayer to rgb */ num_vectors = num_vectors / 2 * deinterleaving; buf_offset_b = buffer_width / 2 / ISP_VEC_NELEMS; break; case ATOMISP_INPUT_FORMAT_RAW_6: case ATOMISP_INPUT_FORMAT_RAW_7: case ATOMISP_INPUT_FORMAT_RAW_8: case 
ATOMISP_INPUT_FORMAT_RAW_10: case ATOMISP_INPUT_FORMAT_RAW_12: if (two_ppc) { int crop_col = (start_column % 2) == 1; vmem_increment = 2; deinterleaving = 1; width_a = width_b = cropped_width / 2; /* When two_ppc is enabled AND we need to crop one extra * column, if_a crops by one extra and we swap the * output offsets to interleave the bayer pattern in * the correct order. */ buf_offset_a = crop_col ? 1 : 0; buf_offset_b = crop_col ? 0 : 1; start_column_b = start_column / 2; start_column = start_column / 2 + crop_col; } else { vmem_increment = 1; deinterleaving = 2; if ((!binary) || (config->continuous && binary && binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_COPY)) { /* !binary -> sp raw copy pipe, no deinterleaving */ deinterleaving = 1; } width_a = cropped_width; /* Must be multiple of deinterleaving */ num_vectors = CEIL_MUL(num_vectors, deinterleaving); } buffer_height *= 2; if ((!binary) || config->continuous) /* !binary -> sp raw copy pipe */ buffer_height *= 2; vectors_per_line = CEIL_DIV(cropped_width, ISP_VEC_NELEMS); vectors_per_line = CEIL_MUL(vectors_per_line, deinterleaving); break; case ATOMISP_INPUT_FORMAT_RAW_14: case ATOMISP_INPUT_FORMAT_RAW_16: if (two_ppc) { num_vectors *= 2; vmem_increment = 1; deinterleaving = 2; width_a = width_b = cropped_width; /* B buffer is one line further */ buf_offset_b = buffer_width / ISP_VEC_NELEMS; bits_per_pixel *= 2; } else { vmem_increment = 1; deinterleaving = 2; width_a = cropped_width; start_column /= deinterleaving; } buffer_height *= 2; break; case ATOMISP_INPUT_FORMAT_BINARY_8: case ATOMISP_INPUT_FORMAT_GENERIC_SHORT1: case ATOMISP_INPUT_FORMAT_GENERIC_SHORT2: case ATOMISP_INPUT_FORMAT_GENERIC_SHORT3: case ATOMISP_INPUT_FORMAT_GENERIC_SHORT4: case ATOMISP_INPUT_FORMAT_GENERIC_SHORT5: case ATOMISP_INPUT_FORMAT_GENERIC_SHORT6: case ATOMISP_INPUT_FORMAT_GENERIC_SHORT7: case ATOMISP_INPUT_FORMAT_GENERIC_SHORT8: case ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT: case ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT: case 
ATOMISP_INPUT_FORMAT_EMBEDDED: case ATOMISP_INPUT_FORMAT_USER_DEF1: case ATOMISP_INPUT_FORMAT_USER_DEF2: case ATOMISP_INPUT_FORMAT_USER_DEF3: case ATOMISP_INPUT_FORMAT_USER_DEF4: case ATOMISP_INPUT_FORMAT_USER_DEF5: case ATOMISP_INPUT_FORMAT_USER_DEF6: case ATOMISP_INPUT_FORMAT_USER_DEF7: case ATOMISP_INPUT_FORMAT_USER_DEF8: break; } if (width_a == 0) return -EINVAL; if (two_ppc) left_padding /= 2; /* Default values */ if (left_padding) vectors_per_line = num_vectors; if (!vectors_per_line) { vectors_per_line = CEIL_MUL(num_vectors / buffer_height, deinterleaving); line_width = 0; } if (!line_width) line_width = vectors_per_line * input_formatter_get_alignment(INPUT_FORMATTER0_ID); if (!buffers_per_line) buffers_per_line = deinterleaving; line_width = CEIL_MUL(line_width, input_formatter_get_alignment(INPUT_FORMATTER0_ID) * vmem_increment); vectors_per_buffer = buffer_height * buffer_width / ISP_VEC_NELEMS; if (config->mode == IA_CSS_INPUT_MODE_TPG && ((binary && binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_VIDEO) || (!binary))) { /* !binary -> sp raw copy pipe */ /* workaround for TPG in video mode */ start_line = 0; start_column = 0; cropped_height -= start_line; width_a -= start_column; } if_a_config.start_line = start_line; if_a_config.start_column = start_column; if_a_config.left_padding = left_padding / deinterleaving; if_a_config.cropped_height = cropped_height; if_a_config.cropped_width = width_a; if_a_config.deinterleaving = deinterleaving; if_a_config.buf_vecs = vectors_per_buffer; if_a_config.buf_start_index = buf_offset_a; if_a_config.buf_increment = vmem_increment; if_a_config.buf_eol_offset = buffer_width * bits_per_pixel / 8 - line_width; if_a_config.is_yuv420_format = (input_format == ATOMISP_INPUT_FORMAT_YUV420_8) || (input_format == ATOMISP_INPUT_FORMAT_YUV420_10) || (input_format == ATOMISP_INPUT_FORMAT_YUV420_16); if_a_config.block_no_reqs = (config->mode != IA_CSS_INPUT_MODE_SENSOR); if (two_ppc) { if (deinterleaving_b) { deinterleaving 
= deinterleaving_b; width_b = cropped_width * deinterleaving; buffer_width *= deinterleaving; /* Patch from bayer to rgb */ num_vectors = num_vectors / 2 * deinterleaving * width_b_factor; vectors_per_line = num_vectors / buffer_height; line_width = vectors_per_line * input_formatter_get_alignment(INPUT_FORMATTER0_ID); } if_b_config.start_line = start_line; if_b_config.start_column = start_column_b; if_b_config.left_padding = left_padding / deinterleaving; if_b_config.cropped_height = cropped_height; if_b_config.cropped_width = width_b; if_b_config.deinterleaving = deinterleaving; if_b_config.buf_vecs = vectors_per_buffer; if_b_config.buf_start_index = buf_offset_b; if_b_config.buf_increment = vmem_increment; if_b_config.buf_eol_offset = buffer_width * bits_per_pixel / 8 - line_width; if_b_config.is_yuv420_format = input_format == ATOMISP_INPUT_FORMAT_YUV420_8 || input_format == ATOMISP_INPUT_FORMAT_YUV420_10 || input_format == ATOMISP_INPUT_FORMAT_YUV420_16; if_b_config.block_no_reqs = (config->mode != IA_CSS_INPUT_MODE_SENSOR); if (if_config_index != SH_CSS_IF_CONFIG_NOT_NEEDED) { assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS); ifmtr_set_if_blocking_mode(&if_a_config, &if_b_config); /* Set the ifconfigs to SP group */ sh_css_sp_set_if_configs(&if_a_config, &if_b_config, if_config_index); } } else { if (if_config_index != SH_CSS_IF_CONFIG_NOT_NEEDED) { assert(if_config_index <= SH_CSS_MAX_IF_CONFIGS); ifmtr_set_if_blocking_mode(&if_a_config, NULL); /* Set the ifconfigs to SP group */ sh_css_sp_set_if_configs(&if_a_config, NULL, if_config_index); } } return 0; } bool ifmtr_set_if_blocking_mode_reset = true; /************************************************************ * Static functions ************************************************************/ static void ifmtr_set_if_blocking_mode( const input_formatter_cfg_t *const config_a, const input_formatter_cfg_t *const config_b) { int i; bool block[] = { false, false, false, false }; assert(N_INPUT_FORMATTER_ID <= 
(ARRAY_SIZE(block)));
	/* IF0 is always driven from config_a; IF1 only when a B-side
	 * configuration is present (two-pixels-per-clock operation). */
	block[INPUT_FORMATTER0_ID] = (bool)config_a->block_no_reqs;
	if (config_b)
		block[INPUT_FORMATTER1_ID] = (bool)config_b->block_no_reqs;

	/* TODO: next could cause issues when streams are started after
	 * each other. */
	/* IF should not be reconfigured/reset from host */
	if (ifmtr_set_if_blocking_mode_reset) {
		/* One-shot: reset and program the FIFO blocking mode of every
		 * input formatter exactly once per driver lifetime. */
		ifmtr_set_if_blocking_mode_reset = false;
		for (i = 0; i < N_INPUT_FORMATTER_ID; i++) {
			input_formatter_ID_t id = (input_formatter_ID_t)i;

			input_formatter_rst(id);
			input_formatter_set_fifo_blocking_mode(id,
							       block[id]);
		}
	}
	return;
}

/*
 * Compute the first input column to use so that the cropped width @bin_in is
 * centered in the sensor line, adjusted for the bayer order of the stream.
 *
 * @config:       stream configuration (provides input width and bayer order)
 * @bin_in:       width (in columns) required by the binary
 * @start_column: out: first column to read from the input
 *
 * Returns 0 on success, -EINVAL when the input is too narrow to supply
 * @bin_in columns plus the bayer-order correction on both sides.
 */
static int ifmtr_start_column(
    const struct ia_css_stream_config *config,
    unsigned int bin_in,
    unsigned int *start_column)
{
	unsigned int in = config->input_config.input_res.width, start,
		for_bayer = ia_css_ifmtr_columns_needed_for_bayer_order(config);

	if (bin_in + 2 * for_bayer > in)
		return -EINVAL;

	/* On the hardware, we want to use the middle of the input, so we
	 * divide the start column by 2. */
	start = (in - bin_in) / 2;
	/* in case the number of extra columns is 2 or odd, we round the start
	 * column down */
	start &= ~0x1;

	/* now we add the one column (if needed) to correct for the bayer
	 * order). */
	start += for_bayer;
	*start_column = start;
	return 0;
}

/*
 * Compute the first input line to use so that the cropped height @bin_in is
 * centered vertically in the frame, adjusted for the bayer order.
 *
 * @config:     stream configuration (provides input height and bayer order)
 * @bin_in:     height (in lines) required by the binary
 * @start_line: out: first line to read from the input
 *
 * Returns 0 on success, -EINVAL when the input is too short to supply
 * @bin_in lines plus the bayer-order correction on both sides.
 */
static int ifmtr_input_start_line(
    const struct ia_css_stream_config *config,
    unsigned int bin_in,
    unsigned int *start_line)
{
	unsigned int in = config->input_config.input_res.height, start,
		for_bayer = ia_css_ifmtr_lines_needed_for_bayer_order(config);

	if (bin_in + 2 * for_bayer > in)
		return -EINVAL;

	/* On the hardware, we want to use the middle of the input, so we
	 * divide the start line by 2. On the simulator, we cannot handle extra
	 * lines at the end of the frame. */
	start = (in - bin_in) / 2;
	/* in case the number of extra lines is 2 or odd, we round the start
	 * line down. */
	start &= ~0x1;

	/* now we add the one line (if needed) to correct for the bayer order */
	start += for_bayer;
	*start_line = start;
	return 0;
}

#endif
/* [extraction artifact] origin: linux-master,
 * drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c */
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010 - 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "sh_css_sp.h" #include "dma.h" /* N_DMA_CHANNEL_ID */ #include <type_support.h> #include "ia_css_binary.h" #include "sh_css_hrt.h" #include "sh_css_defs.h" #include "sh_css_internal.h" #include "ia_css_debug.h" #include "ia_css_debug_internal.h" #include "sh_css_legacy.h" #include "gdc_device.h" /* HRT_GDC_N */ /*#include "sp.h"*/ /* host2sp_enqueue_frame_data() */ #include "assert_support.h" #include "ia_css_queue.h" /* host_sp_enqueue_XXX */ #include "ia_css_event.h" /* ia_css_event_encode */ /* * @brief Encode the information into the software-event. * Refer to "sw_event_public.h" for details. 
*/ bool ia_css_event_encode( u8 *in, u8 nr, uint32_t *out) { bool ret; u32 nr_of_bits; u32 i; assert(in); assert(out); OP___assert(nr > 0 && nr <= MAX_NR_OF_PAYLOADS_PER_SW_EVENT); /* initialize the output */ *out = 0; /* get the number of bits per information */ nr_of_bits = sizeof(uint32_t) * 8 / nr; /* compress the all inputs into a signle output */ for (i = 0; i < nr; i++) { *out <<= nr_of_bits; *out |= in[i]; } /* get the return value */ ret = (nr > 0 && nr <= MAX_NR_OF_PAYLOADS_PER_SW_EVENT); return ret; } void ia_css_event_decode( u32 event, uint8_t *payload) { assert(payload[1] == 0); assert(payload[2] == 0); assert(payload[3] == 0); ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_event_decode() enter:\n"); /* First decode according to the common case * In case of a PORT_EOF event we overwrite with * the specific values * This is somewhat ugly but probably somewhat efficient * (and it avoids some code duplication) */ payload[0] = event & 0xff; /*event_code */ payload[1] = (event >> 8) & 0xff; payload[2] = (event >> 16) & 0xff; payload[3] = 0; switch (payload[0]) { case SH_CSS_SP_EVENT_PORT_EOF: payload[2] = 0; payload[3] = (event >> 24) & 0xff; break; case SH_CSS_SP_EVENT_ACC_STAGE_COMPLETE: case SH_CSS_SP_EVENT_TIMER: case SH_CSS_SP_EVENT_FRAME_TAGGED: case SH_CSS_SP_EVENT_FW_WARNING: case SH_CSS_SP_EVENT_FW_ASSERT: payload[3] = (event >> 24) & 0xff; break; default: break; } }
/* [extraction artifact] origin: linux-master,
 * drivers/staging/media/atomisp/pci/runtime/event/src/event.c */
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "debug.h" #ifndef __INLINE_INPUT_SYSTEM__ #define __INLINE_INPUT_SYSTEM__ #endif #ifndef __INLINE_IBUF_CTRL__ #define __INLINE_IBUF_CTRL__ #endif #ifndef __INLINE_CSI_RX__ #define __INLINE_CSI_RX__ #endif #ifndef __INLINE_PIXELGEN__ #define __INLINE_PIXELGEN__ #endif #ifndef __INLINE_STREAM2MMIO__ #define __INLINE_STREAM2MMIO__ #endif #include <linux/string.h> /* for strscpy() */ #include "ia_css_debug.h" #include "ia_css_debug_pipe.h" #include "ia_css_irq.h" #include "ia_css_stream.h" #include "ia_css_pipeline.h" #include "ia_css_isp_param.h" #include "sh_css_params.h" #include "ia_css_bufq.h" /* ISP2401 */ #include "ia_css_queue.h" #include "ia_css_isp_params.h" #include "system_local.h" #include "assert_support.h" #include "print_support.h" #include "fifo_monitor.h" #include "input_formatter.h" #include "dma.h" #include "irq.h" #include "gp_device.h" #include "sp.h" #include "isp.h" #include "type_support.h" #include "math_support.h" /* CEIL_DIV */ #include "input_system.h" /* input_formatter_reg_load */ #include "ia_css_tagger_common.h" #include "sh_css_internal.h" #include "ia_css_isys.h" #include "sh_css_sp.h" /* sh_css_sp_get_debug_state() */ #include "css_trace.h" /* tracer */ #include "device_access.h" /* for ia_css_device_load_uint32 */ /* Include all kernel host interfaces for ISP1 */ #include "anr/anr_1.0/ia_css_anr.host.h" #include "cnr/cnr_1.0/ia_css_cnr.host.h" #include 
"csc/csc_1.0/ia_css_csc.host.h" #include "de/de_1.0/ia_css_de.host.h" #include "dp/dp_1.0/ia_css_dp.host.h" #include "bnr/bnr_1.0/ia_css_bnr.host.h" #include "fpn/fpn_1.0/ia_css_fpn.host.h" #include "gc/gc_1.0/ia_css_gc.host.h" #include "ob/ob_1.0/ia_css_ob.host.h" #include "s3a/s3a_1.0/ia_css_s3a.host.h" #include "sc/sc_1.0/ia_css_sc.host.h" #include "tnr/tnr_1.0/ia_css_tnr.host.h" #include "uds/uds_1.0/ia_css_uds_param.h" #include "wb/wb_1.0/ia_css_wb.host.h" #include "ynr/ynr_1.0/ia_css_ynr.host.h" /* Include additional kernel host interfaces for ISP2 */ #include "aa/aa_2/ia_css_aa2.host.h" #include "anr/anr_2/ia_css_anr2.host.h" #include "cnr/cnr_2/ia_css_cnr2.host.h" #include "de/de_2/ia_css_de2.host.h" #include "gc/gc_2/ia_css_gc2.host.h" #include "ynr/ynr_2/ia_css_ynr2.host.h" #define DPG_START "ia_css_debug_pipe_graph_dump_start " #define DPG_END " ia_css_debug_pipe_graph_dump_end\n" #define ENABLE_LINE_MAX_LENGTH (25) /* * TODO:SH_CSS_MAX_SP_THREADS is not the max number of sp threads * future rework should fix this and remove the define MAX_THREAD_NUM */ #define MAX_THREAD_NUM (SH_CSS_MAX_SP_THREADS + SH_CSS_MAX_SP_INTERNAL_THREADS) static struct pipe_graph_class { bool do_init; int height; int width; int eff_height; int eff_width; enum atomisp_input_format stream_format; } pg_inst = {true, 0, 0, 0, 0, N_ATOMISP_INPUT_FORMAT}; static const char *const queue_id_to_str[] = { /* [SH_CSS_QUEUE_A_ID] =*/ "queue_A", /* [SH_CSS_QUEUE_B_ID] =*/ "queue_B", /* [SH_CSS_QUEUE_C_ID] =*/ "queue_C", /* [SH_CSS_QUEUE_D_ID] =*/ "queue_D", /* [SH_CSS_QUEUE_E_ID] =*/ "queue_E", /* [SH_CSS_QUEUE_F_ID] =*/ "queue_F", /* [SH_CSS_QUEUE_G_ID] =*/ "queue_G", /* [SH_CSS_QUEUE_H_ID] =*/ "queue_H" }; static const char *const pipe_id_to_str[] = { /* [IA_CSS_PIPE_ID_PREVIEW] =*/ "preview", /* [IA_CSS_PIPE_ID_COPY] =*/ "copy", /* [IA_CSS_PIPE_ID_VIDEO] =*/ "video", /* [IA_CSS_PIPE_ID_CAPTURE] =*/ "capture", /* [IA_CSS_PIPE_ID_YUVPP] =*/ "yuvpp", }; static char 
dot_id_input_bin[SH_CSS_MAX_BINARY_NAME + 10]; static char ring_buffer[200]; void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...) { va_list ap; va_start(ap, fmt); ia_css_debug_vdtrace(level, fmt, ap); va_end(ap); } static void debug_dump_long_array_formatted( const sp_ID_t sp_id, hrt_address stack_sp_addr, unsigned int stack_size) { unsigned int i; u32 val; u32 addr = (uint32_t)stack_sp_addr; u32 stack_size_words = CEIL_DIV(stack_size, sizeof(uint32_t)); /* When size is not multiple of four, last word is only relevant for * remaining bytes */ for (i = 0; i < stack_size_words; i++) { val = sp_dmem_load_uint32(sp_id, (hrt_address)addr); if ((i % 8) == 0) ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n"); ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "0x%08x ", val); addr += sizeof(uint32_t); } ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n"); } static void debug_dump_sp_stack_info( const sp_ID_t sp_id) { const struct ia_css_fw_info *fw; unsigned int HIVE_ADDR_sp_threads_stack; unsigned int HIVE_ADDR_sp_threads_stack_size; u32 stack_sizes[MAX_THREAD_NUM]; u32 stack_sp_addr[MAX_THREAD_NUM]; unsigned int i; fw = &sh_css_sp_fw; ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "sp_id(%u) stack info\n", sp_id); ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "from objects stack_addr_offset:0x%x stack_size_offset:0x%x\n", fw->info.sp.threads_stack, fw->info.sp.threads_stack_size); HIVE_ADDR_sp_threads_stack = fw->info.sp.threads_stack; HIVE_ADDR_sp_threads_stack_size = fw->info.sp.threads_stack_size; if (fw->info.sp.threads_stack == 0 || fw->info.sp.threads_stack_size == 0) return; (void)HIVE_ADDR_sp_threads_stack; (void)HIVE_ADDR_sp_threads_stack_size; sp_dmem_load(sp_id, (unsigned int)sp_address_of(sp_threads_stack), &stack_sp_addr, sizeof(stack_sp_addr)); sp_dmem_load(sp_id, (unsigned int)sp_address_of(sp_threads_stack_size), &stack_sizes, sizeof(stack_sizes)); for (i = 0 ; i < MAX_THREAD_NUM; i++) { ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "thread: %u stack_addr: 0x%08x 
stack_size: %u\n", i, stack_sp_addr[i], stack_sizes[i]); debug_dump_long_array_formatted(sp_id, (hrt_address)stack_sp_addr[i], stack_sizes[i]); } } void ia_css_debug_dump_sp_stack_info(void) { debug_dump_sp_stack_info(SP0_ID); } void ia_css_debug_set_dtrace_level(const unsigned int trace_level) { dbg_level = trace_level; return; } unsigned int ia_css_debug_get_dtrace_level(void) { return dbg_level; } static const char *debug_stream_format2str(const enum atomisp_input_format stream_format) { switch (stream_format) { case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY: return "yuv420-8-legacy"; case ATOMISP_INPUT_FORMAT_YUV420_8: return "yuv420-8"; case ATOMISP_INPUT_FORMAT_YUV420_10: return "yuv420-10"; case ATOMISP_INPUT_FORMAT_YUV420_16: return "yuv420-16"; case ATOMISP_INPUT_FORMAT_YUV422_8: return "yuv422-8"; case ATOMISP_INPUT_FORMAT_YUV422_10: return "yuv422-10"; case ATOMISP_INPUT_FORMAT_YUV422_16: return "yuv422-16"; case ATOMISP_INPUT_FORMAT_RGB_444: return "rgb444"; case ATOMISP_INPUT_FORMAT_RGB_555: return "rgb555"; case ATOMISP_INPUT_FORMAT_RGB_565: return "rgb565"; case ATOMISP_INPUT_FORMAT_RGB_666: return "rgb666"; case ATOMISP_INPUT_FORMAT_RGB_888: return "rgb888"; case ATOMISP_INPUT_FORMAT_RAW_6: return "raw6"; case ATOMISP_INPUT_FORMAT_RAW_7: return "raw7"; case ATOMISP_INPUT_FORMAT_RAW_8: return "raw8"; case ATOMISP_INPUT_FORMAT_RAW_10: return "raw10"; case ATOMISP_INPUT_FORMAT_RAW_12: return "raw12"; case ATOMISP_INPUT_FORMAT_RAW_14: return "raw14"; case ATOMISP_INPUT_FORMAT_RAW_16: return "raw16"; case ATOMISP_INPUT_FORMAT_BINARY_8: return "binary8"; case ATOMISP_INPUT_FORMAT_GENERIC_SHORT1: return "generic-short1"; case ATOMISP_INPUT_FORMAT_GENERIC_SHORT2: return "generic-short2"; case ATOMISP_INPUT_FORMAT_GENERIC_SHORT3: return "generic-short3"; case ATOMISP_INPUT_FORMAT_GENERIC_SHORT4: return "generic-short4"; case ATOMISP_INPUT_FORMAT_GENERIC_SHORT5: return "generic-short5"; case ATOMISP_INPUT_FORMAT_GENERIC_SHORT6: return "generic-short6"; case 
ATOMISP_INPUT_FORMAT_GENERIC_SHORT7: return "generic-short7"; case ATOMISP_INPUT_FORMAT_GENERIC_SHORT8: return "generic-short8"; case ATOMISP_INPUT_FORMAT_YUV420_8_SHIFT: return "yuv420-8-shift"; case ATOMISP_INPUT_FORMAT_YUV420_10_SHIFT: return "yuv420-10-shift"; case ATOMISP_INPUT_FORMAT_EMBEDDED: return "embedded-8"; case ATOMISP_INPUT_FORMAT_USER_DEF1: return "user-def-8-type-1"; case ATOMISP_INPUT_FORMAT_USER_DEF2: return "user-def-8-type-2"; case ATOMISP_INPUT_FORMAT_USER_DEF3: return "user-def-8-type-3"; case ATOMISP_INPUT_FORMAT_USER_DEF4: return "user-def-8-type-4"; case ATOMISP_INPUT_FORMAT_USER_DEF5: return "user-def-8-type-5"; case ATOMISP_INPUT_FORMAT_USER_DEF6: return "user-def-8-type-6"; case ATOMISP_INPUT_FORMAT_USER_DEF7: return "user-def-8-type-7"; case ATOMISP_INPUT_FORMAT_USER_DEF8: return "user-def-8-type-8"; default: assert(!"Unknown stream format"); return "unknown-stream-format"; } }; static const char *debug_frame_format2str(const enum ia_css_frame_format frame_format) { switch (frame_format) { case IA_CSS_FRAME_FORMAT_NV11: return "NV11"; case IA_CSS_FRAME_FORMAT_NV12: return "NV12"; case IA_CSS_FRAME_FORMAT_NV12_16: return "NV12_16"; case IA_CSS_FRAME_FORMAT_NV12_TILEY: return "NV12_TILEY"; case IA_CSS_FRAME_FORMAT_NV16: return "NV16"; case IA_CSS_FRAME_FORMAT_NV21: return "NV21"; case IA_CSS_FRAME_FORMAT_NV61: return "NV61"; case IA_CSS_FRAME_FORMAT_YV12: return "YV12"; case IA_CSS_FRAME_FORMAT_YV16: return "YV16"; case IA_CSS_FRAME_FORMAT_YUV420: return "YUV420"; case IA_CSS_FRAME_FORMAT_YUV420_16: return "YUV420_16"; case IA_CSS_FRAME_FORMAT_YUV422: return "YUV422"; case IA_CSS_FRAME_FORMAT_YUV422_16: return "YUV422_16"; case IA_CSS_FRAME_FORMAT_UYVY: return "UYVY"; case IA_CSS_FRAME_FORMAT_YUYV: return "YUYV"; case IA_CSS_FRAME_FORMAT_YUV444: return "YUV444"; case IA_CSS_FRAME_FORMAT_YUV_LINE: return "YUV_LINE"; case IA_CSS_FRAME_FORMAT_RAW: return "RAW"; case IA_CSS_FRAME_FORMAT_RGB565: return "RGB565"; case 
IA_CSS_FRAME_FORMAT_PLANAR_RGB888:
		return "PLANAR_RGB888";
	case IA_CSS_FRAME_FORMAT_RGBA888:
		return "RGBA888";
	case IA_CSS_FRAME_FORMAT_QPLANE6:
		return "QPLANE6";
	case IA_CSS_FRAME_FORMAT_BINARY_8:
		return "BINARY_8";
	case IA_CSS_FRAME_FORMAT_MIPI:
		return "MIPI";
	case IA_CSS_FRAME_FORMAT_RAW_PACKED:
		return "RAW_PACKED";
	case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
		return "CSI_MIPI_YUV420_8";
	case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
		return "CSI_MIPI_LEGACY_YUV420_8";
	case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_10:
		return "CSI_MIPI_YUV420_10";
	default:
		/* Unknown enumerator: trap in debug builds, degrade to a
		 * placeholder string in release builds. */
		assert(!"Unknown frame format");
		return "unknown-frame-format";
	}
}

/*
 * Trace a human-readable dump of an SP cell's state (PC, status register
 * and the broken/idle/sleeping/stalling flags) at debug level 2.
 *
 * @state: SP state snapshot to print
 * @cell:  label naming the cell in the trace output
 */
static void debug_print_sp_state(const sp_state_t *state, const char *cell)
{
	assert(cell);
	assert(state);
	ia_css_debug_dtrace(2, "%s state:\n", cell);
	ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc);
	ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register",
			    state->status_register);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is broken", state->is_broken);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping",
			    state->is_sleeping);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling",
			    state->is_stalling);
	return;
}

/*
 * Trace a human-readable dump of an ISP cell's state (PC, status register
 * and the broken/idle/sleeping/stalling flags) at debug level 2.
 *
 * @state: ISP state snapshot to print
 * @cell:  label naming the cell in the trace output
 */
static void debug_print_isp_state(const isp_state_t *state, const char *cell)
{
	assert(state);
	assert(cell);
	ia_css_debug_dtrace(2, "%s state:\n", cell);
	ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc);
	ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register",
			    state->status_register);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is broken", state->is_broken);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping",
			    state->is_sleeping);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling",
			    state->is_stalling);
	return;
}

/* Dump the current ISP0 state; when the cell is stalling, the FIFO stall
 * details are printed as well (continued on the following source line). */
void ia_css_debug_dump_isp_state(void)
{
	isp_state_t state;
	isp_stall_t stall;

	isp_get_state(ISP0_ID, &state, &stall);

	debug_print_isp_state(&state, "ISP");

	if
(state.is_stalling) { if (!IS_ISP2401) { ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[0] if_prim_a_FIFO stalled", stall.fifo0); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[1] if_prim_b_FIFO stalled", stall.fifo1); } ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[2] dma_FIFO stalled", stall.fifo2); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[3] gdc0_FIFO stalled", stall.fifo3); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[4] gdc1_FIFO stalled", stall.fifo4); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[5] gpio_FIFO stalled", stall.fifo5); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[6] sp_FIFO stalled", stall.fifo6); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "status & control stalled", stall.stat_ctrl); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled", stall.dmem); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vmem stalled", stall.vmem); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem1 stalled", stall.vamem1); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem2 stalled", stall.vamem2); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem3 stalled", stall.vamem3); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "hmem stalled", stall.hmem); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "pmem stalled", stall.pmem); } return; } void ia_css_debug_dump_sp_state(void) { sp_state_t state; sp_stall_t stall; sp_get_state(SP0_ID, &state, &stall); debug_print_sp_state(&state, "SP"); if (state.is_stalling) { ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isys_FIFO stalled", stall.fifo0); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "if_sec_FIFO stalled", stall.fifo1); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "str_to_mem_FIFO stalled", stall.fifo2); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dma_FIFO stalled", stall.fifo3); if (!IS_ISP2401) ia_css_debug_dtrace(2, "\t%-32s: %d\n", "if_prim_a_FIFO stalled", stall.fifo4); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isp_FIFO stalled", stall.fifo5); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gp_FIFO stalled", stall.fifo6); if (!IS_ISP2401) ia_css_debug_dtrace(2, "\t%-32s: %d\n", "if_prim_b_FIFO 
stalled", stall.fifo7); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc0_FIFO stalled", stall.fifo8); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc1_FIFO stalled", stall.fifo9); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "irq FIFO stalled", stall.fifoa); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled", stall.dmem); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "control master stalled", stall.control_master); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "i-cache master stalled", stall.icache_master); } ia_css_debug_dump_trace(); return; } static void debug_print_fifo_channel_state(const fifo_channel_state_t *state, const char *descr) { assert(state); assert(descr); ia_css_debug_dtrace(2, "FIFO channel: %s\n", descr); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "source valid", state->src_valid); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "fifo accept", state->fifo_accept); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "fifo valid", state->fifo_valid); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "sink accept", state->sink_accept); return; } void ia_css_debug_dump_pif_a_isp_fifo_state(void) { fifo_channel_state_t pif_to_isp, isp_to_pif; fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_IF0_TO_ISP0, &pif_to_isp); fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_ISP0_TO_IF0, &isp_to_pif); debug_print_fifo_channel_state(&pif_to_isp, "Primary IF A to ISP"); debug_print_fifo_channel_state(&isp_to_pif, "ISP to Primary IF A"); } void ia_css_debug_dump_pif_b_isp_fifo_state(void) { fifo_channel_state_t pif_to_isp, isp_to_pif; fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_IF1_TO_ISP0, &pif_to_isp); fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_ISP0_TO_IF1, &isp_to_pif); debug_print_fifo_channel_state(&pif_to_isp, "Primary IF B to ISP"); debug_print_fifo_channel_state(&isp_to_pif, "ISP to Primary IF B"); } void ia_css_debug_dump_str2mem_sp_fifo_state(void) { fifo_channel_state_t s2m_to_sp, sp_to_s2m; fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_STREAM2MEM0_TO_SP0, 
&s2m_to_sp); fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_SP0_TO_STREAM2MEM0, &sp_to_s2m); debug_print_fifo_channel_state(&s2m_to_sp, "Stream-to-memory to SP"); debug_print_fifo_channel_state(&sp_to_s2m, "SP to stream-to-memory"); } #ifndef ISP2401 static void debug_print_if_state(input_formatter_state_t *state, const char *id) { unsigned int val; const char *st_vsync_active_low = (state->vsync_active_low ? "low" : "high"); const char *st_hsync_active_low = (state->hsync_active_low ? "low" : "high"); const char *fsm_sync_status_str = "unknown"; const char *fsm_crop_status_str = "unknown"; const char *fsm_padding_status_str = "unknown"; int st_stline = state->start_line; int st_stcol = state->start_column; int st_crpht = state->cropped_height; int st_crpwd = state->cropped_width; int st_verdcm = state->ver_decimation; int st_hordcm = state->hor_decimation; int st_ver_deinterleaving = state->ver_deinterleaving; int st_hor_deinterleaving = state->hor_deinterleaving; int st_leftpd = state->left_padding; int st_eoloff = state->eol_offset; int st_vmstartaddr = state->vmem_start_address; int st_vmendaddr = state->vmem_end_address; int st_vmincr = state->vmem_increment; int st_yuv420 = state->is_yuv420; int st_allow_fifo_overflow = state->allow_fifo_overflow; int st_block_fifo_when_no_req = state->block_fifo_when_no_req; assert(state); ia_css_debug_dtrace(2, "InputFormatter State (%s):\n", id); ia_css_debug_dtrace(2, "\tConfiguration:\n"); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start line", st_stline); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start column", st_stcol); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped height", st_crpht); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped width", st_crpwd); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Ver decimation", st_verdcm); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Hor decimation", st_hordcm); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Ver deinterleaving", st_ver_deinterleaving); ia_css_debug_dtrace(2, 
"\t\t%-32s: %d\n", "Hor deinterleaving", st_hor_deinterleaving); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Left padding", st_leftpd); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "EOL offset (bytes)", st_eoloff); ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n", "VMEM start address", st_vmstartaddr); ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n", "VMEM end address", st_vmendaddr); ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n", "VMEM increment", st_vmincr); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "YUV 420 format", st_yuv420); ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n", "Vsync", st_vsync_active_low); ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n", "Hsync", st_hsync_active_low); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Allow FIFO overflow", st_allow_fifo_overflow); /* Flag that tells whether the IF gives backpressure on frames */ /* * FYI, this is only on the frame request (indicate), when the IF has * synch'd on a frame it will always give back pressure */ ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Block when no request", st_block_fifo_when_no_req); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "IF_BLOCKED_FIFO_NO_REQ_ADDRESS", input_formatter_reg_load(INPUT_FORMATTER0_ID, HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS) ); ia_css_debug_dtrace(2, "\t%-32s:\n", "InputSwitch State"); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "_REG_GP_IFMT_input_switch_lut_reg0", gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg0)); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "_REG_GP_IFMT_input_switch_lut_reg1", gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg1)); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "_REG_GP_IFMT_input_switch_lut_reg2", gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg2)); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "_REG_GP_IFMT_input_switch_lut_reg3", gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg3)); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "_REG_GP_IFMT_input_switch_lut_reg4", 
gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg4)); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "_REG_GP_IFMT_input_switch_lut_reg5", gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg5)); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "_REG_GP_IFMT_input_switch_lut_reg6", gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg6)); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "_REG_GP_IFMT_input_switch_lut_reg7", gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_lut_reg7)); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "_REG_GP_IFMT_input_switch_fsync_lut", gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_input_switch_fsync_lut)); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "_REG_GP_IFMT_srst", gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_srst)); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "_REG_GP_IFMT_slv_reg_srst", gp_device_reg_load(GP_DEVICE0_ID, _REG_GP_IFMT_slv_reg_srst)); ia_css_debug_dtrace(2, "\tFSM Status:\n"); val = state->fsm_sync_status; if (val > 7) fsm_sync_status_str = "ERROR"; switch (val & 0x7) { case 0: fsm_sync_status_str = "idle"; break; case 1: fsm_sync_status_str = "request frame"; break; case 2: fsm_sync_status_str = "request lines"; break; case 3: fsm_sync_status_str = "request vectors"; break; case 4: fsm_sync_status_str = "send acknowledge"; break; default: fsm_sync_status_str = "unknown"; break; } ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n", "FSM Synchronization Status", val, fsm_sync_status_str); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Synchronization Counter", state->fsm_sync_counter); val = state->fsm_crop_status; if (val > 7) fsm_crop_status_str = "ERROR"; switch (val & 0x7) { case 0: fsm_crop_status_str = "idle"; break; case 1: fsm_crop_status_str = "wait line"; break; case 2: fsm_crop_status_str = "crop line"; break; case 3: fsm_crop_status_str = "crop pixel"; break; case 4: fsm_crop_status_str = "pass pixel"; break; case 5: fsm_crop_status_str = "pass line"; break; case 6: 
fsm_crop_status_str = "lost line"; break; default: fsm_crop_status_str = "unknown"; break; } ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n", "FSM Crop Status", val, fsm_crop_status_str); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Crop Line Counter", state->fsm_crop_line_counter); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Crop Pixel Counter", state->fsm_crop_pixel_counter); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Deinterleaving idx buffer", state->fsm_deinterleaving_index); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM H decimation counter", state->fsm_dec_h_counter); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM V decimation counter", state->fsm_dec_v_counter); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM block V decimation counter", state->fsm_dec_block_v_counter); val = state->fsm_padding_status; if (val > 7) fsm_padding_status_str = "ERROR"; switch (val & 0x7) { case 0: fsm_padding_status_str = "idle"; break; case 1: fsm_padding_status_str = "left pad"; break; case 2: fsm_padding_status_str = "write"; break; case 3: fsm_padding_status_str = "right pad"; break; case 4: fsm_padding_status_str = "send end of line"; break; default: fsm_padding_status_str = "unknown"; break; } ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n", "FSM Padding Status", val, fsm_padding_status_str); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Padding element idx counter", state->fsm_padding_elem_counter); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support error", state->fsm_vector_support_error); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support buf full", state->fsm_vector_buffer_full); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support", state->vector_support); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Fifo sensor data lost", state->sensor_data_lost); } static void debug_print_if_bin_state(input_formatter_bin_state_t *state) { ia_css_debug_dtrace(2, "Stream-to-memory state:\n"); ia_css_debug_dtrace(2, "\t%-32s: %d\n", "reset", 
		    state->reset);
	/* Tail of the input-formatter-bin state printer: remaining
	 * configuration fields of the binary input formatter. */
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "input endianness",
			    state->input_endianness);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "output endianness",
			    state->output_endianness);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "bitswap", state->bitswap);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "block_synch",
			    state->block_synch);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "packet_synch",
			    state->packet_synch);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "readpostwrite_sync",
			    state->readpostwrite_synch);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "is_2ppc", state->is_2ppc);
	ia_css_debug_dtrace(2, "\t%-32s: %d\n", "en_status_update",
			    state->en_status_update);
}

/*
 * Dump the state of all input formatters: primary IF A, primary IF B
 * and the binary (str2mem) formatter, each followed by the FIFO state
 * of its connection to the ISP/SP.
 */
static void ia_css_debug_dump_if_state(void)
{
	input_formatter_state_t if_state;
	input_formatter_bin_state_t if_bin_state;

	/* Primary IF A + its ISP FIFO */
	input_formatter_get_state(INPUT_FORMATTER0_ID, &if_state);
	debug_print_if_state(&if_state, "Primary IF A");
	ia_css_debug_dump_pif_a_isp_fifo_state();

	/* Primary IF B + its ISP FIFO (reuses the same state buffer) */
	input_formatter_get_state(INPUT_FORMATTER1_ID, &if_state);
	debug_print_if_state(&if_state, "Primary IF B");
	ia_css_debug_dump_pif_b_isp_fifo_state();

	/* Binary formatter + the str2mem-to-SP FIFO */
	input_formatter_bin_get_state(INPUT_FORMATTER3_ID, &if_bin_state);
	debug_print_if_bin_state(&if_bin_state);
	ia_css_debug_dump_str2mem_sp_fifo_state();
}
#endif

/*
 * Dump the full state of DMA0: command FSM flags, the last command
 * received (decoded to text), connection-group-0 registers, the
 * control/pack/read/write FSM state, per-port interface state and
 * per-channel configuration.
 */
void ia_css_debug_dump_dma_state(void)
{
	/* note: the var below is made static as it is quite large;
	   if it is not static it ends up on the stack which could
	   cause issues for drivers */
	static dma_state_t state;
	int i, ch_id;

	const char *fsm_cmd_st_lbl = "FSM Command flag state";
	const char *fsm_ctl_st_lbl = "FSM Control flag state";
	const char *fsm_ctl_state = NULL;
	const char *fsm_ctl_flag = NULL;
	const char *fsm_pack_st = NULL;
	const char *fsm_read_st = NULL;
	const char *fsm_write_st = NULL;
	char last_cmd_str[64];

	dma_get_state(DMA0_ID, &state);
	/* Print header for DMA dump status */
	ia_css_debug_dtrace(2, "DMA dump status:\n");

	/* Print FSM command flag state; the flags are not mutually
	 * exclusive in the register, so each is reported separately. */
	if (state.fsm_command_idle)
		ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "IDLE");
	if (state.fsm_command_run)
		ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "RUN");
	if (state.fsm_command_stalling)
		ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl,
				    "STALL");
	if (state.fsm_command_error)
		ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl,
				    "ERROR");

	/* Print last command along with the channel */
	ch_id = state.last_command_channel;

	switch (state.last_command) {
	case DMA_COMMAND_READ:
		snprintf(last_cmd_str, 64,
			 "Read 2D Block [Channel: %d]", ch_id);
		break;
	case DMA_COMMAND_WRITE:
		snprintf(last_cmd_str, 64,
			 "Write 2D Block [Channel: %d]", ch_id);
		break;
	case DMA_COMMAND_SET_CHANNEL:
		snprintf(last_cmd_str, 64, "Set Channel [Channel: %d]", ch_id);
		break;
	case DMA_COMMAND_SET_PARAM:
		snprintf(last_cmd_str, 64,
			 "Set Param: %d [Channel: %d]",
			 state.last_command_param, ch_id);
		break;
	case DMA_COMMAND_READ_SPECIFIC:
		snprintf(last_cmd_str, 64,
			 "Read Specific 2D Block [Channel: %d]", ch_id);
		break;
	case DMA_COMMAND_WRITE_SPECIFIC:
		snprintf(last_cmd_str, 64,
			 "Write Specific 2D Block [Channel: %d]", ch_id);
		break;
	case DMA_COMMAND_INIT:
		snprintf(last_cmd_str, 64,
			 "Init 2D Block on Device A [Channel: %d]", ch_id);
		break;
	case DMA_COMMAND_INIT_SPECIFIC:
		snprintf(last_cmd_str, 64,
			 "Init Specific 2D Block [Channel: %d]", ch_id);
		break;
	case DMA_COMMAND_RST:
		snprintf(last_cmd_str, 64, "DMA SW Reset");
		break;
	case N_DMA_COMMANDS:
		snprintf(last_cmd_str, 64, "UNKNOWN");
		break;
	default:
		snprintf(last_cmd_str, 64,
			 "unknown [Channel: %d]", ch_id);
		break;
	}
	ia_css_debug_dtrace(2, "\t%-32s: (0x%X : %s)\n",
			    "last command received", state.last_command,
			    last_cmd_str);

	/* Print DMA registers */
	ia_css_debug_dtrace(2, "\t%-32s\n",
			    "DMA registers, connection group 0");
	ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Command",
			    state.current_command);
	ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address A",
			    state.current_addr_a);
	ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address B",
			    state.current_addr_b);

	if (state.fsm_ctrl_idle)
fsm_ctl_flag = "IDLE"; else if (state.fsm_ctrl_run) fsm_ctl_flag = "RUN"; else if (state.fsm_ctrl_stalling) fsm_ctl_flag = "STAL"; else if (state.fsm_ctrl_error) fsm_ctl_flag = "ERROR"; else fsm_ctl_flag = "UNKNOWN"; switch (state.fsm_ctrl_state) { case DMA_CTRL_STATE_IDLE: fsm_ctl_state = "Idle state"; break; case DMA_CTRL_STATE_REQ_RCV: fsm_ctl_state = "Req Rcv state"; break; case DMA_CTRL_STATE_RCV: fsm_ctl_state = "Rcv state"; break; case DMA_CTRL_STATE_RCV_REQ: fsm_ctl_state = "Rcv Req state"; break; case DMA_CTRL_STATE_INIT: fsm_ctl_state = "Init state"; break; case N_DMA_CTRL_STATES: fsm_ctl_state = "Unknown"; break; } ia_css_debug_dtrace(2, "\t\t%-32s: %s -> %s\n", fsm_ctl_st_lbl, fsm_ctl_flag, fsm_ctl_state); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source dev", state.fsm_ctrl_source_dev); ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source addr", state.fsm_ctrl_source_addr); ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source stride", state.fsm_ctrl_source_stride); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source width", state.fsm_ctrl_source_width); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source height", state.fsm_ctrl_source_height); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source dev", state.fsm_ctrl_pack_source_dev); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest dev", state.fsm_ctrl_pack_dest_dev); ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest addr", state.fsm_ctrl_dest_addr); ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest stride", state.fsm_ctrl_dest_stride); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source width", state.fsm_ctrl_pack_source_width); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest height", state.fsm_ctrl_pack_dest_height); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest width", state.fsm_ctrl_pack_dest_width); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source elems", 
state.fsm_ctrl_pack_source_elems); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest elems", state.fsm_ctrl_pack_dest_elems); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack extension", state.fsm_ctrl_pack_extension); if (state.pack_idle) fsm_pack_st = "IDLE"; if (state.pack_run) fsm_pack_st = "RUN"; if (state.pack_stalling) fsm_pack_st = "STALL"; if (state.pack_error) fsm_pack_st = "ERROR"; ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Pack flag state", fsm_pack_st); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack cnt height", state.pack_cnt_height); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack src cnt width", state.pack_src_cnt_width); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack dest cnt width", state.pack_dest_cnt_width); if (state.read_state == DMA_RW_STATE_IDLE) fsm_read_st = "Idle state"; if (state.read_state == DMA_RW_STATE_REQ) fsm_read_st = "Req state"; if (state.read_state == DMA_RW_STATE_NEXT_LINE) fsm_read_st = "Next line"; if (state.read_state == DMA_RW_STATE_UNLOCK_CHANNEL) fsm_read_st = "Unlock channel"; ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Read state", fsm_read_st); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Read cnt height", state.read_cnt_height); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Read cnt width", state.read_cnt_width); if (state.write_state == DMA_RW_STATE_IDLE) fsm_write_st = "Idle state"; if (state.write_state == DMA_RW_STATE_REQ) fsm_write_st = "Req state"; if (state.write_state == DMA_RW_STATE_NEXT_LINE) fsm_write_st = "Next line"; if (state.write_state == DMA_RW_STATE_UNLOCK_CHANNEL) fsm_write_st = "Unlock channel"; ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Write state", fsm_write_st); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write height", state.write_height); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write width", state.write_width); for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) { dma_port_state_t *port = &state.port_states[i]; ia_css_debug_dtrace(2, "\tDMA 
device interface %d\n", i); ia_css_debug_dtrace(2, "\t\tDMA internal side state\n"); ia_css_debug_dtrace(2, "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n", port->req_cs, port->req_we_n, port->req_run, port->req_ack); ia_css_debug_dtrace(2, "\t\tMaster Output side state\n"); ia_css_debug_dtrace(2, "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n", port->send_cs, port->send_we_n, port->send_run, port->send_ack); ia_css_debug_dtrace(2, "\t\tFifo state\n"); if (port->fifo_state == DMA_FIFO_STATE_WILL_BE_FULL) ia_css_debug_dtrace(2, "\t\t\tFiFo will be full\n"); else if (port->fifo_state == DMA_FIFO_STATE_FULL) ia_css_debug_dtrace(2, "\t\t\tFifo Full\n"); else if (port->fifo_state == DMA_FIFO_STATE_EMPTY) ia_css_debug_dtrace(2, "\t\t\tFifo Empty\n"); else ia_css_debug_dtrace(2, "\t\t\tFifo state unknown\n"); ia_css_debug_dtrace(2, "\t\tFifo counter %d\n\n", port->fifo_counter); } for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) { dma_channel_state_t *ch = &state.channel_states[i]; ia_css_debug_dtrace(2, "\t%-32s: %d\n", "DMA channel register", i); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Connection", ch->connection); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Sign extend", ch->sign_extend); ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev A", ch->stride_a); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev A", ch->elems_a); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev A", ch->cropping_a); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev A", ch->width_a); ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev B", ch->stride_b); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev B", ch->elems_b); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev B", ch->cropping_b); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev B", ch->width_b); ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Height", ch->height); } ia_css_debug_dtrace(2, "\n"); return; } void ia_css_debug_dump_dma_sp_fifo_state(void) { fifo_channel_state_t dma_to_sp, sp_to_dma; 
	fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_DMA0_TO_SP0,
			       &dma_to_sp);
	fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_SP0_TO_DMA0,
			       &sp_to_dma);
	debug_print_fifo_channel_state(&dma_to_sp, "DMA to SP");
	debug_print_fifo_channel_state(&sp_to_dma, "SP to DMA");
	return;
}

/* Dump the FIFO-channel state between DMA0 and ISP0, both directions. */
void ia_css_debug_dump_dma_isp_fifo_state(void)
{
	fifo_channel_state_t dma_to_isp, isp_to_dma;

	fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_DMA0_TO_ISP0,
			       &dma_to_isp);
	fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_ISP0_TO_DMA0,
			       &isp_to_dma);
	debug_print_fifo_channel_state(&dma_to_isp, "DMA to ISP");
	debug_print_fifo_channel_state(&isp_to_dma, "ISP to DMA");
	return;
}

/* Dump the FIFO-channel state between SP0 and ISP0, both directions. */
void ia_css_debug_dump_isp_sp_fifo_state(void)
{
	fifo_channel_state_t sp_to_isp, isp_to_sp;

	fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_SP0_TO_ISP0,
			       &sp_to_isp);
	fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_ISP0_TO_SP0,
			       &isp_to_sp);
	debug_print_fifo_channel_state(&sp_to_isp, "SP to ISP");
	debug_print_fifo_channel_state(&isp_to_sp, "ISP to SP");
	return;
}

/* Dump the FIFO-channel state between GDC0 and ISP0, both directions. */
void ia_css_debug_dump_isp_gdc_fifo_state(void)
{
	fifo_channel_state_t gdc_to_isp, isp_to_gdc;

	fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_GDC0_TO_ISP0,
			       &gdc_to_isp);
	fifo_channel_get_state(FIFO_MONITOR0_ID, FIFO_CHANNEL_ISP0_TO_GDC0,
			       &isp_to_gdc);
	debug_print_fifo_channel_state(&gdc_to_isp, "GDC to ISP");
	debug_print_fifo_channel_state(&isp_to_gdc, "ISP to GDC");
	return;
}

/* Dump the state of every FIFO channel known to FIFO monitor 0. */
void ia_css_debug_dump_all_fifo_state(void)
{
	int i;
	fifo_monitor_state_t state;

	fifo_monitor_get_state(FIFO_MONITOR0_ID, &state);

	for (i = 0; i < N_FIFO_CHANNEL; i++)
		debug_print_fifo_channel_state(&state.fifo_channels[i],
					       "squepfstqkt");
	/* NOTE(review): "squepfstqkt" looks like a placeholder/garbled
	 * label rather than a meaningful channel name — confirm intent. */
	return;
}

/* Print the static (firmware-declared) properties of a binary:
 * id, mode, size limits, cropping and feature-enable flags. */
static void debug_binary_info_print(const struct ia_css_binary_xinfo *info)
{
	assert(info);
	ia_css_debug_dtrace(2, "id = %d\n", info->sp.id);
	ia_css_debug_dtrace(2, "mode = %d\n", info->sp.pipeline.mode);
	ia_css_debug_dtrace(2, "max_input_width = %d\n",
			    info->sp.input.max_width);
	ia_css_debug_dtrace(2, "min_output_width = %d\n",
			    info->sp.output.min_width);
	ia_css_debug_dtrace(2, "max_output_width = %d\n",
			    info->sp.output.max_width);
	ia_css_debug_dtrace(2, "top_cropping = %d\n",
			    info->sp.pipeline.top_cropping);
	ia_css_debug_dtrace(2, "left_cropping = %d\n",
			    info->sp.pipeline.left_cropping);
	ia_css_debug_dtrace(2, "xmem_addr = %d\n", info->xmem_addr);
	ia_css_debug_dtrace(2, "enable_vf_veceven = %d\n",
			    info->sp.enable.vf_veceven);
	ia_css_debug_dtrace(2, "enable_dis = %d\n", info->sp.enable.dis);
	ia_css_debug_dtrace(2, "enable_uds = %d\n", info->sp.enable.uds);
	ia_css_debug_dtrace(2, "enable ds = %d\n", info->sp.enable.ds);
	ia_css_debug_dtrace(2, "s3atbl_use_dmem = %d\n",
			    info->sp.s3a.s3atbl_use_dmem);
	return;
}

/* Print the run-time configuration of a binary: frame geometries for
 * input/internal/output/viewfinder plus DIS, shading and 3A table sizes. */
void ia_css_debug_binary_print(const struct ia_css_binary *bi)
{
	unsigned int i;

	debug_binary_info_print(bi->info);
	ia_css_debug_dtrace(2,
			    "input:  %dx%d, format = %d, padded width = %d\n",
			    bi->in_frame_info.res.width,
			    bi->in_frame_info.res.height,
			    bi->in_frame_info.format,
			    bi->in_frame_info.padded_width);
	ia_css_debug_dtrace(2,
			    "internal :%dx%d, format = %d, padded width = %d\n",
			    bi->internal_frame_info.res.width,
			    bi->internal_frame_info.res.height,
			    bi->internal_frame_info.format,
			    bi->internal_frame_info.padded_width);
	/* Only output ports with a non-zero width are configured. */
	for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
		if (bi->out_frame_info[i].res.width != 0) {
			ia_css_debug_dtrace(2,
					    "out%d:   %dx%d, format = %d, padded width = %d\n",
					    i,
					    bi->out_frame_info[i].res.width,
					    bi->out_frame_info[i].res.height,
					    bi->out_frame_info[i].format,
					    bi->out_frame_info[i].padded_width);
		}
	}
	ia_css_debug_dtrace(2,
			    "vf out: %dx%d, format = %d, padded width = %d\n",
			    bi->vf_frame_info.res.width,
			    bi->vf_frame_info.res.height,
			    bi->vf_frame_info.format,
			    bi->vf_frame_info.padded_width);
	ia_css_debug_dtrace(2, "online = %d\n", bi->online);
	ia_css_debug_dtrace(2, "input_buf_vectors = %d\n",
			    bi->input_buf_vectors);
	ia_css_debug_dtrace(2, "deci_factor_log2 = %d\n",
			    bi->deci_factor_log2);
	ia_css_debug_dtrace(2, "vf_downscale_log2 = %d\n",
			    bi->vf_downscale_log2);
	ia_css_debug_dtrace(2, "dis_deci_factor_log2 = %d\n",
			    bi->dis.deci_factor_log2);
	ia_css_debug_dtrace(2, "dis hor coef num = %d\n",
			    bi->dis.coef.pad.width);
	ia_css_debug_dtrace(2, "dis ver coef num = %d\n",
			    bi->dis.coef.pad.height);
	ia_css_debug_dtrace(2, "dis hor proj num = %d\n",
			    bi->dis.proj.pad.height);
	ia_css_debug_dtrace(2, "sctbl_width_per_color = %d\n",
			    bi->sctbl_width_per_color);
	ia_css_debug_dtrace(2, "s3atbl_width = %d\n", bi->s3atbl_width);
	ia_css_debug_dtrace(2, "s3atbl_height = %d\n", bi->s3atbl_height);
	return;
}

/* Print frame geometry plus the host-mapped address of every plane;
 * the plane layout printed depends on the frame's pixel format. */
void ia_css_debug_frame_print(const struct ia_css_frame *frame,
			      const char *descr)
{
	char *data = NULL;

	assert(frame);
	assert(descr);

	data = (char *)HOST_ADDRESS(frame->data);
	ia_css_debug_dtrace(2, "frame %s (%p):\n", descr, frame);
	ia_css_debug_dtrace(2, "  resolution = %dx%d\n",
			    frame->frame_info.res.width,
			    frame->frame_info.res.height);
	ia_css_debug_dtrace(2, "  padded width = %d\n",
			    frame->frame_info.padded_width);
	ia_css_debug_dtrace(2, "  format = %d\n", frame->frame_info.format);
	switch (frame->frame_info.format) {
	case IA_CSS_FRAME_FORMAT_NV12:
	case IA_CSS_FRAME_FORMAT_NV16:
	case IA_CSS_FRAME_FORMAT_NV21:
	case IA_CSS_FRAME_FORMAT_NV61:
		ia_css_debug_dtrace(2, "  Y = %p\n",
				    data + frame->planes.nv.y.offset);
		ia_css_debug_dtrace(2, "  UV = %p\n",
				    data + frame->planes.nv.uv.offset);
		break;
	case IA_CSS_FRAME_FORMAT_YUYV:
	case IA_CSS_FRAME_FORMAT_UYVY:
	case IA_CSS_FRAME_FORMAT_CSI_MIPI_YUV420_8:
	case IA_CSS_FRAME_FORMAT_CSI_MIPI_LEGACY_YUV420_8:
	case IA_CSS_FRAME_FORMAT_YUV_LINE:
		ia_css_debug_dtrace(2, "  YUYV = %p\n",
				    data + frame->planes.yuyv.offset);
		break;
	case IA_CSS_FRAME_FORMAT_YUV420:
	case IA_CSS_FRAME_FORMAT_YUV422:
	case IA_CSS_FRAME_FORMAT_YUV444:
	case IA_CSS_FRAME_FORMAT_YV12:
	case IA_CSS_FRAME_FORMAT_YV16:
	case IA_CSS_FRAME_FORMAT_YUV420_16:
	case IA_CSS_FRAME_FORMAT_YUV422_16:
		ia_css_debug_dtrace(2, "  Y = %p\n",
				    data + frame->planes.yuv.y.offset);
		ia_css_debug_dtrace(2, "  U = %p\n", data +
				    frame->planes.yuv.u.offset);
		ia_css_debug_dtrace(2, "  V = %p\n",
				    data + frame->planes.yuv.v.offset);
		break;
	case IA_CSS_FRAME_FORMAT_RAW_PACKED:
		ia_css_debug_dtrace(2, "  RAW PACKED = %p\n",
				    data + frame->planes.raw.offset);
		break;
	case IA_CSS_FRAME_FORMAT_RAW:
		ia_css_debug_dtrace(2, "  RAW = %p\n",
				    data + frame->planes.raw.offset);
		break;
	case IA_CSS_FRAME_FORMAT_RGBA888:
	case IA_CSS_FRAME_FORMAT_RGB565:
		ia_css_debug_dtrace(2, "  RGB = %p\n",
				    data + frame->planes.rgb.offset);
		break;
	case IA_CSS_FRAME_FORMAT_QPLANE6:
		ia_css_debug_dtrace(2, "  R    = %p\n",
				    data + frame->planes.plane6.r.offset);
		ia_css_debug_dtrace(2, "  RatB = %p\n",
				    data + frame->planes.plane6.r_at_b.offset);
		ia_css_debug_dtrace(2, "  Gr   = %p\n",
				    data + frame->planes.plane6.gr.offset);
		ia_css_debug_dtrace(2, "  Gb   = %p\n",
				    data + frame->planes.plane6.gb.offset);
		ia_css_debug_dtrace(2, "  B    = %p\n",
				    data + frame->planes.plane6.b.offset);
		ia_css_debug_dtrace(2, "  BatR = %p\n",
				    data + frame->planes.plane6.b_at_r.offset);
		break;
	case IA_CSS_FRAME_FORMAT_BINARY_8:
		ia_css_debug_dtrace(2, "  Binary data = %p\n",
				    data + frame->planes.binary.data.offset);
		break;
	default:
		ia_css_debug_dtrace(2, "  unknown frame type\n");
		break;
	}
	return;
}

#if SP_DEBUG != SP_DEBUG_NONE

/*
 * Print the SP debug state. The body is selected at compile time by
 * SP_DEBUG: DUMP prints the fixed debug[] word array with per-index
 * labels; COPY prints new copy-trace entries since the last call;
 * TRACE prints new generic trace entries per trace channel; MINIMAL
 * dumps the raw debug[] array.
 */
void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
				       *state)
{
#endif
#if SP_DEBUG == SP_DEBUG_DUMP
	assert(state);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "current SP software counter: %d\n",
			    state->debug[0]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty output buffer queue head: 0x%x\n",
			    state->debug[1]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty output buffer queue tail: 0x%x\n",
			    state->debug[2]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty s3a buffer queue head: 0x%x\n",
			    state->debug[3]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty s3a buffer queue tail: 0x%x\n",
			    state->debug[4]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "full output buffer queue head: 0x%x\n",
			    state->debug[5]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "full output buffer queue tail: 0x%x\n",
			    state->debug[6]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "full s3a buffer queue head: 0x%x\n",
			    state->debug[7]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "full s3a buffer queue tail: 0x%x\n",
			    state->debug[8]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "event queue head: 0x%x\n",
			    state->debug[9]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "event queue tail: 0x%x\n",
			    state->debug[10]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "num of stages of current pipeline: 0x%x\n",
			    state->debug[11]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "DDR address of stage 1: 0x%x\n",
			    state->debug[12]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "DDR address of stage 2: 0x%x\n",
			    state->debug[13]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "current stage out_vf buffer idx: 0x%x\n",
			    state->debug[14]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "current stage output buffer idx: 0x%x\n",
			    state->debug[15]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "current stage s3a buffer idx: 0x%x\n",
			    state->debug[16]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "first char of current stage name: 0x%x\n",
			    state->debug[17]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "current SP thread id: 0x%x\n",
			    state->debug[18]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty output buffer address 1: 0x%x\n",
			    state->debug[19]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty output buffer address 2: 0x%x\n",
			    state->debug[20]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty out_vf buffer address 1: 0x%x\n",
			    state->debug[21]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty out_vf buffer address 2: 0x%x\n",
			    state->debug[22]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty s3a_hi buffer address 1: 0x%x\n",
			    state->debug[23]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty s3a_hi buffer address 2: 0x%x\n",
			    state->debug[24]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty s3a_lo buffer address 1: 0x%x\n",
			    state->debug[25]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty s3a_lo buffer address 2: 0x%x\n",
			    state->debug[26]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty dis_hor buffer address 1: 0x%x\n",
			    state->debug[27]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty dis_hor buffer address 2: 0x%x\n",
			    state->debug[28]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty dis_ver buffer address 1: 0x%x\n",
			    state->debug[29]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty dis_ver buffer address 2: 0x%x\n",
			    state->debug[30]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "empty param buffer address: 0x%x\n",
			    state->debug[31]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "first incorrect frame address: 0x%x\n",
			    state->debug[32]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "first incorrect frame container address: 0x%x\n",
			    state->debug[33]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "first incorrect frame container payload: 0x%x\n",
			    state->debug[34]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "first incorrect s3a_hi address: 0x%x\n",
			    state->debug[35]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "first incorrect s3a_hi container address: 0x%x\n",
			    state->debug[36]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "first incorrect s3a_hi container payload: 0x%x\n",
			    state->debug[37]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "first incorrect s3a_lo address: 0x%x\n",
			    state->debug[38]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "first incorrect s3a_lo container address: 0x%x\n",
			    state->debug[39]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "first incorrect s3a_lo container payload: 0x%x\n",
			    state->debug[40]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "number of calling flash start function: 0x%x\n",
			    state->debug[41]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "number of calling flash close function: 0x%x\n",
			    state->debug[42]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "number of flashed frame: 0x%x\n",
			    state->debug[43]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "flash in use flag: 0x%x\n",
			    state->debug[44]);
	/* NOTE(review): debug[46] is printed before debug[45] here;
	 * looks intentional (label grouping) but worth confirming
	 * against the SP firmware's debug-word layout. */
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "number of update frame flashed flag: 0x%x\n",
			    state->debug[46]);
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
			    "number of active threads: 0x%x\n",
			    state->debug[45]);

#elif SP_DEBUG == SP_DEBUG_COPY

	/* Remember last_index because we only want to print new entries */
	static int last_index;
	int sp_index = state->index;
	int n;

	assert(state);
	if (sp_index < last_index) {
		/* SP has been reset */
		last_index = 0;
	}

	if (last_index == 0) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
				    "copy-trace init: sp_dbg_if_start_line=%d, sp_dbg_if_start_column=%d, sp_dbg_if_cropped_height=%d, sp_debg_if_cropped_width=%d\n",
				    state->if_start_line,
				    state->if_start_column,
				    state->if_cropped_height,
				    state->if_cropped_width);
	}

	if ((last_index + SH_CSS_SP_DBG_TRACE_DEPTH) < sp_index) {
		/* last index can be multiple rounds behind */
		/* while trace size is only SH_CSS_SP_DBG_TRACE_DEPTH */
		last_index = sp_index - SH_CSS_SP_DBG_TRACE_DEPTH;
	}

	/* Print only the entries added since the previous call; the
	 * trace buffer is circular with depth SH_CSS_SP_DBG_TRACE_DEPTH. */
	for (n = last_index; n < sp_index; n++) {
		int i = n % SH_CSS_SP_DBG_TRACE_DEPTH;

		if (state->trace[i].frame != 0) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
					    "copy-trace: frame=%d, line=%d, pixel_distance=%d, mipi_used_dword=%d, sp_index=%d\n",
					    state->trace[i].frame,
					    state->trace[i].line,
					    state->trace[i].pixel_distance,
					    state->trace[i].mipi_used_dword,
					    state->trace[i].sp_index);
		}
	}

	last_index = sp_index;

#elif SP_DEBUG == SP_DEBUG_TRACE

	/*
	 * This is just an example how TRACE_FILE_ID (see ia_css_debug.sp.h) will
	 * me mapped on the file name string.
	 *
	 * Adjust this to your trace case!
	 */
	static char const *const id2filename[8] = {
		"param_buffer.sp.c | tagger.sp.c | pipe_data.sp.c",
		"isp_init.sp.c",
		"sp_raw_copy.hive.c",
		"dma_configure.sp.c",
		"sp.hive.c",
		"event_proxy_sp.hive.c",
		"circular_buffer.sp.c",
		"frame_buffer.sp.c"
	};

	/* Example SH_CSS_SP_DBG_NR_OF_TRACES==1 */
	/* Adjust this to your trace case */
	static char const *trace_name[SH_CSS_SP_DBG_NR_OF_TRACES] = {
		"default"
	};

	/* Remember host_index_last because we only want to print new entries */
	static int host_index_last[SH_CSS_SP_DBG_NR_OF_TRACES] = { 0 };
	int t, n;

	assert(state);

	for (t = 0; t < SH_CSS_SP_DBG_NR_OF_TRACES; t++) {
		int sp_index_last = state->index_last[t];

		if (sp_index_last < host_index_last[t]) {
			/* SP has been reset */
			host_index_last[t] = 0;
		}

		if ((host_index_last[t] + SH_CSS_SP_DBG_TRACE_DEPTH) <
		    sp_index_last) {
			/* last index can be multiple rounds behind */
			/* while trace size is only SH_CSS_SP_DBG_TRACE_DEPTH */
			ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
					    "Warning: trace %s has gap of %d traces\n",
					    trace_name[t],
					    (sp_index_last -
					     (host_index_last[t] +
					      SH_CSS_SP_DBG_TRACE_DEPTH)));

			host_index_last[t] =
			    sp_index_last - SH_CSS_SP_DBG_TRACE_DEPTH;
		}

		/* Each entry packs a file id and a line number into
		 * 'location'; entries with a zero time stamp are unused. */
		for (n = host_index_last[t]; n < sp_index_last; n++) {
			int i = n % SH_CSS_SP_DBG_TRACE_DEPTH;
			int l = state->trace[t][i].location &
			    ((1 << SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS) - 1);
			int fid = state->trace[t][i].location >>
			    SH_CSS_SP_DBG_TRACE_FILE_ID_BIT_POS;
			int ts = state->trace[t][i].time_stamp;

			if (ts) {
				ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
						    "%05d trace=%s, file=%s:%d, data=0x%08x\n",
						    ts,
						    trace_name[t],
						    id2filename[fid], l,
						    state->trace[t][i].data);
			}
		}
		host_index_last[t] = sp_index_last;
	}

#elif SP_DEBUG == SP_DEBUG_MINIMAL
	int i;
	int base = 0;
	int limit = SH_CSS_NUM_SP_DEBUG;
	int step = 1;

	assert(state);

	for (i = base; i < limit; i += step) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
				    "sp_dbg_trace[%d] = %d\n",
				    i, state->debug[i]);
	}
#endif

#if SP_DEBUG != SP_DEBUG_NONE
	return;
}
#endif

#if !defined(ISP2401)
static
/*
 * Print the state of one CSI-2 receiver MIPI port: decoded IRQ error info,
 * the raw status/config registers and the per-lane sync/rx counters.
 * Output goes through ia_css_debug_dtrace() at trace level 2.
 */
void debug_print_rx_mipi_port_state(mipi_port_state_t *state)
{
	int i;
	unsigned int bits, infos;

	assert(state);

	bits = state->irq_status;
	/* Translate raw HW IRQ status bits into IA_CSS_RX_IRQ_INFO_* flags. */
	infos = ia_css_isys_rx_translate_irq_infos(bits);

	ia_css_debug_dtrace(2, "\t\t%-32s: (irq reg = 0x%X)\n",
			    "receiver errors", bits);

	if (infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
		ia_css_debug_dtrace(2, "\t\t\tbuffer overrun\n");
	if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT)
		ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission error\n");
	if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
		ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission sync error\n");
	if (infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL)
		ia_css_debug_dtrace(2, "\t\t\tcontrol error\n");
	if (infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
		ia_css_debug_dtrace(2, "\t\t\t2 or more ECC errors\n");
	if (infos & IA_CSS_RX_IRQ_INFO_ERR_CRC)
		ia_css_debug_dtrace(2, "\t\t\tCRC mismatch\n");
	if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
		ia_css_debug_dtrace(2, "\t\t\tunknown error\n");
	if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
		ia_css_debug_dtrace(2, "\t\t\tframe sync error\n");
	if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
		ia_css_debug_dtrace(2, "\t\t\tframe data error\n");
	if (infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
		ia_css_debug_dtrace(2, "\t\t\tdata timeout\n");
	if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
		ia_css_debug_dtrace(2, "\t\t\tunknown escape command entry\n");
	if (infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
		ia_css_debug_dtrace(2, "\t\t\tline sync error\n");

	/* Raw register/counter dump. */
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "device_ready", state->device_ready);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "irq_status", state->irq_status);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "irq_enable", state->irq_enable);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "timeout_count", state->timeout_count);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "init_count", state->init_count);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "raw16_18", state->raw16_18);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "sync_count", state->sync_count);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "rx_count", state->rx_count);

	/* Per-lane counters (MIPI_4LANE_CFG lanes). */
	for (i = 0; i < MIPI_4LANE_CFG; i++) {
		ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n",
				    "lane_sync_count[", i, "]",
				    state->lane_sync_count[i]);
	}

	for (i = 0; i < MIPI_4LANE_CFG; i++) {
		ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n",
				    "lane_rx_count[", i, "]",
				    state->lane_rx_count[i]);
	}

	return;
}

/*
 * Print the compression configuration of one CSI-2 RX channel:
 * both compression schemes plus the per-format predictor and
 * compressor settings.
 */
static void debug_print_rx_channel_state(rx_channel_state_t *state)
{
	int i;

	assert(state);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "compression_scheme0", state->comp_scheme0);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "compression_scheme1", state->comp_scheme1);

	for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) {
		ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n",
				    "MIPI Predictor ", i, state->pred[i]);
	}

	for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) {
		ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n",
				    "MIPI Compressor ", i, state->comp[i]);
	}

	return;
}

/*
 * Dump the complete CSI receiver state: the global configuration first,
 * then every MIPI port (via debug_print_rx_mipi_port_state) and every
 * RX channel (via debug_print_rx_channel_state).
 */
static void debug_print_rx_state(receiver_state_t *state)
{
	int i;

	assert(state);
	ia_css_debug_dtrace(2, "CSI Receiver State:\n");

	ia_css_debug_dtrace(2, "\tConfiguration:\n");

	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "fs_to_ls_delay", state->fs_to_ls_delay);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "ls_to_data_delay", state->ls_to_data_delay);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "data_to_le_delay", state->data_to_le_delay);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "le_to_fe_delay", state->le_to_fe_delay);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "fe_to_fs_delay", state->fe_to_fs_delay);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "le_to_fs_delay", state->le_to_fs_delay);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "is_two_ppc", state->is_two_ppc);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "backend_rst", state->backend_rst);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "raw18", state->raw18);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "force_raw8", state->force_raw8);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "raw16", state->raw16);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_gsp_acc_ovl", state->be_gsp_acc_ovl);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_srst", state->be_srst);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_is_two_ppc", state->be_is_two_ppc);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_comp_format0", state->be_comp_format0);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_comp_format1", state->be_comp_format1);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_comp_format2", state->be_comp_format2);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_comp_format3", state->be_comp_format3);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_sel", state->be_sel);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_raw16_config", state->be_raw16_config);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_raw18_config", state->be_raw18_config);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_force_raw8", state->be_force_raw8);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_irq_status", state->be_irq_status);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "be_irq_clear", state->be_irq_clear);

	/* mipi port state */
	for (i = 0; i < N_MIPI_PORT_ID; i++) {
		ia_css_debug_dtrace(2, "\tMIPI Port %d State:\n", i);

		debug_print_rx_mipi_port_state(&state->mipi_port_state[i]);
	}
	/* end of mipi port state */

	/* rx channel state */
	for (i = 0; i < N_RX_CHANNEL_ID; i++) {
		ia_css_debug_dtrace(2, "\tRX Channel %d State:\n", i);

		debug_print_rx_channel_state(&state->rx_channel_state[i]);
	}
	/* end of rx channel state */

	return;
}
#endif

/*
 * Public entry point: read and dump the CSI receiver state.
 * Only implemented for the 2400 input system; a no-op on ISP2401.
 */
void ia_css_debug_dump_rx_state(void)
{
#if !defined(ISP2401)
	receiver_state_t state;

	receiver_get_state(RX0_ID, &state);
	debug_print_rx_state(&state);
#endif
}

/*
 * Dump the SP software debug state (when SP_DEBUG is enabled at build
 * time), followed by the buffer-queue info and the pipeline thread map.
 */
void ia_css_debug_dump_sp_sw_debug_info(void)
{
#if SP_DEBUG != SP_DEBUG_NONE
	struct sh_css_sp_debug_state state;

	sh_css_sp_get_debug_state(&state);
	ia_css_debug_print_sp_debug_state(&state);
#endif
	ia_css_bufq_dump_queue_info();
	ia_css_pipeline_dump_thread_map_info();
	return;
}

#if !defined(ISP2401)
static void
debug_print_isys_capture_unit_state(capture_unit_state_t *state)
{
	/* Print the registers/counters of one input-system capture unit. */
	assert(state);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Packet_Length", state->Packet_Length);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Received_Length", state->Received_Length);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Received_Short_Packets",
			    state->Received_Short_Packets);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Received_Long_Packets",
			    state->Received_Long_Packets);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Last_Command", state->Last_Command);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Next_Command", state->Next_Command);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Last_Acknowledge", state->Last_Acknowledge);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Next_Acknowledge", state->Next_Acknowledge);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "FSM_State_Info", state->FSM_State_Info);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "StartMode", state->StartMode);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Start_Addr", state->Start_Addr);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Mem_Region_Size", state->Mem_Region_Size);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Num_Mem_Regions", state->Num_Mem_Regions);
	return;
}

/* Print the registers/counters of the input-system acquisition unit. */
static void debug_print_isys_acquisition_unit_state(
    acquisition_unit_state_t *state)
{
	assert(state);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Received_Short_Packets",
			    state->Received_Short_Packets);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Received_Long_Packets",
			    state->Received_Long_Packets);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Last_Command", state->Last_Command);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Next_Command", state->Next_Command);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Last_Acknowledge", state->Last_Acknowledge);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Next_Acknowledge", state->Next_Acknowledge);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "FSM_State_Info", state->FSM_State_Info);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Int_Cntr_Info", state->Int_Cntr_Info);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Start_Addr", state->Start_Addr);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Mem_Region_Size", state->Mem_Region_Size);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "Num_Mem_Regions", state->Num_Mem_Regions);
}

/*
 * Print the input-system control unit state: command/ack handshake,
 * FSM states and the memory-region configuration of capture units
 * A/B/C plus the acquisition unit.
 */
static void debug_print_isys_ctrl_unit_state(ctrl_unit_state_t *state)
{
	assert(state);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_cmd", state->last_cmd);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_cmd", state->next_cmd);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_ack", state->last_ack);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_ack", state->next_ack);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "top_fsm_state", state->top_fsm_state);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captA_fsm_state", state->captA_fsm_state);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captB_fsm_state", state->captB_fsm_state);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captC_fsm_state", state->captC_fsm_state);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "acq_fsm_state", state->acq_fsm_state);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captA_start_addr", state->captA_start_addr);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captB_start_addr", state->captB_start_addr);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captC_start_addr", state->captC_start_addr);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captA_mem_region_size",
			    state->captA_mem_region_size);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captB_mem_region_size",
			    state->captB_mem_region_size);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captC_mem_region_size",
			    state->captC_mem_region_size);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captA_num_mem_regions",
			    state->captA_num_mem_regions);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captB_num_mem_regions",
			    state->captB_num_mem_regions);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "captC_num_mem_regions",
			    state->captC_num_mem_regions);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "acq_start_addr", state->acq_start_addr);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "acq_mem_region_size", state->acq_mem_region_size);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "acq_num_mem_regions", state->acq_num_mem_regions);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "capt_reserve_one_mem_region",
			    state->capt_reserve_one_mem_region);
	return;
}

/*
 * Dump the full input-system state: global configuration, then each
 * capture unit, each acquisition unit and each control unit.
 */
static void debug_print_isys_state(input_system_state_t *state)
{
	int i;

	assert(state);
	ia_css_debug_dtrace(2, "InputSystem State:\n");

	/* configuration */
	ia_css_debug_dtrace(2, "\tConfiguration:\n");

	/* NOTE: the "str_multiCastA_sel" / "str_deint_portB_cnd" labels below
	 * carry historic typos; kept as-is since tools may parse the output. */
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "str_multiCastA_sel", state->str_multicastA_sel);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "str_multicastB_sel", state->str_multicastB_sel);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "str_multicastC_sel", state->str_multicastC_sel);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "str_mux_sel", state->str_mux_sel);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "str_mon_status", state->str_mon_status);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "str_mon_irq_cond", state->str_mon_irq_cond);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "str_mon_irq_en", state->str_mon_irq_en);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "isys_srst", state->isys_srst);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "isys_slv_reg_srst", state->isys_slv_reg_srst);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "str_deint_portA_cnt", state->str_deint_portA_cnt);
	ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
			    "str_deint_portB_cnd", state->str_deint_portB_cnt);
	/* end of configuration */

	/* capture unit state */
	for (i = 0; i < N_CAPTURE_UNIT_ID; i++) {
		capture_unit_state_t *capture_unit_state;

		ia_css_debug_dtrace(2, "\tCaptureUnit %d State:\n", i);

		capture_unit_state = &state->capture_unit[i];
		debug_print_isys_capture_unit_state(capture_unit_state);
	}
	/* end of capture unit state */

	/* acquisition unit state */
	for (i = 0; i < N_ACQUISITION_UNIT_ID; i++) {
		acquisition_unit_state_t *acquisition_unit_state;

		ia_css_debug_dtrace(2, "\tAcquisitionUnit %d State:\n", i);

		acquisition_unit_state = &state->acquisition_unit[i];
		debug_print_isys_acquisition_unit_state(acquisition_unit_state);
	}
	/* end of acquisition unit state */

	/* control unit state */
	for (i = 0; i < N_CTRL_UNIT_ID; i++) {
		ia_css_debug_dtrace(2, "\tControlUnit %d State:\n", i);

		debug_print_isys_ctrl_unit_state(&state->ctrl_unit_state[i]);
	}
	/* end of control unit state */
}
#endif

/*
 * Public entry point: read and dump the input-system state.
 * On 2400 the state is decoded locally; on ISP2401 the dump is
 * delegated to input_system_dump_state().
 */
void ia_css_debug_dump_isys_state(void)
{
	/* 'static' to keep this large structure off the stack. */
	static input_system_state_t state;

	input_system_get_state(INPUT_SYSTEM0_ID, &state);

#ifndef ISP2401
	debug_print_isys_state(&state);
#else
	input_system_dump_state(INPUT_SYSTEM0_ID, &state);
#endif
}

/*
 * One-stop debug dump: prints RX, IF, ISP, SP, FIFO, DMA and input-system
 * state (plus the IRQ2 controller on 2400) under the given context label.
 */
void ia_css_debug_dump_debug_info(const char *context)
{
	if (!context)
		context = "No Context provided";

	ia_css_debug_dtrace(2, "CSS Debug Info dump [Context = %s]\n", context);
	if (!IS_ISP2401)
		ia_css_debug_dump_rx_state();

#ifndef ISP2401
	ia_css_debug_dump_if_state();
#endif
	ia_css_debug_dump_isp_state();
	ia_css_debug_dump_isp_sp_fifo_state();
	ia_css_debug_dump_isp_gdc_fifo_state();
	ia_css_debug_dump_sp_state();
	ia_css_debug_dump_perf_counters();

#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
	sh_css_dump_thread_wait_info();
	sh_css_dump_pipe_stage_info();
	sh_css_dump_pipe_stripe_info();
#endif
	ia_css_debug_dump_dma_isp_fifo_state();
	ia_css_debug_dump_dma_sp_fifo_state();
	ia_css_debug_dump_dma_state();

	if (!IS_ISP2401) {
		struct irq_controller_state state;

		ia_css_debug_dump_isys_state();

		/* 2400 only: also dump the input-system IRQ controller. */
		irq_controller_get_state(IRQ2_ID, &state);

		ia_css_debug_dtrace(2, "\t%-32s:\n",
				    "Input System IRQ Controller State");

		ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
				    "irq_edge", state.irq_edge);

		ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
				    "irq_mask", state.irq_mask);

		ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
				    "irq_status", state.irq_status);

		ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
				    "irq_enable", state.irq_enable);

		ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
				    "irq_level_not_pulse",
				    state.irq_level_not_pulse);
	} else {
		ia_css_debug_dump_isys_state();
	}

	ia_css_debug_tagger_state();
	return;
}

/* this function is for debug use, it can make SP go to sleep
  state after each frame, then user can dump the stable SP dmem.
  this function can be called after ia_css_start_sp()
  and before sh_css_init_buffer_queues()
*/
void ia_css_debug_enable_sp_sleep_mode(enum ia_css_sp_sleep_mode mode)
{
	const struct ia_css_fw_info *fw;
	unsigned int HIVE_ADDR_sp_sleep_mode;

	fw = &sh_css_sp_fw;
	HIVE_ADDR_sp_sleep_mode = fw->info.sp.sleep_mode;

	(void)HIVE_ADDR_sp_sleep_mode; /* Suppress warnings in CRUN */

	sp_dmem_store_uint32(SP0_ID,
			     (unsigned int)sp_address_of(sp_sleep_mode),
			     (uint32_t)mode);
}

/* Kick the SP out of sleep by setting its start bit. */
void ia_css_debug_wake_up_sp(void)
{
	/*hrt_ctl_start(SP); */
	sp_ctrl_setbit(SP0_ID, SP_SC_REG, SP_START_BIT);
}

/* Look up the DMEM parameter struct of a kernel and cast it to its
 * sh_css_isp_<type>_params type. */
#define FIND_DMEM_PARAMS_TYPE(stream, kernel, type) \
	(struct HRTCAT(HRTCAT(sh_css_isp_, type), _params) *) \
	findf_dmem_params(stream, offsetof(struct ia_css_memory_offsets, dmem.kernel))

#define FIND_DMEM_PARAMS(stream, kernel) FIND_DMEM_PARAMS_TYPE(stream, kernel, kernel)

/* Find a stage that support the kernel and return the parameters for that kernel */
static char *
findf_dmem_params(struct ia_css_stream *stream, short idx)
{
	int i;

	/* Walk every stage of every pipe; the first stage whose binary has a
	 * non-negative DMEM offset for this kernel provides the parameters. */
	for (i = 0; i < stream->num_pipes; i++) {
		struct ia_css_pipe *pipe = stream->pipes[i];
		struct ia_css_pipeline *pipeline = ia_css_pipe_get_pipeline(pipe);
		struct ia_css_pipeline_stage *stage;

		for (stage = pipeline->stages; stage; stage = stage->next) {
			struct ia_css_binary *binary = stage->binary;
			short *offsets = (short *)&binary->info->mem_offsets.offsets.param->dmem;
			short dmem_offset = offsets[idx];
			const struct ia_css_host_data *isp_data =
			    ia_css_isp_param_get_mem_init(&binary->mem_params,
							  IA_CSS_PARAM_CLASS_PARAM, IA_CSS_ISP_DMEM0);
			if (dmem_offset < 0)
				continue;
			return &isp_data->address[dmem_offset];
		}
	}
	/* No stage supports this kernel. */
	return NULL;
}

/*
 * Dump the ISP kernel parameters selected by the 'enable' bit mask
 * (IA_CSS_DEBUG_DUMP_* flags, or IA_CSS_DEBUG_DUMP_ALL for everything).
 * Each kernel's parameters are fetched from the first supporting stage
 * via FIND_DMEM_PARAMS().
 */
void ia_css_debug_dump_isp_params(struct ia_css_stream *stream,
				  unsigned int enable)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "ISP PARAMETERS:\n");

	assert(stream);
	if ((enable & IA_CSS_DEBUG_DUMP_FPN)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_fpn_dump(FIND_DMEM_PARAMS(stream, fpn), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_OB)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_ob_dump(FIND_DMEM_PARAMS(stream, ob), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_SC)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_sc_dump(FIND_DMEM_PARAMS(stream, sc), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_WB)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_wb_dump(FIND_DMEM_PARAMS(stream, wb), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_DP)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_dp_dump(FIND_DMEM_PARAMS(stream, dp), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_BNR)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_bnr_dump(FIND_DMEM_PARAMS(stream, bnr), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_S3A)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_s3a_dump(FIND_DMEM_PARAMS(stream, s3a), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_DE)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_de_dump(FIND_DMEM_PARAMS(stream, de), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_YNR)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		/* YNR covers both noise reduction and edge enhancement. */
		ia_css_nr_dump(FIND_DMEM_PARAMS_TYPE(stream, nr, ynr),  IA_CSS_DEBUG_VERBOSE);
		ia_css_yee_dump(FIND_DMEM_PARAMS(stream, yee), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_CSC)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		/* All three color-space conversions share the csc params type. */
		ia_css_csc_dump(FIND_DMEM_PARAMS(stream, csc), IA_CSS_DEBUG_VERBOSE);
		ia_css_yuv2rgb_dump(FIND_DMEM_PARAMS_TYPE(stream, yuv2rgb, csc),
				    IA_CSS_DEBUG_VERBOSE);
		ia_css_rgb2yuv_dump(FIND_DMEM_PARAMS_TYPE(stream, rgb2yuv, csc),
				    IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_GC)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_gc_dump(FIND_DMEM_PARAMS(stream, gc), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_TNR)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_tnr_dump(FIND_DMEM_PARAMS(stream, tnr), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_ANR)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_anr_dump(FIND_DMEM_PARAMS(stream, anr), IA_CSS_DEBUG_VERBOSE);
	}
	if ((enable & IA_CSS_DEBUG_DUMP_CE)
	    || (enable & IA_CSS_DEBUG_DUMP_ALL)) {
		ia_css_ce_dump(FIND_DMEM_PARAMS(stream, ce), IA_CSS_DEBUG_VERBOSE);
	}
}

/*
 * Trace the SP raw-copy line counter read from SP dmem; only prints
 * when the value changed since the previous call. With 'reduced' set,
 * any non-negative count is collapsed to 1 (copy loop active).
 */
void sh_css_dump_sp_raw_copy_linecount(bool reduced)
{
	const struct ia_css_fw_info *fw;
	unsigned int HIVE_ADDR_raw_copy_line_count;
	s32 raw_copy_line_count;
	static s32 prev_raw_copy_line_count = -1;

	fw = &sh_css_sp_fw;
	HIVE_ADDR_raw_copy_line_count =
	    fw->info.sp.raw_copy_line_count;

	(void)HIVE_ADDR_raw_copy_line_count;

	sp_dmem_load(SP0_ID,
		     (unsigned int)sp_address_of(raw_copy_line_count),
		     &raw_copy_line_count,
		     sizeof(raw_copy_line_count));

	/* only indicate if copy loop is active */
	if (reduced)
		raw_copy_line_count = (raw_copy_line_count < 0) ?
				      raw_copy_line_count : 1;
	/* do the handling */
	if (prev_raw_copy_line_count != raw_copy_line_count) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
				    "sh_css_dump_sp_raw_copy_linecount() line_count=%d\n",
				    raw_copy_line_count);
		prev_raw_copy_line_count = raw_copy_line_count;
	}
}

/*
 * Trace the binary currently running on the SP (pipe id in the upper
 * 16 bits, binary id in the lower 16). Prints only on change, together
 * with the number of polls since the last change.
 */
void ia_css_debug_dump_isp_binary(void)
{
	const struct ia_css_fw_info *fw;
	unsigned int HIVE_ADDR_pipeline_sp_curr_binary_id;
	u32 curr_binary_id;
	static u32 prev_binary_id = 0xFFFFFFFF;
	static u32 sample_count;

	fw = &sh_css_sp_fw;
	HIVE_ADDR_pipeline_sp_curr_binary_id = fw->info.sp.curr_binary_id;

	(void)HIVE_ADDR_pipeline_sp_curr_binary_id;

	sp_dmem_load(SP0_ID,
		     (unsigned int)sp_address_of(pipeline_sp_curr_binary_id),
		     &curr_binary_id,
		     sizeof(curr_binary_id));

	/* do the handling */
	sample_count++;
	if (prev_binary_id != curr_binary_id) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
				    "sh_css_dump_isp_binary() pipe_id=%d, binary_id=%d, sample_count=%d\n",
				    (curr_binary_id >> 16),
				    (curr_binary_id & 0x0ffff),
				    sample_count);
		sample_count = 0;
		prev_binary_id = curr_binary_id;
	}
}

/*
 * Dump the input-system error counters kept by the SP (2400 only;
 * returns immediately on ISP2401).
 */
void ia_css_debug_dump_perf_counters(void)
{
	const struct ia_css_fw_info *fw;
	int i;
	unsigned int
	HIVE_ADDR_ia_css_isys_sp_error_cnt;
	/* N_MIPI_PORT_ID + 1: 3 Capture Units and 1 Acquire Unit. */
	s32 ia_css_sp_input_system_error_cnt[N_MIPI_PORT_ID + 1];

	if (IS_ISP2401)
		return;

	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "Input System Error Counters:\n");

	fw = &sh_css_sp_fw;
	HIVE_ADDR_ia_css_isys_sp_error_cnt = fw->info.sp.perf_counter_input_system_error;

	(void)HIVE_ADDR_ia_css_isys_sp_error_cnt;

	sp_dmem_load(SP0_ID,
		     (unsigned int)sp_address_of(ia_css_isys_sp_error_cnt),
		     &ia_css_sp_input_system_error_cnt,
		     sizeof(ia_css_sp_input_system_error_cnt));

	for (i = 0; i < N_MIPI_PORT_ID + 1; i++) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\tport[%d] = %d\n",
				    i, ia_css_sp_input_system_error_cnt[i]);
	}
}

/*
 * @brief Initialize the debug mode.
 * Refer to "ia_css_debug.h" for more details.
 */
bool ia_css_debug_mode_init(void)
{
	bool rc;

	rc = sh_css_sp_init_dma_sw_reg(0);
	return rc;
}

/*
 * @brief Disable the DMA channel.
 * Refer to "ia_css_debug.h" for more details.
 */
bool
ia_css_debug_mode_disable_dma_channel(int dma_id,
				      int channel_id, int request_type)
{
	bool rc;

	rc = sh_css_sp_set_dma_sw_reg(dma_id, channel_id, request_type, false);
	return rc;
}

/*
 * @brief Enable the DMA channel.
 * Refer to "ia_css_debug.h" for more details.
 */
bool
ia_css_debug_mode_enable_dma_channel(int dma_id,
				     int channel_id, int request_type)
{
	bool rc;

	rc = sh_css_sp_set_dma_sw_reg(dma_id, channel_id, request_type, true);
	return rc;
}

/* Emit one formatted line of Graphviz "dot" output, wrapped in the
 * DPG_START/DPG_END markers so it can be extracted from the trace log. */
static
void dtrace_dot(const char *fmt, ...)
{
	va_list ap;

	assert(fmt);
	va_start(ap, fmt);

	ia_css_debug_dtrace(IA_CSS_DEBUG_INFO, "%s", DPG_START);
	ia_css_debug_vdtrace(IA_CSS_DEBUG_INFO, fmt, ap);
	ia_css_debug_dtrace(IA_CSS_DEBUG_INFO, "%s", DPG_END);
	va_end(ap);
}

#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
/* Dump the per-thread semaphore-wait words from SP dmem. */
void sh_css_dump_thread_wait_info(void)
{
	const struct ia_css_fw_info *fw;
	int i;
	unsigned int HIVE_ADDR_sp_thread_wait;
	s32 sp_thread_wait[MAX_THREAD_NUM];

	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "SEM WAITS:\n");

	fw = &sh_css_sp_fw;
	HIVE_ADDR_sp_thread_wait =
	    fw->info.sp.debug_wait;

	(void)HIVE_ADDR_sp_thread_wait;

	sp_dmem_load(SP0_ID,
		     (unsigned int)sp_address_of(sp_thread_wait),
		     &sp_thread_wait,
		     sizeof(sp_thread_wait));
	for (i = 0; i < MAX_THREAD_NUM; i++) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
				    "\twait[%d] = 0x%X\n",
				    i, sp_thread_wait[i]);
	}
}

/* Dump the per-thread pipe stage ids from SP dmem. */
void sh_css_dump_pipe_stage_info(void)
{
	const struct ia_css_fw_info *fw;
	int i;
	unsigned int HIVE_ADDR_sp_pipe_stage;
	s32 sp_pipe_stage[MAX_THREAD_NUM];

	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STAGE:\n");

	fw = &sh_css_sp_fw;
	HIVE_ADDR_sp_pipe_stage =
	    fw->info.sp.debug_stage;

	(void)HIVE_ADDR_sp_pipe_stage;

	sp_dmem_load(SP0_ID,
		     (unsigned int)sp_address_of(sp_pipe_stage),
		     &sp_pipe_stage,
		     sizeof(sp_pipe_stage));
	for (i = 0; i < MAX_THREAD_NUM; i++) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
				    "\tstage[%d] = %d\n",
				    i, sp_pipe_stage[i]);
	}
}

/* Dump the per-thread pipe stripe ids from SP dmem. */
void sh_css_dump_pipe_stripe_info(void)
{
	const struct ia_css_fw_info *fw;
	int i;
	unsigned int HIVE_ADDR_sp_pipe_stripe;
	s32 sp_pipe_stripe[MAX_THREAD_NUM];

	ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STRIPE:\n");

	fw = &sh_css_sp_fw;
	HIVE_ADDR_sp_pipe_stripe =
	    fw->info.sp.debug_stripe;

	(void)HIVE_ADDR_sp_pipe_stripe;

	sp_dmem_load(SP0_ID,
		     (unsigned int)sp_address_of(sp_pipe_stripe),
		     &sp_pipe_stripe,
		     sizeof(sp_pipe_stripe));
	for (i = 0; i < MAX_THREAD_NUM; i++) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
				    "\tstripe[%d] = %d\n",
				    i, sp_pipe_stripe[i]);
	}
}
#endif

static void
ia_css_debug_pipe_graph_dump_frame(
    const struct
    ia_css_frame *frame,
    enum ia_css_pipe_id id,
    char const *blob_name,
    char const *frame_name,
    bool in_frame)
{
	/* Emit a dot "box" node for the frame plus an edge connecting it to
	 * the given binary node; 'in_frame' selects the edge direction. */
	char bufinfo[100];

	if (frame->dynamic_queue_id == SH_CSS_INVALID_QUEUE_ID) {
		snprintf(bufinfo, sizeof(bufinfo), "Internal");
	} else {
		snprintf(bufinfo, sizeof(bufinfo), "Queue: %s %s",
			 pipe_id_to_str[id],
			 queue_id_to_str[frame->dynamic_queue_id]);
	}
	dtrace_dot(
	    "node [shape = box, fixedsize=true, width=2, height=0.7]; \"%p\" [label = \"%s\\n%d(%d) x %d, %dbpp\\n%s\"];",
	    frame,
	    debug_frame_format2str(frame->frame_info.format),
	    frame->frame_info.res.width,
	    frame->frame_info.padded_width,
	    frame->frame_info.res.height,
	    frame->frame_info.raw_bit_depth,
	    bufinfo);

	if (in_frame) {
		dtrace_dot(
		    "\"%p\"->\"%s(pipe%d)\" [label = %s_frame];",
		    frame,
		    blob_name, id, frame_name);
	} else {
		dtrace_dot(
		    "\"%s(pipe%d)\"->\"%p\" [label = %s_frame];",
		    blob_name, id,
		    frame,
		    frame_name);
	}
}

/* Open the dot graph and print the legend of binary enable-flag
 * abbreviations used in the node labels. */
void ia_css_debug_pipe_graph_dump_prologue(void)
{
	dtrace_dot("digraph sh_css_pipe_graph {");
	dtrace_dot("rankdir=LR;");

	dtrace_dot("fontsize=9;");
	dtrace_dot("label = \"\\nEnable options: rp=reduced pipe, vfve=vf_veceven, dvse=dvs_envelope, dvs6=dvs_6axis, bo=block_out, fbds=fixed_bayer_ds, bf6=bayer_fir_6db, rawb=raw_binning, cont=continuous, disc=dis_crop\\n"
		   "dp2a=dp_2adjacent, outp=output, outt=out_table, reff=ref_frame, par=params, gam=gamma, cagdc=ca_gdc, ispa=isp_addresses, inf=in_frame, outf=out_frame, hs=high_speed, inpc=input_chunking\"");
}

/* Close the dot graph: flush the pending ring-buffer node, draw the
 * sensor/input-system nodes when a stream format is known, and reset
 * all the per-graph bookkeeping for the next dump. */
void ia_css_debug_pipe_graph_dump_epilogue(void)
{
	if (strlen(ring_buffer) > 0) {
		dtrace_dot(ring_buffer);
	}

	if (pg_inst.stream_format != N_ATOMISP_INPUT_FORMAT) {
		/* An input stream format has been set so assume we have
		 * an input system and sensor
		 */
		dtrace_dot(
		    "node [shape = doublecircle, fixedsize=true, width=2.5]; \"input_system\" [label = \"Input system\"];");

		dtrace_dot(
		    "\"input_system\"->\"%s\" [label = \"%s\"];",
		    dot_id_input_bin, debug_stream_format2str(pg_inst.stream_format));

		dtrace_dot(
		    "node [shape = doublecircle, fixedsize=true, width=2.5]; \"sensor\" [label = \"Sensor\"];");

		dtrace_dot(
		    "\"sensor\"->\"input_system\" [label = \"%s\\n%d x %d\\n(%d x %d)\"];",
		    debug_stream_format2str(pg_inst.stream_format),
		    pg_inst.width, pg_inst.height,
		    pg_inst.eff_width, pg_inst.eff_height);
	}

	dtrace_dot("}");

	/* Reset temp strings */
	memset(dot_id_input_bin, 0, sizeof(dot_id_input_bin));
	memset(ring_buffer, 0, sizeof(ring_buffer));

	pg_inst.do_init = true;
	pg_inst.width = 0;
	pg_inst.height = 0;
	pg_inst.eff_width = 0;
	pg_inst.eff_height = 0;
	pg_inst.stream_format = N_ATOMISP_INPUT_FORMAT;
}

/*
 * Emit the dot representation of one pipeline stage: a circle node
 * labeled with the binary/firmware name and its enable flags, plus
 * edges to all of its input and output frames.
 */
void
ia_css_debug_pipe_graph_dump_stage(
    struct ia_css_pipeline_stage *stage,
    enum ia_css_pipe_id id)
{
	char blob_name[SH_CSS_MAX_BINARY_NAME + 10] = "<unknown type>";
	char const *bin_type = "<unknown type>";
	int i;

	assert(stage);
	if (stage->sp_func != IA_CSS_PIPELINE_NO_FUNC)
		return;

	if (pg_inst.do_init) {
		ia_css_debug_pipe_graph_dump_prologue();
		pg_inst.do_init = false;
	}

	if (stage->binary) {
		bin_type = "binary";
		if (stage->binary->info->blob)
			snprintf(blob_name, sizeof(blob_name), "%s_stage%d",
				 stage->binary->info->blob->name, stage->stage_num);
	} else if (stage->firmware) {
		bin_type = "firmware";

		strscpy(blob_name, IA_CSS_EXT_ISP_PROG_NAME(stage->firmware),
			sizeof(blob_name));
	}

	/* Guard in case of binaries that don't have any binary_info */
	if (stage->binary_info) {
		char enable_info1[100];
		char enable_info2[100];
		char enable_info3[100];
		char enable_info[200];
		struct ia_css_binary_info *bi = stage->binary_info;

		/* Split it in 2 function-calls to keep the amount of
		 * parameters per call "reasonable"
		 */
		snprintf(enable_info1, sizeof(enable_info1),
			 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
			 bi->enable.reduced_pipe ? "rp," : "",
			 bi->enable.vf_veceven ? "vfve," : "",
			 bi->enable.dis ? "dis," : "",
			 bi->enable.dvs_envelope ? "dvse," : "",
			 bi->enable.uds ? "uds," : "",
			 bi->enable.dvs_6axis ? "dvs6," : "",
			 bi->enable.block_output ? "bo," : "",
			 bi->enable.ds ? "ds," : "",
			 bi->enable.bayer_fir_6db ? "bf6," : "",
			 bi->enable.raw_binning ? "rawb," : "",
			 bi->enable.continuous ? "cont," : "",
			 bi->enable.s3a ? "s3a," : "",
			 bi->enable.fpnr ? "fpnr," : "",
			 bi->enable.sc ? "sc," : ""
			);

		snprintf(enable_info2, sizeof(enable_info2),
			 "%s%s%s%s%s%s%s%s%s%s%s",
			 bi->enable.macc ? "macc," : "",
			 bi->enable.output ? "outp," : "",
			 bi->enable.ref_frame ? "reff," : "",
			 bi->enable.tnr ? "tnr," : "",
			 bi->enable.xnr ? "xnr," : "",
			 bi->enable.params ? "par," : "",
			 bi->enable.ca_gdc ? "cagdc," : "",
			 bi->enable.isp_addresses ? "ispa," : "",
			 bi->enable.in_frame ? "inf," : "",
			 bi->enable.out_frame ? "outf," : "",
			 bi->enable.high_speed ? "hs," : ""
			);

		/* And merge them into one string */
		snprintf(enable_info, sizeof(enable_info), "%s%s",
			 enable_info1, enable_info2);
		{
			/* Word-wrap the flag list over up to three label lines
			 * of at most ENABLE_LINE_MAX_LENGTH, breaking at
			 * commas. */
			int l, p;
			char *ei = enable_info;

			l = strlen(ei);

			/* Replace last ',' with \0 if present */
			if (l && enable_info[l - 1] == ',')
				enable_info[--l] = '\0';

			if (l > ENABLE_LINE_MAX_LENGTH) {
				/* Too big for one line, find last comma */
				p = ENABLE_LINE_MAX_LENGTH;
				while (ei[p] != ',')
					p--;
				/* Last comma found, copy till that comma */
				strscpy(enable_info1, ei,
					p > sizeof(enable_info1) ? sizeof(enable_info1) : p);

				ei += p + 1;
				l = strlen(ei);

				if (l <= ENABLE_LINE_MAX_LENGTH) {
					/* The 2nd line fits */
					/* we cannot use ei as argument because
					 * it is not guaranteed dword aligned
					 */

					strscpy(enable_info2, ei,
						l > sizeof(enable_info2) ? sizeof(enable_info2) : l);

					snprintf(enable_info, sizeof(enable_info), "%s\\n%s",
						 enable_info1, enable_info2);

				} else {
					/* 2nd line is still too long */
					p = ENABLE_LINE_MAX_LENGTH;
					while (ei[p] != ',')
						p--;

					strscpy(enable_info2, ei,
						p > sizeof(enable_info2) ? sizeof(enable_info2) : p);

					ei += p + 1;
					l = strlen(ei);

					if (l <= ENABLE_LINE_MAX_LENGTH) {
						/* The 3rd line fits */
						/* we cannot use ei as argument because
						 * it is not guaranteed dword aligned
						 */
						strscpy(enable_info3, ei,
							sizeof(enable_info3));
						snprintf(enable_info, sizeof(enable_info),
							 "%s\\n%s\\n%s",
							 enable_info1, enable_info2,
							 enable_info3);
					} else {
						/* 3rd line is still too long */
						p = ENABLE_LINE_MAX_LENGTH;
						while (ei[p] != ',')
							p--;
						strscpy(enable_info3, ei,
							p > sizeof(enable_info3) ? sizeof(enable_info3) : p);
						ei += p + 1;
						strscpy(enable_info3, ei,
							sizeof(enable_info3));
						snprintf(enable_info, sizeof(enable_info),
							 "%s\\n%s\\n%s",
							 enable_info1, enable_info2,
							 enable_info3);
					}
				}
			}
		}

		dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, label=\"%s\\n%s\\n\\n%s\"]; \"%s(pipe%d)\"",
			   bin_type, blob_name, enable_info, blob_name, id);
	} else {
		dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, label=\"%s\\n%s\\n\"]; \"%s(pipe%d)\"",
			   bin_type, blob_name, blob_name, id);
	}

	if (stage->stage_num == 0) {
		/*
		 * There are some implicite assumptions about which bin is the
		 * input binary e.g. which one is connected to the input system
		 * Priority:
		 * 1) sp_raw_copy bin has highest priority
		 * 2) First stage==0 binary of preview, video or capture
		 */
		if (strlen(dot_id_input_bin) == 0) {
			snprintf(dot_id_input_bin, sizeof(dot_id_input_bin),
				 "%s(pipe%d)", blob_name, id);
		}
	}

	if (stage->args.in_frame) {
		ia_css_debug_pipe_graph_dump_frame(
		    stage->args.in_frame, id, blob_name,
		    "in", true);
	}

	for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
		if (stage->args.tnr_frames[i]) {
			ia_css_debug_pipe_graph_dump_frame(
			    stage->args.tnr_frames[i], id,
			    blob_name, "tnr_frame", true);
		}
	}

	for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) {
		if (stage->args.delay_frames[i]) {
			ia_css_debug_pipe_graph_dump_frame(
			    stage->args.delay_frames[i], id,
			    blob_name, "delay_frame", true);
		}
	}

	for (i = 0; i < IA_CSS_BINARY_MAX_OUTPUT_PORTS; i++) {
		if (stage->args.out_frame[i]) {
			ia_css_debug_pipe_graph_dump_frame(
			    stage->args.out_frame[i], id, blob_name,
			    "out", false);
		}
	}

	if (stage->args.out_vf_frame) {
		ia_css_debug_pipe_graph_dump_frame(
		    stage->args.out_vf_frame, id, blob_name,
		    "out_vf", false);
	}
}

/*
 * Emit the dot node/edge for the SP raw-copy stage. The output frame is
 * recorded in 'ring_buffer' (flushed by the epilogue) and this stage is
 * registered as the input binary of the graph.
 */
void
ia_css_debug_pipe_graph_dump_sp_raw_copy(
    struct ia_css_frame *out_frame)
{
	assert(out_frame);
	if (pg_inst.do_init) {
		ia_css_debug_pipe_graph_dump_prologue();
		pg_inst.do_init = false;
	}

	dtrace_dot("node [shape = circle, fixedsize=true, width=2.5, label=\"%s\\n%s\"]; \"%s(pipe%d)\"",
		   "sp-binary", "sp_raw_copy", "sp_raw_copy", 1);

	snprintf(ring_buffer, sizeof(ring_buffer),
		 "node [shape = box, fixedsize=true, width=2, height=0.7]; \"%p\" [label = \"%s\\n%d(%d) x %d\\nRingbuffer\"];",
		 out_frame,
		 debug_frame_format2str(out_frame->frame_info.format),
		 out_frame->frame_info.res.width,
		 out_frame->frame_info.padded_width,
		 out_frame->frame_info.res.height);

	dtrace_dot(ring_buffer);

	dtrace_dot(
	    "\"%s(pipe%d)\"->\"%p\" [label = out_frame];",
	    "sp_raw_copy", 1, out_frame);

	snprintf(dot_id_input_bin, sizeof(dot_id_input_bin), "%s(pipe%d)",
		 "sp_raw_copy", 1);
}

/* Record the stream's input/effective resolution and format for use by
 * the pipe-graph epilogue (sensor/input-system nodes). */
void
ia_css_debug_pipe_graph_dump_stream_config(
    const
struct ia_css_stream_config *stream_config) {
	/* Cache the stream's input/effective resolution and input format so
	 * later pipe-graph dumps can label frame nodes correctly. */
	pg_inst.width = stream_config->input_config.input_res.width;
	pg_inst.height = stream_config->input_config.input_res.height;
	pg_inst.eff_width = stream_config->input_config.effective_res.width;
	pg_inst.eff_height = stream_config->input_config.effective_res.height;
	pg_inst.stream_format = stream_config->input_config.format;
}

/* Trace a resolution as "<label>: =<width> x =<height>". */
void
ia_css_debug_dump_resolution(
    const struct ia_css_resolution *res,
    const char *label)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s: =%d x =%d\n",
			    label, res->width, res->height);
}

/* Trace all fields of a frame-info descriptor, prefixed by @label. */
void
ia_css_debug_dump_frame_info(
    const struct ia_css_frame_info *info,
    const char *label)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", label);
	ia_css_debug_dump_resolution(&info->res, "res");
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "padded_width: %d\n",
			    info->padded_width);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "format: %d\n", info->format);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "raw_bit_depth: %d\n",
			    info->raw_bit_depth);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "raw_bayer_order: %d\n",
			    info->raw_bayer_order);
}

/* Trace the capture configuration (mode, XNR, raw output enables). */
void
ia_css_debug_dump_capture_config(
    const struct ia_css_capture_config *config)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_xnr:  %d\n",
			    config->enable_xnr);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_raw_output: %d\n",
			    config->enable_raw_output);
}

/* Trace the optional extra pipe configuration; silently skips a NULL arg. */
void
ia_css_debug_dump_pipe_extra_config(
    const struct ia_css_pipe_extra_config *extra_config)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s\n", __func__);
	if (extra_config) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "enable_raw_binning: %d\n",
				    extra_config->enable_raw_binning);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_yuv_ds: %d\n",
				    extra_config->enable_yuv_ds);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "enable_high_speed:  %d\n",
				    extra_config->enable_high_speed);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "enable_dvs_6axis: %d\n",
				    extra_config->enable_dvs_6axis);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "enable_reduced_pipe: %d\n",
				    extra_config->enable_reduced_pipe);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "enable_fractional_ds: %d\n",
				    extra_config->enable_fractional_ds);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "disable_vf_pp: %d\n",
				    extra_config->disable_vf_pp);
	}
}

/* Trace the full pipe configuration: resolutions, per-stage frame infos,
 * capture config and DVS parameters. NULL @config is reported as an error. */
void
ia_css_debug_dump_pipe_config(
    const struct ia_css_pipe_config *config)
{
	unsigned int i;

	IA_CSS_ENTER_PRIVATE("config = %p", config);
	if (!config) {
		IA_CSS_ERROR("NULL input parameter");
		IA_CSS_LEAVE_PRIVATE("");
		return;
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "isp_pipe_version: %d\n",
			    config->isp_pipe_version);
	ia_css_debug_dump_resolution(&config->bayer_ds_out_res,
				     "bayer_ds_out_res");
	ia_css_debug_dump_resolution(&config->capt_pp_in_res,
				     "capt_pp_in_res");
	ia_css_debug_dump_resolution(&config->vf_pp_in_res, "vf_pp_in_res");

	if (IS_ISP2401) {
		/* output_system_in_res only exists on the ISP2401 variant */
		ia_css_debug_dump_resolution(&config->output_system_in_res,
					     "output_system_in_res");
	}

	ia_css_debug_dump_resolution(&config->dvs_crop_out_res,
				     "dvs_crop_out_res");
	for (i = 0; i < IA_CSS_PIPE_MAX_OUTPUT_STAGE; i++) {
		ia_css_debug_dump_frame_info(&config->output_info[i],
					     "output_info");
		ia_css_debug_dump_frame_info(&config->vf_output_info[i],
					     "vf_output_info");
	}
	ia_css_debug_dump_capture_config(&config->default_capture_config);
	ia_css_debug_dump_resolution(&config->dvs_envelope, "dvs_envelope");
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "dvs_frame_delay: %d\n",
			    config->dvs_frame_delay);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "enable_dz: %d\n",
			    config->enable_dz);
	IA_CSS_LEAVE_PRIVATE("");
}

/* Trace the source-specific part of a stream config, selected by input
 * mode (CSI port, TPG, PRBS); FIFO/memory modes print nothing extra. */
void
ia_css_debug_dump_stream_config_source(
    const struct ia_css_stream_config *config)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
	switch (config->mode) {
	case IA_CSS_INPUT_MODE_SENSOR:
	case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.port\n");
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "port: %d\n",
				    config->source.port.port);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_lanes: %d\n",
				    config->source.port.num_lanes);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "timeout: %d\n",
				    config->source.port.timeout);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "compression: %d\n",
				    config->source.port.compression.type);
		break;
	case IA_CSS_INPUT_MODE_TPG:
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.tpg\n");
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "id: %d\n",
				    config->source.tpg.id);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n",
				    config->source.tpg.mode);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x_mask: 0x%x\n",
				    config->source.tpg.x_mask);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "x_delta: %d\n",
				    config->source.tpg.x_delta);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "y_mask: 0x%x\n",
				    config->source.tpg.y_mask);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "y_delta: %d\n",
				    config->source.tpg.y_delta);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "xy_mask: 0x%x\n",
				    config->source.tpg.xy_mask);
		break;
	case IA_CSS_INPUT_MODE_PRBS:
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "source.prbs\n");
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "id: %d\n",
				    config->source.prbs.id);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "h_blank: %d\n",
				    config->source.prbs.h_blank);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "v_blank: %d\n",
				    config->source.prbs.v_blank);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "seed: 0x%x\n",
				    config->source.prbs.seed);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "seed1: 0x%x\n",
				    config->source.prbs.seed1);
		break;
	default:
	case IA_CSS_INPUT_MODE_FIFO:
	case IA_CSS_INPUT_MODE_MEMORY:
		/* no source-specific fields to dump */
		break;
	}
}

/* Trace the MIPI receive-buffer configuration. */
void
ia_css_debug_dump_mipi_buffer_config(
    const struct ia_css_mipi_buffer_config *config)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "size_mem_words: %d\n",
			    config->size_mem_words);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "nof_mipi_buffers: %d\n",
			    config->nof_mipi_buffers);
}

/* Trace the metadata configuration (type + resolution). */
void
ia_css_debug_dump_metadata_config(
    const struct ia_css_metadata_config *config)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "data_type: %d\n",
			    config->data_type);
	ia_css_debug_dump_resolution(&config->resolution, "resolution");
}

/* Trace a complete stream configuration, delegating to the per-section
 * dump helpers above. @num_pipes is only reported, not validated. */
void
ia_css_debug_dump_stream_config(
    const struct ia_css_stream_config *config,
    int num_pipes)
{
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s()\n", __func__);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "num_pipes: %d\n", num_pipes);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "mode: %d\n", config->mode);
	ia_css_debug_dump_stream_config_source(config);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "channel_id: %d\n",
			    config->channel_id);
	ia_css_debug_dump_resolution(&config->input_config.input_res,
				     "input_res");
	ia_css_debug_dump_resolution(&config->input_config.effective_res,
				     "effective_res");
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "format: %d\n",
			    config->input_config.format);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "bayer_order: %d\n",
			    config->input_config.bayer_order);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "sensor_binning_factor: %d\n",
			    config->sensor_binning_factor);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "pixels_per_clock: %d\n",
			    config->pixels_per_clock);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "online: %d\n",
			    config->online);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "init_num_cont_raw_buf: %d\n",
			    config->init_num_cont_raw_buf);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "target_num_cont_raw_buf: %d\n",
			    config->target_num_cont_raw_buf);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "pack_raw_pixels: %d\n",
			    config->pack_raw_pixels);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "continuous: %d\n",
			    config->continuous);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "flash_gpio_pin: %d\n",
			    config->flash_gpio_pin);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "left_padding: %d\n",
			    config->left_padding);
	ia_css_debug_dump_mipi_buffer_config(&config->mipi_buffer_config);
	ia_css_debug_dump_metadata_config(&config->metadata_config);
}

/*
   Trace support.

   This tracer is using a buffer to trace the flow of the FW and dump misc
   values (see below for details). Currently, support is only for SKC.
   To enable support for other platforms:
   - Allocate a buffer for tracing in DMEM. The longer the better.
   - Use the DBG_init routine in sp.hive.c to initialize the tracer with
     the address and size selected.
   - Add trace points in the SP code wherever needed.
   - Enable the dump below with the required address and required adjustments.
	   Dump is called at the end of ia_css_debug_dump_sp_state().
*/

/*
  dump_trace() : dump the trace points from DMEM2.
  for every trace point, the following are printed: index, major:minor and the
  16-bit attached value. The routine looks for the first 0, and then prints
  from it cyclically.
  Data format in DMEM2:
	first 4 DWORDS: header
		DWORD 0: data description
			byte 0: version
			byte 1: number of threads (for future use)
			byte 2+3: number of TPs
		DWORD 1: command byte + data (for future use)
			byte 0: command
			byte 1-3: command signature
		DWORD 2-3: additional data (for future use)
  Following data is 4-byte oriented:
	byte 0: major
	byte 1: minor
	byte 2-3: data
*/
#if TRACE_ENABLE_SP0 || TRACE_ENABLE_SP1 || TRACE_ENABLE_ISP
static void debug_dump_one_trace(enum TRACE_CORE_ID proc_id)
{
#if defined(HAS_TRACER_V2)
	u32 start_addr;
	u32 start_addr_data;
	u32 item_size;
	u32 tmp;
	u8 tid_val;
	enum TRACE_DUMP_FORMAT dump_format;
	int i, j, max_trace_points, point_num, limit = -1;
	/* using a static buffer here as the driver has issues allocating memory */
	static u32 trace_read_buf[TRACE_BUFF_SIZE] = {0};
	static struct trace_header_t header;
	u8 *header_arr;

	/* Select the per-core trace buffer addresses and geometry, then
	 * read and parse the header. */
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "~~~ Tracer ");
	switch (proc_id) {
	case TRACE_SP0_ID:
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP0");
		start_addr = TRACE_SP0_ADDR;
		start_addr_data = TRACE_SP0_DATA_ADDR;
		item_size = TRACE_SP0_ITEM_SIZE;
		max_trace_points = TRACE_SP0_MAX_POINTS;
		break;
	case TRACE_SP1_ID:
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP1");
		start_addr = TRACE_SP1_ADDR;
		start_addr_data = TRACE_SP1_DATA_ADDR;
		item_size = TRACE_SP1_ITEM_SIZE;
		max_trace_points = TRACE_SP1_MAX_POINTS;
		break;
	case TRACE_ISP_ID:
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ISP");
		start_addr = TRACE_ISP_ADDR;
		start_addr_data = TRACE_ISP_DATA_ADDR;
		item_size = TRACE_ISP_ITEM_SIZE;
		max_trace_points = TRACE_ISP_MAX_POINTS;
		break;
	default:
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
				    "\t\ttraces are not supported for this processor ID - exiting\n");
		return;
	}

	if (!IS_ISP2401) {
		/* 2400: header is a single DWORD; TP count in the top 16 bits */
		tmp = ia_css_device_load_uint32(start_addr);
		point_num = (tmp >> 16) & 0xFFFF;

		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " ver %d %d points\n",
				    tmp & 0xFF, point_num);
	} else {
		/* Loading byte-by-byte as using the master routine had issues */
		header_arr = (uint8_t *)&header;
		for (i = 0; i < (int)sizeof(struct trace_header_t); i++)
			header_arr[i] = ia_css_device_load_uint8(start_addr + (i));

		point_num = header.max_tracer_points;

		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, " ver %d %d points\n",
				    header.version, point_num);

		tmp = header.version;
	}
	if ((tmp & 0xFF) != TRACER_VER) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tUnknown version - exiting\n");
		return;
	}
	if (point_num > max_trace_points) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tToo many points - exiting\n");
		return;
	}
	/* copy the TPs and find the first 0 */
	for (i = 0; i < point_num; i++) {
		trace_read_buf[i] = ia_css_device_load_uint32(start_addr_data +
				    (i * item_size));
		if ((limit == (-1)) && (trace_read_buf[i] == 0))
			limit = i;
	}
	if (IS_ISP2401) {
		/* 2401 header additionally carries per-thread status and
		 * scratch debug words; dump them first. */
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Status:\n");
		for (i = 0; i < SH_CSS_MAX_SP_THREADS; i++)
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "\tT%d: %3d (%02x)  %6d (%04x)  %10d (%08x)\n", i,
					    header.thr_status_byte[i],
					    header.thr_status_byte[i],
					    header.thr_status_word[i],
					    header.thr_status_word[i],
					    header.thr_status_dword[i],
					    header.thr_status_dword[i]);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "Scratch:\n");
		for (i = 0; i < MAX_SCRATCH_DATA; i++)
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%10d (%08x)  ",
					    header.scratch_debug[i],
					    header.scratch_debug[i]);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\n");
	}
	/* two 0s in the beginning: empty buffer */
	if ((trace_read_buf[0] == 0) && (trace_read_buf[1] == 0)) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "\t\tEmpty tracer - exiting\n");
		return;
	}
	/* no overrun: start from 0 */
	if ((limit == point_num - 1) ||
	    /* first 0 is at the end - border case */
	    (trace_read_buf[limit + 1] == 0))   /* did not make a full cycle after the memset */
		limit = 0;
	/* overrun: limit is the first non-zero after the first zero */
	else
		limit++;

	/* print the TPs */
	for (i = 0; i < point_num; i++) {
		j = (limit + i) % point_num;
		if (trace_read_buf[j]) {
			if (!IS_ISP2401) {
				/* NOTE(review): this declares a new local
				 * dump_format that shadows the outer one and is
				 * never read; on the 2400 path the outer
				 * dump_format reaches the switch below
				 * uninitialized - looks like a latent bug,
				 * verify against the TRACE_DUMP_FORMAT
				 * definitions before changing. */
				TRACE_DUMP_FORMAT dump_format = FIELD_FORMAT_UNPACK(trace_read_buf[j]);
			} else {
				tid_val = FIELD_TID_UNPACK(trace_read_buf[j]);
				dump_format = TRACE_DUMP_FORMAT_POINT;

				/*
				 * When tid value is 111b, the data will be interpreted differently:
				 * tid val is ignored, major field contains 2 bits (msb) for format type
				 */
				if (tid_val == FIELD_TID_SEL_FORMAT_PAT) {
					dump_format = FIELD_FORMAT_UNPACK(trace_read_buf[j]);
				}
			}
			switch (dump_format) {
			case TRACE_DUMP_FORMAT_POINT:
				ia_css_debug_dtrace(
				    IA_CSS_DEBUG_TRACE, "\t\t%d %d:%d value - %d\n",
				    j, FIELD_MAJOR_UNPACK(trace_read_buf[j]),
				    FIELD_MINOR_UNPACK(trace_read_buf[j]),
				    FIELD_VALUE_UNPACK(trace_read_buf[j]));
				break;
			/* ISP2400 */
			case TRACE_DUMP_FORMAT_VALUE24_HEX:
				ia_css_debug_dtrace(
				    IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %x H\n",
				    j,
				    FIELD_MAJOR_UNPACK(trace_read_buf[j]),
				    FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
				break;
			/* ISP2400 */
			case TRACE_DUMP_FORMAT_VALUE24_DEC:
				ia_css_debug_dtrace(
				    IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %d D\n",
				    j,
				    FIELD_MAJOR_UNPACK(trace_read_buf[j]),
				    FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
				break;
			/* ISP2401 */
			case TRACE_DUMP_FORMAT_POINT_NO_TID:
				ia_css_debug_dtrace(
				    IA_CSS_DEBUG_TRACE, "\t\t%d %d:%d value - %x (%d)\n",
				    j,
				    FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]),
				    FIELD_MINOR_UNPACK(trace_read_buf[j]),
				    FIELD_VALUE_UNPACK(trace_read_buf[j]),
				    FIELD_VALUE_UNPACK(trace_read_buf[j]));
				break;
			/* ISP2401 */
			case TRACE_DUMP_FORMAT_VALUE24:
				ia_css_debug_dtrace(
				    IA_CSS_DEBUG_TRACE, "\t\t%d, %d, 24bit value %x (%d)\n",
				    j,
				    FIELD_MAJOR_UNPACK(trace_read_buf[j]),
				    FIELD_MAJOR_W_FMT_UNPACK(trace_read_buf[j]),
				    FIELD_VALUE_24_UNPACK(trace_read_buf[j]),
				    FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
				break;
			case TRACE_DUMP_FORMAT_VALUE24_TIMING:
				ia_css_debug_dtrace(
				    IA_CSS_DEBUG_TRACE, "\t\t%d, %d, timing %x\n",
				    j,
				    FIELD_MAJOR_UNPACK(trace_read_buf[j]),
				    FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
				break;
			case TRACE_DUMP_FORMAT_VALUE24_TIMING_DELTA:
				ia_css_debug_dtrace(
				    IA_CSS_DEBUG_TRACE, "\t\t%d, %d, timing delta %x\n",
				    j,
				    FIELD_MAJOR_UNPACK(trace_read_buf[j]),
				    FIELD_VALUE_24_UNPACK(trace_read_buf[j]));
				break;
			default:
				ia_css_debug_dtrace(
				    IA_CSS_DEBUG_TRACE,
				    "no such trace dump format %d",
				    dump_format);
				break;
			}
		}
	}
#else
	(void)proc_id;
#endif /* HAS_TRACER_V2 */
}
#endif /* TRACE_ENABLE_SP0 || TRACE_ENABLE_SP1 || TRACE_ENABLE_ISP */

/* Dump the trace buffers of every core whose tracing is compiled in. */
void ia_css_debug_dump_trace(void)
{
#if TRACE_ENABLE_SP0
	debug_dump_one_trace(TRACE_SP0_ID);
#endif
#if TRACE_ENABLE_SP1
	debug_dump_one_trace(TRACE_SP1_ID);
#endif
#if TRACE_ENABLE_ISP
	debug_dump_one_trace(TRACE_ISP_ID);
#endif
}

/* Tagger state dump function. The tagger is only available when the CSS
 * contains an input system (2400 or 2401).
 */
void ia_css_debug_tagger_state(void)
{
	unsigned int i;
	unsigned int HIVE_ADDR_tagger_frames;
	ia_css_tagger_buf_sp_elem_t tbuf_frames[MAX_CB_ELEMS_FOR_TAGGER];

	HIVE_ADDR_tagger_frames = sh_css_sp_fw.info.sp.tagger_frames_addr;

	/* This variable is not used in crun */
	(void)HIVE_ADDR_tagger_frames;

	/* 2400 and 2401 only have 1 SP, so the tagger lives on SP0 */
	sp_dmem_load(SP0_ID,
		     (unsigned int)sp_address_of(tagger_frames),
		     tbuf_frames,
		     sizeof(tbuf_frames));

	ia_css_debug_dtrace(2, "Tagger Info:\n");
	for (i = 0; i < MAX_CB_ELEMS_FOR_TAGGER; i++) {
		ia_css_debug_dtrace(2, "\t tagger frame[%d]: exp_id=%d, marked=%d, locked=%d\n",
				    i, tbuf_frames[i].exp_id, tbuf_frames[i].mark,
				    tbuf_frames[i].lock);
	}
}

/* ISP2401 */
/* Dump an SP's status/stall registers once, then sample its program
 * counter @num_of_dumps times (a crude "where is the SP stuck" probe). */
void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps)
{
	unsigned int pc;
	unsigned int i;
	hrt_data sc = sp_ctrl_load(id, SP_SC_REG);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d Status reg: 0x%X\n", id, sc);
	sc = sp_ctrl_load(id, SP_CTRL_SINK_REG);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d Stall reg: 0x%X\n", id, sc);
	for (i = 0; i < num_of_dumps; i++) {
		pc = sp_ctrl_load(id, SP_PC_REG);
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "SP%-1d PC: 0x%X\n", id, pc);
	}
}
linux-master
drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "tag.h" #include <platform_support.h> /* NULL */ #include <assert_support.h> #include "tag_local.h" /* * @brief Creates the tag description from the given parameters. * @param[in] num_captures * @param[in] skip * @param[in] offset * @param[out] tag_descr */ void sh_css_create_tag_descr(int num_captures, unsigned int skip, int offset, unsigned int exp_id, struct sh_css_tag_descr *tag_descr) { assert(tag_descr); tag_descr->num_captures = num_captures; tag_descr->skip = skip; tag_descr->offset = offset; tag_descr->exp_id = exp_id; } /* * @brief Encodes the members of tag description into a 32-bit value. 
* @param[in] tag Pointer to the tag description * @return (unsigned int) Encoded 32-bit tag-info */ unsigned int sh_css_encode_tag_descr(struct sh_css_tag_descr *tag) { int num_captures; unsigned int num_captures_sign; unsigned int skip; int offset; unsigned int offset_sign; unsigned int exp_id; unsigned int encoded_tag; assert(tag); if (tag->num_captures < 0) { num_captures = -tag->num_captures; num_captures_sign = 1; } else { num_captures = tag->num_captures; num_captures_sign = 0; } skip = tag->skip; if (tag->offset < 0) { offset = -tag->offset; offset_sign = 1; } else { offset = tag->offset; offset_sign = 0; } exp_id = tag->exp_id; if (exp_id != 0) { /* we encode either an exp_id or capture data */ assert((num_captures == 0) && (skip == 0) && (offset == 0)); encoded_tag = TAG_EXP | (exp_id & 0xFF) << TAG_EXP_ID_SHIFT; } else { encoded_tag = TAG_CAP | ((num_captures_sign & 0x00000001) << TAG_NUM_CAPTURES_SIGN_SHIFT) | ((offset_sign & 0x00000001) << TAG_OFFSET_SIGN_SHIFT) | ((num_captures & 0x000000FF) << TAG_NUM_CAPTURES_SHIFT) | ((skip & 0x000000FF) << TAG_OFFSET_SHIFT) | ((offset & 0x000000FF) << TAG_SKIP_SHIFT); } return encoded_tag; }
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_shared/host/tag.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010-2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "hmem.h" #ifndef __INLINE_HMEM__ #include "hmem_private.h" #endif /* __INLINE_HMEM__ */
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/hmem.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include "assert_support.h"
#include "irq.h"

#ifndef __INLINE_GP_DEVICE__
#define __INLINE_GP_DEVICE__
#endif
#include "gp_device.h"	/* _REG_GP_IRQ_REQUEST_ADDR */

static inline void irq_wait_for_write_complete(
    const irq_ID_t ID);

static inline bool any_irq_channel_enabled(
    const irq_ID_t ID);

static inline irq_ID_t virq_get_irq_id(const enum virq_id irq_ID,
				       unsigned int *channel_ID);

#ifndef __INLINE_IRQ__
#include "irq_private.h"
#endif /* __INLINE_IRQ__ */

/* Number of channels per IRQ controller. */
static unsigned short IRQ_N_CHANNEL[N_IRQ_ID] = {
	IRQ0_ID_N_CHANNEL,
	IRQ1_ID_N_CHANNEL,
	IRQ2_ID_N_CHANNEL,
	IRQ3_ID_N_CHANNEL
};

/* First virq number of each controller in the flat virq ID space. */
static unsigned short IRQ_N_ID_OFFSET[N_IRQ_ID + 1] = {
	IRQ0_ID_OFFSET,
	IRQ1_ID_OFFSET,
	IRQ2_ID_OFFSET,
	IRQ3_ID_OFFSET,
	IRQ_END_OFFSET
};

/* For each controller, the IRQ0 channel it is nested behind
 * (N_virq_id means "not nested", i.e. IRQ0 itself). */
static enum virq_id IRQ_NESTING_ID[N_IRQ_ID] = {
	N_virq_id,
	virq_ifmt,
	virq_isys,
	virq_isel
};

/* Clear all pending interrupts on controller @ID. */
void irq_clear_all(
    const irq_ID_t ID)
{
	hrt_data mask = 0xFFFFFFFF;

	assert(ID < N_IRQ_ID);
	assert(IRQ_N_CHANNEL[ID] <= HRT_DATA_WIDTH);

	if (IRQ_N_CHANNEL[ID] < HRT_DATA_WIDTH) {
		mask = ~((~(hrt_data)0) >> IRQ_N_CHANNEL[ID]);
	}

	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, mask);
	return;
}

/*
 * Do we want the user to be able to set the signalling method ?
 */
/* Enable channel @irq_id on controller @ID (rising-edge triggered).
 * The register writes below are order-sensitive. */
void irq_enable_channel(
    const irq_ID_t ID,
    const unsigned int irq_id)
{
	unsigned int mask = irq_reg_load(ID,
					 _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
	unsigned int enable = irq_reg_load(ID,
					   _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
	unsigned int edge_in = irq_reg_load(ID,
					    _HRT_IRQ_CONTROLLER_EDGE_REG_IDX);
	unsigned int me = 1U << irq_id;

	assert(ID < N_IRQ_ID);
	assert(irq_id < IRQ_N_CHANNEL[ID]);

	mask |= me;
	enable |= me;
	edge_in |= me;	/* rising edge */

	/* to avoid mishaps configuration must follow the following order */

	/* mask this interrupt */
	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask & ~me);
	/* rising edge at input */
	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_EDGE_REG_IDX, edge_in);
	/* enable interrupt to output */
	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX, enable);
	/* clear current irq only */
	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, me);
	/* unmask interrupt from input */
	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask);

	irq_wait_for_write_complete(ID);

	return;
}

/* Select edge (pulse==false) or pulse (pulse==true) output signalling
 * for all channels of controller @ID. */
void irq_enable_pulse(
    const irq_ID_t ID,
    bool pulse)
{
	unsigned int edge_out = 0x0;

	if (pulse) {
		edge_out = 0xffffffff;
	}
	/* output is given as edge, not pulse */
	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX, edge_out);
	return;
}

/* Disable channel @irq_id on controller @ID and clear its pending state. */
void irq_disable_channel(
    const irq_ID_t ID,
    const unsigned int irq_id)
{
	unsigned int mask = irq_reg_load(ID,
					 _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
	unsigned int enable = irq_reg_load(ID,
					   _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
	unsigned int me = 1U << irq_id;

	assert(ID < N_IRQ_ID);
	assert(irq_id < IRQ_N_CHANNEL[ID]);

	mask &= ~me;
	enable &= ~me;

	/* enable interrupt to output */
	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX, enable);
	/* unmask interrupt from input */
	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_MASK_REG_IDX, mask);
	/* clear current irq only */
	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, me);

	irq_wait_for_write_complete(ID);

	return;
}

/* Find, acknowledge and return (via @irq_id) the lowest pending channel of
 * controller @ID. Returns an error status when nothing is pending, and
 * "more_irqs" when further channels remain pending after the one returned. */
enum hrt_isp_css_irq_status irq_get_channel_id(
    const irq_ID_t ID,
    unsigned int *irq_id)
{
	unsigned int irq_status = irq_reg_load(ID,
					       _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
	unsigned int idx;
	enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_success;

	assert(ID < N_IRQ_ID);
	assert(irq_id);

	/* find the first irq bit */
	for (idx = 0; idx < IRQ_N_CHANNEL[ID]; idx++) {
		if (irq_status & (1U << idx))
			break;
	}
	if (idx == IRQ_N_CHANNEL[ID])
		return hrt_isp_css_irq_status_error;

	/* now check whether there are more bits set */
	if (irq_status != (1U << idx))
		status = hrt_isp_css_irq_status_more_irqs;

	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << idx);

	irq_wait_for_write_complete(ID);

	if (irq_id)
		*irq_id = (unsigned int)idx;

	return status;
}

/* GP-device addresses of the software IRQ request pins. */
static const hrt_address IRQ_REQUEST_ADDR[N_IRQ_SW_CHANNEL_ID] = {
	_REG_GP_IRQ_REQUEST0_ADDR,
	_REG_GP_IRQ_REQUEST1_ADDR
};

/* Raise a software interrupt on IRQ0 by pulsing the request pin. */
void irq_raise(
    const irq_ID_t ID,
    const irq_sw_channel_id_t irq_id)
{
	hrt_address addr;

	OP___assert(ID == IRQ0_ID);
	OP___assert(IRQ_BASE[ID] != (hrt_address)-1);
	OP___assert(irq_id < N_IRQ_SW_CHANNEL_ID);

	(void)ID;

	/* The SW IRQ pins are remapped to offset zero */
	addr = IRQ_REQUEST_ADDR[irq_id];

	gp_device_reg_store(GP_DEVICE0_ID,
			    (unsigned int)addr, 1);
	gp_device_reg_store(GP_DEVICE0_ID,
			    (unsigned int)addr, 0);
	return;
}

/* Snapshot all registers of controller @ID into @state. */
void irq_controller_get_state(const irq_ID_t ID,
			      struct irq_controller_state *state)
{
	assert(ID < N_IRQ_ID);
	assert(state);

	state->irq_edge = irq_reg_load(ID,
				       _HRT_IRQ_CONTROLLER_EDGE_REG_IDX);
	state->irq_mask = irq_reg_load(ID,
				       _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
	state->irq_status = irq_reg_load(ID,
					 _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
	state->irq_enable = irq_reg_load(ID,
					 _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
	state->irq_level_not_pulse = irq_reg_load(ID,
				     _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX);
	return;
}

/* Return true when any channel of the top-level controller is pending. */
bool any_virq_signal(void)
{
	unsigned int irq_status = irq_reg_load(IRQ0_ID,
					       _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);

	return (irq_status != 0);
}

/* Enable/disable the virtual IRQ @irq_ID, also enabling (or, when the
 * nested controller becomes empty, disabling) its IRQ0 nesting channel. */
void cnd_virq_enable_channel(
    const enum virq_id irq_ID,
    const bool en)
{
	irq_ID_t i;
	unsigned int channel_ID;
	irq_ID_t ID = virq_get_irq_id(irq_ID, &channel_ID);

	assert(ID < N_IRQ_ID);

	for (i = IRQ1_ID; i < N_IRQ_ID; i++) {
		/* It is not allowed to enable the pin of a nested IRQ directly */
		assert(irq_ID != IRQ_NESTING_ID[i]);
	}

	if (en) {
		irq_enable_channel(ID, channel_ID);
		if (IRQ_NESTING_ID[ID] != N_virq_id) {
			/* Single level nesting, otherwise we'd need to recurse */
			irq_enable_channel(IRQ0_ID, IRQ_NESTING_ID[ID]);
		}
	} else {
		irq_disable_channel(ID, channel_ID);
		if ((IRQ_NESTING_ID[ID] != N_virq_id) && !any_irq_channel_enabled(ID)) {
			/* Only disable the top if the nested ones are empty */
			irq_disable_channel(IRQ0_ID, IRQ_NESTING_ID[ID]);
		}
	}
	return;
}

/* Clear pending state on every controller. */
void virq_clear_all(void)
{
	irq_ID_t irq_id;

	for (irq_id = (irq_ID_t)0; irq_id < N_IRQ_ID; irq_id++) {
		irq_clear_all(irq_id);
	}
	return;
}

/* Accumulate and acknowledge the pending channels of every enabled
 * controller into @irq_info. Success means at least one IRQ was found. */
enum hrt_isp_css_irq_status
virq_get_channel_signals(struct virq_info *irq_info)
{
	enum hrt_isp_css_irq_status irq_status = hrt_isp_css_irq_status_error;
	irq_ID_t ID;

	assert(irq_info);

	for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
		if (any_irq_channel_enabled(ID)) {
			hrt_data irq_data = irq_reg_load(ID,
							 _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);

			if (irq_data != 0) {
				/* The error condition is an IRQ pulse received
				 * with no IRQ status written */
				irq_status = hrt_isp_css_irq_status_success;
			}

			irq_info->irq_status_reg[ID] |= irq_data;

			irq_reg_store(ID,
				      _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, irq_data);

			irq_wait_for_write_complete(ID);
		}
	}

	return irq_status;
}

/* Reset the accumulated status words in @irq_info. */
void virq_clear_info(struct virq_info *irq_info)
{
	irq_ID_t ID;

	assert(irq_info);

	for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
		irq_info->irq_status_reg[ID] = 0;
	}
	return;
}

/* Resolve the next pending virtual IRQ: find the first pending IRQ0
 * channel, descend into the nested controller it belongs to (if any),
 * acknowledge, and return the flat virq number via @irq_id. */
enum hrt_isp_css_irq_status virq_get_channel_id(
    enum virq_id *irq_id)
{
	unsigned int irq_status = irq_reg_load(IRQ0_ID,
					       _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
	unsigned int idx;
	enum hrt_isp_css_irq_status status = hrt_isp_css_irq_status_success;
	irq_ID_t ID;

	assert(irq_id);

	/* find the first irq bit on device 0 */
	for (idx = 0; idx < IRQ_N_CHANNEL[IRQ0_ID]; idx++) {
		if (irq_status & (1U << idx))
			break;
	}

	if (idx == IRQ_N_CHANNEL[IRQ0_ID]) {
		return hrt_isp_css_irq_status_error;
	}

	/* Check whether there are more bits set on device 0 */
	if (irq_status != (1U << idx)) {
		status = hrt_isp_css_irq_status_more_irqs;
	}

	/* Check whether we have an IRQ on one of the nested devices */
	for (ID = N_IRQ_ID - 1 ; ID > (irq_ID_t)0; ID--) {
		if (IRQ_NESTING_ID[ID] == (enum virq_id)idx) {
			break;
		}
	}

	/* If we have a nested IRQ, load that state, discard the device 0 state */
	if (ID != IRQ0_ID) {
		irq_status = irq_reg_load(ID,
					  _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
		/* find the first irq bit on device "id" */
		for (idx = 0; idx < IRQ_N_CHANNEL[ID]; idx++) {
			if (irq_status & (1U << idx))
				break;
		}

		if (idx == IRQ_N_CHANNEL[ID]) {
			return hrt_isp_css_irq_status_error;
		}

		/* Alternatively check whether there are more bits set on this device */
		if (irq_status != (1U << idx)) {
			status = hrt_isp_css_irq_status_more_irqs;
		} else {
			/* If this device is empty, clear the state on device 0 */
			irq_reg_store(IRQ0_ID,
				      _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX,
				      1U << IRQ_NESTING_ID[ID]);
		}
	} /* if (ID != IRQ0_ID) */

	/* Here we proceed to clear the IRQ on detected device, if no nested
	 * IRQ, this is device 0 */
	irq_reg_store(ID,
		      _HRT_IRQ_CONTROLLER_CLEAR_REG_IDX, 1U << idx);

	irq_wait_for_write_complete(ID);

	/* convert the controller-local channel to a flat virq number */
	idx += IRQ_N_ID_OFFSET[ID];
	if (irq_id)
		*irq_id = (enum virq_id)idx;

	return status;
}

/* Read back the enable register to flush the preceding posted write. */
static inline void irq_wait_for_write_complete(
    const irq_ID_t ID)
{
	assert(ID < N_IRQ_ID);
	assert(IRQ_BASE[ID] != (hrt_address)-1);
	(void)ia_css_device_load_uint32(IRQ_BASE[ID] +
					_HRT_IRQ_CONTROLLER_ENABLE_REG_IDX * sizeof(hrt_data));
}

/* True when any channel of controller @ID is enabled. */
static inline bool any_irq_channel_enabled(
    const irq_ID_t ID)
{
	hrt_data en_reg;

	assert(ID < N_IRQ_ID);

	en_reg = irq_reg_load(ID,
			      _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);

	return (en_reg != 0);
}

/* Map a flat virq number to its controller (returned) and the
 * controller-local channel number (via @channel_ID). */
static inline irq_ID_t virq_get_irq_id(
    const enum virq_id irq_ID,
    unsigned int *channel_ID)
{
	irq_ID_t ID;

	assert(channel_ID);

	for (ID = (irq_ID_t)0 ; ID < N_IRQ_ID; ID++) {
		if (irq_ID < IRQ_N_ID_OFFSET[ID + 1]) {
			break;
		}
	}

	*channel_ID = (unsigned int)irq_ID - IRQ_N_ID_OFFSET[ID];

	return ID;
}
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010-2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <type_support.h> /*uint32_t */ #include "gp_timer.h" /*system_local.h, gp_timer_public.h*/ #ifndef __INLINE_GP_TIMER__ #include "gp_timer_private.h" /*device_access.h*/ #endif /* __INLINE_GP_TIMER__ */ #include "system_local.h" /* FIXME: not sure if reg_load(), reg_store() should be API. */ static uint32_t gp_timer_reg_load(uint32_t reg); static void gp_timer_reg_store(u32 reg, uint32_t value); static uint32_t gp_timer_reg_load(uint32_t reg) { return ia_css_device_load_uint32( GP_TIMER_BASE + (reg * sizeof(uint32_t))); } static void gp_timer_reg_store(u32 reg, uint32_t value) { ia_css_device_store_uint32((GP_TIMER_BASE + (reg * sizeof(uint32_t))), value); } void gp_timer_init(gp_timer_ID_t ID) { /* set_overall_enable*/ gp_timer_reg_store(_REG_GP_TIMER_OVERALL_ENABLE, 1); /*set enable*/ gp_timer_reg_store(_REG_GP_TIMER_ENABLE_ID(ID), 1); /* set signal select */ gp_timer_reg_store(_REG_GP_TIMER_SIGNAL_SELECT_ID(ID), GP_TIMER_SIGNAL_SELECT); /*set count type */ gp_timer_reg_store(_REG_GP_TIMER_COUNT_TYPE_ID(ID), GP_TIMER_COUNT_TYPE_LOW); /*reset gp timer */ gp_timer_reg_store(_REG_GP_TIMER_RESET_REG, 0xFF); } uint32_t gp_timer_read(gp_timer_ID_t ID) { return gp_timer_reg_load(_REG_GP_TIMER_VALUE_ID(ID)); }
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_timer.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include "system_global.h"

#ifndef ISP2401

#include "input_formatter.h"
#include <type_support.h>
#include "gp_device.h"
#include "assert_support.h"

#ifndef __INLINE_INPUT_FORMATTER__
#include "input_formatter_private.h"
#endif /* __INLINE_INPUT_FORMATTER__ */

/* Required data alignment per input-formatter instance (bytes). */
static const unsigned int input_formatter_alignment[N_INPUT_FORMATTER_ID] = {
	ISP_VEC_ALIGN, ISP_VEC_ALIGN, HIVE_ISP_CTRL_DATA_BYTES
};

/* Soft-reset register address, per input-formatter instance. */
const hrt_address HIVE_IF_SRST_ADDRESS[N_INPUT_FORMATTER_ID] = {
	INPUT_FORMATTER0_SRST_OFFSET,
	INPUT_FORMATTER1_SRST_OFFSET,
	INPUT_FORMATTER2_SRST_OFFSET,
	INPUT_FORMATTER3_SRST_OFFSET
};

/* Soft-reset register mask, per input-formatter instance. */
const hrt_data HIVE_IF_SRST_MASK[N_INPUT_FORMATTER_ID] = {
	INPUT_FORMATTER0_SRST_MASK,
	INPUT_FORMATTER1_SRST_MASK,
	INPUT_FORMATTER2_SRST_MASK,
	INPUT_FORMATTER3_SRST_MASK
};

/* Input-switch selector code, per input-formatter instance. */
const u8 HIVE_IF_SWITCH_CODE[N_INPUT_FORMATTER_ID] = {
	HIVE_INPUT_SWITCH_SELECT_IF_PRIM,
	HIVE_INPUT_SWITCH_SELECT_IF_PRIM,
	HIVE_INPUT_SWITCH_SELECT_IF_SEC,
	HIVE_INPUT_SWITCH_SELECT_STR_TO_MEM
};

/* MW Should be part of system_global.h, where we have the main enumeration */
/*
 * True for the stream-to-memory (binary copy) instance; the reset and
 * FIFO-mode helpers below deliberately skip that instance.
 */
static const bool HIVE_IF_BIN_COPY[N_INPUT_FORMATTER_ID] = {
	false, false, false, true
};

/*
 * Soft-reset one input formatter by writing its reset mask to its
 * reset register. The stream-to-memory instance is skipped, see the
 * hack note below.
 */
void input_formatter_rst(
    const input_formatter_ID_t ID)
{
	hrt_address addr;
	hrt_data rst;

	assert(ID < N_INPUT_FORMATTER_ID);

	addr = HIVE_IF_SRST_ADDRESS[ID];
	rst = HIVE_IF_SRST_MASK[ID];

	/* TEMPORARY HACK: THIS RESET BREAKS THE METADATA FEATURE
	 * WICH USES THE STREAM2MEMRY BLOCK.
	 * MUST BE FIXED PROPERLY
	 */
	if (!HIVE_IF_BIN_COPY[ID]) {
		input_formatter_reg_store(ID, addr, rst);
	}

	return;
}

/* Return the required buffer alignment (bytes) for the given formatter. */
unsigned int input_formatter_get_alignment(
    const input_formatter_ID_t ID)
{
	assert(ID < N_INPUT_FORMATTER_ID);

	return input_formatter_alignment[ID];
}

/*
 * Enable/disable FIFO blocking mode on the given formatter; skipped for
 * the stream-to-memory instance.
 */
void input_formatter_set_fifo_blocking_mode(
    const input_formatter_ID_t ID,
    const bool enable)
{
	assert(ID < N_INPUT_FORMATTER_ID);

	/* cnd_input_formatter_reg_store() */
	if (!HIVE_IF_BIN_COPY[ID]) {
		input_formatter_reg_store(ID,
					  HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS,
					  enable);
	}
	return;
}

/*
 * Read the global input-switch LUT state. The switch is shared, so the
 * same registers are read regardless of ID (hence the (void)ID below).
 */
void input_formatter_get_switch_state(
    const input_formatter_ID_t ID,
    input_formatter_switch_state_t *state)
{
	assert(ID < N_INPUT_FORMATTER_ID);
	assert(state);

	/* We'll change this into an intelligent function to get
	 * switch info per IF */
	(void)ID;

	state->if_input_switch_lut_reg[0] = gp_device_reg_load(GP_DEVICE0_ID,
					    _REG_GP_IFMT_input_switch_lut_reg0);
	state->if_input_switch_lut_reg[1] = gp_device_reg_load(GP_DEVICE0_ID,
					    _REG_GP_IFMT_input_switch_lut_reg1);
	state->if_input_switch_lut_reg[2] = gp_device_reg_load(GP_DEVICE0_ID,
					    _REG_GP_IFMT_input_switch_lut_reg2);
	state->if_input_switch_lut_reg[3] = gp_device_reg_load(GP_DEVICE0_ID,
					    _REG_GP_IFMT_input_switch_lut_reg3);
	state->if_input_switch_lut_reg[4] = gp_device_reg_load(GP_DEVICE0_ID,
					    _REG_GP_IFMT_input_switch_lut_reg4);
	state->if_input_switch_lut_reg[5] = gp_device_reg_load(GP_DEVICE0_ID,
					    _REG_GP_IFMT_input_switch_lut_reg5);
	state->if_input_switch_lut_reg[6] = gp_device_reg_load(GP_DEVICE0_ID,
					    _REG_GP_IFMT_input_switch_lut_reg6);
	state->if_input_switch_lut_reg[7] = gp_device_reg_load(GP_DEVICE0_ID,
					    _REG_GP_IFMT_input_switch_lut_reg7);
	state->if_input_switch_fsync_lut = gp_device_reg_load(GP_DEVICE0_ID,
					   _REG_GP_IFMT_input_switch_fsync_lut);
	state->if_input_switch_ch_id_fmt_type = gp_device_reg_load(GP_DEVICE0_ID,
						_REG_GP_IFMT_input_switch_ch_id_fmt_type);

	return;
}

/*
 * Snapshot all configuration and FSM registers of one input formatter
 * into *state. Pure register reads; no side effects on the hardware.
 */
void input_formatter_get_state(
    const input_formatter_ID_t ID,
    input_formatter_state_t *state)
{
	assert(ID < N_INPUT_FORMATTER_ID);
	assert(state);
	/*
		state->reset = input_formatter_reg_load(ID,
			HIVE_IF_RESET_ADDRESS);
	 */
	state->start_line = input_formatter_reg_load(ID,
			    HIVE_IF_START_LINE_ADDRESS);
	state->start_column = input_formatter_reg_load(ID,
			      HIVE_IF_START_COLUMN_ADDRESS);
	state->cropped_height = input_formatter_reg_load(ID,
				HIVE_IF_CROPPED_HEIGHT_ADDRESS);
	state->cropped_width = input_formatter_reg_load(ID,
			       HIVE_IF_CROPPED_WIDTH_ADDRESS);
	state->ver_decimation = input_formatter_reg_load(ID,
				HIVE_IF_VERTICAL_DECIMATION_ADDRESS);
	state->hor_decimation = input_formatter_reg_load(ID,
				HIVE_IF_HORIZONTAL_DECIMATION_ADDRESS);
	state->hor_deinterleaving = input_formatter_reg_load(ID,
				    HIVE_IF_H_DEINTERLEAVING_ADDRESS);
	state->left_padding = input_formatter_reg_load(ID,
			      HIVE_IF_LEFTPADDING_WIDTH_ADDRESS);
	state->eol_offset = input_formatter_reg_load(ID,
			    HIVE_IF_END_OF_LINE_OFFSET_ADDRESS);
	state->vmem_start_address = input_formatter_reg_load(ID,
				    HIVE_IF_VMEM_START_ADDRESS_ADDRESS);
	state->vmem_end_address = input_formatter_reg_load(ID,
				  HIVE_IF_VMEM_END_ADDRESS_ADDRESS);
	state->vmem_increment = input_formatter_reg_load(ID,
				HIVE_IF_VMEM_INCREMENT_ADDRESS);
	state->is_yuv420 = input_formatter_reg_load(ID,
			   HIVE_IF_YUV_420_FORMAT_ADDRESS);
	state->vsync_active_low = input_formatter_reg_load(ID,
				  HIVE_IF_VSYNCK_ACTIVE_LOW_ADDRESS);
	state->hsync_active_low = input_formatter_reg_load(ID,
				  HIVE_IF_HSYNCK_ACTIVE_LOW_ADDRESS);
	state->allow_fifo_overflow = input_formatter_reg_load(ID,
				     HIVE_IF_ALLOW_FIFO_OVERFLOW_ADDRESS);
	state->block_fifo_when_no_req = input_formatter_reg_load(ID,
					HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS);
	state->ver_deinterleaving = input_formatter_reg_load(ID,
				    HIVE_IF_V_DEINTERLEAVING_ADDRESS);
	/* FSM */
	state->fsm_sync_status = input_formatter_reg_load(ID,
				 HIVE_IF_FSM_SYNC_STATUS);
	state->fsm_sync_counter = input_formatter_reg_load(ID,
				  HIVE_IF_FSM_SYNC_COUNTER);
	state->fsm_crop_status = input_formatter_reg_load(ID,
				 HIVE_IF_FSM_CROP_STATUS);
	state->fsm_crop_line_counter = input_formatter_reg_load(ID,
				       HIVE_IF_FSM_CROP_LINE_COUNTER);
	state->fsm_crop_pixel_counter = input_formatter_reg_load(ID,
					HIVE_IF_FSM_CROP_PIXEL_COUNTER);
	state->fsm_deinterleaving_index = input_formatter_reg_load(ID,
					  HIVE_IF_FSM_DEINTERLEAVING_IDX);
	state->fsm_dec_h_counter = input_formatter_reg_load(ID,
				   HIVE_IF_FSM_DECIMATION_H_COUNTER);
	state->fsm_dec_v_counter = input_formatter_reg_load(ID,
				   HIVE_IF_FSM_DECIMATION_V_COUNTER);
	state->fsm_dec_block_v_counter = input_formatter_reg_load(ID,
					 HIVE_IF_FSM_DECIMATION_BLOCK_V_COUNTER);
	state->fsm_padding_status = input_formatter_reg_load(ID,
				    HIVE_IF_FSM_PADDING_STATUS);
	state->fsm_padding_elem_counter = input_formatter_reg_load(ID,
					  HIVE_IF_FSM_PADDING_ELEMENT_COUNTER);
	state->fsm_vector_support_error = input_formatter_reg_load(ID,
					  HIVE_IF_FSM_VECTOR_SUPPORT_ERROR);
	state->fsm_vector_buffer_full = input_formatter_reg_load(ID,
					HIVE_IF_FSM_VECTOR_SUPPORT_BUFF_FULL);
	state->vector_support = input_formatter_reg_load(ID,
				HIVE_IF_FSM_VECTOR_SUPPORT);
	state->sensor_data_lost = input_formatter_reg_load(ID,
				  HIVE_IF_FIFO_SENSOR_STATUS);

	return;
}

/*
 * Snapshot the registers of the stream-to-memory (binary copy) block.
 */
void input_formatter_bin_get_state(
    const input_formatter_ID_t ID,
    input_formatter_bin_state_t *state)
{
	assert(ID < N_INPUT_FORMATTER_ID);
	assert(state);

	state->reset = input_formatter_reg_load(ID,
						HIVE_STR2MEM_SOFT_RESET_REG_ADDRESS);
	state->input_endianness = input_formatter_reg_load(ID,
				  HIVE_STR2MEM_INPUT_ENDIANNESS_REG_ADDRESS);
	state->output_endianness = input_formatter_reg_load(ID,
				   HIVE_STR2MEM_OUTPUT_ENDIANNESS_REG_ADDRESS);
	state->bitswap = input_formatter_reg_load(ID,
			 HIVE_STR2MEM_BIT_SWAPPING_REG_ADDRESS);
	state->block_synch = input_formatter_reg_load(ID,
			     HIVE_STR2MEM_BLOCK_SYNC_LEVEL_REG_ADDRESS);
	state->packet_synch = input_formatter_reg_load(ID,
			      HIVE_STR2MEM_PACKET_SYNC_LEVEL_REG_ADDRESS);
	state->readpostwrite_synch = input_formatter_reg_load(ID,
				     HIVE_STR2MEM_READ_POST_WRITE_SYNC_ENABLE_REG_ADDRESS);
	state->is_2ppc = input_formatter_reg_load(ID,
			 HIVE_STR2MEM_DUAL_BYTE_INPUTS_ENABLED_REG_ADDRESS);
	state->en_status_update = input_formatter_reg_load(ID,
				  HIVE_STR2MEM_EN_STAT_UPDATE_ADDRESS);

	return;
}
#endif
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010-015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "system_global.h" #ifndef ISP2401 #include "input_system.h" #include <type_support.h> #include "gp_device.h" #include "assert_support.h" #ifndef __INLINE_INPUT_SYSTEM__ #include "input_system_private.h" #endif /* __INLINE_INPUT_SYSTEM__ */ #define ZERO (0x0) #define ONE (1U) static const isp2400_ib_buffer_t IB_BUFFER_NULL = {0, 0, 0 }; static input_system_err_t input_system_configure_channel( const channel_cfg_t channel); static input_system_err_t input_system_configure_channel_sensor( const channel_cfg_t channel); static input_system_err_t input_buffer_configuration(void); static input_system_err_t configuration_to_registers(void); static void receiver_rst(const rx_ID_t ID); static void input_system_network_rst(const input_system_ID_t ID); static void capture_unit_configure( const input_system_ID_t ID, const sub_system_ID_t sub_id, const isp2400_ib_buffer_t *const cfg); static void acquisition_unit_configure( const input_system_ID_t ID, const sub_system_ID_t sub_id, const isp2400_ib_buffer_t *const cfg); static void ctrl_unit_configure( const input_system_ID_t ID, const sub_system_ID_t sub_id, const ctrl_unit_cfg_t *const cfg); static void input_system_network_configure( const input_system_ID_t ID, const input_system_network_cfg_t *const cfg); // MW: CSI is previously named as "rx" short for "receiver" static input_system_err_t set_csi_cfg( csi_cfg_t *const lhs, const csi_cfg_t *const 
rhs, input_system_config_flags_t *const flags); static input_system_err_t set_source_type( input_system_source_t *const lhs, const input_system_source_t rhs, input_system_config_flags_t *const flags); static input_system_err_t input_system_multiplexer_cfg( input_system_multiplex_t *const lhs, const input_system_multiplex_t rhs, input_system_config_flags_t *const flags); static inline void capture_unit_get_state( const input_system_ID_t ID, const sub_system_ID_t sub_id, capture_unit_state_t *state); static inline void acquisition_unit_get_state( const input_system_ID_t ID, const sub_system_ID_t sub_id, acquisition_unit_state_t *state); static inline void ctrl_unit_get_state( const input_system_ID_t ID, const sub_system_ID_t sub_id, ctrl_unit_state_t *state); static inline void mipi_port_get_state( const rx_ID_t ID, const enum mipi_port_id port_ID, mipi_port_state_t *state); static inline void rx_channel_get_state( const rx_ID_t ID, const unsigned int ch_id, rx_channel_state_t *state); static void gp_device_rst(const gp_device_ID_t ID); static void input_selector_cfg_for_sensor(const gp_device_ID_t ID); static void input_switch_rst(const gp_device_ID_t ID); static void input_switch_cfg( const gp_device_ID_t ID, const input_switch_cfg_t *const cfg ); void input_system_get_state( const input_system_ID_t ID, input_system_state_t *state) { sub_system_ID_t sub_id; assert(ID < N_INPUT_SYSTEM_ID); assert(state); state->str_multicastA_sel = input_system_sub_system_reg_load(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_MULTICAST_A_IDX); state->str_multicastB_sel = input_system_sub_system_reg_load(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_MULTICAST_B_IDX); state->str_multicastC_sel = input_system_sub_system_reg_load(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_MULTICAST_C_IDX); state->str_mux_sel = input_system_sub_system_reg_load(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_MUX_IDX); state->str_mon_status = input_system_sub_system_reg_load(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_STRMON_STAT_IDX); 
state->str_mon_irq_cond = input_system_sub_system_reg_load(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_STRMON_COND_IDX); state->str_mon_irq_en = input_system_sub_system_reg_load(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX); state->isys_srst = input_system_sub_system_reg_load(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_SRST_IDX); state->isys_slv_reg_srst = input_system_sub_system_reg_load(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_SLV_REG_SRST_IDX); state->str_deint_portA_cnt = input_system_sub_system_reg_load(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_REG_PORT_A_IDX); state->str_deint_portB_cnt = input_system_sub_system_reg_load(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_REG_PORT_B_IDX); for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID; sub_id++) { capture_unit_get_state(ID, sub_id, &state->capture_unit[sub_id - CAPTURE_UNIT0_ID]); } for (sub_id = ACQUISITION_UNIT0_ID; sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) { acquisition_unit_get_state(ID, sub_id, &state->acquisition_unit[sub_id - ACQUISITION_UNIT0_ID]); } for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID; sub_id++) { ctrl_unit_get_state(ID, sub_id, &state->ctrl_unit_state[sub_id - CTRL_UNIT0_ID]); } } void receiver_get_state( const rx_ID_t ID, receiver_state_t *state) { enum mipi_port_id port_id; unsigned int ch_id; assert(ID < N_RX_ID); assert(state); state->fs_to_ls_delay = (uint8_t)receiver_reg_load(ID, _HRT_CSS_RECEIVER_FS_TO_LS_DELAY_REG_IDX); state->ls_to_data_delay = (uint8_t)receiver_reg_load(ID, _HRT_CSS_RECEIVER_LS_TO_DATA_DELAY_REG_IDX); state->data_to_le_delay = (uint8_t)receiver_reg_load(ID, _HRT_CSS_RECEIVER_DATA_TO_LE_DELAY_REG_IDX); state->le_to_fe_delay = (uint8_t)receiver_reg_load(ID, _HRT_CSS_RECEIVER_LE_TO_FE_DELAY_REG_IDX); state->fe_to_fs_delay = (uint8_t)receiver_reg_load(ID, _HRT_CSS_RECEIVER_FE_TO_FS_DELAY_REG_IDX); state->le_to_fs_delay = (uint8_t)receiver_reg_load(ID, _HRT_CSS_RECEIVER_LE_TO_LS_DELAY_REG_IDX); state->is_two_ppc = 
(bool)receiver_reg_load(ID, _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX); state->backend_rst = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BACKEND_RST_REG_IDX); state->raw18 = (uint16_t)receiver_reg_load(ID, _HRT_CSS_RECEIVER_RAW18_REG_IDX); state->force_raw8 = (bool)receiver_reg_load(ID, _HRT_CSS_RECEIVER_FORCE_RAW8_REG_IDX); state->raw16 = (uint16_t)receiver_reg_load(ID, _HRT_CSS_RECEIVER_RAW16_REG_IDX); for (port_id = (enum mipi_port_id)0; port_id < N_MIPI_PORT_ID; port_id++) { mipi_port_get_state(ID, port_id, &state->mipi_port_state[port_id]); } for (ch_id = 0U; ch_id < N_RX_CHANNEL_ID; ch_id++) { rx_channel_get_state(ID, ch_id, &state->rx_channel_state[ch_id]); } state->be_gsp_acc_ovl = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_GSP_ACC_OVL_REG_IDX); state->be_srst = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_SRST_REG_IDX); state->be_is_two_ppc = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX); state->be_comp_format0 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG0_IDX); state->be_comp_format1 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG1_IDX); state->be_comp_format2 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG2_IDX); state->be_comp_format3 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG3_IDX); state->be_sel = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_SEL_REG_IDX); state->be_raw16_config = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_RAW16_CONFIG_REG_IDX); state->be_raw18_config = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_RAW18_CONFIG_REG_IDX); state->be_force_raw8 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_FORCE_RAW8_REG_IDX); state->be_irq_status = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_IRQ_STATUS_REG_IDX); state->be_irq_clear = receiver_reg_load(ID, _HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX); } bool is_mipi_format_yuv420( const mipi_format_t mipi_format) { bool is_yuv420 = ( (mipi_format == MIPI_FORMAT_YUV420_8) || (mipi_format == MIPI_FORMAT_YUV420_10) || (mipi_format == 
MIPI_FORMAT_YUV420_8_SHIFT) || (mipi_format == MIPI_FORMAT_YUV420_10_SHIFT)); /* MIPI_FORMAT_YUV420_8_LEGACY is not YUV420 */ return is_yuv420; } void receiver_set_compression( const rx_ID_t ID, const unsigned int cfg_ID, const mipi_compressor_t comp, const mipi_predictor_t pred) { const unsigned int field_id = cfg_ID % N_MIPI_FORMAT_CUSTOM; const unsigned int ch_id = cfg_ID / N_MIPI_FORMAT_CUSTOM; hrt_data val; hrt_address addr = 0; hrt_data reg; assert(ID < N_RX_ID); assert(cfg_ID < N_MIPI_COMPRESSOR_CONTEXT); assert(field_id < N_MIPI_FORMAT_CUSTOM); assert(ch_id < N_RX_CHANNEL_ID); assert(comp < N_MIPI_COMPRESSOR_METHODS); assert(pred < N_MIPI_PREDICTOR_TYPES); val = (((uint8_t)pred) << 3) | comp; switch (ch_id) { case 0: addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX : _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX); break; case 1: addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX : _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX); break; case 2: addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX : _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX); break; case 3: addr = ((field_id < 6) ? _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX : _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX); break; default: /* should not happen */ assert(false); return; } reg = ((field_id < 6) ? 
(val << (field_id * 5)) : (val << (( field_id - 6) * 5))); receiver_reg_store(ID, addr, reg); } void receiver_port_enable( const rx_ID_t ID, const enum mipi_port_id port_ID, const bool cnd) { hrt_data reg = receiver_port_reg_load(ID, port_ID, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX); if (cnd) { reg |= 0x01; } else { reg &= ~0x01; } receiver_port_reg_store(ID, port_ID, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX, reg); } bool is_receiver_port_enabled( const rx_ID_t ID, const enum mipi_port_id port_ID) { hrt_data reg = receiver_port_reg_load(ID, port_ID, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX); return ((reg & 0x01) != 0); } void receiver_irq_enable( const rx_ID_t ID, const enum mipi_port_id port_ID, const rx_irq_info_t irq_info) { receiver_port_reg_store(ID, port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX, irq_info); } rx_irq_info_t receiver_get_irq_info( const rx_ID_t ID, const enum mipi_port_id port_ID) { return receiver_port_reg_load(ID, port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX); } void receiver_irq_clear( const rx_ID_t ID, const enum mipi_port_id port_ID, const rx_irq_info_t irq_info) { receiver_port_reg_store(ID, port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX, irq_info); } static inline void capture_unit_get_state( const input_system_ID_t ID, const sub_system_ID_t sub_id, capture_unit_state_t *state) { assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <= CAPTURE_UNIT2_ID)); assert(state); state->StartMode = input_system_sub_system_reg_load(ID, sub_id, CAPT_START_MODE_REG_ID); state->Start_Addr = input_system_sub_system_reg_load(ID, sub_id, CAPT_START_ADDR_REG_ID); state->Mem_Region_Size = input_system_sub_system_reg_load(ID, sub_id, CAPT_MEM_REGION_SIZE_REG_ID); state->Num_Mem_Regions = input_system_sub_system_reg_load(ID, sub_id, CAPT_NUM_MEM_REGIONS_REG_ID); // AM: Illegal read from following registers. 
/* state->Init = input_system_sub_system_reg_load(ID, sub_id, CAPT_INIT_REG_ID); state->Start = input_system_sub_system_reg_load(ID, sub_id, CAPT_START_REG_ID); state->Stop = input_system_sub_system_reg_load(ID, sub_id, CAPT_STOP_REG_ID); */ state->Packet_Length = input_system_sub_system_reg_load(ID, sub_id, CAPT_PACKET_LENGTH_REG_ID); state->Received_Length = input_system_sub_system_reg_load(ID, sub_id, CAPT_RECEIVED_LENGTH_REG_ID); state->Received_Short_Packets = input_system_sub_system_reg_load(ID, sub_id, CAPT_RECEIVED_SHORT_PACKETS_REG_ID); state->Received_Long_Packets = input_system_sub_system_reg_load(ID, sub_id, CAPT_RECEIVED_LONG_PACKETS_REG_ID); state->Last_Command = input_system_sub_system_reg_load(ID, sub_id, CAPT_LAST_COMMAND_REG_ID); state->Next_Command = input_system_sub_system_reg_load(ID, sub_id, CAPT_NEXT_COMMAND_REG_ID); state->Last_Acknowledge = input_system_sub_system_reg_load(ID, sub_id, CAPT_LAST_ACKNOWLEDGE_REG_ID); state->Next_Acknowledge = input_system_sub_system_reg_load(ID, sub_id, CAPT_NEXT_ACKNOWLEDGE_REG_ID); state->FSM_State_Info = input_system_sub_system_reg_load(ID, sub_id, CAPT_FSM_STATE_INFO_REG_ID); } static inline void acquisition_unit_get_state( const input_system_ID_t ID, const sub_system_ID_t sub_id, acquisition_unit_state_t *state) { assert(sub_id == ACQUISITION_UNIT0_ID); assert(state); state->Start_Addr = input_system_sub_system_reg_load(ID, sub_id, ACQ_START_ADDR_REG_ID); state->Mem_Region_Size = input_system_sub_system_reg_load(ID, sub_id, ACQ_MEM_REGION_SIZE_REG_ID); state->Num_Mem_Regions = input_system_sub_system_reg_load(ID, sub_id, ACQ_NUM_MEM_REGIONS_REG_ID); // AM: Illegal read from following registers. 
/* state->Init = input_system_sub_system_reg_load(ID, sub_id, ACQ_INIT_REG_ID); */ state->Received_Short_Packets = input_system_sub_system_reg_load(ID, sub_id, ACQ_RECEIVED_SHORT_PACKETS_REG_ID); state->Received_Long_Packets = input_system_sub_system_reg_load(ID, sub_id, ACQ_RECEIVED_LONG_PACKETS_REG_ID); state->Last_Command = input_system_sub_system_reg_load(ID, sub_id, ACQ_LAST_COMMAND_REG_ID); state->Next_Command = input_system_sub_system_reg_load(ID, sub_id, ACQ_NEXT_COMMAND_REG_ID); state->Last_Acknowledge = input_system_sub_system_reg_load(ID, sub_id, ACQ_LAST_ACKNOWLEDGE_REG_ID); state->Next_Acknowledge = input_system_sub_system_reg_load(ID, sub_id, ACQ_NEXT_ACKNOWLEDGE_REG_ID); state->FSM_State_Info = input_system_sub_system_reg_load(ID, sub_id, ACQ_FSM_STATE_INFO_REG_ID); state->Int_Cntr_Info = input_system_sub_system_reg_load(ID, sub_id, ACQ_INT_CNTR_INFO_REG_ID); } static inline void ctrl_unit_get_state( const input_system_ID_t ID, const sub_system_ID_t sub_id, ctrl_unit_state_t *state) { assert(sub_id == CTRL_UNIT0_ID); assert(state); state->captA_start_addr = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_START_ADDR_A_REG_ID); state->captB_start_addr = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_START_ADDR_B_REG_ID); state->captC_start_addr = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_START_ADDR_C_REG_ID); state->captA_mem_region_size = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID); state->captB_mem_region_size = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID); state->captC_mem_region_size = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID); state->captA_num_mem_regions = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID); state->captB_num_mem_regions = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID); 
state->captC_num_mem_regions = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID); state->acq_start_addr = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_ACQ_START_ADDR_REG_ID); state->acq_mem_region_size = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID); state->acq_num_mem_regions = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID); // AM: Illegal read from following registers. /* state->ctrl_init = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_INIT_REG_ID); */ state->last_cmd = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_LAST_COMMAND_REG_ID); state->next_cmd = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_NEXT_COMMAND_REG_ID); state->last_ack = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID); state->next_ack = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID); state->top_fsm_state = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_FSM_STATE_INFO_REG_ID); state->captA_fsm_state = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID); state->captB_fsm_state = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID); state->captC_fsm_state = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID); state->acq_fsm_state = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID); state->capt_reserve_one_mem_region = input_system_sub_system_reg_load(ID, sub_id, ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID); } static inline void mipi_port_get_state( const rx_ID_t ID, const enum mipi_port_id port_ID, mipi_port_state_t *state) { int i; assert(ID < N_RX_ID); assert(port_ID < N_MIPI_PORT_ID); assert(state); state->device_ready = receiver_port_reg_load(ID, port_ID, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX); state->irq_status = receiver_port_reg_load(ID, port_ID, 
_HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX); state->irq_enable = receiver_port_reg_load(ID, port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX); state->timeout_count = receiver_port_reg_load(ID, port_ID, _HRT_CSS_RECEIVER_TIMEOUT_COUNT_REG_IDX); state->init_count = (uint16_t)receiver_port_reg_load(ID, port_ID, _HRT_CSS_RECEIVER_INIT_COUNT_REG_IDX); state->raw16_18 = (uint16_t)receiver_port_reg_load(ID, port_ID, _HRT_CSS_RECEIVER_RAW16_18_DATAID_REG_IDX); state->sync_count = receiver_port_reg_load(ID, port_ID, _HRT_CSS_RECEIVER_SYNC_COUNT_REG_IDX); state->rx_count = receiver_port_reg_load(ID, port_ID, _HRT_CSS_RECEIVER_RX_COUNT_REG_IDX); for (i = 0; i < MIPI_4LANE_CFG ; i++) { state->lane_sync_count[i] = (uint8_t)((state->sync_count) >> (i * 8)); state->lane_rx_count[i] = (uint8_t)((state->rx_count) >> (i * 8)); } } static inline void rx_channel_get_state( const rx_ID_t ID, const unsigned int ch_id, rx_channel_state_t *state) { int i; assert(ID < N_RX_ID); assert(ch_id < N_RX_CHANNEL_ID); assert(state); switch (ch_id) { case 0: state->comp_scheme0 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX); state->comp_scheme1 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX); break; case 1: state->comp_scheme0 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX); state->comp_scheme1 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX); break; case 2: state->comp_scheme0 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX); state->comp_scheme1 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX); break; case 3: state->comp_scheme0 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX); state->comp_scheme1 = receiver_reg_load(ID, _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX); break; } /* See Table 7.1.17,..., 7.1.24 */ for (i = 0; i < 6; i++) { u8 val = (uint8_t)((state->comp_scheme0) >> (i * 5)) & 0x1f; state->comp[i] = 
(mipi_compressor_t)(val & 0x07); state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3); } for (i = 6; i < N_MIPI_FORMAT_CUSTOM; i++) { u8 val = (uint8_t)((state->comp_scheme0) >> ((i - 6) * 5)) & 0x1f; state->comp[i] = (mipi_compressor_t)(val & 0x07); state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3); } } // MW: "2400" in the name is not good, but this is to avoid a naming conflict static input_system_cfg2400_t config; static void receiver_rst( const rx_ID_t ID) { enum mipi_port_id port_id; assert(ID < N_RX_ID); // Disable all ports. for (port_id = MIPI_PORT0_ID; port_id < N_MIPI_PORT_ID; port_id++) { receiver_port_enable(ID, port_id, false); } // AM: Additional actions for stopping receiver? } //Single function to reset all the devices mapped via GP_DEVICE. static void gp_device_rst(const gp_device_ID_t ID) { assert(ID < N_GP_DEVICE_ID); gp_device_reg_store(ID, _REG_GP_SYNCGEN_ENABLE_ADDR, ZERO); // gp_device_reg_store(ID, _REG_GP_SYNCGEN_FREE_RUNNING_ADDR, ZERO); // gp_device_reg_store(ID, _REG_GP_SYNCGEN_PAUSE_ADDR, ONE); // gp_device_reg_store(ID, _REG_GP_NR_FRAMES_ADDR, ZERO); // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_PIX_ADDR, ZERO); // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_PIX_ADDR, ZERO); // gp_device_reg_store(ID, _REG_GP_SYNGEN_NR_LINES_ADDR, ZERO); // gp_device_reg_store(ID, _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR, ZERO); // gp_device_reg_store(ID, _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR, ZERO); // AM: Following calls cause strange warnings. Probably they should not be initialized. 
	// gp_device_reg_store(ID, _REG_GP_ISEL_SOF_ADDR, ZERO);
	// gp_device_reg_store(ID, _REG_GP_ISEL_EOF_ADDR, ZERO);
	// gp_device_reg_store(ID, _REG_GP_ISEL_SOL_ADDR, ZERO);
	// gp_device_reg_store(ID, _REG_GP_ISEL_EOL_ADDR, ZERO);
	/* Clear all input-selector LFSR / test-pattern-generator settings. */
	gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_ENABLE_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_ENABLE_B_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_TPG_ENABLE_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_TPG_ENABLE_B_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_HOR_CNT_MASK_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_VER_CNT_MASK_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_XY_CNT_MASK_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_HOR_CNT_DELTA_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_VER_CNT_DELTA_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_TPG_MODE_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_TPG_RED1_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_TPG_GREEN1_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_TPG_BLUE1_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_TPG_RED2_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_TPG_GREEN2_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_TPG_BLUE2_ADDR, ZERO);
	//gp_device_reg_store(ID, _REG_GP_ISEL_CH_ID_ADDR, ZERO);
	//gp_device_reg_store(ID, _REG_GP_ISEL_FMT_TYPE_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_DATA_SEL_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_SBAND_SEL_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_SYNC_SEL_ADDR, ZERO);
	// gp_device_reg_store(ID, _REG_GP_SYNCGEN_HOR_CNT_ADDR, ZERO);
	// gp_device_reg_store(ID, _REG_GP_SYNCGEN_VER_CNT_ADDR, ZERO);
	// gp_device_reg_store(ID, _REG_GP_SYNCGEN_FRAME_CNT_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_SOFT_RESET_ADDR, ZERO); // AM: Maybe this soft reset is not safe.
}

/*
 * Program the input selector of GP device <ID> to pass sensor data
 * through: sync signals (SOF/EOF/SOL/EOL) enabled, selector muxes and
 * channel/format overrides cleared.
 */
static void input_selector_cfg_for_sensor(const gp_device_ID_t ID)
{
	assert(ID < N_GP_DEVICE_ID);

	gp_device_reg_store(ID, _REG_GP_ISEL_SOF_ADDR, ONE);
	gp_device_reg_store(ID, _REG_GP_ISEL_EOF_ADDR, ONE);
	gp_device_reg_store(ID, _REG_GP_ISEL_SOL_ADDR, ONE);
	gp_device_reg_store(ID, _REG_GP_ISEL_EOL_ADDR, ONE);
	gp_device_reg_store(ID, _REG_GP_ISEL_CH_ID_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_FMT_TYPE_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_DATA_SEL_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_SBAND_SEL_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_ISEL_SYNC_SEL_ADDR, ZERO);
	gp_device_reg_store(ID, _REG_GP_SOFT_RESET_ADDR, ZERO);
}

/* Clear the input-switch routing LUTs (data&hsync and vsync) of <ID>. */
static void input_switch_rst(const gp_device_ID_t ID)
{
	int addr;

	assert(ID < N_GP_DEVICE_ID);

	// Initialize the data&hsync LUT.
	for (addr = _REG_GP_IFMT_input_switch_lut_reg0;
	     addr <= _REG_GP_IFMT_input_switch_lut_reg7; addr += SIZEOF_HRT_REG) {
		gp_device_reg_store(ID, addr, ZERO);
	}

	// Initialize the vsync LUT.
	gp_device_reg_store(ID, _REG_GP_IFMT_input_switch_fsync_lut, ZERO);
}

/*
 * Write the combined input-switch configuration <cfg> (two LUT words per
 * RX channel, plus the shared vsync LUT) into GP device <ID>.
 */
static void input_switch_cfg(
    const gp_device_ID_t ID,
    const input_switch_cfg_t *const cfg)
{
	int addr_offset;

	assert(ID < N_GP_DEVICE_ID);
	assert(cfg);

	// Initialize the data&hsync LUT.
	for (addr_offset = 0; addr_offset < N_RX_CHANNEL_ID * 2; addr_offset++) {
		/* Guard: the computed register must stay inside lut_reg0..lut_reg7. */
		assert(addr_offset * SIZEOF_HRT_REG + _REG_GP_IFMT_input_switch_lut_reg0 <=
		       _REG_GP_IFMT_input_switch_lut_reg7);
		gp_device_reg_store(ID,
				    _REG_GP_IFMT_input_switch_lut_reg0 +
				    addr_offset * SIZEOF_HRT_REG,
				    cfg->hsync_data_reg[addr_offset]);
	}

	// Initialize the vsync LUT.
	gp_device_reg_store(ID,
			    _REG_GP_IFMT_input_switch_fsync_lut,
			    cfg->vsync_data_reg);
}

/* Reset the input-system network (multicasts, mux, capture/acq/ctrl units). */
static void input_system_network_rst(const input_system_ID_t ID)
{
	unsigned int sub_id;

	// Reset all 3 multicasts.
	input_system_sub_system_reg_store(ID, GPREGS_UNIT0_ID,
					  HIVE_ISYS_GPREG_MULTICAST_A_IDX,
					  INPUT_SYSTEM_DISCARD_ALL);
	input_system_sub_system_reg_store(ID, GPREGS_UNIT0_ID,
					  HIVE_ISYS_GPREG_MULTICAST_B_IDX,
					  INPUT_SYSTEM_DISCARD_ALL);
	input_system_sub_system_reg_store(ID, GPREGS_UNIT0_ID,
					  HIVE_ISYS_GPREG_MULTICAST_C_IDX,
					  INPUT_SYSTEM_DISCARD_ALL);

	// Reset stream mux.
	input_system_sub_system_reg_store(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_MUX_IDX,
					  N_INPUT_SYSTEM_MULTIPLEX);

	// Reset 3 capture units.
	for (sub_id = CAPTURE_UNIT0_ID;
	     sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID; sub_id++) {
		input_system_sub_system_reg_store(ID, sub_id, CAPT_INIT_REG_ID,
						  1U << CAPT_INIT_RST_REG_BIT);
	}

	// Reset acquisition unit.
	for (sub_id = ACQUISITION_UNIT0_ID;
	     sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
		input_system_sub_system_reg_store(ID, sub_id, ACQ_INIT_REG_ID,
						  1U << ACQ_INIT_RST_REG_BIT);
	}

	// DMA unit reset is not needed.

	// Reset controller units.
	// NB: In future we need to keep part of ctrl_state for split capture and
	for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID;
	     sub_id++) {
		input_system_sub_system_reg_store(ID, sub_id, ISYS_CTRL_INIT_REG_ID,
						  1U); //AM: Is there any named constant?
	}
}

// Function that resets current configuration.
/*
 * Reset receiver, network, GP device and input switch hardware, then
 * clear the module-level 'config' state so a new session can be built up
 * via input_system_configure_channel() + input_system_configuration_commit().
 */
input_system_err_t input_system_configuration_reset(void)
{
	unsigned int i;

	receiver_rst(RX0_ID);

	input_system_network_rst(INPUT_SYSTEM0_ID);

	gp_device_rst(GP_DEVICE0_ID);

	input_switch_rst(GP_DEVICE0_ID);

	//target_rst();

	// Reset IRQ_CTRLs.

	// Reset configuration data structures.
	for (i = 0; i < N_CHANNELS; i++) {
		config.ch_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
		config.target_isp_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
		config.target_sp_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
		config.target_strm2mem_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
	}

	for (i = 0; i < N_CSI_PORTS; i++) {
		config.csi_buffer_flags[i] = INPUT_SYSTEM_CFG_FLAG_RESET;
		config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
	}

	config.source_type_flags = INPUT_SYSTEM_CFG_FLAG_RESET;
	config.acquisition_buffer_unique_flags = INPUT_SYSTEM_CFG_FLAG_RESET;
	/* Whole input buffer (SRAM) is free again. */
	config.unallocated_ib_mem_words = IB_CAPACITY_IN_WORDS;
	//config.acq_allocated_ib_mem_words = 0;

	// Set the start of the session configuration.
	config.session_flags = INPUT_SYSTEM_CFG_FLAG_REQUIRED;

	return INPUT_SYSTEM_ERR_NO_ERROR;
}

// MW: Comments are good, but doxygen is required, place it at the declaration
// Function that appends the channel to current configuration.
static input_system_err_t input_system_configure_channel(
    const channel_cfg_t channel)
{
	input_system_err_t error = INPUT_SYSTEM_ERR_NO_ERROR;
	// Check if channel is not already configured.
	if (config.ch_flags[channel.ch_id] & INPUT_SYSTEM_CFG_FLAG_SET) {
		return INPUT_SYSTEM_ERR_CHANNEL_ALREADY_SET;
	} else {
		switch (channel.source_type) {
		case INPUT_SYSTEM_SOURCE_SENSOR:
			error = input_system_configure_channel_sensor(channel);
			break;
		case INPUT_SYSTEM_SOURCE_TPG:
		case INPUT_SYSTEM_SOURCE_PRBS:
		case INPUT_SYSTEM_SOURCE_FIFO:
		default:
			/* Only the sensor source is implemented here. */
			return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
		}

		if (error != INPUT_SYSTEM_ERR_NO_ERROR)
			return error;
		// Input switch channel configurations must be combined in united config.
		/* Two hsync LUT words per CSI port; 3 vsync bits per port. */
		config.input_switch_cfg.hsync_data_reg[channel.source_cfg.csi_cfg.csi_port * 2] =
		    channel.target_cfg.input_switch_channel_cfg.hsync_data_reg[0];
		config.input_switch_cfg.hsync_data_reg[channel.source_cfg.csi_cfg.csi_port * 2 + 1] =
		    channel.target_cfg.input_switch_channel_cfg.hsync_data_reg[1];
		config.input_switch_cfg.vsync_data_reg |=
		    (channel.target_cfg.input_switch_channel_cfg.vsync_data_reg & 0x7) <<
		    (channel.source_cfg.csi_cfg.csi_port * 3);

		// Other targets are just copied and marked as set.
		config.target_isp[channel.source_cfg.csi_cfg.csi_port] =
		    channel.target_cfg.target_isp_cfg;
		config.target_sp[channel.source_cfg.csi_cfg.csi_port] =
		    channel.target_cfg.target_sp_cfg;
		config.target_strm2mem[channel.source_cfg.csi_cfg.csi_port] =
		    channel.target_cfg.target_strm2mem_cfg;
		config.target_isp_flags[channel.source_cfg.csi_cfg.csi_port] |=
		    INPUT_SYSTEM_CFG_FLAG_SET;
		config.target_sp_flags[channel.source_cfg.csi_cfg.csi_port] |=
		    INPUT_SYSTEM_CFG_FLAG_SET;
		config.target_strm2mem_flags[channel.source_cfg.csi_cfg.csi_port] |=
		    INPUT_SYSTEM_CFG_FLAG_SET;

		config.ch_flags[channel.ch_id] = INPUT_SYSTEM_CFG_FLAG_SET;
	}
	return INPUT_SYSTEM_ERR_NO_ERROR;
}

// Function that partitions input buffer space with determining addresses.
/*
 * Walk all configured CSI ports and carve the shared input buffer (SRAM)
 * into per-port CSI buffers plus at most ONE acquisition buffer (shared
 * by all ports, placed last). Fails with CONFLICT when requests exceed
 * the remaining capacity or two ports request different acq buffers.
 */
static input_system_err_t input_buffer_configuration(void)
{
	u32 current_address    = 0;
	u32 unallocated_memory = IB_CAPACITY_IN_WORDS;

	isp2400_ib_buffer_t candidate_buffer_acq  = IB_BUFFER_NULL;
	u32 size_requested;
	input_system_config_flags_t acq_already_specified = INPUT_SYSTEM_CFG_FLAG_RESET;
	input_system_csi_port_t port;

	for (port = INPUT_SYSTEM_PORT_A; port < N_INPUT_SYSTEM_PORTS; port++) {
		csi_cfg_t source = config.csi_value[port];//.csi_cfg;

		if (config.csi_flags[port] & INPUT_SYSTEM_CFG_FLAG_SET) {
			// Check and set csi buffer in input buffer.
			switch (source.buffering_mode) {
			case INPUT_SYSTEM_FIFO_CAPTURE:
			case INPUT_SYSTEM_XMEM_ACQUIRE:
				/* These modes need no per-port CSI buffer. */
				config.csi_buffer_flags[port] =
				    INPUT_SYSTEM_CFG_FLAG_BLOCKED;	// Well, not used.
				break;
			case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
			case INPUT_SYSTEM_SRAM_BUFFERING:
			case INPUT_SYSTEM_XMEM_BUFFERING:
			case INPUT_SYSTEM_XMEM_CAPTURE:
				size_requested = source.csi_buffer.mem_reg_size *
						 source.csi_buffer.nof_mem_regs;
				if (source.csi_buffer.mem_reg_size > 0 &&
				    source.csi_buffer.nof_mem_regs > 0 &&
				    size_requested <= unallocated_memory) {
					/* Allocate sequentially from the front. */
					config.csi_buffer[port].mem_reg_addr = current_address;
					config.csi_buffer[port].mem_reg_size = source.csi_buffer.mem_reg_size;
					config.csi_buffer[port].nof_mem_regs = source.csi_buffer.nof_mem_regs;
					current_address += size_requested;
					unallocated_memory -= size_requested;
					config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_SET;
				} else {
					config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
					return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
				}
				break;
			default:
				config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
				return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
			}

			// Check acquisition buffer specified but set it later since it has to be unique.
			switch (source.buffering_mode) {
			case INPUT_SYSTEM_FIFO_CAPTURE:
			case INPUT_SYSTEM_SRAM_BUFFERING:
			case INPUT_SYSTEM_XMEM_CAPTURE:
				// Nothing to do.
				break;
			case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
			case INPUT_SYSTEM_XMEM_BUFFERING:
			case INPUT_SYSTEM_XMEM_ACQUIRE:
				if (acq_already_specified == INPUT_SYSTEM_CFG_FLAG_RESET) {
					size_requested = source.acquisition_buffer.mem_reg_size *
							 source.acquisition_buffer.nof_mem_regs;
					if (source.acquisition_buffer.mem_reg_size > 0 &&
					    source.acquisition_buffer.nof_mem_regs > 0 &&
					    size_requested <= unallocated_memory) {
						candidate_buffer_acq = source.acquisition_buffer;
						acq_already_specified = INPUT_SYSTEM_CFG_FLAG_SET;
					}
				} else {
					// Check if specified acquisition buffer is the same as specified before.
					if (source.acquisition_buffer.mem_reg_size != candidate_buffer_acq.mem_reg_size ||
					    source.acquisition_buffer.nof_mem_regs != candidate_buffer_acq.nof_mem_regs) {
						config.acquisition_buffer_unique_flags |=
						    INPUT_SYSTEM_CFG_FLAG_CONFLICT;
						return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
					}
				}
				break;
			default:
				return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
			}
		} else {
			config.csi_buffer_flags[port] = INPUT_SYSTEM_CFG_FLAG_BLOCKED;
		}
	}			// end of for ( port )

	// Set the acquisition buffer at the end.
	size_requested = candidate_buffer_acq.mem_reg_size *
			 candidate_buffer_acq.nof_mem_regs;
	if (acq_already_specified == INPUT_SYSTEM_CFG_FLAG_SET &&
	    size_requested <= unallocated_memory) {
		config.acquisition_buffer_unique.mem_reg_addr = current_address;
		config.acquisition_buffer_unique.mem_reg_size = candidate_buffer_acq.mem_reg_size;
		config.acquisition_buffer_unique.nof_mem_regs = candidate_buffer_acq.nof_mem_regs;
		current_address += size_requested;
		unallocated_memory -= size_requested;
		config.acquisition_buffer_unique_flags = INPUT_SYSTEM_CFG_FLAG_SET;

		assert(current_address <= IB_CAPACITY_IN_WORDS);
	}
	return INPUT_SYSTEM_ERR_NO_ERROR;
}

/* Program one capture unit <sub_id> with input-buffer region <cfg>. */
static void capture_unit_configure(
    const input_system_ID_t ID,
    const sub_system_ID_t sub_id,
    const isp2400_ib_buffer_t *const cfg)
{
	assert(ID < N_INPUT_SYSTEM_ID);
	assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <=
		CAPTURE_UNIT2_ID));	// Commented part is always true.
	assert(cfg);

	input_system_sub_system_reg_store(ID, sub_id, CAPT_START_ADDR_REG_ID,
					  cfg->mem_reg_addr);
	input_system_sub_system_reg_store(ID, sub_id, CAPT_MEM_REGION_SIZE_REG_ID,
					  cfg->mem_reg_size);
	input_system_sub_system_reg_store(ID, sub_id, CAPT_NUM_MEM_REGIONS_REG_ID,
					  cfg->nof_mem_regs);
}

/* Program the (single) acquisition unit with input-buffer region <cfg>. */
static void acquisition_unit_configure(
    const input_system_ID_t ID,
    const sub_system_ID_t sub_id,
    const isp2400_ib_buffer_t *const cfg)
{
	assert(ID < N_INPUT_SYSTEM_ID);
	assert(sub_id == ACQUISITION_UNIT0_ID);
	assert(cfg);

	input_system_sub_system_reg_store(ID, sub_id, ACQ_START_ADDR_REG_ID,
					  cfg->mem_reg_addr);
	input_system_sub_system_reg_store(ID, sub_id, ACQ_NUM_MEM_REGIONS_REG_ID,
					  cfg->nof_mem_regs);
	input_system_sub_system_reg_store(ID, sub_id, ACQ_MEM_REGION_SIZE_REG_ID,
					  cfg->mem_reg_size);
}

/*
 * Program the controller unit with the buffer layout of all three
 * capture units (A/B/C) and the acquisition unit.
 */
static void ctrl_unit_configure(
    const input_system_ID_t ID,
    const sub_system_ID_t sub_id,
    const ctrl_unit_cfg_t *const cfg)
{
	assert(ID < N_INPUT_SYSTEM_ID);
	assert(sub_id == CTRL_UNIT0_ID);
	assert(cfg);

	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_CAPT_START_ADDR_A_REG_ID,
					  cfg->buffer_mipi[CAPTURE_UNIT0_ID].mem_reg_addr);
	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID,
					  cfg->buffer_mipi[CAPTURE_UNIT0_ID].mem_reg_size);
	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID,
					  cfg->buffer_mipi[CAPTURE_UNIT0_ID].nof_mem_regs);

	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_CAPT_START_ADDR_B_REG_ID,
					  cfg->buffer_mipi[CAPTURE_UNIT1_ID].mem_reg_addr);
	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID,
					  cfg->buffer_mipi[CAPTURE_UNIT1_ID].mem_reg_size);
	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID,
					  cfg->buffer_mipi[CAPTURE_UNIT1_ID].nof_mem_regs);

	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_CAPT_START_ADDR_C_REG_ID,
					  cfg->buffer_mipi[CAPTURE_UNIT2_ID].mem_reg_addr);
	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID,
					  cfg->buffer_mipi[CAPTURE_UNIT2_ID].mem_reg_size);
	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID,
					  cfg->buffer_mipi[CAPTURE_UNIT2_ID].nof_mem_regs);

	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_ACQ_START_ADDR_REG_ID,
					  cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].mem_reg_addr);
	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID,
					  cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].mem_reg_size);
	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID,
					  cfg->buffer_acquire[ACQUISITION_UNIT0_ID - ACQUISITION_UNIT0_ID].nof_mem_regs);
	input_system_sub_system_reg_store(ID, sub_id,
					  ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID,
					  0);
}

/* Push the network-level configuration <cfg> into the hardware of <ID>. */
static void input_system_network_configure(
    const input_system_ID_t ID,
    const input_system_network_cfg_t *const cfg)
{
	u32 sub_id;

	assert(ID < N_INPUT_SYSTEM_ID);
	assert(cfg);

	// Set all 3 multicasts.
	input_system_sub_system_reg_store(ID, GPREGS_UNIT0_ID,
					  HIVE_ISYS_GPREG_MULTICAST_A_IDX,
					  cfg->multicast_cfg[CAPTURE_UNIT0_ID]);
	input_system_sub_system_reg_store(ID, GPREGS_UNIT0_ID,
					  HIVE_ISYS_GPREG_MULTICAST_B_IDX,
					  cfg->multicast_cfg[CAPTURE_UNIT1_ID]);
	input_system_sub_system_reg_store(ID, GPREGS_UNIT0_ID,
					  HIVE_ISYS_GPREG_MULTICAST_C_IDX,
					  cfg->multicast_cfg[CAPTURE_UNIT2_ID]);

	// Set stream mux.
	input_system_sub_system_reg_store(ID, GPREGS_UNIT0_ID, HIVE_ISYS_GPREG_MUX_IDX,
					  cfg->mux_cfg);

	// Set capture units.
	for (sub_id = CAPTURE_UNIT0_ID;
	     sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID; sub_id++) {
		capture_unit_configure(ID, sub_id,
				       &cfg->ctrl_unit_cfg[ID].buffer_mipi[sub_id - CAPTURE_UNIT0_ID]);
	}

	// Set acquisition units.
	for (sub_id = ACQUISITION_UNIT0_ID;
	     sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
		acquisition_unit_configure(ID, sub_id,
					   &cfg->ctrl_unit_cfg[sub_id - ACQUISITION_UNIT0_ID].buffer_acquire[sub_id - ACQUISITION_UNIT0_ID]);
	}

	// No DMA configuration needed. Ctrl_unit will fully control it.

	// Set controller units.
	for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID;
	     sub_id++) {
		ctrl_unit_configure(ID, sub_id,
				    &cfg->ctrl_unit_cfg[sub_id - CTRL_UNIT0_ID]);
	}
}

/*
 * Translate the accumulated 'config' state into hardware registers:
 * derive per-port multicast routing from the buffering mode, build the
 * network configuration, then program the network, input selector and
 * input switch.
 */
static input_system_err_t configuration_to_registers(void)
{
	input_system_network_cfg_t input_system_network_cfg;
	int i;

	assert(config.source_type_flags & INPUT_SYSTEM_CFG_FLAG_SET);

	switch (config.source_type) {
	case INPUT_SYSTEM_SOURCE_SENSOR:

		// Determine stream multicasts setting based on the mode of csi_cfg_t.
		// AM: This should be moved towards earlier function call, e.g. in
		// the commit function.
		for (i = MIPI_PORT0_ID; i < N_MIPI_PORT_ID; i++) {
			if (config.csi_flags[i] & INPUT_SYSTEM_CFG_FLAG_SET) {
				switch (config.csi_value[i].buffering_mode) {
				case INPUT_SYSTEM_FIFO_CAPTURE:
					config.multicast[i] = INPUT_SYSTEM_CSI_BACKEND;
					break;
				case INPUT_SYSTEM_XMEM_CAPTURE:
				case INPUT_SYSTEM_SRAM_BUFFERING:
				case INPUT_SYSTEM_XMEM_BUFFERING:
					config.multicast[i] = INPUT_SYSTEM_INPUT_BUFFER;
					break;
				case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
					config.multicast[i] = INPUT_SYSTEM_MULTICAST;
					break;
				case INPUT_SYSTEM_XMEM_ACQUIRE:
					config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
					break;
				default:
					config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
					return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
					//break;
				}
			} else {
				config.multicast[i] = INPUT_SYSTEM_DISCARD_ALL;
			}

			input_system_network_cfg.multicast_cfg[i] = config.multicast[i];
		} // for

		input_system_network_cfg.mux_cfg = config.multiplexer;

		/* Map per-MIPI-port buffers onto the capture units A/B/C. */
		input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
						       CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT0_ID] =
							   config.csi_buffer[MIPI_PORT0_ID];
		input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
						       CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT1_ID] =
							   config.csi_buffer[MIPI_PORT1_ID];
		input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
						       CTRL_UNIT0_ID].buffer_mipi[CAPTURE_UNIT2_ID] =
							   config.csi_buffer[MIPI_PORT2_ID];
		input_system_network_cfg.ctrl_unit_cfg[CTRL_UNIT0_ID -
						       CTRL_UNIT0_ID].buffer_acquire[ACQUISITION_UNIT0_ID -
							   ACQUISITION_UNIT0_ID] =
							       config.acquisition_buffer_unique;

		// First set input network around CSI receiver.
		input_system_network_configure(INPUT_SYSTEM0_ID, &input_system_network_cfg);

		// Set the CSI receiver.
		//...
		break;

	case INPUT_SYSTEM_SOURCE_TPG:
	case INPUT_SYSTEM_SOURCE_PRBS:
	case INPUT_SYSTEM_SOURCE_FIFO:
		break;

	default:
		return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
	}			// end of switch (source_type)

	// Set input selector.
	input_selector_cfg_for_sensor(GP_DEVICE0_ID);

	// Set input switch.
	input_switch_cfg(GP_DEVICE0_ID, &config.input_switch_cfg);

	// Set input formatters.
	// AM: IF are set dynamically.
	return INPUT_SYSTEM_ERR_NO_ERROR;
}

// Function that applies the whole configuration.
input_system_err_t input_system_configuration_commit(void)
{
	// The last configuration step is to configure the input buffer.
	input_system_err_t error = input_buffer_configuration();

	if (error != INPUT_SYSTEM_ERR_NO_ERROR) {
		return error;
	}

	// Translate the whole configuration into registers.
	error = configuration_to_registers();
	if (error != INPUT_SYSTEM_ERR_NO_ERROR) {
		return error;
	}

	// Translate the whole configuration into ctrl commands etc.
	return INPUT_SYSTEM_ERR_NO_ERROR;
}

// FIFO
/* Build and register a channel that captures directly to the CSI backend FIFO. */
input_system_err_t input_system_csi_fifo_channel_cfg(
    u32 ch_id,
    input_system_csi_port_t port,
    backend_channel_cfg_t backend_ch,
    target_cfg2400_t target)
{
	channel_cfg_t channel;

	channel.ch_id = ch_id;
	channel.backend_ch = backend_ch;
	channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
	//channel.source
	channel.source_cfg.csi_cfg.csi_port = port;
	channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_FIFO_CAPTURE;
	channel.source_cfg.csi_cfg.csi_buffer = IB_BUFFER_NULL;
	channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
	channel.source_cfg.csi_cfg.nof_xmem_buffers = 0;

	channel.target_cfg = target;
	return input_system_configure_channel(channel);
}

/* FIFO capture with frame counting; needs a per-port CSI buffer. */
input_system_err_t input_system_csi_fifo_channel_with_counting_cfg(
    u32 ch_id,
    u32 nof_frames,
    input_system_csi_port_t port,
    backend_channel_cfg_t backend_ch,
    u32 csi_mem_reg_size,
    u32 csi_nof_mem_regs,
    target_cfg2400_t target)
{
	channel_cfg_t channel;

	channel.ch_id = ch_id;
	channel.backend_ch = backend_ch;
	channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
	//channel.source
	channel.source_cfg.csi_cfg.csi_port = port;
	channel.source_cfg.csi_cfg.buffering_mode =
	    INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING;
	channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
	channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
	channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
	channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
	channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;

	channel.target_cfg = target;
	return input_system_configure_channel(channel);
}

// SRAM
/* Buffer frames in the internal SRAM input buffer (no xmem round-trip). */
input_system_err_t input_system_csi_sram_channel_cfg(
    u32 ch_id,
    input_system_csi_port_t port,
    backend_channel_cfg_t backend_ch,
    u32 csi_mem_reg_size,
    u32 csi_nof_mem_regs,
    //	uint32_t		acq_mem_reg_size,
    //	uint32_t		acq_nof_mem_regs,
    target_cfg2400_t target)
{
	channel_cfg_t channel;

	channel.ch_id = ch_id;
	channel.backend_ch = backend_ch;
	channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
	//channel.source
	channel.source_cfg.csi_cfg.csi_port = port;
	channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_SRAM_BUFFERING;
	channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
	channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
	channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
	channel.source_cfg.csi_cfg.acquisition_buffer = IB_BUFFER_NULL;
	channel.source_cfg.csi_cfg.nof_xmem_buffers = 0;

	channel.target_cfg = target;
	return input_system_configure_channel(channel);
}

//XMEM
// Collects all parameters and puts them in channel_cfg_t.
/* Full buffering path: CSI buffer + acquisition buffer + xmem frame buffers. */
input_system_err_t input_system_csi_xmem_channel_cfg(
    u32 ch_id,
    input_system_csi_port_t port,
    backend_channel_cfg_t backend_ch,
    u32 csi_mem_reg_size,
    u32 csi_nof_mem_regs,
    u32 acq_mem_reg_size,
    u32 acq_nof_mem_regs,
    target_cfg2400_t target,
    uint32_t nof_xmem_buffers)
{
	channel_cfg_t channel;

	channel.ch_id = ch_id;
	channel.backend_ch = backend_ch;
	channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
	//channel.source
	channel.source_cfg.csi_cfg.csi_port = port;
	channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_BUFFERING;
	channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
	channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
	channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
	channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
	channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
	channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
	channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_xmem_buffers;

	channel.target_cfg = target;
	return input_system_configure_channel(channel);
}

/* Acquisition-buffer-only variant (no per-port CSI buffer). */
input_system_err_t input_system_csi_xmem_acquire_only_channel_cfg(
    u32 ch_id,
    u32 nof_frames,
    input_system_csi_port_t port,
    backend_channel_cfg_t backend_ch,
    u32 acq_mem_reg_size,
    u32 acq_nof_mem_regs,
    target_cfg2400_t target)
{
	channel_cfg_t channel;

	channel.ch_id = ch_id;
	channel.backend_ch = backend_ch;
	channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
	//channel.source
	channel.source_cfg.csi_cfg.csi_port = port;
	channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_ACQUIRE;
	channel.source_cfg.csi_cfg.csi_buffer = IB_BUFFER_NULL;
	channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
	channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
	channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
	channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;

	channel.target_cfg = target;
	return input_system_configure_channel(channel);
}

/* Capture-only variant; NOTE(review): backend_ch is deliberately left unset here. */
input_system_err_t input_system_csi_xmem_capture_only_channel_cfg(
    u32 ch_id,
    u32 nof_frames,
    input_system_csi_port_t port,
    u32 csi_mem_reg_size,
    u32 csi_nof_mem_regs,
    u32 acq_mem_reg_size,
    u32 acq_nof_mem_regs,
    target_cfg2400_t target)
{
	channel_cfg_t channel;

	channel.ch_id = ch_id;
	//channel.backend_ch = backend_ch;
	channel.source_type = INPUT_SYSTEM_SOURCE_SENSOR;
	//channel.source
	channel.source_cfg.csi_cfg.csi_port = port;
	//channel.source_cfg.csi_cfg.backend_ch = backend_ch;
	channel.source_cfg.csi_cfg.buffering_mode = INPUT_SYSTEM_XMEM_CAPTURE;
	channel.source_cfg.csi_cfg.csi_buffer.mem_reg_size = csi_mem_reg_size;
	channel.source_cfg.csi_cfg.csi_buffer.nof_mem_regs = csi_nof_mem_regs;
	channel.source_cfg.csi_cfg.csi_buffer.mem_reg_addr = 0;
	channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_size = acq_mem_reg_size;
	channel.source_cfg.csi_cfg.acquisition_buffer.nof_mem_regs = acq_nof_mem_regs;
	channel.source_cfg.csi_cfg.acquisition_buffer.mem_reg_addr = 0;
	channel.source_cfg.csi_cfg.nof_xmem_buffers = nof_frames;

	channel.target_cfg = target;
	return input_system_configure_channel(channel);
}

// Non - CSI
/* Pseudo-random bit-stream generator source. */
input_system_err_t input_system_prbs_channel_cfg(
    u32 ch_id,
    u32 nof_frames,//not used yet
    u32 seed,
    u32 sync_gen_width,
    u32 sync_gen_height,
    u32 sync_gen_hblank_cycles,
    u32 sync_gen_vblank_cycles,
    target_cfg2400_t target)
{
	channel_cfg_t channel;

	(void)nof_frames;

	channel.ch_id = ch_id;
	channel.source_type = INPUT_SYSTEM_SOURCE_PRBS;

	channel.source_cfg.prbs_cfg.seed = seed;
	channel.source_cfg.prbs_cfg.sync_gen_cfg.width = sync_gen_width;
	channel.source_cfg.prbs_cfg.sync_gen_cfg.height = sync_gen_height;
	channel.source_cfg.prbs_cfg.sync_gen_cfg.hblank_cycles = sync_gen_hblank_cycles;
	channel.source_cfg.prbs_cfg.sync_gen_cfg.vblank_cycles = sync_gen_vblank_cycles;

	channel.target_cfg = target;

	return input_system_configure_channel(channel);
}

/* Test-pattern-generator source. */
input_system_err_t input_system_tpg_channel_cfg(
    u32 ch_id,
    u32 nof_frames,//not used yet
    u32 x_mask,
    u32 y_mask,
    u32 x_delta,
    u32 y_delta,
    u32 xy_mask,
    u32 sync_gen_width,
    u32 sync_gen_height,
    u32 sync_gen_hblank_cycles,
    u32 sync_gen_vblank_cycles,
    target_cfg2400_t target)
{
	channel_cfg_t channel;

	(void)nof_frames;

	channel.ch_id = ch_id;
	channel.source_type = INPUT_SYSTEM_SOURCE_TPG;

	channel.source_cfg.tpg_cfg.x_mask = x_mask;
	channel.source_cfg.tpg_cfg.y_mask = y_mask;
	channel.source_cfg.tpg_cfg.x_delta = x_delta;
	channel.source_cfg.tpg_cfg.y_delta = y_delta;
	channel.source_cfg.tpg_cfg.xy_mask = xy_mask;
	channel.source_cfg.tpg_cfg.sync_gen_cfg.width = sync_gen_width;
	channel.source_cfg.tpg_cfg.sync_gen_cfg.height = sync_gen_height;
	channel.source_cfg.tpg_cfg.sync_gen_cfg.hblank_cycles = sync_gen_hblank_cycles;
	channel.source_cfg.tpg_cfg.sync_gen_cfg.vblank_cycles = sync_gen_vblank_cycles;

	channel.target_cfg = target;

	return input_system_configure_channel(channel);
}

// MW: Don't use system specific names, (even in system specific files) "cfg2400" -> cfg
/* GP-FIFO source. */
input_system_err_t input_system_gpfifo_channel_cfg(
    u32 ch_id,
    u32 nof_frames, //not used yet
    target_cfg2400_t target)
{
	channel_cfg_t channel;

	(void)nof_frames;

	channel.ch_id = ch_id;
	channel.source_type = INPUT_SYSTEM_SOURCE_FIFO;

	channel.target_cfg = target;

	return input_system_configure_channel(channel);
}

///////////////////////////////////////////////////////////////////////////
//
// Private specialized
// functions for channel setting.
//
///////////////////////////////////////////////////////////////////////////

// Fills the parameters to config.csi_value[port]
/*
 * Validate a sensor-sourced channel and fold it into the global 'config':
 * sets source type, per-port CSI config, multiplexer and multicast route
 * according to the requested buffering mode.
 */
static input_system_err_t input_system_configure_channel_sensor(
    const channel_cfg_t channel)
{
	const u32 port = channel.source_cfg.csi_cfg.csi_port;
	input_system_err_t status = INPUT_SYSTEM_ERR_NO_ERROR;

	input_system_multiplex_t mux;

	if (port >= N_INPUT_SYSTEM_PORTS)
		return INPUT_SYSTEM_ERR_GENERIC;

	//check if port > N_INPUT_SYSTEM_MULTIPLEX

	status = set_source_type(&config.source_type, channel.source_type,
				 &config.source_type_flags);
	if (status != INPUT_SYSTEM_ERR_NO_ERROR)
		return status;

	// Check for conflicts on source (implicitly on multicast, capture unit and input buffer).
	status = set_csi_cfg(&config.csi_value[port], &channel.source_cfg.csi_cfg,
			     &config.csi_flags[port]);
	if (status != INPUT_SYSTEM_ERR_NO_ERROR)
		return status;

	switch (channel.source_cfg.csi_cfg.buffering_mode) {
	case INPUT_SYSTEM_FIFO_CAPTURE:

		// Check for conflicts on mux.
		mux = INPUT_SYSTEM_MIPI_PORT0 + port;
		status = input_system_multiplexer_cfg(&config.multiplexer, mux,
						      &config.multiplexer_flags);
		if (status != INPUT_SYSTEM_ERR_NO_ERROR)
			return status;
		config.multicast[port] = INPUT_SYSTEM_CSI_BACKEND;

		// Shared resource, so it should be blocked.
		//config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
		//config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
		//config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;

		break;
	case INPUT_SYSTEM_SRAM_BUFFERING:

		// Check for conflicts on mux.
		mux = INPUT_SYSTEM_ACQUISITION_UNIT;
		status = input_system_multiplexer_cfg(&config.multiplexer, mux,
						      &config.multiplexer_flags);
		if (status != INPUT_SYSTEM_ERR_NO_ERROR)
			return status;
		config.multicast[port] = INPUT_SYSTEM_INPUT_BUFFER;

		// Shared resource, so it should be blocked.
		//config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
		//config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
		//config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;

		break;
	case INPUT_SYSTEM_XMEM_BUFFERING:

		// Check for conflicts on mux.
		mux = INPUT_SYSTEM_ACQUISITION_UNIT;
		status = input_system_multiplexer_cfg(&config.multiplexer, mux,
						      &config.multiplexer_flags);
		if (status != INPUT_SYSTEM_ERR_NO_ERROR)
			return status;
		config.multicast[port] = INPUT_SYSTEM_INPUT_BUFFER;

		// Shared resource, so it should be blocked.
		//config.mux_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
		//config.csi_buffer_flags[port] |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;
		//config.acquisition_buffer_unique_flags |= INPUT_SYSTEM_CFG_FLAG_BLOCKED;

		break;
	case INPUT_SYSTEM_FIFO_CAPTURE_WITH_COUNTING:
	case INPUT_SYSTEM_XMEM_CAPTURE:
	case INPUT_SYSTEM_XMEM_ACQUIRE:
	default:
		return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
	}

	return INPUT_SYSTEM_ERR_NO_ERROR;
}

// Test flags and set structure.
/*
 * Write-once setter for the session source type: re-setting the same
 * value is a no-op, any different value marks a CONFLICT.
 */
static input_system_err_t set_source_type(
    input_system_source_t *const lhs,
    const input_system_source_t rhs,
    input_system_config_flags_t *const flags)
{
	// MW: Not enough asserts
	assert(lhs);
	assert(flags);

	if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
		*flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
		return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
	}

	if ((*flags) & INPUT_SYSTEM_CFG_FLAG_SET) {
		// Check for consistency with already set value.
		if ((*lhs) == (rhs)) {
			return INPUT_SYSTEM_ERR_NO_ERROR;
		} else {
			*flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
			return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
		}
	}
	// Check the value (individually).
	if (rhs >= N_INPUT_SYSTEM_SOURCE) {
		*flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
		return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
	}
	// Set the value.
	*lhs = rhs;
	*flags |= INPUT_SYSTEM_CFG_FLAG_SET;
	return INPUT_SYSTEM_ERR_NO_ERROR;
}

// Test flags and set structure.
/*
 * Write-once setter for a port's CSI configuration: identical re-set is
 * accepted, a differing one is a CONFLICT, and the requested buffer
 * sizes are checked against the remaining input-buffer capacity.
 */
static input_system_err_t set_csi_cfg(
    csi_cfg_t *const lhs,
    const csi_cfg_t *const rhs,
    input_system_config_flags_t *const flags)
{
	u32 memory_required;
	u32 acq_memory_required;

	assert(lhs);
	assert(flags);

	if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
		*flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
		return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
	}

	if (*flags & INPUT_SYSTEM_CFG_FLAG_SET) {
		// check for consistency with already set value.
		if (/*lhs->backend_ch == rhs.backend_ch
		    &&*/ lhs->buffering_mode == rhs->buffering_mode &&
		    lhs->csi_buffer.mem_reg_size == rhs->csi_buffer.mem_reg_size &&
		    lhs->csi_buffer.nof_mem_regs == rhs->csi_buffer.nof_mem_regs &&
		    lhs->acquisition_buffer.mem_reg_size == rhs->acquisition_buffer.mem_reg_size &&
		    lhs->acquisition_buffer.nof_mem_regs == rhs->acquisition_buffer.nof_mem_regs &&
		    lhs->nof_xmem_buffers == rhs->nof_xmem_buffers) {
			return INPUT_SYSTEM_ERR_NO_ERROR;
		} else {
			*flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
			return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
		}
	}
	// Check the value (individually).
	// no check for backend_ch
	// no check for nof_xmem_buffers
	memory_required = rhs->csi_buffer.mem_reg_size * rhs->csi_buffer.nof_mem_regs;
	acq_memory_required = rhs->acquisition_buffer.mem_reg_size *
			      rhs->acquisition_buffer.nof_mem_regs;
	if (rhs->buffering_mode >= N_INPUT_SYSTEM_BUFFERING_MODE ||
	    // Check if required memory is available in input buffer (SRAM).
	    (memory_required + acq_memory_required) >
	    config.unallocated_ib_mem_words) {
		*flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
		return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
	}
	// Set the value.
	//lhs[port]->backend_ch		= rhs.backend_ch;
	lhs->buffering_mode = rhs->buffering_mode;
	lhs->nof_xmem_buffers = rhs->nof_xmem_buffers;

	lhs->csi_buffer.mem_reg_size = rhs->csi_buffer.mem_reg_size;
	lhs->csi_buffer.nof_mem_regs = rhs->csi_buffer.nof_mem_regs;
	lhs->acquisition_buffer.mem_reg_size = rhs->acquisition_buffer.mem_reg_size;
	lhs->acquisition_buffer.nof_mem_regs = rhs->acquisition_buffer.nof_mem_regs;
	// ALX: NB: Here we just set buffer parameters, but still not allocate it
	// (no addresses determined). That will be done during commit.

	// FIXIT: acq_memory_required is not deducted, since it can be allocated multiple times.
	config.unallocated_ib_mem_words -= memory_required;
	//assert(config.unallocated_ib_mem_words >=0);
	*flags |= INPUT_SYSTEM_CFG_FLAG_SET;
	return INPUT_SYSTEM_ERR_NO_ERROR;
}

// Test flags and set structure.
/*
 * Write-once setter for the stream multiplexer selection; same rules as
 * set_source_type(), but an out-of-range value reports NOT_SUPPORTED.
 */
static input_system_err_t input_system_multiplexer_cfg(
    input_system_multiplex_t *const lhs,
    const input_system_multiplex_t rhs,
    input_system_config_flags_t *const flags)
{
	assert(lhs);
	assert(flags);

	if ((*flags) & INPUT_SYSTEM_CFG_FLAG_BLOCKED) {
		*flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
		return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
	}

	if ((*flags) & INPUT_SYSTEM_CFG_FLAG_SET) {
		// Check for consistency with already set value.
		if ((*lhs) == (rhs)) {
			return INPUT_SYSTEM_ERR_NO_ERROR;
		} else {
			*flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
			return INPUT_SYSTEM_ERR_CONFLICT_ON_RESOURCE;
		}
	}
	// Check the value (individually).
	if (rhs >= N_INPUT_SYSTEM_MULTIPLEX) {
		*flags |= INPUT_SYSTEM_CFG_FLAG_CONFLICT;
		return INPUT_SYSTEM_ERR_PARAMETER_NOT_SUPPORTED;
	}
	// Set the value.
	*lhs = rhs;
	*flags |= INPUT_SYSTEM_CFG_FLAG_SET;
	return INPUT_SYSTEM_ERR_NO_ERROR;
}
#endif
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010-2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "assert_support.h" #include "gp_device.h" #ifndef __INLINE_GP_DEVICE__ #include "gp_device_private.h" #endif /* __INLINE_GP_DEVICE__ */ void gp_device_get_state( const gp_device_ID_t ID, gp_device_state_t *state) { assert(ID < N_GP_DEVICE_ID); assert(state); state->syncgen_enable = gp_device_reg_load(ID, _REG_GP_SYNCGEN_ENABLE_ADDR); state->syncgen_free_running = gp_device_reg_load(ID, _REG_GP_SYNCGEN_FREE_RUNNING_ADDR); state->syncgen_pause = gp_device_reg_load(ID, _REG_GP_SYNCGEN_PAUSE_ADDR); state->nr_frames = gp_device_reg_load(ID, _REG_GP_NR_FRAMES_ADDR); state->syngen_nr_pix = gp_device_reg_load(ID, _REG_GP_SYNGEN_NR_PIX_ADDR); state->syngen_nr_pix = gp_device_reg_load(ID, _REG_GP_SYNGEN_NR_PIX_ADDR); state->syngen_nr_lines = gp_device_reg_load(ID, _REG_GP_SYNGEN_NR_LINES_ADDR); state->syngen_hblank_cycles = gp_device_reg_load(ID, _REG_GP_SYNGEN_HBLANK_CYCLES_ADDR); state->syngen_vblank_cycles = gp_device_reg_load(ID, _REG_GP_SYNGEN_VBLANK_CYCLES_ADDR); state->isel_sof = gp_device_reg_load(ID, _REG_GP_ISEL_SOF_ADDR); state->isel_eof = gp_device_reg_load(ID, _REG_GP_ISEL_EOF_ADDR); state->isel_sol = gp_device_reg_load(ID, _REG_GP_ISEL_SOL_ADDR); state->isel_eol = gp_device_reg_load(ID, _REG_GP_ISEL_EOL_ADDR); state->isel_lfsr_enable = gp_device_reg_load(ID, _REG_GP_ISEL_LFSR_ENABLE_ADDR); state->isel_lfsr_enable_b = gp_device_reg_load(ID, _REG_GP_ISEL_LFSR_ENABLE_B_ADDR); 
state->isel_lfsr_reset_value = gp_device_reg_load(ID, _REG_GP_ISEL_LFSR_RESET_VALUE_ADDR); state->isel_tpg_enable = gp_device_reg_load(ID, _REG_GP_ISEL_TPG_ENABLE_ADDR); state->isel_tpg_enable_b = gp_device_reg_load(ID, _REG_GP_ISEL_TPG_ENABLE_B_ADDR); state->isel_hor_cnt_mask = gp_device_reg_load(ID, _REG_GP_ISEL_HOR_CNT_MASK_ADDR); state->isel_ver_cnt_mask = gp_device_reg_load(ID, _REG_GP_ISEL_VER_CNT_MASK_ADDR); state->isel_xy_cnt_mask = gp_device_reg_load(ID, _REG_GP_ISEL_XY_CNT_MASK_ADDR); state->isel_hor_cnt_delta = gp_device_reg_load(ID, _REG_GP_ISEL_HOR_CNT_DELTA_ADDR); state->isel_ver_cnt_delta = gp_device_reg_load(ID, _REG_GP_ISEL_VER_CNT_DELTA_ADDR); state->isel_tpg_mode = gp_device_reg_load(ID, _REG_GP_ISEL_TPG_MODE_ADDR); state->isel_tpg_red1 = gp_device_reg_load(ID, _REG_GP_ISEL_TPG_RED1_ADDR); state->isel_tpg_green1 = gp_device_reg_load(ID, _REG_GP_ISEL_TPG_GREEN1_ADDR); state->isel_tpg_blue1 = gp_device_reg_load(ID, _REG_GP_ISEL_TPG_BLUE1_ADDR); state->isel_tpg_red2 = gp_device_reg_load(ID, _REG_GP_ISEL_TPG_RED2_ADDR); state->isel_tpg_green2 = gp_device_reg_load(ID, _REG_GP_ISEL_TPG_GREEN2_ADDR); state->isel_tpg_blue2 = gp_device_reg_load(ID, _REG_GP_ISEL_TPG_BLUE2_ADDR); state->isel_ch_id = gp_device_reg_load(ID, _REG_GP_ISEL_CH_ID_ADDR); state->isel_fmt_type = gp_device_reg_load(ID, _REG_GP_ISEL_FMT_TYPE_ADDR); state->isel_data_sel = gp_device_reg_load(ID, _REG_GP_ISEL_DATA_SEL_ADDR); state->isel_sband_sel = gp_device_reg_load(ID, _REG_GP_ISEL_SBAND_SEL_ADDR); state->isel_sync_sel = gp_device_reg_load(ID, _REG_GP_ISEL_SYNC_SEL_ADDR); state->syncgen_hor_cnt = gp_device_reg_load(ID, _REG_GP_SYNCGEN_HOR_CNT_ADDR); state->syncgen_ver_cnt = gp_device_reg_load(ID, _REG_GP_SYNCGEN_VER_CNT_ADDR); state->syncgen_frame_cnt = gp_device_reg_load(ID, _REG_GP_SYNCGEN_FRAME_CNT_ADDR); state->soft_reset = gp_device_reg_load(ID, _REG_GP_SOFT_RESET_ADDR); return; }
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gp_device.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010 - 2016, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "isp.h" #include "vmem.h" #include "vmem_local.h" #if !defined(HRT_MEMORY_ACCESS) #include "ia_css_device_access.h" #endif #include "assert_support.h" typedef unsigned long long hive_uedge; typedef hive_uedge *hive_wide; /* Copied from SDK: sim_semantics.c */ /* subword bits move like this: MSB[____xxxx____]LSB -> MSB[00000000xxxx]LSB */ static inline hive_uedge subword(hive_uedge w, unsigned int start, unsigned int end) { return (w & (((1ULL << (end - 1)) - 1) << 1 | 1)) >> start; } /* inverse subword bits move like this: MSB[xxxx____xxxx]LSB -> MSB[xxxx0000xxxx]LSB */ static inline hive_uedge inv_subword(hive_uedge w, unsigned int start, unsigned int end) { return w & (~(((1ULL << (end - 1)) - 1) << 1 | 1) | ((1ULL << start) - 1)); } #define uedge_bits (8 * sizeof(hive_uedge)) #define move_lower_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, 0, src_bit) #define move_upper_bits(target, target_bit, src, src_bit) move_subword(target, target_bit, src, src_bit, uedge_bits) #define move_word(target, target_bit, src) move_subword(target, target_bit, src, 0, uedge_bits) static void move_subword( hive_uedge *target, unsigned int target_bit, hive_uedge src, unsigned int src_start, unsigned int src_end) { unsigned int start_elem = target_bit / uedge_bits; unsigned int start_bit = target_bit % uedge_bits; unsigned int subword_width = src_end - src_start; hive_uedge 
src_subword = subword(src, src_start, src_end); if (subword_width + start_bit > uedge_bits) { /* overlap */ hive_uedge old_val1; hive_uedge old_val0 = inv_subword(target[start_elem], start_bit, uedge_bits); target[start_elem] = old_val0 | (src_subword << start_bit); old_val1 = inv_subword(target[start_elem + 1], 0, subword_width + start_bit - uedge_bits); target[start_elem + 1] = old_val1 | (src_subword >> (uedge_bits - start_bit)); } else { hive_uedge old_val = inv_subword(target[start_elem], start_bit, start_bit + subword_width); target[start_elem] = old_val | (src_subword << start_bit); } } static void hive_sim_wide_unpack( hive_wide vector, hive_wide elem, hive_uint elem_bits, hive_uint index) { /* pointers into wide_type: */ unsigned int start_elem = (elem_bits * index) / uedge_bits; unsigned int start_bit = (elem_bits * index) % uedge_bits; unsigned int end_elem = (elem_bits * (index + 1) - 1) / uedge_bits; unsigned int end_bit = ((elem_bits * (index + 1) - 1) % uedge_bits) + 1; if (elem_bits == uedge_bits) { /* easy case for speedup: */ elem[0] = vector[index]; } else if (start_elem == end_elem) { /* only one (<=64 bits) element needs to be (partly) copied: */ move_subword(elem, 0, vector[start_elem], start_bit, end_bit); } else { /* general case: handles edge spanning cases (includes >64bit elements) */ unsigned int bits_written = 0; unsigned int i; move_upper_bits(elem, bits_written, vector[start_elem], start_bit); bits_written += (64 - start_bit); for (i = start_elem + 1; i < end_elem; i++) { move_word(elem, bits_written, vector[i]); bits_written += uedge_bits; } move_lower_bits(elem, bits_written, vector[end_elem], end_bit); } } static void hive_sim_wide_pack( hive_wide vector, hive_wide elem, hive_uint elem_bits, hive_uint index) { /* pointers into wide_type: */ unsigned int start_elem = (elem_bits * index) / uedge_bits; /* easy case for speedup: */ if (elem_bits == uedge_bits) { vector[start_elem] = elem[0]; } else if (elem_bits > uedge_bits) { 
unsigned int bits_to_write = elem_bits; unsigned int start_bit = elem_bits * index; unsigned int i = 0; for (; bits_to_write > uedge_bits; bits_to_write -= uedge_bits, i++, start_bit += uedge_bits) { move_word(vector, start_bit, elem[i]); } move_lower_bits(vector, start_bit, elem[i], bits_to_write); } else { /* only one element needs to be (partly) copied: */ move_lower_bits(vector, elem_bits * index, elem[0], elem_bits); } } static void load_vector( const isp_ID_t ID, t_vmem_elem *to, const t_vmem_elem *from) { unsigned int i; hive_uedge *data; unsigned int size = sizeof(short) * ISP_NWAY; VMEM_ARRAY(v, 2 * ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */ assert(ISP_BAMEM_BASE[ID] != (hrt_address) - 1); #if !defined(HRT_MEMORY_ACCESS) ia_css_device_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size); #else hrt_master_port_load(ISP_BAMEM_BASE[ID] + (unsigned long)from, &v[0][0], size); #endif data = (hive_uedge *)v; for (i = 0; i < ISP_NWAY; i++) { hive_uedge elem = 0; hive_sim_wide_unpack(data, &elem, ISP_VEC_ELEMBITS, i); to[i] = elem; } udelay(1); /* Spend at least 1 cycles per vector */ } static void store_vector( const isp_ID_t ID, t_vmem_elem *to, const t_vmem_elem *from) { unsigned int i; unsigned int size = sizeof(short) * ISP_NWAY; VMEM_ARRAY(v, 2 * ISP_NWAY); /* Need 2 vectors to work around vmem hss bug */ //load_vector (&v[1][0], &to[ISP_NWAY]); /* Fetch the next vector, since it will be overwritten. 
*/ hive_uedge *data = (hive_uedge *)v; for (i = 0; i < ISP_NWAY; i++) { hive_sim_wide_pack(data, (hive_wide)&from[i], ISP_VEC_ELEMBITS, i); } assert(ISP_BAMEM_BASE[ID] != (hrt_address) - 1); #if !defined(HRT_MEMORY_ACCESS) ia_css_device_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size); #else //hrt_mem_store (ISP, VMEM, (unsigned)to, &v, siz); /* This will overwrite the next vector as well */ hrt_master_port_store(ISP_BAMEM_BASE[ID] + (unsigned long)to, &v, size); #endif udelay(1); /* Spend at least 1 cycles per vector */ } void isp_vmem_load( const isp_ID_t ID, const t_vmem_elem *from, t_vmem_elem *to, unsigned int elems) /* In t_vmem_elem */ { unsigned int c; const t_vmem_elem *vp = from; assert(ID < N_ISP_ID); assert((unsigned long)from % ISP_VEC_ALIGN == 0); assert(elems % ISP_NWAY == 0); for (c = 0; c < elems; c += ISP_NWAY) { load_vector(ID, &to[c], vp); vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN); } } void isp_vmem_store( const isp_ID_t ID, t_vmem_elem *to, const t_vmem_elem *from, unsigned int elems) /* In t_vmem_elem */ { unsigned int c; t_vmem_elem *vp = to; assert(ID < N_ISP_ID); assert((unsigned long)to % ISP_VEC_ALIGN == 0); assert(elems % ISP_NWAY == 0); for (c = 0; c < elems; c += ISP_NWAY) { store_vector(ID, vp, &from[c]); vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN); } } void isp_vmem_2d_load( const isp_ID_t ID, const t_vmem_elem *from, t_vmem_elem *to, unsigned int height, unsigned int width, unsigned int stride_to, /* In t_vmem_elem */ unsigned stride_from /* In t_vmem_elem */) { unsigned int h; assert(ID < N_ISP_ID); assert((unsigned long)from % ISP_VEC_ALIGN == 0); assert(width % ISP_NWAY == 0); assert(stride_from % ISP_NWAY == 0); for (h = 0; h < height; h++) { unsigned int c; const t_vmem_elem *vp = from; for (c = 0; c < width; c += ISP_NWAY) { load_vector(ID, &to[stride_to * h + c], vp); vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN); } from = (const t_vmem_elem *)((const char *)from + stride_from / ISP_NWAY * 
ISP_VEC_ALIGN); } } void isp_vmem_2d_store( const isp_ID_t ID, t_vmem_elem *to, const t_vmem_elem *from, unsigned int height, unsigned int width, unsigned int stride_to, /* In t_vmem_elem */ unsigned stride_from /* In t_vmem_elem */) { unsigned int h; assert(ID < N_ISP_ID); assert((unsigned long)to % ISP_VEC_ALIGN == 0); assert(width % ISP_NWAY == 0); assert(stride_to % ISP_NWAY == 0); for (h = 0; h < height; h++) { unsigned int c; t_vmem_elem *vp = to; for (c = 0; c < width; c += ISP_NWAY) { store_vector(ID, vp, &from[stride_from * h + c]); vp = (t_vmem_elem *)((char *)vp + ISP_VEC_ALIGN); } to = (t_vmem_elem *)((char *)to + stride_to / ISP_NWAY * ISP_VEC_ALIGN); } }
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/vmem.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010-2016, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "debug.h" #include "hmm.h" #ifndef __INLINE_DEBUG__ #include "debug_private.h" #endif /* __INLINE_DEBUG__ */ #define __INLINE_SP__ #include "sp.h" #include "assert_support.h" /* The address of the remote copy */ hrt_address debug_buffer_address = (hrt_address) - 1; ia_css_ptr debug_buffer_ddr_address = (ia_css_ptr)-1; /* The local copy */ static debug_data_t debug_data; debug_data_t *debug_data_ptr = &debug_data; void debug_buffer_init(const hrt_address addr) { debug_buffer_address = addr; debug_data.head = 0; debug_data.tail = 0; } void debug_buffer_ddr_init(const ia_css_ptr addr) { debug_buf_mode_t mode = DEBUG_BUFFER_MODE_LINEAR; u32 enable = 1; u32 head = 0; u32 tail = 0; /* set the ddr queue */ debug_buffer_ddr_address = addr; hmm_store(addr + DEBUG_DATA_BUF_MODE_DDR_ADDR, &mode, sizeof(debug_buf_mode_t)); hmm_store(addr + DEBUG_DATA_HEAD_DDR_ADDR, &head, sizeof(uint32_t)); hmm_store(addr + DEBUG_DATA_TAIL_DDR_ADDR, &tail, sizeof(uint32_t)); hmm_store(addr + DEBUG_DATA_ENABLE_DDR_ADDR, &enable, sizeof(uint32_t)); /* set the local copy */ debug_data.head = 0; debug_data.tail = 0; } void debug_buffer_setmode(const debug_buf_mode_t mode) { assert(debug_buffer_address != ((hrt_address)-1)); sp_dmem_store_uint32(SP0_ID, debug_buffer_address + DEBUG_DATA_BUF_MODE_ADDR, mode); }
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/debug.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>

#include "dma.h"

#include "assert_support.h"

#ifndef __INLINE_DMA__
#include "dma_private.h"
#endif /* __INLINE_DMA__ */

/*
 * dma_get_state() - dump the full observable state of DMA instance `ID`
 * into *state: command FSM, control FSM, pack/request/write FSMs, the
 * per-port connection status, and the per-channel parameter registers.
 * Pure register reads; intended for debugging.
 */
void
dma_get_state(const dma_ID_t ID, dma_state_t *state)
{
	int i;
	hrt_data tmp;

	assert(ID < N_DMA_ID);
	assert(state);

	tmp = dma_reg_load(ID, DMA_COMMAND_FSM_REG_IDX);
	//reg [3:0] : flags error [3], stall, run, idle [0]
	//reg [9:4] : command
	//reg[14:10] : channel
	//reg [23:15] : param
	state->fsm_command_idle = tmp & 0x1;
	state->fsm_command_run = tmp & 0x2;
	state->fsm_command_stalling = tmp & 0x4;
	state->fsm_command_error = tmp & 0x8;
	state->last_command_channel = (tmp >> 10 & 0x1F);
	state->last_command_param = (tmp >> 15 & 0x0F);
	tmp = (tmp >> 4) & 0x3F;
	/* state->last_command = (dma_commands_t)tmp; */
	/* if the enumerator is made non-linear */
	/* AM: the list below does not cover all the cases*/
	/* and these are not correct */
	/* therefore for just dumpinmg this command*/
	state->last_command = tmp;

	/*
	if (tmp == 0) state->last_command = DMA_COMMAND_READ;
	if (tmp == 1) state->last_command = DMA_COMMAND_WRITE;
	if (tmp == 2) state->last_command = DMA_COMMAND_SET_CHANNEL;
	if (tmp == 3) state->last_command = DMA_COMMAND_SET_PARAM;
	if (tmp == 4) state->last_command = DMA_COMMAND_READ_SPECIFIC;
	if (tmp == 5) state->last_command = DMA_COMMAND_WRITE_SPECIFIC;
	if (tmp == 8) state->last_command = DMA_COMMAND_INIT;
	if (tmp == 12) state->last_command = DMA_COMMAND_INIT_SPECIFIC;
	if (tmp == 15) state->last_command = DMA_COMMAND_RST;
	*/

	/* No sub-fields, idx = 0 */
	state->current_command = dma_reg_load(ID,
					      DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_CMD_IDX));
	state->current_addr_a = dma_reg_load(ID,
					     DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_A_IDX));
	state->current_addr_b = dma_reg_load(ID,
					     DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_B_IDX));

	/* Control FSM: low nibble is status flags, remainder is the state id */
	tmp = dma_reg_load(ID,
			   DMA_CG_INFO_REG_IDX(
			       _DMA_FSM_GROUP_FSM_CTRL_STATE_IDX,
			       _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_idle = tmp & 0x1;
	state->fsm_ctrl_run = tmp & 0x2;
	state->fsm_ctrl_stalling = tmp & 0x4;
	state->fsm_ctrl_error = tmp & 0x8;
	tmp = tmp >> 4;
	/* state->fsm_ctrl_state = (dma_ctrl_states_t)tmp; */
	if (tmp == 0)
		state->fsm_ctrl_state = DMA_CTRL_STATE_IDLE;
	if (tmp == 1)
		state->fsm_ctrl_state = DMA_CTRL_STATE_REQ_RCV;
	if (tmp == 2)
		state->fsm_ctrl_state = DMA_CTRL_STATE_RCV;
	if (tmp == 3)
		state->fsm_ctrl_state = DMA_CTRL_STATE_RCV_REQ;
	if (tmp == 4)
		state->fsm_ctrl_state = DMA_CTRL_STATE_INIT;
	state->fsm_ctrl_source_dev = dma_reg_load(ID,
				     DMA_CG_INFO_REG_IDX(
					 _DMA_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX,
					 _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_source_addr = dma_reg_load(ID,
				      DMA_CG_INFO_REG_IDX(
					  _DMA_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX,
					  _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_source_stride = dma_reg_load(ID,
					DMA_CG_INFO_REG_IDX(
					    _DMA_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX,
					    _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_source_width = dma_reg_load(ID,
				       DMA_CG_INFO_REG_IDX(
					   _DMA_FSM_GROUP_FSM_CTRL_REQ_XB_IDX,
					   _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_source_height = dma_reg_load(ID,
					DMA_CG_INFO_REG_IDX(
					    _DMA_FSM_GROUP_FSM_CTRL_REQ_YB_IDX,
					    _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_pack_source_dev = dma_reg_load(ID,
					  DMA_CG_INFO_REG_IDX(
					      _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX,
					      _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_pack_dest_dev = dma_reg_load(ID,
					DMA_CG_INFO_REG_IDX(
					    _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX,
					    _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_dest_addr = dma_reg_load(ID,
				    DMA_CG_INFO_REG_IDX(
					_DMA_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX,
					_DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_dest_stride = dma_reg_load(ID,
				      DMA_CG_INFO_REG_IDX(
					  _DMA_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX,
					  _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_pack_source_width = dma_reg_load(ID,
					    DMA_CG_INFO_REG_IDX(
						_DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX,
						_DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_pack_dest_height = dma_reg_load(ID,
					   DMA_CG_INFO_REG_IDX(
					       _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX,
					       _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_pack_dest_width = dma_reg_load(ID,
					  DMA_CG_INFO_REG_IDX(
					      _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX,
					      _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_pack_source_elems = dma_reg_load(ID,
					    DMA_CG_INFO_REG_IDX(
						_DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX,
						_DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_pack_dest_elems = dma_reg_load(ID,
					  DMA_CG_INFO_REG_IDX(
					      _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX,
					      _DMA_FSM_GROUP_FSM_CTRL_IDX));
	state->fsm_ctrl_pack_extension = dma_reg_load(ID,
					 DMA_CG_INFO_REG_IDX(
					     _DMA_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX,
					     _DMA_FSM_GROUP_FSM_CTRL_IDX));

	/* Pack FSM: low nibble is status flags */
	tmp = dma_reg_load(ID,
			   DMA_CG_INFO_REG_IDX(
			       _DMA_FSM_GROUP_FSM_PACK_STATE_IDX,
			       _DMA_FSM_GROUP_FSM_PACK_IDX));
	state->pack_idle = tmp & 0x1;
	state->pack_run = tmp & 0x2;
	state->pack_stalling = tmp & 0x4;
	state->pack_error = tmp & 0x8;
	state->pack_cnt_height = dma_reg_load(ID,
					      DMA_CG_INFO_REG_IDX(
						  _DMA_FSM_GROUP_FSM_PACK_CNT_YB_IDX,
						  _DMA_FSM_GROUP_FSM_PACK_IDX));
	state->pack_src_cnt_width = dma_reg_load(ID,
				    DMA_CG_INFO_REG_IDX(
					_DMA_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX,
					_DMA_FSM_GROUP_FSM_PACK_IDX));
	state->pack_dest_cnt_width = dma_reg_load(ID,
				     DMA_CG_INFO_REG_IDX(
					 _DMA_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX,
					 _DMA_FSM_GROUP_FSM_PACK_IDX));

	/* Request (read) FSM state */
	tmp = dma_reg_load(ID, DMA_CG_INFO_REG_IDX(
			       _DMA_FSM_GROUP_FSM_REQ_STATE_IDX,
			       _DMA_FSM_GROUP_FSM_REQ_IDX));
	/* state->read_state = (dma_rw_states_t)tmp; */
	if (tmp == 0)
		state->read_state = DMA_RW_STATE_IDLE;
	if (tmp == 1)
		state->read_state = DMA_RW_STATE_REQ;
	if (tmp == 2)
		state->read_state = DMA_RW_STATE_NEXT_LINE;
	if (tmp == 3)
		state->read_state = DMA_RW_STATE_UNLOCK_CHANNEL;
	state->read_cnt_height = dma_reg_load(ID,
					      DMA_CG_INFO_REG_IDX(
						  _DMA_FSM_GROUP_FSM_REQ_CNT_YB_IDX,
						  _DMA_FSM_GROUP_FSM_REQ_IDX));
	state->read_cnt_width = dma_reg_load(ID,
					     DMA_CG_INFO_REG_IDX(
						 _DMA_FSM_GROUP_FSM_REQ_CNT_XB_IDX,
						 _DMA_FSM_GROUP_FSM_REQ_IDX));

	/* Write FSM state */
	tmp = dma_reg_load(ID, DMA_CG_INFO_REG_IDX(
			       _DMA_FSM_GROUP_FSM_WR_STATE_IDX,
			       _DMA_FSM_GROUP_FSM_WR_IDX));
	/* state->write_state = (dma_rw_states_t)tmp; */
	if (tmp == 0)
		state->write_state = DMA_RW_STATE_IDLE;
	if (tmp == 1)
		state->write_state = DMA_RW_STATE_REQ;
	if (tmp == 2)
		state->write_state = DMA_RW_STATE_NEXT_LINE;
	if (tmp == 3)
		state->write_state = DMA_RW_STATE_UNLOCK_CHANNEL;
	state->write_height = dma_reg_load(ID,
					   DMA_CG_INFO_REG_IDX(
					       _DMA_FSM_GROUP_FSM_WR_CNT_YB_IDX,
					       _DMA_FSM_GROUP_FSM_WR_IDX));
	state->write_width = dma_reg_load(ID,
					  DMA_CG_INFO_REG_IDX(
					      _DMA_FSM_GROUP_FSM_WR_CNT_XB_IDX,
					      _DMA_FSM_GROUP_FSM_WR_IDX));

	/* Per-port request/send handshake flags and FIFO status */
	for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) {
		dma_port_state_t *port = &state->port_states[i];

		tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(0, i));
		port->req_cs = ((tmp & 0x1) != 0);
		port->req_we_n = ((tmp & 0x2) != 0);
		port->req_run = ((tmp & 0x4) != 0);
		port->req_ack = ((tmp & 0x8) != 0);

		tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(1, i));
		port->send_cs = ((tmp & 0x1) != 0);
		port->send_we_n = ((tmp & 0x2) != 0);
		port->send_run = ((tmp & 0x4) != 0);
		port->send_ack = ((tmp & 0x8) != 0);

		tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(2, i));
		if (tmp & 0x1)
			port->fifo_state = DMA_FIFO_STATE_WILL_BE_FULL;
		if (tmp & 0x2)
			port->fifo_state = DMA_FIFO_STATE_FULL;
		if (tmp & 0x4)
			port->fifo_state = DMA_FIFO_STATE_EMPTY;
		port->fifo_counter = tmp >> 3;
	}

	/* Per-channel transfer parameters */
	for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) {
		dma_channel_state_t *ch = &state->channel_states[i];

		ch->connection = DMA_GET_CONNECTION(dma_reg_load(ID,
				 DMA_CHANNEL_PARAM_REG_IDX(i,
					 _DMA_PACKING_SETUP_PARAM)));
		ch->sign_extend = DMA_GET_EXTENSION(dma_reg_load(ID,
				  DMA_CHANNEL_PARAM_REG_IDX(i,
					  _DMA_PACKING_SETUP_PARAM)));
		ch->height = dma_reg_load(ID,
					  DMA_CHANNEL_PARAM_REG_IDX(i,
						  _DMA_HEIGHT_PARAM));
		ch->stride_a = dma_reg_load(ID,
					    DMA_CHANNEL_PARAM_REG_IDX(i,
						    _DMA_STRIDE_A_PARAM));
		ch->elems_a = DMA_GET_ELEMENTS(dma_reg_load(ID,
					       DMA_CHANNEL_PARAM_REG_IDX(i,
						       _DMA_ELEM_CROPPING_A_PARAM)));
		ch->cropping_a = DMA_GET_CROPPING(dma_reg_load(ID,
						  DMA_CHANNEL_PARAM_REG_IDX(i,
							  _DMA_ELEM_CROPPING_A_PARAM)));
		ch->width_a = dma_reg_load(ID,
					   DMA_CHANNEL_PARAM_REG_IDX(i,
						   _DMA_WIDTH_A_PARAM));
		ch->stride_b = dma_reg_load(ID,
					    DMA_CHANNEL_PARAM_REG_IDX(i,
						    _DMA_STRIDE_B_PARAM));
		ch->elems_b = DMA_GET_ELEMENTS(dma_reg_load(ID,
					       DMA_CHANNEL_PARAM_REG_IDX(i,
						       _DMA_ELEM_CROPPING_B_PARAM)));
		ch->cropping_b = DMA_GET_CROPPING(dma_reg_load(ID,
						  DMA_CHANNEL_PARAM_REG_IDX(i,
							  _DMA_ELEM_CROPPING_B_PARAM)));
		ch->width_b = dma_reg_load(ID,
					   DMA_CHANNEL_PARAM_REG_IDX(i,
						   _DMA_WIDTH_B_PARAM));
	}
}

/*
 * dma_set_max_burst_size() - program the maximum burst size for one DMA
 * connection.  The register stores (size - 1), hence max_burst_size must
 * be > 0 (asserted).
 */
void
dma_set_max_burst_size(const dma_ID_t ID, dma_connection conn,
		       uint32_t max_burst_size)
{
	assert(ID < N_DMA_ID);
	assert(max_burst_size > 0);

	dma_reg_store(ID, DMA_DEV_INFO_REG_IDX(_DMA_DEV_INTERF_MAX_BURST_IDX, conn),
		      max_burst_size - 1);
}
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010-2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "event_fifo.h" #ifndef __INLINE_EVENT__ #include "event_fifo_private.h" #endif /* __INLINE_EVENT__ */
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/event_fifo.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include "sp.h"

#ifndef __INLINE_SP__
#include "sp_private.h"
#endif /* __INLINE_SP__ */

#include "assert_support.h"

/*
 * cnd_sp_irq_enable() - conditionally enable (cnd == true) or disable the
 * SP interrupt for cell `ID`.
 */
void cnd_sp_irq_enable(
    const sp_ID_t ID,
    const bool cnd)
{
	if (cnd) {
		sp_ctrl_setbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT);
		/* Enabling the IRQ immediately triggers an interrupt, clear it */
		sp_ctrl_setbit(ID, SP_IRQ_CLEAR_REG, SP_IRQ_CLEAR_BIT);
	} else {
		sp_ctrl_clearbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT);
	}
}

/*
 * sp_get_state() - read the SP cell's program counter and status bits into
 * *state, and the per-FIFO/master stall indicators into *stall.
 *
 * Note the polarity: a *clear* sink bit means the corresponding channel is
 * stalled, hence the `!` on every sp_ctrl_getbit() result.
 */
void sp_get_state(
    const sp_ID_t ID,
    sp_state_t *state,
    sp_stall_t *stall)
{
	hrt_data sc = sp_ctrl_load(ID, SP_SC_REG);

	assert(state);
	assert(stall);

	state->pc = sp_ctrl_load(ID, SP_PC_REG);
	state->status_register = sc;
	state->is_broken = (sc & (1U << SP_BROKEN_BIT)) != 0;
	state->is_idle = (sc & (1U << SP_IDLE_BIT)) != 0;
	state->is_sleeping = (sc & (1U << SP_SLEEPING_BIT)) != 0;
	state->is_stalling = (sc & (1U << SP_STALLING_BIT)) != 0;
	stall->fifo0 =
	    !sp_ctrl_getbit(ID, SP_FIFO0_SINK_REG, SP_FIFO0_SINK_BIT);
	stall->fifo1 =
	    !sp_ctrl_getbit(ID, SP_FIFO1_SINK_REG, SP_FIFO1_SINK_BIT);
	stall->fifo2 =
	    !sp_ctrl_getbit(ID, SP_FIFO2_SINK_REG, SP_FIFO2_SINK_BIT);
	stall->fifo3 =
	    !sp_ctrl_getbit(ID, SP_FIFO3_SINK_REG, SP_FIFO3_SINK_BIT);
	stall->fifo4 =
	    !sp_ctrl_getbit(ID, SP_FIFO4_SINK_REG, SP_FIFO4_SINK_BIT);
	stall->fifo5 =
	    !sp_ctrl_getbit(ID, SP_FIFO5_SINK_REG, SP_FIFO5_SINK_BIT);
	stall->fifo6 =
	    !sp_ctrl_getbit(ID, SP_FIFO6_SINK_REG, SP_FIFO6_SINK_BIT);
	stall->fifo7 =
	    !sp_ctrl_getbit(ID, SP_FIFO7_SINK_REG, SP_FIFO7_SINK_BIT);
	stall->fifo8 =
	    !sp_ctrl_getbit(ID, SP_FIFO8_SINK_REG, SP_FIFO8_SINK_BIT);
	stall->fifo9 =
	    !sp_ctrl_getbit(ID, SP_FIFO9_SINK_REG, SP_FIFO9_SINK_BIT);
	stall->fifoa =
	    !sp_ctrl_getbit(ID, SP_FIFOA_SINK_REG, SP_FIFOA_SINK_BIT);
	stall->dmem =
	    !sp_ctrl_getbit(ID, SP_DMEM_SINK_REG, SP_DMEM_SINK_BIT);
	stall->control_master =
	    !sp_ctrl_getbit(ID, SP_CTRL_MT_SINK_REG, SP_CTRL_MT_SINK_BIT);
	stall->icache_master =
	    !sp_ctrl_getbit(ID, SP_ICACHE_MT_SINK_REG, SP_ICACHE_MT_SINK_BIT);
}
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010-2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ /* The name "gdc.h is already taken" */ #include "gdc_device.h" #include "device_access.h" #include "assert_support.h" /* * Local function declarations */ static inline void gdc_reg_store( const gdc_ID_t ID, const unsigned int reg, const hrt_data value); #ifndef __INLINE_GDC__ #include "gdc_private.h" #endif /* __INLINE_GDC__ */ /* * Exported function implementations */ void gdc_lut_store( const gdc_ID_t ID, const int data[4][HRT_GDC_N]) { unsigned int i, lut_offset = HRT_GDC_LUT_IDX; assert(ID < N_GDC_ID); assert(HRT_GDC_LUT_COEFF_OFFSET <= (4 * sizeof(hrt_data))); for (i = 0; i < HRT_GDC_N; i++) { hrt_data entry_0 = data[0][i] & HRT_GDC_BCI_COEF_MASK; hrt_data entry_1 = data[1][i] & HRT_GDC_BCI_COEF_MASK; hrt_data entry_2 = data[2][i] & HRT_GDC_BCI_COEF_MASK; hrt_data entry_3 = data[3][i] & HRT_GDC_BCI_COEF_MASK; hrt_data word_0 = entry_0 | (entry_1 << HRT_GDC_LUT_COEFF_OFFSET); hrt_data word_1 = entry_2 | (entry_3 << HRT_GDC_LUT_COEFF_OFFSET); gdc_reg_store(ID, lut_offset++, word_0); gdc_reg_store(ID, lut_offset++, word_1); } return; } /* * Input LUT format: * c0[0-1023], c1[0-1023], c2[0-1023] c3[0-1023] * * Output LUT format (interleaved): * c0[0], c1[0], c2[0], c3[0], c0[1], c1[1], c2[1], c3[1], .... * c0[1023], c1[1023], c2[1023], c3[1023] * * The first format needs c0[0], c1[0] (which are 1024 words apart) * to program gdc LUT registers. 
This makes it difficult to do piecemeal * reads in SP side gdc_lut_store * * Interleaved format allows use of contiguous bytes to store into * gdc LUT registers. * * See gdc_lut_store() definition in host/gdc.c vs sp/gdc_private.h * */ void gdc_lut_convert_to_isp_format(const int in_lut[4][HRT_GDC_N], int out_lut[4][HRT_GDC_N]) { unsigned int i; int *out = (int *)out_lut; for (i = 0; i < HRT_GDC_N; i++) { out[0] = in_lut[0][i]; out[1] = in_lut[1][i]; out[2] = in_lut[2][i]; out[3] = in_lut[3][i]; out += 4; } } int gdc_get_unity( const gdc_ID_t ID) { assert(ID < N_GDC_ID); (void)ID; return (int)(1UL << HRT_GDC_FRAC_BITS); } /* * Local function implementations */ static inline void gdc_reg_store( const gdc_ID_t ID, const unsigned int reg, const hrt_data value) { ia_css_device_store_uint32(GDC_BASE[ID] + reg * sizeof(hrt_data), value); return; }
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/gdc.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "timed_ctrl.h" #ifndef __INLINE_TIMED_CTRL__ #include "timed_ctrl_private.h" #endif /* __INLINE_TIMED_CTRL__ */ #include "assert_support.h" void timed_ctrl_snd_commnd( const timed_ctrl_ID_t ID, hrt_data mask, hrt_data condition, hrt_data counter, hrt_address addr, hrt_data value) { OP___assert(ID == TIMED_CTRL0_ID); OP___assert(TIMED_CTRL_BASE[ID] != (hrt_address)-1); timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, mask); timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, condition); timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, counter); timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, (hrt_data)addr); timed_ctrl_reg_store(ID, _HRT_TIMED_CONTROLLER_CMD_REG_IDX, value); } /* pqiao TODO: make sure the following commands get correct BASE address both for csim and android */ void timed_ctrl_snd_sp_commnd( const timed_ctrl_ID_t ID, hrt_data mask, hrt_data condition, hrt_data counter, const sp_ID_t SP_ID, hrt_address offset, hrt_data value) { OP___assert(SP_ID < N_SP_ID); OP___assert(SP_DMEM_BASE[SP_ID] != (hrt_address)-1); timed_ctrl_snd_commnd(ID, mask, condition, counter, SP_DMEM_BASE[SP_ID] + offset, value); } void timed_ctrl_snd_gpio_commnd( const timed_ctrl_ID_t ID, hrt_data mask, hrt_data condition, hrt_data counter, const gpio_ID_t GPIO_ID, hrt_address offset, hrt_data value) { OP___assert(GPIO_ID < N_GPIO_ID); OP___assert(GPIO_BASE[GPIO_ID] != 
(hrt_address)-1); timed_ctrl_snd_commnd(ID, mask, condition, counter, GPIO_BASE[GPIO_ID] + offset, value); }
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/timed_ctrl.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010-2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "fifo_monitor.h" #include <type_support.h> #include "device_access.h" #include <bits.h> #include "gp_device.h" #include "assert_support.h" #ifndef __INLINE_FIFO_MONITOR__ #define STORAGE_CLASS_FIFO_MONITOR_DATA static const #else #define STORAGE_CLASS_FIFO_MONITOR_DATA const #endif /* __INLINE_FIFO_MONITOR__ */ STORAGE_CLASS_FIFO_MONITOR_DATA unsigned int FIFO_SWITCH_ADDR[N_FIFO_SWITCH] = { _REG_GP_SWITCH_IF_ADDR, _REG_GP_SWITCH_GDC1_ADDR, _REG_GP_SWITCH_GDC2_ADDR }; #ifndef __INLINE_FIFO_MONITOR__ #include "fifo_monitor_private.h" #endif /* __INLINE_FIFO_MONITOR__ */ static inline bool fifo_monitor_status_valid( const fifo_monitor_ID_t ID, const unsigned int reg, const unsigned int port_id); static inline bool fifo_monitor_status_accept( const fifo_monitor_ID_t ID, const unsigned int reg, const unsigned int port_id); void fifo_channel_get_state( const fifo_monitor_ID_t ID, const fifo_channel_t channel_id, fifo_channel_state_t *state) { assert(channel_id < N_FIFO_CHANNEL); assert(state); switch (channel_id) { case FIFO_CHANNEL_ISP0_TO_SP0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_SP); /* ISP_STR_MON_PORT_ISP2SP */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_SP); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, 
SP_STR_MON_PORT_RCV_ISP); /* ISP_STR_MON_PORT_SP2ISP */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_RCV_ISP); break; case FIFO_CHANNEL_SP0_TO_ISP0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_ISP); /* ISP_STR_MON_PORT_SP2ISP */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_ISP); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_SP); /* ISP_STR_MON_PORT_ISP2SP */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_SP); break; case FIFO_CHANNEL_ISP0_TO_IF0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_PIF_A); /* ISP_STR_MON_PORT_ISP2PIFA */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_PIF_A); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_PIF_A); /* MOD_STR_MON_PORT_CELLS2PIFA */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_PIF_A); break; case FIFO_CHANNEL_IF0_TO_ISP0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_PIF_A); /* MOD_STR_MON_PORT_PIFA2CELLS */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_PIF_A); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_PIF_A); /* ISP_STR_MON_PORT_PIFA2ISP */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_PIF_A); break; case FIFO_CHANNEL_ISP0_TO_IF1: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_PIF_B); /* 
ISP_STR_MON_PORT_ISP2PIFA */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_PIF_B); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_PIF_B); /* MOD_STR_MON_PORT_CELLS2PIFB */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_PIF_B); break; case FIFO_CHANNEL_IF1_TO_ISP0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_PIF_B); /* MOD_STR_MON_PORT_PIFB2CELLS */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_PIF_B); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_PIF_B); /* ISP_STR_MON_PORT_PIFB2ISP */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_PIF_B); break; case FIFO_CHANNEL_ISP0_TO_DMA0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_DMA); /* ISP_STR_MON_PORT_ISP2DMA */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_DMA); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_DMA_FR_ISP); /* MOD_STR_MON_PORT_ISP2DMA */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_DMA_FR_ISP); break; case FIFO_CHANNEL_DMA0_TO_ISP0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_DMA2ISP); /* MOD_STR_MON_PORT_DMA2ISP */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_DMA2ISP); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_DMA); /* ISP_STR_MON_PORT_DMA2ISP */ state->sink_accept = 
fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_DMA); break; case FIFO_CHANNEL_ISP0_TO_GDC0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_GDC); /* ISP_STR_MON_PORT_ISP2GDC1 */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_GDC); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_GDC); /* MOD_STR_MON_PORT_CELLS2GDC1 */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_GDC); break; case FIFO_CHANNEL_GDC0_TO_ISP0: state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_GDC); /* MOD_STR_MON_PORT_GDC12CELLS */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_GDC); state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_GDC); /* ISP_STR_MON_PORT_GDC12ISP */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_GDC); break; case FIFO_CHANNEL_ISP0_TO_GDC1: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_ISP2GDC2); state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_ISP2GDC2); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_CELLS2GDC2); state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_CELLS2GDC2); break; case FIFO_CHANNEL_GDC1_TO_ISP0: state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_GDC22CELLS); state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_GDC22CELLS); state->src_valid = 
fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_GDC22ISP); state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_GDC22ISP); break; case FIFO_CHANNEL_ISP0_TO_HOST0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_GPD); /* ISP_STR_MON_PORT_ISP2GPD */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_SND_GPD); { hrt_data value = ia_css_device_load_uint32(0x0000000000380014ULL); state->fifo_valid = !_hrt_get_bit(value, 0); state->sink_accept = false; /* no monitor connected */ } break; case FIFO_CHANNEL_HOST0_TO_ISP0: { hrt_data value = ia_css_device_load_uint32(0x000000000038001CULL); state->fifo_valid = false; /* no monitor connected */ state->sink_accept = !_hrt_get_bit(value, 0); } state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_GPD); /* ISP_STR_MON_PORT_FA2ISP */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_ISP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_GPD); break; case FIFO_CHANNEL_SP0_TO_IF0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_PIF_A); /* SP_STR_MON_PORT_SP2PIFA */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_PIF_A); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_PIF_A); /* MOD_STR_MON_PORT_CELLS2PIFA */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_PIF_A); break; case FIFO_CHANNEL_IF0_TO_SP0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_PIF_A); /* MOD_STR_MON_PORT_PIFA2CELLS */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_PIF_A); 
state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_RCV_PIF_A); /* SP_STR_MON_PORT_PIFA2SP */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_RCV_PIF_A); break; case FIFO_CHANNEL_SP0_TO_IF1: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_PIF_B); /* SP_STR_MON_PORT_SP2PIFB */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_PIF_B); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_PIF_B); /* MOD_STR_MON_PORT_CELLS2PIFB */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_PIF_B); break; case FIFO_CHANNEL_IF1_TO_SP0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_PIF_B); /* MOD_STR_MON_PORT_PIFB2CELLS */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_PIF_B); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_PIF_B); /* SP_STR_MON_PORT_PIFB2SP */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, ISP_STR_MON_PORT_RCV_PIF_B); break; case FIFO_CHANNEL_SP0_TO_IF2: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_SIF); /* SP_STR_MON_PORT_SP2SIF */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_SIF); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_SIF); /* MOD_STR_MON_PORT_SP2SIF */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_SIF); break; case FIFO_CHANNEL_IF2_TO_SP0: state->src_valid = fifo_monitor_status_valid(ID, 
HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_SIF); /* MOD_STR_MON_PORT_SIF2SP */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_SIF); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_RCV_SIF); /* SP_STR_MON_PORT_SIF2SP */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_RCV_SIF); break; case FIFO_CHANNEL_SP0_TO_DMA0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_DMA); /* SP_STR_MON_PORT_SP2DMA */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_DMA); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_DMA_FR_SP); /* MOD_STR_MON_PORT_SP2DMA */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_DMA_FR_SP); break; case FIFO_CHANNEL_DMA0_TO_SP0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_DMA2SP); /* MOD_STR_MON_PORT_DMA2SP */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_DMA2SP); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_RCV_DMA); /* SP_STR_MON_PORT_DMA2SP */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_RCV_DMA); break; case FIFO_CHANNEL_SP0_TO_GDC0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, SP_STR_MON_PORT_B_SP2GDC1); state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, SP_STR_MON_PORT_B_SP2GDC1); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_CELLS2GDC1); state->sink_accept = fifo_monitor_status_accept(ID, 
HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_CELLS2GDC1); break; case FIFO_CHANNEL_GDC0_TO_SP0: state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_GDC12CELLS); state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_GDC12CELLS); state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, SP_STR_MON_PORT_B_GDC12SP); state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, SP_STR_MON_PORT_B_GDC12SP); break; case FIFO_CHANNEL_SP0_TO_GDC1: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, SP_STR_MON_PORT_B_SP2GDC2); state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, SP_STR_MON_PORT_B_SP2GDC2); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_CELLS2GDC2); state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_CELLS2GDC2); break; case FIFO_CHANNEL_GDC1_TO_SP0: state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_GDC22CELLS); state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_GDC22CELLS); state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, SP_STR_MON_PORT_B_GDC22SP); state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_B_IDX, SP_STR_MON_PORT_B_GDC22SP); break; case FIFO_CHANNEL_SP0_TO_HOST0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_GPD); /* SP_STR_MON_PORT_SP2GPD */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_GPD); { hrt_data value = ia_css_device_load_uint32(0x0000000000380010ULL); state->fifo_valid = !_hrt_get_bit(value, 0); state->sink_accept = false; /* no monitor 
connected */ } break; case FIFO_CHANNEL_HOST0_TO_SP0: { hrt_data value = ia_css_device_load_uint32(0x0000000000380018ULL); state->fifo_valid = false; /* no monitor connected */ state->sink_accept = !_hrt_get_bit(value, 0); } state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_RCV_GPD); /* SP_STR_MON_PORT_FA2SP */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_RCV_GPD); break; case FIFO_CHANNEL_SP0_TO_STREAM2MEM0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_MC); /* SP_STR_MON_PORT_SP2MC */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SND_MC); state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_MC); /* MOD_STR_MON_PORT_SP2MC */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_RCV_MC); break; case FIFO_CHANNEL_STREAM2MEM0_TO_SP0: state->fifo_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_MC); /* SP_STR_MON_PORT_MC2SP */ state->sink_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_MOD_STREAM_STAT_IDX, MOD_STR_MON_PORT_SND_MC); state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_RCV_MC); /* MOD_STR_MON_PORT_MC2SP */ state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_RCV_MC); break; case FIFO_CHANNEL_SP0_TO_INPUT_SYSTEM0: state->src_valid = fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SP2ISYS); state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_SP2ISYS); state->fifo_valid = false; state->sink_accept = false; break; case FIFO_CHANNEL_INPUT_SYSTEM0_TO_SP0: state->fifo_valid = false; state->sink_accept = false; state->src_valid = 
fifo_monitor_status_valid(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_ISYS2SP); state->fifo_accept = fifo_monitor_status_accept(ID, HIVE_GP_REGS_SP_STREAM_STAT_IDX, SP_STR_MON_PORT_ISYS2SP); break; default: assert(0); break; } return; } void fifo_switch_get_state( const fifo_monitor_ID_t ID, const fifo_switch_t switch_id, fifo_switch_state_t *state) { hrt_data data = (hrt_data)-1; assert(ID == FIFO_MONITOR0_ID); assert(switch_id < N_FIFO_SWITCH); assert(state); (void)ID; data = gp_device_reg_load(GP_DEVICE0_ID, FIFO_SWITCH_ADDR[switch_id]); state->is_none = (data == HIVE_ISP_CSS_STREAM_SWITCH_NONE); state->is_sp = (data == HIVE_ISP_CSS_STREAM_SWITCH_SP); state->is_isp = (data == HIVE_ISP_CSS_STREAM_SWITCH_ISP); return; } void fifo_monitor_get_state( const fifo_monitor_ID_t ID, fifo_monitor_state_t *state) { fifo_channel_t ch_id; fifo_switch_t sw_id; assert(ID < N_FIFO_MONITOR_ID); assert(state); for (ch_id = 0; ch_id < N_FIFO_CHANNEL; ch_id++) { fifo_channel_get_state(ID, ch_id, &state->fifo_channels[ch_id]); } for (sw_id = 0; sw_id < N_FIFO_SWITCH; sw_id++) { fifo_switch_get_state(ID, sw_id, &state->fifo_switches[sw_id]); } return; } static inline bool fifo_monitor_status_valid( const fifo_monitor_ID_t ID, const unsigned int reg, const unsigned int port_id) { hrt_data data = fifo_monitor_reg_load(ID, reg); return (data >> (((port_id * 2) + _hive_str_mon_valid_offset))) & 0x1; } static inline bool fifo_monitor_status_accept( const fifo_monitor_ID_t ID, const unsigned int reg, const unsigned int port_id) { hrt_data data = fifo_monitor_reg_load(ID, reg); return (data >> (((port_id * 2) + _hive_str_mon_accept_offset))) & 0x1; }
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/fifo_monitor.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010-2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/delay.h> #include <system_global.h> #include "isp.h" #ifndef __INLINE_ISP__ #include "isp_private.h" #endif /* __INLINE_ISP__ */ #include "assert_support.h" void cnd_isp_irq_enable( const isp_ID_t ID, const bool cnd) { if (cnd) { isp_ctrl_setbit(ID, ISP_IRQ_READY_REG, ISP_IRQ_READY_BIT); /* Enabling the IRQ immediately triggers an interrupt, clear it */ isp_ctrl_setbit(ID, ISP_IRQ_CLEAR_REG, ISP_IRQ_CLEAR_BIT); } else { isp_ctrl_clearbit(ID, ISP_IRQ_READY_REG, ISP_IRQ_READY_BIT); } return; } void isp_get_state( const isp_ID_t ID, isp_state_t *state, isp_stall_t *stall) { hrt_data sc = isp_ctrl_load(ID, ISP_SC_REG); assert(state); assert(stall); #if defined(_hrt_sysmem_ident_address) /* Patch to avoid compiler unused symbol warning in C_RUN build */ (void)__hrt_sysmem_ident_address; (void)_hrt_sysmem_map_var; #endif state->pc = isp_ctrl_load(ID, ISP_PC_REG); state->status_register = sc; state->is_broken = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_BROKEN_BIT); state->is_idle = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_IDLE_BIT); state->is_sleeping = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_SLEEPING_BIT); state->is_stalling = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_STALLING_BIT); stall->stat_ctrl = !isp_ctrl_getbit(ID, ISP_CTRL_SINK_REG, ISP_CTRL_SINK_BIT); stall->pmem = !isp_ctrl_getbit(ID, ISP_PMEM_SINK_REG, ISP_PMEM_SINK_BIT); stall->dmem = !isp_ctrl_getbit(ID, ISP_DMEM_SINK_REG, ISP_DMEM_SINK_BIT); 
stall->vmem = !isp_ctrl_getbit(ID, ISP_VMEM_SINK_REG, ISP_VMEM_SINK_BIT); stall->fifo0 = !isp_ctrl_getbit(ID, ISP_FIFO0_SINK_REG, ISP_FIFO0_SINK_BIT); stall->fifo1 = !isp_ctrl_getbit(ID, ISP_FIFO1_SINK_REG, ISP_FIFO1_SINK_BIT); stall->fifo2 = !isp_ctrl_getbit(ID, ISP_FIFO2_SINK_REG, ISP_FIFO2_SINK_BIT); stall->fifo3 = !isp_ctrl_getbit(ID, ISP_FIFO3_SINK_REG, ISP_FIFO3_SINK_BIT); stall->fifo4 = !isp_ctrl_getbit(ID, ISP_FIFO4_SINK_REG, ISP_FIFO4_SINK_BIT); stall->fifo5 = !isp_ctrl_getbit(ID, ISP_FIFO5_SINK_REG, ISP_FIFO5_SINK_BIT); stall->fifo6 = !isp_ctrl_getbit(ID, ISP_FIFO6_SINK_REG, ISP_FIFO6_SINK_BIT); stall->vamem1 = !isp_ctrl_getbit(ID, ISP_VAMEM1_SINK_REG, ISP_VAMEM1_SINK_BIT); stall->vamem2 = !isp_ctrl_getbit(ID, ISP_VAMEM2_SINK_REG, ISP_VAMEM2_SINK_BIT); stall->vamem3 = !isp_ctrl_getbit(ID, ISP_VAMEM3_SINK_REG, ISP_VAMEM3_SINK_BIT); stall->hmem = !isp_ctrl_getbit(ID, ISP_HMEM_SINK_REG, ISP_HMEM_SINK_BIT); /* stall->icache_master = !isp_ctrl_getbit(ID, ISP_ICACHE_MT_SINK_REG, ISP_ICACHE_MT_SINK_BIT); */ return; } /* ISP functions to control the ISP state from the host, even in crun. */ /* Inspect readiness of an ISP indexed by ID */ unsigned int isp_is_ready(isp_ID_t ID) { assert(ID < N_ISP_ID); return isp_ctrl_getbit(ID, ISP_SC_REG, ISP_IDLE_BIT); } /* Inspect sleeping of an ISP indexed by ID */ unsigned int isp_is_sleeping(isp_ID_t ID) { assert(ID < N_ISP_ID); return isp_ctrl_getbit(ID, ISP_SC_REG, ISP_SLEEPING_BIT); } /* To be called by the host immediately before starting ISP ID. */ void isp_start(isp_ID_t ID) { assert(ID < N_ISP_ID); } /* Wake up ISP ID. */ void isp_wake(isp_ID_t ID) { assert(ID < N_ISP_ID); isp_ctrl_setbit(ID, ISP_SC_REG, ISP_START_BIT); udelay(1); }
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010-2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ /* The name "mmu.h is already taken" */ #include "mmu_device.h" void mmu_set_page_table_base_index( const mmu_ID_t ID, const hrt_data base_index) { mmu_reg_store(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX, base_index); return; } hrt_data mmu_get_page_table_base_index( const mmu_ID_t ID) { return mmu_reg_load(ID, _HRT_MMU_PAGE_TABLE_BASE_ADDRESS_REG_IDX); } void mmu_invalidate_cache( const mmu_ID_t ID) { mmu_reg_store(ID, _HRT_MMU_INVALIDATE_TLB_REG_IDX, 1); return; } void mmu_invalidate_cache_all(void) { mmu_ID_t mmu_id; for (mmu_id = (mmu_ID_t)0; mmu_id < N_MMU_ID; mmu_id++) { mmu_invalidate_cache(mmu_id); } }
linux-master
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/mmu.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Merrifield PNW Camera Imaging ISP subsystem. * * Copyright (c) 2012 Intel Corporation. All Rights Reserved. * * Copyright (c) 2012 Silicon Hive www.siliconhive.com. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * */ #include "type_support.h" #include "mmu/isp_mmu.h" #include "mmu/sh_mmu_mrfld.h" #include "atomisp_compat.h" #define MERR_VALID_PTE_MASK 0x80000000 /* * include SH header file here */ static unsigned int sh_phys_to_pte(struct isp_mmu *mmu, phys_addr_t phys) { return phys >> ISP_PAGE_OFFSET; } static phys_addr_t sh_pte_to_phys(struct isp_mmu *mmu, unsigned int pte) { unsigned int mask = mmu->driver->pte_valid_mask; return (phys_addr_t)((pte & ~mask) << ISP_PAGE_OFFSET); } static unsigned int sh_get_pd_base(struct isp_mmu *mmu, phys_addr_t phys) { unsigned int pte = sh_phys_to_pte(mmu, phys); return HOST_ADDRESS(pte); } /* * callback to flush tlb. * * tlb_flush_range will at least flush TLBs containing * address mapping from addr to addr + size. * * tlb_flush_all will flush all TLBs. * * tlb_flush_all is must be provided. if tlb_flush_range is * not valid, it will set to tlb_flush_all by default. */ static void sh_tlb_flush(struct isp_mmu *mmu) { ia_css_mmu_invalidate_cache(); } struct isp_mmu_client sh_mmu_mrfld = { .name = "Silicon Hive ISP3000 MMU", .pte_valid_mask = MERR_VALID_PTE_MASK, .null_pte = ~MERR_VALID_PTE_MASK, .get_pd_base = sh_get_pd_base, .tlb_flush_all = sh_tlb_flush, .phys_to_pte = sh_phys_to_pte, .pte_to_phys = sh_pte_to_phys, };
linux-master
drivers/staging/media/atomisp/pci/mmu/sh_mmu_mrfld.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Medifield PNW Camera Imaging ISP subsystem. * * Copyright (c) 2010 Intel Corporation. All Rights Reserved. * * Copyright (c) 2010 Silicon Hive www.siliconhive.com. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * */ /* * ISP MMU management wrap code */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/gfp.h> #include <linux/mm.h> /* for GFP_ATOMIC */ #include <linux/slab.h> /* for kmalloc */ #include <linux/list.h> #include <linux/io.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/sizes.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> #endif #include "atomisp_internal.h" #include "mmu/isp_mmu.h" /* * 64-bit x86 processor physical address layout: * 0 - 0x7fffffff DDR RAM (2GB) * 0x80000000 - 0xffffffff MMIO (2GB) * 0x100000000 - 0x3fffffffffff DDR RAM (64TB) * So if the system has more than 2GB DDR memory, the lower 2GB occupies the * physical address 0 - 0x7fffffff and the rest will start from 0x100000000. * We have to make sure memory is allocated from the lower 2GB for devices * that are only 32-bit capable(e.g. the ISP MMU). * * For any confusion, contact [email protected]. 
*/ #define NR_PAGES_2GB (SZ_2G / PAGE_SIZE) static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt, unsigned int end_isp_virt); static unsigned int atomisp_get_pte(phys_addr_t pt, unsigned int idx) { unsigned int *pt_virt = phys_to_virt(pt); return *(pt_virt + idx); } static void atomisp_set_pte(phys_addr_t pt, unsigned int idx, unsigned int pte) { unsigned int *pt_virt = phys_to_virt(pt); *(pt_virt + idx) = pte; } static void *isp_pt_phys_to_virt(phys_addr_t phys) { return phys_to_virt(phys); } static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu, unsigned int pte) { return mmu->driver->pte_to_phys(mmu, pte); } static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu, phys_addr_t phys) { unsigned int pte = mmu->driver->phys_to_pte(mmu, phys); return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu)); } /* * allocate a uncacheable page table. * return physical address. */ static phys_addr_t alloc_page_table(struct isp_mmu *mmu) { int i; phys_addr_t page; void *virt; virt = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32); if (!virt) return (phys_addr_t)NULL_PAGE; /* * we need a uncacheable page table. 
*/ #ifdef CONFIG_X86 set_memory_uc((unsigned long)virt, 1); #endif page = virt_to_phys(virt); for (i = 0; i < 1024; i++) { /* NEED CHECK */ atomisp_set_pte(page, i, mmu->driver->null_pte); } return page; } static void free_page_table(struct isp_mmu *mmu, phys_addr_t page) { void *virt; page &= ISP_PAGE_MASK; /* * reset the page to write back before free */ virt = phys_to_virt(page); #ifdef CONFIG_X86 set_memory_wb((unsigned long)virt, 1); #endif free_page((unsigned long)virt); } static void mmu_remap_error(struct isp_mmu *mmu, phys_addr_t l1_pt, unsigned int l1_idx, phys_addr_t l2_pt, unsigned int l2_idx, unsigned int isp_virt, phys_addr_t old_phys, phys_addr_t new_phys) { dev_err(atomisp_dev, "address remap:\n\n" "\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n" "\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n" "\told: isp_virt = 0x%x, phys = 0x%llx\n" "\tnew: isp_virt = 0x%x, phys = 0x%llx\n", isp_pt_phys_to_virt(l1_pt), (u64)l1_pt, l1_idx, isp_pt_phys_to_virt(l2_pt), (u64)l2_pt, l2_idx, isp_virt, (u64)old_phys, isp_virt, (u64)new_phys); } static void mmu_unmap_l2_pte_error(struct isp_mmu *mmu, phys_addr_t l1_pt, unsigned int l1_idx, phys_addr_t l2_pt, unsigned int l2_idx, unsigned int isp_virt, unsigned int pte) { dev_err(atomisp_dev, "unmap invalid L2 pte:\n\n" "\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n" "\tL2 PT: virt = %p, phys = 0x%llx, idx = %d\n" "\tisp_virt = 0x%x, pte(page phys) = 0x%x\n", isp_pt_phys_to_virt(l1_pt), (u64)l1_pt, l1_idx, isp_pt_phys_to_virt(l2_pt), (u64)l2_pt, l2_idx, isp_virt, pte); } static void mmu_unmap_l1_pte_error(struct isp_mmu *mmu, phys_addr_t l1_pt, unsigned int l1_idx, unsigned int isp_virt, unsigned int pte) { dev_err(atomisp_dev, "unmap invalid L1 pte (L2 PT):\n\n" "\tL1 PT: virt = %p, phys = 0x%llx, idx = %d\n" "\tisp_virt = 0x%x, l1_pte(L2 PT) = 0x%x\n", isp_pt_phys_to_virt(l1_pt), (u64)l1_pt, l1_idx, (unsigned int)isp_virt, pte); } static void mmu_unmap_l1_pt_error(struct isp_mmu *mmu, unsigned int pte) { 
dev_err(atomisp_dev, "unmap invalid L1PT:\n\n" "L1PT = 0x%x\n", (unsigned int)pte); } /* * Update L2 page table according to isp virtual address and page physical * address */ static int mmu_l2_map(struct isp_mmu *mmu, phys_addr_t l1_pt, unsigned int l1_idx, phys_addr_t l2_pt, unsigned int start, unsigned int end, phys_addr_t phys) { unsigned int ptr; unsigned int idx; unsigned int pte; l2_pt &= ISP_PAGE_MASK; start = start & ISP_PAGE_MASK; end = ISP_PAGE_ALIGN(end); phys &= ISP_PAGE_MASK; ptr = start; do { idx = ISP_PTR_TO_L2_IDX(ptr); pte = atomisp_get_pte(l2_pt, idx); if (ISP_PTE_VALID(mmu, pte)) { mmu_remap_error(mmu, l1_pt, l1_idx, l2_pt, idx, ptr, pte, phys); /* free all mapped pages */ free_mmu_map(mmu, start, ptr); return -EINVAL; } pte = isp_pgaddr_to_pte_valid(mmu, phys); atomisp_set_pte(l2_pt, idx, pte); mmu->l2_pgt_refcount[l1_idx]++; ptr += (1U << ISP_L2PT_OFFSET); phys += (1U << ISP_L2PT_OFFSET); } while (ptr < end && idx < ISP_L2PT_PTES - 1); return 0; } /* * Update L1 page table according to isp virtual address and page physical * address */ static int mmu_l1_map(struct isp_mmu *mmu, phys_addr_t l1_pt, unsigned int start, unsigned int end, phys_addr_t phys) { phys_addr_t l2_pt; unsigned int ptr, l1_aligned; unsigned int idx; unsigned int l2_pte; int ret; l1_pt &= ISP_PAGE_MASK; start = start & ISP_PAGE_MASK; end = ISP_PAGE_ALIGN(end); phys &= ISP_PAGE_MASK; ptr = start; do { idx = ISP_PTR_TO_L1_IDX(ptr); l2_pte = atomisp_get_pte(l1_pt, idx); if (!ISP_PTE_VALID(mmu, l2_pte)) { l2_pt = alloc_page_table(mmu); if (l2_pt == NULL_PAGE) { dev_err(atomisp_dev, "alloc page table fail.\n"); /* free all mapped pages */ free_mmu_map(mmu, start, ptr); return -ENOMEM; } l2_pte = isp_pgaddr_to_pte_valid(mmu, l2_pt); atomisp_set_pte(l1_pt, idx, l2_pte); mmu->l2_pgt_refcount[idx] = 0; } l2_pt = isp_pte_to_pgaddr(mmu, l2_pte); l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET); if (l1_aligned < end) { ret = mmu_l2_map(mmu, l1_pt, idx, l2_pt, ptr, 
l1_aligned, phys); phys += (l1_aligned - ptr); ptr = l1_aligned; } else { ret = mmu_l2_map(mmu, l1_pt, idx, l2_pt, ptr, end, phys); phys += (end - ptr); ptr = end; } if (ret) { dev_err(atomisp_dev, "setup mapping in L2PT fail.\n"); /* free all mapped pages */ free_mmu_map(mmu, start, ptr); return -EINVAL; } } while (ptr < end && idx < ISP_L1PT_PTES); return 0; } /* * Update page table according to isp virtual address and page physical * address */ static int mmu_map(struct isp_mmu *mmu, unsigned int isp_virt, phys_addr_t phys, unsigned int pgnr) { unsigned int start, end; phys_addr_t l1_pt; int ret; mutex_lock(&mmu->pt_mutex); if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { /* * allocate 1 new page for L1 page table */ l1_pt = alloc_page_table(mmu); if (l1_pt == NULL_PAGE) { dev_err(atomisp_dev, "alloc page table fail.\n"); mutex_unlock(&mmu->pt_mutex); return -ENOMEM; } /* * setup L1 page table physical addr to MMU */ mmu->base_address = l1_pt; mmu->l1_pte = isp_pgaddr_to_pte_valid(mmu, l1_pt); memset(mmu->l2_pgt_refcount, 0, sizeof(int) * ISP_L1PT_PTES); } l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte); start = (isp_virt) & ISP_PAGE_MASK; end = start + (pgnr << ISP_PAGE_OFFSET); phys &= ISP_PAGE_MASK; ret = mmu_l1_map(mmu, l1_pt, start, end, phys); if (ret) dev_err(atomisp_dev, "setup mapping in L1PT fail.\n"); mutex_unlock(&mmu->pt_mutex); return ret; } /* * Free L2 page table according to isp virtual address and page physical * address */ static void mmu_l2_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt, unsigned int l1_idx, phys_addr_t l2_pt, unsigned int start, unsigned int end) { unsigned int ptr; unsigned int idx; unsigned int pte; l2_pt &= ISP_PAGE_MASK; start = start & ISP_PAGE_MASK; end = ISP_PAGE_ALIGN(end); ptr = start; do { idx = ISP_PTR_TO_L2_IDX(ptr); pte = atomisp_get_pte(l2_pt, idx); if (!ISP_PTE_VALID(mmu, pte)) mmu_unmap_l2_pte_error(mmu, l1_pt, l1_idx, l2_pt, idx, ptr, pte); atomisp_set_pte(l2_pt, idx, mmu->driver->null_pte); mmu->l2_pgt_refcount[l1_idx]--; 
ptr += (1U << ISP_L2PT_OFFSET); } while (ptr < end && idx < ISP_L2PT_PTES - 1); if (mmu->l2_pgt_refcount[l1_idx] == 0) { free_page_table(mmu, l2_pt); atomisp_set_pte(l1_pt, l1_idx, mmu->driver->null_pte); } } /* * Free L1 page table according to isp virtual address and page physical * address */ static void mmu_l1_unmap(struct isp_mmu *mmu, phys_addr_t l1_pt, unsigned int start, unsigned int end) { phys_addr_t l2_pt; unsigned int ptr, l1_aligned; unsigned int idx; unsigned int l2_pte; l1_pt &= ISP_PAGE_MASK; start = start & ISP_PAGE_MASK; end = ISP_PAGE_ALIGN(end); ptr = start; do { idx = ISP_PTR_TO_L1_IDX(ptr); l2_pte = atomisp_get_pte(l1_pt, idx); if (!ISP_PTE_VALID(mmu, l2_pte)) { mmu_unmap_l1_pte_error(mmu, l1_pt, idx, ptr, l2_pte); continue; } l2_pt = isp_pte_to_pgaddr(mmu, l2_pte); l1_aligned = (ptr & ISP_PAGE_MASK) + (1U << ISP_L1PT_OFFSET); if (l1_aligned < end) { mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, l1_aligned); ptr = l1_aligned; } else { mmu_l2_unmap(mmu, l1_pt, idx, l2_pt, ptr, end); ptr = end; } /* * use the same L2 page next time, so we don't * need to invalidate and free this PT. */ /* atomisp_set_pte(l1_pt, idx, NULL_PTE); */ } while (ptr < end && idx < ISP_L1PT_PTES); } /* * Free page table according to isp virtual address and page physical * address */ static void mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt, unsigned int pgnr) { unsigned int start, end; phys_addr_t l1_pt; mutex_lock(&mmu->pt_mutex); if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { mmu_unmap_l1_pt_error(mmu, mmu->l1_pte); mutex_unlock(&mmu->pt_mutex); return; } l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte); start = (isp_virt) & ISP_PAGE_MASK; end = start + (pgnr << ISP_PAGE_OFFSET); mmu_l1_unmap(mmu, l1_pt, start, end); mutex_unlock(&mmu->pt_mutex); } /* * Free page tables according to isp start virtual address and end virtual * address. 
*/ static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt, unsigned int end_isp_virt) { unsigned int pgnr; unsigned int start, end; start = (start_isp_virt) & ISP_PAGE_MASK; end = (end_isp_virt) & ISP_PAGE_MASK; pgnr = (end - start) >> ISP_PAGE_OFFSET; mmu_unmap(mmu, start, pgnr); } int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt, phys_addr_t phys, unsigned int pgnr) { return mmu_map(mmu, isp_virt, phys, pgnr); } void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt, unsigned int pgnr) { mmu_unmap(mmu, isp_virt, pgnr); } static void isp_mmu_flush_tlb_range_default(struct isp_mmu *mmu, unsigned int start, unsigned int size) { isp_mmu_flush_tlb(mmu); } /*MMU init for internal structure*/ int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver) { if (!mmu) /* error */ return -EINVAL; if (!driver) /* error */ return -EINVAL; if (!driver->name) dev_warn(atomisp_dev, "NULL name for MMU driver...\n"); mmu->driver = driver; if (!driver->tlb_flush_all) { dev_err(atomisp_dev, "tlb_flush_all operation not provided.\n"); return -EINVAL; } if (!driver->tlb_flush_range) driver->tlb_flush_range = isp_mmu_flush_tlb_range_default; if (!driver->pte_valid_mask) { dev_err(atomisp_dev, "PTE_MASK is missing from mmu driver\n"); return -EINVAL; } mmu->l1_pte = driver->null_pte; mutex_init(&mmu->pt_mutex); return 0; } /*Free L1 and L2 page table*/ void isp_mmu_exit(struct isp_mmu *mmu) { unsigned int idx; unsigned int pte; phys_addr_t l1_pt, l2_pt; if (!mmu) return; if (!ISP_PTE_VALID(mmu, mmu->l1_pte)) { dev_warn(atomisp_dev, "invalid L1PT: pte = 0x%x\n", (unsigned int)mmu->l1_pte); return; } l1_pt = isp_pte_to_pgaddr(mmu, mmu->l1_pte); for (idx = 0; idx < ISP_L1PT_PTES; idx++) { pte = atomisp_get_pte(l1_pt, idx); if (ISP_PTE_VALID(mmu, pte)) { l2_pt = isp_pte_to_pgaddr(mmu, pte); free_page_table(mmu, l2_pt); } } free_page_table(mmu, l1_pt); }
linux-master
drivers/staging/media/atomisp/pci/mmu/isp_mmu.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "atomisp_internal.h" #include "ia_css_vf.host.h" #include <assert_support.h> #include <ia_css_err.h> #include <ia_css_frame.h> #include <ia_css_frame_public.h> #include <ia_css_pipeline.h> #define IA_CSS_INCLUDE_CONFIGURATIONS #include "ia_css_isp_configs.h" #include "isp.h" int ia_css_vf_config(struct sh_css_isp_vf_isp_config *to, const struct ia_css_vf_configuration *from, unsigned int size) { unsigned int elems_a = ISP_VEC_NELEMS; int ret; to->vf_downscale_bits = from->vf_downscale_bits; to->enable = from->info != NULL; if (from->info) { ia_css_frame_info_to_frame_sp_info(&to->info, from->info); ret = ia_css_dma_configure_from_info(&to->dma.port_b, from->info); if (ret) return ret; to->dma.width_a_over_b = elems_a / to->dma.port_b.elems; /* Assume divisiblity here, may need to generalize to fixed point. */ if (elems_a % to->dma.port_b.elems != 0) return -EINVAL; } return 0; } /* compute the log2 of the downscale factor needed to get closest * to the requested viewfinder resolution on the upper side. The output cannot * be smaller than the requested viewfinder resolution. 
*/ int sh_css_vf_downscale_log2( const struct ia_css_frame_info *out_info, const struct ia_css_frame_info *vf_info, unsigned int *downscale_log2) { unsigned int ds_log2 = 0; unsigned int out_width; if ((!out_info) || (!vf_info)) return -EINVAL; out_width = out_info->res.width; if (out_width == 0) return -EINVAL; /* downscale until width smaller than the viewfinder width. We don't * test for the height since the vmem buffers only put restrictions on * the width of a line, not on the number of lines in a frame. */ while (out_width >= vf_info->res.width) { ds_log2++; out_width /= 2; } /* now width is smaller, so we go up one step */ if ((ds_log2 > 0) && (out_width < ia_css_binary_max_vf_width())) ds_log2--; /* TODO: use actual max input resolution of vf_pp binary */ if ((out_info->res.width >> ds_log2) >= 2 * ia_css_binary_max_vf_width()) return -EINVAL; *downscale_log2 = ds_log2; return 0; } static int configure_kernel( const struct ia_css_binary_info *info, const struct ia_css_frame_info *out_info, const struct ia_css_frame_info *vf_info, unsigned int *downscale_log2, struct ia_css_vf_configuration *config) { int err; unsigned int vf_log_ds = 0; /* First compute value */ if (vf_info) { err = sh_css_vf_downscale_log2(out_info, vf_info, &vf_log_ds); if (err) return err; } vf_log_ds = min(vf_log_ds, info->vf_dec.max_log_downscale); *downscale_log2 = vf_log_ds; /* Then store it in isp config section */ config->vf_downscale_bits = vf_log_ds; return 0; } static void configure_dma( struct ia_css_vf_configuration *config, const struct ia_css_frame_info *vf_info) { config->info = vf_info; } int ia_css_vf_configure(const struct ia_css_binary *binary, const struct ia_css_frame_info *out_info, struct ia_css_frame_info *vf_info, unsigned int *downscale_log2) { int err; struct ia_css_vf_configuration config; const struct ia_css_binary_info *info = &binary->info->sp; err = configure_kernel(info, out_info, vf_info, downscale_log2, &config); if (err) dev_warn(atomisp_dev, "Couldn't 
setup downscale\n"); configure_dma(&config, vf_info); if (vf_info) vf_info->raw_bit_depth = info->dma.vfdec_bits_per_pixel; return ia_css_configure_vf(binary, &config); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/vf/vf_1.0/ia_css_vf.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "ia_css_anr2.host.h" void ia_css_anr2_vmem_encode( struct ia_css_isp_anr2_params *to, const struct ia_css_anr_thres *from, size_t size) { unsigned int i; (void)size; for (i = 0; i < ANR_PARAM_SIZE; i++) { unsigned int j; for (j = 0; j < ISP_VEC_NELEMS; j++) { to->data[i][j] = from->data[i * ISP_VEC_NELEMS + j]; } } } void ia_css_anr2_debug_dtrace( const struct ia_css_anr_thres *config, unsigned int level) { (void)config; (void)level; }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "system_global.h" #include "ia_css_types.h" #include "ia_css_anr2_table.host.h" #if 1 const struct ia_css_anr_thres default_anr_thres = { { 128, 384, 640, 896, 896, 640, 384, 128, 384, 1152, 1920, 2688, 2688, 1920, 1152, 384, 640, 1920, 3200, 4480, 4480, 3200, 1920, 640, 896, 2688, 4480, 6272, 6272, 4480, 2688, 896, 896, 2688, 4480, 6272, 6272, 4480, 2688, 896, 640, 1920, 3200, 4480, 4480, 3200, 1920, 640, 384, 1152, 1920, 2688, 2688, 1920, 1152, 384, 128, 384, 640, 896, 896, 640, 384, 128, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 30, 30, 10, 10, 20, 20, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 60, 60, 20, 20, 40, 40, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 0, 0, 90, 90, 30, 30, 60, 60, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 30, 30, 
60, 60, 40, 40, 50, 50, 30, 30, 60, 60, 40, 40, 50, 50, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 60, 60, 120, 120, 80, 80, 100, 100, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 90, 90, 180, 180, 120, 120, 150, 150, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 10, 10, 40, 40, 20, 20, 30, 30, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 20, 20, 80, 80, 40, 40, 60, 60, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 30, 30, 120, 120, 60, 60, 90, 90, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 20, 20, 50, 50, 30, 30, 40, 40, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 40, 40, 100, 100, 60, 60, 80, 80, 60, 60, 150, 150, 90, 90, 120, 120, 60, 
60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120, 60, 60, 150, 150, 90, 90, 120, 120 } }; #else const struct ia_css_anr_thres default_anr_thres = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; #endif
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_2/ia_css_anr2_table.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "ia_css_anr.host.h" const struct ia_css_anr_config default_anr_config = { 10, { 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4, 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4, 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4, 0, 3, 1, 2, 3, 6, 4, 5, 1, 4, 2, 3, 2, 5, 3, 4 }, {10, 20, 30} }; void ia_css_anr_encode( struct sh_css_isp_anr_params *to, const struct ia_css_anr_config *from, unsigned int size) { (void)size; to->threshold = from->threshold; } void ia_css_anr_dump( const struct sh_css_isp_anr_params *anr, unsigned int level) { if (!anr) return; ia_css_debug_dtrace(level, "Advance Noise Reduction:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "anr_threshold", anr->threshold); } void ia_css_anr_debug_dtrace( const struct ia_css_anr_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.threshold=%d\n", config->threshold); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/anr/anr_1.0/ia_css_anr.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include "ia_css_types.h"
#include "sh_css_defs.h"
#include "ia_css_debug.h"
#include "assert_support.h"
#define IA_CSS_INCLUDE_CONFIGURATIONS
#include "ia_css_isp_configs.h"
#include "ia_css_sc.host.h"

/*
 * Encode the shading-correction gain shift from the shading table's
 * fraction bits into the ISP parameter block.
 */
void ia_css_sc_encode(
    struct sh_css_isp_sc_params *to,
    struct ia_css_shading_table **from,
    unsigned int size)
{
	(void)size;
	to->gain_shift = (*from)->fraction_bits;
}

/* Dump the ISP-side shading-correction parameters at trace @level. */
void ia_css_sc_dump(
    const struct sh_css_isp_sc_params *sc,
    unsigned int level)
{
	if (!sc)
		return;
	ia_css_debug_dtrace(level, "Shading Correction:\n");
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "sc_gain_shift", sc->gain_shift);
}

/* ------ deprecated(bz675) : from ------ */
/*
 * The @parameter{} annotation (in *.pipe) normally generates the
 * process/get/set accessors for parameters used by the ISP kernels.
 * ia_css_shading_settings, however, holds a CSS-only parameter that the
 * ISP kernels never see, so its get/set functions are hand-written here
 * instead of being generated.  (michie)
 */
void sh_css_get_shading_settings(const struct ia_css_isp_parameters *params,
				 struct ia_css_shading_settings *settings)
{
	if (!settings)
		return;
	assert(params);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_get_shading_settings() enter: settings=%p\n",
			    settings);

	*settings = params->shading_settings;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_get_shading_settings() leave: settings.enable_shading_table_conversion=%d\n",
			    settings->enable_shading_table_conversion);
}

/* Store new shading settings and mark them as changed for the next commit. */
void sh_css_set_shading_settings(struct ia_css_isp_parameters *params,
				 const struct ia_css_shading_settings *settings)
{
	if (!settings)
		return;
	assert(params);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_set_shading_settings() enter: settings.enable_shading_table_conversion=%d\n",
			    settings->enable_shading_table_conversion);

	params->shading_settings = *settings;
	params->shading_settings_changed = true;

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "ia_css_set_shading_settings() leave: return_void\n");
}

/* ------ deprecated(bz675) : to ------ */
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/sc/sc_1.0/ia_css_sc.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <assert_support.h>
#include <ia_css_frame_public.h>
#include <ia_css_frame.h>
#include <ia_css_binary.h>
#define IA_CSS_INCLUDE_CONFIGURATIONS
#include "ia_css_isp_configs.h"
#include "isp.h"
#include "ia_css_ref.host.h"

/*
 * Fill the ISP-side reference-frame config: DMA port B from the first
 * reference frame (or a pass-through default when absent) and the Y/C
 * plane base addresses of every delay frame.
 */
int ia_css_ref_config(struct sh_css_isp_ref_isp_config *to,
		      const struct ia_css_ref_configuration *from,
		      unsigned int size)
{
	unsigned int elems_a = ISP_VEC_NELEMS, i;
	int ret;

	if (from->ref_frames[0]) {
		ret = ia_css_dma_configure_from_info(&to->port_b,
						     &from->ref_frames[0]->frame_info);
		if (ret)
			return ret;
		to->width_a_over_b = elems_a / to->port_b.elems;
		to->dvs_frame_delay = from->dvs_frame_delay;
	} else {
		/* No reference frame: neutral ratio, no delay. */
		to->width_a_over_b = 1;
		to->dvs_frame_delay = 0;
		to->port_b.elems = elems_a;
	}

	for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++) {
		if (from->ref_frames[i]) {
			to->ref_frame_addr_y[i] = from->ref_frames[i]->data +
			    from->ref_frames[i]->planes.yuv.y.offset;
			to->ref_frame_addr_c[i] = from->ref_frames[i]->data +
			    from->ref_frames[i]->planes.yuv.u.offset;
		} else {
			to->ref_frame_addr_y[i] = 0;
			to->ref_frame_addr_c[i] = 0;
		}
	}

	/* Assume divisiblity here, may need to generalize to fixed point. */
	if (elems_a % to->port_b.elems != 0)
		return -EINVAL;

	return 0;
}

/*
 * Build an ia_css_ref_configuration from the caller's frame array and
 * DVS delay, then hand it to the generated configure function.
 */
int ia_css_ref_configure(const struct ia_css_binary *binary,
			 const struct ia_css_frame * const *ref_frames,
			 const uint32_t dvs_frame_delay)
{
	struct ia_css_ref_configuration config;
	unsigned int i;

	for (i = 0; i < MAX_NUM_VIDEO_DELAY_FRAMES; i++)
		config.ref_frames[i] = ref_frames[i];

	config.dvs_frame_delay = dvs_frame_delay;

	return ia_css_configure_ref(binary, &config);
}

/*
 * Initialize the reference-frame DMEM state: frame 0 is the input
 * buffer, frame 1 the output buffer (hence the >= 2 sanity check).
 */
void ia_css_init_ref_state(
    struct sh_css_isp_ref_dmem_state *state,
    unsigned int size)
{
	(void)size;
	assert(MAX_NUM_VIDEO_DELAY_FRAMES >= 2);
	state->ref_in_buf_idx = 0;
	state->ref_out_buf_idx = 1;
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/ref/ref_1.0/ia_css_ref.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_internal.h" #include "sh_css_frac.h" #include "ia_css_raa.host.h" void ia_css_raa_encode( struct sh_css_isp_aa_params *to, const struct ia_css_aa_config *from, unsigned int size) { (void)size; (void)to; (void)from; }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/raw_aa_binning/raw_aa_binning_1.0/ia_css_raa.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "assert_support.h" #include "ia_css_ynr2.host.h" const struct ia_css_ynr_config default_ynr_config = { 0, 0, 0, 0, }; const struct ia_css_fc_config default_fc_config = { 1, 0, /* 0 -> ineffective */ 0, /* 0 -> ineffective */ 0, /* 0 -> ineffective */ 0, /* 0 -> ineffective */ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */ (1 << (ISP_VEC_ELEMBITS - 2)), /* 0.5 */ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1 */ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1 */ (int16_t)-(1 << (ISP_VEC_ELEMBITS - 1)), /* -1 */ (int16_t)-(1 << (ISP_VEC_ELEMBITS - 1)), /* -1 */ }; void ia_css_ynr_encode( struct sh_css_isp_yee2_params *to, const struct ia_css_ynr_config *from, unsigned int size) { (void)size; to->edge_sense_gain_0 = from->edge_sense_gain_0; to->edge_sense_gain_1 = from->edge_sense_gain_1; to->corner_sense_gain_0 = from->corner_sense_gain_0; to->corner_sense_gain_1 = from->corner_sense_gain_1; } void ia_css_fc_encode( struct sh_css_isp_fc_params *to, const struct ia_css_fc_config *from, unsigned int size) { (void)size; to->gain_exp = from->gain_exp; to->coring_pos_0 = from->coring_pos_0; to->coring_pos_1 = from->coring_pos_1; to->coring_neg_0 = from->coring_neg_0; to->coring_neg_1 = from->coring_neg_1; to->gain_pos_0 = from->gain_pos_0; to->gain_pos_1 = 
from->gain_pos_1; to->gain_neg_0 = from->gain_neg_0; to->gain_neg_1 = from->gain_neg_1; to->crop_pos_0 = from->crop_pos_0; to->crop_pos_1 = from->crop_pos_1; to->crop_neg_0 = from->crop_neg_0; to->crop_neg_1 = from->crop_neg_1; } void ia_css_ynr_dump( const struct sh_css_isp_yee2_params *yee2, unsigned int level); void ia_css_fc_dump( const struct sh_css_isp_fc_params *fc, unsigned int level); void ia_css_fc_debug_dtrace( const struct ia_css_fc_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.gain_exp=%d, config.coring_pos_0=%d, config.coring_pos_1=%d, config.coring_neg_0=%d, config.coring_neg_1=%d, config.gain_pos_0=%d, config.gain_pos_1=%d, config.gain_neg_0=%d, config.gain_neg_1=%d, config.crop_pos_0=%d, config.crop_pos_1=%d, config.crop_neg_0=%d, config.crop_neg_1=%d\n", config->gain_exp, config->coring_pos_0, config->coring_pos_1, config->coring_neg_0, config->coring_neg_1, config->gain_pos_0, config->gain_pos_1, config->gain_neg_0, config->gain_neg_1, config->crop_pos_0, config->crop_pos_1, config->crop_neg_0, config->crop_neg_1); } void ia_css_ynr_debug_dtrace( const struct ia_css_ynr_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.edge_sense_gain_0=%d, config.edge_sense_gain_1=%d, config.corner_sense_gain_0=%d, config.corner_sense_gain_1=%d\n", config->edge_sense_gain_0, config->edge_sense_gain_1, config->corner_sense_gain_0, config->corner_sense_gain_1); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_2/ia_css_ynr2.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "sh_css_frac.h" #include "bnr/bnr_1.0/ia_css_bnr.host.h" #include "ia_css_ynr.host.h" const struct ia_css_nr_config default_nr_config = { 16384, 8192, 1280, 0, 0 }; const struct ia_css_ee_config default_ee_config = { 8192, 128, 2048 }; void ia_css_nr_encode( struct sh_css_isp_ynr_params *to, const struct ia_css_nr_config *from, unsigned int size) { (void)size; /* YNR (Y Noise Reduction) */ to->threshold = uDIGIT_FITTING(8192U, 16, SH_CSS_BAYER_BITS); to->gain_all = uDIGIT_FITTING(from->ynr_gain, 16, SH_CSS_YNR_GAIN_SHIFT); to->gain_dir = uDIGIT_FITTING(from->ynr_gain, 16, SH_CSS_YNR_GAIN_SHIFT); to->threshold_cb = uDIGIT_FITTING(from->threshold_cb, 16, SH_CSS_BAYER_BITS); to->threshold_cr = uDIGIT_FITTING(from->threshold_cr, 16, SH_CSS_BAYER_BITS); } void ia_css_yee_encode( struct sh_css_isp_yee_params *to, const struct ia_css_yee_config *from, unsigned int size) { int asiWk1 = (int)from->ee.gain; int asiWk2 = asiWk1 / 8; int asiWk3 = asiWk1 / 4; (void)size; /* YEE (Y Edge Enhancement) */ to->dirthreshold_s = min((uDIGIT_FITTING(from->nr.direction, 16, SH_CSS_BAYER_BITS) << 1), SH_CSS_BAYER_MAXVAL); to->dirthreshold_g = min((uDIGIT_FITTING(from->nr.direction, 16, SH_CSS_BAYER_BITS) << 4), SH_CSS_BAYER_MAXVAL); to->dirthreshold_width_log2 = uFRACTION_BITS_FITTING(8); to->dirthreshold_width = 1 << 
to->dirthreshold_width_log2; to->detailgain = uDIGIT_FITTING(from->ee.detail_gain, 11, SH_CSS_YEE_DETAIL_GAIN_SHIFT); to->coring_s = (uDIGIT_FITTING(56U, 16, SH_CSS_BAYER_BITS) * from->ee.threshold) >> 8; to->coring_g = (uDIGIT_FITTING(224U, 16, SH_CSS_BAYER_BITS) * from->ee.threshold) >> 8; /* 8; // *1.125 ->[s4.8] */ to->scale_plus_s = (asiWk1 + asiWk2) >> (11 - SH_CSS_YEE_SCALE_SHIFT); /* 8; // ( * -.25)->[s4.8] */ to->scale_plus_g = (0 - asiWk3) >> (11 - SH_CSS_YEE_SCALE_SHIFT); /* 8; // *0.875 ->[s4.8] */ to->scale_minus_s = (asiWk1 - asiWk2) >> (11 - SH_CSS_YEE_SCALE_SHIFT); /* 8; // ( *.25 ) ->[s4.8] */ to->scale_minus_g = (asiWk3) >> (11 - SH_CSS_YEE_SCALE_SHIFT); to->clip_plus_s = uDIGIT_FITTING(32760U, 16, SH_CSS_BAYER_BITS); to->clip_plus_g = 0; to->clip_minus_s = uDIGIT_FITTING(504U, 16, SH_CSS_BAYER_BITS); to->clip_minus_g = uDIGIT_FITTING(32256U, 16, SH_CSS_BAYER_BITS); to->Yclip = SH_CSS_BAYER_MAXVAL; } void ia_css_nr_dump( const struct sh_css_isp_ynr_params *ynr, unsigned int level) { if (!ynr) return; ia_css_debug_dtrace(level, "Y Noise Reduction:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ynr_threshold", ynr->threshold); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ynr_gain_all", ynr->gain_all); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ynr_gain_dir", ynr->gain_dir); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ynr_threshold_cb", ynr->threshold_cb); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ynr_threshold_cr", ynr->threshold_cr); } void ia_css_yee_dump( const struct sh_css_isp_yee_params *yee, unsigned int level) { ia_css_debug_dtrace(level, "Y Edge Enhancement:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ynryee_dirthreshold_s", yee->dirthreshold_s); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ynryee_dirthreshold_g", yee->dirthreshold_g); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ynryee_dirthreshold_width_log2", yee->dirthreshold_width_log2); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ynryee_dirthreshold_width", 
yee->dirthreshold_width); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "yee_detailgain", yee->detailgain); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "yee_coring_s", yee->coring_s); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "yee_coring_g", yee->coring_g); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "yee_scale_plus_s", yee->scale_plus_s); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "yee_scale_plus_g", yee->scale_plus_g); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "yee_scale_minus_s", yee->scale_minus_s); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "yee_scale_minus_g", yee->scale_minus_g); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "yee_clip_plus_s", yee->clip_plus_s); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "yee_clip_plus_g", yee->clip_plus_g); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "yee_clip_minus_s", yee->clip_minus_s); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "yee_clip_minus_g", yee->clip_minus_g); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ynryee_Yclip", yee->Yclip); } void ia_css_nr_debug_dtrace( const struct ia_css_nr_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.direction=%d, config.bnr_gain=%d, config.ynr_gain=%d, config.threshold_cb=%d, config.threshold_cr=%d\n", config->direction, config->bnr_gain, config->ynr_gain, config->threshold_cb, config->threshold_cr); } void ia_css_ee_debug_dtrace( const struct ia_css_ee_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.threshold=%d, config.gain=%d, config.detail_gain=%d\n", config->threshold, config->gain, config->detail_gain); } void ia_css_init_ynr_state( void/*struct sh_css_isp_ynr_vmem_state*/ * state, size_t size) { memset(state, 0, size); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/ynr/ynr_1.0/ia_css_ynr.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "sh_css_frac.h" #ifndef IA_CSS_NO_DEBUG #include "ia_css_debug.h" #endif #include "isp.h" #include "ia_css_ob2.host.h" const struct ia_css_ob2_config default_ob2_config = { 0, 0, 0, 0 }; void ia_css_ob2_encode( struct sh_css_isp_ob2_params *to, const struct ia_css_ob2_config *from, unsigned int size) { (void)size; /* Blacklevels types are u0_16 */ to->blacklevel_gr = uDIGIT_FITTING(from->level_gr, 16, SH_CSS_BAYER_BITS); to->blacklevel_r = uDIGIT_FITTING(from->level_r, 16, SH_CSS_BAYER_BITS); to->blacklevel_b = uDIGIT_FITTING(from->level_b, 16, SH_CSS_BAYER_BITS); to->blacklevel_gb = uDIGIT_FITTING(from->level_gb, 16, SH_CSS_BAYER_BITS); } #ifndef IA_CSS_NO_DEBUG void ia_css_ob2_dump( const struct sh_css_isp_ob2_params *ob2, unsigned int level) { if (!ob2) return; ia_css_debug_dtrace(level, "Optical Black 2:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ob2_blacklevel_gr", ob2->blacklevel_gr); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ob2_blacklevel_r", ob2->blacklevel_r); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ob2_blacklevel_b", ob2->blacklevel_b); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ob2_blacklevel_gb", ob2->blacklevel_gb); } void ia_css_ob2_debug_dtrace( const struct ia_css_ob2_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.level_gr=%d, config.level_r=%d, config.level_b=%d, 
config.level_gb=%d, ", config->level_gr, config->level_r, config->level_b, config->level_gb); } #endif
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/ob/ob2/ia_css_ob2.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "isp.h" #include "ia_css_ob.host.h" const struct ia_css_ob_config default_ob_config = { IA_CSS_OB_MODE_NONE, 0, 0, 0, 0, 0, 0 }; /* TODO: include ob.isp.h to get isp knowledge and add assert on platform restrictions */ void ia_css_ob_configure( struct sh_css_isp_ob_stream_config *config, unsigned int isp_pipe_version, unsigned int raw_bit_depth) { config->isp_pipe_version = isp_pipe_version; config->raw_bit_depth = raw_bit_depth; } void ia_css_ob_encode( struct sh_css_isp_ob_params *to, const struct ia_css_ob_config *from, const struct sh_css_isp_ob_stream_config *config, unsigned int size) { unsigned int ob_bit_depth = config->isp_pipe_version == 2 ? 
SH_CSS_BAYER_BITS : config->raw_bit_depth; unsigned int scale = 16 - ob_bit_depth; (void)size; switch (from->mode) { case IA_CSS_OB_MODE_FIXED: to->blacklevel_gr = from->level_gr >> scale; to->blacklevel_r = from->level_r >> scale; to->blacklevel_b = from->level_b >> scale; to->blacklevel_gb = from->level_gb >> scale; to->area_start_bq = 0; to->area_length_bq = 0; to->area_length_bq_inverse = 0; break; case IA_CSS_OB_MODE_RASTER: to->blacklevel_gr = 0; to->blacklevel_r = 0; to->blacklevel_b = 0; to->blacklevel_gb = 0; to->area_start_bq = from->start_position; to->area_length_bq = (from->end_position - from->start_position) + 1; to->area_length_bq_inverse = AREA_LENGTH_UNIT / to->area_length_bq; break; default: to->blacklevel_gr = 0; to->blacklevel_r = 0; to->blacklevel_b = 0; to->blacklevel_gb = 0; to->area_start_bq = 0; to->area_length_bq = 0; to->area_length_bq_inverse = 0; break; } } void ia_css_ob_vmem_encode( struct sh_css_isp_ob_vmem_params *to, const struct ia_css_ob_config *from, const struct sh_css_isp_ob_stream_config *config, unsigned int size) { struct sh_css_isp_ob_params tmp; struct sh_css_isp_ob_params *ob = &tmp; (void)size; ia_css_ob_encode(&tmp, from, config, sizeof(tmp)); { unsigned int i; unsigned int sp_obarea_start_bq = ob->area_start_bq; unsigned int sp_obarea_length_bq = ob->area_length_bq; unsigned int low = sp_obarea_start_bq; unsigned int high = low + sp_obarea_length_bq; u16 all_ones = ~0; for (i = 0; i < OBAREA_MASK_SIZE; i++) { if (i >= low && i < high) to->vmask[i / ISP_VEC_NELEMS][i % ISP_VEC_NELEMS] = all_ones; else to->vmask[i / ISP_VEC_NELEMS][i % ISP_VEC_NELEMS] = 0; } } } void ia_css_ob_dump( const struct sh_css_isp_ob_params *ob, unsigned int level) { if (!ob) return; ia_css_debug_dtrace(level, "Optical Black:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ob_blacklevel_gr", ob->blacklevel_gr); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ob_blacklevel_r", ob->blacklevel_r); ia_css_debug_dtrace(level, "\t%-32s = %d\n", 
"ob_blacklevel_b", ob->blacklevel_b); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ob_blacklevel_gb", ob->blacklevel_gb); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "obarea_start_bq", ob->area_start_bq); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "obarea_length_bq", ob->area_length_bq); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "obarea_length_bq_inverse", ob->area_length_bq_inverse); } void ia_css_ob_debug_dtrace( const struct ia_css_ob_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.mode=%d, config.level_gr=%d, config.level_r=%d, config.level_b=%d, config.level_gb=%d, config.start_position=%d, config.end_position=%d\n", config->mode, config->level_gr, config->level_r, config->level_b, config->level_gb, config->start_position, config->end_position); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/ob/ob_1.0/ia_css_ob.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_formats.host.h" #include "ia_css_types.h" #include "sh_css_defs.h" /*#include "sh_css_frac.h"*/ #ifndef IA_CSS_NO_DEBUG /* FIXME: See BZ 4427 */ #include "ia_css_debug.h" #endif const struct ia_css_formats_config default_formats_config = { 1 }; void ia_css_formats_encode( struct sh_css_isp_formats_params *to, const struct ia_css_formats_config *from, unsigned int size) { (void)size; to->video_full_range_flag = from->video_full_range_flag; } #ifndef IA_CSS_NO_DEBUG /* FIXME: See BZ 4427 */ void ia_css_formats_dump( const struct sh_css_isp_formats_params *formats, unsigned int level) { if (!formats) return; ia_css_debug_dtrace(level, "\t%-32s = %d\n", "video_full_range_flag", formats->video_full_range_flag); } #endif #ifndef IA_CSS_NO_DEBUG /* FIXME: See BZ 4427 */ void ia_css_formats_debug_dtrace( const struct ia_css_formats_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.video_full_range_flag=%d\n", config->video_full_range_flag); } #endif
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/fc/fc_1.0/ia_css_formats.host.c
// SPDX-License-Identifier: GPL-2.0 /* Release Version: irci_stable_candrpv_0415_20150521_0458 */ /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_hdr.host.h" void ia_css_hdr_init_config( struct sh_css_isp_hdr_params *to, const struct ia_css_hdr_config *from, unsigned int size) { int i; (void)size; for (i = 0; i < HDR_NUM_INPUT_FRAMES - 1; i++) { to->irradiance.match_shift[i] = from->irradiance.match_shift[i]; to->irradiance.match_mul[i] = from->irradiance.match_mul[i]; to->irradiance.thr_low[i] = from->irradiance.thr_low[i]; to->irradiance.thr_high[i] = from->irradiance.thr_high[i]; to->irradiance.thr_coeff[i] = from->irradiance.thr_coeff[i]; to->irradiance.thr_shift[i] = from->irradiance.thr_shift[i]; } to->irradiance.test_irr = from->irradiance.test_irr; to->irradiance.weight_bpp = from->irradiance.weight_bpp; to->deghost.test_deg = from->deghost.test_deg; to->exclusion.test_excl = from->exclusion.test_excl; }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/hdr/ia_css_hdr.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "sh_css_frac.h" #include "ia_css_bnr.host.h" void ia_css_bnr_encode( struct sh_css_isp_bnr_params *to, const struct ia_css_nr_config *from, unsigned int size) { (void)size; /* BNR (Bayer Noise Reduction) */ to->threshold_low = uDIGIT_FITTING(from->direction, 16, SH_CSS_BAYER_BITS); to->threshold_width_log2 = uFRACTION_BITS_FITTING(8); to->threshold_width = 1 << to->threshold_width_log2; to->gain_all = uDIGIT_FITTING(from->bnr_gain, 16, SH_CSS_BNR_GAIN_SHIFT); to->gain_dir = uDIGIT_FITTING(from->bnr_gain, 16, SH_CSS_BNR_GAIN_SHIFT); to->clip = uDIGIT_FITTING(16384U, 16, SH_CSS_BAYER_BITS); } void ia_css_bnr_dump( const struct sh_css_isp_bnr_params *bnr, unsigned int level) { if (!bnr) return; ia_css_debug_dtrace(level, "Bayer Noise Reduction:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "bnr_gain_all", bnr->gain_all); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "bnr_gain_dir", bnr->gain_dir); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "bnr_threshold_low", bnr->threshold_low); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "bnr_threshold_width_log2", bnr->threshold_width_log2); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "bnr_threshold_width", bnr->threshold_width); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "bnr_clip", bnr->clip); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr_1.0/ia_css_bnr.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "type_support.h" #include "ia_css_bnr2_2.host.h" #ifndef IA_CSS_NO_DEBUG #include "ia_css_debug.h" /* ia_css_debug_dtrace() */ #endif /* Default kernel parameters. */ const struct ia_css_bnr2_2_config default_bnr2_2_config = { 200, 200, 200, 0, 0, 0, 200, 200, 200, 0, 0, 0, 0, 4096, 8191, 128, 1, 0, 0, 0, 8191, 0, 8191 }; void ia_css_bnr2_2_encode( struct sh_css_isp_bnr2_2_params *to, const struct ia_css_bnr2_2_config *from, size_t size) { (void)size; to->d_var_gain_r = from->d_var_gain_r; to->d_var_gain_g = from->d_var_gain_g; to->d_var_gain_b = from->d_var_gain_b; to->d_var_gain_slope_r = from->d_var_gain_slope_r; to->d_var_gain_slope_g = from->d_var_gain_slope_g; to->d_var_gain_slope_b = from->d_var_gain_slope_b; to->n_var_gain_r = from->n_var_gain_r; to->n_var_gain_g = from->n_var_gain_g; to->n_var_gain_b = from->n_var_gain_b; to->n_var_gain_slope_r = from->n_var_gain_slope_r; to->n_var_gain_slope_g = from->n_var_gain_slope_g; to->n_var_gain_slope_b = from->n_var_gain_slope_b; to->dir_thres = from->dir_thres; to->dir_thres_w = from->dir_thres_w; to->var_offset_coef = from->var_offset_coef; to->dir_gain = from->dir_gain; to->detail_gain = from->detail_gain; to->detail_gain_divisor = from->detail_gain_divisor; to->detail_level_offset = from->detail_level_offset; to->d_var_th_min = from->d_var_th_min; to->d_var_th_max = from->d_var_th_max; to->n_var_th_min = from->n_var_th_min; 
to->n_var_th_max = from->n_var_th_max; } #ifndef IA_CSS_NO_DEBUG void ia_css_bnr2_2_debug_dtrace( const struct ia_css_bnr2_2_config *bnr, unsigned int level) { if (!bnr) return; ia_css_debug_dtrace(level, "Bayer Noise Reduction 2.2:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_r", bnr->d_var_gain_r); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_g", bnr->d_var_gain_g); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_b", bnr->d_var_gain_b); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_r", bnr->d_var_gain_slope_r); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_g", bnr->d_var_gain_slope_g); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_gain_slope_b", bnr->d_var_gain_slope_b); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_r", bnr->n_var_gain_r); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_g", bnr->n_var_gain_g); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_b", bnr->n_var_gain_b); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_r", bnr->n_var_gain_slope_r); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_g", bnr->n_var_gain_slope_g); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_gain_slope_b", bnr->n_var_gain_slope_b); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_thres", bnr->dir_thres); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_thres_w", bnr->dir_thres_w); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "var_offset_coef", bnr->var_offset_coef); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dir_gain", bnr->dir_gain); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_gain", bnr->detail_gain); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_gain_divisor", bnr->detail_gain_divisor); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "detail_level_offset", bnr->detail_level_offset); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "d_var_th_min", bnr->d_var_th_min); ia_css_debug_dtrace(level, "\t%-32s = %d\n", 
"d_var_th_max", bnr->d_var_th_max); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_th_min", bnr->n_var_th_min); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "n_var_th_max", bnr->n_var_th_max); } #endif /* IA_CSS_NO_DEBUG */
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/bnr/bnr2_2/ia_css_bnr2_2.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "system_global.h" #include "ia_css_types.h" #include "ia_css_macc_table.host.h" /* Multi-Axes Color Correction table for ISP1. * 64values = 2x2matrix for 16area, [s2.13] * ineffective: 16 of "identity 2x2 matix" {8192,0,0,8192} */ const struct ia_css_macc_table default_macc_table = { { 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192, 8192, 0, 0, 8192 } }; /* Multi-Axes Color Correction table for ISP2. * 64values = 2x2matrix for 16area, [s1.12] * ineffective: 16 of "identity 2x2 matix" {4096,0,0,4096} */ const struct ia_css_macc_table default_macc2_table = { { 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096 } };
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc_table.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "sh_css_frac.h" #include "ia_css_macc.host.h" const struct ia_css_macc_config default_macc_config = { 1, }; void ia_css_macc_encode( struct sh_css_isp_macc_params *to, const struct ia_css_macc_config *from, unsigned int size) { (void)size; to->exp = from->exp; } void ia_css_macc_dump( const struct sh_css_isp_macc_params *macc, unsigned int level); void ia_css_macc_debug_dtrace( const struct ia_css_macc_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.exp=%d\n", config->exp); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/macc/macc_1.0/ia_css_macc.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "system_global.h" #include "ia_css_types.h" #include "ia_css_macc1_5_table.host.h" /* Multi-Axes Color Correction table for ISP2. * 64values = 2x2matrix for 16area, [s1.12] * ineffective: 16 of "identity 2x2 matix" {4096,0,0,4096} */ const struct ia_css_macc1_5_table default_macc1_5_table = { { 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096, 4096, 0, 0, 4096 } };
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5_table.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #ifndef IA_CSS_NO_DEBUG /* FIXME: See BZ 4427 */ #include "ia_css_debug.h" #endif #include "ia_css_macc1_5.host.h" const struct ia_css_macc1_5_config default_macc1_5_config = { 1 }; void ia_css_macc1_5_encode( struct sh_css_isp_macc1_5_params *to, const struct ia_css_macc1_5_config *from, unsigned int size) { (void)size; to->exp = from->exp; } void ia_css_macc1_5_vmem_encode( struct sh_css_isp_macc1_5_vmem_params *params, const struct ia_css_macc1_5_table *from, unsigned int size) { unsigned int i, j, k, idx; static const unsigned int idx_map[] = { 0, 1, 3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8 }; (void)size; for (k = 0; k < 4; k++) for (i = 0; i < IA_CSS_MACC_NUM_AXES; i++) { idx = idx_map[i] + (k * IA_CSS_MACC_NUM_AXES); j = 4 * i; params->data[0][(idx)] = from->data[j]; params->data[1][(idx)] = from->data[j + 1]; params->data[2][(idx)] = from->data[j + 2]; params->data[3][(idx)] = from->data[j + 3]; } } #ifndef IA_CSS_NO_DEBUG void ia_css_macc1_5_debug_dtrace( const struct ia_css_macc1_5_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.exp=%d\n", config->exp); } #endif
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/macc/macc1_5/ia_css_macc1_5.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <assert_support.h> #include <ia_css_frame_public.h> #include <ia_css_frame.h> #include <ia_css_binary.h> #define IA_CSS_INCLUDE_CONFIGURATIONS #include "ia_css_isp_configs.h" #include "isp.h" #include "ia_css_crop.host.h" static const struct ia_css_crop_configuration default_config = { .info = (struct ia_css_frame_info *)NULL, }; void ia_css_crop_encode( struct sh_css_isp_crop_isp_params *to, const struct ia_css_crop_config *from, unsigned int size) { (void)size; to->crop_pos = from->crop_pos; } int ia_css_crop_config(struct sh_css_isp_crop_isp_config *to, const struct ia_css_crop_configuration *from, unsigned int size) { unsigned int elems_a = ISP_VEC_NELEMS; int ret; ret = ia_css_dma_configure_from_info(&to->port_b, from->info); if (ret) return ret; to->width_a_over_b = elems_a / to->port_b.elems; /* Assume divisiblity here, may need to generalize to fixed point. */ if (elems_a % to->port_b.elems != 0) return -EINVAL; return 0; } int ia_css_crop_configure(const struct ia_css_binary *binary, const struct ia_css_frame_info *info) { struct ia_css_crop_configuration config = default_config; config.info = info; return ia_css_configure_crop(binary, &config); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/crop/crop_1.0/ia_css_crop.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include "ia_css_frame.h"
#include "ia_css_debug.h"
#define IA_CSS_INCLUDE_CONFIGURATIONS
#include "ia_css_isp_configs.h"
#include "ia_css_output.host.h"
#include "isp.h"

#include "assert_support.h"

/* Default output config: both horizontal and vertical flip disabled. */
const struct ia_css_output_config default_output_config = {
    0,
    0
};

/* Default configurations start with a NULL frame info; the configure
 * helpers below fill in the caller's info before applying them. */
static const struct ia_css_output_configuration default_output_configuration = {
    .info = (struct ia_css_frame_info *)NULL,
};

static const struct ia_css_output0_configuration default_output0_configuration = {
    .info = (struct ia_css_frame_info *)NULL,
};

static const struct ia_css_output1_configuration default_output1_configuration = {
    .info = (struct ia_css_frame_info *)NULL,
};

/* Copy the flip flags from the public config into the ISP parameter
 * layout. @size is unused (generic encode prototype). */
void
ia_css_output_encode(
    struct sh_css_isp_output_params *to,
    const struct ia_css_output_config *from,
    unsigned int size)
{
    (void)size;
    to->enable_hflip = from->enable_hflip;
    to->enable_vflip = from->enable_vflip;
}

/* Fill the ISP output configuration: set up DMA port B from the frame
 * info, derive the vector-width ratio, and record height/enable.
 * A NULL from->info disables the output stage (enable = false, height 0).
 * Returns 0, a negative errno from the DMA setup, or -EINVAL when
 * ISP_VEC_NELEMS is not a multiple of the port element count. */
int
ia_css_output_config(
    struct sh_css_isp_output_isp_config *to,
    const struct ia_css_output_configuration *from,
    unsigned int size)
{
    unsigned int elems_a = ISP_VEC_NELEMS;
    int ret;

    ret = ia_css_dma_configure_from_info(&to->port_b, from->info);
    if (ret)
        return ret;

    to->width_a_over_b = elems_a / to->port_b.elems;
    to->height = from->info ? from->info->res.height : 0;
    to->enable = from->info != NULL;
    ia_css_frame_info_to_frame_sp_info(&to->info, from->info);

    /* Assume divisiblity here, may need to generalize to fixed point. */
    if (elems_a % to->port_b.elems != 0)
        return -EINVAL;

    return 0;
}

/* Output-0 variant: same layout as the generic configuration, so it is
 * forwarded with a cast. */
int
ia_css_output0_config(
    struct sh_css_isp_output_isp_config *to,
    const struct ia_css_output0_configuration *from,
    unsigned int size)
{
    return ia_css_output_config(
        to, (const struct ia_css_output_configuration *)from, size);
}

/* Output-1 variant: same layout as the generic configuration. */
int
ia_css_output1_config(
    struct sh_css_isp_output_isp_config *to,
    const struct ia_css_output1_configuration *from,
    unsigned int size)
{
    return ia_css_output_config(
        to, (const struct ia_css_output_configuration *)from, size);
}

/* Apply an output configuration for @binary; a NULL @info is a no-op
 * success (stage left unconfigured). */
int
ia_css_output_configure(
    const struct ia_css_binary *binary,
    const struct ia_css_frame_info *info)
{
    if (info) {
        struct ia_css_output_configuration config =
            default_output_configuration;

        config.info = info;

        return ia_css_configure_output(binary, &config);
    }
    return 0;
}

/* As ia_css_output_configure(), for output pin 0. */
int
ia_css_output0_configure(
    const struct ia_css_binary *binary,
    const struct ia_css_frame_info *info)
{
    if (info) {
        struct ia_css_output0_configuration config =
            default_output0_configuration;

        config.info = info;

        return ia_css_configure_output0(binary, &config);
    }
    return 0;
}

/* As ia_css_output_configure(), for output pin 1. */
int
ia_css_output1_configure(
    const struct ia_css_binary *binary,
    const struct ia_css_frame_info *info)
{
    if (info) {
        struct ia_css_output1_configuration config =
            default_output1_configuration;

        config.info = info;

        return ia_css_configure_output1(binary, &config);
    }
    return 0;
}

/* Dump the encoded output parameters (flip flags) to the debug trace.
 * NULL @output is tolerated and ignored. */
void
ia_css_output_dump(
    const struct sh_css_isp_output_params *output,
    unsigned int level)
{
    if (!output)
        return;
    ia_css_debug_dtrace(level, "Horizontal Output Flip:\n");
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "enable", output->enable_hflip);
    ia_css_debug_dtrace(level, "Vertical Output Flip:\n");
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "enable", output->enable_vflip);
}

/* Trace the public output configuration at @level. */
void
ia_css_output_debug_dtrace(
    const struct ia_css_output_config *config,
    unsigned int level)
{
    ia_css_debug_dtrace(level,
                        "config.enable_hflip=%d",
                        config->enable_hflip);
    ia_css_debug_dtrace(level,
                        "config.enable_vflip=%d",
                        config->enable_vflip);
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/output/output_1.0/ia_css_output.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include "ia_css_types.h"
#include "ia_css_frame.h"
#include "sh_css_defs.h"
#include "ia_css_debug.h"
#include "sh_css_frac.h"
#include "assert_support.h"
#define IA_CSS_INCLUDE_CONFIGURATIONS
#include "ia_css_isp_configs.h"
#include "isp.h"

#include "ia_css_tnr.host.h"

/* Default TNR configuration: gain 32768, Y threshold 32, UV threshold 32. */
const struct ia_css_tnr_config default_tnr_config = {
    32768,
    32,
    32,
};

/* Convert the public TNR configuration to the ISP fixed-point layout
 * via uDIGIT_FITTING. @size is unused (generic encode prototype). */
void
ia_css_tnr_encode(
    struct sh_css_isp_tnr_params *to,
    const struct ia_css_tnr_config *from,
    unsigned int size)
{
    (void)size;
    to->coef =
        uDIGIT_FITTING(from->gain, 16, SH_CSS_TNR_COEF_SHIFT);
    to->threshold_Y =
        uDIGIT_FITTING(from->threshold_y, 16, SH_CSS_ISP_YUV_BITS);
    to->threshold_C =
        uDIGIT_FITTING(from->threshold_uv, 16, SH_CSS_ISP_YUV_BITS);
}

/* Dump the encoded TNR parameters to the debug trace; a NULL @tnr is
 * tolerated and ignored. */
void
ia_css_tnr_dump(
    const struct sh_css_isp_tnr_params *tnr,
    unsigned int level)
{
    if (!tnr)
        return;
    ia_css_debug_dtrace(level, "Temporal Noise Reduction:\n");
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "tnr_coef", tnr->coef);
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "tnr_threshold_Y", tnr->threshold_Y);
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "tnr_threshold_C", tnr->threshold_C);
}

/* Trace the public TNR configuration at @level. */
void
ia_css_tnr_debug_dtrace(
    const struct ia_css_tnr_config *config,
    unsigned int level)
{
    ia_css_debug_dtrace(level,
                        "config.gain=%d, config.threshold_y=%d, config.threshold_uv=%d\n",
                        config->gain,
                        config->threshold_y, config->threshold_uv);
}

/* Fill the ISP TNR configuration: DMA port B is configured from the
 * first reference frame, and the buffer address of every TNR frame is
 * recorded (base + yuyv plane offset).
 * NOTE(review): assumes all entries of from->tnr_frames[] are non-NULL
 * — caller contract, not checked here.
 * Returns 0, a negative errno from the DMA setup, or -EINVAL when
 * ISP_VEC_NELEMS is not a multiple of the port element count. */
int ia_css_tnr_config(struct sh_css_isp_tnr_isp_config *to,
                      const struct ia_css_tnr_configuration *from,
                      unsigned int size)
{
    unsigned int elems_a = ISP_VEC_NELEMS;
    unsigned int i;
    int ret;

    ret = ia_css_dma_configure_from_info(&to->port_b,
                                         &from->tnr_frames[0]->frame_info);
    if (ret)
        return ret;
    to->width_a_over_b = elems_a / to->port_b.elems;
    to->frame_height = from->tnr_frames[0]->frame_info.res.height;
    for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++) {
        to->tnr_frame_addr[i] = from->tnr_frames[i]->data +
                                from->tnr_frames[i]->planes.yuyv.offset;
    }

    /* Assume divisiblity here, may need to generalize to fixed point. */
    if (elems_a % to->port_b.elems != 0)
        return -EINVAL;

    return 0;
}

/* Build a TNR configuration from the caller's frame array and apply it
 * to @binary. */
int ia_css_tnr_configure(const struct ia_css_binary *binary,
                         const struct ia_css_frame * const *frames)
{
    struct ia_css_tnr_configuration config;
    unsigned int i;

    for (i = 0; i < NUM_VIDEO_TNR_FRAMES; i++)
        config.tnr_frames[i] = frames[i];

    return ia_css_configure_tnr(binary, &config);
}

/* Initialize the TNR double-buffer state: input buffer 0, output
 * buffer 1. The asserts document the minimum frame count and that
 * @size matches the state layout. */
void
ia_css_init_tnr_state(
    struct sh_css_isp_tnr_dmem_state *state,
    size_t size)
{
    (void)size;

    assert(NUM_VIDEO_TNR_FRAMES >= 2);
    assert(sizeof(*state) == size);
    state->tnr_in_buf_idx = 0;
    state->tnr_out_buf_idx = 1;
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/tnr/tnr_1.0/ia_css_tnr.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include "type_support.h"
#include "ia_css_bnlm.host.h"

#ifndef IA_CSS_NO_DEBUG
#include "ia_css_debug.h" /* ia_css_debug_dtrace() */
#endif
#include <assert_support.h>

#define BNLM_DIV_LUT_SIZE	(12)

/* Piecewise-linear division LUT: segment break points ("nearests"),
 * per-segment slopes, and per-segment intercepts. */
static const s32 div_lut_nearests[BNLM_DIV_LUT_SIZE] = {
    0, 454, 948, 1484, 2070, 2710, 3412, 4184, 5035, 5978, 7025, 8191
};

static const s32 div_lut_slopes[BNLM_DIV_LUT_SIZE] = {
    -7760, -6960, -6216, -5536, -4912, -4344, -3832, -3360, -2936, -2552, -2208, -2208
};

static const s32 div_lut_intercepts[BNLM_DIV_LUT_SIZE] = {
    8184, 7752, 7336, 6928, 6536, 6152, 5776, 5416, 5064, 4728, 4408, 4408
};

/* Encodes a look-up table from BNLM public parameters to vmem parameters.
 * Input:
 *	lut      : bnlm_lut struct containing encoded vmem parameters look-up table
 *	lut_thr  : array containing threshold values for lut
 *	lut_val  : array containing output values related to lut_thr
 *	lut_size : Size of lut_val array (lut_thr has lut_size - 1 entries)
 */
static inline void
bnlm_lut_encode(struct bnlm_lut *lut, const int32_t *lut_thr,
                const s32 *lut_val, const uint32_t lut_size)
{
    u32 blk, i;
    const u32 block_size = 16;
    const u32 total_blocks = ISP_VEC_NELEMS / block_size;

    /* Create VMEM LUTs from the threshold and value arrays.
     *
     * Min size of the LUT is 2 entries.
     *
     * Max size of the LUT is 16 entries, so that the LUT can fit into a
     * single group of 16 elements inside a vector.
     * Then these elements are copied into other groups inside the same
     * vector. If the LUT size is less than 16, then remaining elements are
     * set to 0.
     */
    assert((lut_size >= 2) && (lut_size <= block_size));

    /* array lut_thr has (lut_size-1) entries */
    for (i = 0; i < lut_size - 2; i++) {
        /* Check if the lut_thr is monotonically increasing */
        assert(lut_thr[i] <= lut_thr[i + 1]);
    }

    /* Initialize: zero out every element of every block. */
    for (i = 0; i < total_blocks * block_size; i++) {
        lut->thr[0][i] = 0;
        lut->val[0][i] = 0;
    }

    /* Copy all data into the first block. */
    for (i = 0; i < lut_size - 1; i++) {
        lut->thr[0][i] = lut_thr[i];
        lut->val[0][i] = lut_val[i];
    }

    lut->val[0][i] = lut_val[i]; /* val has one more element than thr */

    /* Copy data from first block to all blocks.
     * NOTE(review): the inner loop starts at i = 1, so element 0 of each
     * subsequent block keeps its zero initialization — presumably
     * intentional for the vector layout; confirm against ISP firmware. */
    for (blk = 1; blk < total_blocks; blk++) {
        u32 blk_offset = blk * block_size;

        for (i = 1; i < lut_size; i++) {
            lut->thr[0][blk_offset + i] = lut->thr[0][i];
            lut->val[0][blk_offset + i] = lut->val[0][i];
        }
    }
}

/*
 * - Encodes BNLM public parameters into VMEM parameters
 * - Generates VMEM parameters which will needed internally ISP
 */
void
ia_css_bnlm_vmem_encode(
    struct bnlm_vmem_params *to,
    const struct ia_css_bnlm_config *from,
    size_t size)
{
    int i;

    (void)size;

    /* Initialize LUTs in VMEM parameters (all public LUTs are 16 wide) */
    bnlm_lut_encode(&to->mu_root_lut, from->mu_root_lut_thr,
                    from->mu_root_lut_val, 16);
    bnlm_lut_encode(&to->sad_norm_lut, from->sad_norm_lut_thr,
                    from->sad_norm_lut_val, 16);
    bnlm_lut_encode(&to->sig_detail_lut, from->sig_detail_lut_thr,
                    from->sig_detail_lut_val, 16);
    bnlm_lut_encode(&to->sig_rad_lut, from->sig_rad_lut_thr,
                    from->sig_rad_lut_val, 16);
    bnlm_lut_encode(&to->rad_pow_lut, from->rad_pow_lut_thr,
                    from->rad_pow_lut_val, 16);
    bnlm_lut_encode(&to->nl_0_lut, from->nl_0_lut_thr,
                    from->nl_0_lut_val, 16);
    bnlm_lut_encode(&to->nl_1_lut, from->nl_1_lut_thr,
                    from->nl_1_lut_val, 16);
    bnlm_lut_encode(&to->nl_2_lut, from->nl_2_lut_thr,
                    from->nl_2_lut_val, 16);
    bnlm_lut_encode(&to->nl_3_lut, from->nl_3_lut_thr,
                    from->nl_3_lut_val, 16);

    /* Initialize arrays in VMEM parameters: only the first few lanes
     * carry data, the rest stay zero. */
    memset(to->nl_th, 0, sizeof(to->nl_th));
    to->nl_th[0][0] = from->nl_th[0];
    to->nl_th[0][1] = from->nl_th[1];
    to->nl_th[0][2] = from->nl_th[2];

    memset(to->match_quality_max_idx, 0,
           sizeof(to->match_quality_max_idx));
    to->match_quality_max_idx[0][0] = from->match_quality_max_idx[0];
    to->match_quality_max_idx[0][1] = from->match_quality_max_idx[1];
    to->match_quality_max_idx[0][2] = from->match_quality_max_idx[2];
    to->match_quality_max_idx[0][3] = from->match_quality_max_idx[3];

    /* Division LUT: break points + slopes go through the generic
     * encoder, intercepts are laid out directly. */
    bnlm_lut_encode(&to->div_lut, div_lut_nearests, div_lut_slopes,
                    BNLM_DIV_LUT_SIZE);
    memset(to->div_lut_intercepts, 0, sizeof(to->div_lut_intercepts));
    for (i = 0; i < BNLM_DIV_LUT_SIZE; i++) {
        to->div_lut_intercepts[0][i] = div_lut_intercepts[i];
    }

    /* Power-of-two table: 1 << i for each representable shift. */
    memset(to->power_of_2, 0, sizeof(to->power_of_2));
    for (i = 0; i < (ISP_VEC_ELEMBITS - 1); i++) {
        to->power_of_2[0][i] = 1 << i;
    }
}

/* - Encodes BNLM public parameters into DMEM parameters
 * (straight field-by-field copy; @size is unused). */
void
ia_css_bnlm_encode(
    struct bnlm_dmem_params *to,
    const struct ia_css_bnlm_config *from,
    size_t size)
{
    (void)size;
    to->rad_enable = from->rad_enable;
    to->rad_x_origin = from->rad_x_origin;
    to->rad_y_origin = from->rad_y_origin;
    to->avg_min_th = from->avg_min_th;
    to->max_min_th = from->max_min_th;

    to->exp_coeff_a = from->exp_coeff_a;
    to->exp_coeff_b = from->exp_coeff_b;
    to->exp_coeff_c = from->exp_coeff_c;
    to->exp_exponent = from->exp_exponent;
}

/* Prints debug traces for BNLM public parameters.
 * NULL @config is tolerated; tracing compiles out under
 * IA_CSS_NO_DEBUG. */
void
ia_css_bnlm_debug_trace(
    const struct ia_css_bnlm_config *config,
    unsigned int level)
{
    if (!config)
        return;

#ifndef IA_CSS_NO_DEBUG
    ia_css_debug_dtrace(level, "BNLM:\n");
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "rad_enable", config->rad_enable);
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "rad_x_origin", config->rad_x_origin);
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "rad_y_origin", config->rad_y_origin);
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "avg_min_th", config->avg_min_th);
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "max_min_th", config->max_min_th);

    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "exp_coeff_a", config->exp_coeff_a);
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "exp_coeff_b", config->exp_coeff_b);
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "exp_coeff_c", config->exp_coeff_c);
    ia_css_debug_dtrace(level, "\t%-32s = %d\n",
                        "exp_exponent", config->exp_exponent);

    /* ToDo: print traces for LUTs */
#endif /* IA_CSS_NO_DEBUG */
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/bnlm/ia_css_bnlm.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #ifndef IA_CSS_NO_DEBUG #include "ia_css_debug.h" #endif #include "sh_css_frac.h" #include "ia_css_wb.host.h" const struct ia_css_wb_config default_wb_config = { 1, 32768, 32768, 32768, 32768 }; void ia_css_wb_encode( struct sh_css_isp_wb_params *to, const struct ia_css_wb_config *from, unsigned int size) { (void)size; to->gain_shift = uISP_REG_BIT - from->integer_bits; to->gain_gr = uDIGIT_FITTING(from->gr, 16 - from->integer_bits, to->gain_shift); to->gain_r = uDIGIT_FITTING(from->r, 16 - from->integer_bits, to->gain_shift); to->gain_b = uDIGIT_FITTING(from->b, 16 - from->integer_bits, to->gain_shift); to->gain_gb = uDIGIT_FITTING(from->gb, 16 - from->integer_bits, to->gain_shift); } #ifndef IA_CSS_NO_DEBUG void ia_css_wb_dump( const struct sh_css_isp_wb_params *wb, unsigned int level) { if (!wb) return; ia_css_debug_dtrace(level, "White Balance:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "wb_gain_shift", wb->gain_shift); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "wb_gain_gr", wb->gain_gr); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "wb_gain_r", wb->gain_r); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "wb_gain_b", wb->gain_b); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "wb_gain_gb", wb->gain_gb); } void ia_css_wb_debug_dtrace( const struct ia_css_wb_config *config, unsigned int level) { ia_css_debug_dtrace(level, 
"config.integer_bits=%d, config.gr=%d, config.r=%d, config.b=%d, config.gb=%d\n", config->integer_bits, config->gr, config->r, config->b, config->gb); } #endif
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/wb/wb_1.0/ia_css_wb.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_debug.h" #include "ia_css_tdf.host.h" static const s16 g_pyramid[8][8] = { {128, 384, 640, 896, 896, 640, 384, 128}, {384, 1152, 1920, 2688, 2688, 1920, 1152, 384}, {640, 1920, 3200, 4480, 4480, 3200, 1920, 640}, {896, 2688, 4480, 6272, 6272, 4480, 2688, 896}, {896, 2688, 4480, 6272, 6272, 4480, 2688, 896}, {640, 1920, 3200, 4480, 4480, 3200, 1920, 640}, {384, 1152, 1920, 2688, 2688, 1920, 1152, 384}, {128, 384, 640, 896, 896, 640, 384, 128} }; void ia_css_tdf_vmem_encode( struct ia_css_isp_tdf_vmem_params *to, const struct ia_css_tdf_config *from, size_t size) { unsigned int i; (void)size; for (i = 0; i < ISP_VEC_NELEMS; i++) { to->pyramid[0][i] = g_pyramid[i / 8][i % 8]; to->threshold_flat[0][i] = from->thres_flat_table[i]; to->threshold_detail[0][i] = from->thres_detail_table[i]; } } void ia_css_tdf_encode( struct ia_css_isp_tdf_dmem_params *to, const struct ia_css_tdf_config *from, size_t size) { (void)size; to->Epsilon_0 = from->epsilon_0; to->Epsilon_1 = from->epsilon_1; to->EpsScaleText = from->eps_scale_text; to->EpsScaleEdge = from->eps_scale_edge; to->Sepa_flat = from->sepa_flat; to->Sepa_Edge = from->sepa_edge; to->Blend_Flat = from->blend_flat; to->Blend_Text = from->blend_text; to->Blend_Edge = from->blend_edge; to->Shading_Gain = from->shading_gain; to->Shading_baseGain = from->shading_base_gain; to->LocalY_Gain = from->local_y_gain; to->LocalY_baseGain = 
from->local_y_base_gain; } void ia_css_tdf_debug_dtrace( const struct ia_css_tdf_config *config, unsigned int level) { (void)config; (void)level; }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/tdf/tdf_1.0/ia_css_tdf.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_copy_output.host.h" #include "ia_css_binary.h" #include "type_support.h" #define IA_CSS_INCLUDE_CONFIGURATIONS #include "ia_css_isp_configs.h" #include "isp.h" static const struct ia_css_copy_output_configuration default_config = { .enable = false, }; void ia_css_copy_output_config( struct sh_css_isp_copy_output_isp_config *to, const struct ia_css_copy_output_configuration *from, unsigned int size) { (void)size; to->enable = from->enable; } int ia_css_copy_output_configure(const struct ia_css_binary *binary, bool enable) { struct ia_css_copy_output_configuration config = default_config; config.enable = enable; return ia_css_configure_copy_output(binary, &config); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/copy_output/copy_output_1.0/ia_css_copy_output.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_dpc2.host.h" #include "assert_support.h" void ia_css_dpc2_encode( struct ia_css_isp_dpc2_params *to, const struct ia_css_dpc2_config *from, size_t size) { (void)size; assert((from->metric1 >= 0) && (from->metric1 <= METRIC1_ONE_FP)); assert((from->metric3 >= 0) && (from->metric3 <= METRIC3_ONE_FP)); assert((from->metric2 >= METRIC2_ONE_FP) && (from->metric2 < 256 * METRIC2_ONE_FP)); assert((from->wb_gain_gr > 0) && (from->wb_gain_gr < 16 * WBGAIN_ONE_FP)); assert((from->wb_gain_r > 0) && (from->wb_gain_r < 16 * WBGAIN_ONE_FP)); assert((from->wb_gain_b > 0) && (from->wb_gain_b < 16 * WBGAIN_ONE_FP)); assert((from->wb_gain_gb > 0) && (from->wb_gain_gb < 16 * WBGAIN_ONE_FP)); to->metric1 = from->metric1; to->metric2 = from->metric2; to->metric3 = from->metric3; to->wb_gain_gr = from->wb_gain_gr; to->wb_gain_r = from->wb_gain_r; to->wb_gain_b = from->wb_gain_b; to->wb_gain_gb = from->wb_gain_gb; } /* TODO: AM: This needs a proper implementation. */ void ia_css_init_dpc2_state( void *state, size_t size) { (void)state; (void)size; } #ifndef IA_CSS_NO_DEBUG /* TODO: AM: This needs a proper implementation. */ void ia_css_dpc2_debug_dtrace( const struct ia_css_dpc2_config *config, unsigned int level) { (void)config; (void)level; } #endif
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/dpc2/ia_css_dpc2.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_norm.host.h"
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/norm/norm_1.0/ia_css_norm.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #ifndef IA_CSS_NO_DEBUG #include "ia_css_debug.h" #endif #include "sh_css_frac.h" #include "assert_support.h" #include "bh/bh_2/ia_css_bh.host.h" #include "ia_css_s3a.host.h" const struct ia_css_3a_config default_3a_config = { 25559, 32768, 7209, 65535, 0, 65535, {-3344, -6104, -19143, 19143, 6104, 3344, 0}, {1027, 0, -9219, 16384, -9219, 1027, 0} }; static unsigned int s3a_raw_bit_depth; void ia_css_s3a_configure(unsigned int raw_bit_depth) { s3a_raw_bit_depth = raw_bit_depth; } static void ia_css_ae_encode( struct sh_css_isp_ae_params *to, const struct ia_css_3a_config *from, unsigned int size) { (void)size; /* coefficients to calculate Y */ to->y_coef_r = uDIGIT_FITTING(from->ae_y_coef_r, 16, SH_CSS_AE_YCOEF_SHIFT); to->y_coef_g = uDIGIT_FITTING(from->ae_y_coef_g, 16, SH_CSS_AE_YCOEF_SHIFT); to->y_coef_b = uDIGIT_FITTING(from->ae_y_coef_b, 16, SH_CSS_AE_YCOEF_SHIFT); } static void ia_css_awb_encode( struct sh_css_isp_awb_params *to, const struct ia_css_3a_config *from, unsigned int size) { (void)size; /* AWB level gate */ to->lg_high_raw = uDIGIT_FITTING(from->awb_lg_high_raw, 16, s3a_raw_bit_depth); to->lg_low = uDIGIT_FITTING(from->awb_lg_low, 16, SH_CSS_BAYER_BITS); to->lg_high = uDIGIT_FITTING(from->awb_lg_high, 16, SH_CSS_BAYER_BITS); } static void ia_css_af_encode( struct sh_css_isp_af_params *to, const struct ia_css_3a_config *from, unsigned 
int size) { unsigned int i; (void)size; /* af fir coefficients */ for (i = 0; i < 7; ++i) { to->fir1[i] = sDIGIT_FITTING(from->af_fir1_coef[i], 15, SH_CSS_AF_FIR_SHIFT); to->fir2[i] = sDIGIT_FITTING(from->af_fir2_coef[i], 15, SH_CSS_AF_FIR_SHIFT); } } void ia_css_s3a_encode( struct sh_css_isp_s3a_params *to, const struct ia_css_3a_config *from, unsigned int size) { (void)size; ia_css_ae_encode(&to->ae, from, sizeof(to->ae)); ia_css_awb_encode(&to->awb, from, sizeof(to->awb)); ia_css_af_encode(&to->af, from, sizeof(to->af)); } #if 0 void ia_css_process_s3a( unsigned int pipe_id, const struct ia_css_pipeline_stage *stage, struct ia_css_isp_parameters *params) { short dmem_offset = stage->binary->info->mem_offsets->dmem.s3a; assert(params); if (dmem_offset >= 0) { ia_css_s3a_encode((struct sh_css_isp_s3a_params *) &stage->isp_mem_params[IA_CSS_ISP_DMEM0].address[dmem_offset], &params->s3a_config); ia_css_bh_encode((struct sh_css_isp_bh_params *) &stage->isp_mem_params[IA_CSS_ISP_DMEM0].address[dmem_offset], &params->s3a_config); params->isp_params_changed = true; params->isp_mem_params_changed[pipe_id][stage->stage_num][IA_CSS_ISP_DMEM0] = true; } params->isp_params_changed = true; } #endif #ifndef IA_CSS_NO_DEBUG void ia_css_ae_dump( const struct sh_css_isp_ae_params *ae, unsigned int level) { if (!ae) return; ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ae_y_coef_r", ae->y_coef_r); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ae_y_coef_g", ae->y_coef_g); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ae_y_coef_b", ae->y_coef_b); } void ia_css_awb_dump( const struct sh_css_isp_awb_params *awb, unsigned int level) { ia_css_debug_dtrace(level, "\t%-32s = %d\n", "awb_lg_high_raw", awb->lg_high_raw); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "awb_lg_low", awb->lg_low); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "awb_lg_high", awb->lg_high); } void ia_css_af_dump( const struct sh_css_isp_af_params *af, unsigned int level) { ia_css_debug_dtrace(level, "\t%-32s = 
%d\n", "af_fir1[0]", af->fir1[0]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir1[1]", af->fir1[1]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir1[2]", af->fir1[2]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir1[3]", af->fir1[3]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir1[4]", af->fir1[4]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir1[5]", af->fir1[5]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir1[6]", af->fir1[6]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir2[0]", af->fir2[0]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir2[1]", af->fir2[1]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir2[2]", af->fir2[2]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir2[3]", af->fir2[3]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir2[4]", af->fir2[4]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir2[5]", af->fir2[5]); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "af_fir2[6]", af->fir2[6]); } void ia_css_s3a_dump( const struct sh_css_isp_s3a_params *s3a, unsigned int level) { ia_css_debug_dtrace(level, "S3A Support:\n"); ia_css_ae_dump(&s3a->ae, level); ia_css_awb_dump(&s3a->awb, level); ia_css_af_dump(&s3a->af, level); } void ia_css_s3a_debug_dtrace( const struct ia_css_3a_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.ae_y_coef_r=%d, config.ae_y_coef_g=%d, config.ae_y_coef_b=%d, config.awb_lg_high_raw=%d, config.awb_lg_low=%d, config.awb_lg_high=%d\n", config->ae_y_coef_r, config->ae_y_coef_g, config->ae_y_coef_b, config->awb_lg_high_raw, config->awb_lg_low, config->awb_lg_high); } #endif void ia_css_s3a_hmem_decode( struct ia_css_3a_statistics *host_stats, const struct ia_css_bh_table *hmem_buf) { struct ia_css_3a_rgby_output *out_ptr; int i; /* pixel counts(BQ) for 3A area */ int count_for_3a; int sum_r, diff; assert(host_stats); assert(host_stats->rgby_data); assert(hmem_buf); count_for_3a = host_stats->grid.width * host_stats->grid.height * 
host_stats->grid.bqs_per_grid_cell * host_stats->grid.bqs_per_grid_cell; out_ptr = host_stats->rgby_data; ia_css_bh_hmem_decode(out_ptr, hmem_buf); /* Calculate sum of histogram of R, which should not be less than count_for_3a */ sum_r = 0; for (i = 0; i < HMEM_UNIT_SIZE; i++) { sum_r += out_ptr[i].r; } if (sum_r < count_for_3a) { /* histogram is invalid */ return; } /* Verify for sum of histogram of R/G/B/Y */ #if 0 { int sum_g = 0; int sum_b = 0; int sum_y = 0; for (i = 0; i < HMEM_UNIT_SIZE; i++) { sum_g += out_ptr[i].g; sum_b += out_ptr[i].b; sum_y += out_ptr[i].y; } if (sum_g != sum_r || sum_b != sum_r || sum_y != sum_r) { /* histogram is invalid */ return; } } #endif /* * Limit the histogram area only to 3A area. * In DSP, the histogram of 0 is incremented for pixels * which are outside of 3A area. That amount should be subtracted here. * hist[0] = hist[0] - ((sum of all hist[]) - (pixel count for 3A area)) */ diff = sum_r - count_for_3a; out_ptr[0].r -= diff; out_ptr[0].g -= diff; out_ptr[0].b -= diff; out_ptr[0].y -= diff; } void ia_css_s3a_dmem_decode( struct ia_css_3a_statistics *host_stats, const struct ia_css_3a_output *isp_stats) { int isp_width, host_width, height, i; struct ia_css_3a_output *host_ptr; assert(host_stats); assert(host_stats->data); assert(isp_stats); isp_width = host_stats->grid.aligned_width; host_width = host_stats->grid.width; height = host_stats->grid.height; host_ptr = host_stats->data; /* Getting 3A statistics from DMEM does not involve any * transformation (like the VMEM version), we just copy the data * using a different output width. 
*/ for (i = 0; i < height; i++) { memcpy(host_ptr, isp_stats, host_width * sizeof(*host_ptr)); isp_stats += isp_width; host_ptr += host_width; } } /* MW: this is an ISP function */ static inline int merge_hi_lo_14(unsigned short hi, unsigned short lo) { int val = (int)((((unsigned int)hi << 14) & 0xfffc000) | ((unsigned int)lo & 0x3fff)); return val; } void ia_css_s3a_vmem_decode( struct ia_css_3a_statistics *host_stats, const u16 *isp_stats_hi, const uint16_t *isp_stats_lo) { int out_width, out_height, chunk, rest, kmax, y, x, k, elm_start, elm, ofs; const u16 *hi, *lo; struct ia_css_3a_output *output; assert(host_stats); assert(host_stats->data); assert(isp_stats_hi); assert(isp_stats_lo); output = host_stats->data; out_width = host_stats->grid.width; out_height = host_stats->grid.height; hi = isp_stats_hi; lo = isp_stats_lo; chunk = ISP_VEC_NELEMS >> host_stats->grid.deci_factor_log2; chunk = max(chunk, 1); for (y = 0; y < out_height; y++) { elm_start = y * ISP_S3ATBL_HI_LO_STRIDE; rest = out_width; x = 0; while (x < out_width) { kmax = (rest > chunk) ? chunk : rest; ofs = y * out_width + x; elm = elm_start + x * sizeof(*output) / sizeof(int32_t); for (k = 0; k < kmax; k++, elm++) { output[ofs + k].ae_y = merge_hi_lo_14( hi[elm + chunk * 0], lo[elm + chunk * 0]); output[ofs + k].awb_cnt = merge_hi_lo_14( hi[elm + chunk * 1], lo[elm + chunk * 1]); output[ofs + k].awb_gr = merge_hi_lo_14( hi[elm + chunk * 2], lo[elm + chunk * 2]); output[ofs + k].awb_r = merge_hi_lo_14( hi[elm + chunk * 3], lo[elm + chunk * 3]); output[ofs + k].awb_b = merge_hi_lo_14( hi[elm + chunk * 4], lo[elm + chunk * 4]); output[ofs + k].awb_gb = merge_hi_lo_14( hi[elm + chunk * 5], lo[elm + chunk * 5]); output[ofs + k].af_hpf1 = merge_hi_lo_14( hi[elm + chunk * 6], lo[elm + chunk * 6]); output[ofs + k].af_hpf2 = merge_hi_lo_14( hi[elm + chunk * 7], lo[elm + chunk * 7]); } x += chunk; rest -= chunk; } } }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/s3a/s3a_1.0/ia_css_s3a.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_iterator.host.h" #include "ia_css_frame_public.h" #include "ia_css_binary.h" #include "ia_css_err.h" #define IA_CSS_INCLUDE_CONFIGURATIONS #include "ia_css_isp_configs.h" static const struct ia_css_iterator_configuration default_config = { .input_info = (struct ia_css_frame_info *)NULL, }; void ia_css_iterator_config( struct sh_css_isp_iterator_isp_config *to, const struct ia_css_iterator_configuration *from, unsigned int size) { (void)size; ia_css_frame_info_to_frame_sp_info(&to->input_info, from->input_info); ia_css_frame_info_to_frame_sp_info(&to->internal_info, from->internal_info); ia_css_frame_info_to_frame_sp_info(&to->output_info, from->output_info); ia_css_frame_info_to_frame_sp_info(&to->vf_info, from->vf_info); ia_css_resolution_to_sp_resolution(&to->dvs_envelope, from->dvs_envelope); } int ia_css_iterator_configure(const struct ia_css_binary *binary, const struct ia_css_frame_info *in_info) { struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO; struct ia_css_iterator_configuration config = default_config; config.input_info = &binary->in_frame_info; config.internal_info = &binary->internal_frame_info; config.output_info = &binary->out_frame_info[0]; config.vf_info = &binary->vf_frame_info; config.dvs_envelope = &binary->dvs_envelope; /* Use in_info iso binary->in_frame_info. * They can differ in padded width in case of scaling, e.g. for capture_pp. 
* Find out why. */ if (in_info) config.input_info = in_info; if (binary->out_frame_info[0].res.width == 0) config.output_info = &binary->out_frame_info[1]; my_info = *config.output_info; config.output_info = &my_info; /* we do this only for preview pipe because in fill_binary_info function * we assign vf_out res to out res, but for ISP internal processing, we need * the original out res. for video pipe, it has two output pins --- out and * vf_out, so it can keep these two resolutions already. */ if (binary->info->sp.pipeline.mode == IA_CSS_BINARY_MODE_PREVIEW && binary->vf_downscale_log2 > 0) { /* TODO: Remove this after preview output decimation is fixed * by configuring out&vf info files properly */ my_info.padded_width <<= binary->vf_downscale_log2; my_info.res.width <<= binary->vf_downscale_log2; my_info.res.height <<= binary->vf_downscale_log2; } return ia_css_configure_iterator(binary, &config); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/iterator/iterator_1.0/ia_css_iterator.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "ia_css_cnr2.host.h" const struct ia_css_cnr_config default_cnr_config = { 0, 0, 100, 100, 100, 50, 50, 50 }; void ia_css_cnr_encode( struct sh_css_isp_cnr_params *to, const struct ia_css_cnr_config *from, unsigned int size) { (void)size; to->coring_u = from->coring_u; to->coring_v = from->coring_v; to->sense_gain_vy = from->sense_gain_vy; to->sense_gain_vu = from->sense_gain_vu; to->sense_gain_vv = from->sense_gain_vv; to->sense_gain_hy = from->sense_gain_hy; to->sense_gain_hu = from->sense_gain_hu; to->sense_gain_hv = from->sense_gain_hv; } void ia_css_cnr_dump( const struct sh_css_isp_cnr_params *cnr, unsigned int level); void ia_css_cnr_debug_dtrace( const struct ia_css_cnr_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.coring_u=%d, config.coring_v=%d, config.sense_gain_vy=%d, config.sense_gain_hy=%d, config.sense_gain_vu=%d, config.sense_gain_hu=%d, config.sense_gain_vv=%d, config.sense_gain_hv=%d\n", config->coring_u, config->coring_v, config->sense_gain_vy, config->sense_gain_hy, config->sense_gain_vu, config->sense_gain_hu, config->sense_gain_vv, config->sense_gain_hv); } void ia_css_init_cnr2_state( void/*struct sh_css_isp_cnr_vmem_state*/ * state, size_t size) { memset(state, 0, size); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_2/ia_css_cnr2.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "ia_css_cnr.host.h" /* keep the interface here, it is not enabled yet because host doesn't know the size of individual state */ void ia_css_init_cnr_state( void/*struct sh_css_isp_cnr_vmem_state*/ * state, size_t size) { memset(state, 0, size); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/cnr/cnr_1.0/ia_css_cnr.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_internal.h" #include "assert_support.h" #include "sh_css_frac.h" #include "ia_css_bh.host.h" void ia_css_bh_hmem_decode( struct ia_css_3a_rgby_output *out_ptr, const struct ia_css_bh_table *hmem_buf) { int i; /* * No weighted histogram, hence no grid definition */ if (!hmem_buf) return; assert(sizeof_hmem(HMEM0_ID) == sizeof(*hmem_buf)); /* Deinterleave */ for (i = 0; i < HMEM_UNIT_SIZE; i++) { out_ptr[i].r = hmem_buf->hmem[BH_COLOR_R][i]; out_ptr[i].g = hmem_buf->hmem[BH_COLOR_G][i]; out_ptr[i].b = hmem_buf->hmem[BH_COLOR_B][i]; out_ptr[i].y = hmem_buf->hmem[BH_COLOR_Y][i]; /* sh_css_print ("hmem[%d] = %d, %d, %d, %d\n", i, out_ptr[i].r, out_ptr[i].g, out_ptr[i].b, out_ptr[i].y); */ } } void ia_css_bh_encode( struct sh_css_isp_bh_params *to, const struct ia_css_3a_config *from, unsigned int size) { (void)size; /* coefficients to calculate Y */ to->y_coef_r = uDIGIT_FITTING(from->ae_y_coef_r, 16, SH_CSS_AE_YCOEF_SHIFT); to->y_coef_g = uDIGIT_FITTING(from->ae_y_coef_g, 16, SH_CSS_AE_YCOEF_SHIFT); to->y_coef_b = uDIGIT_FITTING(from->ae_y_coef_b, 16, SH_CSS_AE_YCOEF_SHIFT); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/bh/bh_2/ia_css_bh.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #ifndef IA_CSS_NO_DEBUG /* FIXME: See BZ 4427 */ #include "ia_css_debug.h" #endif #include "ia_css_csc.host.h" const struct ia_css_cc_config default_cc_config = { 8, {255, 29, 120, 0, -374, -342, 0, -672, 301}, }; void ia_css_encode_cc( struct sh_css_isp_csc_params *to, const struct ia_css_cc_config *from, unsigned int size) { (void)size; #ifndef IA_CSS_NO_DEBUG ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_encode_cc() enter:\n"); #endif to->m_shift = (int16_t)from->fraction_bits; to->m00 = (int16_t)from->matrix[0]; to->m01 = (int16_t)from->matrix[1]; to->m02 = (int16_t)from->matrix[2]; to->m10 = (int16_t)from->matrix[3]; to->m11 = (int16_t)from->matrix[4]; to->m12 = (int16_t)from->matrix[5]; to->m20 = (int16_t)from->matrix[6]; to->m21 = (int16_t)from->matrix[7]; to->m22 = (int16_t)from->matrix[8]; #ifndef IA_CSS_NO_DEBUG ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_encode_cc() leave:\n"); #endif } void ia_css_csc_encode( struct sh_css_isp_csc_params *to, const struct ia_css_cc_config *from, unsigned int size) { ia_css_encode_cc(to, from, size); } #ifndef IA_CSS_NO_DEBUG void ia_css_cc_dump( const struct sh_css_isp_csc_params *csc, unsigned int level, const char *name) { if (!csc) return; ia_css_debug_dtrace(level, "%s\n", name); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "m_shift", csc->m_shift); ia_css_debug_dtrace(level, 
"\t%-32s = %d\n", "m00", csc->m00); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "m01", csc->m01); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "m02", csc->m02); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "m10", csc->m10); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "m11", csc->m11); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "m12", csc->m12); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "m20", csc->m20); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "m21", csc->m21); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "m22", csc->m22); } void ia_css_csc_dump( const struct sh_css_isp_csc_params *csc, unsigned int level) { ia_css_cc_dump(csc, level, "Color Space Conversion"); } void ia_css_cc_config_debug_dtrace( const struct ia_css_cc_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.m[0]=%d, config.m[1]=%d, config.m[2]=%d, config.m[3]=%d, config.m[4]=%d, config.m[5]=%d, config.m[6]=%d, config.m[7]=%d, config.m[8]=%d\n", config->matrix[0], config->matrix[1], config->matrix[2], config->matrix[3], config->matrix[4], config->matrix[5], config->matrix[6], config->matrix[7], config->matrix[8]); } #endif
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/csc/csc_1.0/ia_css_csc.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "ia_css_conversion.host.h" const struct ia_css_conversion_config default_conversion_config = { 0, 0, 0, 0, }; void ia_css_conversion_encode( struct sh_css_isp_conversion_params *to, const struct ia_css_conversion_config *from, unsigned int size) { (void)size; to->en = from->en; to->dummy0 = from->dummy0; to->dummy1 = from->dummy1; to->dummy2 = from->dummy2; }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/conversion/conversion_1.0/ia_css_conversion.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #ifndef IA_CSS_NO_DEBUG #include "ia_css_debug.h" #endif #include "ia_css_aa2.host.h" /* YUV Anti-Aliasing configuration. */ const struct ia_css_aa_config default_aa_config = { 8191 /* default should be 0 */ }; /* Bayer Anti-Aliasing configuration. */ const struct ia_css_aa_config default_baa_config = { 8191 /* default should be 0 */ };
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/aa/aa_2/ia_css_aa2.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "sh_css_frac.h" #include "ia_css_dp.host.h" /* We use a different set of DPC configuration parameters when * DPC is used before OBC and NORM. Currently these parameters * are used in usecases which selects both BDS and DPC. **/ const struct ia_css_dp_config default_dp_10bpp_config = { 1024, 2048, 32768, 32768, 32768, 32768 }; const struct ia_css_dp_config default_dp_config = { 8192, 2048, 32768, 32768, 32768, 32768 }; void ia_css_dp_encode( struct sh_css_isp_dp_params *to, const struct ia_css_dp_config *from, unsigned int size) { int gain = from->gain; int gr = from->gr; int r = from->r; int b = from->b; int gb = from->gb; (void)size; to->threshold_single = SH_CSS_BAYER_MAXVAL; to->threshold_2adjacent = uDIGIT_FITTING(from->threshold, 16, SH_CSS_BAYER_BITS); to->gain = uDIGIT_FITTING(from->gain, 8, SH_CSS_DP_GAIN_SHIFT); to->coef_rr_gr = uDIGIT_FITTING(gain * gr / r, 8, SH_CSS_DP_GAIN_SHIFT); to->coef_rr_gb = uDIGIT_FITTING(gain * gb / r, 8, SH_CSS_DP_GAIN_SHIFT); to->coef_bb_gb = uDIGIT_FITTING(gain * gb / b, 8, SH_CSS_DP_GAIN_SHIFT); to->coef_bb_gr = uDIGIT_FITTING(gain * gr / b, 8, SH_CSS_DP_GAIN_SHIFT); to->coef_gr_rr = uDIGIT_FITTING(gain * r / gr, 8, SH_CSS_DP_GAIN_SHIFT); to->coef_gr_bb = uDIGIT_FITTING(gain * b / gr, 8, SH_CSS_DP_GAIN_SHIFT); to->coef_gb_bb = uDIGIT_FITTING(gain * b / gb, 8, 
SH_CSS_DP_GAIN_SHIFT); to->coef_gb_rr = uDIGIT_FITTING(gain * r / gb, 8, SH_CSS_DP_GAIN_SHIFT); } void ia_css_dp_dump( const struct sh_css_isp_dp_params *dp, unsigned int level) { if (!dp) return; ia_css_debug_dtrace(level, "Defect Pixel Correction:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dp_threshold_single_w_2adj_on", dp->threshold_single); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dp_threshold_2adj_w_2adj_on", dp->threshold_2adjacent); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dp_gain", dp->gain); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dpc_coef_rr_gr", dp->coef_rr_gr); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dpc_coef_rr_gb", dp->coef_rr_gb); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dpc_coef_bb_gb", dp->coef_bb_gb); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dpc_coef_bb_gr", dp->coef_bb_gr); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dpc_coef_gr_rr", dp->coef_gr_rr); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dpc_coef_gr_bb", dp->coef_gr_bb); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dpc_coef_gb_bb", dp->coef_gb_bb); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "dpc_coef_gb_rr", dp->coef_gb_rr); } void ia_css_dp_debug_dtrace( const struct ia_css_dp_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.threshold=%d, config.gain=%d\n", config->threshold, config->gain); } void ia_css_init_dp_state( void/*struct sh_css_isp_dp_vmem_state*/ * state, size_t size) { memset(state, 0, size); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/dp/dp_1.0/ia_css_dp.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "sh_css_frac.h" #include "ia_css_de.host.h" const struct ia_css_de_config default_de_config = { 0, 0, 0 }; void ia_css_de_encode( struct sh_css_isp_de_params *to, const struct ia_css_de_config *from, unsigned int size) { (void)size; to->pixelnoise = uDIGIT_FITTING(from->pixelnoise, 16, SH_CSS_BAYER_BITS); to->c1_coring_threshold = uDIGIT_FITTING(from->c1_coring_threshold, 16, SH_CSS_BAYER_BITS); to->c2_coring_threshold = uDIGIT_FITTING(from->c2_coring_threshold, 16, SH_CSS_BAYER_BITS); } void ia_css_de_dump( const struct sh_css_isp_de_params *de, unsigned int level) { if (!de) return; ia_css_debug_dtrace(level, "Demosaic:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "de_pixelnoise", de->pixelnoise); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "de_c1_coring_threshold", de->c1_coring_threshold); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "de_c2_coring_threshold", de->c2_coring_threshold); } void ia_css_de_debug_dtrace( const struct ia_css_de_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.pixelnoise=%d, config.c1_coring_threshold=%d, config.c2_coring_threshold=%d\n", config->pixelnoise, config->c1_coring_threshold, config->c2_coring_threshold); } void ia_css_init_de_state( void/*struct sh_css_isp_de_vmem_state*/ * state, size_t size) { memset(state, 0, size); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/de/de_1.0/ia_css_de.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "ia_css_de2.host.h" const struct ia_css_ecd_config default_ecd_config = { (1 << (ISP_VEC_ELEMBITS - 1)) * 2 / 3, /* 2/3 */ (1 << (ISP_VEC_ELEMBITS - 1)) - 1, /* 1.0 */ 0, /* 0.0 */ }; void ia_css_ecd_encode( struct sh_css_isp_ecd_params *to, const struct ia_css_ecd_config *from, unsigned int size) { (void)size; to->zip_strength = from->zip_strength; to->fc_strength = from->fc_strength; to->fc_debias = from->fc_debias; } void ia_css_ecd_dump( const struct sh_css_isp_ecd_params *ecd, unsigned int level); void ia_css_ecd_debug_dtrace( const struct ia_css_ecd_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.zip_strength=%d, config.fc_strength=%d, config.fc_debias=%d\n", config->zip_strength, config->fc_strength, config->fc_debias); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/de/de_2/ia_css_de2.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #ifndef IA_CSS_NO_DEBUG /* FIXME: See BZ 4427 */ #include "ia_css_debug.h" #endif #include "sh_css_frac.h" #include "vamem.h" #include "ia_css_gc.host.h" const struct ia_css_gc_config default_gc_config = { 0, 0 }; const struct ia_css_ce_config default_ce_config = { 0, 255 }; void ia_css_gc_encode( struct sh_css_isp_gc_params *to, const struct ia_css_gc_config *from, unsigned int size) { (void)size; to->gain_k1 = uDIGIT_FITTING((int)from->gain_k1, 16, IA_CSS_GAMMA_GAIN_K_SHIFT); to->gain_k2 = uDIGIT_FITTING((int)from->gain_k2, 16, IA_CSS_GAMMA_GAIN_K_SHIFT); } void ia_css_ce_encode( struct sh_css_isp_ce_params *to, const struct ia_css_ce_config *from, unsigned int size) { (void)size; to->uv_level_min = from->uv_level_min; to->uv_level_max = from->uv_level_max; } void ia_css_gc_vamem_encode( struct sh_css_isp_gc_vamem_params *to, const struct ia_css_gamma_table *from, unsigned int size) { (void)size; memcpy(&to->gc, &from->data, sizeof(to->gc)); } #ifndef IA_CSS_NO_DEBUG void ia_css_gc_dump( const struct sh_css_isp_gc_params *gc, unsigned int level) { if (!gc) return; ia_css_debug_dtrace(level, "Gamma Correction:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gamma_gain_k1", gc->gain_k1); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "gamma_gain_k2", gc->gain_k2); } void ia_css_ce_dump( const struct sh_css_isp_ce_params *ce, unsigned int level) { 
ia_css_debug_dtrace(level, "Chroma Enhancement:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ce_uv_level_min", ce->uv_level_min); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "ce_uv_level_max", ce->uv_level_max); } void ia_css_gc_debug_dtrace( const struct ia_css_gc_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.gain_k1=%d, config.gain_k2=%d\n", config->gain_k1, config->gain_k2); } void ia_css_ce_debug_dtrace( const struct ia_css_ce_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.uv_level_min=%d, config.uv_level_max=%d\n", config->uv_level_min, config->uv_level_max); } #endif
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/string.h> /* for memcpy() */
#include <type_support.h>
#include "system_global.h"
#include "vamem.h"
#include "ia_css_types.h"
#include "ia_css_gc_table.host.h"

/* Runtime copy of the default gamma LUT, filled by ia_css_config_gamma_table(). */
struct ia_css_gamma_table default_gamma_table;

/*
 * Default gamma curve for VAMEM type 2: monotonically non-decreasing
 * lookup values in the range [0, 255].
 */
static const uint16_t
default_gamma_table_data[IA_CSS_VAMEM_2_GAMMA_TABLE_SIZE] = {
	0,   4,   8,   12,  17,  21,  27,  32,
	38,  44,  49,  55,  61,  66,  71,  76,
	80,  84,  88,  92,  95,  98,  102, 105,
	108, 110, 113, 116, 118, 121, 123, 126,
	128, 130, 132, 135, 137, 139, 141, 143,
	145, 146, 148, 150, 152, 153, 155, 156,
	158, 160, 161, 162, 164, 165, 166, 168,
	169, 170, 171, 172, 174, 175, 176, 177,
	178, 179, 180, 181, 182, 183, 184, 184,
	185, 186, 187, 188, 189, 189, 190, 191,
	192, 192, 193, 194, 195, 195, 196, 197,
	197, 198, 198, 199, 200, 200, 201, 201,
	202, 203, 203, 204, 204, 205, 205, 206,
	206, 207, 207, 208, 208, 209, 209, 210,
	210, 210, 211, 211, 212, 212, 213, 213,
	214, 214, 214, 215, 215, 216, 216, 216,
	217, 217, 218, 218, 218, 219, 219, 220,
	220, 220, 221, 221, 222, 222, 222, 223,
	223, 223, 224, 224, 225, 225, 225, 226,
	226, 226, 227, 227, 227, 228, 228, 228,
	229, 229, 229, 230, 230, 230, 231, 231,
	231, 232, 232, 232, 233, 233, 233, 234,
	234, 234, 234, 235, 235, 235, 236, 236,
	236, 237, 237, 237, 237, 238, 238, 238,
	239, 239, 239, 239, 240, 240, 240, 241,
	241, 241, 241, 242, 242, 242, 242, 243,
	243, 243, 243, 244, 244, 244, 245, 245,
	245, 245, 246, 246, 246, 246, 247, 247,
	247, 247, 248, 248, 248, 248, 249, 249,
	249, 249, 250, 250, 250, 250, 251, 251,
	251, 251, 252, 252, 252, 252, 253, 253,
	253, 253, 254, 254, 254, 254, 255, 255,
	255
};

/*
 * Initialize default_gamma_table from the static data above and tag it
 * as a VAMEM type 2 table. Must be called before the table is used.
 */
void
ia_css_config_gamma_table(void)
{
	memcpy(default_gamma_table.data.vamem_2, default_gamma_table_data,
	       sizeof(default_gamma_table_data));
	default_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_1.0/ia_css_gc_table.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/string.h> /* for memcpy() */
#include <type_support.h>
#include "system_global.h"
#include "vamem.h"
#include "ia_css_types.h"
#include "ia_css_gc2_table.host.h"

/* Runtime copies of the default per-channel gamma LUTs, filled by
 * ia_css_config_rgb_gamma_tables().
 */
struct ia_css_rgb_gamma_table default_r_gamma_table;
struct ia_css_rgb_gamma_table default_g_gamma_table;
struct ia_css_rgb_gamma_table default_b_gamma_table;

/* Identical default gamma table for R, G, and B. */
static const uint16_t
default_gamma_table_data[IA_CSS_VAMEM_2_RGB_GAMMA_TABLE_SIZE] = {
	0, 72, 144, 216, 288, 360, 426, 486,
	541, 592, 641, 687, 730, 772, 812, 850,
	887, 923, 958, 991, 1024, 1055, 1086, 1117,
	1146, 1175, 1203, 1230, 1257, 1284, 1310, 1335,
	1360, 1385, 1409, 1433, 1457, 1480, 1502, 1525,
	1547, 1569, 1590, 1612, 1632, 1653, 1674, 1694,
	1714, 1734, 1753, 1772, 1792, 1811, 1829, 1848,
	1866, 1884, 1902, 1920, 1938, 1955, 1973, 1990,
	2007, 2024, 2040, 2057, 2074, 2090, 2106, 2122,
	2138, 2154, 2170, 2185, 2201, 2216, 2231, 2247,
	2262, 2277, 2291, 2306, 2321, 2335, 2350, 2364,
	2378, 2393, 2407, 2421, 2435, 2449, 2462, 2476,
	2490, 2503, 2517, 2530, 2543, 2557, 2570, 2583,
	2596, 2609, 2622, 2634, 2647, 2660, 2673, 2685,
	2698, 2710, 2722, 2735, 2747, 2759, 2771, 2783,
	2795, 2807, 2819, 2831, 2843, 2855, 2867, 2878,
	2890, 2901, 2913, 2924, 2936, 2947, 2958, 2970,
	2981, 2992, 3003, 3014, 3025, 3036, 3047, 3058,
	3069, 3080, 3091, 3102, 3112, 3123, 3134, 3144,
	3155, 3165, 3176, 3186, 3197, 3207, 3217, 3228,
	3238, 3248, 3258, 3268, 3279, 3289, 3299, 3309,
	3319, 3329, 3339, 3349, 3358, 3368, 3378, 3388,
	3398, 3407, 3417, 3427, 3436, 3446, 3455, 3465,
	3474, 3484, 3493, 3503, 3512, 3521, 3531, 3540,
	3549, 3559, 3568, 3577, 3586, 3595, 3605, 3614,
	3623, 3632, 3641, 3650, 3659, 3668, 3677, 3686,
	3694, 3703, 3712, 3721, 3730, 3739, 3747, 3756,
	3765, 3773, 3782, 3791, 3799, 3808, 3816, 3825,
	3833, 3842, 3850, 3859, 3867, 3876, 3884, 3893,
	3901, 3909, 3918, 3926, 3934, 3942, 3951, 3959,
	3967, 3975, 3984, 3992, 4000, 4008, 4016, 4024,
	4032, 4040, 4048, 4056, 4064, 4072, 4080, 4088,
	4095
};

/*
 * Initialize the three per-channel gamma tables with the shared default
 * curve and tag them as VAMEM type 2. Must be called before the tables
 * are used.
 */
void
ia_css_config_rgb_gamma_tables(void)
{
	default_r_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
	default_g_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
	default_b_gamma_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
	memcpy(default_r_gamma_table.data.vamem_2, default_gamma_table_data,
	       sizeof(default_gamma_table_data));
	memcpy(default_g_gamma_table.data.vamem_2, default_gamma_table_data,
	       sizeof(default_gamma_table_data));
	memcpy(default_b_gamma_table.data.vamem_2, default_gamma_table_data,
	       sizeof(default_gamma_table_data));
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2_table.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include "ia_css_types.h"
#include "sh_css_defs.h"
#ifndef IA_CSS_NO_DEBUG
/* FIXME: See BZ 4427 */
#include "ia_css_debug.h"
#endif
#include "csc/csc_1.0/ia_css_csc.host.h"
#include "vamem.h"

#include "ia_css_gc2.host.h"

/* Default YUV -> RGB color-conversion matrix (fixed-point coefficients). */
const struct ia_css_cc_config default_yuv2rgb_cc_config = {
	12,
	{4096, -4096, 4096, 4096, 4096, 0, 4096, -4096, -4096}
};

/* Default RGB -> YUV color-conversion matrix (fixed-point coefficients). */
const struct ia_css_cc_config default_rgb2yuv_cc_config = {
	13,
	{2449, 4809, 934, -1382, -2714, 4096, 4096, -3430, -666}
};

/* Encode the YUV->RGB conversion config via the shared CSC encoder. */
void
ia_css_yuv2rgb_encode(
    struct sh_css_isp_csc_params *to,
    const struct ia_css_cc_config *from,
    unsigned int size)
{
	ia_css_encode_cc(to, from, size);
}

/* Encode the RGB->YUV conversion config via the shared CSC encoder. */
void
ia_css_rgb2yuv_encode(
    struct sh_css_isp_csc_params *to,
    const struct ia_css_cc_config *from,
    unsigned int size)
{
	ia_css_encode_cc(to, from, size);
}

/* Copy the R-channel gamma lookup table into the VMEM parameter block. */
void
ia_css_r_gamma_vamem_encode(
    struct sh_css_isp_rgb_gamma_vamem_params *to,
    const struct ia_css_rgb_gamma_table *from,
    unsigned int size)
{
	(void)size;
	memcpy(&to->gc, &from->data, sizeof(to->gc));
}

/* Copy the G-channel gamma lookup table into the VMEM parameter block. */
void
ia_css_g_gamma_vamem_encode(
    struct sh_css_isp_rgb_gamma_vamem_params *to,
    const struct ia_css_rgb_gamma_table *from,
    unsigned int size)
{
	(void)size;
	memcpy(&to->gc, &from->data, sizeof(to->gc));
}

/* Copy the B-channel gamma lookup table into the VMEM parameter block. */
void
ia_css_b_gamma_vamem_encode(
    struct sh_css_isp_rgb_gamma_vamem_params *to,
    const struct ia_css_rgb_gamma_table *from,
    unsigned int size)
{
	(void)size;
	memcpy(&to->gc, &from->data, sizeof(to->gc));
}

#ifndef IA_CSS_NO_DEBUG
/* Dump the encoded YUV->RGB conversion parameters. */
void
ia_css_yuv2rgb_dump(
    const struct sh_css_isp_csc_params *yuv2rgb,
    unsigned int level)
{
	ia_css_cc_dump(yuv2rgb, level, "YUV to RGB Conversion");
}

/* Dump the encoded RGB->YUV conversion parameters. */
void
ia_css_rgb2yuv_dump(
    const struct sh_css_isp_csc_params *rgb2yuv,
    unsigned int level)
{
	ia_css_cc_dump(rgb2yuv, level, "RGB to YUV Conversion");
}

/* Intentionally empty: gamma tables are not traced. */
void
ia_css_rgb_gamma_table_debug_dtrace(
    const struct ia_css_rgb_gamma_table *config,
    unsigned int level)
{
	(void)config;
	(void)level;
}
#endif
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/gc/gc_2/ia_css_gc2.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef IA_CSS_NO_DEBUG
#include "ia_css_debug.h"
#endif
#include "type_support.h"
#include "assert_support.h"
#include "math_support.h" /* for min and max */

#include "ia_css_eed1_8.host.h"

/* WARNING1: Number of inv points should be less or equal to 16,
 * due to implementation limitation. See kernel design document
 * for more details.
 * WARNING2: Do not modify the number of inv points without correcting
 * the EED1_8 kernel implementation assumptions.
 */
#define NUMBER_OF_CHGRINV_POINTS 15
#define NUMBER_OF_TCINV_POINTS 9
#define NUMBER_OF_FCINV_POINTS 9

/* Piecewise-linear 1/x approximation tables (x = knee points, a/b/c =
 * per-segment coefficients). The x values must be ascending and start at 0;
 * this is asserted in ia_css_eed1_8_vmem_encode().
 */
static const s16 chgrinv_x[NUMBER_OF_CHGRINV_POINTS] = {
	0, 16, 64, 144, 272, 448, 672, 976,
	1376, 1888, 2528, 3312, 4256, 5376, 6688
};

static const s16 chgrinv_a[NUMBER_OF_CHGRINV_POINTS] = {
	-7171, -256, -29, -3456, -1071, -475, -189, -102,
	-48, -38, -10, -9, -7, -6, 0
};

static const s16 chgrinv_b[NUMBER_OF_CHGRINV_POINTS] = {
	8191, 1021, 256, 114, 60, 37, 24, 17,
	12, 9, 6, 5, 4, 3, 2
};

static const s16 chgrinv_c[NUMBER_OF_CHGRINV_POINTS] = {
	1, 1, 1, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0
};

static const s16 tcinv_x[NUMBER_OF_TCINV_POINTS] = {
	0, 4, 11, 23, 42, 68, 102, 148, 205
};

static const s16 tcinv_a[NUMBER_OF_TCINV_POINTS] = {
	-6364, -631, -126, -34, -13, -6, -4452, -2156, 0
};

static const s16 tcinv_b[NUMBER_OF_TCINV_POINTS] = {
	8191, 1828, 726, 352, 197, 121, 80, 55, 40
};

static const s16 tcinv_c[NUMBER_OF_TCINV_POINTS] = {
	1, 1, 1, 1, 1, 1, 0, 0, 0
};

static const s16 fcinv_x[NUMBER_OF_FCINV_POINTS] = {
	0, 80, 216, 456, 824, 1344, 2040, 2952, 4096
};

static const s16 fcinv_a[NUMBER_OF_FCINV_POINTS] = {
	-5244, -486, -86, -2849, -961, -400, -180, -86, 0
};

static const s16 fcinv_b[NUMBER_OF_FCINV_POINTS] = {
	8191, 1637, 607, 287, 159, 98, 64, 44, 32
};

static const s16 fcinv_c[NUMBER_OF_FCINV_POINTS] = {
	1, 1, 1, 0, 0, 0, 0, 0, 0
};

/*
 * Encode the EED1_8 VMEM parameters: replicate the de-warp enhancement
 * segment tables and the 1/x approximation tables into each of the four
 * 16-element shuffle blocks of a 64-element ISP vector.
 */
void
ia_css_eed1_8_vmem_encode(
    struct eed1_8_vmem_params *to,
    const struct ia_css_eed1_8_config *from,
    size_t size)
{
	unsigned int i, j, base;
	const unsigned int total_blocks = 4;
	const unsigned int shuffle_block = 16;

	(void)size;

	/* Init: zero the full vectors so the unused tail of each
	 * 16-element block is well defined. */
	for (i = 0; i < ISP_VEC_NELEMS; i++) {
		to->e_dew_enh_x[0][i] = 0;
		to->e_dew_enh_y[0][i] = 0;
		to->e_dew_enh_a[0][i] = 0;
		to->e_dew_enh_f[0][i] = 0;
		to->chgrinv_x[0][i] = 0;
		to->chgrinv_a[0][i] = 0;
		to->chgrinv_b[0][i] = 0;
		to->chgrinv_c[0][i] = 0;
		to->tcinv_x[0][i] = 0;
		to->tcinv_a[0][i] = 0;
		to->tcinv_b[0][i] = 0;
		to->tcinv_c[0][i] = 0;
		to->fcinv_x[0][i] = 0;
		to->fcinv_a[0][i] = 0;
		to->fcinv_b[0][i] = 0;
		to->fcinv_c[0][i] = 0;
	}

	/* Constraints on dew_enhance_seg_x and dew_enhance_seg_y:
	 * - values should be greater or equal to 0.
	 * - values should be ascending.
	 * - value of index zero is equal to 0.
	 */

	/* Checking constraints: */
	/* TODO: investigate if an assert is the right way to report that
	 * the constraints are violated.
	 */
	for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
		assert(from->dew_enhance_seg_x[j] > -1);
		assert(from->dew_enhance_seg_y[j] > -1);
	}

	for (j = 1; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
		assert(from->dew_enhance_seg_x[j] > from->dew_enhance_seg_x[j - 1]);
		assert(from->dew_enhance_seg_y[j] > from->dew_enhance_seg_y[j - 1]);
	}

	assert(from->dew_enhance_seg_x[0] == 0);
	assert(from->dew_enhance_seg_y[0] == 0);

	/* Constraints on chgrinv_x, tcinv_x and fcinv_x:
	 * - values should be greater or equal to 0.
	 * - values should be ascending.
	 * - value of index zero is equal to 0.
	 */
	assert(chgrinv_x[0] == 0);
	assert(tcinv_x[0] == 0);
	assert(fcinv_x[0] == 0);

	for (j = 1; j < NUMBER_OF_CHGRINV_POINTS; j++) {
		assert(chgrinv_x[j] > chgrinv_x[j - 1]);
	}

	for (j = 1; j < NUMBER_OF_TCINV_POINTS; j++) {
		assert(tcinv_x[j] > tcinv_x[j - 1]);
	}

	for (j = 1; j < NUMBER_OF_FCINV_POINTS; j++) {
		assert(fcinv_x[j] > fcinv_x[j - 1]);
	}

	/* The implementation of the calulating 1/x is based on the availability
	 * of the OP_vec_shuffle16 operation.
	 * A 64 element vector is split up in 4 blocks of 16 element. Each array is copied to
	 * a vector 4 times, (starting at 0, 16, 32 and 48). All array elements are copied or
	 * initialised as described in the KFS. The remaining elements of a vector are set to 0.
	 */
	/* TODO: guard this code with above assumptions */
	for (i = 0; i < total_blocks; i++) {
		base = shuffle_block * i;

		for (j = 0; j < IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS; j++) {
			/* x clamped to [0, 8191], y to [-8192, 8191]
			 * (13/14-bit fixed-point ranges). */
			to->e_dew_enh_x[0][base + j] = min_t(int, max_t(int,
							     from->dew_enhance_seg_x[j], 0),
							     8191);
			to->e_dew_enh_y[0][base + j] = min_t(int, max_t(int,
							     from->dew_enhance_seg_y[j], -8192),
							     8191);
		}

		for (j = 0; j < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); j++) {
			to->e_dew_enh_a[0][base + j] = min_t(int, max_t(int,
							     from->dew_enhance_seg_slope[j],
							     -8192), 8191);
			/* Convert dew_enhance_seg_exp to flag:
			 * 0 -> 0
			 * 1...13 -> 1
			 */
			to->e_dew_enh_f[0][base + j] = (min_t(int, max_t(int,
							      from->dew_enhance_seg_exp[j],
							      0), 13) > 0);
		}

		/* Hard-coded to 0, in order to be able to handle out of
		 * range input in the same way as the other segments.
		 * See KFS for more details.
		 */
		to->e_dew_enh_a[0][base + (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)] = 0;
		to->e_dew_enh_f[0][base + (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1)] = 0;

		for (j = 0; j < NUMBER_OF_CHGRINV_POINTS; j++) {
			to->chgrinv_x[0][base + j] = chgrinv_x[j];
			to->chgrinv_a[0][base + j] = chgrinv_a[j];
			to->chgrinv_b[0][base + j] = chgrinv_b[j];
			to->chgrinv_c[0][base + j] = chgrinv_c[j];
		}

		for (j = 0; j < NUMBER_OF_TCINV_POINTS; j++) {
			to->tcinv_x[0][base + j] = tcinv_x[j];
			to->tcinv_a[0][base + j] = tcinv_a[j];
			to->tcinv_b[0][base + j] = tcinv_b[j];
			to->tcinv_c[0][base + j] = tcinv_c[j];
		}

		for (j = 0; j < NUMBER_OF_FCINV_POINTS; j++) {
			to->fcinv_x[0][base + j] = fcinv_x[j];
			to->fcinv_a[0][base + j] = fcinv_a[j];
			to->fcinv_b[0][base + j] = fcinv_b[j];
			to->fcinv_c[0][base + j] = fcinv_c[j];
		}
	}
}

/*
 * Encode the EED1_8 DMEM parameters: copy the scalar config values and
 * pre-compute the threshold/gain/coring deltas used by the ISP code.
 */
void
ia_css_eed1_8_encode(
    struct eed1_8_dmem_params *to,
    const struct ia_css_eed1_8_config *from,
    size_t size)
{
	int i;
	/* NOTE(review): despite its name, min_exp accumulates the MAXIMUM
	 * of the dew_enhance_seg_exp values (see the max() below). */
	int min_exp = 0;

	(void)size;

	to->rbzp_strength = from->rbzp_strength;

	to->fcstrength = from->fcstrength;
	to->fcthres_0 = from->fcthres_0;
	to->fc_sat_coef = from->fc_sat_coef;
	to->fc_coring_prm = from->fc_coring_prm;
	to->fc_slope = from->fcthres_1 - from->fcthres_0;

	to->aerel_thres0 = from->aerel_thres0;
	to->aerel_gain0 = from->aerel_gain0;
	to->aerel_thres_diff = from->aerel_thres1 - from->aerel_thres0;
	to->aerel_gain_diff = from->aerel_gain1 - from->aerel_gain0;

	to->derel_thres0 = from->derel_thres0;
	to->derel_gain0 = from->derel_gain0;
	to->derel_thres_diff = (from->derel_thres1 - from->derel_thres0);
	to->derel_gain_diff = (from->derel_gain1 - from->derel_gain0);

	to->coring_pos0 = from->coring_pos0;
	to->coring_pos_diff = (from->coring_pos1 - from->coring_pos0);
	to->coring_neg0 = from->coring_neg0;
	to->coring_neg_diff = (from->coring_neg1 - from->coring_neg0);

	/* Note: (ISP_VEC_ELEMBITS -1)
	 * TODO: currently the testbench does not support to use
	 * ISP_VEC_ELEMBITS. Investigate how to fix this
	 */
	to->gain_exp = (13 - from->gain_exp);
	to->gain_pos0 = from->gain_pos0;
	to->gain_pos_diff = (from->gain_pos1 - from->gain_pos0);
	to->gain_neg0 = from->gain_neg0;
	to->gain_neg_diff = (from->gain_neg1 - from->gain_neg0);

	to->margin_pos0 = from->pos_margin0;
	to->margin_pos_diff = (from->pos_margin1 - from->pos_margin0);
	to->margin_neg0 = from->neg_margin0;
	to->margin_neg_diff = (from->neg_margin1 - from->neg_margin0);

	/* Encode DEWEnhance exp (e_dew_enh_asr) */
	for (i = 0; i < (IA_CSS_NUMBER_OF_DEW_ENHANCE_SEGMENTS - 1); i++) {
		min_exp = max(min_exp, from->dew_enhance_seg_exp[i]);
	}
	to->e_dew_enh_asr = 13 - min(max(min_exp, 0), 13);

	to->dedgew_max = from->dedgew_max;
}

/* Zero-initialize the EED1_8 ISP state buffer. */
void
ia_css_init_eed1_8_state(
    void *state,
    size_t size)
{
	memset(state, 0, size);
}

#ifndef IA_CSS_NO_DEBUG
/* Trace all fields of the user-space EED1_8 configuration. */
void
ia_css_eed1_8_debug_dtrace(
    const struct ia_css_eed1_8_config *eed,
    unsigned int level)
{
	if (!eed)
		return;

	ia_css_debug_dtrace(level, "Edge Enhancing Demosaic 1.8:\n");
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "rbzp_strength", eed->rbzp_strength);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "fcstrength", eed->fcstrength);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "fcthres_0", eed->fcthres_0);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "fcthres_1", eed->fcthres_1);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "fc_sat_coef", eed->fc_sat_coef);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "fc_coring_prm", eed->fc_coring_prm);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "aerel_thres0", eed->aerel_thres0);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "aerel_gain0", eed->aerel_gain0);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "aerel_thres1", eed->aerel_thres1);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "aerel_gain1", eed->aerel_gain1);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "derel_thres0", eed->derel_thres0);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "derel_gain0", eed->derel_gain0);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "derel_thres1", eed->derel_thres1);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "derel_gain1", eed->derel_gain1);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "coring_pos0", eed->coring_pos0);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "coring_pos1", eed->coring_pos1);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "coring_neg0", eed->coring_neg0);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "coring_neg1", eed->coring_neg1);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "gain_exp", eed->gain_exp);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "gain_pos0", eed->gain_pos0);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "gain_pos1", eed->gain_pos1);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "gain_neg0", eed->gain_neg0);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "gain_neg1", eed->gain_neg1);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "pos_margin0", eed->pos_margin0);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "pos_margin1", eed->pos_margin1);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "neg_margin0", eed->neg_margin0);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "neg_margin1", eed->neg_margin1);
	ia_css_debug_dtrace(level, "\t%-32s = %d\n",
			    "dedgew_max", eed->dedgew_max);
}
#endif
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/eed1_8/ia_css_eed1_8.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010 - 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_bayer_io.host.h" #include "dma.h" #include "math_support.h" #ifndef IA_CSS_NO_DEBUG #include "ia_css_debug.h" #endif #include "ia_css_isp_params.h" #include "ia_css_frame.h" int ia_css_bayer_io_config(const struct ia_css_binary *binary, const struct sh_css_binary_args *args) { const struct ia_css_frame *in_frame = args->in_frame; const struct ia_css_frame **out_frames = (const struct ia_css_frame **) &args->out_frame; const struct ia_css_frame_info *in_frame_info = ia_css_frame_get_info(in_frame); const unsigned int ddr_bits_per_element = sizeof(short) * 8; const unsigned int ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS, ddr_bits_per_element); unsigned int size_get = 0, size_put = 0; unsigned int offset = 0; int ret; if (binary->info->mem_offsets.offsets.param) { size_get = binary->info->mem_offsets.offsets.param->dmem.get.size; offset = binary->info->mem_offsets.offsets.param->dmem.get.offset; } if (size_get) { struct ia_css_common_io_config *to = (struct ia_css_common_io_config *) &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; struct dma_port_config config; #ifndef IA_CSS_NO_DEBUG ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() get part enter:\n"); #endif ret = ia_css_dma_configure_from_info(&config, in_frame_info); if (ret) return ret; // The base_address of the input frame will be set in the ISP 
to->width = in_frame_info->res.width; to->height = in_frame_info->res.height; to->stride = config.stride; to->ddr_elems_per_word = ddr_elems_per_word; #ifndef IA_CSS_NO_DEBUG ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() get part leave:\n"); #endif } if (binary->info->mem_offsets.offsets.param) { size_put = binary->info->mem_offsets.offsets.param->dmem.put.size; offset = binary->info->mem_offsets.offsets.param->dmem.put.offset; } if (size_put) { struct ia_css_common_io_config *to = (struct ia_css_common_io_config *) &binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset]; struct dma_port_config config; #ifndef IA_CSS_NO_DEBUG ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() put part enter:\n"); #endif ret = ia_css_dma_configure_from_info(&config, &out_frames[0]->frame_info); if (ret) return ret; to->base_address = out_frames[0]->data; to->width = out_frames[0]->frame_info.res.width; to->height = out_frames[0]->frame_info.res.height; to->stride = config.stride; to->ddr_elems_per_word = ddr_elems_per_word; #ifndef IA_CSS_NO_DEBUG ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ia_css_bayer_io_config() put part leave:\n"); #endif } return 0; }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/bayer_io_ls/ia_css_bayer_io.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2010 - 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include "ia_css_yuv444_io.host.h"
#include "dma.h"
#include "math_support.h"
#ifndef IA_CSS_NO_DEBUG
#include "ia_css_debug.h"
#endif
#include "ia_css_isp_params.h"
#include "ia_css_frame.h"

/*
 * Fill the yuv444 I/O DMEM parameters for a binary: a "get" block for the
 * input frame and a "put" block for the first output frame. Each block is
 * written only when the binary's parameter memory map reports a non-zero
 * size for it. Returns 0 on success or a negative error from the DMA
 * configuration. Mirrors ia_css_bayer_io_config().
 */
int ia_css_yuv444_io_config(const struct ia_css_binary *binary,
			    const struct sh_css_binary_args *args)
{
	const struct ia_css_frame *in_frame = args->in_frame;
	const struct ia_css_frame **out_frames = (const struct ia_css_frame **)
		&args->out_frame;
	const struct ia_css_frame_info *in_frame_info = ia_css_frame_get_info(in_frame);
	/* DDR geometry: 16-bit elements packed into DDR words. */
	const unsigned int ddr_bits_per_element = sizeof(short) * 8;
	const unsigned int ddr_elems_per_word = ceil_div(HIVE_ISP_DDR_WORD_BITS,
				  ddr_bits_per_element);
	unsigned int size_get = 0, size_put = 0;
	unsigned int offset = 0;
	int ret;

	if (binary->info->mem_offsets.offsets.param) {
		size_get = binary->info->mem_offsets.offsets.param->dmem.get.size;
		offset = binary->info->mem_offsets.offsets.param->dmem.get.offset;
	}

	if (size_get) {
		struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)
			&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
		struct dma_port_config config;
#ifndef IA_CSS_NO_DEBUG
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
				    "ia_css_yuv444_io_config() get part enter:\n");
#endif

		ret = ia_css_dma_configure_from_info(&config, in_frame_info);
		if (ret)
			return ret;

		// The base_address of the input frame will be set in the ISP
		to->width = in_frame_info->res.width;
		to->height = in_frame_info->res.height;
		to->stride = config.stride;
		to->ddr_elems_per_word = ddr_elems_per_word;
#ifndef IA_CSS_NO_DEBUG
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
				    "ia_css_yuv444_io_config() get part leave:\n");
#endif
	}

	if (binary->info->mem_offsets.offsets.param) {
		size_put = binary->info->mem_offsets.offsets.param->dmem.put.size;
		offset = binary->info->mem_offsets.offsets.param->dmem.put.offset;
	}

	if (size_put) {
		struct ia_css_common_io_config *to = (struct ia_css_common_io_config *)
			&binary->mem_params.params[IA_CSS_PARAM_CLASS_PARAM][IA_CSS_ISP_DMEM].address[offset];
		struct dma_port_config config;
#ifndef IA_CSS_NO_DEBUG
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
				    "ia_css_yuv444_io_config() put part enter:\n");
#endif

		ret = ia_css_dma_configure_from_info(&config, &out_frames[0]->frame_info);
		if (ret)
			return ret;

		to->base_address = out_frames[0]->data;
		to->width = out_frames[0]->frame_info.res.width;
		to->height = out_frames[0]->frame_info.res.height;
		to->stride = config.stride;
		to->ddr_elems_per_word = ddr_elems_per_word;

#ifndef IA_CSS_NO_DEBUG
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
				    "ia_css_yuv444_io_config() put part leave:\n");
#endif
	}
	return 0;
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/ipu2_io_ls/yuv444_io_ls/ia_css_yuv444_io.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "assert_support.h" #include "ia_css_ctc.host.h" const struct ia_css_ctc_config default_ctc_config = { ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */ ((1 << IA_CSS_CTC_COEF_SHIFT) + 1) / 2, /* 0.5 */ 1, SH_CSS_BAYER_MAXVAL / 5, /* To be implemented */ SH_CSS_BAYER_MAXVAL * 2 / 5, /* To be implemented */ SH_CSS_BAYER_MAXVAL * 3 / 5, /* To be implemented */ SH_CSS_BAYER_MAXVAL * 4 / 5, /* To be implemented */ }; void ia_css_ctc_vamem_encode( struct sh_css_isp_ctc_vamem_params *to, const struct ia_css_ctc_table *from, unsigned int size) { (void)size; memcpy(&to->ctc, &from->data, sizeof(to->ctc)); } void ia_css_ctc_debug_dtrace( const struct ia_css_ctc_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.ce_gain_exp=%d, config.y0=%d, config.x1=%d, config.y1=%d, config.x2=%d, config.y2=%d, config.x3=%d, config.y3=%d, config.x4=%d, config.y4=%d\n", config->ce_gain_exp, config->y0, config->x1, config->y1, config->x2, config->y2, config->x3, config->y3, config->x4, config->y4); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/string.h> /* for memcpy() */
#include <type_support.h>
#include "system_global.h"
#include "vamem.h"
#include "ia_css_types.h"
#include "ia_css_ctc_table.host.h"

/* Runtime copy of the default CTC LUT, filled by ia_css_config_ctc_table(). */
struct ia_css_ctc_table default_ctc_table;

/* Default chroma tone control curve for VAMEM type 2. */
static const uint16_t
default_ctc_table_data[IA_CSS_VAMEM_2_CTC_TABLE_SIZE] = {
	0, 384, 837, 957, 1011, 1062, 1083, 1080,
	1078, 1077, 1053, 1039, 1012, 992, 969, 951,
	929, 906, 886, 866, 845, 823, 809, 790,
	772, 758, 741, 726, 711, 701, 688, 675,
	666, 656, 648, 639, 633, 626, 618, 612,
	603, 594, 582, 572, 557, 545, 529, 516,
	504, 491, 480, 467, 459, 447, 438, 429,
	419, 412, 404, 397, 389, 382, 376, 368,
	363, 357, 351, 345, 340, 336, 330, 326,
	321, 318, 312, 308, 304, 300, 297, 294,
	291, 286, 284, 281, 278, 275, 271, 268,
	261, 257, 251, 245, 240, 235, 232, 225,
	223, 218, 213, 209, 206, 204, 199, 197,
	193, 189, 186, 185, 183, 179, 177, 175,
	172, 170, 169, 167, 164, 164, 162, 160,
	158, 157, 156, 154, 154, 152, 151, 150,
	149, 148, 146, 147, 146, 144, 143, 143,
	142, 141, 140, 141, 139, 138, 138, 138,
	137, 136, 136, 135, 134, 134, 134, 133,
	132, 132, 131, 130, 131, 130, 129, 128,
	129, 127, 127, 127, 127, 125, 125, 125,
	123, 123, 122, 120, 118, 115, 114, 111,
	110, 108, 106, 105, 103, 102, 100, 99,
	97, 97, 96, 95, 94, 93, 93, 91,
	91, 91, 90, 90, 89, 89, 88, 88,
	89, 88, 88, 87, 87, 87, 87, 86,
	87, 87, 86, 87, 86, 86, 84, 84,
	82, 80, 78, 76, 74, 72, 70, 68,
	67, 65, 62, 60, 58, 56, 55, 54,
	53, 51, 49, 49, 47, 45, 45, 45,
	41, 40, 39, 39, 34, 33, 34, 32,
	25, 23, 24, 20, 13, 9, 12, 0,
	0
};

/*
 * Initialize default_ctc_table from the static data above and tag it as
 * a VAMEM type 2 table. Must be called before the table is used.
 */
void
ia_css_config_ctc_table(void)
{
	memcpy(default_ctc_table.data.vamem_2, default_ctc_table_data,
	       sizeof(default_ctc_table_data));
	default_ctc_table.vamem_type = IA_CSS_VAMEM_TYPE_2;
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc_1.0/ia_css_ctc_table.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include "ia_css_types.h"
#include "sh_css_defs.h"
#include "assert_support.h"
#include "ia_css_ctc2.host.h"

#define INEFFECTIVE_VAL 4096
#define BASIC_VAL 819

/* Default configuration of parameters for Ctc2 */
const struct ia_css_ctc2_config default_ctc2_config = {
	INEFFECTIVE_VAL, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
	INEFFECTIVE_VAL, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
	BASIC_VAL * 2, BASIC_VAL * 4, BASIC_VAL * 6,
	BASIC_VAL * 8, INEFFECTIVE_VAL, INEFFECTIVE_VAL,
	BASIC_VAL >> 1, BASIC_VAL
};

/* (dydx) = ctc2_slope(y1, y0, x1, x0)
 * -----------------------------------------------
 * Calculation of the Slope of a Line = ((y1 - y0) >> 8)/(x1 - x0)
 *
 * The result is a rounded fixed-point slope, clamped to the signed range
 * [-max_slope - 1, max_slope] that the ISP coefficient format can hold.
 *
 * Note: y1, y0, x1 & x0 must lie within the range 0 <-> 8191
 */
static int ctc2_slope(int y1, int y0, int x1, int x0)
{
	const int shift_val = 8;
	const int max_slope = (1 << IA_CSS_CTC_COEF_SHIFT) - 1;
	int dy = y1 - y0;
	int dx = x1 - x0;
	int rounding = (dx + 1) >> 1; /* half of dx, for round-to-nearest */
	int dy_shift = dy << shift_val;
	int slope, dydx;

	/* Protection for parameter values, & avoiding zero divisions */
	assert(y0 >= 0 && y0 <= max_slope);
	assert(y1 >= 0 && y1 <= max_slope);
	assert(x0 >= 0 && x0 <= max_slope);
	assert(x1 > 0 && x1 <= max_slope);
	assert(dx > 0);

	/* rounding must carry the sign of the numerator */
	if (dy < 0)
		rounding = -rounding;
	slope = (int)(dy_shift + rounding) / dx;

	/* the slope must lie within the range
	 * (-max_slope - 1) <= (dydx) <= (max_slope)
	 */
	if (slope <= -max_slope - 1) {
		dydx = -max_slope - 1;
	} else if (slope >= max_slope) {
		dydx = max_slope;
	} else {
		dydx = slope;
	}

	return dydx;
}

/* (void) = ia_css_ctc2_vmem_encode(*to, *from)
 * -----------------------------------------------
 * VMEM Encode Function to translate Y parameters from userspace into ISP space
 */
void ia_css_ctc2_vmem_encode(struct ia_css_isp_ctc2_vmem_params *to,
			     const struct ia_css_ctc2_config *from,
			     size_t size)
{
	unsigned int i, j;
	const unsigned int shffl_blck = 4;	/* 4 blocks; also 1<<4 = block stride of 16 */
	const unsigned int length_zeros = 11;	/* zero padding after the 5 data entries */
	short dydx0, dydx1, dydx2, dydx3, dydx4;

	(void)size;
	/*
	 * Calculation of slopes of lines interconnecting
	 * 0.0 -> y_x1 -> y_x2 -> y_x3 -> y_x4 -> 1.0
	 */
	dydx0 = ctc2_slope(from->y_y1, from->y_y0,
			   from->y_x1, 0);
	dydx1 = ctc2_slope(from->y_y2, from->y_y1,
			   from->y_x2, from->y_x1);
	dydx2 = ctc2_slope(from->y_y3, from->y_y2,
			   from->y_x3, from->y_x2);
	dydx3 = ctc2_slope(from->y_y4, from->y_y3,
			   from->y_x4, from->y_x3);
	dydx4 = ctc2_slope(from->y_y5, from->y_y4,
			   SH_CSS_BAYER_MAXVAL, from->y_x4);

	/* Fill 3 arrays with:
	 * - Luma input gain values y_y0, y_y1, y_y2, y_y3, y_y4
	 * - Luma kneepoints 0, y_x1, y_x2, y_x3, y_x4
	 * - Calculated slopes dydx0, dydx1, dydx2, dydx3, dydx4
	 *
	 * - Each 64-element array is divided in blocks of 16 elements:
	 *   the 5 parameters + zeros in the remaining 11 positions
	 * - All blocks of the same array will contain the same data
	 */
	for (i = 0; i < shffl_blck; i++) {
		to->y_x[0][(i << shffl_blck)] = 0;
		to->y_x[0][(i << shffl_blck) + 1] = from->y_x1;
		to->y_x[0][(i << shffl_blck) + 2] = from->y_x2;
		to->y_x[0][(i << shffl_blck) + 3] = from->y_x3;
		to->y_x[0][(i << shffl_blck) + 4] = from->y_x4;

		to->y_y[0][(i << shffl_blck)] = from->y_y0;
		to->y_y[0][(i << shffl_blck) + 1] = from->y_y1;
		to->y_y[0][(i << shffl_blck) + 2] = from->y_y2;
		to->y_y[0][(i << shffl_blck) + 3] = from->y_y3;
		to->y_y[0][(i << shffl_blck) + 4] = from->y_y4;

		to->e_y_slope[0][(i << shffl_blck)] = dydx0;
		to->e_y_slope[0][(i << shffl_blck) + 1] = dydx1;
		to->e_y_slope[0][(i << shffl_blck) + 2] = dydx2;
		to->e_y_slope[0][(i << shffl_blck) + 3] = dydx3;
		to->e_y_slope[0][(i << shffl_blck) + 4] = dydx4;

		for (j = 0; j < length_zeros; j++) {
			to->y_x[0][(i << shffl_blck) + 5 + j] = 0;
			to->y_y[0][(i << shffl_blck) + 5 + j] = 0;
			to->e_y_slope[0][(i << shffl_blck) + 5 + j] = 0;
		}
	}
}

/* (void) = ia_css_ctc2_encode(*to, *from)
 * -----------------------------------------------
 * DMEM Encode Function to translate UV parameters from userspace into ISP space
 */
void ia_css_ctc2_encode(struct ia_css_isp_ctc2_dmem_params *to,
			struct ia_css_ctc2_config *from,
			size_t size)
{
	(void)size;

	to->uv_y0 = from->uv_y0;
	to->uv_y1 = from->uv_y1;
	to->uv_x0 = from->uv_x0;
	to->uv_x1 = from->uv_x1;

	/* Slope Calculation */
	to->uv_dydx = ctc2_slope(from->uv_y1, from->uv_y0,
				 from->uv_x1, from->uv_x0);
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc2/ia_css_ctc2.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include "ia_css_types.h"
#include "sh_css_defs.h"
#include "ia_css_debug.h"
#include "assert_support.h"

#include "ctc/ctc_1.0/ia_css_ctc.host.h"
#include "ia_css_ctc1_5.host.h"

/*
 * ctc_gradient() - compute the fixed-point gradient of the segment
 * (x0,y0)->(x1,y1) as a mantissa (*dydx) plus a shift (*shift), such that
 * dy/dx ~= *dydx / (1 << *shift) with *dydx <= max_dydx.
 *
 * On dx == 0 the function traces an error and returns without writing
 * the outputs.
 * NOTE(review): the range asserts run after the division; with dx == 0
 * already handled this is safe, but moving them up front would catch bad
 * input earlier — confirm before changing, asserts may be compiled out.
 */
static void ctc_gradient(
    int *dydx, int *shift,
    int y1, int y0, int x1, int x0)
{
	int frc_bits = max(IA_CSS_CTC_COEF_SHIFT, 16);
	int dy = y1 - y0;
	int dx = x1 - x0;
	int dydx_int;
	int dydx_frc;
	int sft;
	/* max_dydx = the maxinum gradient = the maximum y (gain) */
	int max_dydx = (1 << IA_CSS_CTC_COEF_SHIFT) - 1;

	if (dx == 0) {
		ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE,
				    "ctc_gradient() error, illegal division operation\n");
		return;
	} else {
		/* split dy/dx into integer part and frc_bits of fraction */
		dydx_int = dy / dx;
		dydx_frc = ((dy - dydx_int * dx) << frc_bits) / dx;
	}

	assert(y0 >= 0 && y0 <= max_dydx);
	assert(y1 >= 0 && y1 <= max_dydx);
	assert(x0 < x1);
	assert(dydx);
	assert(shift);

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() enter:\n");

	/* search "sft" which meets this condition:
	 * (1 << (IA_CSS_CTC_COEF_SHIFT - 1))
	 *	<= (((float)dy / (float)dx) * (1 << sft))
	 *	<= ((1 << IA_CSS_CTC_COEF_SHIFT) - 1)
	 */
	for (sft = 0; sft <= IA_CSS_CTC_COEF_SHIFT; sft++) {
		int tmp_dydx = (dydx_int << sft)
			       + (dydx_frc >> (frc_bits - sft));
		/* keep the largest representable mantissa/shift pair */
		if (tmp_dydx <= max_dydx) {
			*dydx = tmp_dydx;
			*shift = sft;
		}
		if (tmp_dydx >= max_dydx)
			break;
	}

	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "ctc_gradient() leave:\n");
}

/*
 * ia_css_ctc_encode() - translate the user-space CTC configuration into the
 * ISP parameter layout: copy the kneepoints/gains verbatim and derive the
 * per-segment gradients (dydx*, dydx*_shift) for the 5 segments
 * 0 -> x1 -> x2 -> x3 -> x4 -> SH_CSS_BAYER_MAXVAL.
 */
void ia_css_ctc_encode(
    struct sh_css_isp_ctc_params *to,
    const struct ia_css_ctc_config *from,
    unsigned int size)
{
	(void)size;

	to->y0 = from->y0;
	to->y1 = from->y1;
	to->y2 = from->y2;
	to->y3 = from->y3;
	to->y4 = from->y4;
	to->y5 = from->y5;

	to->ce_gain_exp = from->ce_gain_exp;

	to->x1 = from->x1;
	to->x2 = from->x2;
	to->x3 = from->x3;
	to->x4 = from->x4;

	ctc_gradient(&to->dydx0,
		     &to->dydx0_shift,
		     from->y1, from->y0,
		     from->x1, 0);

	ctc_gradient(&to->dydx1,
		     &to->dydx1_shift,
		     from->y2, from->y1,
		     from->x2, from->x1);

	ctc_gradient(&to->dydx2,
		     &to->dydx2_shift,
		     from->y3, from->y2,
		     from->x3, from->x2);

	ctc_gradient(&to->dydx3,
		     &to->dydx3_shift,
		     from->y4, from->y3,
		     from->x4, from->x3);

	ctc_gradient(&to->dydx4,
		     &to->dydx4_shift,
		     from->y5, from->y4,
		     SH_CSS_BAYER_MAXVAL, from->x4);
}

/* Forward declaration only; the definition lives elsewhere. */
void ia_css_ctc_dump(
    const struct sh_css_isp_ctc_params *ctc,
    unsigned int level);
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/ctc/ctc1_5/ia_css_ctc1_5.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include "hmm.h"

#include <assert_support.h>
#include "ia_css_debug.h"
#include "ia_css_sdis2.host.h"

/* All-zero / all-NULL DVS2 coefficient set used as the default. */
const struct ia_css_dvs2_coefficients default_sdis2_config = {
	.grid = { 0, 0, 0, 0, 0, 0, 0, 0 },
	.hor_coefs = { NULL, NULL, NULL, NULL },
	.ver_coefs = { NULL, NULL, NULL, NULL },
};

/* Copy 'width' shorts from public to private and zero-fill 'padding' more. */
static void
fill_row(short *private, const short *public, unsigned int width,
	 unsigned int padding)
{
	memcpy(private, public, width * sizeof(short));
	memset(&private[width], 0, padding * sizeof(short));
}

/*
 * Encode the 4 horizontal DVS2 coefficient rows (odd/even x real/imag) into
 * the ISP VMEM table 'to', padding each row out to the aligned grid width.
 */
void ia_css_sdis2_horicoef_vmem_encode(
    struct sh_css_isp_sdis_hori_coef_tbl *to,
    const struct ia_css_dvs2_coefficients *from,
    unsigned int size)
{
	unsigned int aligned_width = from->grid.aligned_width *
				     from->grid.bqs_per_grid_cell;
	unsigned int width = from->grid.num_hor_coefs;
	int padding = aligned_width - width;
	unsigned int stride = size / IA_CSS_DVS2_NUM_COEF_TYPES / sizeof(short);
	unsigned int total_bytes = aligned_width * IA_CSS_DVS2_NUM_COEF_TYPES *
				   sizeof(short);
	short *private = (short *)to;

	/* Copy the table, add padding */
	assert(padding >= 0);
	assert(total_bytes <= size);
	assert(size % (IA_CSS_DVS2_NUM_COEF_TYPES * ISP_VEC_NELEMS * sizeof(
			   short)) == 0);
	fill_row(&private[0 * stride], from->hor_coefs.odd_real, width, padding);
	fill_row(&private[1 * stride], from->hor_coefs.odd_imag, width, padding);
	fill_row(&private[2 * stride], from->hor_coefs.even_real, width, padding);
	fill_row(&private[3 * stride], from->hor_coefs.even_imag, width, padding);
}

/* Vertical counterpart of ia_css_sdis2_horicoef_vmem_encode(). */
void ia_css_sdis2_vertcoef_vmem_encode(
    struct sh_css_isp_sdis_vert_coef_tbl *to,
    const struct ia_css_dvs2_coefficients *from,
    unsigned int size)
{
	unsigned int aligned_height = from->grid.aligned_height *
				      from->grid.bqs_per_grid_cell;
	unsigned int height = from->grid.num_ver_coefs;
	int padding = aligned_height - height;
	unsigned int stride = size / IA_CSS_DVS2_NUM_COEF_TYPES / sizeof(short);
	unsigned int total_bytes = aligned_height * IA_CSS_DVS2_NUM_COEF_TYPES *
				   sizeof(short);
	short *private = (short *)to;

	/* Copy the table, add padding */
	assert(padding >= 0);
	assert(total_bytes <= size);
	assert(size % (IA_CSS_DVS2_NUM_COEF_TYPES * ISP_VEC_NELEMS * sizeof(
			   short)) == 0);
	fill_row(&private[0 * stride], from->ver_coefs.odd_real, height, padding);
	fill_row(&private[1 * stride], from->ver_coefs.odd_imag, height, padding);
	fill_row(&private[2 * stride], from->ver_coefs.even_real, height, padding);
	fill_row(&private[3 * stride], from->ver_coefs.even_imag, height, padding);
}

/* Intentional no-op: DVS2 has no horizontal projection table to encode. */
void ia_css_sdis2_horiproj_encode(
    struct sh_css_isp_sdis_hori_proj_tbl *to,
    const struct ia_css_dvs2_coefficients *from,
    unsigned int size)
{
	(void)to;
	(void)from;
	(void)size;
}

/* Intentional no-op: DVS2 has no vertical projection table to encode. */
void ia_css_sdis2_vertproj_encode(
    struct sh_css_isp_sdis_vert_proj_tbl *to,
    const struct ia_css_dvs2_coefficients *from,
    unsigned int size)
{
	(void)to;
	(void)from;
	(void)size;
}

/*
 * Copy the stream's current DVS2 coefficients into 8 caller-provided arrays.
 * Each output array must hold at least dis.coef.dim.width (hor) or
 * dis.coef.dim.height (ver) shorts. Returns silently when the stream has no
 * DVS binary (only the video pipe supports DVS).
 */
void ia_css_get_isp_dvs2_coefficients(
    struct ia_css_stream *stream,
    short *hor_coefs_odd_real,
    short *hor_coefs_odd_imag,
    short *hor_coefs_even_real,
    short *hor_coefs_even_imag,
    short *ver_coefs_odd_real,
    short *ver_coefs_odd_imag,
    short *ver_coefs_even_real,
    short *ver_coefs_even_imag)
{
	struct ia_css_isp_parameters *params;
	unsigned int hor_num_3a, ver_num_3a;
	struct ia_css_binary *dvs_binary;

	IA_CSS_ENTER("void");

	assert(stream);
	assert(hor_coefs_odd_real);
	assert(hor_coefs_odd_imag);
	assert(hor_coefs_even_real);
	assert(hor_coefs_even_imag);
	assert(ver_coefs_odd_real);
	assert(ver_coefs_odd_imag);
	assert(ver_coefs_even_real);
	assert(ver_coefs_even_imag);

	params = stream->isp_params_configs;

	/* Only video pipe supports DVS */
	dvs_binary = ia_css_stream_get_dvs_binary(stream);
	if (!dvs_binary)
		return;

	hor_num_3a = dvs_binary->dis.coef.dim.width;
	ver_num_3a = dvs_binary->dis.coef.dim.height;

	memcpy(hor_coefs_odd_real, params->dvs2_coefs.hor_coefs.odd_real,
	       hor_num_3a * sizeof(short));
	memcpy(hor_coefs_odd_imag, params->dvs2_coefs.hor_coefs.odd_imag,
	       hor_num_3a * sizeof(short));
	memcpy(hor_coefs_even_real, params->dvs2_coefs.hor_coefs.even_real,
	       hor_num_3a * sizeof(short));
	memcpy(hor_coefs_even_imag, params->dvs2_coefs.hor_coefs.even_imag,
	       hor_num_3a * sizeof(short));
	memcpy(ver_coefs_odd_real, params->dvs2_coefs.ver_coefs.odd_real,
	       ver_num_3a * sizeof(short));
	memcpy(ver_coefs_odd_imag, params->dvs2_coefs.ver_coefs.odd_imag,
	       ver_num_3a * sizeof(short));
	memcpy(ver_coefs_even_real, params->dvs2_coefs.ver_coefs.even_real,
	       ver_num_3a * sizeof(short));
	memcpy(ver_coefs_even_imag, params->dvs2_coefs.ver_coefs.even_imag,
	       ver_num_3a * sizeof(short));

	IA_CSS_LEAVE("void");
}

/* Reset all 8 coefficient pointers to NULL (does not free the buffers). */
void ia_css_sdis2_clear_coefficients(
    struct ia_css_dvs2_coefficients *dvs2_coefs)
{
	dvs2_coefs->hor_coefs.odd_real = NULL;
	dvs2_coefs->hor_coefs.odd_imag = NULL;
	dvs2_coefs->hor_coefs.even_real = NULL;
	dvs2_coefs->hor_coefs.even_imag = NULL;
	dvs2_coefs->ver_coefs.odd_real = NULL;
	dvs2_coefs->ver_coefs.odd_imag = NULL;
	dvs2_coefs->ver_coefs.even_real = NULL;
	dvs2_coefs->ver_coefs.even_imag = NULL;
}

/*
 * Load raw DVS2 statistics from ISP memory into a temporary host map and
 * translate them into host_stats. Returns 0 on success, -ENOMEM when the
 * temporary map cannot be allocated.
 */
int ia_css_get_dvs2_statistics(
    struct ia_css_dvs2_statistics	   *host_stats,
    const struct ia_css_isp_dvs_statistics *isp_stats)
{
	struct ia_css_isp_dvs_statistics_map *map;
	int ret = 0;

	IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);

	assert(host_stats);
	assert(isp_stats);

	map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
	if (map) {
		hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
		ia_css_translate_dvs2_statistics(host_stats, map);
		ia_css_isp_dvs_statistics_map_free(map);
	} else {
		IA_CSS_ERROR("out of memory");
		ret = -ENOMEM;
	}

	IA_CSS_LEAVE_ERR(ret);
	return ret;
}

/*
 * Repack DVS2 statistics from the DDR layout (rows padded to the system bus
 * word, 4 coefficient-type tables per direction laid out back to back) into
 * the host layout (rows of grid.aligned_width, no padding).
 */
void ia_css_translate_dvs2_statistics(
    struct ia_css_dvs2_statistics		*host_stats,
    const struct ia_css_isp_dvs_statistics_map	*isp_stats)
{
	unsigned int size_bytes, table_width, table_size, height;
	unsigned int src_offset = 0, dst_offset = 0;
	s32 *htemp_ptr, *vtemp_ptr;

	assert(host_stats);
	assert(host_stats->hor_prod.odd_real);
	assert(host_stats->hor_prod.odd_imag);
	assert(host_stats->hor_prod.even_real);
	assert(host_stats->hor_prod.even_imag);
	assert(host_stats->ver_prod.odd_real);
	assert(host_stats->ver_prod.odd_imag);
	assert(host_stats->ver_prod.even_real);
	assert(host_stats->ver_prod.even_imag);
	assert(isp_stats);
	assert(isp_stats->hor_proj);
	assert(isp_stats->ver_proj);

	IA_CSS_ENTER("hor_coefs.odd_real=%p, hor_coefs.odd_imag=%p, hor_coefs.even_real=%p, hor_coefs.even_imag=%p, ver_coefs.odd_real=%p, ver_coefs.odd_imag=%p, ver_coefs.even_real=%p, ver_coefs.even_imag=%p, haddr=%p, vaddr=%p",
		     host_stats->hor_prod.odd_real,
		     host_stats->hor_prod.odd_imag,
		     host_stats->hor_prod.even_real,
		     host_stats->hor_prod.even_imag,
		     host_stats->ver_prod.odd_real,
		     host_stats->ver_prod.odd_imag,
		     host_stats->ver_prod.even_real,
		     host_stats->ver_prod.even_imag,
		     isp_stats->hor_proj, isp_stats->ver_proj);

	/* Host side: reflecting the true width in bytes */
	size_bytes = host_stats->grid.aligned_width * sizeof(*htemp_ptr);

	/* DDR side: need to be aligned to the system bus width */
	/* statistics table width in terms of 32-bit words */
	table_width = CEIL_MUL(size_bytes,
			       HIVE_ISP_DDR_WORD_BYTES) / sizeof(*htemp_ptr);
	table_size = table_width * host_stats->grid.aligned_height;

	htemp_ptr = isp_stats->hor_proj; /* horizontal stats */
	vtemp_ptr = isp_stats->ver_proj; /* vertical stats */
	for (height = 0; height < host_stats->grid.aligned_height; height++) {
		/* hor stats */
		memcpy(host_stats->hor_prod.odd_real + dst_offset,
		       &htemp_ptr[0 * table_size + src_offset], size_bytes);
		memcpy(host_stats->hor_prod.odd_imag + dst_offset,
		       &htemp_ptr[1 * table_size + src_offset], size_bytes);
		memcpy(host_stats->hor_prod.even_real + dst_offset,
		       &htemp_ptr[2 * table_size + src_offset], size_bytes);
		memcpy(host_stats->hor_prod.even_imag + dst_offset,
		       &htemp_ptr[3 * table_size + src_offset], size_bytes);

		/* ver stats */
		memcpy(host_stats->ver_prod.odd_real + dst_offset,
		       &vtemp_ptr[0 * table_size + src_offset], size_bytes);
		memcpy(host_stats->ver_prod.odd_imag + dst_offset,
		       &vtemp_ptr[1 * table_size + src_offset], size_bytes);
		memcpy(host_stats->ver_prod.even_real + dst_offset,
		       &vtemp_ptr[2 * table_size + src_offset], size_bytes);
		memcpy(host_stats->ver_prod.even_imag + dst_offset,
		       &vtemp_ptr[3 * table_size + src_offset], size_bytes);

		src_offset += table_width; /* aligned table width */
		dst_offset += host_stats->grid.aligned_width;
	}

	IA_CSS_LEAVE("void");
}

/*
 * Allocate an ISP-side DVS2 statistics buffer sized for 'grid': per
 * direction, 4 coefficient-type tables of aligned_height rows, each row
 * padded up to HIVE_ISP_DDR_WORD_BYTES. Returns NULL when the grid is
 * disabled or on allocation failure; free with
 * ia_css_isp_dvs2_statistics_free().
 */
struct ia_css_isp_dvs_statistics *
ia_css_isp_dvs2_statistics_allocate(
    const struct ia_css_dvs_grid_info *grid)
{
	struct ia_css_isp_dvs_statistics *me;
	int size;

	assert(grid);

	IA_CSS_ENTER("grid=%p", grid);

	if (!grid->enable)
		return NULL;

	me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
	if (!me)
		goto err;

	/* on ISP 2 SDIS DMA model, every row of projection table width must be
	   aligned to HIVE_ISP_DDR_WORD_BYTES
	*/
	size = CEIL_MUL(sizeof(int) * grid->aligned_width,
			HIVE_ISP_DDR_WORD_BYTES)
	       * grid->aligned_height * IA_CSS_DVS2_NUM_COEF_TYPES;

	me->size = 2 * size; /* horizontal + vertical halves */

	me->data_ptr = hmm_alloc(me->size);

	if (me->data_ptr == mmgr_NULL)
		goto err;

	me->hor_proj = me->data_ptr;
	me->hor_size = size;
	me->ver_proj = me->data_ptr + size;
	me->ver_size = size;

	IA_CSS_LEAVE("return=%p", me);
	return me;
err:
	ia_css_isp_dvs2_statistics_free(me);
	IA_CSS_LEAVE("return=%p", NULL);

	return NULL;
}

/* Free a buffer from ia_css_isp_dvs2_statistics_allocate(); NULL is a no-op. */
void
ia_css_isp_dvs2_statistics_free(struct ia_css_isp_dvs_statistics *me)
{
	if (me) {
		hmm_free(me->data_ptr);
		kvfree(me);
	}
}

/* Debug-trace stub (nothing to dump for DVS2 horizontal coefficients). */
void ia_css_sdis2_horicoef_debug_dtrace(
    const struct ia_css_dvs2_coefficients *config, unsigned int level)
{
	(void)config;
	(void)level;
}

/* Debug-trace stub. */
void ia_css_sdis2_vertcoef_debug_dtrace(
    const struct ia_css_dvs2_coefficients *config, unsigned int level)
{
	(void)config;
	(void)level;
}

/* Debug-trace stub. */
void ia_css_sdis2_horiproj_debug_dtrace(
    const struct ia_css_dvs2_coefficients *config, unsigned int level)
{
	(void)config;
	(void)level;
}

/* Debug-trace stub. */
void ia_css_sdis2_vertproj_debug_dtrace(
    const struct ia_css_dvs2_coefficients *config, unsigned int level)
{
	(void)config;
	(void)level;
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_2/ia_css_sdis2.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include "hmm.h"

#include "assert_support.h"
#include "ia_css_debug.h"
#include "ia_css_sdis_types.h"
#include "sdis/common/ia_css_sdis_common.host.h"
#include "ia_css_sdis.host.h"

/* All-zero / all-NULL DVS (v1) coefficient set used as the default. */
const struct ia_css_dvs_coefficients default_sdis_config = {
	.grid = { 0, 0, 0, 0, 0, 0, 0, 0 },
	.hor_coefs = NULL,
	.ver_coefs = NULL
};

/* Copy 'width' shorts from public to private and zero-fill 'padding' more. */
static void
fill_row(short *private, const short *public, unsigned int width,
	 unsigned int padding)
{
	assert((int)width >= 0);
	assert((int)padding >= 0);
	memcpy(private, public, width * sizeof(short));
	memset(&private[width], 0, padding * sizeof(short));
}

/*
 * Encode the horizontal DVS coefficient table into ISP VMEM layout: one
 * padded row per coefficient type, rows 'stride' shorts apart.
 */
void ia_css_sdis_horicoef_vmem_encode(
    struct sh_css_isp_sdis_hori_coef_tbl *to,
    const struct ia_css_dvs_coefficients *from,
    unsigned int size)
{
	unsigned int aligned_width = from->grid.aligned_width *
				     from->grid.bqs_per_grid_cell;
	unsigned int width = from->grid.num_hor_coefs;
	int padding = aligned_width - width;
	unsigned int stride = size / IA_CSS_DVS_NUM_COEF_TYPES / sizeof(short);
	unsigned int total_bytes = aligned_width * IA_CSS_DVS_NUM_COEF_TYPES * sizeof(
				       short);
	short *public = from->hor_coefs;
	short *private = (short *)to;
	unsigned int type;

	/* Copy the table, add padding */
	assert(padding >= 0);
	assert(total_bytes <= size);
	assert(size % (IA_CSS_DVS_NUM_COEF_TYPES * ISP_VEC_NELEMS * sizeof(
			   short)) == 0);

	for (type = 0; type < IA_CSS_DVS_NUM_COEF_TYPES; type++) {
		fill_row(&private[type * stride], &public[type * width],
			 width, padding);
	}
}

/* Vertical counterpart of ia_css_sdis_horicoef_vmem_encode(). */
void ia_css_sdis_vertcoef_vmem_encode(
    struct sh_css_isp_sdis_vert_coef_tbl *to,
    const struct ia_css_dvs_coefficients *from,
    unsigned int size)
{
	unsigned int aligned_height = from->grid.aligned_height *
				      from->grid.bqs_per_grid_cell;
	unsigned int height = from->grid.num_ver_coefs;
	int padding = aligned_height - height;
	unsigned int stride = size / IA_CSS_DVS_NUM_COEF_TYPES / sizeof(short);
	unsigned int total_bytes = aligned_height * IA_CSS_DVS_NUM_COEF_TYPES *
				   sizeof(short);
	short *public = from->ver_coefs;
	short *private = (short *)to;
	unsigned int type;

	/* Copy the table, add padding */
	assert(padding >= 0);
	assert(total_bytes <= size);
	assert(size % (IA_CSS_DVS_NUM_COEF_TYPES * ISP_VEC_NELEMS * sizeof(
			   short)) == 0);

	for (type = 0; type < IA_CSS_DVS_NUM_COEF_TYPES; type++) {
		fill_row(&private[type * stride], &public[type * height],
			 height, padding);
	}
}

/* Intentional no-op: no horizontal projection data is encoded host-side. */
void ia_css_sdis_horiproj_encode(
    struct sh_css_isp_sdis_hori_proj_tbl *to,
    const struct ia_css_dvs_coefficients *from,
    unsigned int size)
{
	(void)to;
	(void)from;
	(void)size;
}

/* Intentional no-op: no vertical projection data is encoded host-side. */
void ia_css_sdis_vertproj_encode(
    struct sh_css_isp_sdis_vert_proj_tbl *to,
    const struct ia_css_dvs_coefficients *from,
    unsigned int size)
{
	(void)to;
	(void)from;
	(void)size;
}

/*
 * Copy the stream's DIS coefficients into caller-provided arrays, one padded
 * row (pad.width / pad.height) per coefficient type. Returns silently when
 * the stream has no DVS binary.
 * NOTE(review): unlike the sdis2 variant, 'stream' is dereferenced without
 * an assert — callers must pass a valid stream.
 */
void ia_css_get_isp_dis_coefficients(
    struct ia_css_stream *stream,
    short *horizontal_coefficients,
    short *vertical_coefficients)
{
	struct ia_css_isp_parameters *params;
	unsigned int hor_num_isp, ver_num_isp;
	unsigned int hor_num_3a, ver_num_3a;
	int i;
	struct ia_css_binary *dvs_binary;

	IA_CSS_ENTER("void");

	assert(horizontal_coefficients);
	assert(vertical_coefficients);

	params = stream->isp_params_configs;

	/* Only video pipe supports DVS */
	dvs_binary = ia_css_stream_get_dvs_binary(stream);
	if (!dvs_binary)
		return;

	hor_num_isp = dvs_binary->dis.coef.pad.width;
	ver_num_isp = dvs_binary->dis.coef.pad.height;
	hor_num_3a  = dvs_binary->dis.coef.dim.width;
	ver_num_3a  = dvs_binary->dis.coef.dim.height;

	for (i = 0; i < IA_CSS_DVS_NUM_COEF_TYPES; i++) {
		fill_row(&horizontal_coefficients[i * hor_num_isp],
			 &params->dvs_coefs.hor_coefs[i * hor_num_3a],
			 hor_num_3a, hor_num_isp - hor_num_3a);
	}
	for (i = 0; i < SH_CSS_DIS_VER_NUM_COEF_TYPES(dvs_binary); i++) {
		fill_row(&vertical_coefficients[i * ver_num_isp],
			 &params->dvs_coefs.ver_coefs[i * ver_num_3a],
			 ver_num_3a, ver_num_isp - ver_num_3a);
	}

	IA_CSS_LEAVE("void");
}

/*
 * Size in bytes of the horizontal coefficient table; pipeline version 1 uses
 * the DVS1 type count, later versions the DVS2 count.
 */
size_t
ia_css_sdis_hor_coef_tbl_bytes(
    const struct ia_css_binary *binary)
{
	if (binary->info->sp.pipeline.isp_pipe_version == 1)
		return sizeof(short) * IA_CSS_DVS_NUM_COEF_TYPES *
		       binary->dis.coef.pad.width;
	else
		return sizeof(short) * IA_CSS_DVS2_NUM_COEF_TYPES *
		       binary->dis.coef.pad.width;
}

/* Size in bytes of the vertical coefficient table. */
size_t
ia_css_sdis_ver_coef_tbl_bytes(
    const struct ia_css_binary *binary)
{
	return sizeof(short) * SH_CSS_DIS_VER_NUM_COEF_TYPES(binary) *
	       binary->dis.coef.pad.height;
}

/*
 * Derive all SDIS grid/coefficient/projection dimensions from the 3A DIS
 * window size (in pixels; converted to BQS and decimated). When 'enabled'
 * is false the whole info struct is zeroed instead.
 */
void
ia_css_sdis_init_info(
    struct ia_css_sdis_info *dis,
    unsigned int sc_3a_dis_width,
    unsigned int sc_3a_dis_padded_width,
    unsigned int sc_3a_dis_height,
    unsigned int isp_pipe_version,
    unsigned int enabled)
{
	if (!enabled) {
		*dis = (struct ia_css_sdis_info) { };
		return;
	}

	dis->deci_factor_log2 = SH_CSS_DIS_DECI_FACTOR_LOG2;

	dis->grid.dim.width  =
	    _ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
	dis->grid.dim.height =
	    _ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
	dis->grid.pad.width  =
	    CEIL_SHIFT(_ISP_BQS(sc_3a_dis_padded_width), SH_CSS_DIS_DECI_FACTOR_LOG2);
	dis->grid.pad.height =
	    CEIL_SHIFT(_ISP_BQS(sc_3a_dis_height), SH_CSS_DIS_DECI_FACTOR_LOG2);

	/* round down to a whole number of decimated cells, then back up */
	dis->coef.dim.width  =
	    (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) <<
	    SH_CSS_DIS_DECI_FACTOR_LOG2;
	dis->coef.dim.height =
	    (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2) <<
	    SH_CSS_DIS_DECI_FACTOR_LOG2;
	dis->coef.pad.width  =
	    __ISP_SDIS_HOR_COEF_NUM_VECS(sc_3a_dis_padded_width) * ISP_VEC_NELEMS;
	dis->coef.pad.height =
	    __ISP_SDIS_VER_COEF_NUM_VECS(sc_3a_dis_height) * ISP_VEC_NELEMS;
	if (isp_pipe_version == 1) {
		/* v1: hor projection length = grid height and vice versa */
		dis->proj.dim.width  =
		    _ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
		dis->proj.dim.height =
		    _ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2;
	} else {
		/* v2+: both dimensions are the full cell count */
		dis->proj.dim.width  =
		    (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) *
		    (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2);
		dis->proj.dim.height =
		    (_ISP_BQS(sc_3a_dis_width) >> SH_CSS_DIS_DECI_FACTOR_LOG2) *
		    (_ISP_BQS(sc_3a_dis_height) >> SH_CSS_DIS_DECI_FACTOR_LOG2);
	}
	dis->proj.pad.width  =
	    __ISP_SDIS_HOR_PROJ_NUM_ISP(sc_3a_dis_padded_width,
					sc_3a_dis_height,
					SH_CSS_DIS_DECI_FACTOR_LOG2,
					isp_pipe_version);
	dis->proj.pad.height =
	    __ISP_SDIS_VER_PROJ_NUM_ISP(sc_3a_dis_padded_width,
					SH_CSS_DIS_DECI_FACTOR_LOG2);
}

/* Reset both coefficient pointers to NULL (does not free the buffers). */
void ia_css_sdis_clear_coefficients(
    struct ia_css_dvs_coefficients *dvs_coefs)
{
	dvs_coefs->hor_coefs = NULL;
	dvs_coefs->ver_coefs = NULL;
}

/*
 * Load raw DVS statistics from ISP memory into a temporary host map and
 * translate them into host_stats. Returns 0 on success, -ENOMEM when the
 * temporary map cannot be allocated.
 */
int
ia_css_get_dvs_statistics(
    struct ia_css_dvs_statistics	   *host_stats,
    const struct ia_css_isp_dvs_statistics *isp_stats)
{
	struct ia_css_isp_dvs_statistics_map *map;
	int ret = 0;

	IA_CSS_ENTER("host_stats=%p, isp_stats=%p", host_stats, isp_stats);

	assert(host_stats);
	assert(isp_stats);

	map = ia_css_isp_dvs_statistics_map_allocate(isp_stats, NULL);
	if (map) {
		hmm_load(isp_stats->data_ptr, map->data_ptr, isp_stats->size);
		ia_css_translate_dvs_statistics(host_stats, map);
		ia_css_isp_dvs_statistics_map_free(map);
	} else {
		IA_CSS_ERROR("out of memory");
		ret = -ENOMEM;
	}

	IA_CSS_LEAVE_ERR(ret);
	return ret;
}

/*
 * Copy DVS projections from the ISP map layout into the host layout,
 * trimming each row from the aligned length to the true grid length.
 * Note: per the code below, horizontal projections are sized by the grid
 * *height* and vertical ones by the grid *width* (projections run along the
 * orthogonal axis).
 */
void
ia_css_translate_dvs_statistics(
    struct ia_css_dvs_statistics		*host_stats,
    const struct ia_css_isp_dvs_statistics_map	*isp_stats)
{
	unsigned int hor_num_isp, ver_num_isp, hor_num_dvs, ver_num_dvs, i;
	s32 *hor_ptr_dvs, *ver_ptr_dvs, *hor_ptr_isp, *ver_ptr_isp;

	assert(host_stats);
	assert(host_stats->hor_proj);
	assert(host_stats->ver_proj);
	assert(isp_stats);
	assert(isp_stats->hor_proj);
	assert(isp_stats->ver_proj);

	IA_CSS_ENTER("hproj=%p, vproj=%p, haddr=%p, vaddr=%p",
		     host_stats->hor_proj, host_stats->ver_proj,
		     isp_stats->hor_proj, isp_stats->ver_proj);

	hor_num_isp = host_stats->grid.aligned_height;
	ver_num_isp = host_stats->grid.aligned_width;
	hor_ptr_isp = isp_stats->hor_proj;
	ver_ptr_isp = isp_stats->ver_proj;
	hor_num_dvs = host_stats->grid.height;
	ver_num_dvs = host_stats->grid.width;
	hor_ptr_dvs = host_stats->hor_proj;
	ver_ptr_dvs = host_stats->ver_proj;

	for (i = 0; i < IA_CSS_DVS_NUM_COEF_TYPES; i++) {
		memcpy(hor_ptr_dvs, hor_ptr_isp, hor_num_dvs * sizeof(int32_t));
		hor_ptr_isp += hor_num_isp;
		hor_ptr_dvs += hor_num_dvs;

		memcpy(ver_ptr_dvs, ver_ptr_isp, ver_num_dvs * sizeof(int32_t));
		ver_ptr_isp += ver_num_isp;
		ver_ptr_dvs += ver_num_dvs;
	}

	IA_CSS_LEAVE("void");
}

/*
 * Allocate an ISP-side DVS statistics buffer sized for 'grid'; each
 * direction's block is rounded up to HIVE_ISP_DDR_WORD_BYTES. Returns NULL
 * when the grid is disabled or on allocation failure; free with
 * ia_css_isp_dvs_statistics_free().
 */
struct ia_css_isp_dvs_statistics *
ia_css_isp_dvs_statistics_allocate(
    const struct ia_css_dvs_grid_info *grid)
{
	struct ia_css_isp_dvs_statistics *me;
	int hor_size, ver_size;

	assert(grid);

	IA_CSS_ENTER("grid=%p", grid);

	if (!grid->enable)
		return NULL;

	me = kvcalloc(1, sizeof(*me), GFP_KERNEL);
	if (!me)
		goto err;

	hor_size = CEIL_MUL(sizeof(int) * IA_CSS_DVS_NUM_COEF_TYPES *
			    grid->aligned_height,
			    HIVE_ISP_DDR_WORD_BYTES);
	ver_size = CEIL_MUL(sizeof(int) * IA_CSS_DVS_NUM_COEF_TYPES *
			    grid->aligned_width,
			    HIVE_ISP_DDR_WORD_BYTES);

	me->size = hor_size + ver_size;
	me->data_ptr = hmm_alloc(me->size);

	if (me->data_ptr == mmgr_NULL)
		goto err;

	me->hor_size = hor_size;
	me->hor_proj = me->data_ptr;
	me->ver_size = ver_size;
	me->ver_proj = me->data_ptr + hor_size;

	IA_CSS_LEAVE("return=%p", me);
	return me;
err:
	ia_css_isp_dvs_statistics_free(me);
	IA_CSS_LEAVE("return=%p", NULL);

	return NULL;
}

/*
 * Build a host-side map over a DVS statistics buffer. When data_ptr is NULL
 * a buffer of isp_stats->size is allocated (and owned by the map, see
 * data_allocated); otherwise the caller's buffer is used. Returns NULL on
 * allocation failure.
 */
struct ia_css_isp_dvs_statistics_map *
ia_css_isp_dvs_statistics_map_allocate(
    const struct ia_css_isp_dvs_statistics *isp_stats,
    void *data_ptr)
{
	struct ia_css_isp_dvs_statistics_map *me;
	/* Windows compiler does not like adding sizes to a void *
	 * so we use a local char * instead. */
	char *base_ptr;

	me = kvmalloc(sizeof(*me), GFP_KERNEL);
	if (!me) {
		IA_CSS_LOG("cannot allocate memory");
		goto err;
	}

	me->data_ptr = data_ptr;
	me->data_allocated = !data_ptr;

	if (!me->data_ptr) {
		me->data_ptr = kvmalloc(isp_stats->size, GFP_KERNEL);
		if (!me->data_ptr) {
			IA_CSS_LOG("cannot allocate memory");
			goto err;
		}
	}
	base_ptr = me->data_ptr;

	me->size = isp_stats->size;
	/* GCC complains when we assign a char * to a void *, so these
	 * casts are necessary unfortunately. */
	me->hor_proj = (void *)base_ptr;
	me->ver_proj = (void *)(base_ptr + isp_stats->hor_size);

	return me;
err:
	kvfree(me);
	return NULL;
}

/* Free a map and, if the map owns it, its data buffer; NULL is a no-op. */
void
ia_css_isp_dvs_statistics_map_free(struct ia_css_isp_dvs_statistics_map *me)
{
	if (me) {
		if (me->data_allocated)
			kvfree(me->data_ptr);
		kvfree(me);
	}
}

/* Free a buffer from ia_css_isp_dvs_statistics_allocate(); NULL is a no-op. */
void
ia_css_isp_dvs_statistics_free(struct ia_css_isp_dvs_statistics *me)
{
	if (me) {
		hmm_free(me->data_ptr);
		kvfree(me);
	}
}

/* Debug-trace stub. */
void ia_css_sdis_horicoef_debug_dtrace(
    const struct ia_css_dvs_coefficients *config, unsigned int level)
{
	(void)config;
	(void)level;
}

/* Debug-trace stub. */
void ia_css_sdis_vertcoef_debug_dtrace(
    const struct ia_css_dvs_coefficients *config, unsigned int level)
{
	(void)config;
	(void)level;
}

/* Debug-trace stub. */
void ia_css_sdis_horiproj_debug_dtrace(
    const struct ia_css_dvs_coefficients *config, unsigned int level)
{
	(void)config;
	(void)level;
}

/* Debug-trace stub. */
void ia_css_sdis_vertproj_debug_dtrace(
    const struct ia_css_dvs_coefficients *config, unsigned int level)
{
	(void)config;
	(void)level;
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/sdis/sdis_1.0/ia_css_sdis.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include "hmm.h"
#include "ia_css_frame_public.h"
#define IA_CSS_INCLUDE_CONFIGURATIONS
#include "ia_css_isp_configs.h"
#include "ia_css_types.h"
#include "ia_css_host_data.h"
#include "sh_css_param_dvs.h"
#include "sh_css_params.h"
#include "ia_css_binary.h"
#include "ia_css_debug.h"
#include "assert_support.h"
#include "ia_css_dvs.host.h"

/* Default DVS configuration: no frame info attached until configure time. */
static const struct ia_css_dvs_configuration default_config = {
	.info = (struct ia_css_frame_info *)NULL,
};

/*
 * Translate a host-side DVS configuration into the ISP parameter layout.
 * The output frame resolution is expressed as a block count per dimension
 * via the DVS_NUM_BLOCKS_X/Y macros.  'size' is part of the generated
 * config-function signature and is unused here.
 */
void
ia_css_dvs_config(
    struct sh_css_isp_dvs_isp_config *to,
    const struct ia_css_dvs_configuration *from,
    unsigned int size)
{
	(void)size;
	to->num_horizontal_blocks =
	    DVS_NUM_BLOCKS_X(from->info->res.width);
	to->num_vertical_blocks =
	    DVS_NUM_BLOCKS_Y(from->info->res.height);
}

/*
 * Attach 'info' to a copy of the default configuration and hand it to the
 * generated ia_css_configure_dvs() helper for the given binary.
 * Returns whatever ia_css_configure_dvs() returns (0 on success).
 */
int
ia_css_dvs_configure(const struct ia_css_binary *binary,
		     const struct ia_css_frame_info *info)
{
	struct ia_css_dvs_configuration config = default_config;

	config.info = info;

	return ia_css_configure_dvs(binary, &config);
}

/*
 * Convert one plane of a DVS 6-axis morphing table into GDC warp
 * parameter entries.
 *
 * gdc_warp_table: destination host-data buffer; its 'address' holds an
 *                 array of struct gdc_warp_param_mem_s entries.
 * config:         source morphing table (separate Y and UV coordinate
 *                 grids, each (num_blocks + 1) points per dimension).
 * i_stride:       input frame stride in pixels (converted to bytes below).
 * o_width/o_height: output plane resolution.
 * uv_flag:        0 = Y plane, 1 = packed UV plane (half-resolution blocks).
 *
 * Coordinates are fixed point with DVS_COORD_FRAC_BITS fraction bits
 * (evidenced by the >> DVS_COORD_FRAC_BITS / << DVS_COORD_FRAC_BITS
 * conversions below).  Entries are stored in Y0 Y1 UV interleaved order,
 * hence the pointer-stepping logic at the end of the loop.
 */
static void
convert_coords_to_ispparams(
    struct ia_css_host_data *gdc_warp_table,
    const struct ia_css_dvs_6axis_config *config,
    unsigned int i_stride,
    unsigned int o_width,
    unsigned int o_height,
    unsigned int uv_flag)
{
	unsigned int i, j;
	gdc_warp_param_mem_t s = { 0 };
	unsigned int x00, x01, x10, x11,
		 y00, y01, y10, y11;
	unsigned int xmin, ymin, xmax, ymax;
	unsigned int topleft_x, topleft_y, bottom_x, bottom_y,
		 topleft_x_frac, topleft_y_frac;
	/* Extra input margin required by the chosen interpolation kernel. */
	unsigned int dvs_interp_envelope = (DVS_GDC_INTERP_METHOD ==
					    HRT_GDC_BLI_MODE ?
					    DVS_GDC_BLI_INTERP_ENVELOPE : DVS_GDC_BCI_INTERP_ENVELOPE);
	/* number of blocks per height and width */
	unsigned int num_blocks_y = (uv_flag ? DVS_NUM_BLOCKS_Y_CHROMA(
					 o_height) : DVS_NUM_BLOCKS_Y(o_height));
	unsigned int num_blocks_x = (uv_flag ? DVS_NUM_BLOCKS_X_CHROMA(
					 o_width) : DVS_NUM_BLOCKS_X(
					 o_width));	// round num_x up to blockdim_x, if it concerns the Y0Y1 block (uv_flag==0) round up to even
	unsigned int in_stride = i_stride * DVS_INPUT_BYTES_PER_PIXEL;
	unsigned int width, height;
	unsigned int *xbuff = NULL;
	unsigned int *ybuff = NULL;
	struct gdc_warp_param_mem_s *ptr;

	assert(config);
	assert(gdc_warp_table);
	assert(gdc_warp_table->address);

	ptr = (struct gdc_warp_param_mem_s *)gdc_warp_table->address;

	ptr += (2 * uv_flag); /* format is Y0 Y1 UV, so UV starts at 3rd position */

	if (uv_flag == 0) {
		xbuff = config->xcoords_y;
		ybuff = config->ycoords_y;
		width = config->width_y;
		height = config->height_y;
	} else {
		xbuff = config->xcoords_uv;
		ybuff = config->ycoords_uv;
		width = config->width_uv;
		height = config->height_uv;
	}

	IA_CSS_LOG("blockdim_x %d blockdim_y %d",
		   DVS_BLOCKDIM_X, DVS_BLOCKDIM_Y_LUMA >> uv_flag);
	IA_CSS_LOG("num_blocks_x %d num_blocks_y %d", num_blocks_x, num_blocks_y);
	IA_CSS_LOG("width %d height %d", width, height);

	assert(width == num_blocks_x +
	       1);	// the width and height of the provided morphing table should be 1 more than the number of blocks
	assert(height == num_blocks_y + 1);

	for (j = 0; j < num_blocks_y; j++) {
		for (i = 0; i < num_blocks_x; i++) {
			/* Fetch the four corner coordinates of this block
			 * from the morphing table (row-major, 'width' points
			 * per row). */
			x00 = xbuff[j * width + i];
			x01 = xbuff[j * width + (i + 1)];
			x10 = xbuff[(j + 1) * width + i];
			x11 = xbuff[(j + 1) * width + (i + 1)];

			y00 = ybuff[j * width + i];
			y01 = ybuff[j * width + (i + 1)];
			y10 = ybuff[(j + 1) * width + i];
			y11 = ybuff[(j + 1) * width + (i + 1)];

			/* Bounding box of the warped block. */
			xmin = min(x00, x10);
			xmax = max(x01, x11);
			ymin = min(y00, y01);
			ymax = max(y10, y11);

			/* Assert that right column's X is greater */
			assert(x01 >= xmin);
			assert(x11 >= xmin);

			/* Assert that bottom row's Y is greater */
			assert(y10 >= ymin);
			assert(y11 >= ymin);

			/* Integer part of the top-left corner; X is rounded
			 * down to the XMEM alignment boundary. */
			topleft_y = ymin >> DVS_COORD_FRAC_BITS;
			topleft_x = ((xmin >> DVS_COORD_FRAC_BITS)
				     >> XMEM_ALIGN_LOG2)
				    << (XMEM_ALIGN_LOG2);
			s.in_addr_offset = topleft_y * in_stride + topleft_x;

			/* similar to topleft_y calculation, but round up if ymax
			 * has any fraction bits */
			bottom_y = CEIL_DIV(ymax, 1 << DVS_COORD_FRAC_BITS);
			s.in_block_height = bottom_y - topleft_y + dvs_interp_envelope;

			bottom_x = CEIL_DIV(xmax, 1 << DVS_COORD_FRAC_BITS);
			s.in_block_width = bottom_x - topleft_x + dvs_interp_envelope;

			/* Corner coordinates relative to the (aligned)
			 * top-left, still in fixed point. */
			topleft_x_frac = topleft_x << (DVS_COORD_FRAC_BITS);
			topleft_y_frac = topleft_y << (DVS_COORD_FRAC_BITS);

			s.p0_x = x00 - topleft_x_frac;
			s.p1_x = x01 - topleft_x_frac;
			s.p2_x = x10 - topleft_x_frac;
			s.p3_x = x11 - topleft_x_frac;
			s.p0_y = y00 - topleft_y_frac;
			s.p1_y = y01 - topleft_y_frac;
			s.p2_y = y10 - topleft_y_frac;
			s.p3_y = y11 - topleft_y_frac;

			// block should fit within the boundingbox.
			assert(s.p0_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
			assert(s.p1_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
			assert(s.p2_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
			assert(s.p3_x < (s.in_block_width << DVS_COORD_FRAC_BITS));
			assert(s.p0_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
			assert(s.p1_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
			assert(s.p2_y < (s.in_block_height << DVS_COORD_FRAC_BITS));
			assert(s.p3_y < (s.in_block_height << DVS_COORD_FRAC_BITS));

			// block size should be greater than zero.
			assert(s.p0_x < s.p1_x);
			assert(s.p2_x < s.p3_x);
			assert(s.p0_y < s.p2_y);
			assert(s.p1_y < s.p3_y);

#if 0
			printf("j: %d\ti:%d\n", j, i);
			printf("offset: %d\n", s.in_addr_offset);
			printf("p0_x: %d\n", s.p0_x);
			printf("p0_y: %d\n", s.p0_y);
			printf("p1_x: %d\n", s.p1_x);
			printf("p1_y: %d\n", s.p1_y);
			printf("p2_x: %d\n", s.p2_x);
			printf("p2_y: %d\n", s.p2_y);
			printf("p3_x: %d\n", s.p3_x);
			printf("p3_y: %d\n", s.p3_y);

			printf("p0_x_nofrac[0]: %d\n", s.p0_x >> DVS_COORD_FRAC_BITS);
			printf("p0_y_nofrac[1]: %d\n", s.p0_y >> DVS_COORD_FRAC_BITS);
			printf("p1_x_nofrac[2]: %d\n", s.p1_x >> DVS_COORD_FRAC_BITS);
			printf("p1_y_nofrac[3]: %d\n", s.p1_y >> DVS_COORD_FRAC_BITS);
			printf("p2_x_nofrac[0]: %d\n", s.p2_x >> DVS_COORD_FRAC_BITS);
			printf("p2_y_nofrac[1]: %d\n", s.p2_y >> DVS_COORD_FRAC_BITS);
			printf("p3_x_nofrac[2]: %d\n", s.p3_x >> DVS_COORD_FRAC_BITS);
			printf("p3_y_nofrac[3]: %d\n", s.p3_y >> DVS_COORD_FRAC_BITS);
			printf("\n");
#endif

			*ptr = s;

			// storage format:
			// Y0 Y1 UV0 Y2 Y3 UV1
			/* if uv_flag equals true increment with 2 incase x is odd, this to
			 * skip the uv position. */
			if (uv_flag)
				ptr += 3;
			else
				ptr += (1 + (i & 1));
		}
	}
}

/*
 * Allocate a host-data buffer and fill it with GDC warp parameters derived
 * from the given 6-axis DVS configuration: once for the Y plane and once
 * for the packed UV plane.  Returns the buffer, or NULL on allocation
 * failure.  The caller owns the returned buffer (see
 * store_dvs_6axis_config() for the free).
 */
struct ia_css_host_data *
convert_allocate_dvs_6axis_config(
    const struct ia_css_dvs_6axis_config *dvs_6axis_config,
    const struct ia_css_binary *binary,
    const struct ia_css_frame_info *dvs_in_frame_info)
{
	unsigned int i_stride;
	unsigned int o_width;
	unsigned int o_height;
	struct ia_css_host_data *me;

	assert(binary);
	assert(dvs_6axis_config);
	assert(dvs_in_frame_info);

	/* 3 tables (Y0, Y1, UV) of half the per-binary DVS size each;
	 * presumably DVS_6AXIS_BYTES covers the two Y tables — TODO confirm
	 * against the macro definition. */
	me = ia_css_host_data_allocate((size_t)((DVS_6AXIS_BYTES(binary) / 2) * 3));

	if (!me)
		return NULL;

	/*DVS only supports input frame of YUV420 or NV12. Fail for all other cases*/
	assert((dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_NV12)
	       || (dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_YUV420));

	i_stride  = dvs_in_frame_info->padded_width;

	o_width  = binary->out_frame_info[0].res.width;
	o_height = binary->out_frame_info[0].res.height;

	/* Y plane */
	convert_coords_to_ispparams(me, dvs_6axis_config, i_stride,
				    o_width, o_height, 0);

	if (dvs_in_frame_info->format == IA_CSS_FRAME_FORMAT_YUV420) {
		/*YUV420 has half the stride for U/V plane*/
		i_stride /= 2;
	}

	/* UV plane (packed inside the y plane) */
	convert_coords_to_ispparams(me, dvs_6axis_config, i_stride,
				    o_width / 2, o_height / 2, 1);

	return me;
}

/*
 * Convert the 6-axis configuration to warp parameters and store them to
 * ISP memory at ddr_addr_y.  The temporary host buffer is freed before
 * returning.  Returns 0 on success, -ENOMEM if conversion/allocation
 * failed.  NOTE(review): 'binary' is forwarded without a NULL assert,
 * unlike the other two pointer arguments.
 */
int
store_dvs_6axis_config(
    const struct ia_css_dvs_6axis_config *dvs_6axis_config,
    const struct ia_css_binary *binary,
    const struct ia_css_frame_info *dvs_in_frame_info,
    ia_css_ptr ddr_addr_y)
{
	struct ia_css_host_data *me;

	assert(dvs_6axis_config);
	assert(ddr_addr_y != mmgr_NULL);
	assert(dvs_in_frame_info);

	me = convert_allocate_dvs_6axis_config(dvs_6axis_config,
					       binary,
					       dvs_in_frame_info);

	if (!me) {
		IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM);
		return -ENOMEM;
	}

	ia_css_params_store_ia_css_host_data(
	    ddr_addr_y,
	    me);
	ia_css_host_data_free(me);

	return 0;
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/dvs/dvs_1.0/ia_css_dvs.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include "ia_css_frame.h"
#include "ia_css_types.h"
#include "sh_css_defs.h"
#include "ia_css_debug.h"
#include "assert_support.h"
#define IA_CSS_INCLUDE_CONFIGURATIONS
#include "ia_css_isp_configs.h"
#include "isp.h"
#include "isp/modes/interface/isp_types.h"
#include "ia_css_raw.host.h"

/* Default RAW configuration: no pipeline attached until configure time. */
static const struct ia_css_raw_configuration default_config = {
	.pipe = (struct sh_css_sp_pipeline *)NULL,
};

/* MW: These are MIPI / ISYS properties, not camera function properties */
/*
 * Map an ISYS/MIPI input format onto the coarse stream-format classes the
 * SH firmware distinguishes (yuv420 legacy, yuv420, yuv422, rgb, raw).
 * Unlisted formats (including BINARY_8) fall through to raw.
 */
static enum sh_stream_format
css2isp_stream_format(enum atomisp_input_format from)
{
	switch (from) {
	case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
		return sh_stream_format_yuv420_legacy;
	case ATOMISP_INPUT_FORMAT_YUV420_8:
	case ATOMISP_INPUT_FORMAT_YUV420_10:
	case ATOMISP_INPUT_FORMAT_YUV420_16:
		return sh_stream_format_yuv420;
	case ATOMISP_INPUT_FORMAT_YUV422_8:
	case ATOMISP_INPUT_FORMAT_YUV422_10:
	case ATOMISP_INPUT_FORMAT_YUV422_16:
		return sh_stream_format_yuv422;
	case ATOMISP_INPUT_FORMAT_RGB_444:
	case ATOMISP_INPUT_FORMAT_RGB_555:
	case ATOMISP_INPUT_FORMAT_RGB_565:
	case ATOMISP_INPUT_FORMAT_RGB_666:
	case ATOMISP_INPUT_FORMAT_RGB_888:
		return sh_stream_format_rgb;
	case ATOMISP_INPUT_FORMAT_RAW_6:
	case ATOMISP_INPUT_FORMAT_RAW_7:
	case ATOMISP_INPUT_FORMAT_RAW_8:
	case ATOMISP_INPUT_FORMAT_RAW_10:
	case ATOMISP_INPUT_FORMAT_RAW_12:
	case ATOMISP_INPUT_FORMAT_RAW_14:
	case ATOMISP_INPUT_FORMAT_RAW_16:
		return sh_stream_format_raw;
	case ATOMISP_INPUT_FORMAT_BINARY_8:
	default:
		return sh_stream_format_raw;
	}
}

/*
 * Translate a host-side RAW configuration into the ISP parameter layout:
 * DMA port B is configured from the chosen frame info, and pipeline /
 * stream properties are copied over.  On ISP2400 the internal frame info
 * is always used; on ISP2401 'in_info' is preferred when available.
 * Returns 0 on success or the DMA-configure error code.
 */
int ia_css_raw_config(struct sh_css_isp_raw_isp_config *to,
		      const struct ia_css_raw_configuration *from,
		      unsigned int size)
{
	unsigned int elems_a = ISP_VEC_NELEMS;
	const struct ia_css_frame_info *in_info = from->in_info;
	const struct ia_css_frame_info *internal_info = from->internal_info;
	int ret;

#if !defined(ISP2401)
	/* 2401 input system uses input width */
	in_info = internal_info;
#else
	/*in some cases, in_info is NULL*/
	if (in_info)
		(void)internal_info;
	else
		in_info = internal_info;

#endif
	ret = ia_css_dma_configure_from_info(&to->port_b, in_info);
	if (ret)
		return ret;

	/* Assume divisibility here, may need to generalize to fixed point. */
	/* NOTE(review): assert() only — with NDEBUG an unpacked format with
	 * a non-divisible element count proceeds into the division below;
	 * consider an error return as done in the qplane/fpn kernels. */
	assert((in_info->format == IA_CSS_FRAME_FORMAT_RAW_PACKED) ||
	       (elems_a % to->port_b.elems == 0));

	to->width_a_over_b = elems_a / to->port_b.elems;
	to->inout_port_config = from->pipe->inout_port_config;
	to->format = in_info->format;
	to->required_bds_factor = from->pipe->required_bds_factor;
	to->two_ppc = from->two_ppc;
	to->stream_format = css2isp_stream_format(from->stream_format);
	to->deinterleaved = from->deinterleaved;
#if defined(ISP2401)
	to->start_column = in_info->crop_info.start_column;
	to->start_line = in_info->crop_info.start_line;
	to->enable_left_padding = from->enable_left_padding;
#endif

	return 0;
}

/*
 * Build a RAW configuration from the pipeline, binary and frame infos and
 * hand it to the generated ia_css_configure_raw() helper.
 * Returns whatever ia_css_configure_raw() returns (0 on success).
 */
int ia_css_raw_configure(const struct sh_css_sp_pipeline *pipe,
			 const struct ia_css_binary *binary,
			 const struct ia_css_frame_info *in_info,
			 const struct ia_css_frame_info *internal_info,
			 bool two_ppc,
			 bool deinterleaved)
{
	u8 enable_left_padding = (uint8_t)((binary->left_padding) ? 1 : 0);
	struct ia_css_raw_configuration config = default_config;

	config.pipe = pipe;
	config.in_info = in_info;
	config.internal_info = internal_info;
	config.two_ppc = two_ppc;
	config.stream_format = binary->input_format;
	config.deinterleaved = deinterleaved;
	config.enable_left_padding = enable_left_padding;

	return ia_css_configure_raw(binary, &config);
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_frame.h" #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "assert_support.h" #define IA_CSS_INCLUDE_CONFIGURATIONS #include "ia_css_isp_configs.h" #include "isp.h" #include "ia_css_qplane.host.h" static const struct ia_css_qplane_configuration default_config = { .pipe = (struct sh_css_sp_pipeline *)NULL, }; int ia_css_qplane_config(struct sh_css_isp_qplane_isp_config *to, const struct ia_css_qplane_configuration *from, unsigned int size) { unsigned int elems_a = ISP_VEC_NELEMS; int ret; ret = ia_css_dma_configure_from_info(&to->port_b, from->info); if (ret) return ret; to->width_a_over_b = elems_a / to->port_b.elems; /* Assume divisiblity here, may need to generalize to fixed point. */ if (elems_a % to->port_b.elems != 0) return -EINVAL; to->inout_port_config = from->pipe->inout_port_config; to->format = from->info->format; return 0; } int ia_css_qplane_configure(const struct sh_css_sp_pipeline *pipe, const struct ia_css_binary *binary, const struct ia_css_frame_info *info) { struct ia_css_qplane_configuration config = default_config; config.pipe = pipe; config.info = info; return ia_css_configure_qplane(binary, &config); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/qplane/qplane_2/ia_css_qplane.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <assert_support.h> #include <ia_css_frame_public.h> #include <ia_css_frame.h> #include <ia_css_binary.h> #include <ia_css_types.h> #include <sh_css_defs.h> #include <ia_css_debug.h> #define IA_CSS_INCLUDE_CONFIGURATIONS #include "ia_css_isp_configs.h" #include "isp.h" #include "ia_css_fpn.host.h" void ia_css_fpn_encode( struct sh_css_isp_fpn_params *to, const struct ia_css_fpn_table *from, unsigned int size) { (void)size; to->shift = from->shift; to->enabled = from->data != NULL; } void ia_css_fpn_dump( const struct sh_css_isp_fpn_params *fpn, unsigned int level) { if (!fpn) return; ia_css_debug_dtrace(level, "Fixed Pattern Noise Reduction:\n"); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fpn_shift", fpn->shift); ia_css_debug_dtrace(level, "\t%-32s = %d\n", "fpn_enabled", fpn->enabled); } int ia_css_fpn_config(struct sh_css_isp_fpn_isp_config *to, const struct ia_css_fpn_configuration *from, unsigned int size) { unsigned int elems_a = ISP_VEC_NELEMS; int ret; ret = ia_css_dma_configure_from_info(&to->port_b, from->info); if (ret) return ret; to->width_a_over_b = elems_a / to->port_b.elems; /* Assume divisiblity here, may need to generalize to fixed point. 
*/ if (elems_a % to->port_b.elems != 0) return -EINVAL; return 0; } int ia_css_fpn_configure(const struct ia_css_binary *binary, const struct ia_css_frame_info *info) { struct ia_css_frame_info my_info = IA_CSS_BINARY_DEFAULT_FRAME_INFO; const struct ia_css_fpn_configuration config = { &my_info }; my_info.res.width = CEIL_DIV(info->res.width, 2); /* Packed by 2x */ my_info.res.height = info->res.height; my_info.padded_width = CEIL_DIV(info->padded_width, 2); /* Packed by 2x */ my_info.format = info->format; my_info.raw_bit_depth = FPN_BITS_PER_PIXEL; my_info.raw_bayer_order = info->raw_bayer_order; my_info.crop_info = info->crop_info; return ia_css_configure_fpn(binary, &config); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/fpn/fpn_1.0/ia_css_fpn.host.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include "type_support.h"
#include "math_support.h"
#include "sh_css_defs.h"
#include "ia_css_types.h"
#include "assert_support.h"
#include "ia_css_xnr3.host.h"

/* Maximum value for alpha on ISP interface */
#define XNR_MAX_ALPHA  ((1 << (ISP_VEC_ELEMBITS - 1)) - 1)

/* Minimum value for sigma on host interface. Lower values translate to
 * max_alpha.
 */
#define XNR_MIN_SIGMA  (IA_CSS_XNR3_SIGMA_SCALE / 100)

/*
 * division look-up table
 * Refers to XNR3.0.5
 */
#define XNR3_LOOK_UP_TABLE_POINTS 16

static const s16 x[XNR3_LOOK_UP_TABLE_POINTS] = {
	1024, 1164, 1320, 1492, 1680, 1884, 2108, 2352,
	2616, 2900, 3208, 3540, 3896, 4276, 4684, 5120
};

static const s16 a[XNR3_LOOK_UP_TABLE_POINTS] = {
	-7213, -5580, -4371, -3421, -2722, -2159, -6950, -5585,
	    -4529, -3697, -3010, -2485, -2070, -1727, -1428, 0
};

static const s16 b[XNR3_LOOK_UP_TABLE_POINTS] = {
	4096, 3603, 3178, 2811, 2497, 2226, 1990, 1783,
	1603, 1446, 1307, 1185, 1077, 981, 895, 819
};

static const s16 c[XNR3_LOOK_UP_TABLE_POINTS] = {
	1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

/*
 * Default kernel parameters. In general, default is bypass mode or as close
 * to the ineffective values as possible. Due to the chroma down+upsampling,
 * perfect bypass mode is not possible for xnr3 filter itself. Instead, the
 * 'blending' parameter is used to create a bypass.
 */
const struct ia_css_xnr3_config default_xnr3_config = {
	/* sigma */
	{ 0, 0, 0, 0, 0, 0 },
	/* coring */
	{ 0, 0, 0, 0 },
	/* blending */
	{ 0 }
};

/*
 * Compute an alpha value for the ISP kernel from sigma value on the host
 * parameter interface as: alpha_scale * 1/(sigma/sigma_scale)
 */
static int32_t
compute_alpha(int sigma)
{
	s32 alpha;
	/* fixed-point 0.5 of sigma, for round-to-nearest division */
	int offset = sigma / 2;

	if (sigma < XNR_MIN_SIGMA) {
		/* sigma below the representable minimum clamps to max alpha */
		alpha = XNR_MAX_ALPHA;
	} else {
		alpha = ((IA_CSS_XNR3_SIGMA_SCALE * XNR_ALPHA_SCALE_FACTOR) + offset) / sigma;

		if (alpha > XNR_MAX_ALPHA)
			alpha = XNR_MAX_ALPHA;
	}

	return alpha;
}

/*
 * Compute the scaled coring value for the ISP kernel from the value on the
 * host parameter interface.
 */
static int32_t
compute_coring(int coring)
{
	s32 isp_coring;
	s32 isp_scale = XNR_CORING_SCALE_FACTOR;
	s32 host_scale = IA_CSS_XNR3_CORING_SCALE;
	s32 offset = host_scale / 2; /* fixed-point 0.5 */

	/* Convert from public host-side scale factor to isp-side scale
	 * factor. Clip to [0, isp_scale-1).
	 */
	isp_coring = ((coring * isp_scale) + offset) / host_scale;
	return min(max(isp_coring, 0), isp_scale - 1);
}

/*
 * Compute the scaled blending strength for the ISP kernel from the value on
 * the host parameter interface.
 */
static int32_t
compute_blending(int strength)
{
	s32 isp_strength;
	s32 isp_scale = XNR_BLENDING_SCALE_FACTOR;
	s32 host_scale = IA_CSS_XNR3_BLENDING_SCALE;
	s32 offset = host_scale / 2; /* fixed-point 0.5 */

	/* Convert from public host-side scale factor to isp-side scale
	 * factor. The blending factor is positive on the host side, but
	 * negative on the ISP side because +1.0 cannot be represented
	 * exactly as s0.11 fixed point, but -1.0 can.
	 */
	isp_strength = -(((strength * isp_scale) + offset) / host_scale);
	return MAX(MIN(isp_strength, 0), -isp_scale);
}

/*
 * Encode the host-side XNR3 configuration into the ISP parameter layout.
 * Sigma values become per-channel alphas, coring and blending values are
 * rescaled, and the per-row gradients (*diff) are clamped to the signed
 * ISP element range.  'size' is part of the generated encode signature
 * and is unused.
 */
void
ia_css_xnr3_encode(
    struct sh_css_isp_xnr3_params *to,
    const struct ia_css_xnr3_config *from,
    unsigned int size)
{
	int kernel_size = XNR_FILTER_SIZE;
	/* The adjust factor is the next power of 2
	   w.r.t. the kernel size*/
	int adjust_factor = ceil_pow2(kernel_size);
	s32 max_diff = (1 << (ISP_VEC_ELEMBITS - 1)) - 1;
	s32 min_diff = -(1 << (ISP_VEC_ELEMBITS - 1));

	s32 alpha_y0 = compute_alpha(from->sigma.y0);
	s32 alpha_y1 = compute_alpha(from->sigma.y1);
	s32 alpha_u0 = compute_alpha(from->sigma.u0);
	s32 alpha_u1 = compute_alpha(from->sigma.u1);
	s32 alpha_v0 = compute_alpha(from->sigma.v0);
	s32 alpha_v1 = compute_alpha(from->sigma.v1);
	s32 alpha_ydiff = (alpha_y1 - alpha_y0) * adjust_factor / kernel_size;
	s32 alpha_udiff = (alpha_u1 - alpha_u0) * adjust_factor / kernel_size;
	s32 alpha_vdiff = (alpha_v1 - alpha_v0) * adjust_factor / kernel_size;

	s32 coring_u0 = compute_coring(from->coring.u0);
	s32 coring_u1 = compute_coring(from->coring.u1);
	s32 coring_v0 = compute_coring(from->coring.v0);
	s32 coring_v1 = compute_coring(from->coring.v1);
	s32 coring_udiff = (coring_u1 - coring_u0) * adjust_factor / kernel_size;
	s32 coring_vdiff = (coring_v1 - coring_v0) * adjust_factor / kernel_size;

	s32 blending = compute_blending(from->blending.strength);

	(void)size;

	/* alpha's are represented in qN.5 format */
	to->alpha.y0 = alpha_y0;
	to->alpha.u0 = alpha_u0;
	to->alpha.v0 = alpha_v0;
	to->alpha.ydiff = min(max(alpha_ydiff, min_diff), max_diff);
	to->alpha.udiff = min(max(alpha_udiff, min_diff), max_diff);
	to->alpha.vdiff = min(max(alpha_vdiff, min_diff), max_diff);

	/* coring parameters are expressed in q1.NN format */
	to->coring.u0 = coring_u0;
	to->coring.v0 = coring_v0;
	to->coring.udiff = min(max(coring_udiff, min_diff), max_diff);
	to->coring.vdiff = min(max(coring_vdiff, min_diff), max_diff);

	/* blending strength is expressed in q1.NN format */
	to->blending.strength = blending;
}

/* ISP2401 */
/* (void) = ia_css_xnr3_vmem_encode(*to, *from)
 * -----------------------------------------------
 * VMEM Encode Function to translate UV parameters from userspace into ISP space
 */
void
ia_css_xnr3_vmem_encode(
    struct sh_css_isp_xnr3_vmem_params *to,
    const struct ia_css_xnr3_config *from,
    unsigned int size)
{
	unsigned int i, j, base;
	const unsigned int total_blocks = 4;
	const unsigned int shuffle_block = 16;

	(void)from;
	(void)size;

	/* Init: zero the full vector so elements beyond the copied blocks
	 * are well-defined. */
	for (i = 0; i < ISP_VEC_NELEMS; i++) {
		to->x[0][i] = 0;
		to->a[0][i] = 0;
		to->b[0][i] = 0;
		to->c[0][i] = 0;
	}

	/* Constraints on "x":
	 * - values should be greater or equal to 0.
	 * - values should be ascending.
	 */
	assert(x[0] >= 0);

	for (j = 1; j < XNR3_LOOK_UP_TABLE_POINTS; j++) {
		assert(x[j] >= 0);
		assert(x[j] > x[j - 1]);
	}

	/* The implementation of the calulating 1/x is based on the availability
	 * of the OP_vec_shuffle16 operation.
	 * A 64 element vector is split up in 4 blocks of 16 element. Each array is copied to
	 * a vector 4 times, (starting at 0, 16, 32 and 48). All array elements are copied or
	 * initialised as described in the KFS. The remaining elements of a vector are set to 0.
	 */
	/* TODO: guard this code with above assumptions */
	for (i = 0; i < total_blocks; i++) {
		base = shuffle_block * i;

		for (j = 0; j < XNR3_LOOK_UP_TABLE_POINTS; j++) {
			to->x[0][base + j] = x[j];
			to->a[0][base + j] = a[j];
			to->b[0][base + j] = b[j];
			to->c[0][base + j] = c[j];
		}
	}
}

/* Dummy Function added as the tool expects it*/
void
ia_css_xnr3_debug_dtrace(
    const struct ia_css_xnr3_config *config,
    unsigned int level)
{
	(void)config;
	(void)level;
}
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_3.0/ia_css_xnr3.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include <linux/string.h> /* for memcpy() */ #include <type_support.h> #include "system_global.h" #include "vamem.h" #include "ia_css_types.h" #include "ia_css_xnr_table.host.h" struct ia_css_xnr_table default_xnr_table; static const uint16_t default_xnr_table_data[IA_CSS_VAMEM_2_XNR_TABLE_SIZE] = { /* 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 */ 8191 >> 1, 4096 >> 1, 2730 >> 1, 2048 >> 1, 1638 >> 1, 1365 >> 1, 1170 >> 1, 1024 >> 1, 910 >> 1, 819 >> 1, 744 >> 1, 682 >> 1, 630 >> 1, 585 >> 1, 546 >> 1, 512 >> 1, /* 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 */ 481 >> 1, 455 >> 1, 431 >> 1, 409 >> 1, 390 >> 1, 372 >> 1, 356 >> 1, 341 >> 1, 327 >> 1, 315 >> 1, 303 >> 1, 292 >> 1, 282 >> 1, 273 >> 1, 264 >> 1, 256 >> 1, /* 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 */ 248 >> 1, 240 >> 1, 234 >> 1, 227 >> 1, 221 >> 1, 215 >> 1, 210 >> 1, 204 >> 1, 199 >> 1, 195 >> 1, 190 >> 1, 186 >> 1, 182 >> 1, 178 >> 1, 174 >> 1, 170 >> 1, /* 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 */ 167 >> 1, 163 >> 1, 160 >> 1, 157 >> 1, 154 >> 1, 151 >> 1, 148 >> 1, 146 >> 1, 143 >> 1, 141 >> 1, 138 >> 1, 136 >> 1, 134 >> 1, 132 >> 1, 130 >> 1, 128 >> 1 }; void ia_css_config_xnr_table(void) { memcpy(default_xnr_table.data.vamem_2, default_xnr_table_data, sizeof(default_xnr_table_data)); default_xnr_table.vamem_type = IA_CSS_VAMEM_TYPE_2; }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr_table.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_types.h" #include "sh_css_defs.h" #include "ia_css_debug.h" #include "sh_css_frac.h" #include "ia_css_xnr.host.h" const struct ia_css_xnr_config default_xnr_config = { /* default threshold 6400 translates to 25 on ISP. */ 6400 }; void ia_css_xnr_table_vamem_encode( struct sh_css_isp_xnr_vamem_params *to, const struct ia_css_xnr_table *from, unsigned int size) { (void)size; memcpy(&to->xnr, &from->data, sizeof(to->xnr)); } void ia_css_xnr_encode( struct sh_css_isp_xnr_params *to, const struct ia_css_xnr_config *from, unsigned int size) { (void)size; to->threshold = (uint16_t)uDIGIT_FITTING(from->threshold, 16, SH_CSS_ISP_YUV_BITS); } void ia_css_xnr_table_debug_dtrace( const struct ia_css_xnr_table *config, unsigned int level) { (void)config; (void)level; } void ia_css_xnr_debug_dtrace( const struct ia_css_xnr_config *config, unsigned int level) { ia_css_debug_dtrace(level, "config.threshold=%d\n", config->threshold); }
linux-master
drivers/staging/media/atomisp/pci/isp/kernels/xnr/xnr_1.0/ia_css_xnr.host.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Medifield PNW Camera Imaging ISP subsystem. * * Copyright (c) 2010 Intel Corporation. All Rights Reserved. * * Copyright (c) 2010 Silicon Hive www.siliconhive.com. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * */ /* * This file contains functions for buffer object structure management */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/gfp.h> /* for GFP_ATOMIC */ #include <linux/mm.h> #include <linux/mm_types.h> #include <linux/hugetlb.h> #include <linux/highmem.h> #include <linux/slab.h> /* for kmalloc */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/io.h> #include <asm/current.h> #include <linux/sched/signal.h> #include <linux/file.h> #include <asm/set_memory.h> #include "atomisp_internal.h" #include "hmm/hmm_common.h" #include "hmm/hmm_bo.h" static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo, unsigned int pgnr) { check_bodev_null_return(bdev, -EINVAL); var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL, "hmm_bo_device not inited yet.\n"); /* prevent zero size buffer object */ if (pgnr == 0) { dev_err(atomisp_dev, "0 size buffer is not allowed.\n"); return -EINVAL; } memset(bo, 0, sizeof(*bo)); mutex_init(&bo->mutex); /* init the bo->list HEAD as an element of entire_bo_list */ INIT_LIST_HEAD(&bo->list); bo->bdev = bdev; bo->vmap_addr = NULL; bo->status = HMM_BO_FREE; bo->start = bdev->start; bo->pgnr = pgnr; bo->end = bo->start + pgnr_to_size(pgnr); bo->prev = NULL; bo->next = 
NULL; return 0; } static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree( struct rb_node *node, unsigned int pgnr) { struct hmm_buffer_object *this, *ret_bo, *temp_bo; this = rb_entry(node, struct hmm_buffer_object, node); if (this->pgnr == pgnr || (this->pgnr > pgnr && !this->node.rb_left)) { goto remove_bo_and_return; } else { if (this->pgnr < pgnr) { if (!this->node.rb_right) return NULL; ret_bo = __bo_search_and_remove_from_free_rbtree( this->node.rb_right, pgnr); } else { ret_bo = __bo_search_and_remove_from_free_rbtree( this->node.rb_left, pgnr); } if (!ret_bo) { if (this->pgnr > pgnr) goto remove_bo_and_return; else return NULL; } return ret_bo; } remove_bo_and_return: /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL. * 1. check if 'this->next' is NULL: * yes: erase 'this' node and rebalance rbtree, return 'this'. */ if (!this->next) { rb_erase(&this->node, &this->bdev->free_rbtree); return this; } /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo. * 2. check if 'this->next->next' is NULL: * yes: change the related 'next/prev' pointer, * return 'this->next' but the rbtree stays unchanged. 
*/ temp_bo = this->next; this->next = temp_bo->next; if (temp_bo->next) temp_bo->next->prev = this; temp_bo->next = NULL; temp_bo->prev = NULL; return temp_bo; } static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root, ia_css_ptr start) { struct rb_node *n = root->rb_node; struct hmm_buffer_object *bo; do { bo = rb_entry(n, struct hmm_buffer_object, node); if (bo->start > start) { if (!n->rb_left) return NULL; n = n->rb_left; } else if (bo->start < start) { if (!n->rb_right) return NULL; n = n->rb_right; } else { return bo; } } while (n); return NULL; } static struct hmm_buffer_object *__bo_search_by_addr_in_range( struct rb_root *root, unsigned int start) { struct rb_node *n = root->rb_node; struct hmm_buffer_object *bo; do { bo = rb_entry(n, struct hmm_buffer_object, node); if (bo->start > start) { if (!n->rb_left) return NULL; n = n->rb_left; } else { if (bo->end > start) return bo; if (!n->rb_right) return NULL; n = n->rb_right; } } while (n); return NULL; } static void __bo_insert_to_free_rbtree(struct rb_root *root, struct hmm_buffer_object *bo) { struct rb_node **new = &root->rb_node; struct rb_node *parent = NULL; struct hmm_buffer_object *this; unsigned int pgnr = bo->pgnr; while (*new) { parent = *new; this = container_of(*new, struct hmm_buffer_object, node); if (pgnr < this->pgnr) { new = &((*new)->rb_left); } else if (pgnr > this->pgnr) { new = &((*new)->rb_right); } else { bo->prev = this; bo->next = this->next; if (this->next) this->next->prev = bo; this->next = bo; bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE; return; } } bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE; rb_link_node(&bo->node, parent, new); rb_insert_color(&bo->node, root); } static void __bo_insert_to_alloc_rbtree(struct rb_root *root, struct hmm_buffer_object *bo) { struct rb_node **new = &root->rb_node; struct rb_node *parent = NULL; struct hmm_buffer_object *this; unsigned int start = bo->start; while (*new) { parent = *new; this = 
container_of(*new, struct hmm_buffer_object, node); if (start < this->start) new = &((*new)->rb_left); else new = &((*new)->rb_right); } kref_init(&bo->kref); bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED; rb_link_node(&bo->node, parent, new); rb_insert_color(&bo->node, root); } static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo, unsigned int pgnr) { struct hmm_buffer_object *new_bo; unsigned long flags; int ret; new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL); if (!new_bo) { dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__); return NULL; } ret = __bo_init(bdev, new_bo, pgnr); if (ret) { dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__); kmem_cache_free(bdev->bo_cache, new_bo); return NULL; } new_bo->start = bo->start; new_bo->end = new_bo->start + pgnr_to_size(pgnr); bo->start = new_bo->end; bo->pgnr = bo->pgnr - pgnr; spin_lock_irqsave(&bdev->list_lock, flags); list_add_tail(&new_bo->list, &bo->list); spin_unlock_irqrestore(&bdev->list_lock, flags); return new_bo; } static void __bo_take_off_handling(struct hmm_buffer_object *bo) { struct hmm_bo_device *bdev = bo->bdev; /* There are 4 situations when we take off a known bo from free rbtree: * 1. if bo->next && bo->prev == NULL, bo is a rbtree node * and does not have a linked list after bo, to take off this bo, * we just need erase bo directly and rebalance the free rbtree */ if (!bo->prev && !bo->next) { rb_erase(&bo->node, &bdev->free_rbtree); /* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node, * and has a linked list,to take off this bo we need erase bo * first, then, insert bo->next into free rbtree and rebalance * the free rbtree */ } else if (!bo->prev && bo->next) { bo->next->prev = NULL; rb_erase(&bo->node, &bdev->free_rbtree); __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next); bo->next = NULL; /* 3. 
when bo->prev != NULL && bo->next == NULL, bo is not a rbtree * node, bo is the last element of the linked list after rbtree * node, to take off this bo, we just need set the "prev/next" * pointers to NULL, the free rbtree stays unchaged */ } else if (bo->prev && !bo->next) { bo->prev->next = NULL; bo->prev = NULL; /* 4. when bo->prev != NULL && bo->next != NULL ,bo is not a rbtree * node, bo is in the middle of the linked list after rbtree node, * to take off this bo, we just set take the "prev/next" pointers * to NULL, the free rbtree stays unchaged */ } else if (bo->prev && bo->next) { bo->next->prev = bo->prev; bo->prev->next = bo->next; bo->next = NULL; bo->prev = NULL; } } static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo, struct hmm_buffer_object *next_bo) { struct hmm_bo_device *bdev; unsigned long flags; bdev = bo->bdev; next_bo->start = bo->start; next_bo->pgnr = next_bo->pgnr + bo->pgnr; spin_lock_irqsave(&bdev->list_lock, flags); list_del(&bo->list); spin_unlock_irqrestore(&bdev->list_lock, flags); kmem_cache_free(bo->bdev->bo_cache, bo); return next_bo; } /* * hmm_bo_device functions. 
*/ int hmm_bo_device_init(struct hmm_bo_device *bdev, struct isp_mmu_client *mmu_driver, unsigned int vaddr_start, unsigned int size) { struct hmm_buffer_object *bo; unsigned long flags; int ret; check_bodev_null_return(bdev, -EINVAL); ret = isp_mmu_init(&bdev->mmu, mmu_driver); if (ret) { dev_err(atomisp_dev, "isp_mmu_init failed.\n"); return ret; } bdev->start = vaddr_start; bdev->pgnr = size_to_pgnr_ceil(size); bdev->size = pgnr_to_size(bdev->pgnr); spin_lock_init(&bdev->list_lock); mutex_init(&bdev->rbtree_mutex); bdev->flag = HMM_BO_DEVICE_INITED; INIT_LIST_HEAD(&bdev->entire_bo_list); bdev->allocated_rbtree = RB_ROOT; bdev->free_rbtree = RB_ROOT; bdev->bo_cache = kmem_cache_create("bo_cache", sizeof(struct hmm_buffer_object), 0, 0, NULL); if (!bdev->bo_cache) { dev_err(atomisp_dev, "%s: create cache failed!\n", __func__); isp_mmu_exit(&bdev->mmu); return -ENOMEM; } bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL); if (!bo) { dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__); isp_mmu_exit(&bdev->mmu); return -ENOMEM; } ret = __bo_init(bdev, bo, bdev->pgnr); if (ret) { dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__); kmem_cache_free(bdev->bo_cache, bo); isp_mmu_exit(&bdev->mmu); return -EINVAL; } spin_lock_irqsave(&bdev->list_lock, flags); list_add_tail(&bo->list, &bdev->entire_bo_list); spin_unlock_irqrestore(&bdev->list_lock, flags); __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo); return 0; } struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev, unsigned int pgnr) { struct hmm_buffer_object *bo, *new_bo; struct rb_root *root = &bdev->free_rbtree; check_bodev_null_return(bdev, NULL); var_equal_return(hmm_bo_device_inited(bdev), 0, NULL, "hmm_bo_device not inited yet.\n"); if (pgnr == 0) { dev_err(atomisp_dev, "0 size buffer is not allowed.\n"); return NULL; } mutex_lock(&bdev->rbtree_mutex); bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr); if (!bo) { mutex_unlock(&bdev->rbtree_mutex); dev_err(atomisp_dev, 
"%s: Out of Memory! hmm_bo_alloc failed", __func__); return NULL; } if (bo->pgnr > pgnr) { new_bo = __bo_break_up(bdev, bo, pgnr); if (!new_bo) { mutex_unlock(&bdev->rbtree_mutex); dev_err(atomisp_dev, "%s: __bo_break_up failed!\n", __func__); return NULL; } __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo); __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo); mutex_unlock(&bdev->rbtree_mutex); return new_bo; } __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo); mutex_unlock(&bdev->rbtree_mutex); return bo; } void hmm_bo_release(struct hmm_buffer_object *bo) { struct hmm_bo_device *bdev = bo->bdev; struct hmm_buffer_object *next_bo, *prev_bo; mutex_lock(&bdev->rbtree_mutex); /* * FIX ME: * * how to destroy the bo when it is stilled MMAPED? * * ideally, this will not happened as hmm_bo_release * will only be called when kref reaches 0, and in mmap * operation the hmm_bo_ref will eventually be called. * so, if this happened, something goes wrong. */ if (bo->status & HMM_BO_MMAPED) { mutex_unlock(&bdev->rbtree_mutex); dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n"); return; } if (bo->status & HMM_BO_BINDED) { dev_warn(atomisp_dev, "the bo is still binded, unbind it first...\n"); hmm_bo_unbind(bo); } if (bo->status & HMM_BO_PAGE_ALLOCED) { dev_warn(atomisp_dev, "the pages is not freed, free pages first\n"); hmm_bo_free_pages(bo); } if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { dev_warn(atomisp_dev, "the vunmap is not done, do it...\n"); hmm_bo_vunmap(bo); } rb_erase(&bo->node, &bdev->allocated_rbtree); prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list); next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list); if (bo->list.prev != &bdev->entire_bo_list && prev_bo->end == bo->start && (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) { __bo_take_off_handling(prev_bo); bo = __bo_merge(prev_bo, bo); } if (bo->list.next != &bdev->entire_bo_list && next_bo->start == bo->end && 
(next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) { __bo_take_off_handling(next_bo); bo = __bo_merge(bo, next_bo); } __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo); mutex_unlock(&bdev->rbtree_mutex); return; } void hmm_bo_device_exit(struct hmm_bo_device *bdev) { struct hmm_buffer_object *bo; unsigned long flags; dev_dbg(atomisp_dev, "%s: entering!\n", __func__); check_bodev_null_return_void(bdev); /* * release all allocated bos even they a in use * and all bos will be merged into a big bo */ while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree)) hmm_bo_release( rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node)); dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n", __func__); /* free all bos to release all ISP virtual memory */ while (!list_empty(&bdev->entire_bo_list)) { bo = list_to_hmm_bo(bdev->entire_bo_list.next); spin_lock_irqsave(&bdev->list_lock, flags); list_del(&bo->list); spin_unlock_irqrestore(&bdev->list_lock, flags); kmem_cache_free(bdev->bo_cache, bo); } dev_dbg(atomisp_dev, "%s: finished to free all bos!\n", __func__); kmem_cache_destroy(bdev->bo_cache); isp_mmu_exit(&bdev->mmu); } int hmm_bo_device_inited(struct hmm_bo_device *bdev) { check_bodev_null_return(bdev, -EINVAL); return bdev->flag == HMM_BO_DEVICE_INITED; } int hmm_bo_allocated(struct hmm_buffer_object *bo) { check_bo_null_return(bo, 0); return bo->status & HMM_BO_ALLOCED; } struct hmm_buffer_object *hmm_bo_device_search_start( struct hmm_bo_device *bdev, ia_css_ptr vaddr) { struct hmm_buffer_object *bo; check_bodev_null_return(bdev, NULL); mutex_lock(&bdev->rbtree_mutex); bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr); if (!bo) { mutex_unlock(&bdev->rbtree_mutex); dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n", __func__, vaddr); return NULL; } mutex_unlock(&bdev->rbtree_mutex); return bo; } struct hmm_buffer_object *hmm_bo_device_search_in_range( struct hmm_bo_device *bdev, unsigned int vaddr) { struct hmm_buffer_object *bo; 
check_bodev_null_return(bdev, NULL); mutex_lock(&bdev->rbtree_mutex); bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr); if (!bo) { mutex_unlock(&bdev->rbtree_mutex); dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n", __func__, vaddr); return NULL; } mutex_unlock(&bdev->rbtree_mutex); return bo; } struct hmm_buffer_object *hmm_bo_device_search_vmap_start( struct hmm_bo_device *bdev, const void *vaddr) { struct list_head *pos; struct hmm_buffer_object *bo; unsigned long flags; check_bodev_null_return(bdev, NULL); spin_lock_irqsave(&bdev->list_lock, flags); list_for_each(pos, &bdev->entire_bo_list) { bo = list_to_hmm_bo(pos); /* pass bo which has no vm_node allocated */ if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE) continue; if (bo->vmap_addr == vaddr) goto found; } spin_unlock_irqrestore(&bdev->list_lock, flags); return NULL; found: spin_unlock_irqrestore(&bdev->list_lock, flags); return bo; } static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array) { unsigned long i; for (i = 0; i < nr_pages; i++) __free_pages(page_array[i], 0); } static void free_private_bo_pages(struct hmm_buffer_object *bo) { set_pages_array_wb(bo->pages, bo->pgnr); free_pages_bulk_array(bo->pgnr, bo->pages); } /*Allocate pages which will be used only by ISP*/ static int alloc_private_pages(struct hmm_buffer_object *bo) { const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS; int ret; ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages); if (ret != bo->pgnr) { free_pages_bulk_array(ret, bo->pages); dev_err(atomisp_dev, "alloc_pages_bulk_array() failed\n"); return -ENOMEM; } ret = set_pages_array_uc(bo->pages, bo->pgnr); if (ret) { dev_err(atomisp_dev, "set pages uncacheable failed.\n"); free_pages_bulk_array(bo->pgnr, bo->pages); return ret; } return 0; } static int alloc_vmalloc_pages(struct hmm_buffer_object *bo, void *vmalloc_addr) { void *vaddr = vmalloc_addr; int i; for (i = 0; i < bo->pgnr; i++) { bo->pages[i] = 
vmalloc_to_page(vaddr); if (!bo->pages[i]) { dev_err(atomisp_dev, "Error could not get page %d of vmalloc buf\n", i); return -ENOMEM; } vaddr += PAGE_SIZE; } return 0; } /* * allocate/free physical pages for the bo. * * type indicate where are the pages from. currently we have 3 types * of memory: HMM_BO_PRIVATE, HMM_BO_VMALLOC. * * vmalloc_addr is only valid when type is HMM_BO_VMALLOC. */ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo, enum hmm_bo_type type, void *vmalloc_addr) { int ret = -EINVAL; check_bo_null_return(bo, -EINVAL); mutex_lock(&bo->mutex); check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err); bo->pages = kcalloc(bo->pgnr, sizeof(struct page *), GFP_KERNEL); if (unlikely(!bo->pages)) { ret = -ENOMEM; goto alloc_err; } if (type == HMM_BO_PRIVATE) { ret = alloc_private_pages(bo); } else if (type == HMM_BO_VMALLOC) { ret = alloc_vmalloc_pages(bo, vmalloc_addr); } else { dev_err(atomisp_dev, "invalid buffer type.\n"); ret = -EINVAL; } if (ret) goto alloc_err; bo->type = type; bo->status |= HMM_BO_PAGE_ALLOCED; mutex_unlock(&bo->mutex); return 0; alloc_err: kfree(bo->pages); mutex_unlock(&bo->mutex); dev_err(atomisp_dev, "alloc pages err...\n"); return ret; status_err: mutex_unlock(&bo->mutex); dev_err(atomisp_dev, "buffer object has already page allocated.\n"); return -EINVAL; } /* * free physical pages of the bo. */ void hmm_bo_free_pages(struct hmm_buffer_object *bo) { check_bo_null_return_void(bo); mutex_lock(&bo->mutex); check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2); /* clear the flag anyway. 
*/ bo->status &= (~HMM_BO_PAGE_ALLOCED); if (bo->type == HMM_BO_PRIVATE) free_private_bo_pages(bo); else if (bo->type == HMM_BO_VMALLOC) ; /* No-op, nothing to do */ else dev_err(atomisp_dev, "invalid buffer type.\n"); kfree(bo->pages); mutex_unlock(&bo->mutex); return; status_err2: mutex_unlock(&bo->mutex); dev_err(atomisp_dev, "buffer object not page allocated yet.\n"); } int hmm_bo_page_allocated(struct hmm_buffer_object *bo) { check_bo_null_return(bo, 0); return bo->status & HMM_BO_PAGE_ALLOCED; } /* * bind the physical pages to a virtual address space. */ int hmm_bo_bind(struct hmm_buffer_object *bo) { int ret; unsigned int virt; struct hmm_bo_device *bdev; unsigned int i; check_bo_null_return(bo, -EINVAL); mutex_lock(&bo->mutex); check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED, status_err1); check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2); bdev = bo->bdev; virt = bo->start; for (i = 0; i < bo->pgnr; i++) { ret = isp_mmu_map(&bdev->mmu, virt, page_to_phys(bo->pages[i]), 1); if (ret) goto map_err; virt += (1 << PAGE_SHIFT); } /* * flush TBL here. * * theoretically, we donot need to flush TLB as we didnot change * any existed address mappings, but for Silicon Hive's MMU, its * really a bug here. I guess when fetching PTEs (page table entity) * to TLB, its MMU will fetch additional INVALID PTEs automatically * for performance issue. EX, we only set up 1 page address mapping, * meaning updating 1 PTE, but the MMU fetches 4 PTE at one time, * so the additional 3 PTEs are invalid. 
*/ if (bo->start != 0x0) isp_mmu_flush_tlb_range(&bdev->mmu, bo->start, (bo->pgnr << PAGE_SHIFT)); bo->status |= HMM_BO_BINDED; mutex_unlock(&bo->mutex); return 0; map_err: /* unbind the physical pages with related virtual address space */ virt = bo->start; for ( ; i > 0; i--) { isp_mmu_unmap(&bdev->mmu, virt, 1); virt += pgnr_to_size(1); } mutex_unlock(&bo->mutex); dev_err(atomisp_dev, "setup MMU address mapping failed.\n"); return ret; status_err2: mutex_unlock(&bo->mutex); dev_err(atomisp_dev, "buffer object already binded.\n"); return -EINVAL; status_err1: mutex_unlock(&bo->mutex); dev_err(atomisp_dev, "buffer object vm_node or page not allocated.\n"); return -EINVAL; } /* * unbind the physical pages with related virtual address space. */ void hmm_bo_unbind(struct hmm_buffer_object *bo) { unsigned int virt; struct hmm_bo_device *bdev; unsigned int i; check_bo_null_return_void(bo); mutex_lock(&bo->mutex); check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED | HMM_BO_BINDED, status_err); bdev = bo->bdev; virt = bo->start; for (i = 0; i < bo->pgnr; i++) { isp_mmu_unmap(&bdev->mmu, virt, 1); virt += pgnr_to_size(1); } /* * flush TLB as the address mapping has been removed and * related TLBs should be invalidated. 
*/ isp_mmu_flush_tlb_range(&bdev->mmu, bo->start, (bo->pgnr << PAGE_SHIFT)); bo->status &= (~HMM_BO_BINDED); mutex_unlock(&bo->mutex); return; status_err: mutex_unlock(&bo->mutex); dev_err(atomisp_dev, "buffer vm or page not allocated or not binded yet.\n"); } int hmm_bo_binded(struct hmm_buffer_object *bo) { int ret; check_bo_null_return(bo, 0); mutex_lock(&bo->mutex); ret = bo->status & HMM_BO_BINDED; mutex_unlock(&bo->mutex); return ret; } void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached) { check_bo_null_return(bo, NULL); mutex_lock(&bo->mutex); if (((bo->status & HMM_BO_VMAPED) && !cached) || ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) { mutex_unlock(&bo->mutex); return bo->vmap_addr; } /* cached status need to be changed, so vunmap first */ if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { vunmap(bo->vmap_addr); bo->vmap_addr = NULL; bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED); } bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP, cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE); if (unlikely(!bo->vmap_addr)) { mutex_unlock(&bo->mutex); dev_err(atomisp_dev, "vmap failed...\n"); return NULL; } bo->status |= (cached ? 
HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED); mutex_unlock(&bo->mutex); return bo->vmap_addr; } void hmm_bo_flush_vmap(struct hmm_buffer_object *bo) { check_bo_null_return_void(bo); mutex_lock(&bo->mutex); if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) { mutex_unlock(&bo->mutex); return; } clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE); mutex_unlock(&bo->mutex); } void hmm_bo_vunmap(struct hmm_buffer_object *bo) { check_bo_null_return_void(bo); mutex_lock(&bo->mutex); if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { vunmap(bo->vmap_addr); bo->vmap_addr = NULL; bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED); } mutex_unlock(&bo->mutex); return; } void hmm_bo_ref(struct hmm_buffer_object *bo) { check_bo_null_return_void(bo); kref_get(&bo->kref); } static void kref_hmm_bo_release(struct kref *kref) { if (!kref) return; hmm_bo_release(kref_to_hmm_bo(kref)); } void hmm_bo_unref(struct hmm_buffer_object *bo) { check_bo_null_return_void(bo); kref_put(&bo->kref, kref_hmm_bo_release); } static void hmm_bo_vm_open(struct vm_area_struct *vma) { struct hmm_buffer_object *bo = (struct hmm_buffer_object *)vma->vm_private_data; check_bo_null_return_void(bo); hmm_bo_ref(bo); mutex_lock(&bo->mutex); bo->status |= HMM_BO_MMAPED; bo->mmap_count++; mutex_unlock(&bo->mutex); } static void hmm_bo_vm_close(struct vm_area_struct *vma) { struct hmm_buffer_object *bo = (struct hmm_buffer_object *)vma->vm_private_data; check_bo_null_return_void(bo); hmm_bo_unref(bo); mutex_lock(&bo->mutex); bo->mmap_count--; if (!bo->mmap_count) { bo->status &= (~HMM_BO_MMAPED); vma->vm_private_data = NULL; } mutex_unlock(&bo->mutex); } static const struct vm_operations_struct hmm_bo_vm_ops = { .open = hmm_bo_vm_open, .close = hmm_bo_vm_close, }; /* * mmap the bo to user space. 
*/ int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo) { unsigned int start, end; unsigned int virt; unsigned int pgnr, i; unsigned int pfn; check_bo_null_return(bo, -EINVAL); check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err); pgnr = bo->pgnr; start = vma->vm_start; end = vma->vm_end; /* * check vma's virtual address space size and buffer object's size. * must be the same. */ if ((start + pgnr_to_size(pgnr)) != end) { dev_warn(atomisp_dev, "vma's address space size not equal to buffer object's size"); return -EINVAL; } virt = vma->vm_start; for (i = 0; i < pgnr; i++) { pfn = page_to_pfn(bo->pages[i]); if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) { dev_warn(atomisp_dev, "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n", virt, pfn, 1); return -EINVAL; } virt += PAGE_SIZE; } vma->vm_private_data = bo; vma->vm_ops = &hmm_bo_vm_ops; vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); /* * call hmm_bo_vm_open explicitly. */ hmm_bo_vm_open(vma); return 0; status_err: dev_err(atomisp_dev, "buffer page not allocated yet.\n"); return -EINVAL; }
linux-master
drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Medifield PNW Camera Imaging ISP subsystem. * * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved. * * Copyright (c) 2010 Silicon Hive www.siliconhive.com. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * */ /* * This file contains entry functions for memory management of ISP driver */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/highmem.h> /* for kmap */ #include <linux/io.h> /* for page_to_phys */ #include <linux/sysfs.h> #include "hmm/hmm.h" #include "hmm/hmm_bo.h" #include "atomisp_internal.h" #include "asm/cacheflush.h" #include "mmu/isp_mmu.h" #include "mmu/sh_mmu_mrfld.h" struct hmm_bo_device bo_device; static ia_css_ptr dummy_ptr = mmgr_EXCEPTION; static bool hmm_initialized; /* * p: private * v: vmalloc */ static const char hmm_bo_type_string[] = "pv"; static ssize_t bo_show(struct device *dev, struct device_attribute *attr, char *buf, struct list_head *bo_list, bool active) { ssize_t ret = 0; struct hmm_buffer_object *bo; unsigned long flags; int i; long total[HMM_BO_LAST] = { 0 }; long count[HMM_BO_LAST] = { 0 }; int index1 = 0; int index2 = 0; ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n"); if (ret <= 0) return 0; index1 += ret; spin_lock_irqsave(&bo_device.list_lock, flags); list_for_each_entry(bo, bo_list, list) { if ((active && (bo->status & HMM_BO_ALLOCED)) || (!active && !(bo->status & HMM_BO_ALLOCED))) { ret = scnprintf(buf + index1, PAGE_SIZE - index1, "%c %d\n", hmm_bo_type_string[bo->type], bo->pgnr); total[bo->type] += bo->pgnr; count[bo->type]++; if (ret > 
0) index1 += ret; } } spin_unlock_irqrestore(&bo_device.list_lock, flags); for (i = 0; i < HMM_BO_LAST; i++) { if (count[i]) { ret = scnprintf(buf + index1 + index2, PAGE_SIZE - index1 - index2, "%ld %c buffer objects: %ld KB\n", count[i], hmm_bo_type_string[i], total[i] * 4); if (ret > 0) index2 += ret; } } /* Add trailing zero, not included by scnprintf */ return index1 + index2 + 1; } static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr, char *buf) { return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true); } static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr, char *buf) { return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false); } static DEVICE_ATTR_RO(active_bo); static DEVICE_ATTR_RO(free_bo); static struct attribute *sysfs_attrs_ctrl[] = { &dev_attr_active_bo.attr, &dev_attr_free_bo.attr, NULL }; static struct attribute_group atomisp_attribute_group[] = { {.attrs = sysfs_attrs_ctrl }, }; int hmm_init(void) { int ret; ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld, ISP_VM_START, ISP_VM_SIZE); if (ret) dev_err(atomisp_dev, "hmm_bo_device_init failed.\n"); hmm_initialized = true; /* * As hmm use NULL to indicate invalid ISP virtual address, * and ISP_VM_START is defined to 0 too, so we allocate * one piece of dummy memory, which should return value 0, * at the beginning, to avoid hmm_alloc return 0 in the * further allocation. 
*/ dummy_ptr = hmm_alloc(1); if (!ret) { ret = sysfs_create_group(&atomisp_dev->kobj, atomisp_attribute_group); if (ret) dev_err(atomisp_dev, "%s Failed to create sysfs\n", __func__); } return ret; } void hmm_cleanup(void) { if (dummy_ptr == mmgr_EXCEPTION) return; sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group); /* free dummy memory first */ hmm_free(dummy_ptr); dummy_ptr = 0; hmm_bo_device_exit(&bo_device); hmm_initialized = false; } static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type, void *vmalloc_addr) { unsigned int pgnr; struct hmm_buffer_object *bo; int ret; /* * Check if we are initialized. In the ideal world we wouldn't need * this but we can tackle it once the driver is a lot cleaner */ if (!hmm_initialized) hmm_init(); /* Get page number from size */ pgnr = size_to_pgnr_ceil(bytes); /* Buffer object structure init */ bo = hmm_bo_alloc(&bo_device, pgnr); if (!bo) { dev_err(atomisp_dev, "hmm_bo_create failed.\n"); goto create_bo_err; } /* Allocate pages for memory */ ret = hmm_bo_alloc_pages(bo, type, vmalloc_addr); if (ret) { dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n"); goto alloc_page_err; } /* Combine the virtual address and pages together */ ret = hmm_bo_bind(bo); if (ret) { dev_err(atomisp_dev, "hmm_bo_bind failed.\n"); goto bind_err; } dev_dbg(atomisp_dev, "pages: 0x%08x (%zu bytes), type: %d, vmalloc %p\n", bo->start, bytes, type, vmalloc); return bo->start; bind_err: hmm_bo_free_pages(bo); alloc_page_err: hmm_bo_unref(bo); create_bo_err: return 0; } ia_css_ptr hmm_alloc(size_t bytes) { return __hmm_alloc(bytes, HMM_BO_PRIVATE, NULL); } ia_css_ptr hmm_create_from_vmalloc_buf(size_t bytes, void *vmalloc_addr) { return __hmm_alloc(bytes, HMM_BO_VMALLOC, vmalloc_addr); } void hmm_free(ia_css_ptr virt) { struct hmm_buffer_object *bo; dev_dbg(atomisp_dev, "%s: free 0x%08x\n", __func__, virt); if (WARN_ON(virt == mmgr_EXCEPTION)) return; bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt); if (!bo) { 
dev_err(atomisp_dev, "can not find buffer object start with address 0x%x\n", (unsigned int)virt); return; } hmm_bo_unbind(bo); hmm_bo_free_pages(bo); hmm_bo_unref(bo); } static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr) { if (!bo) { dev_err(atomisp_dev, "can not find buffer object contains address 0x%x\n", ptr); return -EINVAL; } if (!hmm_bo_page_allocated(bo)) { dev_err(atomisp_dev, "buffer object has no page allocated.\n"); return -EINVAL; } if (!hmm_bo_allocated(bo)) { dev_err(atomisp_dev, "buffer object has no virtual address space allocated.\n"); return -EINVAL; } return 0; } /* Read function in ISP memory management */ static int load_and_flush_by_kmap(ia_css_ptr virt, void *data, unsigned int bytes) { struct hmm_buffer_object *bo; unsigned int idx, offset, len; char *src, *des; int ret; bo = hmm_bo_device_search_in_range(&bo_device, virt); ret = hmm_check_bo(bo, virt); if (ret) return ret; des = (char *)data; while (bytes) { idx = (virt - bo->start) >> PAGE_SHIFT; offset = (virt - bo->start) - (idx << PAGE_SHIFT); src = (char *)kmap_local_page(bo->pages[idx]) + offset; if ((bytes + offset) >= PAGE_SIZE) { len = PAGE_SIZE - offset; bytes -= len; } else { len = bytes; bytes = 0; } virt += len; /* update virt for next loop */ if (des) { memcpy(des, src, len); des += len; } clflush_cache_range(src, len); kunmap_local(src); } return 0; } /* Read function in ISP memory management */ static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes) { struct hmm_buffer_object *bo; int ret; bo = hmm_bo_device_search_in_range(&bo_device, virt); ret = hmm_check_bo(bo, virt); if (ret) return ret; if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { void *src = bo->vmap_addr; src += (virt - bo->start); memcpy(data, src, bytes); if (bo->status & HMM_BO_VMAPED_CACHED) clflush_cache_range(src, bytes); } else { void *vptr; vptr = hmm_bo_vmap(bo, true); if (!vptr) return load_and_flush_by_kmap(virt, data, bytes); else 
vptr = vptr + (virt - bo->start); memcpy(data, vptr, bytes); clflush_cache_range(vptr, bytes); hmm_bo_vunmap(bo); } return 0; } /* Read function in ISP memory management */ int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes) { if (!virt) { dev_warn(atomisp_dev, "hmm_store: address is NULL\n"); return -EINVAL; } if (!data) { dev_err(atomisp_dev, "hmm_store: data is a NULL argument\n"); return -EINVAL; } return load_and_flush(virt, data, bytes); } /* Flush hmm data from the data cache */ int hmm_flush(ia_css_ptr virt, unsigned int bytes) { return load_and_flush(virt, NULL, bytes); } /* Write function in ISP memory management */ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes) { struct hmm_buffer_object *bo; unsigned int idx, offset, len; char *src, *des; int ret; if (!virt) { dev_warn(atomisp_dev, "hmm_store: address is NULL\n"); return -EINVAL; } if (!data) { dev_err(atomisp_dev, "hmm_store: data is a NULL argument\n"); return -EINVAL; } bo = hmm_bo_device_search_in_range(&bo_device, virt); ret = hmm_check_bo(bo, virt); if (ret) return ret; if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { void *dst = bo->vmap_addr; dst += (virt - bo->start); memcpy(dst, data, bytes); if (bo->status & HMM_BO_VMAPED_CACHED) clflush_cache_range(dst, bytes); } else { void *vptr; vptr = hmm_bo_vmap(bo, true); if (vptr) { vptr = vptr + (virt - bo->start); memcpy(vptr, data, bytes); clflush_cache_range(vptr, bytes); hmm_bo_vunmap(bo); return 0; } } src = (char *)data; while (bytes) { idx = (virt - bo->start) >> PAGE_SHIFT; offset = (virt - bo->start) - (idx << PAGE_SHIFT); des = (char *)kmap_local_page(bo->pages[idx]); if (!des) { dev_err(atomisp_dev, "kmap buffer object page failed: pg_idx = %d\n", idx); return -EINVAL; } des += offset; if ((bytes + offset) >= PAGE_SIZE) { len = PAGE_SIZE - offset; bytes -= len; } else { len = bytes; bytes = 0; } virt += len; memcpy(des, src, len); src += len; clflush_cache_range(des, len); 
kunmap_local(des); } return 0; } /* memset function in ISP memory management */ int hmm_set(ia_css_ptr virt, int c, unsigned int bytes) { struct hmm_buffer_object *bo; unsigned int idx, offset, len; char *des; int ret; bo = hmm_bo_device_search_in_range(&bo_device, virt); ret = hmm_check_bo(bo, virt); if (ret) return ret; if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) { void *dst = bo->vmap_addr; dst += (virt - bo->start); memset(dst, c, bytes); if (bo->status & HMM_BO_VMAPED_CACHED) clflush_cache_range(dst, bytes); } else { void *vptr; vptr = hmm_bo_vmap(bo, true); if (vptr) { vptr = vptr + (virt - bo->start); memset(vptr, c, bytes); clflush_cache_range(vptr, bytes); hmm_bo_vunmap(bo); return 0; } } while (bytes) { idx = (virt - bo->start) >> PAGE_SHIFT; offset = (virt - bo->start) - (idx << PAGE_SHIFT); des = (char *)kmap_local_page(bo->pages[idx]) + offset; if ((bytes + offset) >= PAGE_SIZE) { len = PAGE_SIZE - offset; bytes -= len; } else { len = bytes; bytes = 0; } virt += len; memset(des, c, len); clflush_cache_range(des, len); kunmap_local(des); } return 0; } /* Virtual address to physical address convert */ phys_addr_t hmm_virt_to_phys(ia_css_ptr virt) { unsigned int idx, offset; struct hmm_buffer_object *bo; bo = hmm_bo_device_search_in_range(&bo_device, virt); if (!bo) { dev_err(atomisp_dev, "can not find buffer object contains address 0x%x\n", virt); return -1; } idx = (virt - bo->start) >> PAGE_SHIFT; offset = (virt - bo->start) - (idx << PAGE_SHIFT); return page_to_phys(bo->pages[idx]) + offset; } int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt) { struct hmm_buffer_object *bo; bo = hmm_bo_device_search_start(&bo_device, virt); if (!bo) { dev_err(atomisp_dev, "can not find buffer object start with address 0x%x\n", virt); return -EINVAL; } return hmm_bo_mmap(vma, bo); } /* Map ISP virtual address into IA virtual address */ void *hmm_vmap(ia_css_ptr virt, bool cached) { struct hmm_buffer_object *bo; void *ptr; bo = 
	hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"can not find buffer object contains address 0x%x\n",
			virt);
		return NULL;
	}

	ptr = hmm_bo_vmap(bo, cached);
	if (ptr)
		/* the vmap starts at bo->start; offset into it for @virt */
		return ptr + (virt - bo->start);
	else
		return NULL;
}

/* Flush the memory which is mapped as cached memory through hmm_vmap */
void hmm_flush_vmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "can not find buffer object contains address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_flush_vmap(bo);
}

/* Tear down the kernel mapping of the bo containing ISP address @virt. */
void hmm_vunmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "can not find buffer object contains address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_vunmap(bo);
}
linux-master
drivers/staging/media/atomisp/pci/hmm/hmm.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "hmm.h" #include "ia_css_refcount.h" #include "sh_css_defs.h" #include "platform_support.h" #include "assert_support.h" #include "ia_css_debug.h" /* TODO: enable for other memory aswell now only for ia_css_ptr */ struct ia_css_refcount_entry { u32 count; ia_css_ptr data; s32 id; }; struct ia_css_refcount_list { u32 size; struct ia_css_refcount_entry *items; }; static struct ia_css_refcount_list myrefcount; static struct ia_css_refcount_entry *refcount_find_entry(ia_css_ptr ptr, bool firstfree) { u32 i; if (ptr == 0) return NULL; if (!myrefcount.items) { ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "%s(): Ref count not initialized!\n", __func__); return NULL; } for (i = 0; i < myrefcount.size; i++) { if ((&myrefcount.items[i])->data == 0) { if (firstfree) { /* for new entry */ return &myrefcount.items[i]; } } if ((&myrefcount.items[i])->data == ptr) { /* found entry */ return &myrefcount.items[i]; } } return NULL; } int ia_css_refcount_init(uint32_t size) { int err = 0; if (size == 0) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s(): Size of 0 for Ref count init!\n", __func__); return -EINVAL; } if (myrefcount.items) { ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s(): Ref count is already initialized\n", __func__); return -EINVAL; } myrefcount.items = kvmalloc(sizeof(struct ia_css_refcount_entry) * size, GFP_KERNEL); if (!myrefcount.items) err = -ENOMEM; if (!err) { memset(myrefcount.items, 0, 
sizeof(struct ia_css_refcount_entry) * size); myrefcount.size = size; } return err; } void ia_css_refcount_uninit(void) { struct ia_css_refcount_entry *entry; u32 i; ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s() entry\n", __func__); for (i = 0; i < myrefcount.size; i++) { /* driver verifier tool has issues with &arr[i] and prefers arr + i; as these are actually equivalent the line below uses + i */ entry = myrefcount.items + i; if (entry->data != mmgr_NULL) { /* ia_css_debug_dtrace(IA_CSS_DBG_TRACE, "ia_css_refcount_uninit: freeing (%x)\n", entry->data);*/ hmm_free(entry->data); entry->data = mmgr_NULL; entry->count = 0; entry->id = 0; } } kvfree(myrefcount.items); myrefcount.items = NULL; myrefcount.size = 0; ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s() leave\n", __func__); } ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr) { struct ia_css_refcount_entry *entry; if (ptr == mmgr_NULL) return ptr; entry = refcount_find_entry(ptr, false); ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s(%x) 0x%x\n", __func__, id, ptr); if (!entry) { entry = refcount_find_entry(ptr, true); assert(entry); if (!entry) return mmgr_NULL; entry->id = id; } if (entry->id != id) { ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "%s(): Ref count IDS do not match!\n", __func__); return mmgr_NULL; } if (entry->data == ptr) entry->count += 1; else if (entry->data == mmgr_NULL) { entry->data = ptr; entry->count = 1; } else return mmgr_NULL; return ptr; } bool ia_css_refcount_decrement(s32 id, ia_css_ptr ptr) { struct ia_css_refcount_entry *entry; ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s(%x) 0x%x\n", __func__, id, ptr); if (ptr == mmgr_NULL) return false; entry = refcount_find_entry(ptr, false); if (entry) { if (entry->id != id) { ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR, "%s(): Ref count IDS do not match!\n", __func__); return false; } if (entry->count > 0) { entry->count -= 1; if (entry->count == 0) { /* ia_css_debug_dtrace(IA_CSS_DBEUG_TRACE, "ia_css_refcount_decrement: freeing\n");*/ 
				hmm_free(ptr);
				entry->data = mmgr_NULL;
				entry->id = 0;
			}
			return true;
		}
	}

	/* SHOULD NOT HAPPEN: ptr not managed by refcount, or not valid anymore */
	if (entry)
		IA_CSS_ERROR("id %x, ptr 0x%x entry %p entry->id %x entry->count %d\n",
			     id, ptr, entry, entry->id, entry->count);
	else
		IA_CSS_ERROR("entry NULL\n");
	assert(false);

	return false;
}

/*
 * Return true when @ptr is tracked with exactly one reference, or is
 * not tracked at all; false for mmgr_NULL or a multiply-referenced
 * entry.
 */
bool ia_css_refcount_is_single(ia_css_ptr ptr)
{
	struct ia_css_refcount_entry *entry;

	if (ptr == mmgr_NULL)
		return false;

	entry = refcount_find_entry(ptr, false);

	if (entry)
		return (entry->count == 1);

	return true;
}

/*
 * Release every tracked buffer whose id matches @id, using
 * @clear_func_ptr when provided (hmm_free() otherwise), then reset the
 * table entry.  Entries are expected to have reached a zero count.
 */
void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr)
{
	struct ia_css_refcount_entry *entry;
	u32 i;
	u32 count = 0;

	assert(clear_func_ptr);
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s(%x)\n",
			    __func__, id);
	for (i = 0; i < myrefcount.size; i++) {
		/* driver verifier tool has issues with &arr[i]
		   and prefers arr + i; as these are actually equivalent
		   the line below uses + i
		*/
		entry = myrefcount.items + i;
		if ((entry->data != mmgr_NULL) && (entry->id == id)) {
			ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
					    "%s: %x: 0x%x\n", __func__,
					    id, entry->data);
			if (clear_func_ptr) {
				/* clear using provided function */
				clear_func_ptr(entry->data);
			} else {
				/* unreachable: assert(clear_func_ptr) above */
				ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
						    "%s: using hmm_free: no clear_func\n",
						    __func__);
				hmm_free(entry->data);
			}

			if (entry->count != 0) {
				IA_CSS_WARNING("Ref count for entry %x is not zero!",
					       entry->id);
			}

			assert(entry->count == 0);

			entry->data = mmgr_NULL;
			entry->count = 0;
			entry->id = 0;
			count++;
		}
	}
	ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
			    "%s(%x): cleared %d\n", __func__, id,
			    count);
}

/* Return true when @ptr is currently tracked by the refcount table. */
bool ia_css_refcount_is_valid(ia_css_ptr ptr)
{
	struct ia_css_refcount_entry *entry;

	if (ptr == mmgr_NULL)
		return false;

	entry = refcount_find_entry(ptr, false);

	return entry;
}
linux-master
drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ #include "ia_css_circbuf.h" #include <assert_support.h> /********************************************************************** * * Forward declarations. * **********************************************************************/ /* * @brief Read the oldest element from the circular buffer. * Read the oldest element WITHOUT checking whehter the * circular buffer is empty or not. The oldest element is * also removed out from the circular buffer. * * @param cb The pointer to the circular buffer. * * @return the oldest element. */ static inline ia_css_circbuf_elem_t ia_css_circbuf_read(ia_css_circbuf_t *cb); /* * @brief Shift a chunk of elements in the circular buffer. * A chunk of elements (i.e. the ones from the "start" position * to the "chunk_src" position) are shifted in the circular buffer, * along the direction of new elements coming. * * @param cb The pointer to the circular buffer. * @param chunk_src The position at which the first element in the chunk is. * @param chunk_dest The position to which the first element in the chunk would be shift. */ static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb, u32 chunk_src, uint32_t chunk_dest); /* * @brief Get the "val" field in the element. * * @param elem The pointer to the element. * * @return the "val" field. 
*/ static inline uint32_t ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem); /********************************************************************** * * Non-inline functions. * **********************************************************************/ /* * @brief Create the circular buffer. * Refer to "ia_css_circbuf.h" for details. */ void ia_css_circbuf_create(ia_css_circbuf_t *cb, ia_css_circbuf_elem_t *elems, ia_css_circbuf_desc_t *desc) { u32 i; OP___assert(desc); cb->desc = desc; /* Initialize to defaults */ cb->desc->start = 0; cb->desc->end = 0; cb->desc->step = 0; for (i = 0; i < cb->desc->size; i++) ia_css_circbuf_elem_init(&elems[i]); cb->elems = elems; } /* * @brief Destroy the circular buffer. * Refer to "ia_css_circbuf.h" for details. */ void ia_css_circbuf_destroy(ia_css_circbuf_t *cb) { cb->desc = NULL; cb->elems = NULL; } /* * @brief Pop a value out of the circular buffer. * Refer to "ia_css_circbuf.h" for details. */ uint32_t ia_css_circbuf_pop(ia_css_circbuf_t *cb) { u32 ret; ia_css_circbuf_elem_t elem; assert(!ia_css_circbuf_is_empty(cb)); /* read an element from the buffer */ elem = ia_css_circbuf_read(cb); ret = ia_css_circbuf_elem_get_val(&elem); return ret; } /* * @brief Extract a value out of the circular buffer. * Refer to "ia_css_circbuf.h" for details. */ uint32_t ia_css_circbuf_extract(ia_css_circbuf_t *cb, int offset) { int max_offset; u32 val; u32 pos; u32 src_pos; u32 dest_pos; /* get the maximum offest */ max_offset = ia_css_circbuf_get_offset(cb, cb->desc->start, cb->desc->end); max_offset--; /* * Step 1: When the target element is at the "start" position. */ if (offset == 0) { val = ia_css_circbuf_pop(cb); return val; } /* * Step 2: When the target element is out of the range. */ if (offset > max_offset) { val = 0; return val; } /* * Step 3: When the target element is between the "start" and * "end" position. 
*/ /* get the position of the target element */ pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset); /* get the value from the target element */ val = ia_css_circbuf_elem_get_val(&cb->elems[pos]); /* shift the elements */ src_pos = ia_css_circbuf_get_pos_at_offset(cb, pos, -1); dest_pos = pos; ia_css_circbuf_shift_chunk(cb, src_pos, dest_pos); return val; } /* * @brief Peek an element from the circular buffer. * Refer to "ia_css_circbuf.h" for details. */ uint32_t ia_css_circbuf_peek(ia_css_circbuf_t *cb, int offset) { int pos; pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->end, offset); /* get the value at the position */ return cb->elems[pos].val; } /* * @brief Get the value of an element from the circular buffer. * Refer to "ia_css_circbuf.h" for details. */ uint32_t ia_css_circbuf_peek_from_start(ia_css_circbuf_t *cb, int offset) { int pos; pos = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, offset); /* get the value at the position */ return cb->elems[pos].val; } /* @brief increase size of a circular buffer. * Use 'CAUTION' before using this function. This was added to * support / fix issue with increasing size for tagger only * Please refer to "ia_css_circbuf.h" for details. */ bool ia_css_circbuf_increase_size( ia_css_circbuf_t *cb, unsigned int sz_delta, ia_css_circbuf_elem_t *elems) { u8 curr_size; u8 curr_end; unsigned int i = 0; if (!cb || sz_delta == 0) return false; curr_size = cb->desc->size; curr_end = cb->desc->end; /* We assume cb was pre defined as global to allow * increase in size */ /* FM: are we sure this cannot cause size to become too big? 
*/ if (((uint8_t)(cb->desc->size + (uint8_t)sz_delta) > cb->desc->size) && ((uint8_t)sz_delta == sz_delta)) cb->desc->size += (uint8_t)sz_delta; else return false; /* overflow in size */ /* If elems are passed update them else we assume its been taken * care before calling this function */ if (elems) { /* cb element array size will not be increased dynamically, * but pointers to new elements can be added at the end * of existing pre defined cb element array of * size >= new size if not already added */ for (i = curr_size; i < cb->desc->size; i++) cb->elems[i] = elems[i - curr_size]; } /* Fix Start / End */ if (curr_end < cb->desc->start) { if (curr_end == 0) { /* Easily fix End */ cb->desc->end = curr_size; } else { /* Move elements and fix Start*/ ia_css_circbuf_shift_chunk(cb, curr_size - 1, curr_size + sz_delta - 1); } } return true; } /**************************************************************** * * Inline functions. * ****************************************************************/ /* * @brief Get the "val" field in the element. * Refer to "Forward declarations" for details. */ static inline uint32_t ia_css_circbuf_elem_get_val(ia_css_circbuf_elem_t *elem) { return elem->val; } /* * @brief Read the oldest element from the circular buffer. * Refer to "Forward declarations" for details. */ static inline ia_css_circbuf_elem_t ia_css_circbuf_read(ia_css_circbuf_t *cb) { ia_css_circbuf_elem_t elem; /* get the element from the target position */ elem = cb->elems[cb->desc->start]; /* clear the target position */ ia_css_circbuf_elem_init(&cb->elems[cb->desc->start]); /* adjust the "start" position */ cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb, cb->desc->start, 1); return elem; } /* * @brief Shift a chunk of elements in the circular buffer. * Refer to "Forward declarations" for details. 
*/
static inline void ia_css_circbuf_shift_chunk(ia_css_circbuf_t *cb,
	u32 chunk_src, uint32_t chunk_dest)
{
	int chunk_offset;
	int chunk_sz;
	int i;

	/* get the chunk offset and size */
	chunk_offset = ia_css_circbuf_get_offset(cb,
		       chunk_src, chunk_dest);
	chunk_sz = ia_css_circbuf_get_offset(cb, cb->desc->start, chunk_src) + 1;

	/*
	 * shift each element to its terminal position, walking backwards
	 * from chunk_src toward the buffer's "start" position
	 */
	for (i = 0; i < chunk_sz; i++) {
		/* copy the element from the source to the destination */
		ia_css_circbuf_elem_cpy(&cb->elems[chunk_src],
					&cb->elems[chunk_dest]);
		/* clear the source position */
		ia_css_circbuf_elem_init(&cb->elems[chunk_src]);

		/* adjust the source/terminal positions */
		chunk_src = ia_css_circbuf_get_pos_at_offset(cb,
			    chunk_src, -1);
		chunk_dest = ia_css_circbuf_get_pos_at_offset(cb,
			     chunk_dest, -1);
	}

	/*
	 * adjust the index "start"; NOTE(review): assumes chunk_dest lies
	 * ahead of chunk_src so chunk_offset moves "start" forward —
	 * confirm with ia_css_circbuf_extract()/increase_size() callers
	 */
	cb->desc->start = ia_css_circbuf_get_pos_at_offset(cb,
			  cb->desc->start,
			  chunk_offset);
}
linux-master
drivers/staging/media/atomisp/pci/base/circbuf/src/circbuf.c
// SPDX-License-Identifier: GPL-2.0 /* * LED flash driver for LM3554 * * Copyright (c) 2010-2012 Intel Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/slab.h> #include "../include/media/lm3554.h" #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <linux/acpi.h> #include "../include/linux/atomisp_gmin_platform.h" #include "../include/linux/atomisp.h" /* Registers */ #define LM3554_TORCH_BRIGHTNESS_REG 0xA0 #define LM3554_TORCH_MODE_SHIFT 0 #define LM3554_TORCH_CURRENT_SHIFT 3 #define LM3554_INDICATOR_CURRENT_SHIFT 6 #define LM3554_FLASH_BRIGHTNESS_REG 0xB0 #define LM3554_FLASH_MODE_SHIFT 0 #define LM3554_FLASH_CURRENT_SHIFT 3 #define LM3554_STROBE_SENSITIVITY_SHIFT 7 #define LM3554_FLASH_DURATION_REG 0xC0 #define LM3554_FLASH_TIMEOUT_SHIFT 0 #define LM3554_CURRENT_LIMIT_SHIFT 5 #define LM3554_FLAGS_REG 0xD0 #define LM3554_FLAG_TIMEOUT BIT(0) #define LM3554_FLAG_THERMAL_SHUTDOWN BIT(1) #define LM3554_FLAG_LED_FAULT BIT(2) #define LM3554_FLAG_TX1_INTERRUPT BIT(3) #define LM3554_FLAG_TX2_INTERRUPT BIT(4) #define LM3554_FLAG_LED_THERMAL_FAULT BIT(5) #define LM3554_FLAG_UNUSED BIT(6) #define LM3554_FLAG_INPUT_VOLTAGE_LOW BIT(7) #define LM3554_CONFIG_REG_1 0xE0 #define LM3554_ENVM_TX2_SHIFT 5 #define LM3554_TX2_POLARITY_SHIFT 6 struct lm3554 { struct v4l2_subdev sd; struct mutex power_lock; struct v4l2_ctrl_handler ctrl_handler; int power_count; unsigned int mode; int timeout; u8 torch_current; 
u8 indicator_current; u8 flash_current; struct timer_list flash_off_delay; struct lm3554_platform_data *pdata; }; #define to_lm3554(p_sd) container_of(p_sd, struct lm3554, sd) /* Return negative errno else zero on success */ static int lm3554_write(struct lm3554 *flash, u8 addr, u8 val) { struct i2c_client *client = v4l2_get_subdevdata(&flash->sd); int ret; ret = i2c_smbus_write_byte_data(client, addr, val); dev_dbg(&client->dev, "Write Addr:%02X Val:%02X %s\n", addr, val, ret < 0 ? "fail" : "ok"); return ret; } /* Return negative errno else a data byte received from the device. */ static int lm3554_read(struct lm3554 *flash, u8 addr) { struct i2c_client *client = v4l2_get_subdevdata(&flash->sd); int ret; ret = i2c_smbus_read_byte_data(client, addr); dev_dbg(&client->dev, "Read Addr:%02X Val:%02X %s\n", addr, ret, ret < 0 ? "fail" : "ok"); return ret; } /* ----------------------------------------------------------------------------- * Hardware configuration */ static int lm3554_set_mode(struct lm3554 *flash, unsigned int mode) { u8 val; int ret; val = (mode << LM3554_FLASH_MODE_SHIFT) | (flash->flash_current << LM3554_FLASH_CURRENT_SHIFT); ret = lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, val); if (ret == 0) flash->mode = mode; return ret; } static int lm3554_set_torch(struct lm3554 *flash) { u8 val; val = (flash->mode << LM3554_TORCH_MODE_SHIFT) | (flash->torch_current << LM3554_TORCH_CURRENT_SHIFT) | (flash->indicator_current << LM3554_INDICATOR_CURRENT_SHIFT); return lm3554_write(flash, LM3554_TORCH_BRIGHTNESS_REG, val); } static int lm3554_set_flash(struct lm3554 *flash) { u8 val; val = (flash->mode << LM3554_FLASH_MODE_SHIFT) | (flash->flash_current << LM3554_FLASH_CURRENT_SHIFT); return lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, val); } static int lm3554_set_duration(struct lm3554 *flash) { u8 val; val = (flash->timeout << LM3554_FLASH_TIMEOUT_SHIFT) | (flash->pdata->current_limit << LM3554_CURRENT_LIMIT_SHIFT); return lm3554_write(flash, 
LM3554_FLASH_DURATION_REG, val); } static int lm3554_set_config1(struct lm3554 *flash) { u8 val; val = (flash->pdata->envm_tx2 << LM3554_ENVM_TX2_SHIFT) | (flash->pdata->tx2_polarity << LM3554_TX2_POLARITY_SHIFT); return lm3554_write(flash, LM3554_CONFIG_REG_1, val); } /* ----------------------------------------------------------------------------- * Hardware trigger */ static void lm3554_flash_off_delay(struct timer_list *t) { struct lm3554 *flash = from_timer(flash, t, flash_off_delay); struct lm3554_platform_data *pdata = flash->pdata; gpiod_set_value(pdata->gpio_strobe, 0); } static int lm3554_hw_strobe(struct i2c_client *client, bool strobe) { int ret, timer_pending; struct v4l2_subdev *sd = i2c_get_clientdata(client); struct lm3554 *flash = to_lm3554(sd); struct lm3554_platform_data *pdata = flash->pdata; /* * An abnormal high flash current is observed when strobe off the * flash. Workaround here is firstly set flash current to lower level, * wait a short moment, and then strobe off the flash. */ timer_pending = del_timer_sync(&flash->flash_off_delay); /* Flash off */ if (!strobe) { /* set current to 70mA and wait a while */ ret = lm3554_write(flash, LM3554_FLASH_BRIGHTNESS_REG, 0); if (ret < 0) goto err; mod_timer(&flash->flash_off_delay, jiffies + msecs_to_jiffies(LM3554_TIMER_DELAY)); return 0; } /* Flash on */ /* * If timer is killed before run, flash is not strobe off, * so must strobe off here */ if (timer_pending) gpiod_set_value(pdata->gpio_strobe, 0); /* Restore flash current settings */ ret = lm3554_set_flash(flash); if (ret < 0) goto err; /* Strobe on Flash */ gpiod_set_value(pdata->gpio_strobe, 1); return 0; err: dev_err(&client->dev, "failed to %s flash strobe (%d)\n", strobe ? 
"on" : "off", ret); return ret; } /* ----------------------------------------------------------------------------- * V4L2 controls */ static int lm3554_read_status(struct lm3554 *flash) { int ret; struct i2c_client *client = v4l2_get_subdevdata(&flash->sd); /* NOTE: reading register clear fault status */ ret = lm3554_read(flash, LM3554_FLAGS_REG); if (ret < 0) return ret; /* * Accordingly to datasheet we read back '1' in bit 6. * Clear it first. */ ret &= ~LM3554_FLAG_UNUSED; /* * Do not take TX1/TX2 signal as an error * because MSIC will not turn off flash, but turn to * torch mode according to gsm modem signal by hardware. */ ret &= ~(LM3554_FLAG_TX1_INTERRUPT | LM3554_FLAG_TX2_INTERRUPT); if (ret > 0) dev_dbg(&client->dev, "LM3554 flag status: %02x\n", ret); return ret; } static int lm3554_s_flash_timeout(struct v4l2_subdev *sd, u32 val) { struct lm3554 *flash = to_lm3554(sd); val = clamp(val, LM3554_MIN_TIMEOUT, LM3554_MAX_TIMEOUT); val = val / LM3554_TIMEOUT_STEPSIZE - 1; flash->timeout = val; return lm3554_set_duration(flash); } static int lm3554_g_flash_timeout(struct v4l2_subdev *sd, s32 *val) { struct lm3554 *flash = to_lm3554(sd); *val = (u32)(flash->timeout + 1) * LM3554_TIMEOUT_STEPSIZE; return 0; } static int lm3554_s_flash_intensity(struct v4l2_subdev *sd, u32 intensity) { struct lm3554 *flash = to_lm3554(sd); intensity = LM3554_CLAMP_PERCENTAGE(intensity); intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_FLASH_STEP); flash->flash_current = intensity; return lm3554_set_flash(flash); } static int lm3554_g_flash_intensity(struct v4l2_subdev *sd, s32 *val) { struct lm3554 *flash = to_lm3554(sd); *val = LM3554_VALUE_TO_PERCENT((u32)flash->flash_current, LM3554_FLASH_STEP); return 0; } static int lm3554_s_torch_intensity(struct v4l2_subdev *sd, u32 intensity) { struct lm3554 *flash = to_lm3554(sd); intensity = LM3554_CLAMP_PERCENTAGE(intensity); intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_TORCH_STEP); flash->torch_current = intensity; return 
lm3554_set_torch(flash); } static int lm3554_g_torch_intensity(struct v4l2_subdev *sd, s32 *val) { struct lm3554 *flash = to_lm3554(sd); *val = LM3554_VALUE_TO_PERCENT((u32)flash->torch_current, LM3554_TORCH_STEP); return 0; } static int lm3554_s_indicator_intensity(struct v4l2_subdev *sd, u32 intensity) { struct lm3554 *flash = to_lm3554(sd); intensity = LM3554_CLAMP_PERCENTAGE(intensity); intensity = LM3554_PERCENT_TO_VALUE(intensity, LM3554_INDICATOR_STEP); flash->indicator_current = intensity; return lm3554_set_torch(flash); } static int lm3554_g_indicator_intensity(struct v4l2_subdev *sd, s32 *val) { struct lm3554 *flash = to_lm3554(sd); *val = LM3554_VALUE_TO_PERCENT((u32)flash->indicator_current, LM3554_INDICATOR_STEP); return 0; } static int lm3554_s_flash_strobe(struct v4l2_subdev *sd, u32 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); return lm3554_hw_strobe(client, val); } static int lm3554_s_flash_mode(struct v4l2_subdev *sd, u32 new_mode) { struct lm3554 *flash = to_lm3554(sd); unsigned int mode; switch (new_mode) { case ATOMISP_FLASH_MODE_OFF: mode = LM3554_MODE_SHUTDOWN; break; case ATOMISP_FLASH_MODE_FLASH: mode = LM3554_MODE_FLASH; break; case ATOMISP_FLASH_MODE_INDICATOR: mode = LM3554_MODE_INDICATOR; break; case ATOMISP_FLASH_MODE_TORCH: mode = LM3554_MODE_TORCH; break; default: return -EINVAL; } return lm3554_set_mode(flash, mode); } static int lm3554_g_flash_mode(struct v4l2_subdev *sd, s32 *val) { struct lm3554 *flash = to_lm3554(sd); *val = flash->mode; return 0; } static int lm3554_g_flash_status(struct v4l2_subdev *sd, s32 *val) { struct lm3554 *flash = to_lm3554(sd); int value; value = lm3554_read_status(flash); if (value < 0) return value; if (value & LM3554_FLAG_TIMEOUT) *val = ATOMISP_FLASH_STATUS_TIMEOUT; else if (value > 0) *val = ATOMISP_FLASH_STATUS_HW_ERROR; else *val = ATOMISP_FLASH_STATUS_OK; return 0; } static int lm3554_g_flash_status_register(struct v4l2_subdev *sd, s32 *val) { struct lm3554 *flash = 
to_lm3554(sd); int ret; ret = lm3554_read(flash, LM3554_FLAGS_REG); if (ret < 0) return ret; *val = ret; return 0; } static int lm3554_s_ctrl(struct v4l2_ctrl *ctrl) { struct lm3554 *dev = container_of(ctrl->handler, struct lm3554, ctrl_handler); int ret = 0; switch (ctrl->id) { case V4L2_CID_FLASH_TIMEOUT: ret = lm3554_s_flash_timeout(&dev->sd, ctrl->val); break; case V4L2_CID_FLASH_INTENSITY: ret = lm3554_s_flash_intensity(&dev->sd, ctrl->val); break; case V4L2_CID_FLASH_TORCH_INTENSITY: ret = lm3554_s_torch_intensity(&dev->sd, ctrl->val); break; case V4L2_CID_FLASH_INDICATOR_INTENSITY: ret = lm3554_s_indicator_intensity(&dev->sd, ctrl->val); break; case V4L2_CID_FLASH_STROBE: ret = lm3554_s_flash_strobe(&dev->sd, ctrl->val); break; case V4L2_CID_FLASH_MODE: ret = lm3554_s_flash_mode(&dev->sd, ctrl->val); break; default: ret = -EINVAL; } return ret; } static int lm3554_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct lm3554 *dev = container_of(ctrl->handler, struct lm3554, ctrl_handler); int ret = 0; switch (ctrl->id) { case V4L2_CID_FLASH_TIMEOUT: ret = lm3554_g_flash_timeout(&dev->sd, &ctrl->val); break; case V4L2_CID_FLASH_INTENSITY: ret = lm3554_g_flash_intensity(&dev->sd, &ctrl->val); break; case V4L2_CID_FLASH_TORCH_INTENSITY: ret = lm3554_g_torch_intensity(&dev->sd, &ctrl->val); break; case V4L2_CID_FLASH_INDICATOR_INTENSITY: ret = lm3554_g_indicator_intensity(&dev->sd, &ctrl->val); break; case V4L2_CID_FLASH_MODE: ret = lm3554_g_flash_mode(&dev->sd, &ctrl->val); break; case V4L2_CID_FLASH_STATUS: ret = lm3554_g_flash_status(&dev->sd, &ctrl->val); break; case V4L2_CID_FLASH_STATUS_REGISTER: ret = lm3554_g_flash_status_register(&dev->sd, &ctrl->val); break; default: ret = -EINVAL; } return ret; } static const struct v4l2_ctrl_ops ctrl_ops = { .s_ctrl = lm3554_s_ctrl, .g_volatile_ctrl = lm3554_g_volatile_ctrl }; static const struct v4l2_ctrl_config lm3554_controls[] = { { .ops = &ctrl_ops, .id = V4L2_CID_FLASH_TIMEOUT, .type = V4L2_CTRL_TYPE_INTEGER, .name = 
"Flash Timeout", .min = 0x0, .max = LM3554_MAX_TIMEOUT, .step = 0x01, .def = LM3554_DEFAULT_TIMEOUT, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_FLASH_INTENSITY, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Flash Intensity", .min = LM3554_MIN_PERCENT, .max = LM3554_MAX_PERCENT, .step = 0x01, .def = LM3554_FLASH_DEFAULT_BRIGHTNESS, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_FLASH_TORCH_INTENSITY, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Torch Intensity", .min = LM3554_MIN_PERCENT, .max = LM3554_MAX_PERCENT, .step = 0x01, .def = LM3554_TORCH_DEFAULT_BRIGHTNESS, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_FLASH_INDICATOR_INTENSITY, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Indicator Intensity", .min = LM3554_MIN_PERCENT, .max = LM3554_MAX_PERCENT, .step = 0x01, .def = LM3554_INDICATOR_DEFAULT_BRIGHTNESS, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_FLASH_STROBE, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Flash Strobe", .min = 0, .max = 1, .step = 1, .def = 0, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_FLASH_MODE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Flash Mode", .min = 0, .max = 100, .step = 1, .def = ATOMISP_FLASH_MODE_OFF, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_FLASH_STATUS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Flash Status", .min = ATOMISP_FLASH_STATUS_OK, .max = ATOMISP_FLASH_STATUS_TIMEOUT, .step = 1, .def = ATOMISP_FLASH_STATUS_OK, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_FLASH_STATUS_REGISTER, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Flash Status Register", .min = 0, .max = 255, .step = 1, .def = 0, .flags = 0, }, }; /* ----------------------------------------------------------------------------- * V4L2 subdev core operations */ /* Put device into known state. 
*/ static int lm3554_setup(struct lm3554 *flash) { struct i2c_client *client = v4l2_get_subdevdata(&flash->sd); int ret; /* clear the flags register */ ret = lm3554_read(flash, LM3554_FLAGS_REG); if (ret < 0) return ret; dev_dbg(&client->dev, "Fault info: %02x\n", ret); ret = lm3554_set_config1(flash); if (ret < 0) return ret; ret = lm3554_set_duration(flash); if (ret < 0) return ret; ret = lm3554_set_torch(flash); if (ret < 0) return ret; ret = lm3554_set_flash(flash); if (ret < 0) return ret; /* read status */ ret = lm3554_read_status(flash); if (ret < 0) return ret; return ret ? -EIO : 0; } static int __lm3554_s_power(struct lm3554 *flash, int power) { struct lm3554_platform_data *pdata = flash->pdata; int ret; /*initialize flash driver*/ gpiod_set_value(pdata->gpio_reset, power); usleep_range(100, 100 + 1); if (power) { /* Setup default values. This makes sure that the chip * is in a known state. */ ret = lm3554_setup(flash); if (ret < 0) { __lm3554_s_power(flash, 0); return ret; } } return 0; } static int lm3554_s_power(struct v4l2_subdev *sd, int power) { struct lm3554 *flash = to_lm3554(sd); int ret = 0; mutex_lock(&flash->power_lock); if (flash->power_count == !power) { ret = __lm3554_s_power(flash, !!power); if (ret < 0) goto done; } flash->power_count += power ? 
1 : -1; WARN_ON(flash->power_count < 0); done: mutex_unlock(&flash->power_lock); return ret; } static const struct v4l2_subdev_core_ops lm3554_core_ops = { .s_power = lm3554_s_power, }; static const struct v4l2_subdev_ops lm3554_ops = { .core = &lm3554_core_ops, }; static int lm3554_detect(struct v4l2_subdev *sd) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct i2c_adapter *adapter = client->adapter; struct lm3554 *flash = to_lm3554(sd); int ret; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "lm3554_detect i2c error\n"); return -ENODEV; } /* Power up the flash driver and reset it */ ret = lm3554_s_power(&flash->sd, 1); if (ret < 0) { dev_err(&client->dev, "Failed to power on lm3554 LED flash\n"); } else { dev_dbg(&client->dev, "Successfully detected lm3554 LED flash\n"); lm3554_s_power(&flash->sd, 0); } return ret; } static int lm3554_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { return lm3554_s_power(sd, 1); } static int lm3554_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) { return lm3554_s_power(sd, 0); } static const struct v4l2_subdev_internal_ops lm3554_internal_ops = { .registered = lm3554_detect, .open = lm3554_open, .close = lm3554_close, }; /* ----------------------------------------------------------------------------- * I2C driver */ #ifdef CONFIG_PM static int lm3554_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct lm3554 *flash = to_lm3554(subdev); int rval; if (flash->power_count == 0) return 0; rval = __lm3554_s_power(flash, 0); dev_dbg(&client->dev, "Suspend %s\n", rval < 0 ? 
"failed" : "ok"); return rval; } static int lm3554_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct lm3554 *flash = to_lm3554(subdev); int rval; if (flash->power_count == 0) return 0; rval = __lm3554_s_power(flash, 1); dev_dbg(&client->dev, "Resume %s\n", rval < 0 ? "fail" : "ok"); return rval; } #else #define lm3554_suspend NULL #define lm3554_resume NULL #endif /* CONFIG_PM */ static int lm3554_gpio_init(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct lm3554 *flash = to_lm3554(sd); struct lm3554_platform_data *pdata = flash->pdata; int ret; if (!pdata->gpio_reset) return -EINVAL; ret = gpiod_direction_output(pdata->gpio_reset, 0); if (ret < 0) return ret; if (!pdata->gpio_strobe) return -EINVAL; ret = gpiod_direction_output(pdata->gpio_strobe, 0); if (ret < 0) return ret; return 0; } static void lm3554_gpio_uninit(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct lm3554 *flash = to_lm3554(sd); struct lm3554_platform_data *pdata = flash->pdata; int ret; ret = gpiod_direction_output(pdata->gpio_strobe, 0); if (ret < 0) dev_err(&client->dev, "gpio request/direction_output fail for gpio_strobe"); ret = gpiod_direction_output(pdata->gpio_reset, 0); if (ret < 0) dev_err(&client->dev, "gpio request/direction_output fail for gpio_reset"); } static void *lm3554_platform_data_func(struct i2c_client *client) { static struct lm3554_platform_data platform_data; platform_data.gpio_reset = gpiod_get_index(&client->dev, NULL, 2, GPIOD_OUT_LOW); if (IS_ERR(platform_data.gpio_reset)) return ERR_CAST(platform_data.gpio_reset); platform_data.gpio_strobe = gpiod_get_index(&client->dev, NULL, 0, GPIOD_OUT_LOW); if (IS_ERR(platform_data.gpio_strobe)) return ERR_CAST(platform_data.gpio_strobe); platform_data.gpio_torch = gpiod_get_index(&client->dev, NULL, 1, GPIOD_OUT_LOW); if (IS_ERR(platform_data.gpio_torch)) 
return ERR_CAST(platform_data.gpio_torch); /* Set to TX2 mode, then ENVM/TX2 pin is a power amplifier sync input: * ENVM/TX pin asserted, flash forced into torch; * ENVM/TX pin desserted, flash set back; */ platform_data.envm_tx2 = 1; platform_data.tx2_polarity = 0; /* set peak current limit to be 1000mA */ platform_data.current_limit = 0; return &platform_data; } static int lm3554_probe(struct i2c_client *client) { int err = 0; struct lm3554 *flash; unsigned int i; flash = kzalloc(sizeof(*flash), GFP_KERNEL); if (!flash) return -ENOMEM; flash->pdata = lm3554_platform_data_func(client); if (IS_ERR(flash->pdata)) { err = PTR_ERR(flash->pdata); goto free_flash; } v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops); flash->sd.internal_ops = &lm3554_internal_ops; flash->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; flash->mode = ATOMISP_FLASH_MODE_OFF; flash->timeout = LM3554_MAX_TIMEOUT / LM3554_TIMEOUT_STEPSIZE - 1; err = v4l2_ctrl_handler_init(&flash->ctrl_handler, ARRAY_SIZE(lm3554_controls)); if (err) { dev_err(&client->dev, "error initialize a ctrl_handler.\n"); goto unregister_subdev; } for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++) v4l2_ctrl_new_custom(&flash->ctrl_handler, &lm3554_controls[i], NULL); if (flash->ctrl_handler.error) { dev_err(&client->dev, "ctrl_handler error.\n"); err = flash->ctrl_handler.error; goto free_handler; } flash->sd.ctrl_handler = &flash->ctrl_handler; err = media_entity_pads_init(&flash->sd.entity, 0, NULL); if (err) { dev_err(&client->dev, "error initialize a media entity.\n"); goto free_handler; } flash->sd.entity.function = MEDIA_ENT_F_FLASH; mutex_init(&flash->power_lock); timer_setup(&flash->flash_off_delay, lm3554_flash_off_delay, 0); err = lm3554_gpio_init(client); if (err) { dev_err(&client->dev, "gpio request/direction_output fail.\n"); goto cleanup_media; } err = atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH); if (err) { dev_err(&client->dev, "fail to register atomisp i2c module.\n"); goto uninit_gpio; } return 0; 
uninit_gpio: lm3554_gpio_uninit(client); cleanup_media: media_entity_cleanup(&flash->sd.entity); free_handler: v4l2_ctrl_handler_free(&flash->ctrl_handler); unregister_subdev: v4l2_device_unregister_subdev(&flash->sd); free_flash: kfree(flash); return err; } static void lm3554_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct lm3554 *flash = to_lm3554(sd); media_entity_cleanup(&flash->sd.entity); v4l2_ctrl_handler_free(&flash->ctrl_handler); v4l2_device_unregister_subdev(sd); atomisp_gmin_remove_subdev(sd); timer_shutdown_sync(&flash->flash_off_delay); lm3554_gpio_uninit(client); kfree(flash); } static const struct dev_pm_ops lm3554_pm_ops = { .suspend = lm3554_suspend, .resume = lm3554_resume, }; static const struct acpi_device_id lm3554_acpi_match[] = { { "INTCF1C" }, {}, }; MODULE_DEVICE_TABLE(acpi, lm3554_acpi_match); static struct i2c_driver lm3554_driver = { .driver = { .name = "lm3554", .pm = &lm3554_pm_ops, .acpi_match_table = lm3554_acpi_match, }, .probe = lm3554_probe, .remove = lm3554_remove, }; module_i2c_driver(lm3554_driver); MODULE_AUTHOR("Jing Tao <[email protected]>"); MODULE_DESCRIPTION("LED flash driver for LM3554"); MODULE_LICENSE("GPL");
linux-master
drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for GalaxyCore GC2235 2M camera sensor. * * Copyright (c) 2014 Intel Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/moduleparam.h> #include <media/v4l2-device.h> #include "../include/linux/atomisp_gmin_platform.h" #include <linux/acpi.h> #include <linux/io.h> #include "gc2235.h" /* i2c read/write stuff */ static int gc2235_read_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 *val) { int err; struct i2c_msg msg[2]; unsigned char data[6]; if (!client->adapter) { dev_err(&client->dev, "%s error, no client->adapter\n", __func__); return -ENODEV; } if (data_length != GC2235_8BIT) { dev_err(&client->dev, "%s error, invalid data length\n", __func__); return -EINVAL; } memset(msg, 0, sizeof(msg)); msg[0].addr = client->addr; msg[0].flags = 0; msg[0].len = 1; msg[0].buf = data; /* high byte goes out first */ data[0] = (u8)(reg & 0xff); msg[1].addr = client->addr; msg[1].len = data_length; msg[1].flags = I2C_M_RD; msg[1].buf = data; err = i2c_transfer(client->adapter, msg, 2); if (err != 2) { if (err >= 0) err = -EIO; dev_err(&client->dev, "read from offset 0x%x error %d", reg, err); return err; } *val = 0; /* high byte comes first */ if (data_length == GC2235_8BIT) *val = (u8)data[0]; return 0; 
} static int gc2235_i2c_write(struct i2c_client *client, u16 len, u8 *data) { struct i2c_msg msg; const int num_msg = 1; int ret; msg.addr = client->addr; msg.flags = 0; msg.len = len; msg.buf = data; ret = i2c_transfer(client->adapter, &msg, 1); return ret == num_msg ? 0 : -EIO; } static int gc2235_write_reg(struct i2c_client *client, u16 data_length, u8 reg, u8 val) { int ret; unsigned char data[4] = {0}; const u16 len = data_length + sizeof(u8); /* 16-bit address + data */ if (data_length != GC2235_8BIT) { dev_err(&client->dev, "%s error, invalid data_length\n", __func__); return -EINVAL; } /* high byte goes out first */ data[0] = reg; data[1] = val; ret = gc2235_i2c_write(client, len, data); if (ret) dev_err(&client->dev, "write error: wrote 0x%x to offset 0x%x error %d", val, reg, ret); return ret; } static int __gc2235_flush_reg_array(struct i2c_client *client, struct gc2235_write_ctrl *ctrl) { u16 size; if (ctrl->index == 0) return 0; size = sizeof(u8) + ctrl->index; /* 8-bit address + data */ ctrl->index = 0; return gc2235_i2c_write(client, size, (u8 *)&ctrl->buffer); } static int __gc2235_buf_reg_array(struct i2c_client *client, struct gc2235_write_ctrl *ctrl, const struct gc2235_reg *next) { int size; if (next->type != GC2235_8BIT) return -EINVAL; size = 1; ctrl->buffer.data[ctrl->index] = (u8)next->val; /* When first item is added, we need to store its starting address */ if (ctrl->index == 0) ctrl->buffer.addr = next->reg; ctrl->index += size; /* * Buffer cannot guarantee free space for u32? Better flush it to avoid * possible lack of memory for next item. 
*/ if (ctrl->index + sizeof(u8) >= GC2235_MAX_WRITE_BUF_SIZE) return __gc2235_flush_reg_array(client, ctrl); return 0; } static int __gc2235_write_reg_is_consecutive(struct i2c_client *client, struct gc2235_write_ctrl *ctrl, const struct gc2235_reg *next) { if (ctrl->index == 0) return 1; return ctrl->buffer.addr + ctrl->index == next->reg; } static int gc2235_write_reg_array(struct i2c_client *client, const struct gc2235_reg *reglist) { const struct gc2235_reg *next = reglist; struct gc2235_write_ctrl ctrl; int err; ctrl.index = 0; for (; next->type != GC2235_TOK_TERM; next++) { switch (next->type & GC2235_TOK_MASK) { case GC2235_TOK_DELAY: err = __gc2235_flush_reg_array(client, &ctrl); if (err) return err; msleep(next->val); break; default: /* * If next address is not consecutive, data needs to be * flushed before proceed. */ if (!__gc2235_write_reg_is_consecutive(client, &ctrl, next)) { err = __gc2235_flush_reg_array(client, &ctrl); if (err) return err; } err = __gc2235_buf_reg_array(client, &ctrl, next); if (err) { dev_err(&client->dev, "%s: write error, aborted\n", __func__); return err; } break; } } return __gc2235_flush_reg_array(client, &ctrl); } static long __gc2235_set_exposure(struct v4l2_subdev *sd, int coarse_itg, int gain, int digitgain) { struct i2c_client *client = v4l2_get_subdevdata(sd); u16 coarse_integration = (u16)coarse_itg; int ret = 0; u16 expo_coarse_h, expo_coarse_l, gain_val = 0xF0, gain_val2 = 0xF0; expo_coarse_h = coarse_integration >> 8; expo_coarse_l = coarse_integration & 0xff; ret = gc2235_write_reg(client, GC2235_8BIT, GC2235_EXPOSURE_H, expo_coarse_h); ret = gc2235_write_reg(client, GC2235_8BIT, GC2235_EXPOSURE_L, expo_coarse_l); if (gain <= 0x58) { gain_val = 0x40; gain_val2 = 0x58; } else if (gain < 256) { gain_val = 0x40; gain_val2 = gain; } else { gain_val2 = 64 * gain / 256; gain_val = 0xff; } ret = gc2235_write_reg(client, GC2235_8BIT, GC2235_GLOBAL_GAIN, (u8)gain_val); ret = gc2235_write_reg(client, GC2235_8BIT, 
GC2235_PRE_GAIN, (u8)gain_val2); return ret; } static int gc2235_set_exposure(struct v4l2_subdev *sd, int exposure, int gain, int digitgain) { struct gc2235_device *dev = to_gc2235_sensor(sd); int ret; mutex_lock(&dev->input_lock); ret = __gc2235_set_exposure(sd, exposure, gain, digitgain); mutex_unlock(&dev->input_lock); return ret; } static long gc2235_s_exposure(struct v4l2_subdev *sd, struct atomisp_exposure *exposure) { int exp = exposure->integration_time[0]; int gain = exposure->gain[0]; int digitgain = exposure->gain[1]; /* we should not accept the invalid value below. */ if (gain == 0) { struct i2c_client *client = v4l2_get_subdevdata(sd); v4l2_err(client, "%s: invalid value\n", __func__); return -EINVAL; } return gc2235_set_exposure(sd, exp, gain, digitgain); } static long gc2235_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { switch (cmd) { case ATOMISP_IOC_S_EXPOSURE: return gc2235_s_exposure(sd, arg); default: return -EINVAL; } return 0; } /* * This returns the exposure time being used. This should only be used * for filling in EXIF data, not for actual image processing. 
*/ static int gc2235_q_exposure(struct v4l2_subdev *sd, s32 *value) { struct i2c_client *client = v4l2_get_subdevdata(sd); u16 reg_v, reg_v2; int ret; /* get exposure */ ret = gc2235_read_reg(client, GC2235_8BIT, GC2235_EXPOSURE_L, &reg_v); if (ret) goto err; ret = gc2235_read_reg(client, GC2235_8BIT, GC2235_EXPOSURE_H, &reg_v2); if (ret) goto err; reg_v += reg_v2 << 8; *value = reg_v; err: return ret; } static int gc2235_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct gc2235_device *dev = container_of(ctrl->handler, struct gc2235_device, ctrl_handler); int ret = 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE_ABSOLUTE: ret = gc2235_q_exposure(&dev->sd, &ctrl->val); break; default: ret = -EINVAL; } return ret; } static const struct v4l2_ctrl_ops ctrl_ops = { .g_volatile_ctrl = gc2235_g_volatile_ctrl }; static struct v4l2_ctrl_config gc2235_controls[] = { { .ops = &ctrl_ops, .id = V4L2_CID_EXPOSURE_ABSOLUTE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "exposure", .min = 0x0, .max = 0xffff, .step = 0x01, .def = 0x00, .flags = 0, }, }; static int __gc2235_init(struct v4l2_subdev *sd) { struct i2c_client *client = v4l2_get_subdevdata(sd); /* restore settings */ gc2235_res = gc2235_res_preview; N_RES = N_RES_PREVIEW; return gc2235_write_reg_array(client, gc2235_init_settings); } static int is_init; static int power_ctrl(struct v4l2_subdev *sd, bool flag) { int ret = -1; struct gc2235_device *dev = to_gc2235_sensor(sd); if (!dev || !dev->platform_data) return -ENODEV; if (flag) { ret = dev->platform_data->v1p8_ctrl(sd, 1); usleep_range(60, 90); if (ret == 0) ret |= dev->platform_data->v2p8_ctrl(sd, 1); } else { ret = dev->platform_data->v1p8_ctrl(sd, 0); ret |= dev->platform_data->v2p8_ctrl(sd, 0); } return ret; } static int gpio_ctrl(struct v4l2_subdev *sd, bool flag) { struct gc2235_device *dev = to_gc2235_sensor(sd); int ret; if (!dev || !dev->platform_data) return -ENODEV; ret = dev->platform_data->gpio1_ctrl(sd, !flag); usleep_range(60, 90); ret |= 
dev->platform_data->gpio0_ctrl(sd, flag); return ret; } static int power_up(struct v4l2_subdev *sd) { struct gc2235_device *dev = to_gc2235_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; if (!dev->platform_data) { dev_err(&client->dev, "no camera_sensor_platform_data"); return -ENODEV; } /* power control */ ret = power_ctrl(sd, 1); if (ret) goto fail_power; /* according to DS, at least 5ms is needed between DOVDD and PWDN */ usleep_range(5000, 6000); ret = dev->platform_data->flisclk_ctrl(sd, 1); if (ret) goto fail_clk; usleep_range(5000, 6000); /* gpio ctrl */ ret = gpio_ctrl(sd, 1); if (ret) { ret = gpio_ctrl(sd, 1); if (ret) goto fail_power; } msleep(5); return 0; fail_clk: gpio_ctrl(sd, 0); fail_power: power_ctrl(sd, 0); dev_err(&client->dev, "sensor power-up failed\n"); return ret; } static int power_down(struct v4l2_subdev *sd) { struct gc2235_device *dev = to_gc2235_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (!dev->platform_data) { dev_err(&client->dev, "no camera_sensor_platform_data"); return -ENODEV; } /* gpio ctrl */ ret = gpio_ctrl(sd, 0); if (ret) { ret = gpio_ctrl(sd, 0); if (ret) dev_err(&client->dev, "gpio failed 2\n"); } ret = dev->platform_data->flisclk_ctrl(sd, 0); if (ret) dev_err(&client->dev, "flisclk failed\n"); /* power control */ ret = power_ctrl(sd, 0); if (ret) dev_err(&client->dev, "vprog failed.\n"); return ret; } static int gc2235_s_power(struct v4l2_subdev *sd, int on) { int ret; if (on == 0) { ret = power_down(sd); } else { ret = power_up(sd); if (!ret) ret = __gc2235_init(sd); is_init = 1; } return ret; } static int startup(struct v4l2_subdev *sd) { struct gc2235_device *dev = to_gc2235_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (is_init == 0) { /* * force gc2235 to do a reset in res change, otherwise it * can not output normal after switching res. 
and it is not * necessary for first time run up after power on, for the sack * of performance */ power_down(sd); power_up(sd); gc2235_write_reg_array(client, gc2235_init_settings); } ret = gc2235_write_reg_array(client, dev->res->regs); if (ret) { dev_err(&client->dev, "gc2235 write register err.\n"); return ret; } is_init = 0; return ret; } static int gc2235_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; struct gc2235_device *dev = to_gc2235_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); struct camera_mipi_info *gc2235_info = NULL; struct gc2235_resolution *res; int ret = 0; gc2235_info = v4l2_get_subdev_hostdata(sd); if (!gc2235_info) return -EINVAL; if (format->pad) return -EINVAL; if (!fmt) return -EINVAL; mutex_lock(&dev->input_lock); res = v4l2_find_nearest_size(gc2235_res_preview, ARRAY_SIZE(gc2235_res_preview), width, height, fmt->width, fmt->height); if (!res) res = &gc2235_res_preview[N_RES - 1]; fmt->width = res->width; fmt->height = res->height; dev->res = res; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; if (format->which == V4L2_SUBDEV_FORMAT_TRY) { sd_state->pads->try_fmt = *fmt; mutex_unlock(&dev->input_lock); return 0; } ret = startup(sd); if (ret) { dev_err(&client->dev, "gc2235 startup err\n"); goto err; } err: mutex_unlock(&dev->input_lock); return ret; } static int gc2235_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; struct gc2235_device *dev = to_gc2235_sensor(sd); if (format->pad) return -EINVAL; if (!fmt) return -EINVAL; fmt->width = dev->res->width; fmt->height = dev->res->height; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int gc2235_detect(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; u16 high = 0, low = 0; u16 id; if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) 
return -ENODEV; gc2235_read_reg(client, GC2235_8BIT, GC2235_SENSOR_ID_H, &high); gc2235_read_reg(client, GC2235_8BIT, GC2235_SENSOR_ID_L, &low); id = ((high << 8) | low); if (id != GC2235_ID) { dev_err(&client->dev, "sensor ID error, 0x%x\n", id); return -ENODEV; } dev_info(&client->dev, "detect gc2235 success\n"); return 0; } static int gc2235_s_stream(struct v4l2_subdev *sd, int enable) { struct gc2235_device *dev = to_gc2235_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; mutex_lock(&dev->input_lock); if (enable) ret = gc2235_write_reg_array(client, gc2235_stream_on); else ret = gc2235_write_reg_array(client, gc2235_stream_off); mutex_unlock(&dev->input_lock); return ret; } static int gc2235_s_config(struct v4l2_subdev *sd, int irq, void *platform_data) { struct gc2235_device *dev = to_gc2235_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (!platform_data) return -ENODEV; dev->platform_data = (struct camera_sensor_platform_data *)platform_data; mutex_lock(&dev->input_lock); /* * power off the module, then power on it in future * as first power on by board may not fulfill the * power on sequqence needed by the module */ ret = power_down(sd); if (ret) { dev_err(&client->dev, "gc2235 power-off err.\n"); goto fail_power_off; } ret = power_up(sd); if (ret) { dev_err(&client->dev, "gc2235 power-up err.\n"); goto fail_power_on; } ret = dev->platform_data->csi_cfg(sd, 1); if (ret) goto fail_csi_cfg; /* config & detect sensor */ ret = gc2235_detect(client); if (ret) { dev_err(&client->dev, "gc2235_detect err s_config.\n"); goto fail_csi_cfg; } /* turn off sensor, after probed */ ret = power_down(sd); if (ret) { dev_err(&client->dev, "gc2235 power-off err.\n"); goto fail_csi_cfg; } mutex_unlock(&dev->input_lock); return 0; fail_csi_cfg: dev->platform_data->csi_cfg(sd, 0); fail_power_on: power_down(sd); dev_err(&client->dev, "sensor power-gating failed\n"); fail_power_off: mutex_unlock(&dev->input_lock); return 
ret; } static int gc2235_g_frame_interval(struct v4l2_subdev *sd, struct v4l2_subdev_frame_interval *interval) { struct gc2235_device *dev = to_gc2235_sensor(sd); interval->interval.numerator = 1; interval->interval.denominator = dev->res->fps; return 0; } static int gc2235_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index >= MAX_FMTS) return -EINVAL; code->code = MEDIA_BUS_FMT_SBGGR10_1X10; return 0; } static int gc2235_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { int index = fse->index; if (index >= N_RES) return -EINVAL; fse->min_width = gc2235_res[index].width; fse->min_height = gc2235_res[index].height; fse->max_width = gc2235_res[index].width; fse->max_height = gc2235_res[index].height; return 0; } static int gc2235_g_skip_frames(struct v4l2_subdev *sd, u32 *frames) { struct gc2235_device *dev = to_gc2235_sensor(sd); mutex_lock(&dev->input_lock); *frames = dev->res->skip_frames; mutex_unlock(&dev->input_lock); return 0; } static const struct v4l2_subdev_sensor_ops gc2235_sensor_ops = { .g_skip_frames = gc2235_g_skip_frames, }; static const struct v4l2_subdev_video_ops gc2235_video_ops = { .s_stream = gc2235_s_stream, .g_frame_interval = gc2235_g_frame_interval, }; static const struct v4l2_subdev_core_ops gc2235_core_ops = { .s_power = gc2235_s_power, .ioctl = gc2235_ioctl, }; static const struct v4l2_subdev_pad_ops gc2235_pad_ops = { .enum_mbus_code = gc2235_enum_mbus_code, .enum_frame_size = gc2235_enum_frame_size, .get_fmt = gc2235_get_fmt, .set_fmt = gc2235_set_fmt, }; static const struct v4l2_subdev_ops gc2235_ops = { .core = &gc2235_core_ops, .video = &gc2235_video_ops, .pad = &gc2235_pad_ops, .sensor = &gc2235_sensor_ops, }; static void gc2235_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct gc2235_device *dev = to_gc2235_sensor(sd); 
dev_dbg(&client->dev, "gc2235_remove...\n"); dev->platform_data->csi_cfg(sd, 0); v4l2_device_unregister_subdev(sd); media_entity_cleanup(&dev->sd.entity); v4l2_ctrl_handler_free(&dev->ctrl_handler); kfree(dev); } static int gc2235_probe(struct i2c_client *client) { struct gc2235_device *dev; void *gcpdev; int ret; unsigned int i; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; mutex_init(&dev->input_lock); dev->res = &gc2235_res_preview[0]; v4l2_i2c_subdev_init(&dev->sd, client, &gc2235_ops); gcpdev = gmin_camera_platform_data(&dev->sd, ATOMISP_INPUT_FORMAT_RAW_10, atomisp_bayer_order_grbg); ret = gc2235_s_config(&dev->sd, client->irq, gcpdev); if (ret) goto out_free; dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; dev->pad.flags = MEDIA_PAD_FL_SOURCE; dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10; dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ret = v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(gc2235_controls)); if (ret) { gc2235_remove(client); return ret; } for (i = 0; i < ARRAY_SIZE(gc2235_controls); i++) v4l2_ctrl_new_custom(&dev->ctrl_handler, &gc2235_controls[i], NULL); if (dev->ctrl_handler.error) { gc2235_remove(client); return dev->ctrl_handler.error; } /* Use same lock for controls as for everything else. 
*/ dev->ctrl_handler.lock = &dev->input_lock; dev->sd.ctrl_handler = &dev->ctrl_handler; ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad); if (ret) gc2235_remove(client); return atomisp_register_i2c_module(&dev->sd, gcpdev, RAW_CAMERA); out_free: v4l2_device_unregister_subdev(&dev->sd); kfree(dev); return ret; } static const struct acpi_device_id gc2235_acpi_match[] = { { "INT33F8" }, {}, }; MODULE_DEVICE_TABLE(acpi, gc2235_acpi_match); static struct i2c_driver gc2235_driver = { .driver = { .name = "gc2235", .acpi_match_table = gc2235_acpi_match, }, .probe = gc2235_probe, .remove = gc2235_remove, }; module_i2c_driver(gc2235_driver); MODULE_AUTHOR("Shuguang Gong <[email protected]>"); MODULE_DESCRIPTION("A low-level driver for GC2235 sensors"); MODULE_LICENSE("GPL");
linux-master
drivers/staging/media/atomisp/i2c/atomisp-gc2235.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2013 Intel Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * */ #include <linux/i2c.h> #include <linux/firmware.h> #include <linux/device.h> #include <linux/export.h> #include "../include/linux/libmsrlisthelper.h" #include <linux/module.h> #include <linux/slab.h> /* Tagged binary data container structure definitions. */ struct tbd_header { u32 tag; /*!< Tag identifier, also checks endianness */ u32 size; /*!< Container size including this header */ u32 version; /*!< Version, format 0xYYMMDDVV */ u32 revision; /*!< Revision, format 0xYYMMDDVV */ u32 config_bits; /*!< Configuration flag bits set */ u32 checksum; /*!< Global checksum, header included */ } __packed; struct tbd_record_header { u32 size; /*!< Size of record including header */ u8 format_id; /*!< tbd_format_t enumeration values used */ u8 packing_key; /*!< Packing method; 0 = no packing */ u16 class_id; /*!< tbd_class_t enumeration values used */ } __packed; struct tbd_data_record_header { u16 next_offset; u16 flags; u16 data_offset; u16 data_size; } __packed; #define TBD_CLASS_DRV_ID 2 static int set_msr_configuration(struct i2c_client *client, uint8_t *bufptr, unsigned int size) { /* * The configuration data contains any number of sequences where * the first byte (that is, uint8_t) that marks the number of bytes * in the sequence to follow, is indeed followed by the indicated * number of bytes of actual data to be written to sensor. 
* By convention, the first two bytes of actual data should be * understood as an address in the sensor address space (hibyte * followed by lobyte) where the remaining data in the sequence * will be written. */ u8 *ptr = bufptr; while (ptr < bufptr + size) { struct i2c_msg msg = { .addr = client->addr, .flags = 0, }; int ret; /* How many bytes */ msg.len = *ptr++; /* Where the bytes are located */ msg.buf = ptr; ptr += msg.len; if (ptr > bufptr + size) /* Accessing data beyond bounds is not tolerated */ return -EINVAL; ret = i2c_transfer(client->adapter, &msg, 1); if (ret < 0) { dev_err(&client->dev, "i2c write error: %d", ret); return ret; } } return 0; } static int parse_and_apply(struct i2c_client *client, uint8_t *buffer, unsigned int size) { u8 *endptr8 = buffer + size; struct tbd_data_record_header *header = (struct tbd_data_record_header *)buffer; /* There may be any number of datasets present */ unsigned int dataset = 0; do { /* In below, four variables are read from buffer */ if ((uint8_t *)header + sizeof(*header) > endptr8) return -EINVAL; /* All data should be located within given buffer */ if ((uint8_t *)header + header->data_offset + header->data_size > endptr8) return -EINVAL; /* We have a new valid dataset */ dataset++; /* See whether there is MSR data */ /* If yes, update the reg info */ if (header->data_size && (header->flags & 1)) { int ret; dev_info(&client->dev, "New MSR data for sensor driver (dataset %02d) size:%d\n", dataset, header->data_size); ret = set_msr_configuration(client, buffer + header->data_offset, header->data_size); if (ret) return ret; } header = (struct tbd_data_record_header *)(buffer + header->next_offset); } while (header->next_offset); return 0; } int apply_msr_data(struct i2c_client *client, const struct firmware *fw) { struct tbd_header *header; struct tbd_record_header *record; if (!fw) { dev_warn(&client->dev, "Drv data is not loaded.\n"); return -EINVAL; } if (sizeof(*header) > fw->size) return -EINVAL; header = 
(struct tbd_header *)fw->data; /* Check that we have drvb block. */ if (memcmp(&header->tag, "DRVB", 4)) return -EINVAL; /* Check the size */ if (header->size != fw->size) return -EINVAL; if (sizeof(*header) + sizeof(*record) > fw->size) return -EINVAL; record = (struct tbd_record_header *)(header + 1); /* Check that class id mathes tbd's drv id. */ if (record->class_id != TBD_CLASS_DRV_ID) return -EINVAL; /* Size 0 shall not be treated as an error */ if (!record->size) return 0; return parse_and_apply(client, (uint8_t *)(record + 1), record->size); } EXPORT_SYMBOL_GPL(apply_msr_data); int load_msr_list(struct i2c_client *client, char *name, const struct firmware **fw) { int ret = request_firmware(fw, name, &client->dev); if (ret) { dev_err(&client->dev, "Error %d while requesting firmware %s\n", ret, name); return ret; } dev_info(&client->dev, "Received %lu bytes drv data\n", (unsigned long)(*fw)->size); return 0; } EXPORT_SYMBOL_GPL(load_msr_list); void release_msr_list(struct i2c_client *client, const struct firmware *fw) { release_firmware(fw); } EXPORT_SYMBOL_GPL(release_msr_list); static int init_msrlisthelper(void) { return 0; } static void exit_msrlisthelper(void) { } module_init(init_msrlisthelper); module_exit(exit_msrlisthelper); MODULE_AUTHOR("Jukka Kaartinen <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/staging/media/atomisp/i2c/atomisp-libmsrlisthelper.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for mt9m114 Camera Sensor. * * Copyright (c) 2010 Intel Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/device.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/acpi.h> #include "../include/linux/atomisp_gmin_platform.h" #include <media/v4l2-device.h> #include "mt9m114.h" #define to_mt9m114_sensor(sd) container_of(sd, struct mt9m114_device, sd) /* * TODO: use debug parameter to actually define when debug messages should * be printed. 
*/ static int debug; static int aaalock; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); static int mt9m114_t_vflip(struct v4l2_subdev *sd, int value); static int mt9m114_t_hflip(struct v4l2_subdev *sd, int value); static int mt9m114_wait_state(struct i2c_client *client, int timeout); static int mt9m114_read_reg(struct i2c_client *client, u16 data_length, u32 reg, u32 *val) { int err; struct i2c_msg msg[2]; unsigned char data[4]; if (!client->adapter) { v4l2_err(client, "%s error, no client->adapter\n", __func__); return -ENODEV; } if (data_length != MISENSOR_8BIT && data_length != MISENSOR_16BIT && data_length != MISENSOR_32BIT) { v4l2_err(client, "%s error, invalid data length\n", __func__); return -EINVAL; } msg[0].addr = client->addr; msg[0].flags = 0; msg[0].len = MSG_LEN_OFFSET; msg[0].buf = data; /* high byte goes out first */ data[0] = (u16)(reg >> 8); data[1] = (u16)(reg & 0xff); msg[1].addr = client->addr; msg[1].len = data_length; msg[1].flags = I2C_M_RD; msg[1].buf = data; err = i2c_transfer(client->adapter, msg, 2); if (err >= 0) { *val = 0; /* high byte comes first */ if (data_length == MISENSOR_8BIT) *val = data[0]; else if (data_length == MISENSOR_16BIT) *val = data[1] + (data[0] << 8); else *val = data[3] + (data[2] << 8) + (data[1] << 16) + (data[0] << 24); return 0; } dev_err(&client->dev, "read from offset 0x%x error %d", reg, err); return err; } static int mt9m114_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u32 val) { int num_msg; struct i2c_msg msg; unsigned char data[6] = {0}; __be16 *wreg; int retry = 0; if (!client->adapter) { v4l2_err(client, "%s error, no client->adapter\n", __func__); return -ENODEV; } if (data_length != MISENSOR_8BIT && data_length != MISENSOR_16BIT && data_length != MISENSOR_32BIT) { v4l2_err(client, "%s error, invalid data_length\n", __func__); return -EINVAL; } memset(&msg, 0, sizeof(msg)); again: msg.addr = client->addr; msg.flags = 0; msg.len = 2 + data_length; 
msg.buf = data; /* high byte goes out first */ wreg = (void *)data; *wreg = cpu_to_be16(reg); if (data_length == MISENSOR_8BIT) { data[2] = (u8)(val); } else if (data_length == MISENSOR_16BIT) { u16 *wdata = (void *)&data[2]; *wdata = be16_to_cpu(*(__be16 *)&data[2]); } else { /* MISENSOR_32BIT */ u32 *wdata = (void *)&data[2]; *wdata = be32_to_cpu(*(__be32 *)&data[2]); } num_msg = i2c_transfer(client->adapter, &msg, 1); /* * HACK: Need some delay here for Rev 2 sensors otherwise some * registers do not seem to load correctly. */ mdelay(1); if (num_msg >= 0) return 0; dev_err(&client->dev, "write error: wrote 0x%x to offset 0x%x error %d", val, reg, num_msg); if (retry <= I2C_RETRY_COUNT) { dev_dbg(&client->dev, "retrying... %d", retry); retry++; msleep(20); goto again; } return num_msg; } /** * misensor_rmw_reg - Read/Modify/Write a value to a register in the sensor * device * @client: i2c driver client structure * @data_length: 8/16/32-bits length * @reg: register address * @mask: masked out bits * @set: bits set * * Read/modify/write a value to a register in the sensor device. * Returns zero if successful, or non-zero otherwise. */ static int misensor_rmw_reg(struct i2c_client *client, u16 data_length, u16 reg, u32 mask, u32 set) { int err; u32 val; /* Exit when no mask */ if (mask == 0) return 0; /* @mask must not exceed data length */ switch (data_length) { case MISENSOR_8BIT: if (mask & ~0xff) return -EINVAL; break; case MISENSOR_16BIT: if (mask & ~0xffff) return -EINVAL; break; case MISENSOR_32BIT: break; default: /* Wrong @data_length */ return -EINVAL; } err = mt9m114_read_reg(client, data_length, reg, &val); if (err) { v4l2_err(client, "%s error exit, read failed\n", __func__); return -EINVAL; } val &= ~mask; /* * Perform the OR function if the @set exists. * Shift @set value to target bit location. @set should set only * bits included in @mask. * * REVISIT: This function expects @set to be non-shifted. 
Its shift * value is then defined to be equal to mask's LSB position. * How about to inform values in their right offset position and avoid * this unneeded shift operation? */ set <<= ffs(mask) - 1; val |= set & mask; err = mt9m114_write_reg(client, data_length, reg, val); if (err) { v4l2_err(client, "%s error exit, write failed\n", __func__); return -EINVAL; } return 0; } static int __mt9m114_flush_reg_array(struct i2c_client *client, struct mt9m114_write_ctrl *ctrl) { struct i2c_msg msg; const int num_msg = 1; int ret; int retry = 0; __be16 *data16 = (void *)&ctrl->buffer.addr; if (ctrl->index == 0) return 0; again: msg.addr = client->addr; msg.flags = 0; msg.len = 2 + ctrl->index; *data16 = cpu_to_be16(ctrl->buffer.addr); msg.buf = (u8 *)&ctrl->buffer; ret = i2c_transfer(client->adapter, &msg, num_msg); if (ret != num_msg) { if (++retry <= I2C_RETRY_COUNT) { dev_dbg(&client->dev, "retrying... %d\n", retry); msleep(20); goto again; } dev_err(&client->dev, "%s: i2c transfer error\n", __func__); return -EIO; } ctrl->index = 0; /* * REVISIT: Previously we had a delay after writing data to sensor. * But it was removed as our tests have shown it is not necessary * anymore. */ return 0; } static int __mt9m114_buf_reg_array(struct i2c_client *client, struct mt9m114_write_ctrl *ctrl, const struct misensor_reg *next) { __be16 *data16; __be32 *data32; int err; /* Insufficient buffer? Let's flush and get more free space. 
*/ if (ctrl->index + next->length >= MT9M114_MAX_WRITE_BUF_SIZE) { err = __mt9m114_flush_reg_array(client, ctrl); if (err) return err; } switch (next->length) { case MISENSOR_8BIT: ctrl->buffer.data[ctrl->index] = (u8)next->val; break; case MISENSOR_16BIT: data16 = (__be16 *)&ctrl->buffer.data[ctrl->index]; *data16 = cpu_to_be16((u16)next->val); break; case MISENSOR_32BIT: data32 = (__be32 *)&ctrl->buffer.data[ctrl->index]; *data32 = cpu_to_be32(next->val); break; default: return -EINVAL; } /* When first item is added, we need to store its starting address */ if (ctrl->index == 0) ctrl->buffer.addr = next->reg; ctrl->index += next->length; return 0; } static int __mt9m114_write_reg_is_consecutive(struct i2c_client *client, struct mt9m114_write_ctrl *ctrl, const struct misensor_reg *next) { if (ctrl->index == 0) return 1; return ctrl->buffer.addr + ctrl->index == next->reg; } /* * mt9m114_write_reg_array - Initializes a list of mt9m114 registers * @client: i2c driver client structure * @reglist: list of registers to be written * @poll: completion polling requirement * This function initializes a list of registers. When consecutive addresses * are found in a row on the list, this function creates a buffer and sends * consecutive data in a single i2c_transfer(). * * __mt9m114_flush_reg_array, __mt9m114_buf_reg_array() and * __mt9m114_write_reg_is_consecutive() are internal functions to * mt9m114_write_reg_array() and should be not used anywhere else. 
* */ static int mt9m114_write_reg_array(struct i2c_client *client, const struct misensor_reg *reglist, int poll) { const struct misensor_reg *next = reglist; struct mt9m114_write_ctrl ctrl; int err; if (poll == PRE_POLLING) { err = mt9m114_wait_state(client, MT9M114_WAIT_STAT_TIMEOUT); if (err) return err; } ctrl.index = 0; for (; next->length != MISENSOR_TOK_TERM; next++) { switch (next->length & MISENSOR_TOK_MASK) { case MISENSOR_TOK_DELAY: err = __mt9m114_flush_reg_array(client, &ctrl); if (err) return err; msleep(next->val); break; case MISENSOR_TOK_RMW: err = __mt9m114_flush_reg_array(client, &ctrl); err |= misensor_rmw_reg(client, next->length & ~MISENSOR_TOK_RMW, next->reg, next->val, next->val2); if (err) { dev_err(&client->dev, "%s read err. aborted\n", __func__); return -EINVAL; } break; default: /* * If next address is not consecutive, data needs to be * flushed before proceed. */ if (!__mt9m114_write_reg_is_consecutive(client, &ctrl, next)) { err = __mt9m114_flush_reg_array(client, &ctrl); if (err) return err; } err = __mt9m114_buf_reg_array(client, &ctrl, next); if (err) { v4l2_err(client, "%s: write error, aborted\n", __func__); return err; } break; } } err = __mt9m114_flush_reg_array(client, &ctrl); if (err) return err; if (poll == POST_POLLING) return mt9m114_wait_state(client, MT9M114_WAIT_STAT_TIMEOUT); return 0; } static int mt9m114_wait_state(struct i2c_client *client, int timeout) { int ret; unsigned int val; while (timeout-- > 0) { ret = mt9m114_read_reg(client, MISENSOR_16BIT, 0x0080, &val); if (ret) return ret; if ((val & 0x2) == 0) return 0; msleep(20); } return -EINVAL; } static int mt9m114_set_suspend(struct v4l2_subdev *sd) { struct i2c_client *client = v4l2_get_subdevdata(sd); return mt9m114_write_reg_array(client, mt9m114_standby_reg, POST_POLLING); } static int mt9m114_init_common(struct v4l2_subdev *sd) { struct i2c_client *client = v4l2_get_subdevdata(sd); return mt9m114_write_reg_array(client, mt9m114_common, PRE_POLLING); } static 
int power_ctrl(struct v4l2_subdev *sd, bool flag) { int ret; struct mt9m114_device *dev = to_mt9m114_sensor(sd); if (!dev || !dev->platform_data) return -ENODEV; if (flag) { ret = dev->platform_data->v2p8_ctrl(sd, 1); if (ret == 0) { ret = dev->platform_data->v1p8_ctrl(sd, 1); if (ret) ret = dev->platform_data->v2p8_ctrl(sd, 0); } } else { ret = dev->platform_data->v2p8_ctrl(sd, 0); ret = dev->platform_data->v1p8_ctrl(sd, 0); } return ret; } static int gpio_ctrl(struct v4l2_subdev *sd, bool flag) { int ret; struct mt9m114_device *dev = to_mt9m114_sensor(sd); if (!dev || !dev->platform_data) return -ENODEV; /* * Note: current modules wire only one GPIO signal (RESET#), * but the schematic wires up two to the connector. BIOS * versions have been unfortunately inconsistent with which * ACPI index RESET# is on, so hit both */ if (flag) { ret = dev->platform_data->gpio0_ctrl(sd, 0); ret = dev->platform_data->gpio1_ctrl(sd, 0); msleep(60); ret |= dev->platform_data->gpio0_ctrl(sd, 1); ret |= dev->platform_data->gpio1_ctrl(sd, 1); } else { ret = dev->platform_data->gpio0_ctrl(sd, 0); ret = dev->platform_data->gpio1_ctrl(sd, 0); } return ret; } static int power_up(struct v4l2_subdev *sd) { struct mt9m114_device *dev = to_mt9m114_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; if (!dev->platform_data) { dev_err(&client->dev, "no camera_sensor_platform_data"); return -ENODEV; } /* power control */ ret = power_ctrl(sd, 1); if (ret) goto fail_power; /* flis clock control */ ret = dev->platform_data->flisclk_ctrl(sd, 1); if (ret) goto fail_clk; /* gpio ctrl */ ret = gpio_ctrl(sd, 1); if (ret) dev_err(&client->dev, "gpio failed 1\n"); /* * according to DS, 44ms is needed between power up and first i2c * commend */ msleep(50); return 0; fail_clk: dev->platform_data->flisclk_ctrl(sd, 0); fail_power: power_ctrl(sd, 0); dev_err(&client->dev, "sensor power-up failed\n"); return ret; } static int power_down(struct v4l2_subdev *sd) { struct mt9m114_device *dev 
= to_mt9m114_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; if (!dev->platform_data) { dev_err(&client->dev, "no camera_sensor_platform_data"); return -ENODEV; } ret = dev->platform_data->flisclk_ctrl(sd, 0); if (ret) dev_err(&client->dev, "flisclk failed\n"); /* gpio ctrl */ ret = gpio_ctrl(sd, 0); if (ret) dev_err(&client->dev, "gpio failed 1\n"); /* power control */ ret = power_ctrl(sd, 0); if (ret) dev_err(&client->dev, "vprog failed.\n"); /* according to DS, 20ms is needed after power down */ msleep(20); return ret; } static int mt9m114_s_power(struct v4l2_subdev *sd, int power) { if (power == 0) return power_down(sd); if (power_up(sd)) return -EINVAL; return mt9m114_init_common(sd); } static int mt9m114_res2size(struct v4l2_subdev *sd, int *h_size, int *v_size) { struct mt9m114_device *dev = to_mt9m114_sensor(sd); unsigned short hsize; unsigned short vsize; switch (dev->res) { case MT9M114_RES_736P: hsize = MT9M114_RES_736P_SIZE_H; vsize = MT9M114_RES_736P_SIZE_V; break; case MT9M114_RES_864P: hsize = MT9M114_RES_864P_SIZE_H; vsize = MT9M114_RES_864P_SIZE_V; break; case MT9M114_RES_960P: hsize = MT9M114_RES_960P_SIZE_H; vsize = MT9M114_RES_960P_SIZE_V; break; default: v4l2_err(sd, "%s: Resolution 0x%08x unknown\n", __func__, dev->res); return -EINVAL; } if (h_size) *h_size = hsize; if (v_size) *v_size = vsize; return 0; } static int mt9m114_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; int width, height; int ret; if (format->pad) return -EINVAL; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; ret = mt9m114_res2size(sd, &width, &height); if (ret) return ret; fmt->width = width; fmt->height = height; return 0; } static int mt9m114_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; struct i2c_client *c = v4l2_get_subdevdata(sd); 
struct mt9m114_device *dev = to_mt9m114_sensor(sd); struct mt9m114_res_struct *res; u32 width = fmt->width; u32 height = fmt->height; struct camera_mipi_info *mt9m114_info = NULL; int ret; if (format->pad) return -EINVAL; dev->streamon = 0; dev->first_exp = MT9M114_DEFAULT_FIRST_EXP; mt9m114_info = v4l2_get_subdev_hostdata(sd); if (!mt9m114_info) return -EINVAL; res = v4l2_find_nearest_size(mt9m114_res, ARRAY_SIZE(mt9m114_res), width, height, fmt->width, fmt->height); if (!res) res = &mt9m114_res[N_RES - 1]; fmt->width = res->width; fmt->height = res->height; if (format->which == V4L2_SUBDEV_FORMAT_TRY) { sd_state->pads->try_fmt = *fmt; return 0; } switch (res->res) { case MT9M114_RES_736P: ret = mt9m114_write_reg_array(c, mt9m114_736P_init, NO_POLLING); ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET); break; case MT9M114_RES_864P: ret = mt9m114_write_reg_array(c, mt9m114_864P_init, NO_POLLING); ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET); break; case MT9M114_RES_960P: ret = mt9m114_write_reg_array(c, mt9m114_976P_init, NO_POLLING); /* set sensor read_mode to Normal */ ret += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, MISENSOR_R_MODE_MASK, MISENSOR_NORMAL_SET); break; default: v4l2_err(sd, "set resolution: %d failed!\n", res->res); return -EINVAL; } if (ret) return -EINVAL; ret = mt9m114_write_reg_array(c, mt9m114_chgstat_reg, POST_POLLING); if (ret < 0) return ret; if (mt9m114_set_suspend(sd)) return -EINVAL; if (dev->res != res->res) { int index; /* Switch to different size */ if (width <= 640) { dev->nctx = 0x00; /* Set for context A */ } else { /* * Context B is used for resolutions larger than 640x480 * Using YUV for Context B. */ dev->nctx = 0x01; /* set for context B */ } /* * Marked current sensor res as being "used" * * REVISIT: We don't need to use an "used" field on each mode * list entry to know which mode is selected. 
If this * information is really necessary, how about to use a single * variable on sensor dev struct? */ for (index = 0; index < N_RES; index++) { if ((width == mt9m114_res[index].width) && (height == mt9m114_res[index].height)) { mt9m114_res[index].used = true; continue; } mt9m114_res[index].used = false; } } /* * mt9m114 - we don't poll for context switch * because it does not happen with streaming disabled. */ dev->res = res->res; fmt->width = width; fmt->height = height; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } /* Horizontal flip the image. */ static int mt9m114_g_hflip(struct v4l2_subdev *sd, s32 *val) { struct i2c_client *c = v4l2_get_subdevdata(sd); int ret; u32 data; ret = mt9m114_read_reg(c, MISENSOR_16BIT, (u32)MISENSOR_READ_MODE, &data); if (ret) return ret; *val = !!(data & MISENSOR_HFLIP_MASK); return 0; } static int mt9m114_g_vflip(struct v4l2_subdev *sd, s32 *val) { struct i2c_client *c = v4l2_get_subdevdata(sd); int ret; u32 data; ret = mt9m114_read_reg(c, MISENSOR_16BIT, (u32)MISENSOR_READ_MODE, &data); if (ret) return ret; *val = !!(data & MISENSOR_VFLIP_MASK); return 0; } static long mt9m114_s_exposure(struct v4l2_subdev *sd, struct atomisp_exposure *exposure) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct mt9m114_device *dev = to_mt9m114_sensor(sd); int ret = 0; unsigned int coarse_integration = 0; unsigned int f_lines = 0; unsigned int frame_len_lines = 0; /* ExposureTime.FrameLengthLines; */ unsigned int analog_gain, digital_gain; u32 analog_gain_to_write = 0; dev_dbg(&client->dev, "%s(0x%X 0x%X 0x%X)\n", __func__, exposure->integration_time[0], exposure->gain[0], exposure->gain[1]); coarse_integration = exposure->integration_time[0]; /* * fine_integration = ExposureTime.FineIntegrationTime; * frame_len_lines = ExposureTime.FrameLengthLines; */ f_lines = mt9m114_res[dev->res].lines_per_frame; analog_gain = exposure->gain[0]; digital_gain = exposure->gain[1]; if (!dev->streamon) { /*Save the first exposure values while 
stream is off*/ dev->first_exp = coarse_integration; dev->first_gain = analog_gain; dev->first_diggain = digital_gain; } /* digital_gain = 0x400 * (((u16) digital_gain) >> 8) + */ /* ((unsigned int)(0x400 * (((u16) digital_gain) & 0xFF)) >>8); */ /* set frame length */ if (f_lines < coarse_integration + 6) f_lines = coarse_integration + 6; if (f_lines < frame_len_lines) f_lines = frame_len_lines; ret = mt9m114_write_reg(client, MISENSOR_16BIT, 0x300A, f_lines); if (ret) { v4l2_err(client, "%s: fail to set f_lines\n", __func__); return -EINVAL; } /* set coarse integration */ /* * 3A provide real exposure time. * should not translate to any value here. */ ret = mt9m114_write_reg(client, MISENSOR_16BIT, REG_EXPO_COARSE, (u16)(coarse_integration)); if (ret) { v4l2_err(client, "%s: fail to set exposure time\n", __func__); return -EINVAL; } /* * set analog/digital gain switch(analog_gain) { case 0: analog_gain_to_write = 0x0; break; case 1: analog_gain_to_write = 0x20; break; case 2: analog_gain_to_write = 0x60; break; case 4: analog_gain_to_write = 0xA0; break; case 8: analog_gain_to_write = 0xE0; break; default: analog_gain_to_write = 0x20; break; } */ if (digital_gain >= 16 || digital_gain <= 1) digital_gain = 1; /* * analog_gain_to_write = (u16)((digital_gain << 12) * | analog_gain_to_write); */ analog_gain_to_write = (u16)((digital_gain << 12) | (u16)analog_gain); ret = mt9m114_write_reg(client, MISENSOR_16BIT, REG_GAIN, analog_gain_to_write); if (ret) { v4l2_err(client, "%s: fail to set analog_gain_to_write\n", __func__); return -EINVAL; } return ret; } static long mt9m114_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { switch (cmd) { case ATOMISP_IOC_S_EXPOSURE: return mt9m114_s_exposure(sd, arg); default: return -EINVAL; } return 0; } /* * This returns the exposure time being used. This should only be used * for filling in EXIF data, not for actual image processing. 
*/ static int mt9m114_g_exposure(struct v4l2_subdev *sd, s32 *value) { struct i2c_client *client = v4l2_get_subdevdata(sd); u32 coarse; int ret; /* the fine integration time is currently not calculated */ ret = mt9m114_read_reg(client, MISENSOR_16BIT, REG_EXPO_COARSE, &coarse); if (ret) return ret; *value = coarse; return 0; } /* * This function will return the sensor supported max exposure zone number. * the sensor which supports max exposure zone number is 1. */ static int mt9m114_g_exposure_zone_num(struct v4l2_subdev *sd, s32 *val) { *val = 1; return 0; } /* * set exposure metering, average/center_weighted/spot/matrix. */ static int mt9m114_s_exposure_metering(struct v4l2_subdev *sd, s32 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; switch (val) { case V4L2_EXPOSURE_METERING_SPOT: ret = mt9m114_write_reg_array(client, mt9m114_exp_average, NO_POLLING); if (ret) { dev_err(&client->dev, "write exp_average reg err.\n"); return ret; } break; case V4L2_EXPOSURE_METERING_CENTER_WEIGHTED: default: ret = mt9m114_write_reg_array(client, mt9m114_exp_center, NO_POLLING); if (ret) { dev_err(&client->dev, "write exp_default reg err"); return ret; } } return 0; } /* * This function is for touch exposure feature. 
*/ static int mt9m114_s_exposure_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct misensor_reg exp_reg; int width, height; int grid_width, grid_height; int grid_left, grid_top, grid_right, grid_bottom; int win_left, win_top, win_right, win_bottom; int i, j; int ret; if (sel->which != V4L2_SUBDEV_FORMAT_TRY && sel->which != V4L2_SUBDEV_FORMAT_ACTIVE) return -EINVAL; grid_left = sel->r.left; grid_top = sel->r.top; grid_right = sel->r.left + sel->r.width - 1; grid_bottom = sel->r.top + sel->r.height - 1; ret = mt9m114_res2size(sd, &width, &height); if (ret) return ret; grid_width = width / 5; grid_height = height / 5; if (grid_width && grid_height) { win_left = grid_left / grid_width; win_top = grid_top / grid_height; win_right = grid_right / grid_width; win_bottom = grid_bottom / grid_height; } else { dev_err(&client->dev, "Incorrect exp grid.\n"); return -EINVAL; } win_left = clamp_t(int, win_left, 0, 4); win_top = clamp_t(int, win_top, 0, 4); win_right = clamp_t(int, win_right, 0, 4); win_bottom = clamp_t(int, win_bottom, 0, 4); ret = mt9m114_write_reg_array(client, mt9m114_exp_average, NO_POLLING); if (ret) { dev_err(&client->dev, "write exp_average reg err.\n"); return ret; } for (i = win_top; i <= win_bottom; i++) { for (j = win_left; j <= win_right; j++) { exp_reg = mt9m114_exp_win[i][j]; ret = mt9m114_write_reg(client, exp_reg.length, exp_reg.reg, exp_reg.val); if (ret) { dev_err(&client->dev, "write exp_reg err.\n"); return ret; } } } return 0; } static int mt9m114_s_ev(struct v4l2_subdev *sd, s32 val) { struct i2c_client *c = v4l2_get_subdevdata(sd); s32 luma = 0x37; int err; /* * EV value only support -2 to 2 * 0: 0x37, 1:0x47, 2:0x57, -1:0x27, -2:0x17 */ if (val < -2 || val > 2) return -EINVAL; luma += 0x10 * val; dev_dbg(&c->dev, "%s val:%d luma:0x%x\n", __func__, val, luma); err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC87A); if 
(err) { dev_err(&c->dev, "%s logic addr access error\n", __func__); return err; } err = mt9m114_write_reg(c, MISENSOR_8BIT, 0xC87A, (u32)luma); if (err) { dev_err(&c->dev, "%s write target_average_luma failed\n", __func__); return err; } udelay(10); return 0; } static int mt9m114_g_ev(struct v4l2_subdev *sd, s32 *val) { struct i2c_client *c = v4l2_get_subdevdata(sd); int err; u32 luma; err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC87A); if (err) { dev_err(&c->dev, "%s logic addr access error\n", __func__); return err; } err = mt9m114_read_reg(c, MISENSOR_8BIT, 0xC87A, &luma); if (err) { dev_err(&c->dev, "%s read target_average_luma failed\n", __func__); return err; } luma -= 0x17; luma /= 0x10; *val = (s32)luma - 2; dev_dbg(&c->dev, "%s val:%d\n", __func__, *val); return 0; } /* * Fake interface * mt9m114 now can not support 3a_lock */ static int mt9m114_s_3a_lock(struct v4l2_subdev *sd, s32 val) { aaalock = val; return 0; } static int mt9m114_g_3a_lock(struct v4l2_subdev *sd, s32 *val) { if (aaalock) return V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE | V4L2_LOCK_FOCUS; return 0; } static int mt9m114_s_ctrl(struct v4l2_ctrl *ctrl) { struct mt9m114_device *dev = container_of(ctrl->handler, struct mt9m114_device, ctrl_handler); struct i2c_client *client = v4l2_get_subdevdata(&dev->sd); int ret = 0; switch (ctrl->id) { case V4L2_CID_VFLIP: dev_dbg(&client->dev, "%s: CID_VFLIP:%d.\n", __func__, ctrl->val); ret = mt9m114_t_vflip(&dev->sd, ctrl->val); break; case V4L2_CID_HFLIP: dev_dbg(&client->dev, "%s: CID_HFLIP:%d.\n", __func__, ctrl->val); ret = mt9m114_t_hflip(&dev->sd, ctrl->val); break; case V4L2_CID_EXPOSURE_METERING: ret = mt9m114_s_exposure_metering(&dev->sd, ctrl->val); break; case V4L2_CID_EXPOSURE: ret = mt9m114_s_ev(&dev->sd, ctrl->val); break; case V4L2_CID_3A_LOCK: ret = mt9m114_s_3a_lock(&dev->sd, ctrl->val); break; default: ret = -EINVAL; } return ret; } static int mt9m114_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct mt9m114_device *dev = 
container_of(ctrl->handler, struct mt9m114_device, ctrl_handler); int ret = 0; switch (ctrl->id) { case V4L2_CID_VFLIP: ret = mt9m114_g_vflip(&dev->sd, &ctrl->val); break; case V4L2_CID_HFLIP: ret = mt9m114_g_hflip(&dev->sd, &ctrl->val); break; case V4L2_CID_EXPOSURE_ABSOLUTE: ret = mt9m114_g_exposure(&dev->sd, &ctrl->val); break; case V4L2_CID_EXPOSURE_ZONE_NUM: ret = mt9m114_g_exposure_zone_num(&dev->sd, &ctrl->val); break; case V4L2_CID_EXPOSURE: ret = mt9m114_g_ev(&dev->sd, &ctrl->val); break; case V4L2_CID_3A_LOCK: ret = mt9m114_g_3a_lock(&dev->sd, &ctrl->val); break; default: ret = -EINVAL; } return ret; } static const struct v4l2_ctrl_ops ctrl_ops = { .s_ctrl = mt9m114_s_ctrl, .g_volatile_ctrl = mt9m114_g_volatile_ctrl }; static struct v4l2_ctrl_config mt9m114_controls[] = { { .ops = &ctrl_ops, .id = V4L2_CID_VFLIP, .name = "Image v-Flip", .type = V4L2_CTRL_TYPE_INTEGER, .min = 0, .max = 1, .step = 1, .def = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_HFLIP, .name = "Image h-Flip", .type = V4L2_CTRL_TYPE_INTEGER, .min = 0, .max = 1, .step = 1, .def = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_EXPOSURE_ABSOLUTE, .name = "exposure", .type = V4L2_CTRL_TYPE_INTEGER, .min = 0, .max = 0xffff, .step = 1, .def = 0, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_EXPOSURE_ZONE_NUM, .name = "one-time exposure zone number", .type = V4L2_CTRL_TYPE_INTEGER, .min = 0, .max = 0xffff, .step = 1, .def = 0, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_EXPOSURE_METERING, .name = "metering", .type = V4L2_CTRL_TYPE_MENU, .min = 0, .max = 3, .step = 0, .def = 1, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_EXPOSURE, .name = "exposure biasx", .type = V4L2_CTRL_TYPE_INTEGER, .min = -2, .max = 2, .step = 1, .def = 0, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_3A_LOCK, .name = "3a lock", .type = V4L2_CTRL_TYPE_BITMASK, .min = 0, .max = V4L2_LOCK_EXPOSURE | V4L2_LOCK_WHITE_BALANCE | V4L2_LOCK_FOCUS, .step = 1, .def = 0, .flags = 0, }, }; static int mt9m114_detect(struct 
mt9m114_device *dev, struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; u32 model; int ret; if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "%s: i2c error", __func__); return -ENODEV; } ret = mt9m114_read_reg(client, MISENSOR_16BIT, MT9M114_PID, &model); if (ret) return ret; dev->real_model_id = model; if (model != MT9M114_MOD_ID) { dev_err(&client->dev, "%s: failed: client->addr = %x\n", __func__, client->addr); return -ENODEV; } return 0; } static int mt9m114_s_config(struct v4l2_subdev *sd, int irq, void *platform_data) { struct mt9m114_device *dev = to_mt9m114_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; if (!platform_data) return -ENODEV; dev->platform_data = (struct camera_sensor_platform_data *)platform_data; ret = power_up(sd); if (ret) { v4l2_err(client, "mt9m114 power-up err"); return ret; } /* config & detect sensor */ ret = mt9m114_detect(dev, client); if (ret) { v4l2_err(client, "mt9m114_detect err s_config.\n"); goto fail_detect; } ret = dev->platform_data->csi_cfg(sd, 1); if (ret) goto fail_csi_cfg; ret = mt9m114_set_suspend(sd); if (ret) { v4l2_err(client, "mt9m114 suspend err"); return ret; } ret = power_down(sd); if (ret) { v4l2_err(client, "mt9m114 power down err"); return ret; } return ret; fail_csi_cfg: dev->platform_data->csi_cfg(sd, 0); fail_detect: power_down(sd); dev_err(&client->dev, "sensor power-gating failed\n"); return ret; } /* Horizontal flip the image. 
*/ static int mt9m114_t_hflip(struct v4l2_subdev *sd, int value) { struct i2c_client *c = v4l2_get_subdevdata(sd); struct mt9m114_device *dev = to_mt9m114_sensor(sd); int err; /* set for direct mode */ err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC850); if (value) { /* enable H flip ctx A */ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x01, 0x01); err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x01, 0x01); /* ctx B */ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x01, 0x01); err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x01, 0x01); err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, MISENSOR_HFLIP_MASK, MISENSOR_FLIP_EN); dev->bpat = MT9M114_BPAT_GRGRBGBG; } else { /* disable H flip ctx A */ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x01, 0x00); err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x01, 0x00); /* ctx B */ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x01, 0x00); err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x01, 0x00); err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, MISENSOR_HFLIP_MASK, MISENSOR_FLIP_DIS); dev->bpat = MT9M114_BPAT_BGBGGRGR; } err += mt9m114_write_reg(c, MISENSOR_8BIT, 0x8404, 0x06); udelay(10); return !!err; } /* Vertically flip the image */ static int mt9m114_t_vflip(struct v4l2_subdev *sd, int value) { struct i2c_client *c = v4l2_get_subdevdata(sd); int err; /* set for direct mode */ err = mt9m114_write_reg(c, MISENSOR_16BIT, 0x098E, 0xC850); if (value >= 1) { /* enable H flip - ctx A */ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x02, 0x01); err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x02, 0x01); /* ctx B */ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x02, 0x01); err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x02, 0x01); err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, MISENSOR_VFLIP_MASK, MISENSOR_FLIP_EN); } else { /* disable H flip - ctx A */ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC850, 0x02, 0x00); err += 
misensor_rmw_reg(c, MISENSOR_8BIT, 0xC851, 0x02, 0x00); /* ctx B */ err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC888, 0x02, 0x00); err += misensor_rmw_reg(c, MISENSOR_8BIT, 0xC889, 0x02, 0x00); err += misensor_rmw_reg(c, MISENSOR_16BIT, MISENSOR_READ_MODE, MISENSOR_VFLIP_MASK, MISENSOR_FLIP_DIS); } err += mt9m114_write_reg(c, MISENSOR_8BIT, 0x8404, 0x06); udelay(10); return !!err; } static int mt9m114_g_frame_interval(struct v4l2_subdev *sd, struct v4l2_subdev_frame_interval *interval) { struct mt9m114_device *dev = to_mt9m114_sensor(sd); interval->interval.numerator = 1; interval->interval.denominator = mt9m114_res[dev->res].fps; return 0; } static int mt9m114_s_stream(struct v4l2_subdev *sd, int enable) { int ret; struct i2c_client *c = v4l2_get_subdevdata(sd); struct mt9m114_device *dev = to_mt9m114_sensor(sd); struct atomisp_exposure exposure; if (enable) { ret = mt9m114_write_reg_array(c, mt9m114_chgstat_reg, POST_POLLING); if (ret < 0) return ret; if (dev->first_exp > MT9M114_MAX_FIRST_EXP) { exposure.integration_time[0] = dev->first_exp; exposure.gain[0] = dev->first_gain; exposure.gain[1] = dev->first_diggain; mt9m114_s_exposure(sd, &exposure); } dev->streamon = 1; } else { dev->streamon = 0; ret = mt9m114_set_suspend(sd); } return ret; } static int mt9m114_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index) return -EINVAL; code->code = MEDIA_BUS_FMT_SGRBG10_1X10; return 0; } static int mt9m114_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { unsigned int index = fse->index; if (index >= N_RES) return -EINVAL; fse->min_width = mt9m114_res[index].width; fse->min_height = mt9m114_res[index].height; fse->max_width = mt9m114_res[index].width; fse->max_height = mt9m114_res[index].height; return 0; } static int mt9m114_g_skip_frames(struct v4l2_subdev *sd, u32 *frames) { int index; struct mt9m114_device *snr = 
to_mt9m114_sensor(sd); if (!frames) return -EINVAL; for (index = 0; index < N_RES; index++) { if (mt9m114_res[index].res == snr->res) break; } if (index >= N_RES) return -EINVAL; *frames = mt9m114_res[index].skip_frames; return 0; } static const struct v4l2_subdev_video_ops mt9m114_video_ops = { .s_stream = mt9m114_s_stream, .g_frame_interval = mt9m114_g_frame_interval, }; static const struct v4l2_subdev_sensor_ops mt9m114_sensor_ops = { .g_skip_frames = mt9m114_g_skip_frames, }; static const struct v4l2_subdev_core_ops mt9m114_core_ops = { .s_power = mt9m114_s_power, .ioctl = mt9m114_ioctl, }; /* REVISIT: Do we need pad operations? */ static const struct v4l2_subdev_pad_ops mt9m114_pad_ops = { .enum_mbus_code = mt9m114_enum_mbus_code, .enum_frame_size = mt9m114_enum_frame_size, .get_fmt = mt9m114_get_fmt, .set_fmt = mt9m114_set_fmt, .set_selection = mt9m114_s_exposure_selection, }; static const struct v4l2_subdev_ops mt9m114_ops = { .core = &mt9m114_core_ops, .video = &mt9m114_video_ops, .pad = &mt9m114_pad_ops, .sensor = &mt9m114_sensor_ops, }; static void mt9m114_remove(struct i2c_client *client) { struct mt9m114_device *dev; struct v4l2_subdev *sd = i2c_get_clientdata(client); dev = container_of(sd, struct mt9m114_device, sd); dev->platform_data->csi_cfg(sd, 0); v4l2_device_unregister_subdev(sd); media_entity_cleanup(&dev->sd.entity); v4l2_ctrl_handler_free(&dev->ctrl_handler); kfree(dev); } static int mt9m114_probe(struct i2c_client *client) { struct mt9m114_device *dev; int ret = 0; unsigned int i; void *pdata; /* Setup sensor configuration structure */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; v4l2_i2c_subdev_init(&dev->sd, client, &mt9m114_ops); pdata = gmin_camera_platform_data(&dev->sd, ATOMISP_INPUT_FORMAT_RAW_10, atomisp_bayer_order_grbg); if (pdata) ret = mt9m114_s_config(&dev->sd, client->irq, pdata); if (!pdata || ret) { v4l2_device_unregister_subdev(&dev->sd); kfree(dev); return ret; } ret = 
atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA); if (ret) { v4l2_device_unregister_subdev(&dev->sd); kfree(dev); /* Coverity CID 298095 - return on error */ return ret; } /* TODO add format code here */ dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; dev->pad.flags = MEDIA_PAD_FL_SOURCE; dev->format.code = MEDIA_BUS_FMT_SGRBG10_1X10; dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ret = v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(mt9m114_controls)); if (ret) { mt9m114_remove(client); return ret; } for (i = 0; i < ARRAY_SIZE(mt9m114_controls); i++) v4l2_ctrl_new_custom(&dev->ctrl_handler, &mt9m114_controls[i], NULL); if (dev->ctrl_handler.error) { mt9m114_remove(client); return dev->ctrl_handler.error; } /* Use same lock for controls as for everything else. */ dev->ctrl_handler.lock = &dev->input_lock; dev->sd.ctrl_handler = &dev->ctrl_handler; /* REVISIT: Do we need media controller? */ ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad); if (ret) { mt9m114_remove(client); return ret; } return 0; } static const struct acpi_device_id mt9m114_acpi_match[] = { { "INT33F0" }, { "CRMT1040" }, {}, }; MODULE_DEVICE_TABLE(acpi, mt9m114_acpi_match); static struct i2c_driver mt9m114_driver = { .driver = { .name = "mt9m114", .acpi_match_table = mt9m114_acpi_match, }, .probe = mt9m114_probe, .remove = mt9m114_remove, }; module_i2c_driver(mt9m114_driver); MODULE_AUTHOR("Shuguang Gong <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for GalaxyCore GC0310 VGA camera sensor. * * Copyright (c) 2013 Intel Corporation. All Rights Reserved. * Copyright (c) 2023 Hans de Goede <[email protected]> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/string.h> #include <linux/types.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #define GC0310_NATIVE_WIDTH 656 #define GC0310_NATIVE_HEIGHT 496 #define GC0310_FPS 30 #define GC0310_SKIP_FRAMES 3 #define GC0310_FOCAL_LENGTH_NUM 278 /* 2.78mm */ #define GC0310_ID 0xa310 #define GC0310_RESET_RELATED 0xFE #define GC0310_REGISTER_PAGE_0 0x0 #define GC0310_REGISTER_PAGE_3 0x3 /* * GC0310 System control registers */ #define GC0310_SW_STREAM 0x10 #define GC0310_SC_CMMN_CHIP_ID_H 0xf0 #define GC0310_SC_CMMN_CHIP_ID_L 0xf1 #define GC0310_AEC_PK_EXPO_H 0x03 #define GC0310_AEC_PK_EXPO_L 0x04 #define GC0310_AGC_ADJ 0x48 #define GC0310_DGC_ADJ 0x71 #define GC0310_GROUP_ACCESS 0x3208 #define GC0310_H_CROP_START_H 0x09 #define GC0310_H_CROP_START_L 0x0A #define GC0310_V_CROP_START_H 0x0B #define GC0310_V_CROP_START_L 0x0C #define GC0310_H_OUTSIZE_H 0x0F #define GC0310_H_OUTSIZE_L 0x10 #define GC0310_V_OUTSIZE_H 0x0D #define GC0310_V_OUTSIZE_L 0x0E #define GC0310_H_BLANKING_H 0x05 #define GC0310_H_BLANKING_L 0x06 #define GC0310_V_BLANKING_H 0x07 #define GC0310_V_BLANKING_L 0x08 #define GC0310_SH_DELAY 0x11 #define 
GC0310_START_STREAMING 0x94 /* 8-bit enable */ #define GC0310_STOP_STREAMING 0x0 /* 8-bit disable */ #define to_gc0310_sensor(x) container_of(x, struct gc0310_device, sd) struct gc0310_device { struct v4l2_subdev sd; struct media_pad pad; /* Protect against concurrent changes to controls */ struct mutex input_lock; bool is_streaming; struct fwnode_handle *ep_fwnode; struct gpio_desc *reset; struct gpio_desc *powerdown; struct gc0310_mode { struct v4l2_mbus_framefmt fmt; } mode; struct gc0310_ctrls { struct v4l2_ctrl_handler handler; struct v4l2_ctrl *exposure; struct v4l2_ctrl *gain; } ctrls; }; struct gc0310_reg { u8 reg; u8 val; }; static const struct gc0310_reg gc0310_reset_register[] = { /* System registers */ { 0xfe, 0xf0 }, { 0xfe, 0xf0 }, { 0xfe, 0x00 }, { 0xfc, 0x0e }, /* 4e */ { 0xfc, 0x0e }, /* 16//4e // [0]apwd [6]regf_clk_gate */ { 0xf2, 0x80 }, /* sync output */ { 0xf3, 0x00 }, /* 1f//01 data output */ { 0xf7, 0x33 }, /* f9 */ { 0xf8, 0x05 }, /* 00 */ { 0xf9, 0x0e }, /* 0x8e //0f */ { 0xfa, 0x11 }, /* MIPI */ { 0xfe, 0x03 }, { 0x01, 0x03 }, /* mipi 1lane */ { 0x02, 0x22 }, /* 0x33 */ { 0x03, 0x94 }, { 0x04, 0x01 }, /* fifo_prog */ { 0x05, 0x00 }, /* fifo_prog */ { 0x06, 0x80 }, /* b0 //YUV ISP data */ { 0x11, 0x2a }, /* 1e //LDI set YUV422 */ { 0x12, 0x90 }, /* 00 //04 //00 //04//00 //LWC[7:0] */ { 0x13, 0x02 }, /* 05 //05 //LWC[15:8] */ { 0x15, 0x12 }, /* 0x10 //DPHYY_MODE read_ready */ { 0x17, 0x01 }, { 0x40, 0x08 }, { 0x41, 0x00 }, { 0x42, 0x00 }, { 0x43, 0x00 }, { 0x21, 0x02 }, /* 0x01 */ { 0x22, 0x02 }, /* 0x01 */ { 0x23, 0x01 }, /* 0x05 //Nor:0x05 DOU:0x06 */ { 0x29, 0x00 }, { 0x2A, 0x25 }, /* 0x05 //data zero 0x7a de */ { 0x2B, 0x02 }, { 0xfe, 0x00 }, /* CISCTL */ { 0x00, 0x2f }, /* 2f//0f//02//01 */ { 0x01, 0x0f }, /* 06 */ { 0x02, 0x04 }, { 0x4f, 0x00 }, /* AEC 0FF */ { 0x03, 0x01 }, /* 0x03 //04 */ { 0x04, 0xc0 }, /* 0xe8 //58 */ { 0x05, 0x00 }, { 0x06, 0xb2 }, /* 0x0a //HB */ { 0x07, 0x00 }, { 0x08, 0x0c }, /* 0x89 //VB */ { 0x09, 0x00 }, /* 
row start */ { 0x0a, 0x00 }, { 0x0b, 0x00 }, /* col start */ { 0x0c, 0x00 }, { 0x0d, 0x01 }, /* height */ { 0x0e, 0xf2 }, /* 0xf7 //height */ { 0x0f, 0x02 }, /* width */ { 0x10, 0x94 }, /* 0xa0 //height */ { 0x17, 0x14 }, { 0x18, 0x1a }, /* 0a//[4]double reset */ { 0x19, 0x14 }, /* AD pipeline */ { 0x1b, 0x48 }, { 0x1e, 0x6b }, /* 3b//col bias */ { 0x1f, 0x28 }, /* 20//00//08//txlow */ { 0x20, 0x89 }, /* 88//0c//[3:2]DA15 */ { 0x21, 0x49 }, /* 48//[3] txhigh */ { 0x22, 0xb0 }, { 0x23, 0x04 }, /* [1:0]vcm_r */ { 0x24, 0x16 }, /* 15 */ { 0x34, 0x20 }, /* [6:4] rsg high//range */ /* BLK */ { 0x26, 0x23 }, /* [1]dark_current_en [0]offset_en */ { 0x28, 0xff }, /* BLK_limie_value */ { 0x29, 0x00 }, /* global offset */ { 0x33, 0x18 }, /* offset_ratio */ { 0x37, 0x20 }, /* dark_current_ratio */ { 0x2a, 0x00 }, { 0x2b, 0x00 }, { 0x2c, 0x00 }, { 0x2d, 0x00 }, { 0x2e, 0x00 }, { 0x2f, 0x00 }, { 0x30, 0x00 }, { 0x31, 0x00 }, { 0x47, 0x80 }, /* a7 */ { 0x4e, 0x66 }, /* select_row */ { 0xa8, 0x02 }, /* win_width_dark, same with crop_win_width */ { 0xa9, 0x80 }, /* ISP */ { 0x40, 0x06 }, /* 0xff //ff //48 */ { 0x41, 0x00 }, /* 0x21 //00//[0]curve_en */ { 0x42, 0x04 }, /* 0xcf //0a//[1]awn_en */ { 0x44, 0x18 }, /* 0x18 //02 */ { 0x46, 0x02 }, /* 0x03 //sync */ { 0x49, 0x03 }, { 0x4c, 0x20 }, /* 00[5]pretect exp */ { 0x50, 0x01 }, /* crop enable */ { 0x51, 0x00 }, { 0x52, 0x00 }, { 0x53, 0x00 }, { 0x54, 0x01 }, { 0x55, 0x01 }, /* crop window height */ { 0x56, 0xf0 }, { 0x57, 0x02 }, /* crop window width */ { 0x58, 0x90 }, /* Gain */ { 0x70, 0x70 }, /* 70 //80//global gain */ { 0x71, 0x20 }, /* pregain gain */ { 0x72, 0x40 }, /* post gain */ { 0x5a, 0x84 }, /* 84//analog gain 0 */ { 0x5b, 0xc9 }, /* c9 */ { 0x5c, 0xed }, /* ed//not use pga gain highest level */ { 0x77, 0x40 }, /* R gain 0x74 //awb gain */ { 0x78, 0x40 }, /* G gain */ { 0x79, 0x40 }, /* B gain 0x5f */ { 0x48, 0x00 }, { 0xfe, 0x01 }, { 0x0a, 0x45 }, /* [7]col gain mode */ { 0x3e, 0x40 }, { 0x3f, 0x5c }, { 0x40, 0x7b }, 
{ 0x41, 0xbd }, { 0x42, 0xf6 }, { 0x43, 0x63 }, { 0x03, 0x60 }, { 0x44, 0x03 }, /* Dark / Sun mode related */ { 0xfe, 0x01 }, { 0x45, 0xa4 }, /* 0xf7 */ { 0x46, 0xf0 }, /* 0xff //f0//sun value th */ { 0x48, 0x03 }, /* sun mode */ { 0x4f, 0x60 }, /* sun_clamp */ { 0xfe, 0x00 }, }; static const struct gc0310_reg gc0310_VGA_30fps[] = { { 0xfe, 0x00 }, { 0x0d, 0x01 }, /* height */ { 0x0e, 0xf2 }, /* 0xf7 //height */ { 0x0f, 0x02 }, /* width */ { 0x10, 0x94 }, /* 0xa0 //height */ { 0x50, 0x01 }, /* crop enable */ { 0x51, 0x00 }, { 0x52, 0x00 }, { 0x53, 0x00 }, { 0x54, 0x01 }, { 0x55, 0x01 }, /* crop window height */ { 0x56, 0xf0 }, { 0x57, 0x02 }, /* crop window width */ { 0x58, 0x90 }, { 0xfe, 0x03 }, { 0x12, 0x90 }, /* 00 //04 //00 //04//00 //LWC[7:0] */ { 0x13, 0x02 }, /* 05 //05 //LWC[15:8] */ { 0xfe, 0x00 }, }; /* * gc0310_write_reg_array - Initializes a list of GC0310 registers * @client: i2c driver client structure * @reglist: list of registers to be written * @count: number of register, value pairs in the list */ static int gc0310_write_reg_array(struct i2c_client *client, const struct gc0310_reg *reglist, int count) { int i, err; for (i = 0; i < count; i++) { err = i2c_smbus_write_byte_data(client, reglist[i].reg, reglist[i].val); if (err) { dev_err(&client->dev, "write error: wrote 0x%x to offset 0x%x error %d", reglist[i].val, reglist[i].reg, err); return err; } } return 0; } static int gc0310_exposure_set(struct gc0310_device *dev, u32 exp) { struct i2c_client *client = v4l2_get_subdevdata(&dev->sd); return i2c_smbus_write_word_swapped(client, GC0310_AEC_PK_EXPO_H, exp); } static int gc0310_gain_set(struct gc0310_device *dev, u32 gain) { struct i2c_client *client = v4l2_get_subdevdata(&dev->sd); u8 again, dgain; int ret; /* Taken from original driver, this never sets dgain lower then 32? 
*/ /* Change 0 - 95 to 32 - 127 */ gain += 32; if (gain < 64) { again = 0x0; /* sqrt(2) */ dgain = gain; } else { again = 0x2; /* 2 * sqrt(2) */ dgain = gain / 2; } ret = i2c_smbus_write_byte_data(client, GC0310_AGC_ADJ, again); if (ret) return ret; return i2c_smbus_write_byte_data(client, GC0310_DGC_ADJ, dgain); } static int gc0310_s_ctrl(struct v4l2_ctrl *ctrl) { struct gc0310_device *dev = container_of(ctrl->handler, struct gc0310_device, ctrls.handler); int ret; /* Only apply changes to the controls if the device is powered up */ if (!pm_runtime_get_if_in_use(dev->sd.dev)) return 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: ret = gc0310_exposure_set(dev, ctrl->val); break; case V4L2_CID_GAIN: ret = gc0310_gain_set(dev, ctrl->val); break; default: ret = -EINVAL; break; } pm_runtime_put(dev->sd.dev); return ret; } static const struct v4l2_ctrl_ops ctrl_ops = { .s_ctrl = gc0310_s_ctrl, }; static struct v4l2_mbus_framefmt * gc0310_get_pad_format(struct gc0310_device *dev, struct v4l2_subdev_state *state, unsigned int pad, enum v4l2_subdev_format_whence which) { if (which == V4L2_SUBDEV_FORMAT_TRY) return v4l2_subdev_get_try_format(&dev->sd, state, pad); return &dev->mode.fmt; } /* The GC0310 currently only supports 1 fixed fmt */ static void gc0310_fill_format(struct v4l2_mbus_framefmt *fmt) { memset(fmt, 0, sizeof(*fmt)); fmt->width = GC0310_NATIVE_WIDTH; fmt->height = GC0310_NATIVE_HEIGHT; fmt->field = V4L2_FIELD_NONE; fmt->code = MEDIA_BUS_FMT_SGRBG8_1X8; } static int gc0310_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct gc0310_device *dev = to_gc0310_sensor(sd); struct v4l2_mbus_framefmt *fmt; fmt = gc0310_get_pad_format(dev, sd_state, format->pad, format->which); gc0310_fill_format(fmt); format->format = *fmt; return 0; } static int gc0310_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct gc0310_device *dev = to_gc0310_sensor(sd); 
struct v4l2_mbus_framefmt *fmt;

	fmt = gc0310_get_pad_format(dev, sd_state, format->pad, format->which);
	format->format = *fmt;

	return 0;
}

/*
 * gc0310_detect - verify a GC0310 is present on the bus.
 * @client: i2c client for the sensor
 *
 * Runtime-resumes the device, reads the 16-bit chip-ID register pair and
 * compares it against GC0310_ID.  Returns 0 on a match, -ENODEV when the
 * adapter lacks I2C functionality, the read fails, or the ID differs.
 */
static int gc0310_detect(struct i2c_client *client)
{
	struct i2c_adapter *adapter = client->adapter;
	int ret;

	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
		return -ENODEV;

	/*
	 * pm_runtime_get_sync() raises the usage count even on failure,
	 * so pm_runtime_put() is called unconditionally afterwards.
	 */
	ret = pm_runtime_get_sync(&client->dev);
	if (ret >= 0)
		ret = i2c_smbus_read_word_swapped(client, GC0310_SC_CMMN_CHIP_ID_H);
	pm_runtime_put(&client->dev);
	if (ret < 0) {
		dev_err(&client->dev, "read sensor_id failed: %d\n", ret);
		return -ENODEV;
	}

	dev_dbg(&client->dev, "sensor ID = 0x%x\n", ret);

	if (ret != GC0310_ID) {
		dev_err(&client->dev, "sensor ID error, read id = 0x%x, target id = 0x%x\n",
			ret, GC0310_ID);
		return -ENODEV;
	}

	dev_dbg(&client->dev, "detect gc0310 success\n");

	return 0;
}

/*
 * gc0310_s_stream - start/stop streaming (continues on the next source line).
 * Streaming state changes are serialized by dev->input_lock.
 */
static int gc0310_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct gc0310_device *dev = to_gc0310_sensor(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret = 0;

	dev_dbg(&client->dev, "%s S enable=%d\n", __func__, enable);
	mutex_lock(&dev->input_lock);

	if (dev->is_streaming == enable) {
		dev_warn(&client->dev, "stream already %s\n", enable ?
"started" : "stopped");
		goto error_unlock;
	}

	if (enable) {
		/*
		 * pm_runtime_get_sync() raises the usage count even when it
		 * fails, so the error path must drop it via error_power_down.
		 */
		ret = pm_runtime_get_sync(&client->dev);
		if (ret < 0)
			goto error_power_down;

		msleep(100);

		ret = gc0310_write_reg_array(client, gc0310_reset_register,
					     ARRAY_SIZE(gc0310_reset_register));
		if (ret)
			goto error_power_down;

		ret = gc0310_write_reg_array(client, gc0310_VGA_30fps,
					     ARRAY_SIZE(gc0310_VGA_30fps));
		if (ret)
			goto error_power_down;

		/* restore value of all ctrls */
		ret = __v4l2_ctrl_handler_setup(&dev->ctrls.handler);
		if (ret)
			goto error_power_down;

		/* enable per frame MIPI and sensor ctrl reset  */
		ret = i2c_smbus_write_byte_data(client, 0xFE, 0x30);
		if (ret)
			goto error_power_down;
	}

	/* Stream on/off is controlled from register page 3; restore page 0 after. */
	ret = i2c_smbus_write_byte_data(client, GC0310_RESET_RELATED,
					GC0310_REGISTER_PAGE_3);
	if (ret)
		goto error_power_down;

	ret = i2c_smbus_write_byte_data(client, GC0310_SW_STREAM,
					enable ? GC0310_START_STREAMING : GC0310_STOP_STREAMING);
	if (ret)
		goto error_power_down;

	ret = i2c_smbus_write_byte_data(client, GC0310_RESET_RELATED,
					GC0310_REGISTER_PAGE_0);
	if (ret)
		goto error_power_down;

	/* Drop the runtime-PM reference taken at stream-on when stopping. */
	if (!enable)
		pm_runtime_put(&client->dev);

	dev->is_streaming = enable;
	mutex_unlock(&dev->input_lock);
	return 0;

error_power_down:
	pm_runtime_put(&client->dev);
	dev->is_streaming = false;
error_unlock:
	mutex_unlock(&dev->input_lock);
	return ret;
}

/* Report the fixed 1/GC0310_FPS frame interval; the sensor has one mode. */
static int gc0310_g_frame_interval(struct v4l2_subdev *sd,
				   struct v4l2_subdev_frame_interval *interval)
{
	interval->interval.numerator = 1;
	interval->interval.denominator = GC0310_FPS;

	return 0;
}

static int gc0310_enum_mbus_code(struct v4l2_subdev *sd,
				 struct v4l2_subdev_state *sd_state,
				 struct v4l2_subdev_mbus_code_enum *code)
{
	/* We support only a single format */
	if (code->index)
		return -EINVAL;

	code->code = MEDIA_BUS_FMT_SGRBG8_1X8;
	return 0;
}

static int gc0310_enum_frame_size(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state,
				  struct v4l2_subdev_frame_size_enum *fse)
{
	/* We support only a single resolution */
	if (fse->index)
		return -EINVAL;

	fse->min_width = GC0310_NATIVE_WIDTH;
fse->max_width = GC0310_NATIVE_WIDTH;
	fse->min_height = GC0310_NATIVE_HEIGHT;
	fse->max_height = GC0310_NATIVE_HEIGHT;

	return 0;
}

/* Number of corrupt frames to drop after stream start. */
static int gc0310_g_skip_frames(struct v4l2_subdev *sd, u32 *frames)
{
	*frames = GC0310_SKIP_FRAMES;
	return 0;
}

static const struct v4l2_subdev_sensor_ops gc0310_sensor_ops = {
	.g_skip_frames = gc0310_g_skip_frames,
};

static const struct v4l2_subdev_video_ops gc0310_video_ops = {
	.s_stream = gc0310_s_stream,
	.g_frame_interval = gc0310_g_frame_interval,
};

static const struct v4l2_subdev_pad_ops gc0310_pad_ops = {
	.enum_mbus_code = gc0310_enum_mbus_code,
	.enum_frame_size = gc0310_enum_frame_size,
	.get_fmt = gc0310_get_fmt,
	.set_fmt = gc0310_set_fmt,
};

static const struct v4l2_subdev_ops gc0310_ops = {
	.video = &gc0310_video_ops,
	.pad = &gc0310_pad_ops,
	.sensor = &gc0310_sensor_ops,
};

/*
 * gc0310_init_controls - register the exposure and gain V4L2 controls.
 * @dev: sensor private data
 *
 * Returns 0 on success or the handler's accumulated error code.
 */
static int gc0310_init_controls(struct gc0310_device *dev)
{
	struct v4l2_ctrl_handler *hdl = &dev->ctrls.handler;

	v4l2_ctrl_handler_init(hdl, 2);

	/* Use the same lock for controls as for everything else */
	hdl->lock = &dev->input_lock;
	dev->sd.ctrl_handler = hdl;

	dev->ctrls.exposure =
		v4l2_ctrl_new_std(hdl, &ctrl_ops, V4L2_CID_EXPOSURE, 0, 4095, 1, 1023);

	/* 32 steps at base gain 1 + 64 half steps at base gain 2 */
	dev->ctrls.gain =
		v4l2_ctrl_new_std(hdl, &ctrl_ops, V4L2_CID_GAIN, 0, 95, 1, 31);

	return hdl->error;
}

/*
 * gc0310_remove - undo everything gc0310_probe() set up.
 * Also used by probe itself as a rollback on late failures.
 */
static void gc0310_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct gc0310_device *dev = to_gc0310_sensor(sd);

	dev_dbg(&client->dev, "gc0310_remove...\n");

	v4l2_async_unregister_subdev(sd);
	media_entity_cleanup(&dev->sd.entity);
	v4l2_ctrl_handler_free(&dev->ctrls.handler);
	mutex_destroy(&dev->input_lock);
	fwnode_handle_put(dev->ep_fwnode);
	pm_runtime_disable(&client->dev);
}

/* Probe: allocate state, find the fwnode endpoint/GPIOs, detect, register. */
static int gc0310_probe(struct i2c_client *client)
{
	struct gc0310_device *dev;
	int ret;

	dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/*
	 * Sometimes the fwnode graph is initialized by
the bridge driver. * Bridge drivers doing this may also add GPIO mappings, wait for this. */ dev->ep_fwnode = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL); if (!dev->ep_fwnode) return dev_err_probe(&client->dev, -EPROBE_DEFER, "waiting for fwnode graph endpoint\n"); dev->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(dev->reset)) { fwnode_handle_put(dev->ep_fwnode); return dev_err_probe(&client->dev, PTR_ERR(dev->reset), "getting reset GPIO\n"); } dev->powerdown = devm_gpiod_get(&client->dev, "powerdown", GPIOD_OUT_HIGH); if (IS_ERR(dev->powerdown)) { fwnode_handle_put(dev->ep_fwnode); return dev_err_probe(&client->dev, PTR_ERR(dev->powerdown), "getting powerdown GPIO\n"); } mutex_init(&dev->input_lock); v4l2_i2c_subdev_init(&dev->sd, client, &gc0310_ops); gc0310_fill_format(&dev->mode.fmt); pm_runtime_set_suspended(&client->dev); pm_runtime_enable(&client->dev); pm_runtime_set_autosuspend_delay(&client->dev, 1000); pm_runtime_use_autosuspend(&client->dev); ret = gc0310_detect(client); if (ret) { gc0310_remove(client); return ret; } dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; dev->pad.flags = MEDIA_PAD_FL_SOURCE; dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; dev->sd.fwnode = dev->ep_fwnode; ret = gc0310_init_controls(dev); if (ret) { gc0310_remove(client); return ret; } ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad); if (ret) { gc0310_remove(client); return ret; } ret = v4l2_async_register_subdev_sensor(&dev->sd); if (ret) { gc0310_remove(client); return ret; } return 0; } static int gc0310_suspend(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct gc0310_device *gc0310_dev = to_gc0310_sensor(sd); gpiod_set_value_cansleep(gc0310_dev->powerdown, 1); gpiod_set_value_cansleep(gc0310_dev->reset, 1); return 0; } static int gc0310_resume(struct device *dev) { struct v4l2_subdev *sd = dev_get_drvdata(dev); struct gc0310_device *gc0310_dev = to_gc0310_sensor(sd); usleep_range(10000, 
15000); gpiod_set_value_cansleep(gc0310_dev->reset, 0); usleep_range(10000, 15000); gpiod_set_value_cansleep(gc0310_dev->powerdown, 0); return 0; } static DEFINE_RUNTIME_DEV_PM_OPS(gc0310_pm_ops, gc0310_suspend, gc0310_resume, NULL); static const struct acpi_device_id gc0310_acpi_match[] = { {"INT0310"}, {}, }; MODULE_DEVICE_TABLE(acpi, gc0310_acpi_match); static struct i2c_driver gc0310_driver = { .driver = { .name = "gc0310", .pm = pm_sleep_ptr(&gc0310_pm_ops), .acpi_match_table = gc0310_acpi_match, }, .probe = gc0310_probe, .remove = gc0310_remove, }; module_i2c_driver(gc0310_driver); MODULE_AUTHOR("Lai, Angie <[email protected]>"); MODULE_DESCRIPTION("A low-level driver for GalaxyCore GC0310 sensors"); MODULE_LICENSE("GPL");
linux-master
drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for OmniVision OV2722 1080p HD camera sensor. * * Copyright (c) 2013 Intel Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/moduleparam.h> #include <media/v4l2-device.h> #include "../include/linux/atomisp_gmin_platform.h" #include <linux/acpi.h> #include <linux/io.h> #include "ov2722.h" /* i2c read/write stuff */ static int ov2722_read_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 *val) { int err; struct i2c_msg msg[2]; unsigned char data[6]; if (!client->adapter) { dev_err(&client->dev, "%s error, no client->adapter\n", __func__); return -ENODEV; } if (data_length != OV2722_8BIT && data_length != OV2722_16BIT && data_length != OV2722_32BIT) { dev_err(&client->dev, "%s error, invalid data length\n", __func__); return -EINVAL; } memset(msg, 0, sizeof(msg)); msg[0].addr = client->addr; msg[0].flags = 0; msg[0].len = I2C_MSG_LENGTH; msg[0].buf = data; /* high byte goes out first */ data[0] = (u8)(reg >> 8); data[1] = (u8)(reg & 0xff); msg[1].addr = client->addr; msg[1].len = data_length; msg[1].flags = I2C_M_RD; msg[1].buf = data; err = i2c_transfer(client->adapter, msg, 2); if (err != 2) { if (err >= 0) err = -EIO; dev_err(&client->dev, "read from offset 0x%x error %d", reg, err); return 
err; } *val = 0; /* high byte comes first */ if (data_length == OV2722_8BIT) *val = (u8)data[0]; else if (data_length == OV2722_16BIT) *val = be16_to_cpu(*(__be16 *)&data[0]); else *val = be32_to_cpu(*(__be32 *)&data[0]); return 0; } static int ov2722_i2c_write(struct i2c_client *client, u16 len, u8 *data) { struct i2c_msg msg; const int num_msg = 1; int ret; msg.addr = client->addr; msg.flags = 0; msg.len = len; msg.buf = data; ret = i2c_transfer(client->adapter, &msg, 1); return ret == num_msg ? 0 : -EIO; } static int ov2722_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 val) { int ret; unsigned char data[4] = {0}; __be16 *wreg = (__be16 *)data; const u16 len = data_length + sizeof(u16); /* 16-bit address + data */ if (data_length != OV2722_8BIT && data_length != OV2722_16BIT) { dev_err(&client->dev, "%s error, invalid data_length\n", __func__); return -EINVAL; } /* high byte goes out first */ *wreg = cpu_to_be16(reg); if (data_length == OV2722_8BIT) { data[2] = (u8)(val); } else { /* OV2722_16BIT */ __be16 *wdata = (__be16 *)&data[2]; *wdata = cpu_to_be16(val); } ret = ov2722_i2c_write(client, len, data); if (ret) dev_err(&client->dev, "write error: wrote 0x%x to offset 0x%x error %d", val, reg, ret); return ret; } /* * ov2722_write_reg_array - Initializes a list of OV2722 registers * @client: i2c driver client structure * @reglist: list of registers to be written * * This function initializes a list of registers. When consecutive addresses * are found in a row on the list, this function creates a buffer and sends * consecutive data in a single i2c_transfer(). * * __ov2722_flush_reg_array, __ov2722_buf_reg_array() and * __ov2722_write_reg_is_consecutive() are internal functions to * ov2722_write_reg_array_fast() and should be not used anywhere else. 
* */ static int __ov2722_flush_reg_array(struct i2c_client *client, struct ov2722_write_ctrl *ctrl) { u16 size; __be16 *data16 = (void *)&ctrl->buffer.addr; if (ctrl->index == 0) return 0; size = sizeof(u16) + ctrl->index; /* 16-bit address + data */ *data16 = cpu_to_be16(ctrl->buffer.addr); ctrl->index = 0; return ov2722_i2c_write(client, size, (u8 *)&ctrl->buffer); } static int __ov2722_buf_reg_array(struct i2c_client *client, struct ov2722_write_ctrl *ctrl, const struct ov2722_reg *next) { int size; __be16 *data16; switch (next->type) { case OV2722_8BIT: size = 1; ctrl->buffer.data[ctrl->index] = (u8)next->val; break; case OV2722_16BIT: size = 2; data16 = (void *)&ctrl->buffer.data[ctrl->index]; *data16 = cpu_to_be16((u16)next->val); break; default: return -EINVAL; } /* When first item is added, we need to store its starting address */ if (ctrl->index == 0) ctrl->buffer.addr = next->reg; ctrl->index += size; /* * Buffer cannot guarantee free space for u32? Better flush it to avoid * possible lack of memory for next item. */ if (ctrl->index + sizeof(u16) >= OV2722_MAX_WRITE_BUF_SIZE) return __ov2722_flush_reg_array(client, ctrl); return 0; } static int __ov2722_write_reg_is_consecutive(struct i2c_client *client, struct ov2722_write_ctrl *ctrl, const struct ov2722_reg *next) { if (ctrl->index == 0) return 1; return ctrl->buffer.addr + ctrl->index == next->reg; } static int ov2722_write_reg_array(struct i2c_client *client, const struct ov2722_reg *reglist) { const struct ov2722_reg *next = reglist; struct ov2722_write_ctrl ctrl; int err; ctrl.index = 0; for (; next->type != OV2722_TOK_TERM; next++) { switch (next->type & OV2722_TOK_MASK) { case OV2722_TOK_DELAY: err = __ov2722_flush_reg_array(client, &ctrl); if (err) return err; msleep(next->val); break; default: /* * If next address is not consecutive, data needs to be * flushed before proceed. 
*/ if (!__ov2722_write_reg_is_consecutive(client, &ctrl, next)) { err = __ov2722_flush_reg_array(client, &ctrl); if (err) return err; } err = __ov2722_buf_reg_array(client, &ctrl, next); if (err) { dev_err(&client->dev, "%s: write error, aborted\n", __func__); return err; } break; } } return __ov2722_flush_reg_array(client, &ctrl); } static long __ov2722_set_exposure(struct v4l2_subdev *sd, int coarse_itg, int gain, int digitgain) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov2722_device *dev = to_ov2722_sensor(sd); u16 hts, vts; int ret; dev_dbg(&client->dev, "set_exposure without group hold\n"); /* clear VTS_DIFF on manual mode */ ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_VTS_DIFF_H, 0); if (ret) return ret; hts = dev->pixels_per_line; vts = dev->lines_per_frame; if ((coarse_itg + OV2722_COARSE_INTG_TIME_MAX_MARGIN) > vts) vts = coarse_itg + OV2722_COARSE_INTG_TIME_MAX_MARGIN; coarse_itg <<= 4; digitgain <<= 2; ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_VTS_H, vts); if (ret) return ret; ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_HTS_H, hts); if (ret) return ret; /* set exposure */ ret = ov2722_write_reg(client, OV2722_8BIT, OV2722_AEC_PK_EXPO_L, coarse_itg & 0xff); if (ret) return ret; ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_AEC_PK_EXPO_H, (coarse_itg >> 8) & 0xfff); if (ret) return ret; /* set analog gain */ ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_AGC_ADJ_H, gain); if (ret) return ret; /* set digital gain */ ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_MWB_GAIN_R_H, digitgain); if (ret) return ret; ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_MWB_GAIN_G_H, digitgain); if (ret) return ret; ret = ov2722_write_reg(client, OV2722_16BIT, OV2722_MWB_GAIN_B_H, digitgain); return ret; } static int ov2722_set_exposure(struct v4l2_subdev *sd, int exposure, int gain, int digitgain) { struct ov2722_device *dev = to_ov2722_sensor(sd); int ret; mutex_lock(&dev->input_lock); ret = 
__ov2722_set_exposure(sd, exposure, gain, digitgain); mutex_unlock(&dev->input_lock); return ret; } static long ov2722_s_exposure(struct v4l2_subdev *sd, struct atomisp_exposure *exposure) { int exp = exposure->integration_time[0]; int gain = exposure->gain[0]; int digitgain = exposure->gain[1]; /* we should not accept the invalid value below. */ if (gain == 0) { struct i2c_client *client = v4l2_get_subdevdata(sd); v4l2_err(client, "%s: invalid value\n", __func__); return -EINVAL; } return ov2722_set_exposure(sd, exp, gain, digitgain); } static long ov2722_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { switch (cmd) { case ATOMISP_IOC_S_EXPOSURE: return ov2722_s_exposure(sd, arg); default: return -EINVAL; } return 0; } /* This returns the exposure time being used. This should only be used * for filling in EXIF data, not for actual image processing. */ static int ov2722_q_exposure(struct v4l2_subdev *sd, s32 *value) { struct i2c_client *client = v4l2_get_subdevdata(sd); u16 reg_v, reg_v2; int ret; /* get exposure */ ret = ov2722_read_reg(client, OV2722_8BIT, OV2722_AEC_PK_EXPO_L, &reg_v); if (ret) goto err; ret = ov2722_read_reg(client, OV2722_8BIT, OV2722_AEC_PK_EXPO_M, &reg_v2); if (ret) goto err; reg_v += reg_v2 << 8; ret = ov2722_read_reg(client, OV2722_8BIT, OV2722_AEC_PK_EXPO_H, &reg_v2); if (ret) goto err; *value = reg_v + (((u32)reg_v2 << 16)); err: return ret; } static int ov2722_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct ov2722_device *dev = container_of(ctrl->handler, struct ov2722_device, ctrl_handler); int ret = 0; unsigned int val; switch (ctrl->id) { case V4L2_CID_EXPOSURE_ABSOLUTE: ret = ov2722_q_exposure(&dev->sd, &ctrl->val); break; case V4L2_CID_LINK_FREQ: val = dev->res->mipi_freq; if (val == 0) return -EINVAL; ctrl->val = val * 1000; /* To Hz */ break; default: ret = -EINVAL; } return ret; } static const struct v4l2_ctrl_ops ctrl_ops = { .g_volatile_ctrl = ov2722_g_volatile_ctrl }; static const struct v4l2_ctrl_config 
ov2722_controls[] = { { .ops = &ctrl_ops, .id = V4L2_CID_EXPOSURE_ABSOLUTE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "exposure", .min = 0x0, .max = 0xffff, .step = 0x01, .def = 0x00, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_LINK_FREQ, .name = "Link Frequency", .type = V4L2_CTRL_TYPE_INTEGER, .min = 1, .max = 1500000 * 1000, .step = 1, .def = 1, .flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY, }, }; static int ov2722_init(struct v4l2_subdev *sd) { struct ov2722_device *dev = to_ov2722_sensor(sd); mutex_lock(&dev->input_lock); /* restore settings */ ov2722_res = ov2722_res_preview; N_RES = N_RES_PREVIEW; mutex_unlock(&dev->input_lock); return 0; } static int power_ctrl(struct v4l2_subdev *sd, bool flag) { int ret = -1; struct ov2722_device *dev = to_ov2722_sensor(sd); if (!dev || !dev->platform_data) return -ENODEV; if (flag) { ret = dev->platform_data->v1p8_ctrl(sd, 1); if (ret == 0) { ret = dev->platform_data->v2p8_ctrl(sd, 1); if (ret) dev->platform_data->v1p8_ctrl(sd, 0); } } else { ret = dev->platform_data->v1p8_ctrl(sd, 0); ret |= dev->platform_data->v2p8_ctrl(sd, 0); } return ret; } static int gpio_ctrl(struct v4l2_subdev *sd, bool flag) { struct ov2722_device *dev = to_ov2722_sensor(sd); int ret = -1; if (!dev || !dev->platform_data) return -ENODEV; /* Note: the GPIO order is asymmetric: always RESET# * before PWDN# when turning it on or off. 
*/
	ret = dev->platform_data->gpio0_ctrl(sd, flag);
	ret |= dev->platform_data->gpio1_ctrl(sd, flag);
	return ret;
}

/*
 * power_up - bring the OV2722 module fully up: rails, GPIOs, FLIS clock.
 * @sd: subdev handle
 *
 * No-op if already powered (dev->power_on == 1).  Honors the datasheet
 * delays between rail enable, PWDN release and first I2C access.
 * On failure all partially-enabled resources are rolled back.
 */
static int power_up(struct v4l2_subdev *sd)
{
	struct ov2722_device *dev = to_ov2722_sensor(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret;

	if (!dev->platform_data) {
		dev_err(&client->dev, "no camera_sensor_platform_data");
		return -ENODEV;
	}

	if (dev->power_on == 1)
		return 0; /* Already on */

	/* power control */
	ret = power_ctrl(sd, 1);
	if (ret)
		goto fail_power;

	/* according to DS, at least 5ms is needed between DOVDD and PWDN */
	usleep_range(5000, 6000);

	/* gpio ctrl */
	ret = gpio_ctrl(sd, 1);
	if (ret) {
		/*
		 * NOTE(review): on gpio-on failure this de-asserts the GPIOs
		 * and, if that succeeds, *continues* the power-up sequence.
		 * Looks intentional (legacy behavior) — confirm before changing.
		 */
		ret = gpio_ctrl(sd, 0);
		if (ret)
			goto fail_power;
	}

	/* flis clock control */
	ret = dev->platform_data->flisclk_ctrl(sd, 1);
	if (ret)
		goto fail_clk;

	/* according to DS, 20ms is needed between PWDN and i2c access */
	msleep(20);

	dev->power_on = 1;
	return 0;

fail_clk:
	gpio_ctrl(sd, 0);
fail_power:
	power_ctrl(sd, 0);
	dev_err(&client->dev, "sensor power-up failed\n");

	return ret;
}

/*
 * power_down - reverse of power_up(): clock off, GPIOs off, rails off.
 * Errors are logged but the sequence continues so that as much as
 * possible is powered down; returns the last step's status.
 */
static int power_down(struct v4l2_subdev *sd)
{
	struct ov2722_device *dev = to_ov2722_sensor(sd);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int ret = 0;

	if (!dev->platform_data) {
		dev_err(&client->dev, "no camera_sensor_platform_data");
		return -ENODEV;
	}

	if (dev->power_on == 0)
		return 0; /* Already off */

	ret = dev->platform_data->flisclk_ctrl(sd, 0);
	if (ret)
		dev_err(&client->dev, "flisclk failed\n");

	/* gpio ctrl */
	ret = gpio_ctrl(sd, 0);
	if (ret) {
		/* NOTE(review): retries the identical call once — verify intent. */
		ret = gpio_ctrl(sd, 0);
		if (ret)
			dev_err(&client->dev, "gpio failed 2\n");
	}

	/* power control */
	ret = power_ctrl(sd, 0);
	if (ret)
		dev_err(&client->dev, "vprog failed.\n");

	dev->power_on = 0;
	return ret;
}

/* V4L2 s_power: on -> power_up + re-init mode tables, off -> power_down. */
static int ov2722_s_power(struct v4l2_subdev *sd, int on)
{
	int ret;

	if (on == 0)
		return power_down(sd);

	ret = power_up(sd);
	if (!ret)
		return ov2722_init(sd);

	return ret;
}

/* TODO: remove it.
*/ static int startup(struct v4l2_subdev *sd) { struct ov2722_device *dev = to_ov2722_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; ret = ov2722_write_reg(client, OV2722_8BIT, OV2722_SW_RESET, 0x01); if (ret) { dev_err(&client->dev, "ov2722 reset err.\n"); return ret; } ret = ov2722_write_reg_array(client, dev->res->regs); if (ret) { dev_err(&client->dev, "ov2722 write register err.\n"); return ret; } return ret; } static int ov2722_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; struct ov2722_device *dev = to_ov2722_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov2722_resolution *res; struct camera_mipi_info *ov2722_info = NULL; int ret = 0; if (format->pad) return -EINVAL; if (!fmt) return -EINVAL; ov2722_info = v4l2_get_subdev_hostdata(sd); if (!ov2722_info) return -EINVAL; res = v4l2_find_nearest_size(ov2722_res_preview, ARRAY_SIZE(ov2722_res_preview), width, height, fmt->width, fmt->height); if (!res) res = &ov2722_res_preview[N_RES - 1]; fmt->width = res->width; fmt->height = res->height; dev->res = res; fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10; if (format->which == V4L2_SUBDEV_FORMAT_TRY) { sd_state->pads->try_fmt = *fmt; return 0; } mutex_lock(&dev->input_lock); /* s_power has not been called yet for std v4l2 clients (camorama) */ power_up(sd); dev->pixels_per_line = dev->res->pixels_per_line; dev->lines_per_frame = dev->res->lines_per_frame; ret = startup(sd); if (ret) { int i = 0; dev_err(&client->dev, "ov2722 startup err, retry to power up\n"); for (i = 0; i < OV2722_POWER_UP_RETRY_NUM; i++) { dev_err(&client->dev, "ov2722 retry to power up %d/%d times, result: ", i + 1, OV2722_POWER_UP_RETRY_NUM); power_down(sd); ret = power_up(sd); if (ret) { dev_err(&client->dev, "power up failed, continue\n"); continue; } ret = startup(sd); if (ret) { dev_err(&client->dev, " startup FAILED!\n"); } else { 
dev_err(&client->dev, " startup SUCCESS!\n"); break; } } if (ret) { dev_err(&client->dev, "ov2722 startup err\n"); goto err; } } err: mutex_unlock(&dev->input_lock); return ret; } static int ov2722_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; struct ov2722_device *dev = to_ov2722_sensor(sd); if (format->pad) return -EINVAL; if (!fmt) return -EINVAL; fmt->width = dev->res->width; fmt->height = dev->res->height; fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10; return 0; } static int ov2722_detect(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; u16 high = 0, low = 0; u16 id; u8 revision; if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) return -ENODEV; ov2722_read_reg(client, OV2722_8BIT, OV2722_SC_CMMN_CHIP_ID_H, &high); ov2722_read_reg(client, OV2722_8BIT, OV2722_SC_CMMN_CHIP_ID_L, &low); id = (high << 8) | low; if ((id != OV2722_ID) && (id != OV2720_ID)) { dev_err(&client->dev, "sensor ID error\n"); return -ENODEV; } high = 0; ov2722_read_reg(client, OV2722_8BIT, OV2722_SC_CMMN_SUB_ID, &high); revision = (u8)high & 0x0f; dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision); dev_dbg(&client->dev, "detect ov2722 success\n"); return 0; } static int ov2722_s_stream(struct v4l2_subdev *sd, int enable) { struct ov2722_device *dev = to_ov2722_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; mutex_lock(&dev->input_lock); ret = ov2722_write_reg(client, OV2722_8BIT, OV2722_SW_STREAM, enable ? 
OV2722_START_STREAMING : OV2722_STOP_STREAMING); mutex_unlock(&dev->input_lock); return ret; } static int ov2722_s_config(struct v4l2_subdev *sd, int irq, void *platform_data) { struct ov2722_device *dev = to_ov2722_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (!platform_data) return -ENODEV; dev->platform_data = (struct camera_sensor_platform_data *)platform_data; mutex_lock(&dev->input_lock); /* power off the module, then power on it in future * as first power on by board may not fulfill the * power on sequqence needed by the module */ ret = power_down(sd); if (ret) { dev_err(&client->dev, "ov2722 power-off err.\n"); goto fail_power_off; } ret = power_up(sd); if (ret) { dev_err(&client->dev, "ov2722 power-up err.\n"); goto fail_power_on; } ret = dev->platform_data->csi_cfg(sd, 1); if (ret) goto fail_csi_cfg; /* config & detect sensor */ ret = ov2722_detect(client); if (ret) { dev_err(&client->dev, "ov2722_detect err s_config.\n"); goto fail_csi_cfg; } /* turn off sensor, after probed */ ret = power_down(sd); if (ret) { dev_err(&client->dev, "ov2722 power-off err.\n"); goto fail_csi_cfg; } mutex_unlock(&dev->input_lock); return 0; fail_csi_cfg: dev->platform_data->csi_cfg(sd, 0); fail_power_on: power_down(sd); dev_err(&client->dev, "sensor power-gating failed\n"); fail_power_off: mutex_unlock(&dev->input_lock); return ret; } static int ov2722_g_frame_interval(struct v4l2_subdev *sd, struct v4l2_subdev_frame_interval *interval) { struct ov2722_device *dev = to_ov2722_sensor(sd); interval->interval.numerator = 1; interval->interval.denominator = dev->res->fps; return 0; } static int ov2722_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index >= MAX_FMTS) return -EINVAL; code->code = MEDIA_BUS_FMT_SBGGR10_1X10; return 0; } static int ov2722_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum 
*fse) { int index = fse->index; if (index >= N_RES) return -EINVAL; fse->min_width = ov2722_res[index].width; fse->min_height = ov2722_res[index].height; fse->max_width = ov2722_res[index].width; fse->max_height = ov2722_res[index].height; return 0; } static int ov2722_g_skip_frames(struct v4l2_subdev *sd, u32 *frames) { struct ov2722_device *dev = to_ov2722_sensor(sd); mutex_lock(&dev->input_lock); *frames = dev->res->skip_frames; mutex_unlock(&dev->input_lock); return 0; } static const struct v4l2_subdev_sensor_ops ov2722_sensor_ops = { .g_skip_frames = ov2722_g_skip_frames, }; static const struct v4l2_subdev_video_ops ov2722_video_ops = { .s_stream = ov2722_s_stream, .g_frame_interval = ov2722_g_frame_interval, }; static const struct v4l2_subdev_core_ops ov2722_core_ops = { .s_power = ov2722_s_power, .ioctl = ov2722_ioctl, }; static const struct v4l2_subdev_pad_ops ov2722_pad_ops = { .enum_mbus_code = ov2722_enum_mbus_code, .enum_frame_size = ov2722_enum_frame_size, .get_fmt = ov2722_get_fmt, .set_fmt = ov2722_set_fmt, }; static const struct v4l2_subdev_ops ov2722_ops = { .core = &ov2722_core_ops, .video = &ov2722_video_ops, .pad = &ov2722_pad_ops, .sensor = &ov2722_sensor_ops, }; static void ov2722_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov2722_device *dev = to_ov2722_sensor(sd); dev->platform_data->csi_cfg(sd, 0); v4l2_ctrl_handler_free(&dev->ctrl_handler); v4l2_device_unregister_subdev(sd); atomisp_gmin_remove_subdev(sd); media_entity_cleanup(&dev->sd.entity); kfree(dev); } static int __ov2722_init_ctrl_handler(struct ov2722_device *dev) { struct v4l2_ctrl_handler *hdl; unsigned int i; hdl = &dev->ctrl_handler; v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(ov2722_controls)); for (i = 0; i < ARRAY_SIZE(ov2722_controls); i++) v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov2722_controls[i], NULL); dev->link_freq = v4l2_ctrl_find(&dev->ctrl_handler, V4L2_CID_LINK_FREQ); if (dev->ctrl_handler.error || 
!dev->link_freq) return dev->ctrl_handler.error; dev->sd.ctrl_handler = hdl; return 0; } static int ov2722_probe(struct i2c_client *client) { struct ov2722_device *dev; void *ovpdev; int ret; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; mutex_init(&dev->input_lock); dev->power_on = -1; dev->res = &ov2722_res_preview[0]; v4l2_i2c_subdev_init(&dev->sd, client, &ov2722_ops); ovpdev = gmin_camera_platform_data(&dev->sd, ATOMISP_INPUT_FORMAT_RAW_10, atomisp_bayer_order_grbg); ret = ov2722_s_config(&dev->sd, client->irq, ovpdev); if (ret) goto out_free; ret = __ov2722_init_ctrl_handler(dev); if (ret) goto out_ctrl_handler_free; dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; dev->pad.flags = MEDIA_PAD_FL_SOURCE; dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10; dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad); if (ret) ov2722_remove(client); return atomisp_register_i2c_module(&dev->sd, ovpdev, RAW_CAMERA); out_ctrl_handler_free: v4l2_ctrl_handler_free(&dev->ctrl_handler); out_free: atomisp_gmin_remove_subdev(&dev->sd); v4l2_device_unregister_subdev(&dev->sd); kfree(dev); return ret; } static const struct acpi_device_id ov2722_acpi_match[] = { { "INT33FB" }, {}, }; MODULE_DEVICE_TABLE(acpi, ov2722_acpi_match); static struct i2c_driver ov2722_driver = { .driver = { .name = "ov2722", .acpi_match_table = ov2722_acpi_match, }, .probe = ov2722_probe, .remove = ov2722_remove, }; module_i2c_driver(ov2722_driver); MODULE_AUTHOR("Wei Liu <[email protected]>"); MODULE_DESCRIPTION("A low-level driver for OmniVision 2722 sensors"); MODULE_LICENSE("GPL");
linux-master
drivers/staging/media/atomisp/i2c/atomisp-ov2722.c
// SPDX-License-Identifier: GPL-2.0 /* * Support for OmniVision OV5693 1080p HD camera sensor. * * Copyright (c) 2013 Intel Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kmod.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/moduleparam.h> #include <media/v4l2-device.h> #include <linux/io.h> #include <linux/acpi.h> #include "../../include/linux/atomisp_gmin_platform.h" #include "ov5693.h" #include "ad5823.h" #define __cci_delay(t) \ do { \ if ((t) < 10) { \ usleep_range((t) * 1000, ((t) + 1) * 1000); \ } else { \ msleep((t)); \ } \ } while (0) /* Value 30ms reached through experimentation on byt ecs. * The DS specifies a much lower value but when using a smaller value * the I2C bus sometimes locks up permanently when starting the camera. * This issue could not be reproduced on cht, so we can reduce the * delay value to a lower value when insmod. 
 */
static uint up_delay = 30;
module_param(up_delay, uint, 0644);
MODULE_PARM_DESC(up_delay, "Delay prior to the first CCI transaction for ov5693");

/* Write one 8-bit register of the DW9714-style VCM at VCM_ADDR. */
static int vcm_ad_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
{
	int err;
	struct i2c_msg msg;
	u8 buf[2];

	buf[0] = reg;
	buf[1] = val;
	msg.addr = VCM_ADDR;
	msg.flags = 0;
	msg.len = 2;
	msg.buf = &buf[0];

	err = i2c_transfer(client->adapter, &msg, 1);
	if (err != 1) {
		dev_err(&client->dev, "%s: vcm i2c fail, err code = %d\n",
			__func__, err);
		return -EIO;
	}
	return 0;
}

/* Write one 8-bit register of the AD5823 VCM at AD5823_VCM_ADDR. */
static int ad5823_i2c_write(struct i2c_client *client, u8 reg, u8 val)
{
	struct i2c_msg msg;
	u8 buf[2];

	buf[0] = reg;
	buf[1] = val;
	msg.addr = AD5823_VCM_ADDR;
	msg.flags = 0;
	msg.len = 0x02;
	msg.buf = &buf[0];

	if (i2c_transfer(client->adapter, &msg, 1) != 1)
		return -EIO;
	return 0;
}

/* Read one 8-bit AD5823 register: write the register address, then a
 * one-byte read in the same i2c transaction. */
static int ad5823_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msg[2];
	u8 buf[2];

	buf[0] = reg;
	buf[1] = 0;

	msg[0].addr = AD5823_VCM_ADDR;
	msg[0].flags = 0;
	msg[0].len = 0x01;
	msg[0].buf = &buf[0];

	/* NOTE(review): magic 0x0c looks like it should be
	 * AD5823_VCM_ADDR -- confirm against ad5823.h before replacing. */
	msg[1].addr = 0x0c;
	msg[1].flags = I2C_M_RD;
	msg[1].len = 0x01;
	msg[1].buf = &buf[1];
	*val = 0;

	if (i2c_transfer(client->adapter, msg, 2) != 2)
		return -EIO;
	*val = buf[1];
	return 0;
}

/* Bytes of per-frame embedded (metadata) data produced by the sensor. */
static const u32 ov5693_embedded_effective_size = 28;

/* i2c read/write stuff */

/* Read an 8/16/32-bit big-endian sensor register at 16-bit address @reg
 * into *@val.  (Function continues past this chunk boundary.) */
static int ov5693_read_reg(struct i2c_client *client,
			   u16 data_length, u16 reg, u16 *val)
{
	int err;
	struct i2c_msg msg[2];
	unsigned char data[6];

	if (!client->adapter) {
		dev_err(&client->dev, "%s error, no client->adapter\n",
			__func__);
		return -ENODEV;
	}

	if (data_length != OV5693_8BIT && data_length != OV5693_16BIT
	    && data_length != OV5693_32BIT) {
		dev_err(&client->dev, "%s error, invalid data length\n",
			__func__);
		return -EINVAL;
	}

	memset(msg, 0, sizeof(msg));

	msg[0].addr = client->addr;
	msg[0].flags = 0;
	msg[0].len = I2C_MSG_LENGTH;
	msg[0].buf = data;

	/* high byte goes out first */
	data[0] = (u8)(reg >> 8);
	data[1] = (u8)(reg & 0xff);

	msg[1].addr = client->addr;
	msg[1].len = data_length;
	msg[1].flags = I2C_M_RD;
	msg[1].buf = data;

	err = i2c_transfer(client->adapter, msg, 2);
	if (err != 2) {
		if (err >= 0)
			err = -EIO;
		dev_err(&client->dev,
			"read from offset 0x%x error %d", reg, err);
		return err;
	}

	*val = 0;
	/* high byte comes first */
	if (data_length == OV5693_8BIT)
		*val = (u8)data[0];
	else if (data_length == OV5693_16BIT)
		*val = be16_to_cpu(*(__be16 *)&data[0]);
	else
		/* NOTE(review): *val is only u16, so an OV5693_32BIT read
		 * is silently truncated here -- confirm no caller depends
		 * on the upper 16 bits before relying on this path. */
		*val = be32_to_cpu(*(__be32 *)&data[0]);

	return 0;
}

/* Raw write of @len bytes from @data to the sensor's i2c address.
 * Returns 0 on success, -EIO on transfer failure. */
static int ov5693_i2c_write(struct i2c_client *client, u16 len, u8 *data)
{
	struct i2c_msg msg;
	const int num_msg = 1;
	int ret;

	msg.addr = client->addr;
	msg.flags = 0;
	msg.len = len;
	msg.buf = data;
	ret = i2c_transfer(client->adapter, &msg, 1);

	return ret == num_msg ? 0 : -EIO;
}

/* Write one big-endian 16-bit word to the DW9714-style VCM. */
static int vcm_dw_i2c_write(struct i2c_client *client, u16 data)
{
	struct i2c_msg msg;
	const int num_msg = 1;
	int ret;
	__be16 val;

	val = cpu_to_be16(data);
	msg.addr = VCM_ADDR;
	msg.flags = 0;
	msg.len = OV5693_16BIT;
	msg.buf = (void *)&val;

	ret = i2c_transfer(client->adapter, &msg, 1);

	return ret == num_msg ? 0 : -EIO;
}

/*
 * Theory: per datasheet, the two VCMs both allow for a 2-byte read.
 * The DW9714 doesn't actually specify what this does (it has a
 * two-byte write-only protocol, but specifies the read sequence as
 * legal), but it returns the same data (zeroes) always, after an
 * undocumented initial NAK.  The AD5823 has a one-byte address
 * register to which all writes go, and subsequent reads will cycle
 * through the 8 bytes of registers.  Notably, the default values (the
 * device is always power-cycled affirmatively, so we can rely on
 * these) in AD5823 are not pairwise repetitions of the same 16 bit
 * word.  So all we have to do is sequentially read two bytes at a
 * time and see if we detect a difference in any of the first four
 * pairs.
*/ static int vcm_detect(struct i2c_client *client) { int i, ret; struct i2c_msg msg; u16 data0 = 0, data; for (i = 0; i < 4; i++) { msg.addr = VCM_ADDR; msg.flags = I2C_M_RD; msg.len = sizeof(data); msg.buf = (u8 *)&data; ret = i2c_transfer(client->adapter, &msg, 1); /* * DW9714 always fails the first read and returns * zeroes for subsequent ones */ if (i == 0 && ret == -EREMOTEIO) { data0 = 0; continue; } if (i == 0) data0 = data; if (data != data0) return VCM_AD5823; } return ret == 1 ? VCM_DW9714 : ret; } static int ov5693_write_reg(struct i2c_client *client, u16 data_length, u16 reg, u16 val) { int ret; unsigned char data[4] = {0}; __be16 *wreg = (void *)data; const u16 len = data_length + sizeof(u16); /* 16-bit address + data */ if (data_length != OV5693_8BIT && data_length != OV5693_16BIT) { dev_err(&client->dev, "%s error, invalid data_length\n", __func__); return -EINVAL; } /* high byte goes out first */ *wreg = cpu_to_be16(reg); if (data_length == OV5693_8BIT) { data[2] = (u8)(val); } else { /* OV5693_16BIT */ __be16 *wdata = (void *)&data[2]; *wdata = cpu_to_be16(val); } ret = ov5693_i2c_write(client, len, data); if (ret) dev_err(&client->dev, "write error: wrote 0x%x to offset 0x%x error %d", val, reg, ret); return ret; } /* * ov5693_write_reg_array - Initializes a list of OV5693 registers * @client: i2c driver client structure * @reglist: list of registers to be written * * This function initializes a list of registers. When consecutive addresses * are found in a row on the list, this function creates a buffer and sends * consecutive data in a single i2c_transfer(). * * __ov5693_flush_reg_array, __ov5693_buf_reg_array() and * __ov5693_write_reg_is_consecutive() are internal functions to * ov5693_write_reg_array_fast() and should be not used anywhere else. 
* */ static int __ov5693_flush_reg_array(struct i2c_client *client, struct ov5693_write_ctrl *ctrl) { u16 size; __be16 *reg = (void *)&ctrl->buffer.addr; if (ctrl->index == 0) return 0; size = sizeof(u16) + ctrl->index; /* 16-bit address + data */ *reg = cpu_to_be16(ctrl->buffer.addr); ctrl->index = 0; return ov5693_i2c_write(client, size, (u8 *)reg); } static int __ov5693_buf_reg_array(struct i2c_client *client, struct ov5693_write_ctrl *ctrl, const struct ov5693_reg *next) { int size; __be16 *data16; switch (next->type) { case OV5693_8BIT: size = 1; ctrl->buffer.data[ctrl->index] = (u8)next->val; break; case OV5693_16BIT: size = 2; data16 = (void *)&ctrl->buffer.data[ctrl->index]; *data16 = cpu_to_be16((u16)next->val); break; default: return -EINVAL; } /* When first item is added, we need to store its starting address */ if (ctrl->index == 0) ctrl->buffer.addr = next->reg; ctrl->index += size; /* * Buffer cannot guarantee free space for u32? Better flush it to avoid * possible lack of memory for next item. */ if (ctrl->index + sizeof(u16) >= OV5693_MAX_WRITE_BUF_SIZE) return __ov5693_flush_reg_array(client, ctrl); return 0; } static int __ov5693_write_reg_is_consecutive(struct i2c_client *client, struct ov5693_write_ctrl *ctrl, const struct ov5693_reg *next) { if (ctrl->index == 0) return 1; return ctrl->buffer.addr + ctrl->index == next->reg; } static int ov5693_write_reg_array(struct i2c_client *client, const struct ov5693_reg *reglist) { const struct ov5693_reg *next = reglist; struct ov5693_write_ctrl ctrl; int err; ctrl.index = 0; for (; next->type != OV5693_TOK_TERM; next++) { switch (next->type & OV5693_TOK_MASK) { case OV5693_TOK_DELAY: err = __ov5693_flush_reg_array(client, &ctrl); if (err) return err; msleep(next->val); break; default: /* * If next address is not consecutive, data needs to be * flushed before proceed. 
*/ if (!__ov5693_write_reg_is_consecutive(client, &ctrl, next)) { err = __ov5693_flush_reg_array(client, &ctrl); if (err) return err; } err = __ov5693_buf_reg_array(client, &ctrl, next); if (err) { dev_err(&client->dev, "%s: write error, aborted\n", __func__); return err; } break; } } return __ov5693_flush_reg_array(client, &ctrl); } static long __ov5693_set_exposure(struct v4l2_subdev *sd, int coarse_itg, int gain, int digitgain) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov5693_device *dev = to_ov5693_sensor(sd); u16 vts, hts; int ret, exp_val; hts = ov5693_res[dev->fmt_idx].pixels_per_line; vts = ov5693_res[dev->fmt_idx].lines_per_frame; /* * If coarse_itg is larger than 1<<15, can not write to reg directly. * The way is to write coarse_itg/2 to the reg, meanwhile write 2*hts * to the reg. */ if (coarse_itg > (1 << 15)) { hts = hts * 2; coarse_itg = (int)coarse_itg / 2; } /* group hold */ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_GROUP_ACCESS, 0x00); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_GROUP_ACCESS); return ret; } ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_TIMING_HTS_H, (hts >> 8) & 0xFF); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_TIMING_HTS_H); return ret; } ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_TIMING_HTS_L, hts & 0xFF); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_TIMING_HTS_L); return ret; } /* Increase the VTS to match exposure + MARGIN */ if (coarse_itg > vts - OV5693_INTEGRATION_TIME_MARGIN) vts = (u16)coarse_itg + OV5693_INTEGRATION_TIME_MARGIN; ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_TIMING_VTS_H, (vts >> 8) & 0xFF); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_TIMING_VTS_H); return ret; } ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_TIMING_VTS_L, vts & 0xFF); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", 
__func__, OV5693_TIMING_VTS_L); return ret; } /* set exposure */ /* Lower four bit should be 0*/ exp_val = coarse_itg << 4; ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_EXPOSURE_L, exp_val & 0xFF); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_EXPOSURE_L); return ret; } ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_EXPOSURE_M, (exp_val >> 8) & 0xFF); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_EXPOSURE_M); return ret; } ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_EXPOSURE_H, (exp_val >> 16) & 0x0F); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_EXPOSURE_H); return ret; } /* Analog gain */ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_AGC_L, gain & 0xff); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_AGC_L); return ret; } ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_AGC_H, (gain >> 8) & 0xff); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_AGC_H); return ret; } /* Digital gain */ if (digitgain) { ret = ov5693_write_reg(client, OV5693_16BIT, OV5693_MWB_RED_GAIN_H, digitgain); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_MWB_RED_GAIN_H); return ret; } ret = ov5693_write_reg(client, OV5693_16BIT, OV5693_MWB_GREEN_GAIN_H, digitgain); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_MWB_RED_GAIN_H); return ret; } ret = ov5693_write_reg(client, OV5693_16BIT, OV5693_MWB_BLUE_GAIN_H, digitgain); if (ret) { dev_err(&client->dev, "%s: write %x error, aborted\n", __func__, OV5693_MWB_RED_GAIN_H); return ret; } } /* End group */ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_GROUP_ACCESS, 0x10); if (ret) return ret; /* Delay launch group */ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_GROUP_ACCESS, 0xa0); if (ret) return ret; return ret; } static int ov5693_set_exposure(struct 
v4l2_subdev *sd, int exposure, int gain, int digitgain) { struct ov5693_device *dev = to_ov5693_sensor(sd); int ret; mutex_lock(&dev->input_lock); ret = __ov5693_set_exposure(sd, exposure, gain, digitgain); mutex_unlock(&dev->input_lock); return ret; } static long ov5693_s_exposure(struct v4l2_subdev *sd, struct atomisp_exposure *exposure) { u16 coarse_itg = exposure->integration_time[0]; u16 analog_gain = exposure->gain[0]; u16 digital_gain = exposure->gain[1]; /* we should not accept the invalid value below */ if (analog_gain == 0) { struct i2c_client *client = v4l2_get_subdevdata(sd); v4l2_err(client, "%s: invalid value\n", __func__); return -EINVAL; } return ov5693_set_exposure(sd, coarse_itg, analog_gain, digital_gain); } static int ov5693_read_otp_reg_array(struct i2c_client *client, u16 size, u16 addr, u8 *buf) { u16 index; int ret; u16 *pVal = NULL; for (index = 0; index <= size; index++) { pVal = (u16 *)(buf + index); ret = ov5693_read_reg(client, OV5693_8BIT, addr + index, pVal); if (ret) return ret; } return 0; } static int __ov5693_otp_read(struct v4l2_subdev *sd, u8 *buf) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ov5693_device *dev = to_ov5693_sensor(sd); int ret; int i; u8 *b = buf; dev->otp_size = 0; for (i = 1; i < OV5693_OTP_BANK_MAX; i++) { /*set bank NO and OTP read mode. 
*/ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_BANK_REG, (i | 0xc0)); //[7:6] 2'b11 [5:0] bank no if (ret) { dev_err(&client->dev, "failed to prepare OTP page\n"); return ret; } //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_BANK_REG,(i|0xc0)); /*enable read */ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_READ_REG, OV5693_OTP_MODE_READ); // enable :1 if (ret) { dev_err(&client->dev, "failed to set OTP reading mode page"); return ret; } //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_READ_REG,OV5693_OTP_MODE_READ); /* Reading the OTP data array */ ret = ov5693_read_otp_reg_array(client, OV5693_OTP_BANK_SIZE, OV5693_OTP_START_ADDR, b); if (ret) { dev_err(&client->dev, "failed to read OTP data\n"); return ret; } //pr_debug("BANK[%2d] %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", i, *b, *(b+1), *(b+2), *(b+3), *(b+4), *(b+5), *(b+6), *(b+7), *(b+8), *(b+9), *(b+10), *(b+11), *(b+12), *(b+13), *(b+14), *(b+15)); //Intel OTP map, try to read 320byts first. if (i == 21) { if ((*b) == 0) { dev->otp_size = 320; break; } else { b = buf; continue; } } else if (i == 24) { //if the first 320bytes data doesn't not exist, try to read the next 32bytes data. if ((*b) == 0) { dev->otp_size = 32; break; } else { b = buf; continue; } } else if (i == 27) { //if the prvious 32bytes data doesn't exist, try to read the next 32bytes data again. if ((*b) == 0) { dev->otp_size = 32; break; } else { dev->otp_size = 0; // no OTP data. break; } } b = b + OV5693_OTP_BANK_SIZE; } return 0; } /* * Read otp data and store it into a kmalloced buffer. * The caller must kfree the buffer when no more needed. * @size: set to the size of the returned otp data. 
*/ static void *ov5693_otp_read(struct v4l2_subdev *sd) { struct i2c_client *client = v4l2_get_subdevdata(sd); u8 *buf; int ret; buf = devm_kzalloc(&client->dev, (OV5693_OTP_DATA_SIZE + 16), GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM); //otp valid after mipi on and sw stream on ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x00); ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM, OV5693_START_STREAMING); ret = __ov5693_otp_read(sd, buf); //mipi off and sw stream off after otp read ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x0f); ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM, OV5693_STOP_STREAMING); /* Driver has failed to find valid data */ if (ret) { dev_err(&client->dev, "sensor found no valid OTP data\n"); return ERR_PTR(ret); } return buf; } static long ov5693_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { switch (cmd) { case ATOMISP_IOC_S_EXPOSURE: return ov5693_s_exposure(sd, arg); default: return -EINVAL; } return 0; } /* * This returns the exposure time being used. This should only be used * for filling in EXIF data, not for actual image processing. 
*/ static int ov5693_q_exposure(struct v4l2_subdev *sd, s32 *value) { struct i2c_client *client = v4l2_get_subdevdata(sd); u16 reg_v, reg_v2; int ret; /* get exposure */ ret = ov5693_read_reg(client, OV5693_8BIT, OV5693_EXPOSURE_L, &reg_v); if (ret) goto err; ret = ov5693_read_reg(client, OV5693_8BIT, OV5693_EXPOSURE_M, &reg_v2); if (ret) goto err; reg_v += reg_v2 << 8; ret = ov5693_read_reg(client, OV5693_8BIT, OV5693_EXPOSURE_H, &reg_v2); if (ret) goto err; *value = reg_v + (((u32)reg_v2 << 16)); err: return ret; } static int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; u8 vcm_code; ret = ad5823_i2c_read(client, AD5823_REG_VCM_CODE_MSB, &vcm_code); if (ret) return ret; /* set reg VCM_CODE_MSB Bit[1:0] */ vcm_code = (vcm_code & VCM_CODE_MSB_MASK) | ((val >> 8) & ~VCM_CODE_MSB_MASK); ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB, vcm_code); if (ret) return ret; /* set reg VCM_CODE_LSB Bit[7:0] */ ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_LSB, (val & 0xff)); if (ret) return ret; /* set required vcm move time */ vcm_code = AD5823_RESONANCE_PERIOD / AD5823_RESONANCE_COEF - AD5823_HIGH_FREQ_RANGE; ret = ad5823_i2c_write(client, AD5823_REG_VCM_MOVE_TIME, vcm_code); return ret; } static int ad5823_t_focus_abs(struct v4l2_subdev *sd, s32 value) { value = min(value, AD5823_MAX_FOCUS_POS); return ad5823_t_focus_vcm(sd, value); } static int ov5693_t_focus_abs(struct v4l2_subdev *sd, s32 value) { struct ov5693_device *dev = to_ov5693_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; dev_dbg(&client->dev, "%s: FOCUS_POS: 0x%x\n", __func__, value); value = clamp(value, 0, OV5693_VCM_MAX_FOCUS_POS); if (dev->vcm == VCM_DW9714) { if (dev->vcm_update) { ret = vcm_dw_i2c_write(client, VCM_PROTECTION_OFF); if (ret) return ret; ret = vcm_dw_i2c_write(client, DIRECT_VCM); if (ret) return ret; ret = vcm_dw_i2c_write(client, VCM_PROTECTION_ON); if (ret) return ret; 
dev->vcm_update = false; } ret = vcm_dw_i2c_write(client, vcm_val(value, VCM_DEFAULT_S)); } else if (dev->vcm == VCM_AD5823) { ad5823_t_focus_abs(sd, value); } if (ret == 0) { dev->number_of_steps = value - dev->focus; dev->focus = value; dev->timestamp_t_focus_abs = ktime_get(); } else dev_err(&client->dev, "%s: i2c failed. ret %d\n", __func__, ret); return ret; } static int ov5693_t_focus_rel(struct v4l2_subdev *sd, s32 value) { struct ov5693_device *dev = to_ov5693_sensor(sd); return ov5693_t_focus_abs(sd, dev->focus + value); } #define DELAY_PER_STEP_NS 1000000 #define DELAY_MAX_PER_STEP_NS (1000000 * 1023) static int ov5693_q_focus_status(struct v4l2_subdev *sd, s32 *value) { u32 status = 0; struct ov5693_device *dev = to_ov5693_sensor(sd); ktime_t temptime; ktime_t timedelay = ns_to_ktime(min_t(u32, abs(dev->number_of_steps) * DELAY_PER_STEP_NS, DELAY_MAX_PER_STEP_NS)); temptime = ktime_sub(ktime_get(), (dev->timestamp_t_focus_abs)); if (ktime_compare(temptime, timedelay) <= 0) { status |= ATOMISP_FOCUS_STATUS_MOVING; status |= ATOMISP_FOCUS_HP_IN_PROGRESS; } else { status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE; status |= ATOMISP_FOCUS_HP_COMPLETE; } *value = status; return 0; } static int ov5693_q_focus_abs(struct v4l2_subdev *sd, s32 *value) { struct ov5693_device *dev = to_ov5693_sensor(sd); s32 val; ov5693_q_focus_status(sd, &val); if (val & ATOMISP_FOCUS_STATUS_MOVING) *value = dev->focus - dev->number_of_steps; else *value = dev->focus; return 0; } static int ov5693_t_vcm_slew(struct v4l2_subdev *sd, s32 value) { struct ov5693_device *dev = to_ov5693_sensor(sd); dev->number_of_steps = value; dev->vcm_update = true; return 0; } static int ov5693_t_vcm_timing(struct v4l2_subdev *sd, s32 value) { struct ov5693_device *dev = to_ov5693_sensor(sd); dev->number_of_steps = value; dev->vcm_update = true; return 0; } static int ov5693_s_ctrl(struct v4l2_ctrl *ctrl) { struct ov5693_device *dev = container_of(ctrl->handler, struct ov5693_device, ctrl_handler); 
struct i2c_client *client = v4l2_get_subdevdata(&dev->sd); int ret = 0; switch (ctrl->id) { case V4L2_CID_FOCUS_ABSOLUTE: dev_dbg(&client->dev, "%s: CID_FOCUS_ABSOLUTE:%d.\n", __func__, ctrl->val); ret = ov5693_t_focus_abs(&dev->sd, ctrl->val); break; case V4L2_CID_FOCUS_RELATIVE: dev_dbg(&client->dev, "%s: CID_FOCUS_RELATIVE:%d.\n", __func__, ctrl->val); ret = ov5693_t_focus_rel(&dev->sd, ctrl->val); break; case V4L2_CID_VCM_SLEW: ret = ov5693_t_vcm_slew(&dev->sd, ctrl->val); break; case V4L2_CID_VCM_TIMING: ret = ov5693_t_vcm_timing(&dev->sd, ctrl->val); break; default: ret = -EINVAL; } return ret; } static int ov5693_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { struct ov5693_device *dev = container_of(ctrl->handler, struct ov5693_device, ctrl_handler); int ret = 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE_ABSOLUTE: ret = ov5693_q_exposure(&dev->sd, &ctrl->val); break; case V4L2_CID_FOCUS_ABSOLUTE: ret = ov5693_q_focus_abs(&dev->sd, &ctrl->val); break; case V4L2_CID_FOCUS_STATUS: ret = ov5693_q_focus_status(&dev->sd, &ctrl->val); break; default: ret = -EINVAL; } return ret; } static const struct v4l2_ctrl_ops ctrl_ops = { .s_ctrl = ov5693_s_ctrl, .g_volatile_ctrl = ov5693_g_volatile_ctrl }; static const struct v4l2_ctrl_config ov5693_controls[] = { { .ops = &ctrl_ops, .id = V4L2_CID_EXPOSURE_ABSOLUTE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "exposure", .min = 0x0, .max = 0xffff, .step = 0x01, .def = 0x00, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_FOCUS_ABSOLUTE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "focus move absolute", .min = 0, .max = OV5693_VCM_MAX_FOCUS_POS, .step = 1, .def = 0, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_FOCUS_RELATIVE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "focus move relative", .min = OV5693_VCM_MAX_FOCUS_NEG, .max = OV5693_VCM_MAX_FOCUS_POS, .step = 1, .def = 0, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_FOCUS_STATUS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "focus status", .min = 0, .max = 100, /* allow enum to 
grow in the future */ .step = 1, .def = 0, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_VCM_SLEW, .type = V4L2_CTRL_TYPE_INTEGER, .name = "vcm slew", .min = 0, .max = OV5693_VCM_SLEW_STEP_MAX, .step = 1, .def = 0, .flags = 0, }, { .ops = &ctrl_ops, .id = V4L2_CID_VCM_TIMING, .type = V4L2_CTRL_TYPE_INTEGER, .name = "vcm step time", .min = 0, .max = OV5693_VCM_SLEW_TIME_MAX, .step = 1, .def = 0, .flags = 0, }, }; static int ov5693_init(struct v4l2_subdev *sd) { struct ov5693_device *dev = to_ov5693_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; pr_info("%s\n", __func__); mutex_lock(&dev->input_lock); dev->vcm_update = false; if (dev->vcm == VCM_AD5823) { ret = vcm_ad_i2c_wr8(client, 0x01, 0x01); /* vcm init test */ if (ret) dev_err(&client->dev, "vcm reset failed\n"); /*change the mode*/ ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB, AD5823_RING_CTRL_ENABLE); if (ret) dev_err(&client->dev, "vcm enable ringing failed\n"); ret = ad5823_i2c_write(client, AD5823_REG_MODE, AD5823_ARC_RES1); if (ret) dev_err(&client->dev, "vcm change mode failed\n"); } /*change initial focus value for ad5823*/ if (dev->vcm == VCM_AD5823) { dev->focus = AD5823_INIT_FOCUS_POS; ov5693_t_focus_abs(sd, AD5823_INIT_FOCUS_POS); } else { dev->focus = 0; ov5693_t_focus_abs(sd, 0); } mutex_unlock(&dev->input_lock); return 0; } static int power_ctrl(struct v4l2_subdev *sd, bool flag) { int ret; struct ov5693_device *dev = to_ov5693_sensor(sd); if (!dev || !dev->platform_data) return -ENODEV; /* * This driver assumes "internal DVDD, PWDNB tied to DOVDD". * In this set up only gpio0 (XSHUTDN) should be available * but in some products (for example ECS) gpio1 (PWDNB) is * also available. If gpio1 is available we emulate it being * tied to DOVDD here. 
*/ if (flag) { ret = dev->platform_data->v2p8_ctrl(sd, 1); dev->platform_data->gpio1_ctrl(sd, 1); if (ret == 0) { ret = dev->platform_data->v1p8_ctrl(sd, 1); if (ret) { dev->platform_data->gpio1_ctrl(sd, 0); ret = dev->platform_data->v2p8_ctrl(sd, 0); } } } else { dev->platform_data->gpio1_ctrl(sd, 0); ret = dev->platform_data->v1p8_ctrl(sd, 0); ret |= dev->platform_data->v2p8_ctrl(sd, 0); } return ret; } static int gpio_ctrl(struct v4l2_subdev *sd, bool flag) { struct ov5693_device *dev = to_ov5693_sensor(sd); if (!dev || !dev->platform_data) return -ENODEV; return dev->platform_data->gpio0_ctrl(sd, flag); } static int __power_up(struct v4l2_subdev *sd) { struct ov5693_device *dev = to_ov5693_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; if (!dev->platform_data) { dev_err(&client->dev, "no camera_sensor_platform_data"); return -ENODEV; } /* power control */ ret = power_ctrl(sd, 1); if (ret) goto fail_power; /* according to DS, at least 5ms is needed between DOVDD and PWDN */ /* add this delay time to 10~11ms*/ usleep_range(10000, 11000); /* gpio ctrl */ ret = gpio_ctrl(sd, 1); if (ret) { ret = gpio_ctrl(sd, 1); if (ret) goto fail_power; } /* flis clock control */ ret = dev->platform_data->flisclk_ctrl(sd, 1); if (ret) goto fail_clk; __cci_delay(up_delay); return 0; fail_clk: gpio_ctrl(sd, 0); fail_power: power_ctrl(sd, 0); dev_err(&client->dev, "sensor power-up failed\n"); return ret; } static int power_down(struct v4l2_subdev *sd) { struct ov5693_device *dev = to_ov5693_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; dev->focus = OV5693_INVALID_CONFIG; if (!dev->platform_data) { dev_err(&client->dev, "no camera_sensor_platform_data"); return -ENODEV; } ret = dev->platform_data->flisclk_ctrl(sd, 0); if (ret) dev_err(&client->dev, "flisclk failed\n"); /* gpio ctrl */ ret = gpio_ctrl(sd, 0); if (ret) { ret = gpio_ctrl(sd, 0); if (ret) dev_err(&client->dev, "gpio failed 2\n"); } /* power control */ ret = 
power_ctrl(sd, 0); if (ret) dev_err(&client->dev, "vprog failed.\n"); return ret; } static int power_up(struct v4l2_subdev *sd) { static const int retry_count = 4; int i, ret; for (i = 0; i < retry_count; i++) { ret = __power_up(sd); if (!ret) return 0; power_down(sd); } return ret; } static int ov5693_s_power(struct v4l2_subdev *sd, int on) { int ret; pr_info("%s: on %d\n", __func__, on); if (on == 0) return power_down(sd); else { ret = power_up(sd); if (!ret) { ret = ov5693_init(sd); /* restore settings */ ov5693_res = ov5693_res_preview; N_RES = N_RES_PREVIEW; } } return ret; } /* * distance - calculate the distance * @res: resolution * @w: width * @h: height * * Get the gap between res_w/res_h and w/h. * distance = (res_w/res_h - w/h) / (w/h) * 8192 * res->width/height smaller than w/h wouldn't be considered. * The gap of ratio larger than 1/8 wouldn't be considered. * Returns the value of gap or -1 if fail. */ #define LARGEST_ALLOWED_RATIO_MISMATCH 1024 static int distance(struct ov5693_resolution *res, u32 w, u32 h) { int ratio; int distance; if (w == 0 || h == 0 || res->width < w || res->height < h) return -1; ratio = res->width << 13; ratio /= w; ratio *= h; ratio /= res->height; distance = abs(ratio - 8192); if (distance > LARGEST_ALLOWED_RATIO_MISMATCH) return -1; return distance; } /* Return the nearest higher resolution index * Firstly try to find the approximate aspect ratio resolution * If we find multiple same AR resolutions, choose the * minimal size. 
*/ static int nearest_resolution_index(int w, int h) { int i; int idx = -1; int dist; int min_dist = INT_MAX; int min_res_w = INT_MAX; struct ov5693_resolution *tmp_res = NULL; for (i = 0; i < N_RES; i++) { tmp_res = &ov5693_res[i]; dist = distance(tmp_res, w, h); if (dist == -1) continue; if (dist < min_dist) { min_dist = dist; idx = i; min_res_w = ov5693_res[i].width; continue; } if (dist == min_dist && ov5693_res[i].width < min_res_w) idx = i; } return idx; } static int get_resolution_index(int w, int h) { int i; for (i = 0; i < N_RES; i++) { if (w != ov5693_res[i].width) continue; if (h != ov5693_res[i].height) continue; return i; } return -1; } /* TODO: remove it. */ static int startup(struct v4l2_subdev *sd) { struct ov5693_device *dev = to_ov5693_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_RESET, 0x01); if (ret) { dev_err(&client->dev, "ov5693 reset err.\n"); return ret; } ret = ov5693_write_reg_array(client, ov5693_global_setting); if (ret) { dev_err(&client->dev, "ov5693 write register err.\n"); return ret; } ret = ov5693_write_reg_array(client, ov5693_res[dev->fmt_idx].regs); if (ret) { dev_err(&client->dev, "ov5693 write register err.\n"); return ret; } return ret; } static int ov5693_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; struct ov5693_device *dev = to_ov5693_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); struct camera_mipi_info *ov5693_info = NULL; int ret = 0; int idx; if (format->pad) return -EINVAL; if (!fmt) return -EINVAL; ov5693_info = v4l2_get_subdev_hostdata(sd); if (!ov5693_info) return -EINVAL; mutex_lock(&dev->input_lock); idx = nearest_resolution_index(fmt->width, fmt->height); if (idx == -1) { /* return the largest resolution */ fmt->width = ov5693_res[N_RES - 1].width; fmt->height = ov5693_res[N_RES - 1].height; } else { 
fmt->width = ov5693_res[idx].width; fmt->height = ov5693_res[idx].height; } fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10; if (format->which == V4L2_SUBDEV_FORMAT_TRY) { sd_state->pads->try_fmt = *fmt; mutex_unlock(&dev->input_lock); return 0; } dev->fmt_idx = get_resolution_index(fmt->width, fmt->height); if (dev->fmt_idx == -1) { dev_err(&client->dev, "get resolution fail\n"); mutex_unlock(&dev->input_lock); return -EINVAL; } ret = startup(sd); if (ret) { int i = 0; dev_err(&client->dev, "ov5693 startup err, retry to power up\n"); for (i = 0; i < OV5693_POWER_UP_RETRY_NUM; i++) { dev_err(&client->dev, "ov5693 retry to power up %d/%d times, result: ", i + 1, OV5693_POWER_UP_RETRY_NUM); power_down(sd); ret = power_up(sd); if (!ret) { mutex_unlock(&dev->input_lock); ov5693_init(sd); mutex_lock(&dev->input_lock); } else { dev_err(&client->dev, "power up failed, continue\n"); continue; } ret = startup(sd); if (ret) { dev_err(&client->dev, " startup FAILED!\n"); } else { dev_err(&client->dev, " startup SUCCESS!\n"); break; } } } /* * After sensor settings are set to HW, sometimes stream is started. * This would cause ISP timeout because ISP is not ready to receive * data yet. So add stop streaming here. 
*/ ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM, OV5693_STOP_STREAMING); if (ret) dev_warn(&client->dev, "ov5693 stream off err\n"); ov5693_info->metadata_width = fmt->width * 10 / 8; ov5693_info->metadata_height = 1; ov5693_info->metadata_effective_width = &ov5693_embedded_effective_size; mutex_unlock(&dev->input_lock); return ret; } static int ov5693_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_format *format) { struct v4l2_mbus_framefmt *fmt = &format->format; struct ov5693_device *dev = to_ov5693_sensor(sd); if (format->pad) return -EINVAL; if (!fmt) return -EINVAL; fmt->width = ov5693_res[dev->fmt_idx].width; fmt->height = ov5693_res[dev->fmt_idx].height; fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10; return 0; } static int ov5693_detect(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; u16 high, low; int ret; u16 id; u8 revision; if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) return -ENODEV; ret = ov5693_read_reg(client, OV5693_8BIT, OV5693_SC_CMMN_CHIP_ID_H, &high); if (ret) { dev_err(&client->dev, "sensor_id_high = 0x%x\n", high); return -ENODEV; } ret = ov5693_read_reg(client, OV5693_8BIT, OV5693_SC_CMMN_CHIP_ID_L, &low); if (ret) return ret; id = ((((u16)high) << 8) | (u16)low); if (id != OV5693_ID) { dev_err(&client->dev, "sensor ID error 0x%x\n", id); return -ENODEV; } ret = ov5693_read_reg(client, OV5693_8BIT, OV5693_SC_CMMN_SUB_ID, &high); revision = (u8)high & 0x0f; dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision); dev_dbg(&client->dev, "detect ov5693 success\n"); return 0; } static int ov5693_s_stream(struct v4l2_subdev *sd, int enable) { struct ov5693_device *dev = to_ov5693_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret; mutex_lock(&dev->input_lock); ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM, enable ? 
OV5693_START_STREAMING : OV5693_STOP_STREAMING); mutex_unlock(&dev->input_lock); return ret; } static int ov5693_s_config(struct v4l2_subdev *sd, int irq, void *platform_data) { struct ov5693_device *dev = to_ov5693_sensor(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); int ret = 0; if (!platform_data) return -ENODEV; dev->platform_data = (struct camera_sensor_platform_data *)platform_data; mutex_lock(&dev->input_lock); /* power off the module, then power on it in future * as first power on by board may not fulfill the * power on sequqence needed by the module */ ret = power_down(sd); if (ret) { dev_err(&client->dev, "ov5693 power-off err.\n"); goto fail_power_off; } ret = power_up(sd); if (ret) { dev_err(&client->dev, "ov5693 power-up err.\n"); goto fail_power_on; } if (!dev->vcm) dev->vcm = vcm_detect(client); ret = dev->platform_data->csi_cfg(sd, 1); if (ret) goto fail_csi_cfg; /* config & detect sensor */ ret = ov5693_detect(client); if (ret) { dev_err(&client->dev, "ov5693_detect err s_config.\n"); goto fail_csi_cfg; } dev->otp_data = ov5693_otp_read(sd); /* turn off sensor, after probed */ ret = power_down(sd); if (ret) { dev_err(&client->dev, "ov5693 power-off err.\n"); goto fail_csi_cfg; } mutex_unlock(&dev->input_lock); return ret; fail_csi_cfg: dev->platform_data->csi_cfg(sd, 0); fail_power_on: power_down(sd); dev_err(&client->dev, "sensor power-gating failed\n"); fail_power_off: mutex_unlock(&dev->input_lock); return ret; } static int ov5693_g_frame_interval(struct v4l2_subdev *sd, struct v4l2_subdev_frame_interval *interval) { struct ov5693_device *dev = to_ov5693_sensor(sd); interval->interval.numerator = 1; interval->interval.denominator = ov5693_res[dev->fmt_idx].fps; return 0; } static int ov5693_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_mbus_code_enum *code) { if (code->index >= MAX_FMTS) return -EINVAL; code->code = MEDIA_BUS_FMT_SBGGR10_1X10; return 0; } static int 
ov5693_enum_frame_size(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_frame_size_enum *fse) { int index = fse->index; if (index >= N_RES) return -EINVAL; fse->min_width = ov5693_res[index].width; fse->min_height = ov5693_res[index].height; fse->max_width = ov5693_res[index].width; fse->max_height = ov5693_res[index].height; return 0; } static const struct v4l2_subdev_video_ops ov5693_video_ops = { .s_stream = ov5693_s_stream, .g_frame_interval = ov5693_g_frame_interval, }; static const struct v4l2_subdev_core_ops ov5693_core_ops = { .s_power = ov5693_s_power, .ioctl = ov5693_ioctl, }; static const struct v4l2_subdev_pad_ops ov5693_pad_ops = { .enum_mbus_code = ov5693_enum_mbus_code, .enum_frame_size = ov5693_enum_frame_size, .get_fmt = ov5693_get_fmt, .set_fmt = ov5693_set_fmt, }; static const struct v4l2_subdev_ops ov5693_ops = { .core = &ov5693_core_ops, .video = &ov5693_video_ops, .pad = &ov5693_pad_ops, }; static void ov5693_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov5693_device *dev = to_ov5693_sensor(sd); dev_dbg(&client->dev, "ov5693_remove...\n"); dev->platform_data->csi_cfg(sd, 0); v4l2_device_unregister_subdev(sd); atomisp_gmin_remove_subdev(sd); media_entity_cleanup(&dev->sd.entity); v4l2_ctrl_handler_free(&dev->ctrl_handler); kfree(dev); } static int ov5693_probe(struct i2c_client *client) { struct ov5693_device *dev; int i2c; int ret; void *pdata; unsigned int i; /* * Firmware workaround: Some modules use a "secondary default" * address of 0x10 which doesn't appear on schematics, and * some BIOS versions haven't gotten the memo. Work around * via config. 
*/ i2c = gmin_get_var_int(&client->dev, false, "I2CAddr", -1); if (i2c != -1) { dev_info(&client->dev, "Overriding firmware-provided I2C address (0x%x) with 0x%x\n", client->addr, i2c); client->addr = i2c; } dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; mutex_init(&dev->input_lock); dev->fmt_idx = 0; v4l2_i2c_subdev_init(&dev->sd, client, &ov5693_ops); pdata = gmin_camera_platform_data(&dev->sd, ATOMISP_INPUT_FORMAT_RAW_10, atomisp_bayer_order_bggr); if (!pdata) { ret = -EINVAL; goto out_free; } ret = ov5693_s_config(&dev->sd, client->irq, pdata); if (ret) goto out_free; ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA); if (ret) goto out_free; dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; dev->pad.flags = MEDIA_PAD_FL_SOURCE; dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10; dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR; ret = v4l2_ctrl_handler_init(&dev->ctrl_handler, ARRAY_SIZE(ov5693_controls)); if (ret) { ov5693_remove(client); return ret; } for (i = 0; i < ARRAY_SIZE(ov5693_controls); i++) v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov5693_controls[i], NULL); if (dev->ctrl_handler.error) { ov5693_remove(client); return dev->ctrl_handler.error; } /* Use same lock for controls as for everything else. */ dev->ctrl_handler.lock = &dev->input_lock; dev->sd.ctrl_handler = &dev->ctrl_handler; ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad); if (ret) ov5693_remove(client); return ret; out_free: v4l2_device_unregister_subdev(&dev->sd); kfree(dev); return ret; } static const struct acpi_device_id ov5693_acpi_match[] = { {"INT33BE"}, {}, }; MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match); static struct i2c_driver ov5693_driver = { .driver = { .name = "ov5693", .acpi_match_table = ov5693_acpi_match, }, .probe = ov5693_probe, .remove = ov5693_remove, }; module_i2c_driver(ov5693_driver); MODULE_DESCRIPTION("A low-level driver for OmniVision 5693 sensors"); MODULE_LICENSE("GPL");
linux-master
drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Intel Corporation.
 * Copyright 2018 Google LLC.
 *
 * Author: Tuukka Toivonen <[email protected]>
 * Author: Sakari Ailus <[email protected]>
 * Author: Samu Onkalo <[email protected]>
 * Author: Tomasz Figa <[email protected]>
 *
 */

#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/set_memory.h>

#include "ipu3-mmu.h"

/* Each page table level holds 2^10 32-bit entries in one 4 KiB page. */
#define IPU3_PT_BITS		10
#define IPU3_PT_PTES		(1UL << IPU3_PT_BITS)
#define IPU3_PT_SIZE		(IPU3_PT_PTES << 2)
#define IPU3_PT_ORDER		(IPU3_PT_SIZE >> PAGE_SHIFT)

/* Convert between a CPU physical address and a page-frame-number PTE. */
#define IPU3_ADDR2PTE(addr)	((addr) >> IPU3_PAGE_SHIFT)
#define IPU3_PTE2ADDR(pte)	((phys_addr_t)(pte) << IPU3_PAGE_SHIFT)

#define IPU3_L2PT_SHIFT		IPU3_PT_BITS
#define IPU3_L2PT_MASK		((1UL << IPU3_L2PT_SHIFT) - 1)

#define IPU3_L1PT_SHIFT		IPU3_PT_BITS
#define IPU3_L1PT_MASK		((1UL << IPU3_L1PT_SHIFT) - 1)

/* Total IOVA width covered by the two-level table plus the page offset. */
#define IPU3_MMU_ADDRESS_BITS	(IPU3_PAGE_SHIFT + \
				 IPU3_L2PT_SHIFT + \
				 IPU3_L1PT_SHIFT)

#define IMGU_REG_BASE		0x4000
#define REG_TLB_INVALIDATE	(IMGU_REG_BASE + 0x300)
#define TLB_INVALIDATE		1
#define REG_L1_PHYS		(IMGU_REG_BASE + 0x304) /* 27-bit pfn */
#define REG_GP_HALT		(IMGU_REG_BASE + 0x5dc)
#define REG_GP_HALTED		(IMGU_REG_BASE + 0x5e0)

/*
 * Driver-private MMU state.  The public handle exposed to the rest of the
 * driver is the embedded &geometry member; to_imgu_mmu() recovers this
 * structure from it via container_of().
 */
struct imgu_mmu {
	struct device *dev;
	void __iomem *base;
	/* protect access to l2pts, l1pt */
	spinlock_t lock;

	void *dummy_page;		/* backing page for unmapped IOVAs */
	u32 dummy_page_pteval;

	u32 *dummy_l2pt;		/* L2 table full of dummy-page PTEs */
	u32 dummy_l2pt_pteval;

	u32 **l2pts;			/* CPU pointers to live L2 tables */
	u32 *l1pt;

	struct imgu_mmu_info geometry;
};

static inline struct imgu_mmu *to_imgu_mmu(struct imgu_mmu_info *info)
{
	return container_of(info, struct imgu_mmu, geometry);
}

/**
 * imgu_mmu_tlb_invalidate - invalidate translation look-aside buffer
 * @mmu: MMU to perform the invalidate operation on
 *
 * This function invalidates the whole TLB. Must be called when the hardware
 * is powered on.
 */
static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu)
{
	writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE);
}

/*
 * Run @func only if the device is currently runtime-active; silently skip
 * it otherwise.  Used for register pokes (e.g. TLB invalidation) that are
 * only meaningful while the hardware is powered.
 */
static void call_if_imgu_is_powered(struct imgu_mmu *mmu,
				    void (*func)(struct imgu_mmu *mmu))
{
	if (!pm_runtime_get_if_in_use(mmu->dev))
		return;

	func(mmu);
	pm_runtime_put(mmu->dev);
}

/**
 * imgu_mmu_set_halt - set CIO gate halt bit
 * @mmu: MMU to set the CIO gate bit in.
 * @halt: Desired state of the gate bit.
 *
 * This function sets the CIO gate bit that controls whether external memory
 * accesses are allowed. Must be called when the hardware is powered on.
 */
static void imgu_mmu_set_halt(struct imgu_mmu *mmu, bool halt)
{
	int ret;
	u32 val;

	writel(halt, mmu->base + REG_GP_HALT);
	/* Poll until the hardware acknowledges the requested gate state. */
	ret = readl_poll_timeout(mmu->base + REG_GP_HALTED,
				 val, (val & 1) == halt, 1000, 100000);

	if (ret)
		dev_err(mmu->dev, "failed to %s CIO gate halt\n",
			halt ? "set" : "clear");
}

/**
 * imgu_mmu_alloc_page_table - allocate a pre-filled page table
 * @pteval: Value to initialize for page table entries with.
 *
 * Return: Pointer to allocated page table or NULL on failure.
 */
static u32 *imgu_mmu_alloc_page_table(u32 pteval)
{
	u32 *pt;
	int pte;

	pt = (u32 *)__get_free_page(GFP_KERNEL);
	if (!pt)
		return NULL;

	for (pte = 0; pte < IPU3_PT_PTES; pte++)
		pt[pte] = pteval;

	/* Hardware reads the table directly; keep CPU mapping uncached. */
	set_memory_uc((unsigned long)pt, IPU3_PT_ORDER);

	return pt;
}

/**
 * imgu_mmu_free_page_table - free page table
 * @pt: Page table to free.
 */
static void imgu_mmu_free_page_table(u32 *pt)
{
	/* Restore write-back caching before returning the page. */
	set_memory_wb((unsigned long)pt, IPU3_PT_ORDER);
	free_page((unsigned long)pt);
}

/**
 * address_to_pte_idx - split IOVA into L1 and L2 page table indices
 * @iova: IOVA to split.
 * @l1pt_idx: Output for the L1 page table index.
 * @l2pt_idx: Output for the L2 page index.
 */
static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
				      u32 *l2pt_idx)
{
	iova >>= IPU3_PAGE_SHIFT;

	if (l2pt_idx)
		*l2pt_idx = iova & IPU3_L2PT_MASK;

	iova >>= IPU3_L2PT_SHIFT;

	if (l1pt_idx)
		*l1pt_idx = iova & IPU3_L1PT_MASK;
}

/*
 * Return the L2 page table for @l1pt_idx, allocating it on first use.
 * Allocation happens outside the spinlock (it may sleep), so a classic
 * check / alloc / re-check sequence is used: if another thread installed
 * a table while we were allocating, ours is freed and theirs returned.
 */
static u32 *imgu_mmu_get_l2pt(struct imgu_mmu *mmu, u32 l1pt_idx)
{
	unsigned long flags;
	u32 *l2pt, *new_l2pt;
	u32 pteval;

	spin_lock_irqsave(&mmu->lock, flags);

	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return l2pt;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	/* Page table is not yet allocated, allocate it now (may sleep). */
	new_l2pt = imgu_mmu_alloc_page_table(mmu->dummy_page_pteval);
	if (!new_l2pt)
		return NULL;

	spin_lock_irqsave(&mmu->lock, flags);

	dev_dbg(mmu->dev, "allocated page table %p for l1pt_idx %u\n",
		new_l2pt, l1pt_idx);

	/* Lost the race: someone else installed a table meanwhile. */
	l2pt = mmu->l2pts[l1pt_idx];
	if (l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		imgu_mmu_free_page_table(new_l2pt);
		return l2pt;
	}

	l2pt = new_l2pt;
	mmu->l2pts[l1pt_idx] = new_l2pt;

	pteval = IPU3_ADDR2PTE(virt_to_phys(new_l2pt));
	mmu->l1pt[l1pt_idx] = pteval;

	spin_unlock_irqrestore(&mmu->lock, flags);
	return l2pt;
}

/*
 * Map a single page: install the PTE for @paddr at the slot addressed by
 * @iova.  Returns -EBUSY if the slot already holds a real (non-dummy)
 * mapping, -ENOMEM if the L2 table could not be allocated.
 */
static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
			  phys_addr_t paddr)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	u32 *l2pt;

	if (!mmu)
		return -ENODEV;

	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	l2pt = imgu_mmu_get_l2pt(mmu, l1pt_idx);
	if (!l2pt)
		return -ENOMEM;

	spin_lock_irqsave(&mmu->lock, flags);

	if (l2pt[l2pt_idx] != mmu->dummy_page_pteval) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return -EBUSY;
	}

	l2pt[l2pt_idx] = IPU3_ADDR2PTE(paddr);

	spin_unlock_irqrestore(&mmu->lock, flags);

	return 0;
}

/**
 * imgu_mmu_map - map a buffer to a physical address
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @paddr: the physical address
 * @size: length of the mappable area
 *
 * The function has been adapted from iommu_map() in
 * drivers/iommu/iommu.c .
 */
int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
		 phys_addr_t paddr, size_t size)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	int ret = 0;

	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
		dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
			iova, &paddr, size);
		return -EINVAL;
	}

	dev_dbg(mmu->dev, "map: iova 0x%lx pa %pa size 0x%zx\n",
		iova, &paddr, size);

	/*
	 * NOTE(review): on a mid-range __imgu_mmu_map() failure the pages
	 * mapped so far are left in place; the caller is expected to unmap.
	 */
	while (size) {
		dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);

		ret = __imgu_mmu_map(mmu, iova, paddr);
		if (ret)
			break;

		iova += IPU3_PAGE_SIZE;
		paddr += IPU3_PAGE_SIZE;
		size -= IPU3_PAGE_SIZE;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return ret;
}

/**
 * imgu_mmu_map_sg - Map a scatterlist
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @sg: the scatterlist to map
 * @nents: number of entries in the scatterlist
 *
 * The function has been adapted from default_iommu_map_sg() in
 * drivers/iommu/iommu.c .
 */
size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
		       struct scatterlist *sg, unsigned int nents)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	struct scatterlist *s;
	size_t s_length, mapped = 0;
	unsigned int i;
	int ret;

	for_each_sg(sg, s, nents, i) {
		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		s_length = s->length;

		if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
			goto out_err;

		/* must be IPU3_PAGE_SIZE aligned to be mapped singlely */
		if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
			s_length = PAGE_ALIGN(s->length);

		ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
		if (ret)
			goto out_err;

		mapped += s_length;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return mapped;

out_err:
	/* undo mappings already done */
	imgu_mmu_unmap(info, iova, mapped);

	return 0;
}

/*
 * Unmap a single page by restoring the dummy-page PTE.  Returns the number
 * of bytes unmapped (@size) or 0 if the slot was not mapped, so the caller
 * can detect where a run of mappings ends.
 */
static size_t __imgu_mmu_unmap(struct imgu_mmu *mmu,
			       unsigned long iova, size_t size)
{
	u32 l1pt_idx, l2pt_idx;
	unsigned long flags;
	size_t unmap = size;
	u32 *l2pt;

	if (!mmu)
		return 0;

	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);

	spin_lock_irqsave(&mmu->lock, flags);

	l2pt = mmu->l2pts[l1pt_idx];
	if (!l2pt) {
		spin_unlock_irqrestore(&mmu->lock, flags);
		return 0;
	}

	if (l2pt[l2pt_idx] == mmu->dummy_page_pteval)
		unmap = 0;

	l2pt[l2pt_idx] = mmu->dummy_page_pteval;

	spin_unlock_irqrestore(&mmu->lock, flags);

	return unmap;
}

/**
 * imgu_mmu_unmap - Unmap a buffer
 *
 * @info: MMU mappable range
 * @iova: the virtual address
 * @size: the length of the buffer
 *
 * The function has been adapted from iommu_unmap() in
 * drivers/iommu/iommu.c .
 */
size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
		      size_t size)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	size_t unmapped_page, unmapped = 0;

	/*
	 * The virtual address, as well as the size of the mapping, must be
	 * aligned (at least) to the size of the smallest page supported
	 * by the hardware
	 */
	if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
		dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
			iova, size);
		/*
		 * NOTE(review): -EINVAL converted to size_t yields a huge
		 * positive value; callers comparing against @size treat it
		 * as "something went wrong" — confirm before changing.
		 */
		return -EINVAL;
	}

	dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	/*
	 * Keep iterating until we either unmap 'size' bytes (or more)
	 * or we hit an area that isn't mapped.
	 */
	while (unmapped < size) {
		unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
		if (!unmapped_page)
			break;

		dev_dbg(mmu->dev, "unmapped: iova 0x%lx size 0x%zx\n",
			iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);

	return unmapped;
}

/**
 * imgu_mmu_init() - initialize IPU3 MMU block
 *
 * @parent:	struct device parent
 * @base:	IOMEM base of hardware registers.
 *
 * Return: Pointer to IPU3 MMU private data pointer or ERR_PTR() on error.
 */
struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
{
	struct imgu_mmu *mmu;
	u32 pteval;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->dev = parent;
	mmu->base = base;
	spin_lock_init(&mmu->lock);

	/* Disallow external memory access when having no valid page tables. */
	imgu_mmu_set_halt(mmu, true);

	/*
	 * The MMU does not have a "valid" bit, so we have to use a dummy
	 * page for invalid entries.
	 */
	mmu->dummy_page = (void *)__get_free_page(GFP_KERNEL);
	if (!mmu->dummy_page)
		goto fail_group;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_page));
	mmu->dummy_page_pteval = pteval;

	/*
	 * Allocate a dummy L2 page table with all entries pointing to
	 * the dummy page.
	 */
	mmu->dummy_l2pt = imgu_mmu_alloc_page_table(pteval);
	if (!mmu->dummy_l2pt)
		goto fail_dummy_page;
	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->dummy_l2pt));
	mmu->dummy_l2pt_pteval = pteval;

	/*
	 * Allocate the array of L2PT CPU pointers, initialized to zero,
	 * which means the dummy L2PT allocated above.
	 */
	mmu->l2pts = vzalloc(IPU3_PT_PTES * sizeof(*mmu->l2pts));
	if (!mmu->l2pts)
		goto fail_l2pt;

	/* Allocate the L1 page table. */
	mmu->l1pt = imgu_mmu_alloc_page_table(mmu->dummy_l2pt_pteval);
	if (!mmu->l1pt)
		goto fail_l2pts;

	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);
	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);

	mmu->geometry.aperture_start = 0;
	mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);

	return &mmu->geometry;

fail_l2pts:
	vfree(mmu->l2pts);
fail_l2pt:
	imgu_mmu_free_page_table(mmu->dummy_l2pt);
fail_dummy_page:
	free_page((unsigned long)mmu->dummy_page);
fail_group:
	kfree(mmu);

	return ERR_PTR(-ENOMEM);
}

/**
 * imgu_mmu_exit() - clean up IPU3 MMU block
 *
 * @info: MMU mappable range
 */
void imgu_mmu_exit(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);

	/* We are going to free our page tables, no more memory access. */
	imgu_mmu_set_halt(mmu, true);
	imgu_mmu_tlb_invalidate(mmu);

	imgu_mmu_free_page_table(mmu->l1pt);
	vfree(mmu->l2pts);
	imgu_mmu_free_page_table(mmu->dummy_l2pt);
	free_page((unsigned long)mmu->dummy_page);
	kfree(mmu);
}

/* Gate off memory access before the device is powered down. */
void imgu_mmu_suspend(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);

	imgu_mmu_set_halt(mmu, true);
}

/*
 * Restore hardware MMU state after resume: re-program the L1 table base
 * (lost across power-down), flush the TLB, then re-open the CIO gate.
 */
void imgu_mmu_resume(struct imgu_mmu_info *info)
{
	struct imgu_mmu *mmu = to_imgu_mmu(info);
	u32 pteval;

	imgu_mmu_set_halt(mmu, true);

	pteval = IPU3_ADDR2PTE(virt_to_phys(mmu->l1pt));
	writel(pteval, mmu->base + REG_L1_PHYS);

	imgu_mmu_tlb_invalidate(mmu);
	imgu_mmu_set_halt(mmu, false);
}
linux-master
drivers/staging/media/ipu3/ipu3-mmu.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "ipu3-css.h"
#include "ipu3-css-fw.h"
#include "ipu3-dmamap.h"

/* Dump (via dev_dbg) the capabilities of one validated firmware binary. */
static void imgu_css_fw_show_binary(struct device *dev, struct imgu_fw_info *bi,
				    const char *name)
{
	unsigned int i;

	dev_dbg(dev, "found firmware binary type %i size %i name %s\n",
		bi->type, bi->blob.size, name);
	if (bi->type != IMGU_FW_ISP_FIRMWARE)
		return;

	dev_dbg(dev, "   id %i mode %i bds 0x%x veceven %i/%i out_pins %i\n",
		bi->info.isp.sp.id, bi->info.isp.sp.pipeline.mode,
		bi->info.isp.sp.bds.supported_bds_factors,
		bi->info.isp.sp.enable.vf_veceven,
		bi->info.isp.sp.vf_dec.is_variable,
		bi->info.isp.num_output_pins);

	dev_dbg(dev, "   input (%i,%i)-(%i,%i) formats %s%s%s\n",
		bi->info.isp.sp.input.min_width,
		bi->info.isp.sp.input.min_height,
		bi->info.isp.sp.input.max_width,
		bi->info.isp.sp.input.max_height,
		bi->info.isp.sp.enable.input_yuv ? "yuv420 " : "",
		bi->info.isp.sp.enable.input_feeder ||
		bi->info.isp.sp.enable.input_raw ? "raw8 raw10 " : "",
		bi->info.isp.sp.enable.input_raw ? "raw12" : "");

	dev_dbg(dev, "   internal (%i,%i)\n",
		bi->info.isp.sp.internal.max_width,
		bi->info.isp.sp.internal.max_height);

	dev_dbg(dev, "   output (%i,%i)-(%i,%i) formats",
		bi->info.isp.sp.output.min_width,
		bi->info.isp.sp.output.min_height,
		bi->info.isp.sp.output.max_width,
		bi->info.isp.sp.output.max_height);
	for (i = 0; i < bi->info.isp.num_output_formats; i++)
		dev_dbg(dev, " %i", bi->info.isp.output_formats[i]);
	dev_dbg(dev, " vf");
	for (i = 0; i < bi->info.isp.num_vf_formats; i++)
		dev_dbg(dev, " %i", bi->info.isp.vf_formats[i]);
	dev_dbg(dev, "\n");
}

/*
 * Compute the size of the optical-black grid parameter buffer required by
 * binary @bi, derived from its maximum internal frame dimensions and the
 * number of stripes it iterates over.
 */
unsigned int imgu_css_fw_obgrid_size(const struct imgu_fw_info *bi)
{
	unsigned int width = DIV_ROUND_UP(bi->info.isp.sp.internal.max_width,
					  IMGU_OBGRID_TILE_SIZE * 2) + 1;
	unsigned int height = DIV_ROUND_UP(bi->info.isp.sp.internal.max_height,
					   IMGU_OBGRID_TILE_SIZE * 2) + 1;
	unsigned int obgrid_size;

	width = ALIGN(width, IPU3_UAPI_ISP_VEC_ELEMS / 4);
	obgrid_size = PAGE_ALIGN(width * height *
				 sizeof(struct ipu3_uapi_obgrid_param)) *
				 bi->info.isp.sp.iterator.num_stripes;
	return obgrid_size;
}

/*
 * Resolve a firmware-declared ISP parameter @par into a pointer inside
 * @binary_params, after bounds- and size-checking it against the binary's
 * memory initializer table.  Returns NULL if the parameter does not fit.
 */
void *imgu_css_fw_pipeline_params(struct imgu_css *css, unsigned int pipe,
				  enum imgu_abi_param_class cls,
				  enum imgu_abi_memories mem,
				  struct imgu_fw_isp_parameter *par,
				  size_t par_size, void *binary_params)
{
	struct imgu_fw_info *bi =
		&css->fwp->binary_header[css->pipes[pipe].bindex];

	if (par->offset + par->size >
	    bi->info.isp.sp.mem_initializers.params[cls][mem].size)
		return NULL;

	if (par->size != par_size)
		pr_warn("parameter size doesn't match defined size\n");

	if (par->size < par_size)
		return NULL;

	return binary_params + par->offset;
}

/*
 * Release everything imgu_css_fw_init() acquired: per-binary DMA mappings,
 * the binary array and the firmware blob itself.  Safe to call on a
 * partially-initialized state (also used as the error path of init).
 */
void imgu_css_fw_cleanup(struct imgu_css *css)
{
	struct imgu_device *imgu = dev_get_drvdata(css->dev);

	if (css->binary) {
		unsigned int i;

		for (i = 0; i < css->fwp->file_header.binary_nr; i++)
			imgu_dmamap_free(imgu, &css->binary[i]);
		kfree(css->binary);
	}
	if (css->fw)
		release_firmware(css->fw);

	css->binary = NULL;
	css->fw = NULL;
}

/*
 * Load and validate the IMGU firmware, then DMA-map every contained binary.
 *
 * The firmware blob is fully untrusted input: every offset, size and count
 * read from it is bounds-checked against css->fw->size (or iomem_length for
 * device addresses) before use — any failed check rejects the whole blob
 * with -ENODEV via the bad_fw label.
 */
int imgu_css_fw_init(struct imgu_css *css)
{
	static const u32 BLOCK_MAX = 65536;
	struct imgu_device *imgu = dev_get_drvdata(css->dev);
	struct device *dev = css->dev;
	unsigned int i, j, binary_nr;
	int r;

	/* Prefer the dated firmware name; fall back to the generic one. */
	r = request_firmware(&css->fw, IMGU_FW_NAME_20161208, css->dev);
	if (r == -ENOENT)
		r = request_firmware(&css->fw, IMGU_FW_NAME, css->dev);
	if (r)
		return r;

	/* Check and display fw header info */

	css->fwp = (struct imgu_fw_header *)css->fw->data;
	if (css->fw->size < struct_size(css->fwp, binary_header, 1) ||
	    css->fwp->file_header.h_size != sizeof(struct imgu_fw_bi_file_h))
		goto bad_fw;
	if (struct_size(css->fwp, binary_header,
			css->fwp->file_header.binary_nr) > css->fw->size)
		goto bad_fw;

	dev_info(dev, "loaded firmware version %.64s, %u binaries, %zu bytes\n",
		 css->fwp->file_header.version, css->fwp->file_header.binary_nr,
		 css->fw->size);

	/* Validate and display info on fw binaries */

	binary_nr = css->fwp->file_header.binary_nr;

	css->fw_bl = -1;
	css->fw_sp[0] = -1;
	css->fw_sp[1] = -1;

	for (i = 0; i < binary_nr; i++) {
		struct imgu_fw_info *bi = &css->fwp->binary_header[i];
		const char *name = (void *)css->fwp + bi->blob.prog_name_offset;
		size_t len;

		if (bi->blob.prog_name_offset >= css->fw->size)
			goto bad_fw;

		len = strnlen(name, css->fw->size - bi->blob.prog_name_offset);
		if (len + 1 > css->fw->size - bi->blob.prog_name_offset ||
		    len + 1 >= IMGU_ABI_MAX_BINARY_NAME)
			goto bad_fw;

		if (bi->blob.size != bi->blob.text_size + bi->blob.icache_size
		    + bi->blob.data_size + bi->blob.padding_size)
			goto bad_fw;
		if (bi->blob.offset + bi->blob.size > css->fw->size)
			goto bad_fw;

		if (bi->type == IMGU_FW_BOOTLOADER_FIRMWARE) {
			css->fw_bl = i;
			if (bi->info.bl.sw_state >= css->iomem_length ||
			    bi->info.bl.num_dma_cmds >= css->iomem_length ||
			    bi->info.bl.dma_cmd_list >= css->iomem_length)
				goto bad_fw;
		}
		if (bi->type == IMGU_FW_SP_FIRMWARE ||
		    bi->type == IMGU_FW_SP1_FIRMWARE) {
			css->fw_sp[bi->type == IMGU_FW_SP_FIRMWARE ? 0 : 1] = i;
			if (bi->info.sp.per_frame_data >= css->iomem_length ||
			    bi->info.sp.init_dmem_data >= css->iomem_length ||
			    bi->info.sp.host_sp_queue >= css->iomem_length ||
			    bi->info.sp.isp_started >= css->iomem_length ||
			    bi->info.sp.sw_state >= css->iomem_length ||
			    bi->info.sp.sleep_mode >= css->iomem_length ||
			    bi->info.sp.invalidate_tlb >= css->iomem_length ||
			    bi->info.sp.host_sp_com >= css->iomem_length ||
			    bi->info.sp.output + 12 >= css->iomem_length ||
			    bi->info.sp.host_sp_queues_initialized >=
			    css->iomem_length)
				goto bad_fw;
		}
		if (bi->type != IMGU_FW_ISP_FIRMWARE)
			continue;

		if (bi->info.isp.sp.pipeline.mode >= IPU3_CSS_PIPE_ID_NUM)
			goto bad_fw;

		if (bi->info.isp.sp.iterator.num_stripes >
		    IPU3_UAPI_MAX_STRIPES)
			goto bad_fw;

		if (bi->info.isp.num_vf_formats > IMGU_ABI_FRAME_FORMAT_NUM ||
		    bi->info.isp.num_output_formats > IMGU_ABI_FRAME_FORMAT_NUM)
			goto bad_fw;

		for (j = 0; j < bi->info.isp.num_output_formats; j++)
			if (bi->info.isp.output_formats[j] >=
			    IMGU_ABI_FRAME_FORMAT_NUM)
				goto bad_fw;
		for (j = 0; j < bi->info.isp.num_vf_formats; j++)
			if (bi->info.isp.vf_formats[j] >=
			    IMGU_ABI_FRAME_FORMAT_NUM)
				goto bad_fw;

		if (bi->info.isp.sp.block.block_width <= 0 ||
		    bi->info.isp.sp.block.block_width > BLOCK_MAX ||
		    bi->info.isp.sp.block.output_block_height <= 0 ||
		    bi->info.isp.sp.block.output_block_height > BLOCK_MAX)
			goto bad_fw;

		if (bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM]
		    + sizeof(struct imgu_fw_param_memory_offsets) >
		    css->fw->size ||
		    bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_CONFIG]
		    + sizeof(struct imgu_fw_config_memory_offsets) >
		    css->fw->size ||
		    bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_STATE]
		    + sizeof(struct imgu_fw_state_memory_offsets) >
		    css->fw->size)
			goto bad_fw;

		imgu_css_fw_show_binary(dev, bi, name);
	}

	/* Bootloader and both SP binaries are mandatory. */
	if (css->fw_bl == -1 || css->fw_sp[0] == -1 || css->fw_sp[1] == -1)
		goto bad_fw;

	/* Allocate and map fw binaries into IMGU */

	css->binary = kcalloc(binary_nr, sizeof(*css->binary), GFP_KERNEL);
	if (!css->binary) {
		r = -ENOMEM;
		goto error_out;
	}

	for (i = 0; i < css->fwp->file_header.binary_nr; i++) {
		struct imgu_fw_info *bi = &css->fwp->binary_header[i];
		void *blob = (void *)css->fwp + bi->blob.offset;
		size_t size = bi->blob.size;

		if (!imgu_dmamap_alloc(imgu, &css->binary[i], size)) {
			r = -ENOMEM;
			goto error_out;
		}
		memcpy(css->binary[i].vaddr, blob, size);
	}

	return 0;

bad_fw:
	dev_err(dev, "invalid firmware binary, size %u\n", (int)css->fw->size);
	r = -ENODEV;

error_out:
	imgu_css_fw_cleanup(css);

	return r;
}
linux-master
drivers/staging/media/ipu3/ipu3-css-fw.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Intel Corporation
 * Copyright 2018 Google LLC.
 *
 * Author: Tomasz Figa <[email protected]>
 * Author: Yong Zhi <[email protected]>
 */

#include <linux/vmalloc.h>

#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-mmu.h"
#include "ipu3-dmamap.h"

/*
 * Free a buffer allocated by imgu_dmamap_alloc_buffer()
 */
static void imgu_dmamap_free_buffer(struct page **pages,
				    size_t size)
{
	int count = size >> PAGE_SHIFT;

	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

/*
 * Based on the implementation of __iommu_dma_alloc_pages()
 * defined in drivers/iommu/dma-iommu.c
 *
 * Allocates @size bytes as an array of individual (order-0) pages,
 * opportunistically grabbing higher-order chunks first and splitting
 * them, to improve physical contiguity without requiring it.
 */
static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, count = size >> PAGE_SHIFT;
	unsigned int order_mask = 1;
	const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;

	/* Allocate mem for array of page ptrs */
	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);

	if (!pages)
		return NULL;

	gfp |= __GFP_HIGHMEM | __GFP_ZERO;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Try orders from the largest that still fits @count down to
		 * order 0; each failed order is cleared from the mask.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			/* Higher orders are best-effort: no warn, no retry. */
			page = alloc_pages((order_mask - order_size) ?
					   gfp | high_order_gfp : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			}

			__free_pages(page, order);
		}
		if (!page) {
			imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}

	return pages;
}

/**
 * imgu_dmamap_alloc - allocate and map a buffer into KVA
 * @imgu: struct device pointer
 * @map: struct to store mapping variables
 * @len: size required
 *
 * Returns:
 *  KVA on success
 *  %NULL on failure
 */
void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
			size_t len)
{
	unsigned long shift = iova_shift(&imgu->iova_domain);
	struct device *dev = &imgu->pci_dev->dev;
	size_t size = PAGE_ALIGN(len);
	int count = size >> PAGE_SHIFT;
	struct page **pages;
	dma_addr_t iovaddr;
	struct iova *iova;
	int i, rval;

	dev_dbg(dev, "%s: allocating %zu\n", __func__, size);

	iova = alloc_iova(&imgu->iova_domain, size >> shift,
			  imgu->mmu->aperture_end >> shift, 0);
	if (!iova)
		return NULL;

	pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL);
	if (!pages)
		goto out_free_iova;

	/* Call IOMMU driver to setup pgt */
	iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
	for (i = 0; i < count; ++i) {
		rval = imgu_mmu_map(imgu->mmu, iovaddr,
				    page_to_phys(pages[i]), PAGE_SIZE);
		if (rval)
			goto out_unmap;

		iovaddr += PAGE_SIZE;
	}

	/* Make the buffer CPU-addressable as one contiguous VA range. */
	map->vaddr = vmap(pages, count, VM_USERMAP, PAGE_KERNEL);
	if (!map->vaddr)
		goto out_unmap;

	map->pages = pages;
	map->size = size;
	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);

	dev_dbg(dev, "%s: allocated %zu @ IOVA %pad @ VA %p\n", __func__,
		size, &map->daddr, map->vaddr);

	return map->vaddr;

out_unmap:
	/* Unwind: only the first i pages were mapped when we got here. */
	imgu_dmamap_free_buffer(pages, size);
	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
		       i * PAGE_SIZE);

out_free_iova:
	__free_iova(&imgu->iova_domain, iova);

	return NULL;
}

/*
 * Tear down the MMU mapping and IOVA reservation for @map, looked up by
 * its DMA address.  Does not free the backing pages or the CPU mapping.
 */
void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map)
{
	struct iova *iova;

	iova = find_iova(&imgu->iova_domain,
			 iova_pfn(&imgu->iova_domain, map->daddr));
	if (WARN_ON(!iova))
		return;

	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
		       iova_size(iova) << iova_shift(&imgu->iova_domain));

	__free_iova(&imgu->iova_domain, iova);
}

/*
 * Counterpart of imgu_dmamap_alloc
 */
void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map)
{
	dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
		__func__, map->size, &map->daddr, map->vaddr);

	if (!map->vaddr)
		return;

	imgu_dmamap_unmap(imgu, map);

	vunmap(map->vaddr);
	imgu_dmamap_free_buffer(map->pages, map->size);
	map->vaddr = NULL;
}

/*
 * Map an already-allocated scatterlist into the IMGU IOVA space.  All
 * entries except the last must be page-aligned and a whole number of
 * pages; on success only @map->daddr and @map->size are filled in (no
 * CPU mapping is created).
 */
int imgu_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
		       int nents, struct imgu_css_map *map)
{
	unsigned long shift = iova_shift(&imgu->iova_domain);
	struct scatterlist *sg;
	struct iova *iova;
	size_t size = 0;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		if (sg->offset)
			return -EINVAL;

		if (i != nents - 1 && !PAGE_ALIGNED(sg->length))
			return -EINVAL;

		size += sg->length;
	}

	size = iova_align(&imgu->iova_domain, size);
	dev_dbg(&imgu->pci_dev->dev, "dmamap: mapping sg %d entries, %zu pages\n",
		nents, size >> shift);

	iova = alloc_iova(&imgu->iova_domain, size >> shift,
			  imgu->mmu->aperture_end >> shift, 0);
	if (!iova)
		return -ENOMEM;

	dev_dbg(&imgu->pci_dev->dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
		iova->pfn_lo, iova->pfn_hi);

	/* A short mapping means failure; imgu_mmu_map_sg already unwound. */
	if (imgu_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
			    sglist, nents) < size)
		goto out_fail;

	memset(map, 0, sizeof(*map));
	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
	map->size = size;

	return 0;

out_fail:
	__free_iova(&imgu->iova_domain, iova);

	return -EFAULT;
}

/*
 * Initialize the per-device IOVA domain covering the MMU aperture,
 * with a granule equal to the IPU3 hardware page size.
 */
int imgu_dmamap_init(struct imgu_device *imgu)
{
	unsigned long order, base_pfn;
	int ret = iova_cache_get();

	if (ret)
		return ret;

	order = __ffs(IPU3_PAGE_SIZE);
	base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
	init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);

	return 0;
}

/* Counterpart of imgu_dmamap_init(). */
void imgu_dmamap_exit(struct imgu_device *imgu)
{
	put_iova_domain(&imgu->iova_domain);
	iova_cache_put();
}
linux-master
drivers/staging/media/ipu3/ipu3-dmamap.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 - 2018 Intel Corporation
 * Copyright 2017 Google LLC
 *
 * Based on Intel IPU4 driver.
 *
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include "ipu3.h"
#include "ipu3-dmamap.h"
#include "ipu3-mmu.h"

#define IMGU_PCI_ID			0x1919
#define IMGU_PCI_BAR			0
#define IMGU_DMA_MASK			DMA_BIT_MASK(39)
/* Number of dummy buffers kept per CSS queue */
#define IMGU_MAX_QUEUE_DEPTH		(2 + 2)

/*
 * pre-allocated buffer size for IMGU dummy buffers. Those
 * values should be tuned to big enough to avoid buffer
 * re-allocation when streaming to lower streaming latency.
 */
#define CSS_QUEUE_IN_BUF_SIZE		0
#define CSS_QUEUE_PARAMS_BUF_SIZE	0
#define CSS_QUEUE_OUT_BUF_SIZE		(4160 * 3120 * 12 / 8)
#define CSS_QUEUE_VF_BUF_SIZE		(1920 * 1080 * 12 / 8)
#define CSS_QUEUE_STAT_3A_BUF_SIZE	sizeof(struct ipu3_uapi_stats_3a)

/* Per-CSS-queue pre-allocation size for the dummy buffers above */
static const size_t css_queue_buf_size_map[IPU3_CSS_QUEUES] = {
	[IPU3_CSS_QUEUE_IN] = CSS_QUEUE_IN_BUF_SIZE,
	[IPU3_CSS_QUEUE_PARAMS] = CSS_QUEUE_PARAMS_BUF_SIZE,
	[IPU3_CSS_QUEUE_OUT] = CSS_QUEUE_OUT_BUF_SIZE,
	[IPU3_CSS_QUEUE_VF] = CSS_QUEUE_VF_BUF_SIZE,
	[IPU3_CSS_QUEUE_STAT_3A] = CSS_QUEUE_STAT_3A_BUF_SIZE,
};

/* Mapping from V4L2 video node index to (CSS queue, node name) */
static const struct imgu_node_mapping imgu_node_map[IMGU_NODE_NUM] = {
	[IMGU_NODE_IN] = {IPU3_CSS_QUEUE_IN, "input"},
	[IMGU_NODE_PARAMS] = {IPU3_CSS_QUEUE_PARAMS, "parameters"},
	[IMGU_NODE_OUT] = {IPU3_CSS_QUEUE_OUT, "output"},
	[IMGU_NODE_VF] = {IPU3_CSS_QUEUE_VF, "viewfinder"},
	[IMGU_NODE_STAT_3A] = {IPU3_CSS_QUEUE_STAT_3A, "3a stat"},
};

/* Translate a video node index into its CSS queue index. */
unsigned int imgu_node_to_queue(unsigned int node)
{
	return imgu_node_map[node].css_queue;
}

/*
 * Find the video node that serves @css_queue.
 * Returns IMGU_NODE_NUM when no node matches (the loop falls through).
 * @imgu is not used by the lookup itself.
 */
unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
{
	unsigned int i;

	for (i = 0; i < IMGU_NODE_NUM; i++)
		if (imgu_node_map[i].css_queue == css_queue)
			break;

	return i;
}

/**************** Dummy buffers ****************/

/* Free the dummy DMA mapping of every CSS queue of @pipe. */
static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IPU3_CSS_QUEUES; i++)
		imgu_dmamap_free(imgu, &imgu_pipe->queues[i].dmap);
}

/*
 * Pre-allocate one dummy DMA buffer per optional CSS queue of @pipe,
 * sized from css_queue_buf_size_map. On any failure all allocations
 * made so far for the pipe are released and -ENOMEM is returned.
 */
static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
				      unsigned int pipe)
{
	unsigned int i;
	size_t size;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
		size = css_queue_buf_size_map[i];
		/*
		 * Do not enable dummy buffers for master queue,
		 * always require that real buffers from user are
		 * available.
		 */
		if (i == IMGU_QUEUE_MASTER || size == 0)
			continue;

		if (!imgu_dmamap_alloc(imgu, &imgu_pipe->queues[i].dmap,
				       size)) {
			imgu_dummybufs_cleanup(imgu, pipe);
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Resize each enabled queue's dummy buffer to the currently negotiated
 * format size and initialize the per-queue ring of IMGU_MAX_QUEUE_DEPTH
 * dummy CSS buffers, all sharing the queue's single DMA address.
 */
static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
{
	const struct v4l2_pix_format_mplane *mpix;
	const struct v4l2_meta_format *meta;
	unsigned int i, k, node;
	size_t size;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	/* Allocate a dummy buffer for each queue where buffer is optional */
	for (i = 0; i < IPU3_CSS_QUEUES; i++) {
		node = imgu_map_node(imgu, i);
		if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
			continue;

		if (!imgu_pipe->nodes[IMGU_NODE_VF].enabled &&
		    i == IPU3_CSS_QUEUE_VF)
			/*
			 * Do not enable dummy buffers for VF if it is not
			 * requested by the user.
			 */
			continue;

		meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
		mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;

		/* Metadata nodes size from buffersize, video nodes from plane 0 */
		if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
			size = meta->buffersize;
		else
			size = mpix->plane_fmt[0].sizeimage;

		if (imgu_css_dma_buffer_resize(imgu,
					       &imgu_pipe->queues[i].dmap,
					       size)) {
			imgu_dummybufs_cleanup(imgu, pipe);
			return -ENOMEM;
		}

		for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
			imgu_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
					  imgu_pipe->queues[i].dmap.daddr);
	}

	return 0;
}

/* May be called from atomic context */
static struct imgu_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
						  int queue, unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	/* dummybufs are not allocated for master q */
	if (queue == IPU3_CSS_QUEUE_IN)
		return NULL;

	if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
		/* Buffer should not be allocated here */
		return NULL;

	/* Pick the first dummy buffer not currently queued to CSS */
	for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
		if (imgu_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
		    IPU3_CSS_BUFFER_QUEUED)
			break;

	if (i == IMGU_MAX_QUEUE_DEPTH)
		return NULL;

	imgu_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
			  imgu_pipe->queues[queue].dmap.daddr);

	return &imgu_pipe->queues[queue].dummybufs[i];
}

/* Check if given buffer is a dummy buffer */
static bool imgu_dummybufs_check(struct imgu_device *imgu,
				 struct imgu_css_buffer *buf,
				 unsigned int pipe)
{
	unsigned int i;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
		if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
			break;

	return i < IMGU_MAX_QUEUE_DEPTH;
}

/* Complete a vb2 buffer while holding the device lock. */
static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
			     enum vb2_buffer_state state)
{
	mutex_lock(&imgu->lock);
	imgu_v4l2_buffer_done(vb, state);
	mutex_unlock(&imgu->lock);
}

/*
 * Return the first NEW (unqueued) user buffer of @node, falling back to
 * a dummy buffer; NULL when neither is available.
 */
static struct imgu_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
						 unsigned int node,
						 unsigned int pipe)
{
	struct imgu_buffer *buf;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	if (WARN_ON(node >= IMGU_NODE_NUM))
		return NULL;

	/* Find first free buffer from the node */
	list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers,
			    vid_buf.list) {
		if (imgu_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
			return &buf->css_buf;
	}

	/* There were no free buffers, try to return a dummy buffer */
	return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
}

/*
 * Queue as many buffers to CSS as possible. If all buffers don't fit into
 * CSS buffer queues, they remain unqueued and will be queued later.
 */
int imgu_queue_buffers(struct imgu_device *imgu, bool initial,
		       unsigned int pipe)
{
	unsigned int node;
	int r = 0;
	struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];

	if (!imgu_css_is_streaming(&imgu->css))
		return 0;

	dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
	mutex_lock(&imgu->lock);

	if (!imgu_css_pipe_queue_empty(&imgu->css, pipe)) {
		mutex_unlock(&imgu->lock);
		return 0;
	}

	/* Buffer set is queued to FW only when input buffer is ready */
	for (node = IMGU_NODE_NUM - 1;
	     imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
	     node = node ? node - 1 : IMGU_NODE_NUM - 1) {
		if (node == IMGU_NODE_VF &&
		    !imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
			dev_warn(&imgu->pci_dev->dev,
				 "Vf not enabled, ignore queue");
			continue;
		} else if (node == IMGU_NODE_PARAMS &&
			   imgu_pipe->nodes[node].enabled) {
			struct vb2_buffer *vb;
			struct imgu_vb2_buffer *ivb;

			/* No parameters for this frame */
			if (list_empty(&imgu_pipe->nodes[node].buffers))
				continue;

			/*
			 * Parameter buffers are consumed immediately:
			 * copied into CSS and returned to userspace here.
			 */
			ivb = list_first_entry(&imgu_pipe->nodes[node].buffers,
					       struct imgu_vb2_buffer, list);
			list_del(&ivb->list);
			vb = &ivb->vbb.vb2_buf;
			r = imgu_css_set_parameters(&imgu->css, pipe,
						    vb2_plane_vaddr(vb, 0));
			if (r) {
				vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
				dev_warn(&imgu->pci_dev->dev,
					 "set parameters failed.");
				continue;
			}

			vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
			dev_dbg(&imgu->pci_dev->dev,
				"queue user parameters %d to css.", vb->index);
		} else if (imgu_pipe->queue_enabled[node]) {
			struct imgu_css_buffer *buf =
				imgu_queue_getbuf(imgu, node, pipe);
			struct imgu_buffer *ibuf = NULL;
			bool dummy;

			if (!buf)
				break;

			r = imgu_css_buf_queue(&imgu->css, pipe, buf);
			if (r)
				break;
			dummy = imgu_dummybufs_check(imgu, buf, pipe);
			if (!dummy)
				ibuf = container_of(buf, struct imgu_buffer,
						    css_buf);
			dev_dbg(&imgu->pci_dev->dev,
				"queue %s %s buffer %u to css da: 0x%08x\n",
				dummy ? "dummy" : "user",
				imgu_node_map[node].name,
				dummy ? 0 : ibuf->vid_buf.vbb.vb2_buf.index,
				(u32)buf->daddr);
		}
	}
	mutex_unlock(&imgu->lock);

	if (r && r != -EBUSY)
		goto failed;

	return 0;

failed:
	/*
	 * On error, mark all buffers as failed which are not
	 * yet queued to CSS
	 */
	dev_err(&imgu->pci_dev->dev,
		"failed to queue buffer to CSS on queue %i (%d)\n",
		node, r);

	if (initial)
		/* If we were called from streamon(), no need to finish bufs */
		return r;

	for (node = 0; node < IMGU_NODE_NUM; node++) {
		struct imgu_buffer *buf, *buf0;

		if (!imgu_pipe->queue_enabled[node])
			continue;	/* Skip disabled queues */

		mutex_lock(&imgu->lock);
		list_for_each_entry_safe(buf, buf0,
					 &imgu_pipe->nodes[node].buffers,
					 vid_buf.list) {
			if (imgu_css_buf_state(&buf->css_buf) ==
			    IPU3_CSS_BUFFER_QUEUED)
				continue;	/* Was already queued, skip */

			imgu_v4l2_buffer_done(&buf->vid_buf.vbb.vb2_buf,
					      VB2_BUF_STATE_ERROR);
		}
		mutex_unlock(&imgu->lock);
	}

	return r;
}

/*
 * Power up CSS at a frequency chosen from the negotiated input size,
 * then resume the IMGU MMU. Returns 0 or the CSS powerup error.
 */
static int imgu_powerup(struct imgu_device *imgu)
{
	int r;
	unsigned int pipe;
	unsigned int freq = 200;
	struct v4l2_mbus_framefmt *fmt;

	/* input larger than 2048*1152, ask imgu to work on high freq */
	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		fmt = &imgu->imgu_pipe[pipe].nodes[IMGU_NODE_IN].pad_fmt;
		dev_dbg(&imgu->pci_dev->dev, "pipe %u input format = %ux%u",
			pipe, fmt->width, fmt->height);
		if ((fmt->width * fmt->height) >= (2048 * 1152))
			freq = 450;
	}

	r = imgu_css_set_powerup(&imgu->pci_dev->dev, imgu->base, freq);
	if (r)
		return r;

	imgu_mmu_resume(imgu->mmu);
	return 0;
}

/* Counterpart of imgu_powerup(): suspend MMU, then power down CSS. */
static void imgu_powerdown(struct imgu_device *imgu)
{
	imgu_mmu_suspend(imgu->mmu);
	imgu_css_set_powerdown(&imgu->pci_dev->dev, imgu->base);
}

/*
 * Start (@enable != 0) or stop streaming for the whole device:
 * runtime PM, CSS power, CSS streaming state, dummy buffers and
 * initial buffer queueing for every enabled pipe.
 */
int imgu_s_stream(struct imgu_device *imgu, int enable)
{
	struct device *dev = &imgu->pci_dev->dev;
	int r, pipe;

	if (!enable) {
		/* Stop streaming */
		dev_dbg(dev, "stream off\n");
		/* Block new buffers to be queued to CSS. */
		atomic_set(&imgu->qbuf_barrier, 1);
		imgu_css_stop_streaming(&imgu->css);
		/* Let the in-flight threaded ISR finish before powerdown */
		synchronize_irq(imgu->pci_dev->irq);
		atomic_set(&imgu->qbuf_barrier, 0);
		imgu_powerdown(imgu);
		pm_runtime_put(&imgu->pci_dev->dev);

		return 0;
	}

	/* Set Power */
	r = pm_runtime_resume_and_get(dev);
	if (r < 0) {
		dev_err(dev, "failed to set imgu power\n");
		return r;
	}

	r = imgu_powerup(imgu);
	if (r) {
		dev_err(dev, "failed to power up imgu\n");
		pm_runtime_put(dev);
		return r;
	}

	/* Start CSS streaming */
	r = imgu_css_start_streaming(&imgu->css);
	if (r) {
		dev_err(dev, "failed to start css streaming (%d)", r);
		goto fail_start_streaming;
	}

	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		/* Initialize dummy buffers */
		r = imgu_dummybufs_init(imgu, pipe);
		if (r) {
			dev_err(dev, "failed to initialize dummy buffers (%d)",
				r);
			goto fail_dummybufs;
		}

		/* Queue as many buffers from queue as possible */
		r = imgu_queue_buffers(imgu, true, pipe);
		if (r) {
			dev_err(dev, "failed to queue initial buffers (%d)",
				r);
			goto fail_queueing;
		}
	}

	return 0;

fail_queueing:
	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
		imgu_dummybufs_cleanup(imgu, pipe);
fail_dummybufs:
	imgu_css_stop_streaming(&imgu->css);
fail_start_streaming:
	pm_runtime_put(dev);

	return r;
}

/* Tear down dummy buffers of every pipe and unregister the V4L2 nodes. */
static void imgu_video_nodes_exit(struct imgu_device *imgu)
{
	int i;

	for (i = 0; i < IMGU_MAX_PIPE_NUM; i++)
		imgu_dummybufs_cleanup(imgu, i);

	imgu_v4l2_unregister(imgu);
}

/*
 * Initialize per-pipe node metadata, register the V4L2 devices, set the
 * initial formats and pre-allocate dummy buffers. On failure everything
 * registered so far is unwound via imgu_video_nodes_exit().
 */
static int imgu_video_nodes_init(struct imgu_device *imgu)
{
	struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
	struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
	struct imgu_media_pipe *imgu_pipe;
	unsigned int i, j;
	int r;

	imgu->buf_struct_size = sizeof(struct imgu_buffer);

	for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
		imgu_pipe = &imgu->imgu_pipe[j];
		for (i = 0; i < IMGU_NODE_NUM; i++) {
			imgu_pipe->nodes[i].name = imgu_node_map[i].name;
			imgu_pipe->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
			imgu_pipe->nodes[i].enabled = false;

			/* PARAMS and STAT_3A are meta nodes, no pix format */
			if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
				fmts[imgu_node_map[i].css_queue] =
					&imgu_pipe->nodes[i].vdev_fmt.fmt.pix_mp;
			atomic_set(&imgu_pipe->nodes[i].sequence, 0);
		}
	}

	r = imgu_v4l2_register(imgu);
	if (r)
		return r;

	/* Set initial formats and initialize formats of video nodes */
	for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
		imgu_pipe = &imgu->imgu_pipe[j];

		rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
		rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
		imgu_css_fmt_set(&imgu->css, fmts, rects, j);

		/* Pre-allocate dummy buffers */
		r = imgu_dummybufs_preallocate(imgu, j);
		if (r) {
			dev_err(&imgu->pci_dev->dev,
				"failed to pre-allocate dummy buffers (%d)", r);
			goto out_cleanup;
		}
	}

	return 0;

out_cleanup:
	imgu_video_nodes_exit(imgu);

	return r;
}

/**************** PCI interface ****************/

/*
 * Threaded IRQ handler: drain all completed buffers from CSS, hand user
 * buffers back to vb2 (dummies are recycled silently), then queue more
 * buffers unless qbuf_barrier blocks it.
 */
static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
{
	struct imgu_device *imgu = imgu_ptr;
	struct imgu_media_pipe *imgu_pipe;
	int p;

	/* Dequeue / queue buffers */
	do {
		u64 ns = ktime_get_ns();
		struct imgu_css_buffer *b;
		struct imgu_buffer *buf = NULL;
		unsigned int node, pipe;
		bool dummy;

		do {
			mutex_lock(&imgu->lock);
			b = imgu_css_buf_dequeue(&imgu->css);
			mutex_unlock(&imgu->lock);
		} while (PTR_ERR(b) == -EAGAIN);

		if (IS_ERR(b)) {
			if (PTR_ERR(b) != -EBUSY)	/* All done */
				dev_err(&imgu->pci_dev->dev,
					"failed to dequeue buffers (%ld)\n",
					PTR_ERR(b));
			break;
		}

		/*
		 * NOTE(review): imgu_map_node() returns IMGU_NODE_NUM on an
		 * unknown queue, which would index imgu_node_map one past the
		 * end below — presumably b->queue is always valid here;
		 * confirm against the CSS layer.
		 */
		node = imgu_map_node(imgu, b->queue);
		pipe = b->pipe;
		dummy = imgu_dummybufs_check(imgu, b, pipe);
		if (!dummy)
			buf = container_of(b, struct imgu_buffer, css_buf);
		dev_dbg(&imgu->pci_dev->dev,
			"dequeue %s %s buffer %d daddr 0x%x from css\n",
			dummy ? "dummy" : "user",
			imgu_node_map[node].name,
			dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index,
			(u32)b->daddr);

		if (dummy)
			/* It was a dummy buffer, skip it */
			continue;

		/* Fill vb2 buffer entries and tell it's ready */
		imgu_pipe = &imgu->imgu_pipe[pipe];
		if (!imgu_pipe->nodes[node].output) {
			buf->vid_buf.vbb.vb2_buf.timestamp = ns;
			buf->vid_buf.vbb.field = V4L2_FIELD_NONE;
			buf->vid_buf.vbb.sequence =
				atomic_inc_return(
				&imgu_pipe->nodes[node].sequence);
			dev_dbg(&imgu->pci_dev->dev, "vb2 buffer sequence %d",
				buf->vid_buf.vbb.sequence);
		}
		imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
				 imgu_css_buf_state(&buf->css_buf) ==
						    IPU3_CSS_BUFFER_DONE ?
						    VB2_BUF_STATE_DONE :
						    VB2_BUF_STATE_ERROR);
		mutex_lock(&imgu->lock);
		if (imgu_css_queue_empty(&imgu->css))
			wake_up_all(&imgu->buf_drain_wq);
		mutex_unlock(&imgu->lock);
	} while (1);

	/*
	 * Try to queue more buffers for CSS.
	 * qbuf_barrier is used to disable new buffers
	 * to be queued to CSS.
	 */
	if (!atomic_read(&imgu->qbuf_barrier))
		for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
			imgu_queue_buffers(imgu, false, p);

	return IRQ_HANDLED;
}

/* Hard IRQ handler: ack the interrupt and defer work to the thread. */
static irqreturn_t imgu_isr(int irq, void *imgu_ptr)
{
	struct imgu_device *imgu = imgu_ptr;

	/* acknowledge interruption */
	if (imgu_css_irq_ack(&imgu->css) < 0)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}

/* Enable MSI, memory access and bus mastering; disable legacy INTx. */
static int imgu_pci_config_setup(struct pci_dev *dev)
{
	u16 pci_command;
	int r = pci_enable_msi(dev);

	if (r) {
		dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
		return r;
	}

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		       PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, pci_command);

	return 0;
}

/*
 * Probe: map BAR 0, set the 39-bit DMA mask, power up CSS, bring up
 * MMU / DMA mapping / CSS / V4L2 nodes in order, request the threaded
 * IRQ, and hand control to runtime PM. Each failure unwinds exactly
 * the steps completed so far (goto-cleanup chain).
 */
static int imgu_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	struct imgu_device *imgu;
	phys_addr_t phys;
	unsigned long phys_len;
	void __iomem *const *iomap;
	int r;

	imgu = devm_kzalloc(&pci_dev->dev, sizeof(*imgu), GFP_KERNEL);
	if (!imgu)
		return -ENOMEM;

	imgu->pci_dev = pci_dev;

	r = pcim_enable_device(pci_dev);
	if (r) {
		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
		return r;
	}

	dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);

	phys = pci_resource_start(pci_dev, IMGU_PCI_BAR);
	phys_len = pci_resource_len(pci_dev, IMGU_PCI_BAR);

	r = pcim_iomap_regions(pci_dev, 1 << IMGU_PCI_BAR, pci_name(pci_dev));
	if (r) {
		dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
		return r;
	}
	dev_info(&pci_dev->dev, "physical base address %pap, %lu bytes\n",
		 &phys, phys_len);

	iomap = pcim_iomap_table(pci_dev);
	if (!iomap) {
		dev_err(&pci_dev->dev, "failed to iomap table\n");
		return -ENODEV;
	}

	imgu->base = iomap[IMGU_PCI_BAR];

	pci_set_drvdata(pci_dev, imgu);

	pci_set_master(pci_dev);

	r = dma_coerce_mask_and_coherent(&pci_dev->dev, IMGU_DMA_MASK);
	if (r) {
		dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
		return -ENODEV;
	}

	r = imgu_pci_config_setup(pci_dev);
	if (r)
		return r;

	mutex_init(&imgu->lock);
	mutex_init(&imgu->streaming_lock);
	atomic_set(&imgu->qbuf_barrier, 0);
	init_waitqueue_head(&imgu->buf_drain_wq);

	r = imgu_css_set_powerup(&pci_dev->dev, imgu->base, 200);
	if (r) {
		dev_err(&pci_dev->dev,
			"failed to power up CSS (%d)\n", r);
		goto out_mutex_destroy;
	}

	imgu->mmu = imgu_mmu_init(&pci_dev->dev, imgu->base);
	if (IS_ERR(imgu->mmu)) {
		r = PTR_ERR(imgu->mmu);
		dev_err(&pci_dev->dev, "failed to initialize MMU (%d)\n", r);
		goto out_css_powerdown;
	}

	r = imgu_dmamap_init(imgu);
	if (r) {
		dev_err(&pci_dev->dev,
			"failed to initialize DMA mapping (%d)\n", r);
		goto out_mmu_exit;
	}

	/* ISP programming */
	r = imgu_css_init(&pci_dev->dev, &imgu->css, imgu->base, phys_len);
	if (r) {
		dev_err(&pci_dev->dev, "failed to initialize CSS (%d)\n", r);
		goto out_dmamap_exit;
	}

	/* v4l2 sub-device registration */
	r = imgu_video_nodes_init(imgu);
	if (r) {
		dev_err(&pci_dev->dev, "failed to create V4L2 devices (%d)\n",
			r);
		goto out_css_cleanup;
	}

	r = devm_request_threaded_irq(&pci_dev->dev, pci_dev->irq, imgu_isr,
				      imgu_isr_threaded, IRQF_SHARED,
				      IMGU_NAME, imgu);
	if (r) {
		dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
		goto out_video_exit;
	}

	pm_runtime_put_noidle(&pci_dev->dev);
	pm_runtime_allow(&pci_dev->dev);

	return 0;

out_video_exit:
	imgu_video_nodes_exit(imgu);
out_css_cleanup:
	imgu_css_cleanup(&imgu->css);
out_dmamap_exit:
	imgu_dmamap_exit(imgu);
out_mmu_exit:
	imgu_mmu_exit(imgu->mmu);
out_css_powerdown:
	imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
out_mutex_destroy:
	mutex_destroy(&imgu->streaming_lock);
	mutex_destroy(&imgu->lock);

	return r;
}

/* Remove: undo probe in reverse order after disabling runtime PM. */
static void imgu_pci_remove(struct pci_dev *pci_dev)
{
	struct imgu_device *imgu = pci_get_drvdata(pci_dev);

	pm_runtime_forbid(&pci_dev->dev);
	pm_runtime_get_noresume(&pci_dev->dev);

	imgu_video_nodes_exit(imgu);
	imgu_css_cleanup(&imgu->css);
	imgu_css_set_powerdown(&pci_dev->dev, imgu->base);
	imgu_dmamap_exit(imgu);
	imgu_mmu_exit(imgu->mmu);
	mutex_destroy(&imgu->streaming_lock);
	mutex_destroy(&imgu->lock);
}

/*
 * System suspend: if streaming, block new buffer queueing, drain all
 * buffers from CSS (1 s timeout), stop streaming and power down.
 * Whether we were streaming is remembered for imgu_resume().
 */
static int __maybe_unused imgu_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct imgu_device *imgu = pci_get_drvdata(pci_dev);

	dev_dbg(dev, "enter %s\n", __func__);
	imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
	if (!imgu->suspend_in_stream)
		goto out;
	/* Block new buffers to be queued to CSS. */
	atomic_set(&imgu->qbuf_barrier, 1);
	/*
	 * Wait for currently running irq handler to be done so that
	 * no new buffers will be queued to fw later.
	 */
	synchronize_irq(pci_dev->irq);
	/* Wait until all buffers in CSS are done. */
	if (!wait_event_timeout(imgu->buf_drain_wq,
	    imgu_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
		dev_err(dev, "wait buffer drain timeout.\n");

	imgu_css_stop_streaming(&imgu->css);
	atomic_set(&imgu->qbuf_barrier, 0);
	imgu_powerdown(imgu);
	pm_runtime_force_suspend(dev);
out:
	dev_dbg(dev, "leave %s\n", __func__);

	return 0;
}

/*
 * System resume: if we suspended mid-stream, power back up, restart
 * CSS streaming and requeue buffers on every enabled pipe.
 */
static int __maybe_unused imgu_resume(struct device *dev)
{
	struct imgu_device *imgu = dev_get_drvdata(dev);
	int r = 0;
	unsigned int pipe;

	dev_dbg(dev, "enter %s\n", __func__);

	if (!imgu->suspend_in_stream)
		goto out;

	pm_runtime_force_resume(dev);

	r = imgu_powerup(imgu);
	if (r) {
		dev_err(dev, "failed to power up imgu\n");
		goto out;
	}

	/* Start CSS streaming */
	r = imgu_css_start_streaming(&imgu->css);
	if (r) {
		dev_err(dev, "failed to resume css streaming (%d)", r);
		goto out;
	}

	for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
		r = imgu_queue_buffers(imgu, true, pipe);
		if (r)
			dev_err(dev, "failed to queue buffers to pipe %d (%d)",
				pipe, r);
	}

out:
	dev_dbg(dev, "leave %s\n", __func__);

	return r;
}

/*
 * PCI rpm framework checks the existence of driver rpm callbacks.
 * Place a dummy callback here to avoid rpm going into error state.
 */
static __maybe_unused int imgu_rpm_dummy_cb(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops imgu_pm_ops = {
	SET_RUNTIME_PM_OPS(&imgu_rpm_dummy_cb, &imgu_rpm_dummy_cb, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(&imgu_suspend, &imgu_resume)
};

static const struct pci_device_id imgu_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, IMGU_PCI_ID) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, imgu_pci_tbl);

static struct pci_driver imgu_pci_driver = {
	.name = IMGU_NAME,
	.id_table = imgu_pci_tbl,
	.probe = imgu_pci_probe,
	.remove = imgu_pci_remove,
	.driver = {
		.pm = &imgu_pm_ops,
	},
};

module_pci_driver(imgu_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen <[email protected]>");
MODULE_AUTHOR("Tianshu Qiu <[email protected]>");
MODULE_AUTHOR("Jian Xu Zheng <[email protected]>");
MODULE_AUTHOR("Yuning Pu <[email protected]>");
MODULE_AUTHOR("Yong Zhi <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel ipu3_imgu PCI driver");
/* --- end of drivers/staging/media/ipu3/ipu3.c (linux-master) --- */